Dataset schema (column, type, min, max):
query_id            stringlengths   32    32
query               stringlengths   9     4.01k
positive_passages   listlengths     1     1
negative_passages   listlengths     88    101
e7a2f3379f168ddfaf0cb2364bead9d1
Parses a fastq file, looking at the first (identifier) and second (sequence) lines of each record only. Outputs a dictionary in which the key is the identifier and the value is the sequence. Useful for downstream processing such as finding all the sequences of a particular length or eliminating sequences with invalid characters.
[ { "docid": "528c9a047aa083bba315d5b80e413a8e", "score": "0.68678826", "text": "def parse_fastq(self, file_name):\t\n\t\tfile = open(file_name)\n\t\tfile_content = file.readlines()\n\t\ti = 0\n\t\twhile i < len(file_content):\n\t\t\tif i % 4 == 0:\n\t\t\t\tself.fastq_dict[file_content[i].strip('\\n')] = file_content[i+1].strip('\\n')\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\ti += 1\n\t\treturn self.fastq_dict", "title": "" } ]
[ { "docid": "a6f7439dcec856bf127c0d3dc25181c0", "score": "0.71526283", "text": "def parse_fastq(fastq):\n with gzip.open(fastq, 'rb') if fastq.endswith('.gz') else open(fastq, 'r') as f:\n counter = 0\n header = ''\n seq = ''\n qual = ''\n fastq_dict = dict()\n\n for line in f:\n counter += 1\n if fastq.endswith('.gz'):\n line = line.decode('ascii') # Convert to ASCII if files were gzipped (binary)\n line = line.rstrip()\n\n if counter == 1: # Header line line\n if not line.startswith('@'): # first line of 4 should always start with \"@\"\n raise Exception('Fastq file invalid.')\n header = line\n elif counter == 2: # Sequence line\n seq = line\n elif counter == 3: # that's the \"+\" line, we can skip it\n pass\n elif counter == 4: # Quality line\n qual = line\n fastq_dict[header] = [seq, qual] # Store the fastq entry in the dictionary\n counter = 0 # Reset counter because each fastq entry in a file always have 4 lines\n\n return fastq_dict", "title": "" }, { "docid": "cd0bd0ba962190156f6bcb23d1001804", "score": "0.6710308", "text": "def _processSingleFile(self,fastq_file):\n\n good_count_dict = {}\n bad_count_dict = {}\n\n get_line = False\n with gzip.open(fastq_file,'r+') as f:\n \n for l in f:\n \n l_ascii = l.decode(\"ascii\")\n\n # If we see @, take the next line\n if l_ascii.startswith(\"@\"):\n get_line = True\n continue\n\n # If get_line is False, continue. Otherwise, set get_liune back\n # to False and move on to do the parsing business.\n if not get_line:\n continue\n else:\n get_line = False\n\n # Translate the sequence\n sequence = self._translate(l_ascii.strip())\n\n # Record it in either the good or bad dict, depending on its\n # quality score\n if self._qualityCheck(sequence):\n\n key = sequence[0:self._seq_length]\n try:\n good_count_dict[key] += 1\n except KeyError:\n good_count_dict[key] = 1\n else:\n try:\n bad_count_dict[sequence] += 1\n except KeyError:\n bad_count_dict[sequence] = 1\n\n return good_count_dict, bad_count_dict", "title": "" }, { "docid": "ff3bb3f01e0520c9ddd0b86b4cd4f961", "score": "0.67086697", "text": "def _fasta_dict_from_file(file_object, header_search='specific'):\n\n current_id = dict()\n current_seq = ''\n current_header = None\n pat = re.compile('>(\\S+)\\s*(.*)')\n header_pat = re.compile(r'(\\w+)\\|(\\w+\\.?\\w*)?')\n\n def parse_header(header, pairs=True):\n keys = header_pat.findall(header)\n header_data = dict()\n for key in keys:\n header_data[key[0]] = key[1]\n # gi -> ProteinGI #, ref -> NP_XXXX\n return header_data\n\n for line in file_object:\n line = line.rstrip()\n m = pat.search(line)\n if m:\n ## new residue line matched, purge the existing one, if not the first\n if current_id:\n ## remove all whitespace and save\n current_seq = ''.join(current_seq.split())\n current_id['sequence'] = current_seq\n yield current_id\n # current_id.clear() # this is actually bad for list comprehensions\n # as it returns empty dictionaries\n\n current_seq = ''\n header = m.group(1)\n if header_search == 'specific':\n current_id = parse_header(header)\n elif header_search == 'generic':\n current_id = dict(header = header)\n current_id['description'] = m.group(2)\n\n else:\n ## python 2.6+ makes string concatenation amortized O(n)\n ## http://stackoverflow.com/a/4435752/1368079\n current_seq += str(line)\n\n ## don't forget the last one\n current_seq = ''.join(current_seq.split())\n current_id['sequence'] = current_seq\n yield current_id", "title": "" }, { "docid": "fe45b3b507d4a81943abc70ab096bfc9", "score": "0.66753894", "text": "def 
fastx2dict(inputFASTX):\n fastxDict = {} #start empty dictionary\n geneID = '' # Start with an empty ID\n sequence = [] # Where the sequence will be stored;If multifastX, the whole sequence will be concatenated.\n for line in open(inputFASTX):\n if line.startswith(\">\") and geneID == '':\n geneID = line.split(' ')[0].strip().replace('>','')\n elif line.startswith(\">\") and geneID != '':\n fastxDict[geneID] = ''.join(sequence)\n geneID = line.split(' ')[0].strip().replace('>','')\n sequence = []\n else:\n sequence.append(line.rstrip())\n fastxDict[geneID] = ''.join(sequence)\n return fastxDict", "title": "" }, { "docid": "0081b78036f1d7cab683457b4e3672b6", "score": "0.65903956", "text": "def Fasta_Parser(File):\n with open(File, 'r') as F:\n Records = {}\n Seq=''\n for Line in F:\n if is_ID(Line) and len(Seq) == 0:\n OTU = Line.strip('>').split(Delim)[0]\n Records[OTU] = FastaRecord(Line)\n elif is_ID(Line) and len(Seq) > 0:\n print Line\n Records[OTU].Seq = Seq\n Records[OTU].SeqLen = len(Seq)\n Records[OTU].SeqGaps = Seq.count('-')\n OTU = Line.strip('>').split(Delim)[0]\n Seq = ''\n Records[OTU]= FastaRecord(Line)\n else:\n Part=Line.replace('\\n','')\n Seq = Seq + Part\n Records[OTU].Seq = Seq\n Records[OTU].SeqLen = len(Seq)\n Records[OTU].SeqGaps = Seq.count('-')\n return Records\n F.close()", "title": "" }, { "docid": "bc558f642184b9fc723f5a90a940dc41", "score": "0.6562945", "text": "def readFastq(filename):\n sequences = []\n qualities = []\n with open(filename, 'r') as f:\n while True: \n f.readline() # skip name line\n seq = f.readline().rstrip()\n f.readline() # skip place holder line \n q = f.readline().rstrip()\n if len(seq) ==0:\n break \n sequences.append(seq)\n qualities.append(q)\n return sequences, qualities", "title": "" }, { "docid": "70419bf60983847bf3ba683de0acbe49", "score": "0.64575166", "text": "def read_sequences(filename=\"Spmap/uniprot-all.fasta\"):\n fh = open(filename, 'r')\n\n sequences = {}\n sequence = ''\n line = fh.readline()\n while line:\n line = line.rstrip('\\n')\n if '>' in line:\n if len(sequence) > 1:\n sequences[seq_id] = sequence\n sequence = ''\n meta = line\n seq_obj = re.search(r'>sp.(\\w*)', meta)\n seq_id = seq_obj.group(1)\n else:\n sequence = sequence + line\n line = fh.readline()\n if len(sequence) > 1:\n sequences[seq_id] = sequence\n\n return sequences", "title": "" }, { "docid": "29d47879d1d846fbaf896b20533bc3c6", "score": "0.64513814", "text": "def read_fasta(fasta_file):\n\n seq_dict = dict() # Declare a new dictionary\n\n with open(fasta_file,'r') as f:\n lines = f.readlines()\n defline = \"\"\n for li in lines:\n li = li.rstrip() # remove newlines\n if '>' in li:\n defline = li.replace('>','') # if i use 'id' it is blue; why?\n seq_dict[defline] = \"\"\n else:\n li = li.upper() # just to clean up sequence\n seq_dict[defline] += li\n\n return seq_dict", "title": "" }, { "docid": "890063b224cffb87d36eeb080fcb9c73", "score": "0.6395008", "text": "def fastq_parser(file):\n with open(file) as fastq:\n reads = []\n header = None\n for line in fastq:\n if line.startswith(\"@\"):\n try:\n qual = list(map(lambda x: ord(x) - 33, qual))\n reads.append(Read(header, seq, qual))\n except NameError:\n pass\n qual_zone = False\n seq, qual = \"\", \"\"\n header = line.strip()\n elif line.strip() == \"+\":\n qual_zone = True\n elif qual_zone:\n qual += line.strip()\n else:\n seq += line.strip()\n \n qual = list(map(lambda x: ord(x) - 33, qual))\n reads.append(Read(header, seq, qual))\n return reads", "title": "" }, { "docid": 
"acecf03fe2bb39100535caf64abf40f8", "score": "0.63512164", "text": "def parse_fasta(fasta_fpath: Path) -> Dict[str, Seq]:\n with fasta_fpath.open() as f:\n contig_seq: Dict[str, Seq] = dict(\n (seq_record.id, seq_record.seq) for seq_record in SeqIO.parse(f, \"fasta\")\n )\n\n return contig_seq", "title": "" }, { "docid": "b41917eec49457556cad64b43a450333", "score": "0.6344387", "text": "def get_sequence_positions(fasta_file):\n dict_pos = dict()\n with open(fasta_file, \"r\") as fasta:\n for l in fasta:\n if l.startswith(\">\"):\n regex = re.search(\":c?([0-9]+)-([0-9]+)\", l)\n dict_pos.setdefault(int(regex.group(2)), int(regex.group(1)))\n return dict_pos", "title": "" }, { "docid": "d648874ce1cc7388125f2bfbee3841ce", "score": "0.6290508", "text": "def read_fasta( filename, unique = False ):\n print(\"[INFO] Reading %s, Are collecting only unique: %s\" % (filename, unique))\n if unique == True:\n seqsDict = defaultdict( set )\n else:\n seqsDict = defaultdict(list)\n with open( filename, \"r\") as f:\n text = f.read()\n blocks = text.split('>')\n for b in blocks:\n data = filter(None, b.split('\\n'))\n if data:\n header, seq = data[0], data[1:]\n if unique == True:\n seqsDict[header].add(\"\".join(seq))\n else:\n seqsDict[header].append(\"\".join(seq))\n return seqsDict", "title": "" }, { "docid": "fbad5991a3894680e3e31659409ca796", "score": "0.6273333", "text": "def parse_fasta(InputFile):\n seqs = {}\n with open(InputFile) as InFile:\n for line in InFile:\n if not line.strip():\n continue\n if line.startswith('>'):\n label = line.strip()[1:].split()[0]\n seqs[label] = \"\"\n else:\n seqs[label] += line.strip()\n return seqs", "title": "" }, { "docid": "a002313d5713723ecadfafcb7e22233b", "score": "0.6257109", "text": "def getPairs(r1,r2):\n\timport re\n\n\timport sys\n\n\t# set the counters to initial values\n\tcounter = 0\n\ttmp=0\n\theader=\"NA\"\n\n\tfile1 = r1\n\tfile2 = r2\n\t\n\t#check if files are gzipped\n\tif file1.split('.')[-1] in ['gz','gzip']:\n\t\timport gzip\n\t\tfile1 = gzip.open(file1)\n\telse: file1 = open(file1,'r')\n\tif file2.split('.')[-1] in ['gz','gzip']:\n\t\timport gzip\n\t\tfile2 = gzip.open(file2)\n\telse: file2 = open(file2,'r')\n\n\t# itarate through fastq files and return readpairs\n\tfor r1line in file1:\n\t\tr2line = file2.readline() #get line from read2 file\n\t\t\n\t\ttmp+=1 # increment linecount\n\t\t\n\t\t# depending on line number (within entry) do ...\t\n\t\tif tmp == 1: #header check match between files\n\t\t\tcounter+=1 # increase entry counter\n\t\t\theader=r1line\n\t\t\tif r1line.split(\" \")[0] != r2line.split(\" \")[0]:\n\t\t\t\tsys.stderr.write('Error mismatching headers!');\n\t\t\t\traise ValueError\n\t\telif tmp == 2: # sequence check that its DNA and save sequences till later\n\t\t\tif counter == 1:\n\t\t\t\tsys.stderr.write('Checking data type of read 1 in pair 1 ... 
')\n\t\t\t\tmatch = re.match(\"^[AGTCN]+$\",r1line.rstrip())\n\t\t\t\tif match: sys.stderr.write('this is DNA data.\\n')\n\t\t\t\telse:\n\t\t\t\t\tsys.stderr.write(' this is not a DNA sequence ('+r1line.rstrip()+') could something be wrong with your fastq file?.\\n\\n');\n\t\t\t\t\traise ValueError\n\t\t\tr1seq = r1line.rstrip()\n\t\t\tr2seq = r2line.rstrip()\n\t\telif tmp == 3: # \"+\"-line do some format check\n\t\t\t\tif counter in {1:True,67:True,438:True,9675:True,53678:True,864513:True,1337354:True,317955:True,1226844:True,20389:True,118261:True}:\n\t\t\t\t\tif r1line[0] != r2line[0] or r1line[0] != '+': sys.stderr.write('Error Format not fastq!');raise ValueError#os.kill(MASTER);sys.exit(1);#REALLYNOTOPTIMAL\n\t\telif tmp == 4: # quality values and end of entry, reset counter and yeild readpair\n\t\t\t\ttmp=0 # reset line counter\n\t\t\t\tr1qual = r1line.rstrip() #get qual strings\n\t\t\t\tr2qual = r2line.rstrip()\n\t\t\t\t#if counter == 8000:break\n\t\t\t\tyield readpair(header.rstrip(), read(header.rstrip(),r1seq,r1qual), read(header.rstrip(),r2seq,r2qual))", "title": "" }, { "docid": "f1992a864a00a324d359a943b9db3200", "score": "0.6253005", "text": "def read_fasta_file(filename):\n sequences_lines = {}\n current_sequence_lines = None\n with open(filename) as fp:\n for line in fp:\n line = line.strip()\n if line.startswith(';') or not line:\n continue\n if line.startswith('>'):\n sequence_name = line.lstrip('>')\n current_sequence_lines = []\n sequences_lines[sequence_name] = current_sequence_lines\n else:\n if current_sequence_lines is not None:\n current_sequence_lines.append(line)\n sequences = {}\n for name, lines in sequences_lines.items():\n sequences[name] = ''.join(lines)\n return sequences", "title": "" }, { "docid": "f1992a864a00a324d359a943b9db3200", "score": "0.6253005", "text": "def read_fasta_file(filename):\n sequences_lines = {}\n current_sequence_lines = None\n with open(filename) as fp:\n for line in fp:\n line = line.strip()\n if line.startswith(';') or not line:\n continue\n if line.startswith('>'):\n sequence_name = line.lstrip('>')\n current_sequence_lines = []\n sequences_lines[sequence_name] = current_sequence_lines\n else:\n if current_sequence_lines is not None:\n current_sequence_lines.append(line)\n sequences = {}\n for name, lines in sequences_lines.items():\n sequences[name] = ''.join(lines)\n return sequences", "title": "" }, { "docid": "5f4fdc7b54a407b80ce09651e6dc6987", "score": "0.62482065", "text": "def FastqGeneralIterator(handle):\n #We need to call handle.readline() at least four times per record,\n #so we'll save a property look up each time:\n handle_readline = handle.readline\n \n #Skip any text before the first record (e.g. blank lines, comments?)\n while True:\n line = handle_readline()\n if line == \"\" : return #Premature end of file, or just empty?\n if line[0] == \"@\":\n break\n\n while True:\n if line[0] != \"@\":\n raise ValueError(\"Records in Fastq files should start with '@' character\")\n title_line = line[1:].rstrip()\n #Will now be at least one line of quality data - in most FASTQ files\n #just one line! We therefore use string concatenation (if needed)\n #rather using than the \"\".join(...) 
trick just in case it is multiline:\n seq_string = handle_readline().rstrip()\n #There may now be more sequence lines, or the \"+\" quality marker line:\n while True:\n line = handle_readline()\n if not line:\n raise ValueError(\"End of file without quality information.\")\n if line[0] == \"+\":\n #The title here is optional, but if present must match!\n second_title = line[1:].rstrip()\n if second_title and second_title != title_line:\n raise ValueError(\"Sequence and quality captions differ.\")\n break\n seq_string += line.rstrip() #removes trailing newlines\n #This is going to slow things down a little, but assuming\n #this isn't allowed we should try and catch it here:\n if \" \" in seq_string or \"\\t\" in seq_string:\n raise ValueError(\"Whitespace is not allowed in the sequence.\")\n seq_len = len(seq_string)\n\n #Will now be at least one line of quality data...\n quality_string = handle_readline().rstrip()\n #There may now be more quality data, or another sequence, or EOF\n while True:\n line = handle_readline()\n if not line : break #end of file\n if line[0] == \"@\":\n #This COULD be the start of a new sequence. However, it MAY just\n #be a line of quality data which starts with a \"@\" character. We\n #should be able to check this by looking at the sequence length\n #and the amount of quality data found so far.\n if len(quality_string) >= seq_len:\n #We expect it to be equal if this is the start of a new record.\n #If the quality data is longer, we'll raise an error below.\n break\n #Continue - its just some (more) quality data.\n quality_string += line.rstrip()\n \n if seq_len != len(quality_string):\n raise ValueError(\"Lengths of sequence and quality values differs \"\n \" for %s (%i and %i).\" \\\n % (title_line, seq_len, len(quality_string)))\n\n #Return the record and then continue...\n yield (title_line, seq_string, quality_string)\n if not line : return #StopIteration at end of file\n assert False, \"Should not reach this line\"", "title": "" }, { "docid": "38896ae917070c02c7627da148f53cee", "score": "0.62431055", "text": "def parse_sequence(path, file_name) :\n\tif path == '.' :\n\t\tpath = ''\n\tgroup = file_name.split('.')\n\tgroup = group[0]\n\t#group denotes the category of the sequence e.g. BRD-sequences\n\tsequence_dict[group] = list()\n\t#one entry in the dictionary for each file\n\twith open(path + file_name, 'r') as fasta_file :\n\t\t#better exception handling and automatic closing using with\n\t\tfirst = 1;\n\t\tto_build = 0;\n\t\tfor line in fasta_file :\n\t\t\tif line[0] == '>' or line[0] == ';' : #description\n\t\t\t\tif first != 1 :\n\t\t\t\t\tif to_build == 1 :\t\t\t#new sequence, old one complete\n\t\t\t\t\t\t#we arrived at the beginning of a new sequence, so we have\n\t\t\t\t\t\t#to instantiate the old one\n\t\t\t\t\t\tsequence_dict[group].append(Sequence(acids, description))\n\t\t\t\t\t\tdescription = line\n\t\t\t\t\t\tacids = ''\n\t\t\t\t\telse :\t\t\t\t\t\t#continue reading the description\n\t\t\t\t\t\tdescription += line;\n\t\t\t\telse :\t\t\t\t\t\t\t#a.a. 
sequence \n\t\t\t\t\tfirst = 0\n\t\t\t\t\tdescription = line\n\t\t\t\t\tacids = ''\n\t\t\telse :\n\t\t\t\tto_build = 1;\n\t\t\t\t#to instantiate a sequence we need at least a line representing\n\t\t\t\t#an amino acids sequence\n\t\t\t\tacids += line.rstrip('\\n')\n\tif to_build == 1 :\n\t\t#instantiate the last sequence from the file\n\t\tsequence_dict[group].append(Sequence(acids, description))", "title": "" }, { "docid": "02ee3b936c65dcb8c5ee170da5c9972f", "score": "0.62217957", "text": "def get_fastq_dict(foldr,paired_end_char=\"_\"):\n #note that we do not check to make sure there are exactly two reads per ID. New feature to be added later.\n res = {}\n for i in listdir(foldr):\n if not is_fastq(i): continue\n sra_id,tail = i.rsplit(paired_end_char,1)\n read = tail.split(\".\",1)[0]\n if \"1\" in read: flag=\"r1\"\n elif \"2\" in read: flag=\"r2\"\n else: raise Kallisto_Wrapper_Error(\"FASTQ Filename %s does not appear to have a paired end indicator 1 or 2\"%i)\n try: res[sra_id][flag] = i\n except KeyError: res[sra_id] = {flag: i}\n return res", "title": "" }, { "docid": "8c8f3d52dd04ec2691d51418ab257391", "score": "0.6218322", "text": "def read_sequence_dict(filename):\n seq_dict = {}\n for hdr, seq in stream_sequence(filename):\n seq_dict[hdr] = seq\n return seq_dict", "title": "" }, { "docid": "ff05e73a5ae564913b4d33933a5e2979", "score": "0.6214693", "text": "def read_fasta(input):\n\tret = {}\n\twith open(input) as file:\n\t\tcurrent_sequence = \"\"\n\t\tcurrent_tag = \"\"\n\t\tfor line in file:\n\t\t\tif \">\" in line:\n\t\t\t\tret[current_tag] = current_sequence\n\t\t\t\tcurrent_tag = line[1: line.find(\" \")]\n\t\t\t\tcurrent_sequence = \"\"\n\t\t\telse:\n\t\t\t\tcurrent_sequence += line.strip()\n\treturn ret", "title": "" }, { "docid": "c85475d4b22a10b2e4ee8bf25c0bce9a", "score": "0.62043506", "text": "def fasta_to_dict(fname):\n from Bio import SeqIO\n seqs = {}\n for seq_record in SeqIO.parse(fname, \"fasta\"):\n seqs[seq_record.id] = seq_record.seq\n return seqs", "title": "" }, { "docid": "8a514887ff7baceec8868e5f9aac06e3", "score": "0.61700416", "text": "def read_fasta(multifasta, delim, idpos):\n\n fasta = {}\n fastaheader = {}\n print('Reading input fasta.')\n with open(multifasta, 'r') as infile:\n acNumber = ''\n for line in infile:\n if line.startswith('>'):\n if delim:\n acNumber = line.split(delim)[idpos].strip().strip('>')\n fastaheader[acNumber] = line.strip()\n else:\n acNumber = line.split()[idpos].strip().strip('>')\n fastaheader[acNumber] = line.strip()\n else:\n if acNumber in fasta:\n fasta[acNumber] += line.strip()\n else:\n fasta[acNumber] = line.strip()\n return fasta, fastaheader", "title": "" }, { "docid": "d53c37cae0f75850b55455ca6a8c6c5c", "score": "0.61341053", "text": "def parse_fasta(data):\n seqs = {}\n for line in data:\n if line.startswith(\">\"):\n header = line\n seqs[header] = \"\"\n else:\n seqs[header] += line\n\n return seqs", "title": "" }, { "docid": "edfa656dbf7119e612cffb145b0cb892", "score": "0.6117387", "text": "def parse_fasta(input_path):\n entries = dict()\n\n # Open FASTA file\n active_header = \"\"\n with open(input_path, \"r\") as handle:\n for line in handle:\n line = line.rstrip()\n\n # Check for header or sequence\n if line.startswith(\">\"):\n active_header = line[1:]\n entries[active_header] = \"\"\n else:\n entries[active_header] += line\n\n return entries", "title": "" }, { "docid": "c10da169853db6449cbfd7c24a33e204", "score": "0.6095438", "text": "def _convert_fasta(filename):\n sequence_dict = {}\n with open(filename 
+ '//' + filename, 'r') as file:\n lines = file.readlines()\n logger.info(\"16S file \" + filename + \" contains \" + str(int(len(lines) / 2)) + \" sequences.\")\n for i in range(0, len(lines), 2):\n otu = lines[i].rstrip()[1:] # remove > and \\n\n sequence = lines[i + 1].rstrip()\n sequence_dict[otu] = sequence\n return sequence_dict", "title": "" }, { "docid": "00a7695da6365f9aba3a4c1787f011b4", "score": "0.60779345", "text": "def get_fq_ids(fastq):\n\n cas_vers = get_casava_vers(fastq)\n print \"%s format: %s\" % (fastq, cas_vers)\n \n id_lst = []\n with open(fastq, \"r\") as f:\n for line in f:\n # get ids\n id_lst.append( line.strip() )\n # pass other lines\n for i in range(3): next(f)\n \n return id_lst, cas_vers", "title": "" }, { "docid": "3755ca8b4d9fe86e736bc98ff0c48c8c", "score": "0.6074079", "text": "def readfasta(ffile):\n record_dict = SeqIO.to_dict(SeqIO.parse(ffile, \"fasta\"))\n return record_dict", "title": "" }, { "docid": "0a5f1a323a7e33ca718e7628acc7c7d6", "score": "0.60692483", "text": "def fasta_process(fasta, dictionary):\n fasta_file = open(fasta, \"r\")\n sequence =\"\"\n counter = 0\n for line in fasta_file:\n line = line.strip()\n if counter == 0: # special case for first loop through\n header = line[1:]\n dictionary[header] = []\n counter = counter + 1\n elif line[0] == \">\": # this works for all except very first one\n dictionary[header].append(sequence) # as soon as hit the next header line store sequence in previous header value\n sequence = \"\" # clear the sequence\n header = line[1:]\n dictionary[header] = [] # store the next header as a key\n counter = counter + 1\n else: # build the sequence\n sequence = sequence + line\n dictionary[header].append(sequence) # store the last sequence\n fasta_file.close()\n # Find scalars for sequences\n max_seq = 0\n for i in dictionary:\n dictionary[i].append(len(dictionary[i][0]))\n max_seq = max(dictionary[i][1], max_seq)\n for i in dictionary:\n dictionary[i][1] = dictionary[i][1] / max_seq\n return len(dictionary) # return count of fasta entries", "title": "" }, { "docid": "b1aa7fdec8ec2210313b274752eeed61", "score": "0.6059932", "text": "def _get_sequence(read1_file, read2_file):\n\n with dnaio.open(\n read1_file, read2_file, fileformat=\"fastq\", mode=\"r\"\n ) as f:\n\n for rec in f:\n read1, read2 = rec\n\n yield read1.name, read1.sequence, read1.qualities, read2.sequence, read2.qualities", "title": "" }, { "docid": "a1c87914dbec0b4d16644b84fb6b22df", "score": "0.60541254", "text": "def read_fastq(filename: str) -> Tuple[List[str], List[str]]:\n reads = []\n qualities = []\n with open(filename, \"r\") as f:\n while True:\n f.readline()\n read = f.readline().rstrip()\n f.readline()\n seq_qualities = f.readline().rstrip()\n if len(read) == 0:\n break\n reads.append(read)\n qualities.append(seq_qualities)\n\n return reads, qualities", "title": "" }, { "docid": "5cb7d70e76384d1cda3553055b052241", "score": "0.60412616", "text": "def dictIdent(allSeqList,leng,scope):\n #sets up library and outStr -> string that gets printed to a file in the end\n theSeq={}\n outStr=\"\"\n chimp=0\n #iterated through the 6 sequences\n print(\"There was not a valkyr in sight...\")\n for genome in allSeqList:\n listofbp=[] #empty list\n maximum=0\n #Determines how the information is processed\n if(scope==\"BSMF\"):\n listORFs=givePreORFs(genome)\n elif(scope==\"ORFMF\"):\n listORFs=giveORFs(genome)\n elif(scope==\"MoEx\"):\n listORFs=[genome]\n chimp+=1\n print(\"And then I found %d %s ...\" % (chimp, ['valkyr','valkyrie'][chimp!=1])) 
#should find 6 nimbi\n for eachORF in listORFs: #gets the ORF's for each reading frame\n for i in range(0,len(genome)-(leng-1),3):\n #Iterating accross ORF in reading frame aviods repeats\n if i%30000==0 and i!=0: #Size of the chunking of the genome\n keys = [x for x,y in theSeq.items() if y > 1] #add back in the dict if key.value>1\n for key in keys: #creates a list of keys and values\n listofbp.append(key)\n listofbp.append(theSeq[key])\n theSeq={} #reset dictionary\n for t in range(0,len(listofbp),2):\n theSeq[listofbp[t]]=listofbp[t+1]\n bp=genome[i:i+leng] #sets the motif being looked for\n bpJ=\"\".join(bp) #turns motif into string\n if bpJ in theSeq: #looks for motif string in library\n theSeq[bpJ]=theSeq[bpJ]+1 #if motif is in lib, adds to value\n else: #if motif isn't in lib\n theSeq[bpJ]=1 #then it adds it to lib with a value of one\n maxxx = max(theSeq.values())\n for h in range(maxxx,1,-1):\n keyss = [x for x,y in theSeq.items() if y==h]\n if(len(keyss)!=0):\n outStr+=str(len(keyss))+\" Sequences occurred \"+str(h)+\" times: \"+str(keyss)+\" \\n \\n\"\n return outStr", "title": "" }, { "docid": "fad1cf247b13bd4076d51f33789d5d4d", "score": "0.6032628", "text": "def parse_fasta(fasta_file: '_io.TextIOWrapper', seq_id: str) -> tuple:\n seq_id_header = \"\"\n # no sequence-id specified, therefore the first one is taken\n if not seq_id:\n # read one line to remove headerline from internal buffer\n seq_id_header = fasta_file.readline().split()[0][1:]\n logging.info(\n 'No seq_id passed, taking first sequence from {} with id {}'.format(\n fasta_file.name, seq_id_header))\n seq = extract_fasta_seq(fasta_file)\n # a sequence-id has been passed to the programm\n else:\n logging.info('Partial sequence-id given: {}'.format(seq_id))\n for line in fasta_file:\n # begin of a new sequence and prefix-matching says true\n if line[0] == '>' and line.startswith(seq_id, 1):\n seq_id_header = line.split()[0][1:]\n logging.info(\n 'Found match of given sequence-id-prefix, '\n 'using sequence with id {}'.format(\n seq_id_header))\n break\n seq = extract_fasta_seq(fasta_file)\n\n return seq, seq_id_header", "title": "" }, { "docid": "eb14235b1d42d1cd2d9881764daeb8ff", "score": "0.6005839", "text": "def read_fasta(input_location:str)->dict:\n fasta_lst = []\n with open(input_location) as handle:\n for record in SeqIO.parse(handle, \"fasta\"):\n fasta_lst.append([record.id,str(record.seq)])\n return fasta_lst", "title": "" }, { "docid": "626f73e713b3c00ffad7a84ed132abda", "score": "0.59539163", "text": "def alignment_parsing(alignment_file, query_n):\n alignment = AlignIO.read(alignment_file, 'fasta')\n position_dict = defaultdict(list)\n aligment_position = 1\n true_position = 1\n for i in range(alignment.get_alignment_length()):\n if alignment[query_n, i] != '-':\n position_dict[true_position].append(aligment_position)\n position_dict[true_position].append(alignment[:, i])\n true_position += 1\n aligment_position += 1\n return position_dict", "title": "" }, { "docid": "e1fbccda5fffefa4183cd620b9f7be86", "score": "0.59450483", "text": "def fasta_parse():\n separator = \";size=\"\n fasta_files = sys.argv[6:]\n samples = dict()\n amplicons2samples = dict()\n for fasta_file in fasta_files:\n sample = os.path.basename(fasta_file)\n sample = os.path.splitext(sample)[0]\n samples[sample] = samples.get(sample, 0) + 1\n with open(fasta_file, \"r\") as fasta_file:\n for line in fasta_file:\n if line.startswith(\">\"):\n amplicon, abundance = line.strip(\">;\\n\").split(separator)\n abundance = int(abundance)\n 
if amplicon not in amplicons2samples:\n amplicons2samples[amplicon] = {sample: abundance}\n else:\n amplicons2samples[amplicon][sample] = amplicons2samples[amplicon].get(sample, 0) + abundance\n # deal with duplicated samples\n duplicates = [sample for sample in samples if samples[sample] > 1]\n if duplicates:\n print(\"Warning: some samples are duplicated\", file=sys.stderr)\n print(\"\\n\".join(duplicates), file=sys.stderr)\n samples = sorted(samples.keys())\n return amplicons2samples, samples", "title": "" }, { "docid": "a85e36d0a7db4ee4adc7f6e496814066", "score": "0.5902897", "text": "def processPairedReads(sam_file,num_inserts, filtered_id): \n\n\n\tread_file = open(sam_file,'r')\n\tin_sam = Reader(read_file) \n\t#list of insert IDs\n\tfragment_id = []\n\n\t#list of insert coordinates in the original fasta sequence\n\tfirst_read = []\n\tsecond_read = []\n\n\n\tprint(\"PROCESSING READS\", flush = True) \n\tfor i in range(0,num_inserts):\n\t\tif i%500000==0 and i!=0:\n\t\t\tprint(\"{:.2f}\".format((i*100/num_inserts))+\"% of reads processed...\") \n\t\tx = next(in_sam)\n\t\ty = next(in_sam)\n\n\t\t#get ID of the read being processed\n\t\tthis_ID = str(x.qname) \n\t\t\n\t\t#only consider the read is mapped \n\t\tif len(filtered_id) > 0: \n\t\t\tif this_ID in filtered_id: \n\t\t\t\t#save the ID of the read \n\t\t\t\tfragment_id.append(x.qname)\n\t\t\n\t\t\t\t#save the coordinates of the read \n\t\t\t\t#subract 1 as the SAM file position starts at 1 but we use index 0 \n\t\t\t\tfirst_read.append((x.pos-1,x.pos+len(x.seq)-1))\n\t\t\t\tsecond_read.append((y.pos-1,y.pos+len(y.seq)-1))\n\t\telse: \n\t\t\t#save the ID of the read \n\t\t\tfragment_id.append(x.qname)\n\t\t\n\t\t\t#save the coordinates of the read\n\t\t\t#subtract 1 as the SAM file position starts at 1 but we use index 0 \n\t\t\tfirst_read.append((x.pos-1,x.pos-1+(len(x.seq)-1)))\n\t\t\tsecond_read.append((y.pos-1,y.pos-1+(len(y.seq)-1)))\n\t\t\t\n\treturn fragment_id, first_read, second_read", "title": "" }, { "docid": "a3e285664e7f53c3ec1ab6f5ed29becc", "score": "0.5894416", "text": "def seqs_from_file(ids,file_lines):\r\n\r\n for label, seq in MinimalFastaParser(file_lines):\r\n \r\n if id_from_fasta_label_line(label) in ids:\r\n yield label,seq", "title": "" }, { "docid": "ea355518c77ff38bc2bb42e011283b42", "score": "0.5865512", "text": "def create_sequence_dictionary(data):\n\n print(\"\\n*****************************************************************\")\n print(\"\\nCreating sequence database from fasta records\")\n\n seq_dict = {}\n number_of_records = 0\n records = SeqIO.parse(data, \"fasta\")\n\n for seq_record in records:\n number_of_records += 1\n description = seq_record.description\n sequence = seq_record\n seq_dict[description] = sequence\n\n print(\"\\nDatabase constructed\")\n print(\"\\nNumber of records in dataset:\", number_of_records)\n\n return seq_dict", "title": "" }, { "docid": "17529a982668fbbf1cfc3ef8de94edf7", "score": "0.5861235", "text": "def parseFastaQ(filenames, common_ids=[],verbose=False):\n\n # filenames should be a list, otherwise, throw error\n if type(filenames) != list:\n raise ValueError(\"I know it's awkward, but make sure the filename(s) are in a list\")\n \n data = dict()\n\n for filename in filenames:\n\n # test for scuffed and handle accordingly\n if testForScuffed(filename):\n part_data = parseDoubleFastQ(filename)\n else:\n part_data = parseGoodFastQ(filename)\n \n # merge dicts\n data = {**data, **part_data}\n\n\n if verbose:\n for k, v in data.items():\n print(k, v)\n\n 
print(\"parsed fasta files!\")\n \n return data\n print(10*\"-\")", "title": "" }, { "docid": "34e096d93682fce3c5ee395969dfb285", "score": "0.585364", "text": "def fasta_parser(pcrfile):\n seqs = []\n headers = []\n with open(pcrfile) as pcr_file:\n sequence = \"\"\n header = None\n for line in pcr_file:\n if line.startswith('>'):\n headers.append(line[1:-1])\n if header:\n seqs.append(sequence)\n sequence = \"\"\n header = line[1:]\n else:\n sequence += line.rstrip()\n seqs.append(sequence)\n return seqs, headers", "title": "" }, { "docid": "041dcfd4dc077574cdc27212364f6500", "score": "0.5852349", "text": "def get_reads(fastq):\n\n with open(fastq, \"r\") as f:\n\n try:\n for line in f:\n seq_id = line.strip()\n seq = f.next().strip()\n f.next()\n qual = f.next().strip()\n\n yield(seq_id, seq, qual)\n\n except StopIteration:\n raise IOError(\n \"Can not read the fastq file: {}\\nIs it a properly formatted fastq ?\".format(fastq))", "title": "" }, { "docid": "7f119df560e106c8f9adf97c1c07f513", "score": "0.58516467", "text": "def FASTA_iterator (fasta_filename):\n file=open(fasta_filename,\"r\")\n seq=''\n for line in file:\n if line[0]==\">\":\n if seq != \"\":\n yield (lastid,seq)\n seq=''\n lastid=line.strip()[1:]\n else:\n lastid=line.strip()[1:]\n else:\n seq += line.strip()\n if seq != \"\":\n yield (lastid,seq)", "title": "" }, { "docid": "aea9c7f7cca420fe33d15536d3432eb0", "score": "0.582615", "text": "def split_fasta(filename):\r\n fastas = {}\r\n with open(filename, 'r') as file:\r\n seqs = file.read().split('>')\r\n for i in seqs[1:]:\r\n split_mark = i.find('\\n')\r\n key = i[:split_mark]\r\n value = i[split_mark + 1:].replace('\\n', '')\r\n fastas[key] = value\r\n\r\n return fastas", "title": "" }, { "docid": "0d0e59ee3c5e12c1d7f10bcc0132d732", "score": "0.58078766", "text": "def fasta2dic(fastafile):\n if \".gz\" in fastafile:\n handle=gzip.open(fastafile, \"rU\")\n else:\n handle=open(fastafile, \"rU\")\n record_dict=SeqIO.to_dict(SeqIO.parse(handle,\"fasta\"))\n handle.close()\n return record_dict", "title": "" }, { "docid": "2788f33c117d4ba88008bb26b4068dc0", "score": "0.5803729", "text": "def readFasta (self):\n header = ''\n sequence = ''\n \n with self.doOpen() as fileH:\n\t\t\t\n header = ''\n sequence = ''\n \n # skip to first fasta header\n line = fileH.readline()\n while not line.startswith('>') :\n line = fileH.readline()\n header = line[1:].rstrip()\n\n for line in fileH:\n if line.startswith ('>'):\n yield header,sequence\n header = line[1:].rstrip()\n sequence = ''\n else :\n sequence += ''.join(line.rstrip().split()).upper()\n\t\t\t\t\t\t\n yield header,sequence", "title": "" }, { "docid": "630bebe18c462b5e919e671e77570371", "score": "0.57792753", "text": "def read_transcriptome(transcriptome):\n result_dict = {}\n for sequence in SeqIO.parse(transcriptome, 'fasta'):\n result_dict[sequence.name] = sequence.seq\n return result_dict", "title": "" }, { "docid": "4960c4c76b50cc441b7d90242e7c4d7f", "score": "0.57774407", "text": "def ReadPeptideSequences( infile, filter = None, as_array = False, regex_identifier = None ):\n\n sequences = ParseFasta2Hash(infile, filter, regex_identifier = regex_identifier )\n\n if not as_array: \n for k in sequences.keys():\n sequences[k] = sequences[k][:]\n return sequences", "title": "" }, { "docid": "28c12d1c55f9bae19e2493eb274c156f", "score": "0.5761436", "text": "def parse_fastq(fastq_file):\n seq_len_tab = []\n ext = os.path.splitext(fastq_file)[1]\n temp_out = None\n try:\n if ext == \".gz\":\n fastq = gzip.open(fastq_file, 
\"rt\")\n elif ext == \".dsrc2\":\n temp_out = (tempfile.gettempdir() + os.sep +\n os.path.basename(fastq_file).replace(\".dsrc2\", \"\"))\n cmd=\"dsrc d {0} {1}\".format(fastq_file, temp_out)\n retcode = subprocess.call(cmd, shell=True)\n # Case of no return\n if retcode is None:\n sys.exit(\"Child was terminated\")\n fastq = open(temp_out, \"rt\")\n else:\n fastq = open(fastq_file, \"rt\")\n for line in fastq:\n lenfastq = len(fastq.next())\n if lenfastq > 0:\n # Get the sequence\n seq_len_tab.append(lenfastq)\n # Pass separator\n fastq.next()\n # Pass quality\n fastq.next()\n fastq.close()\n if temp_out:\n os.remove(temp_out)\n except IOError:\n sys.exit(\"Error cannot open fastq file : {0}\".format(fastq_file))\n except OSError as error:\n sys.exit(\"Execution failed: {0}\".format(error))\n return seq_len_tab", "title": "" }, { "docid": "6364334aef3912104ebabddbce06fa1f", "score": "0.57496995", "text": "def results_and_input():\r\n file = open(\"BLAST_results_goed.txt\", \"r\")\r\n dic_seq_hits = {}\r\n parameters = []\r\n hits = []\r\n header = \"\"\r\n\r\n for line in file:\r\n if line.startswith(\"~\"):\r\n dic_seq_hits[header] = hits\r\n hits = []\r\n header = line.strip(\"~\").strip('\\n')\r\n elif line.startswith(\"$\"):\r\n parameters.append(line.strip('\\n').strip('$'))\r\n for _ in range(6):\r\n parameter = file.readline().strip('\\n')\r\n parameter = parameter.replace(\"'\", '\"')\r\n parameters.append(parameter)\r\n if \"hypothetical\" not in parameters[1].lower():\r\n hits.append(parameters)\r\n parameters = []\r\n dic_seq_hits[header] = hits\r\n dic_seq_hits.pop('')\r\n\r\n return dic_seq_hits", "title": "" }, { "docid": "4f59c9156cbd650fb7018db868252386", "score": "0.5734427", "text": "def _read_fastq(file_handle):\n seq = None\n qual = None\n header = None\n state_counter = 0\n\n for no, line in enumerate(file_handle):\n line = line.strip()\n if not line:\n continue\n\n if state_counter == 0:\n if line[0 : 1] != b\"@\":\n raise FastaError(\"Fastq format error: {0} at line {1}\"\n .format(file_handle.name, no))\n header = line[1:].split()[0]\n\n if state_counter == 1:\n seq = line\n\n if state_counter == 2:\n if line[0 : 1] != b\"+\":\n raise FastaError(\"Fastq format error: {0} at line {1}\"\n .format(file_handle.name, no))\n\n if state_counter == 3:\n qual = line\n yield header, seq, qual\n\n state_counter = (state_counter + 1) % 4", "title": "" }, { "docid": "80e975cd7e32f4cc971383632e58ffa8", "score": "0.5733133", "text": "def fq_ids(fnames):\n\n res = {}\n for f in fnames:\n for seqid, fullid, seq, qual in stream_fastq(f):\n # note we store several versions of the id as phylosift does some munging on them\n res[fullid] = f\n fullid = fullid.replace(' ', '_')\n res[fullid] = f\n\n return res", "title": "" }, { "docid": "0e09e00782ce0c8db69bbd222fc621ea", "score": "0.57255477", "text": "def read_data(aln):\n\n f = open(aln)\n result = dict()\n\n taxa = \"\"\n seq = \"\"\n for line in f:\n if line[0] == '>':\n if taxa != \"\":\n result[taxa] = seq\n taxa = line[1:-1]\n seq = \"\"\n\n elif line == \"/n\":\n continue\n else:\n seq += line[:-1]\n\n if taxa != \"\":\n result[taxa] = seq\n\n\n return result", "title": "" }, { "docid": "2fb891c6e76ebc81337809e10be1c990", "score": "0.572473", "text": "def read_fastq(fp):\n\n last = None\n while True: # mimic closure; is it a bad idea?\n if not last: # the first record or a record following a fastq\n for l in fp: # search for the start of the next record\n if l[0] in '>@': # fasta/q header line\n last = l[:-1] # save this 
line\n break\n\n if not last:\n break\n\n name, seqs, last = last[1:].partition(\" \")[0], [], None\n for l in fp: # read the sequence\n if l[0] in '@+>':\n last = l[:-1]\n break\n seqs.append(l[:-1])\n\n if not last or last[0] != '+': # this is a fasta record\n yield name, ''.join(seqs), None # yield a fasta record\n if not last:\n break\n else: # this is a fastq record\n seq, leng, seqs = ''.join(seqs), 0, []\n for l in fp: # read the quality\n seqs.append(l[:-1])\n leng += len(l) - 1\n if leng >= len(seq): # have read enough quality\n last = None\n yield name, seq, ''.join(seqs) # yield a fastq record\n break\n if last: # reach EOF before reading enough quality\n yield name, seq, None # yield a fasta record instead\n break", "title": "" }, { "docid": "248e10477af04c8c4c548a2268073f50", "score": "0.5699685", "text": "def parseSeqFile( self, seqFile, init=True):\n self._seqDict = {}\n if init: self.sequence = ntu.NTlist()\n sequenceId = 1\n resCount = 0\n error = False\n for line in AwkLike( seqFile, commentString='#' ):\n if (not line.isEmpty() and not line.isComment( '#')):\n residueId = line[1]\n if ( residueId in CYANA_NON_RESIDUES ): # skip the bloody CYANA non-residue stuff\n pass\n\n else:\n if (line.NF > 1):\n sequenceId = line.int(2)\n if sequenceId == None:\n error = True\n ntu.nTerror('CyanaParser.parseSeqFile: invalid sequenceId \"%s\" on line %d (%s)',\n line[2], line.NR, line[0] )\n #end if\n #endif\n self._seqDict[ sequenceId ] = residueId # store original 'convention' name\n if init:\n self.appendObject( ResidueRecord(CYANA_CHAIN_ID,sequenceId, residueId) )\n sequenceId += 1\n resCount += 1\n #end if\n #end if\n #end for\n self.seqFile = seqFile\n ntu.nTmessage('==> Parsed %d residues from %s', resCount, self.seqFile)\n\n if error:\n return None\n else:\n return self", "title": "" }, { "docid": "3da557a033fb524bde90360e80a55d6c", "score": "0.56891257", "text": "def openSeqs(infile):\n\n def checkAlphabet(seq, alphabet=alphabet):\n \"\"\" Aborts program if 'seq' contains elements not in 'alphabet'\"\"\"\n for n in seq.rstrip():\n if n not in alphabet:\n logging.debug(\"Sequence contains non-nucleotide characters\")\n raise TypeError(\"Sequence contains non-nucleotide characters\")\n sys.exit(0)\n\n with open(infile) as data_file:\n logging.info('Succesfully opened %s' % infile)\n\n ### Fasta parser\n # skip empty lines\n while True:\n line = data_file.readline()\n if line == \"\":\n continue\n if line[0] == \">\":\n break\n\n while True:\n if line[0] != \">\":\n raise ValueError(\"Records in Fasta format start with the '>' character\")\n # get seq ID\n ID = line[1:].rstrip().split(\" \")[0]\n seqs = []\n line = data_file.readline()\n while True:\n if not line:\n break # end of file, end while\n if line[0] == \">\":\n break # new seq\n checkAlphabet(line)\n seqs.append(line.rstrip())\n line = data_file.readline()\n\n yield ID, \"\".join(seqs).replace(\" \", \"\").replace(\"\\r\", \"\")\n\n if not line:\n return # end", "title": "" }, { "docid": "c72f9a5f6448511b34d2a8933ed4e1b9", "score": "0.56769174", "text": "def parse_existing_primer(prefix: str) -> dict:\n left_name = \"{}_left.fas\".format(prefix)\n right_name = \"{}_right.fas\".format(prefix)\n try:\n left_primer = open(left_name, 'r')\n right_primer = open(right_name, 'r')\n except FileNotFoundError:\n logging.error(\n 'Could not find or read one or both of the files '\n 'containing the primers that should be used for this run '\n 'instead of generating new one.\\n'\n 'Looking for files: {} and {}. 
Aborting'.format(\n left_name, right_name\n ))\n sys.exit(1)\n\n primer_left_lines = left_primer.readlines()\n primer_right_lines = right_primer.readlines()\n left_primer.close()\n right_primer.close()\n\n primer_left_list = []\n sequence = seq_id = \"\"\n for left_line in primer_left_lines:\n # begin of new sequence\n if left_line[0] == '>':\n if sequence != \"\":\n # we were previously reading a sequence therefore store it\n # before starting with a new one\n\n # sanitize sequence from newlines\n sequence = sequence.replace('\\n', '').replace('\\r', '')\n primer_left_list.append((seq_id, sequence))\n sequence = \"\"\n # extract id from header\n seq_id = left_line.split('>')[1].split()[0]\n else:\n sequence += left_line\n # append final sequence to list\n if sequence != \"\":\n sequence = sequence.replace('\\n', '').replace('\\r', '')\n primer_left_list.append((seq_id, sequence))\n\n primer_right_list = []\n sequence = seq_id = \"\"\n for right_line in primer_right_lines:\n # begin of new sequence\n if right_line[0] == '>':\n if sequence != \"\":\n # we were previously reading a sequence therefore store it\n # before starting with a new one\n sequence = sequence.replace('\\n', '').replace('\\r', '')\n # sanitize sequence from newlines\n primer_right_list.append((seq_id, sequence))\n sequence = \"\"\n # extract id from header\n seq_id = right_line.split('>')[1].split()[0]\n else:\n sequence += right_line\n if sequence != \"\":\n sequence = sequence.replace('\\n', '').replace('\\r', '')\n primer_right_list.append((seq_id, sequence))\n\n logging.debug('Extracted following primer from {}: {}'.format(\n left_name, ' ,'.join(map(str, primer_left_list))\n ))\n logging.debug('Extracted following primer from {}: {}'.format(\n right_name, ' ,'.join(map(str, primer_right_list))\n ))\n primer_dict = {} # type: dict\n for (l_id, l_seq), (r_id, r_seq) in zip(primer_left_list,\n primer_right_list):\n l_seq = l_seq.replace('\\n', '').replace('\\r', '')\n r_seq = r_seq.replace('\\n', '').replace('\\r', '')\n primer_dict.update({tuple(sorted((l_id, r_id))): (l_seq, r_seq)})\n return primer_dict", "title": "" }, { "docid": "ff205eb0c0fb35645c570475445a6561", "score": "0.56756663", "text": "def parse_needle_entry(lines):\n seq_pat = re.compile(r'(\\S+)\\s+\\d+\\s+(\\S+)')\n curr = {}\n for line in lines:\n if line.startswith(\"#\"):\n if line.startswith(\"#======================================\") or \\\n line.startswith(\"#--------------------------------------\"):\n if curr:\n yield [(_id, \"\".join(_seq)) for _id, _seq in curr.items()]\n curr = {}\n else:\n seq_data = seq_pat.search(line)\n if seq_data:\n if seq_data.group(1) not in curr:\n curr[seq_data.group(1)] = [seq_data.group(2), ]\n else:\n curr[seq_data.group(1)].append(seq_data.group(2))", "title": "" }, { "docid": "14d7c7cdfddbd6182f3be397bd9dbd6a", "score": "0.56565344", "text": "def _get_sequences(self, pdb_file):\n \n result = {}\n for line in pdb_file:\n if line.startswith('SEQRES'):\n words = line.split()\n if words[2].isalpha():\n key = words[2]\n seq_start = 4\n else:\n key = 'A'\n seq_start = 3\n if key not in result:\n result[key] = words[seq_start:]\n else:\n result[key] =result[key] + words[seq_start:]\n\n # Translate modified bases if necessary\n self._translate(result,self._build_translation_table(result,pdb_file))\n return result", "title": "" }, { "docid": "650057156e632e3811a27d34d53863f3", "score": "0.56465566", "text": "def scan_fq(filename):\n lens_found = collections.Counter()\n bcs_found = collections.Counter()\n ns_found = 
0\n bad_barcode = re.compile(br'[N0-9]')\n\n if os.stat(filename).st_size == 0:\n return dict( total_reads = 0,\n min_read_len = 0,\n max_read_len = 0,\n n_bases = 0 )\n\n try:\n n = 0\n with gzip.open(filename, mode='rb') as fh:\n for n, l in enumerate(fh):\n #Extract barcode\n if n % 4 == 0:\n candidate_bc = l.split(b\":\")[-1].rstrip()\n\n if ( len(candidate_bc) < 3 or\n re.search(bad_barcode, candidate_bc) ):\n #That's no barcode!\n pass\n else:\n bcs_found[candidate_bc] += 1\n elif n % 4 == 1:\n lens_found[len(l) - 1] += 1\n ns_found += l.count(b'N')\n except OSError as e:\n #The GZip module doesn't tell you what file it was trying to read\n e.filename = filename\n e.strerror = e.args[0]\n raise\n\n return dict( total_reads = (n + 1) // 4,\n min_read_len = min(lens_found.keys() or [0]),\n max_read_len = max(lens_found.keys() or [0]),\n total_bases = sum([ l * c for l, c in lens_found.items()]),\n n_bases = ns_found,\n bcs_found = bcs_found )", "title": "" }, { "docid": "fac77383db001953aedde8dad01b3c57", "score": "0.56434333", "text": "def loadFasta(filename, verbose=0):\n if filename.endswith(\".gz\"):\n fp = gzip.open(filename, 'rt')\n else:\n fp = open(filename, 'r')\n # split at headers\n # data = fp.read().split('>')\n data = fp.read()\n data = data.split('>')\n fp.close()\n # ignore whatever appears before the 1st header\n data.pop(0)\n # prepare the dictionary\n D = {}\n for sequence in data:\n lines = sequence.split('\\n')\n header = lines.pop(0).split()\n key = header[0]\n D[key] = ''.join(lines)\n if verbose:\n print(\"Sequence %s of length %d read\" % (key, len(D[key])))\n return D", "title": "" }, { "docid": "ebd197213a68e24ecc1e77c05d9a4d11", "score": "0.5640044", "text": "def uniqseqs(seqdata, trimdefline=False, checkrevcom=False, fastq=True,\n paired=True):\n seqs = {}\n parsefunc = sequniq.parse.get_parser(fastq=fastq, paired=paired)\n for record in parsefunc(seqdata):\n sequniq.parse.check_record(record, fastq=fastq, paired=paired)\n\n seq = record[1]\n if paired:\n if fastq:\n seq += record[4]\n else:\n seq += record[3]\n seqsha = hashlib.sha1(seq).hexdigest()\n\n if seqsha not in seqs:\n if checkrevcom:\n rseqsha = hashlib.sha1(sequniq.revcomp(seq)).hexdigest()\n if rseqsha not in seqs:\n seqs[seqsha] = 1\n yield record\n else:\n seqs[seqsha] = 1\n yield record", "title": "" }, { "docid": "073a7623f272ef126244fe3aba7a9c13", "score": "0.56037086", "text": "def fastareader(f):\n F = open(f)\n id, header, seq = \"\", \"\", []\n\n while True:\n l = F.readline()\n if not l: break\n \n l = l.strip()\n if not l: continue\n\n if l[0] == '>':\n if id and seq:\n yield id, header, \"\".join(seq)\n seq = []\n id = l[1:].split()[0]\n header = \" \".join(l[1:].split()[1:])\n \n else:\n seq.append(\"\".join(l.split()).lower())\n\n if id and seq:\n yield id, header, \"\".join(seq)\n\n F.close()", "title": "" }, { "docid": "6e3f6ce24102828d7021b5fee6e5427b", "score": "0.5576209", "text": "def read_sequence(filename: str) -> Tuple[str, str]:\r\n record = next(SeqIO.parse(filename, \"fasta\"))\r\n return record.description, str(record.seq)", "title": "" }, { "docid": "72c668424f8f53d9fe2b9ee3225f90e0", "score": "0.5568531", "text": "def read_taxid():\n\n taxid = {} \n\n with open('taxid.txt', 'r') as f:\n for line in f:\n current_line = line.split()\n current_id = current_line[0]\n current_name = current_line[1] \n taxid[current_id] = current_name\n\n return taxid", "title": "" }, { "docid": "644d5f3d0ccec06ddae9abac56103a83", "score": "0.5559792", "text": "def read_sequence(filename: 
str) -> Tuple[str, str]:\n record = next(SeqIO.parse(filename, \"fasta\"))\n return record.description, str(record.seq)", "title": "" }, { "docid": "4a876c9cbbcf3855daf41a6f94919f23", "score": "0.55338776", "text": "def alignment_file_parser(filename):\n aln_hits = tuple()\n for entry in read_fasta_file(filename):\n hit = dict()\n hit['subject acc.'] = entry[0]\n hit['subject seq'] = entry[1]\n aln_hits += (hit,)\n return aln_hits", "title": "" }, { "docid": "136cd5602e21313b60a0ab53c5d71d11", "score": "0.5533474", "text": "def get_destination_dict():\n my_dict = {}\n with open(\"/home/workspace/data/I94_SAS_Labels_Descriptions.SAS\") as f:\n lines = f.readlines() \n for line in lines[303:893]:\n line= line.split(',', 1)[0]\n (key, val) = line.split(\"=\")\n key = re.sub(r\"[\\n\\t\\s']*\", \"\", key)\n my_dict[str(key)] = re.sub(r\"[\\n\\t']*\", \"\", val)\n return my_dict", "title": "" }, { "docid": "2ad9b4c09d6897950c18143d8c479ab4", "score": "0.55267346", "text": "def load_qrels(fn):\n qrels = defaultdict(dict)\n with open(fn, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n qid, _, docid, label = line.strip().split()\n qrels[qid][docid] = int(label)\n return qrels", "title": "" }, { "docid": "407eb3e396ae0f5a80b713f2af3ea04f", "score": "0.55218375", "text": "def parse_query(alignment_file, queries_list):\n gene = alignment_file.split('/')[-1].split('.')[0]\n ali_dict = {}\n n = 0\n for record in SeqIO.parse(alignment_file, 'fasta'):\n seq = str(record.seq).replace('-', '')\n ali_dict[record.name] = f'>{record.name}@{gene}@{n}\\n{seq}\\n'\n n += 1\n for query in queries_list:\n if query in ali_dict:\n return ali_dict[query]\n return None", "title": "" }, { "docid": "077a0e1323cb47fa34229272c3370c99", "score": "0.5518105", "text": "def read_header_data_from_fasta(fasta_path):\n result = defaultdict()\n FILE = open(fasta_path, 'r')\n for line in FILE:\n if line[0]==\">\":\n line = line.strip()\n line = line[1:]\n lineSP = line.split()\n\n lis = [lineSP[2], lineSP[1], lineSP[3], lineSP[4]]\n result[lineSP[0]] = lis\n\n return result", "title": "" }, { "docid": "25a061246cf42f23ca7d815a5071dbb0", "score": "0.55137426", "text": "def file_sequence(file_in,file_type):\n\n type_set={0:'logon', 1:'device', 2:'email', 3:'file', 4:'http'}\n type_words=type_set[file_type]\n files_in=open(file_in,'r')\n sequences_set={}\n for line in files_in:\n if line !='\\n':\n line=line.split(' ; ')\n # print (len(line))\n for records in line:\n if file_type==0 and 'Logon'in records:\n type_words= 'logon'\n elif file_type==0 and 'Logoff'in records:\n type_words= 'logoff'\n if file_type==1 and 'Connect' in records:\n type_words= 'Connect'\n elif file_type==1 and 'Disconnect' in records:\n type_words='Disconnect'\n records=records.split(',')\n if records[0] in sequences_set:\n # 同一天的不同时刻记录拼接\n values=records[1]+'#'+type_words\n sequences_set[records[0]]=sequences_set[records[0]]+' & '+values\n else:\n values=records[1]+'#'+type_words\n sequences_set[records[0]]=values\n files_in.close()\n return(sequences_set)", "title": "" }, { "docid": "66a71d47d0e159a2f93ea904f049323d", "score": "0.55080944", "text": "def test_parse_fasta_file(self):\n \n \n fasta_data =['>seq1 SAMPLE1', 'AAACGT', '>seq2', 'ACGGT']\n \n expected_fasta = {'seq1':'AAACGT', 'seq2':'ACGGT'}\n \n expected_order = ['seq1 SAMPLE1', 'seq2']\n \n actual_fasta, actual_order = parse_fasta_file(fasta_data)\n \n self.assertEqual(actual_fasta, expected_fasta)\n \n self.assertEqual(actual_order, expected_order)", "title": "" }, { "docid": 
"76169e4b98214adabf34fcab9d07dfbc", "score": "0.55020994", "text": "def Get_Sequences(file_1, file_2):\n\n file1 = open(file_1, \"r\")\n file2 = open(file_2, \"r\")\n\n # get only the sequence letters, initially this will be a list of different sets\n # of letters due to the line breaks in the .txt file\n with file1 as f:\n seq_1 = f.read().splitlines()\n\n with file2 as f:\n seq_2 = f.read().splitlines()\n\n file1.close()\n file2.close()\n\n # in case the .txt files has empty lines above the first \">\" line\n seq_i = 0\n for x in range(len(seq_1)):\n if seq_1[x][0:1:] == \">\":\n seq_i = x\n seq_i += 1\n seq_1 = seq_1[seq_i::]\n\n seq_2_i = 0\n for x in range(len(seq_2)):\n if seq_2[x][0:1:] == \">\":\n seq_2_i = x\n seq_2_i += 1\n seq_2 = seq_2[seq_2_i::]\n\n # concatenate the strings of hemoglobin codes to one sequence\n sequence_1 = \"\"\n sequence_2 = \"\"\n\n for s in seq_1:\n sequence_1 += s\n\n for s in seq_2:\n sequence_2 += s\n\n # set the human and mouse sequences to a tuple called sequences\n sequences = (sequence_1, sequence_2)\n\n # return the tuple of sequences\n return sequences", "title": "" }, { "docid": "ecad1b74e733b986d6ad356e02f3a677", "score": "0.54942006", "text": "def _parse_ref_id_line(self): \n result_dict = {}\n \n token = self._tokenizer.next()\n \n if token.type not in (TokenCreator.TokenNames.ID, TokenCreator.TokenNames.NUMBER):\n raise ParsingError(ParsingError.create_std_error_msg('an id', token), 'The ref_id line is missing a ref_src or it is not well formatted', token)\n \n result_dict['REFSTR'] = token.value\n \n token = self._tokenizer.next()\n \n # could be the optional ref_src\n if token.type in (TokenCreator.TokenNames.ID, TokenCreator.TokenNames.NUMBER):\n result_dict['REFSRC'] = token.value\n token = self._tokenizer.next()\n \n # now the [part seq_num [of tot_num]]\n if token.type == TokenCreator.TokenNames.PART:\n \n #get the seq num val\n token = self._tokenizer.next()\n \n if token.type not in (TokenCreator.TokenNames.ID, TokenCreator.TokenNames.NUMBER):\n raise ParsingError(ParsingError.create_std_error_msg('an id', token), \"The ref_id line is missing a the seq_num in the \\'part\\' construct: ref_id ref_str [ref_src] [part seq_num [of tot_num]]\", token)\n \n result_dict['SEQNUM'] = token.value\n \n # look for OF token\n token = self._tokenizer.next()\n \n if token.type == TokenCreator.TokenNames.OF:\n \n # get the tot_num val\n token = self._tokenizer.next()\n \n if token.type not in (TokenCreator.TokenNames.ID, TokenCreator.TokenNames.NUMBER):\n raise ParsingError(ParsingError.create_std_error_msg('an id', token), \"The ref_id line is missing a the tot_num in the \\'of\\' construct: ref_id ref_str [ref_src] [part seq_num [of tot_num]]\", token)\n \n result_dict['TOTNUM'] = token.value\n \n #go to next\n token = self._tokenizer.next()\n # it can then only be a new line\n elif token.type != TokenCreator.TokenNames.NEWLINE:\n raise ParsingError(ParsingError.create_std_error_msg('an id, a part or a new line ', token), \"The ref_id line is mal formatted. 
It should follow ref_id ref_str [ref_src] [part seq_num [of tot_num]]\", token)\n \n #eat current and next line characters\n self._tokenizer.consume_while_current_token_is_in([TokenCreator.TokenNames.NEWLINE])\n \n return result_dict", "title": "" }, { "docid": "1dcfb6cbdbeee90283afb072339c6a69", "score": "0.54796875", "text": "def read_fasta(fasta_file, verbose=False) -> dict:\n try:\n fasta_handler = open(fasta_file, 'r')\n except IOError:\n raise IOError(\"ERROR: Unable to open the FASTA file \" + fasta_file + \" for reading!\")\n\n if verbose:\n sys.stdout.write(\"Reading FASTA file... \")\n sys.stdout.flush()\n\n line = fasta_handler.readline()\n # Check FASTA format\n if line[0] != '>':\n sys.stderr.write(\"ERROR: First line in FASTA file is not a header!\\n\")\n sys.exit(4)\n genome_dict = dict()\n sequence = \"\"\n header = \"\"\n while line:\n line = line.strip()\n if line and line[0] == '>':\n if sequence:\n genome_dict[header] = RefSeq(header, sequence)\n header = line\n sequence = \"\"\n else:\n sequence += line\n line = fasta_handler.readline()\n\n genome_dict[header] = RefSeq(header, sequence)\n\n if verbose:\n sys.stdout.write(\"done.\\n\")\n sys.stdout.write(\"Read %d sequences from FASTA file.\\n\" % len(genome_dict.keys()))\n\n return genome_dict", "title": "" }, { "docid": "b70b4b9cdc07710aac5da623d9ad40cb", "score": "0.54714787", "text": "async def read_fastq(f) -> Generator[tuple, None, list]:\n had_plus = False\n\n header = None\n seq = None\n\n async for line in f:\n if line == \"+\\n\":\n had_plus = True\n continue\n\n if not had_plus:\n if line[0] == \"@\":\n header = line.rstrip()\n continue\n\n seq = line.rstrip()\n continue\n\n if had_plus:\n yield header, seq, line.rstrip()\n\n header = None\n seq = None\n had_plus = False", "title": "" }, { "docid": "376c318132a72e00898164ddaa02aad4", "score": "0.54551446", "text": "def ordered_record_generator(fpath,ordered_ids):\n yielded = set()\n for id in ordered_ids:\n with open(fpath) as fasta_f:\n fastas = SeqIO.parse(fasta_f,'fasta')\n for fasta in fastas:\n if fasta.id == id and fasta.id not in yielded:\n yielded.add(fasta.id)\n yield fasta\n break", "title": "" }, { "docid": "b1f709da7df17a99521eb9de2022017a", "score": "0.54525954", "text": "def process_file(self, file):\n \n # Check the file exists and is readable\n utilities.file_exists_readable(file)\n \n # Check that the file of reads is fasta\n # If it is fastq, then convert the file to fasta\n temp_file=\"\"\n if utilities.fasta_or_fastq(file) == \"fastq\":\n input_fasta=utilities.fastq_to_fasta(file)\n temp_file=input_fasta\n else:\n input_fasta=file\n \n file_handle=open(input_fasta,\"rt\")\n \n sequence=\"\"\n id=\"\"\n for line in file_handle:\n if re.search(\"^>\", line):\n # store the prior sequence\n if id:\n yield (id,sequence)\n id=line.rstrip().replace(\">\",\"\")\n # only store the first id if multiple separated by spaces\n if \" \" in id:\n ids=id.split(\" \")\n id=ids[0]\n sequence=\"\"\n else:\n sequence+=line.rstrip()\n \n # add the last sequence\n yield (id, sequence)\n \n file_handle.close()\n \n # Remove the temp fasta file if exists\n if temp_file:\n utilities.remove_file(temp_file)", "title": "" }, { "docid": "eaec13fcba9d9370f7e0395766bbd458", "score": "0.54449064", "text": "def read_fasta(filename):\n seq_series = {}\n with open(filename) as fin:\n # Add each sequence to the series\n for header, sequence in fasta_iter(fin):\n sequence = sequence.upper()\n seq_series[header] = sequence\n \n seq_series = pd.Series(seq_series)\n 
seq_series.index.name = \"label\"\n return seq_series", "title": "" }, { "docid": "8af25ad2fee6b9117f7003d66c441e23", "score": "0.5444152", "text": "def parse_sequencedata (paramdict,assaydata):\n #Expected for sequencing data: ['Mass','Time (s)','Scan Range','Top Sequence','Score','Next Best Score','Number of Hits','Average Score','Sequencing Confidence']\n #The first three columns can safely not be output, as once matching has occured, they are no longer relevant.\n #Scan range is not carried over as it is of limited use outside of in-depth debugging of CycLS.\n seqcols = []\n seqdata = collections.defaultdict(lambda : collections.defaultdict(lambda : {}))\n for i,expt in enumerate(paramdict['Experiment Names']):\n try:\n swb = openpyxl.load_workbook(paramdict['Sequence Data File Names'][i],read_only=True)\n except IOError:\n print('Error: File {}{} not found.'.format(expt,paramdict['Sequence Data File Names'][i]))\n sys.exit(1)\n sws = swb.active\n header=True\n for row in sws.rows:\n rowvals = [x.value for x in row]\n if header:\n for i,val in enumerate(rowvals):\n if i > 2:\n if val not in seqcols:\n seqcols.append(val)\n header=False\n else:\n seqdata[expt][rowvals[0]][rowvals[1]]=rowvals[3:]\n return seqdata,seqcols", "title": "" }, { "docid": "ea91a1070e5b11e3c4a8a9f2ec0d0564", "score": "0.54311496", "text": "def read_find_ip_results(fn):\n f = open(fn, \"r\")\n data = {}\n for line in f:\n line = line.strip().split(\"\\t\")\n if len(line) == 7:\n id, gene, info, read_pos, ref_pos, thrhld, seq = line\n read_pos = read_pos.strip(\"()\").split(\",\")\n ref_pos = ref_pos.strip(\"()\").split(\",\")\n data[id] = (gene, info, read_pos, ref_pos, thrhld, seq)", "title": "" }, { "docid": "9749f956fbd4941bbb356048de798fdf", "score": "0.5428781", "text": "def alignment_file_parser(filename):\n aln_hits = tuple()\n for entry in read_fasta_file(filename):\n hit = dict()\n hit['subject acc.'] = entry[0].split('|')[0]\n hit['species name'] = entry[0].split('|')[1]\n hit['subject seq'] = entry[1]\n aln_hits += (hit,)\n return aln_hits", "title": "" }, { "docid": "de2ce0d9a1d0fa234efe2dade7332a10", "score": "0.5424265", "text": "def _readproc_lines(filename):\n info = dict()\n with open(filename) as fp:\n for line in fp:\n line = line.replace(' ', ' ')\n (label, data) = line.split(' ', 1)\n info[label] = data\n return info", "title": "" }, { "docid": "63872dcb4a6bed3f75c620cbd51ede28", "score": "0.5412413", "text": "def dictparser(parseddict, seqdict, analysistype):\n # Imaginatively named dictionary to store the filtered results\n plusdict = defaultdict(make_dict)\n # Iterate through the strains with results\n for strain in parseddict:\n # Retrieve the identity cutoff value from seqDict\n identitycutoff = seqdict[strain][\"cutoff\"][analysistype]\n # Iterate through the targets\n for target in seqdict[strain][\"targets\"][analysistype]:\n # Set the target name\n targetname = os.path.basename(target).split(\".\")[0]\n # For every allele in the targets (one or more)\n # cattarget = parseddict[strain].keys()[0]\n # for allele in parseddict[strain][cattarget]:\n for allele in seqdict[strain][\"targetSequences\"][analysistype][target][\"allele\"]:\n # Initialise the variables to store the parsed values\n totaldepth = 0\n nonsnps = 0\n # Retrieve the contig (allele) length from seqDict\n contiglength = seqdict[strain][\"targetSequences\"][analysistype][target][\"allele\"][allele]\n # Iterate through the cat file\n # for cattarget in parseddict[strain]:\n cattarget = parseddict[strain].keys()[0]\n # 
Iterate through each individual position in the reference mapped bam files\n for pos in sorted(parseddict[strain][cattarget][allele]):\n # Get the depth and quality values from the dictionary\n for depth, quality in parseddict[strain][cattarget][allele][pos].iteritems():\n # As these results were filtered in bamParse, every position represents a non-SNP\n nonsnps += 1\n # Increment the total depth value with the current depth\n totaldepth += float(depth)\n # Calculate the average depth and identity values using the contig length\n averagedepth = float(\"%.2f\" % (float(totaldepth)/contiglength))\n percentidentity = float(\"%.2f\" % (float(nonsnps)/contiglength * 100))\n # If the observed percent identity is greater than the cutoff value, add the results to the dictionary\n if percentidentity >= identitycutoff:\n plusdict[strain][targetname][allele][averagedepth] = percentidentity\n # Return the dictionary\n return plusdict", "title": "" }, { "docid": "4c320819a22d596de343a24ce57e6d26", "score": "0.5411828", "text": "def _read_hits(self, results_file, domain, evalue_threshold):\n\n seq_info = {}\n\n read_hit = False\n for line in open(results_file):\n if line[0:2] == '>>':\n line_split = line.split()\n seq_id = line_split[1]\n read_hit = True\n hit_counter = 0\n elif line.strip() == '':\n read_hit = False\n elif read_hit:\n hit_counter += 1\n if hit_counter >= 3:\n line_split = line.split()\n\n iEvalue = line_split[3]\n ali_from = int(line_split[7])\n ali_to = int(line_split[8])\n\n rev_comp = False\n if ali_from > ali_to:\n rev_comp = True\n ali_from, ali_to = ali_to, ali_from\n\n align_len = int(ali_to) - int(ali_from) + 1\n\n if float(iEvalue) <= evalue_threshold:\n seq_info[seq_id] = seq_info.get(\n seq_id, []) + [[domain, iEvalue, str(ali_from), str(ali_to), str(align_len), str(rev_comp)]]\n\n return seq_info", "title": "" }, { "docid": "457d611aca70fb7fca8f2ccda8ae3024", "score": "0.54053897", "text": "def read_fasta(filetoparse):\n name, seq = None, []\n for line in filetoparse:\n line = line.rstrip()\n if line.startswith(\">\"):\n if name:\n yield (name, ''.join(seq))\n name, seq = line, []\n else:\n seq.append(line)\n if name:\n yield (name, ''.join(seq))", "title": "" }, { "docid": "25b51ff0b2469e259c90bc94f91c7de6", "score": "0.5389509", "text": "def read_fasta(fh):\n for header, group in groupby(fh, lambda line: line[0] == \">\"):\n if header:\n line = next(group)\n name = line[1:].strip()\n else:\n seq = \"\".join(line.strip() for line in group)\n yield name, seq", "title": "" }, { "docid": "8876b31735312038d1844e03db0874a1", "score": "0.5388758", "text": "def find_questionId_from_record():\n print(\"find questionId from the action records ...\")\n\n fp = open(actionRecords_path,'r')\n answer_dict,question_dict = read_answer_question_dict()\n A_Q_map_dict = read_A_Q_map()\n\n action_questionId_dict = {}\n tmp_num = 0\n for line in fp:\n line = line.strip().split('\\t')\n action_questionId_dict.setdefault(line[0],{'question':[],'answer':[]})\n tmp_num +=1\n if int(line[1]) > 0:\n actions = line[2].split(',')\n for v in actions:\n v = v.split('|')\n if int(v[2]) != 0:\n Id = 0\n if v[0][0] =='A':\n Id = answer_dict[v[0][1:]]\n action_questionId_dict[line[0]]['answer'].append(Id)\n action_questionId_dict[line[0]]['question'].append(A_Q_map_dict[Id])\n elif v[0][0] == 'Q':\n Id = question_dict.get(v[0][1:])\n action_questionId_dict[line[0]]['question'].append(Id)\n with codecs.open(actionQuestionId_path,'w','utf8') as f:\n json.dump(action_questionId_dict,f)\n 
print(len(action_questionId_dict))\n print(tmp_num)\n return", "title": "" }, { "docid": "c267e0fd9e268ee469b8a6384d6ed4e8", "score": "0.5387992", "text": "def fastaParser(filename):\n seqs = []\n genome = ''\n with open(filename, 'r') as f:\n for line in f:\n if not line[0] == '>':\n seqs.append(list(line.rstrip()))\n return seqs", "title": "" }, { "docid": "17a6d7bf9578575ffa7afc667ffc9b65", "score": "0.53878194", "text": "def loadFASTA(fasta_file):\n\n fasta = {}\n fasta_f = open(fasta_file)\n name = ''\n seq = ''\n line = fasta_f.readline()\n while line != '':\n if line[0] == '>':\n if name != '':\n fasta[ name ] = seq\n name = line[1:].strip()\n seq = ''\n else:\n seq = seq + line.strip()\n line = fasta_f.readline()\n fasta_f.close()\n\n if name != '': \n fasta[name] = seq\n\n return fasta", "title": "" }, { "docid": "65d11f008288a2d7d451ae74dd42ecee", "score": "0.53822607", "text": "def fastq_batched_iterator(handle):\n while True:\n # Read /1\n title = handle.readline()\n if not title:\n raise StopIteration\n if not title[0] == \"@\":\n raise ValueError(\"Expected FASTQ @ line, got %r\" % title)\n id = title.split(None, 1)[0]\n if not id.endswith(\"/1\"):\n raise ValueError(\"Expected FASTQ record ending /1, got %r\" % title)\n seq = handle.readline()\n plus = handle.readline()\n if not plus[0] == \"+\":\n raise ValueError(\"Expected FASTQ + line, got %r\" % plus)\n qual = handle.readline()\n if len(seq) != len(qual): # both include newline\n raise ValueError(\"Different FASTQ seq/qual lengths for %r\" % title)\n # Read /2\n title2 = handle.readline()\n if not title2[0] == \"@\":\n raise ValueError(\"Expected FASTQ @ line, got %r\" % title2)\n id2 = title2.split(None, 1)[0]\n if not id2.endswith(\"/2\"):\n raise ValueError(\"Expected FASTQ record ending /2, got %r\" % title2)\n if id[:-2] != id2[:-2]:\n raise ValueError(\n \"Expected paired FASTQ records, got %r and %r\" % (title, title2)\n )\n seq2 = handle.readline()\n plus = handle.readline()\n if not plus[0] == \"+\":\n raise ValueError(\"Expected FASTQ + line, got %r\" % plus)\n qual2 = handle.readline()\n if len(seq) != len(qual): # both include newline\n raise ValueError(\"Different FASTQ seq/qual lengths for %r\" % title2)\n yield [\n seq.strip().upper(),\n seq2.strip().upper(),\n ], title + seq + \"+\\n\" + qual + title2 + seq2 + \"+\\n\" + qual2", "title": "" }, { "docid": "cb3263d5cd83ff1292c98d845f410516", "score": "0.5379692", "text": "def processReads(sam_file, num_inserts, filtered_id):\n\n\tread_file = open(sam_file,'r')\n\tin_sam = Reader(read_file) \n\t#list of insert IDs\n\tfragment_id = []\n\n\t#list of insert coordinates in the original fasta sequence\n\tread_coord = []\n\n\tprint(\"PROCESSING READS\", flush = True) \n\tfor i in range(0,num_inserts):\n\t\tif i%500000==0 and i!=0:\n\t\t\tprint(\"{:.2f}\".format((i*100/num_inserts))+\"% of reads processed...\") \n\t\tx = next(in_sam)\n\n\t\t#get ID of the read being processed\n\t\tthis_ID = str(x.qname) \n\t\t\n\t\t#only consider the read is mapped \n\t\tif len(filtered_id) > 0: \n\t\t\tif this_ID in filtered_id: \n\t\t\t\t#save the ID of the read \n\t\t\t\tfragment_id.append(x.qname)\n\t\t\n\t\t\t\t#save the coordinates of the read \n\t\t\t\t#subract 1 as the SAM file position starts at 1 but we use index 0 \n\t\t\t\tread_coord.append((x.pos-1,x.pos+len(x.seq)-1))\n\t\telse: \n\t\t\t#save the ID of the read \n\t\t\tfragment_id.append(x.qname)\n\t\t\n\t\t\t#save the coordinates of the read\n\t\t\t#subtract 1 as the SAM file position starts at 1 but we use index 0 
\n\t\t\tread_coord.append((x.pos-1,x.pos-1+(len(x.seq)-1)))\n\t\t\t\n\treturn fragment_id, read_coord", "title": "" }, { "docid": "f248d313cb83172df48035381fd14c91", "score": "0.5377546", "text": "def parse_sequencer(self):\n log.debug(f\"Parsing sequencer table of {self.telescope}\")\n header = True\n data = False\n for line in self.seq_lines:\n if data and line:\n if line.startswith(\"LST1\"):\n self.data_lines.append(line)\n elif \"Tel Seq\" in line:\n data = True\n header = False\n self.keyLine = line\n elif header:\n self.header_lines.append(line)", "title": "" }, { "docid": "a9e95b6ff3d8b6a90425c389ce34dca6", "score": "0.5372185", "text": "def FASTA_iterator(fasta_filename):\n\n with open(fasta_filename, 'rt') as fasta:\n sequence = ''\n identifier = ''\n my_list = []\n for line in fasta:\n if (line[0] == '>'):\n if (sequence != ''):\n my_tuple = (identifier, sequence)\n yield(my_tuple)\n identifier= ''\n sequence= ''\n identifier = line[1:].strip()\n else:\n sequence += line.strip()\n my_tuple = (identifier, sequence)\n yield(my_tuple)", "title": "" }, { "docid": "d48cda87b2b9cd7691a1120e21da3431", "score": "0.53657895", "text": "def seqrcds_to_dict(sequences, key_function=None): #from BioPython (SeqIO)\n if key_function is None:\n key_function = lambda rec: rec.id\n\n d = dict()\n for record in sequences:\n key = key_function(record)\n if key in d:\n raise ValueError(\"Duplicate key '%s'\" % key)\n d[key] = record\n return d", "title": "" }, { "docid": "35be2bc1676765677506ab75bfc91e25", "score": "0.53621376", "text": "def test_read_fasta_format(self):\n self.challenge.sample = '''\n 0\n 1\n >FAS_1\n LLL\n ---\n MMM\n *\n >FAS_2\n AAA\n TTT\n 8\n '''\n self.challenge.read()\n result = self.challenge.fasta(start=2)\n self.assertEqual(result.__name__, 'fasta')\n fasta = dict(result)\n self.assertIn('FAS_1', fasta.keys())\n self.assertEqual('LLL---MMM*', fasta['FAS_1'])\n self.assertIn('FAS_2', fasta.keys())\n self.assertEqual('AAATTT', fasta['FAS_2'])", "title": "" }, { "docid": "9201752b39d7f4075ae22049e77c6b05", "score": "0.5345632", "text": "def process_genomic_positions(infile,hs_dict=None):\n if hs_dict is None:\n hs_dict = {}\n opener = gzip.open if infile.endswith('.gz') else open\n with opener(infile,'rt') as fh:\n rd = csv.reader(fh, delimiter=chr(9))\n for row in rd:\n if not row: # skip empty rows\n continue\n if not row[0].startswith('chr'):\n continue # skip non-body rows\n chrom, chromstart, chromend = row[:3]\n if chrom not in hs_dict:\n hs_dict[chrom] = []\n if (chromstart, chromend) not in hs_dict[chrom]:\n hs_dict[chrom].append((chromstart, chromend))\n return hs_dict", "title": "" }, { "docid": "12a88fafb58382c64bcb79ff815738e5", "score": "0.5343212", "text": "def parse_fastqc(fastqc_path: str, sample_path: str, prefix=\"fastqc_\"):\n # Get the text data files from the FastQC output\n for name in os.listdir(fastqc_path):\n if \"reads\" in name and \".\" not in name:\n suffix = name.split(\"_\")[1]\n shutil.move(\n os.path.join(fastqc_path, name, \"fastqc_data.txt\"),\n os.path.join(sample_path, f\"{prefix}{suffix}.txt\")\n )\n\n # Dispose of the rest of the data files.\n shutil.rmtree(fastqc_path)\n\n fastqc = {\n \"count\": 0\n }\n\n # Parse data file(s)\n for suffix in [1, 2]:\n path = os.path.join(sample_path, f\"{prefix}{suffix}.txt\")\n\n try:\n handle = open(path, \"r\")\n except IOError:\n if suffix == 2:\n continue\n else:\n raise\n\n flag = None\n\n for line in handle:\n # Turn off flag if the end of a module is encountered\n if flag is not None and 
\"END_MODULE\" in line:\n flag = None\n\n # Total sequences\n elif \"Total Sequences\" in line:\n fastqc[\"count\"] += int(line.split(\"\\t\")[1])\n\n # Read encoding (eg. Illumina 1.9)\n elif \"encoding\" not in fastqc and \"Encoding\" in line:\n fastqc[\"encoding\"] = line.split(\"\\t\")[1]\n\n # Length\n elif \"Sequence length\" in line:\n split_length = [int(s) for s in line.split(\"\\t\")[1].split('-')]\n\n if suffix == 1:\n if len(split_length) == 2:\n fastqc[\"length\"] = split_length\n else:\n fastqc[\"length\"] = [split_length[0], split_length[0]]\n else:\n fastqc_min_length, fastqc_max_length = fastqc[\"length\"]\n\n if len(split_length) == 2:\n fastqc[\"length\"] = [\n min(fastqc_min_length, split_length[0]),\n max(fastqc_max_length, split_length[1])\n ]\n else:\n fastqc[\"length\"] = [\n min(fastqc_min_length, split_length[0]),\n max(fastqc_max_length, split_length[0])\n ]\n\n # GC-content\n elif \"%GC\" in line and \"#\" not in line:\n gc = float(line.split(\"\\t\")[1])\n\n if suffix == 1:\n fastqc[\"gc\"] = gc\n else:\n fastqc[\"gc\"] = (fastqc[\"gc\"] + gc) / 2\n\n # The statements below handle the beginning of multi-line FastQC sections. They set the flag\n # value to the found section and allow it to be further parsed.\n elif \"Per base sequence quality\" in line:\n flag = \"bases\"\n if suffix == 1:\n fastqc[flag] = [None] * fastqc[\"length\"][1]\n\n elif \"Per sequence quality scores\" in line:\n flag = \"sequences\"\n if suffix == 1:\n fastqc[flag] = [0] * 50\n\n elif \"Per base sequence content\" in line:\n flag = \"composition\"\n if suffix == 1:\n fastqc[flag] = [None] * fastqc[\"length\"][1]\n\n # The statements below handle the parsing of lines when the flag has been set for a multi-line\n # section. This ends when the 'END_MODULE' line is encountered and the flag is reset to none\n elif flag in [\"composition\", \"bases\"] and \"#\" not in line:\n # Split line around whitespace.\n split = line.rstrip().split()\n\n # Convert all fields except first to 2-decimal floats.\n try:\n values = [round(int(value.split(\".\")[0]), 1) for value in split[1:]]\n\n except ValueError as err:\n if \"NaN\" in str(err):\n values = handle_base_quality_nan(split)\n\n # Convert to position field to a one- or two-member tuple.\n pos = [int(x) for x in split[0].split('-')]\n\n if len(pos) > 1:\n pos = range(pos[0], pos[1] + 1)\n else:\n pos = [pos[0]]\n\n if suffix == 1:\n for i in pos:\n fastqc[flag][i - 1] = values\n else:\n for i in pos:\n fastqc[flag][i - 1] = virtool.utils.average_list(fastqc[flag][i - 1], values)\n\n elif flag == \"sequences\" and \"#\" not in line:\n line = line.rstrip().split()\n\n quality = int(line[0])\n\n fastqc[\"sequences\"][quality] += int(line[1].split(\".\")[0])\n\n return fastqc", "title": "" }, { "docid": "04fe800464f1153255c423da3cca3eb3", "score": "0.5338469", "text": "def read_gene_symbols():\n\n f = gzip.open(FLYBASE_ID_FILE, \"rb\")\n\n id_dict = {}\n\n for line in f:\n line = line.rstrip()\n if line.startswith(\"#\") or line == \"\":\n # skip blank and comment lines\n continue\n\n words = line.split(\"\\t\")\n symbol = words[0]\n anno_ids = words[3].split(\",\")\n\n if len(words) > 4:\n # there are 'secondary annotation IDs'\n anno_ids.extend(words[4].split(\",\"))\n \n\n for anno_id in anno_ids:\n if anno_id in id_dict:\n id_dict[anno_id].append(symbol)\n else:\n id_dict[anno_id] = [symbol]\n f.close()\n\n return id_dict", "title": "" } ]
6095ae754736c9740a4e3ce047fafdc7
Displays the server info if any.
[ { "docid": "f1a1fc5a37110aafc100a88402524f90", "score": "0.6602481", "text": "async def info(self, ctx):\n\n # Check if we're suppressing @here and @everyone mentions\n if self.settings.getServerStat(ctx.message.guild, \"SuppressMentions\"):\n suppress = True\n else:\n suppress = False\n\n serverInfo = self.settings.getServerStat(ctx.message.guild, \"Info\")\n msg = 'I have no info on *{}* yet.'.format(ctx.message.guild.name)\n if serverInfo:\n msg = '*{}*:\\n\\n{}'.format(ctx.message.guild.name, serverInfo)\n\n # Check for suppress\n if suppress:\n msg = Nullify.clean(msg)\n\n await ctx.channel.send(msg)", "title": "" } ]
[ { "docid": "6e2feebea8ad91bef927d600a232cd0f", "score": "0.7686046", "text": "async def server_info(self, ctx):\r\n try:\r\n server = ctx.message.server\r\n description = \"**Server owner:** {}#{}\\n\".format(server.owner.name, server.owner.discriminator)\r\n description += \"**Server region:** {}\\n\".format(server.region)\r\n adminList = []\r\n for member in server.members:\r\n if ctx.message.channel.permissions_for(member).administrator:\r\n adminList.append(member.name)\r\n description += \"\\n**Administrators:**\\n\" + ', '.join(adminList)\r\n title = \"Server Information: {}\".format(server.name)\r\n em = tools.createEmbed(title=title, description=description)\r\n await self.bot.say(embed=em)\r\n except Exception as e:\r\n await self.bot.say(\"{}: {}\".format(type(e).__name__, e))", "title": "" }, { "docid": "4c54318df1948a3dc4695b3b5d0421e0", "score": "0.7446531", "text": "async def server_info(self, ctx):\n name = str(ctx.guild.name)\n description = str(ctx.guild.description)\n\n owner = str(ctx.guild.owner)\n id = str(ctx.guild.id)\n region = str(ctx.guild.region)\n memberCount = str(ctx.guild.member_count)\n\n icon = str(ctx.guild.icon_url)\n \n embed = discord.Embed(\n title=name + \" Server Information\",\n description=\"Name: \"+name+\" | Description: \"+description,\n color=discord.Color.gold()\n )\n embed.set_thumbnail(url=icon)\n embed.add_field(name=\"Owner\", value=owner, inline=True)\n embed.add_field(name=\"Server ID\", value=id, inline=True)\n embed.add_field(name=\"Region\", value=region, inline=True)\n embed.add_field(name=\"Member Count\", value=memberCount, inline=True)\n\n await ctx.send(embed=embed)", "title": "" }, { "docid": "57f727557ca709049dab82082b7f79c7", "score": "0.74413824", "text": "def show():\n print(SEPARATOR)\n for s in ServerMenu:\n print(s.number, s.display_string, sep=\") \")\n print(SEPARATOR)", "title": "" }, { "docid": "0a72f5e26315ac19a6c51043ad0a5121", "score": "0.7252854", "text": "async def serverinfo(self, ctx):\n server = ctx.message.guild\n roles = [x.name for x in server.roles]\n role_length = len(roles)\n if role_length > 50:\n roles = roles[:50]\n roles.append(f\">>>> Displaying[50/{len(roles)}] Roles\")\n roles = \", \".join(roles)\n channels = len(server.channels)\n time = str(server.created_at)\n time = time.split(\" \")\n time = time[0]\n\n embed = discord.Embed(\n title=\"**Server Name:**\",\n description=f\"{server}\",\n color=config[\"success\"]\n )\n embed.set_thumbnail(\n url=server.icon_url\n )\n embed.add_field(\n name=\"Owner\",\n value=f\"{server.owner}\\n{server.owner.id}\"\n )\n embed.add_field(\n name=\"Server ID\",\n value=server.id\n )\n embed.add_field(\n name=\"Member Count\",\n value=server.member_count\n )\n embed.add_field(\n name=\"Text/Voice Channels\",\n value=f\"{channels}\"\n )\n embed.add_field(\n name=f\"Roles ({role_length})\",\n value=roles\n )\n embed.set_footer(\n text=f\"Created at: {time}\"\n )\n await ctx.send(embed=embed)", "title": "" }, { "docid": "6be5abc7b443392e750f30dfbcdcdd22", "score": "0.7137797", "text": "def ecl_servers():\n ecl_show_view(\"Servers\")", "title": "" }, { "docid": "763656a98379de714751244f390fc217", "score": "0.7081196", "text": "async def server_info(self, ctx, member: discord.Member = None):\n admin_roles = [\n role for role in ctx.guild.roles if role.permissions.administrator]\n members = set(\n [member.display_name for role in admin_roles for member in role.members])\n\n embed = discord.Embed(\n title=f'Server Information of {ctx.guild.name}',\n color=0x8FE381, 
timestamp=ctx.message.created_at\n )\n embed.set_thumbnail(url=f'{ctx.guild.icon_url}')\n embed.add_field(name=\"Owner\", value=f\"{ctx.guild.owner.display_name}\")\n embed.add_field(name=\"Admins\", value=\", \" .join(\n members))\n embed.add_field(name=\"Members\", value=f\"{ctx.guild.member_count}\")\n embed.add_field(name=\"Server Roles\", value=f\"{len(ctx.guild.roles)}\")\n embed.add_field(name=\"Text Channels\",\n value=f\"{len(ctx.guild.text_channels)}\")\n embed.add_field(name=\"Voice Channels\",\n value=f\"{len(ctx.guild.voice_channels)}\")\n embed.add_field(name=\"Server Boosts\",\n value=f\"{ctx.guild.premium_subscription_count}\")\n embed.set_footer(\n text=f\"Server ID: {ctx.guild.id}\")\n\n await ctx.channel.send(embed=embed)", "title": "" }, { "docid": "78b45f91fbe40a77d53c76496d76218a", "score": "0.7062069", "text": "def print_server_list(self):\n self.servers_lock.acquire()\n self.output(\"Servers:\")\n if len(self.servers) == 0:\n self.output(\"None\")\n else:\n for s in self.servers.keys():\n self.output((s, self.servers[s]))\n self.servers_lock.release()", "title": "" }, { "docid": "bccdf26b44c2c61f4c6123a044396a7d", "score": "0.70572597", "text": "def serverinfo(self):\n return self.__callServer(\"serverinfo\")", "title": "" }, { "docid": "3db806031f4bf88ea5b0e0b0c3f07564", "score": "0.7029099", "text": "def show_info(self):\n print(self.sock, 'started')", "title": "" }, { "docid": "c369fcfc548a7fc66a68371cd4318811", "score": "0.69911754", "text": "def show_sysinfo(self):\n self.__socket.send(Packet(content=self.compatible.sysinfo()))\n packet = self.response_handler.handle(self.__socket.recv())\n print(packet.content)", "title": "" }, { "docid": "14a078f6bd39b92d3037c304f056bd06", "score": "0.6982949", "text": "def server_show(endpoint_id, server_id):\n client = get_client()\n\n server_doc = client.get_endpoint_server(endpoint_id, server_id)\n\n if outformat_is_json():\n print_json_response(server_doc)\n else:\n fields = (('ID', 'id'), ('URI', 'uri'), ('Subject', 'subject'))\n colon_formatted_print(server_doc, fields)", "title": "" }, { "docid": "8eba45cd1473c4fc557573c9ec59f1ef", "score": "0.689191", "text": "async def serverinfo(self, ctx):\n guild = ctx.guild\n guild_created_on = guild.created_at.strftime(\"%d/%m/%Y\")\n embed = discord.Embed(title = guild.name, description = f\"Created on {guild_created_on}\", colour = discord.Colour.random())\n embed.add_field(name = \"Members\", value = len(guild.members), inline = True)\n embed.add_field(name = \"Roles\", value = str(len(guild.roles)), inline = True)\n embed.add_field(name = \"Channels\", value = (f\"Text channels: {len(guild.text_channels)}\\nVoice channels: {len(guild.voice_channels)}\"), inline = True)\n embed.add_field(name = \"Owner\", value = guild.owner.name + \"#\" + guild.owner.discriminator, inline = True)\n embed.add_field(name = \"Voice region\", value = guild.region, inline = True)\n embed.add_field(name = \"Nitro boosts\", value = f\"{guild.premium_subscription_count} (level {guild.premium_tier})\", inline = True)\n embed.set_thumbnail(url = guild.icon_url if len(guild.icon_url) else ctx.author.default_avatar_url)\n embed.set_footer(text = f\"ID: {guild.id}\")\n await ctx.send(embed=embed)", "title": "" }, { "docid": "5d377d7e82534c8f8c21059ee8593d4a", "score": "0.68650913", "text": "def server_info(self):\n uri = self._build_uri('server')\n response = self._get(uri)\n return response.json()", "title": "" }, { "docid": "1cceee3241a53f973e99152db37c4d9f", "score": "0.6836126", "text": "def 
displayInfo():", "title": "" }, { "docid": "0378a5a4cf281c94f6a1b226574bc386", "score": "0.68214196", "text": "def show_info(self):\n\t\tpass", "title": "" }, { "docid": "1c8fd15751711e616cc50f4692eca1da", "score": "0.67641014", "text": "def screen_lms_info():\n if type(server_status) is not dict:\n print(\"IP: %s\", ip)\n print(\"LMS not found\")\n print(\"No player!\")\n else:\n print(\"LCD Displayer's IP: \", ip)\n print(\"LMS Version: \" + server[\"version\"], 3)\n print(\"LMS IP: \" + str(server[\"ip\"]))\n #lcd.lcd_display_string(player_info[\"name\"], 4)\n sleep(3)\n lastscan = server['lastscan']\n lastscanreadable = strftime(\" %D %H:%M\", gmtime(int(lastscan)))\n print(\"Last Scan: \" + lastscanreadable)\n print(\"Albums : \" + str(server[\"info total albums\"]))\n print(\"Songs : \" + str(server[\"info total songs\"]))\n sleep(3)", "title": "" }, { "docid": "cf4a97e4a623661cf7f0393c631728c9", "score": "0.6532639", "text": "def info(args):\n print \"\"\"This is TiddlyWeb version %s.\n The current store is: %s.\"\"\" % (VERSION, config['server_store'][0])\n if config['system_plugins']:\n print 'System Plugins:'\n for plugin in config['system_plugins']:\n module = __import__(plugin)\n print '\\t%s (%s)' % (plugin,\n getattr(module, '__version__', 'unknown'))", "title": "" }, { "docid": "5d2b778ee33b4f4b0f1177af55bc90eb", "score": "0.6528657", "text": "def show():\n print(SEPARATOR)\n for s in ServerReportsMenu:\n print(s.number, s.display_string, sep=\") \")\n print(SEPARATOR)", "title": "" }, { "docid": "806828f69adbe166fc454cf42ff6a249", "score": "0.6494497", "text": "def do_show(self, parsed):\n showwhat = parsed.get_binding('what').lower()\n if showwhat == 'version':\n self.get_connection_versions()\n self.show_version()\n elif showwhat == 'host':\n self.show_host()\n elif showwhat.startswith('session'):\n session_id = parsed.get_binding('sessionid').lower()\n self.show_session(UUID(session_id))\n elif showwhat.startswith('replicas'):\n token_id = parsed.get_binding('token')\n keyspace = parsed.get_binding('keyspace')\n self.show_replicas(token_id, keyspace)\n else:\n self.printerr('Wait, how do I show %r?' 
% (showwhat,))", "title": "" }, { "docid": "04d3a4a9dad3a17d509f0fb850d35e38", "score": "0.6488203", "text": "def print_server_status(self):\n print (\"USERS:\")\n for user in self.json_data[\"USER_LIST\"]:\n print(user)\n print()\n print(\"MESSAGES:\")\n for message in self.json_data[\"MESSAGES\"][-10:]:\n self.print_message(message)", "title": "" }, { "docid": "4ca82056e04732e9f89f328d75c6a7c3", "score": "0.64354944", "text": "def test_show_server_usage(self):\n self.rbac_utils.switch_role(self, toggle_rbac_role=True)\n self.servers_client.show_server(self.server_id)", "title": "" }, { "docid": "95ababd70f17d3434a6f04e1fccde5d0", "score": "0.6362304", "text": "def server_info(url):\n print(\"server information:\")\n req = requests.get(url)\n headers = ['Server', 'Date', 'Via', 'X-Powered-By', 'X-Country-Code']\n\n for header in headers:\n try:\n result = req.headers[header]\n print('%s: %s' % (header, result))\n except Exception as error:\n print('%s: Not found' % header)\n print('----')", "title": "" }, { "docid": "4506233f3cb362f79af6ea9d52768827", "score": "0.63255537", "text": "async def serverinfo(ctx):\r\n coolserver = ctx.author.guild\r\n\r\n coolzone = timezone(\"US/Eastern\")\r\n uncoolzone = timezone('UTC')\r\n\r\n vuncooltime = coolserver.created_at\r\n uncooltime = uncoolzone.localize(vuncooltime)\r\n cooltime = uncooltime.astimezone(coolzone)\r\n\r\n coolembed = discord.Embed(color=0x000000, timestamp=ctx.message.created_at)\r\n coolembed.set_author(name=f\"{bot.command_prefix}serverinfo\")\r\n coolembed.set_footer(text=f\"Developed by Star Beam\", icon_url=bot.user.avatar_url)\r\n coolembed.set_thumbnail(url=coolserver.icon_url)\r\n\r\n coolembed.add_field(name=\"Server\", value=f\"{coolserver.name} ({coolserver.id})\")\r\n coolembed.add_field(name=\"Members\", value=len(coolserver.members))\r\n coolembed.add_field(name=\"Server Owner\", value=coolserver.owner)\r\n coolembed.add_field(name=\"Boost Level\", value=coolserver.premium_tier)\r\n coolembed.add_field(name=\"Channels\", value=f\"{len(coolserver.text_channels)} text, {len(coolserver.voice_channels)} voice\")\r\n coolembed.add_field(name=\"Region\", value=coolserver.region)\r\n coolembed.add_field(name=\"Creation Date\", value=cooltime.strftime(\"%a, %#d %B %Y, %H:%M %Z\"))\r\n coolembed.add_field(name=f\"Roles ({len(coolserver.roles)})\", value=f\"To see a full list, type {bot.command_prefix}roles!\")\r\n \r\n \r\n await ctx.send(embed=coolembed)", "title": "" }, { "docid": "ab6b4b758aee9b9e67b947ce67f768b5", "score": "0.6291665", "text": "def get_server_info(session,server, tout):\n r = session.get(server+'/api/server_info', timeout=tout)\n if not r.ok:\n raise InteropError(r)\n\n return r.json()", "title": "" }, { "docid": "7ad240fce4e91739579a85b7591c2316", "score": "0.62636244", "text": "def print_info(self) -> None:\n info = self.info\n print(\n \"\\u001b[1mInformation of `\"\n + self.class_name\n + \"` \"\n + self.host\n + \":\\u001b[0m\"\n )\n for key, value in info.items():\n\n if key == \"memory\":\n display_value = str(self.memory_in_mb) + \" mb\"\n elif isinstance(value, list):\n display_value = \"\"\n for gpu in value:\n display_value = \"{}\".format(gpu)\n else:\n display_value = value\n\n print(\"{:<8} {:<8}\".format(key, display_value))", "title": "" }, { "docid": "a08bb1a9a6f159575d85ca0e59e566f9", "score": "0.624703", "text": "def set_server_info(self):\n si = self._server_info()\n\n # Send the server info to KMOD.\n self.conn.write_instruction(KPP_BEG_SESSION)\n 
self.conn.write_instruction(KPP_SET_KSERVER_INFO)\n self.conn.write_structure(si)\n self.conn.write_instruction(KPP_END_SESSION)", "title": "" }, { "docid": "665cae3d5f69c54cab9cd4f0b5919f38", "score": "0.6234638", "text": "def info(request):\n return render_to_response(\"info.html\", {})", "title": "" }, { "docid": "b7ccf8c1aa542c1eb92d7bd22d4dd1f7", "score": "0.622741", "text": "def _print_config(self) -> None:\n\n print(f'Server started at http://{self._hostname}:{self._port}')\n print(f'=> template_dir: {self._template_dir}')\n print(f'=> static_dir: {self._static_files}')", "title": "" }, { "docid": "dff44083ad07bdbea7f85e64cc9d79cd", "score": "0.6210536", "text": "def print_info( self ):\n print( \"\\n\".join( self.info_buffer ) )", "title": "" }, { "docid": "3fb2e7e5badca4b3bd20a3be567acffb", "score": "0.62008977", "text": "def show(self):\n show(self, self.title, self.filename, self.server, self.notebook)", "title": "" }, { "docid": "65ae2a937326ed30209661c30125f826", "score": "0.6199855", "text": "def serverinfo(url=\"http://localhost:8080/manager\", timeout=180):\n\n data = _wget(\"serverinfo\", {}, url, timeout=timeout)\n if data[\"res\"] is False:\n return {\"error\": data[\"msg\"]}\n\n ret = {}\n data[\"msg\"].pop(0)\n for line in data[\"msg\"]:\n tmp = line.split(\":\")\n ret[tmp[0].strip()] = tmp[1].strip()\n\n return ret", "title": "" }, { "docid": "26d46c521e13b1cd87586de9b95f956a", "score": "0.6192439", "text": "def getServerInfo(self) -> ghidra.framework.model.ServerInfo:\n ...", "title": "" }, { "docid": "566dadec525837159e4fe6da7957b5f4", "score": "0.61897105", "text": "def server_info(self):\n return self.admin.command(\"buildinfo\",\n read_preference=ReadPreference.PRIMARY)", "title": "" }, { "docid": "98d57f5a56b892687514724298df079d", "score": "0.61769694", "text": "def server_info(self, session=None):\n return self.admin.command(\"buildinfo\",\n read_preference=ReadPreference.PRIMARY,\n session=session)", "title": "" }, { "docid": "fe30bfd887ceeb81efe976ba7a6d9b28", "score": "0.61516273", "text": "def fetch_info(server, *, session=None):\n return fetch_generic_json(f'{server}/api/server/info', session=session)", "title": "" }, { "docid": "0d9c9e05d1a9dd079555dd586c4af6a6", "score": "0.6149072", "text": "def draw_developer_info(self, context, layout):\n global CLIENT\n \n prefs = BlenderfarmAddonPreferences.get(context)\n\n column = layout.column_flow(columns=1)\n\n # Developer info box.\n box = column.box()\n\n # Server name row\n \n version = CLIENT.get_server_info('version')\n uptime = CLIENT.get_server_info('uptime')\n\n if uptime:\n # pylint: disable=bad-whitespace\n seconds = math.floor(uptime % 60)\n minutes = math.floor(uptime / 60)\n hours = math.floor(uptime / 60 / 60)\n days = math.floor(uptime / 60 / 60 / 24)\n years = math.floor(uptime / 60 / 60 / 24 / 365)\n\n days -= years * 365\n \n uptime = str(minutes) + 'm ' + str(seconds) + 's'\n\n if hours:\n uptime = str(hours) + 'h ' + uptime\n \n if days:\n uptime = str(days) + 'd ' + uptime\n \n if years:\n uptime = str(years) + 'y ' + uptime\n \n self.add_row(box, 'Server Version', version or '<not connected>')\n self.add_row(box, 'Server Uptime', uptime or '<not connected>')\n self.add_row(box, 'Client ID', prefs.client_id or '<not connected>')\n\n self.draw_task_info(context, column)", "title": "" }, { "docid": "4c35abaa7df0d180224361b986d0e4ea", "score": "0.61357695", "text": "async def info(self, ctx):\n em = discord.Embed(title='SciBo Info', colour=discord.Colour.blurple())\n em.description = 
ctx.bot.description\n em.set_thumbnail(url=ctx.bot.user.avatar_url_as(format='png', size=128))\n em.add_field(name='Prefixes', value='\\n'.join([f'\\u2022 {x}' for x in ctx.bot.prefixes]))\n em.add_field(name='Uptime', value=str(datetime.now() - ctx.bot.start_time).split('.')[0])\n em.add_field(name='Ping', value=f'{ctx.bot.latency * 1000:.1f}')\n em.add_field(name='Owners', value='\\n'.join(['\\u2022 naught0#4417', '\\u2022 NCPlayz#7941']))\n em.add_field(name='Source', value='[On Github](https://github.com/NadaChem/SciBo)')\n\n await ctx.send(embed=em)", "title": "" }, { "docid": "15e984ff91662a6f1da8521a5a2dff9a", "score": "0.6109436", "text": "def do_show(self, line):\n _kwargs = self._parse_show(line)\n if _kwargs:\n _info = self.cloud.info_service(_kwargs[\"name\"])\n if not _info:\n self.stdout.write(\"Can't retrieve service information\\n\")\n else:\n self.stdout.write(json.dumps(_info, indent=2))\n self.stdout.write(\"\\n\")", "title": "" }, { "docid": "7892aae9b2a5816b4bd37cff83f93eda", "score": "0.6098074", "text": "def cmd_ultraserverinfo(self, data, client=None, cmd=None):\n \n #Get server information\n gametype = self.console.getCvar('g_gametype').getInt()\n mapname = self.console.getNextMap()\n \n if gametype==0:\n gametype='FFA'\n if gametype==1:\n gametype='LMS'\n if gametype==3:\n gametype='TDM'\n if gametype==4:\n gametype='TS'\n if gametype==7:\n gametype='CTF'\n if gametype==8:\n gametype='Bomb'\n if gametype==9:\n gametype='Jump'\n\n \n cmd.sayLoudOrPM(client, \"^7Server: %s\" % self.console.getCvar('sv_hostname').getString())\n cmd.sayLoudOrPM(client, \"^7Version: ^5%s\" % self.console.getCvar('version').getString())\n cmd.sayLoudOrPM(client, \"^7Public Slots: ^2%s\" % self.console.getCvar('sv_maxclients').getString())\n cmd.sayLoudOrPM(client, \"^7Private Slots: ^2%s\" % self.console.getCvar('sv_privateClients').getString())\n cmd.sayLoudOrPM(client, \"^7Gametype: ^5%s\" % gametype)\n cmd.sayLoudOrPM(client, \"^7Timelimit: ^2%s\" % self.console.getCvar('timelimit').getString())\n cmd.sayLoudOrPM(client, \"^7Fraglimit: ^2%s\" % self.console.getCvar('fraglimit').getString())\n cmd.sayLoudOrPM(client, \"^7Current map: ^2%s\" % self.console.getCvar('mapname').getString())\n cmd.sayLoudOrPM(client, \"^7Next Map: ^2%s\" % mapname)", "title": "" }, { "docid": "84746274f31283ba7d7943b0750fc651", "score": "0.6094178", "text": "def _AzServerShow(self):\n cmd = [\n azure.AZURE_PATH,\n self.GetAzCommandForEngine(),\n self.SERVER_TYPE,\n 'show',\n '--resource-group',\n self.resource_group.name,\n '--name',\n self.instance_id,\n ]\n stdout, _, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)\n if retcode != 0:\n return None\n json_output = json.loads(stdout)\n return json_output", "title": "" }, { "docid": "c756a4a7e0fdba376fbde8999b03adf1", "score": "0.6083411", "text": "def display(self):\n print()\n print('****************************************************************')\n print()\n print(self.__world)\n print(self.show_status())", "title": "" }, { "docid": "c84f3dbb50cbbd03372c211d7c7651a2", "score": "0.60804814", "text": "def show(self, id):\n return self._get(\n url='{0}servers/{1}.json'.format(self.URL, id),\n headers=self.headers,\n )", "title": "" }, { "docid": "1889d959a761d02bf0fcc841b3b3d2fa", "score": "0.60779005", "text": "def get_server_info( connection ):\n return get_json_generic( connection, \"/api/server/info\", schema='server_info.jsonschema' )", "title": "" }, { "docid": "2fb1c8fa8a32576eba3af350b959844e", "score": "0.6047232", "text": "def 
info(self):\n print(\"Label:\", self.label)\n print(\"Type:\", self.type)\n print(\"Flavors:\", len(self.flavors))\n print(\"Servers:\", len(self.servers))\n print(\"Images:\", len(self.images))\n print(\"Security Groups:\", len(self.security_groups))\n print(\"Stacks:\", len(self.stacks))\n print(\"Usage:\", self.usage)\n # print \"Users:\", len(self.users)\n # print \"Tenants:\", len(self.tenants)", "title": "" }, { "docid": "15587b58538ccc07953cf71f9b1fa8bd", "score": "0.60407877", "text": "def show(self):\n self.server.start()\n super(ServerDialog, self).show()", "title": "" }, { "docid": "bb6d539505bd7a83ed7c651d238399e5", "score": "0.60053056", "text": "def __showSiteInfo(self):\n from .SiteInfoDialog import SiteInfoDialog\n siteinfoDialog = SiteInfoDialog(\n self.__browser, self.__browser.mainWindow())\n siteinfoDialog.show()", "title": "" }, { "docid": "5084b3729d3921b483fd60f4db1823d5", "score": "0.6002773", "text": "def printUsage(self):\n\t\tprint \"Commands supported:\"\n\t\tprint \"list - provides a listing of the files and directories on the server path\"\n\t\tprint \"cd - changes the directory on the server\"\n\t\tprint \"get <filename> - pulls a text file from the server and stores it in the local directory\"\n\t\tprint \"quit - shuts down the server and the client\"\n\t\treturn", "title": "" }, { "docid": "fdbd81457da3d46f982581653c31c7e6", "score": "0.60012734", "text": "def showInfoScreen (self):\n raise NotImplementedError (\"Lack of showInfoScreen method\")", "title": "" }, { "docid": "f1ba137588cd87da3a6beef87e63a222", "score": "0.60011727", "text": "def pfinfo():\n\n status = { \n 'info': packetfilter.get_status(),\n 'ifaces': packetfilter.get_ifaces(),\n 'limits': packetfilter.get_limit(),\n 'timeouts': packetfilter.get_timeout()\n }\n\n return render_template('pfinfo.html', logged_in=flask_login.current_user.get_id(), status_tab='active', status=status)", "title": "" }, { "docid": "642cb3b762db3704d9c2f9d40183a4cb", "score": "0.5999841", "text": "def display_infos():\n\tmask = myMask.mask\n\tip = myIPAddress.address\n\tif mask == '0':\n\t\tmask = '1'\n\telif mask == '31' or mask == '32':\n\t\tmask = '30'\n\tv['mask_label'].text = cidrToDec(mask) + ' (/' + mask + ')'\n\tAR, ARS, BC, P1, P2, nbrHosts = calculates(ip, mask)\n\trealsHosts = str((2 ** nbrHosts) - 2)\n\tv['ar_label'].text = AR\n\tv['ars_label'].text = ARS\n\tv['bc_label'].text = BC\n\tv['scope_label1'].text = P1\n\tv['scope_label2'].text = P2\n\tv['hosts_label'].text = realsHosts", "title": "" }, { "docid": "53e4dfafd8ac08373f15515390f5e41d", "score": "0.5991664", "text": "def info():", "title": "" }, { "docid": "53e4dfafd8ac08373f15515390f5e41d", "score": "0.5991664", "text": "def info():", "title": "" }, { "docid": "b9a28dd772af6def401f12121640d041", "score": "0.5991607", "text": "async def _info_president(self, ctx):\n\n server = ctx.message.server\n settings = self.check_server_settings(server)\n await self.bot.say(settings[\"Config\"])", "title": "" }, { "docid": "2bf2c89d44736bace637b3be6176b859", "score": "0.598609", "text": "def print_info():\n\n printf(\"This is a helper script for configuring systemd services.\", f=\"bold\")\n printf(\"{}Maintainer: {}{}\".format(FTY.ansi(\"FG_GREEN\"), FTY.ansi(\"RESET\"), MAINTAINER_NICK))\n printf(\"{}Email: {}{}\".format(FTY.ansi(\"FG_GREEN\"), FTY.ansi(\"RESET\"), MAINTAINER_EMAIL))\n\n sys.exit(0)", "title": "" }, { "docid": "c6811f048cb3f04ce898b013cec9d029", "score": "0.59677845", "text": "def show (self):\n\n address = (self._config.socket_address, 
self._config.socket_port)\n\n try:\n\n sock = socket.socket ()\n sock.connect (address)\n sock.send (\"update\")\n sock.close ()\n\n except socket.error as e:\n\n if e.errno == errno.ECONNREFUSED:\n return False\n\n return True", "title": "" }, { "docid": "e93e79d250942a52cb94eea067f4e6f0", "score": "0.5958973", "text": "async def info_command(self, ctx):\n embed = discord.Embed(title='\\nSummary',\n description='Ghento is designed to be as entertaining as possible resulting\\nin the best experince on your discord server ',\n colour=0x98FB98)\n embed.set_author(name='Ghento Bot Information',\n url='https://discordapp.com/api/oauth2/authorize?client_id=616092750051409928&permissions=8&scope=bot',\n icon_url='https://cdn.discordapp.com/attachments/615011904410222595/616109163315200020/istockphoto-673647420-612x612.jpg')\n\n embed.add_field(name='Server Invite', value='[Invite to server](https://discordapp.com/api/oauth2/authorize?client_id=616092750051409928&permissions=8&scope=bot)')\n embed.add_field(name='Invoked By', value=ctx.author.mention)\n embed.set_footer(text='\\nMade in Python', icon_url='http://i.imgur.com/5BFecvA.png')\n\n await ctx.send(embed=embed)", "title": "" }, { "docid": "201a22ce63a96d2e853576cae24dbaed", "score": "0.5946319", "text": "async def info(self, ctx):\n embed = discord.Embed(\n color= r.randint(0, 0xFFFFFF),\n description= f\"Here's the info for **{self.bot.user}**\")\n fields = {\n \"Developer :computer:\": self.bot.admins[0],\n \"Admin 1 :tickets:\": self.bot.admins[1],\n \"Admin 2 :golf:\": self.bot.admins[2],\n \"Servers :homes:\": len(self.bot.guilds),\n \"Total users :busts_in_silhouette:\": len(list(self.bot.get_all_members())),\n \"Version :white_check_mark:\": VERSION,\n \"Ping :ping_pong:\": str(int(self.bot.latency*1000))+ \"ms\",\n \"Last restart :calendar:\": RUN_TIME,\n \"Region :earth_asia:\": \"Australia\",\n \"Code platform :bow_and_arrow:\": \"GitHub\",\n \"Hosting service :dart:\": \"Heroku\",\n \"Language :airplane:\": \"discord.py rewrite\\nPython 3.7\"}\n\n for field in fields: embed.add_field(name= field, value= fields[field])\n await ctx.send(embed= embed)\n\n if ctx.author in self.bot.admins:\n embed = discord.Embed(description= \"**Admin stats**\", color= r.randint(0, 0xFFFFFF))\n fields = {\"Commands run\": self.bot.commands_run, \"Commands run not admin\": self.bot.non_admin_commands_run}\n for field in fields: embed.add_field(name= field, value= fields[field])\n await ctx.send(embed= embed)", "title": "" }, { "docid": "7a61db3d8b3aa75af4e25ef694417e1c", "score": "0.5919973", "text": "def show_info(self):\r\n # Read the header and footer html snippets\r\n header = html_header()\r\n footer = html_footer()\r\n\r\n string = header\r\n\r\n heading = m.Heading(self.tr('OSM Downloader'), **INFO_STYLE)\r\n body = self.tr(\r\n 'This tool will fetch building (\\'structure\\') or road ('\r\n '\\'highway\\') data from the OpenStreetMap project for you. '\r\n 'The downloaded data will have InaSAFE keywords defined and a '\r\n 'default QGIS style applied. To use this tool effectively:'\r\n )\r\n tips = m.BulletedList()\r\n tips.add(self.tr(\r\n 'Your current extent will be used to determine the area for which '\r\n 'you want data to be retrieved. You can adjust it manually using '\r\n 'the bounding box options below.'))\r\n tips.add(self.tr(\r\n 'Check the output directory is correct. 
Note that the saved '\r\n 'dataset will be called either roads.shp or buildings.shp (and '\r\n 'associated files).'\r\n ))\r\n tips.add(self.tr(\r\n 'By default simple file names will be used (e.g. roads.shp, '\r\n 'buildings.shp). If you wish you can specify a prefix to '\r\n 'add in front of this default name. For example using a prefix '\r\n 'of \\'padang-\\' will cause the downloaded files to be saved as '\r\n '\\'padang-roads.shp\\' and \\'padang-buildings.shp\\'. Note that '\r\n 'the only allowed prefix characters are A-Z, a-z, 0-9 and the '\r\n 'characters \\'-\\' and \\'_\\'. You can leave this blank if you '\r\n 'prefer.'\r\n ))\r\n tips.add(self.tr(\r\n 'If a dataset already exists in the output directory it will be '\r\n 'overwritten.'\r\n ))\r\n tips.add(self.tr(\r\n 'This tool requires a working internet connection and fetching '\r\n 'buildings or roads will consume your bandwidth.'))\r\n tips.add(m.Link(\r\n 'http://www.openstreetmap.org/copyright',\r\n text=self.tr(\r\n 'Downloaded data is copyright OpenStreetMap contributors'\r\n ' (click for more info).')\r\n ))\r\n message = m.Message()\r\n message.add(heading)\r\n message.add(body)\r\n message.add(tips)\r\n string += message.to_html()\r\n string += footer\r\n\r\n self.web_view.setHtml(string)", "title": "" }, { "docid": "272cfc48db02658fa50dd6a763851f64", "score": "0.5918041", "text": "def connection_info():\n result = [\n '{fig} - {socket}'.format(\n fig=(manager.canvas.figure.get_label()\n or \"Figure {}\".format(manager.num)),\n socket=manager.web_sockets)\n for manager in Gcf.get_all_fig_managers()\n ]\n if not is_interactive():\n result.append(f'Figures pending show: {len(Gcf.figs)}')\n return '\\n'.join(result)", "title": "" }, { "docid": "28da6c13229a243d920250b546604e46", "score": "0.588864", "text": "def show_docker_info(self):\n return self.info_view.render()", "title": "" }, { "docid": "7a45db294fb3a59a9c8021eb613ba640", "score": "0.5875084", "text": "def ControllerInfo(self):\n self._JsonResponse(self.server.box.Info())", "title": "" }, { "docid": "7c68b577613d80905f9b40e981d057af", "score": "0.58633643", "text": "def show_node_info(self):\n current_list = self.tabWidget.currentIndex()\n if current_list == BASIC_TAB:\n bbb = self.basicList.selectedItems()[0].text()\n elif current_list == ADVANCED_TAB:\n bbb = self.advancedList.selectedItems()[0].text()\n else:\n bbb = self.serviceList.selectedItems()[0].text()\n bbb_ip, bbb_hostname = bbb.split(\" - \")\n hashname = \"BBB:{}:{}\".format(bbb_ip, bbb_hostname)\n try:\n info = self.nodes_info[hashname]\n self.window = BBBInfo(info)\n self.window.show()\n except KeyError:\n QtWidgets.QMessageBox.warning(self, \"Warning\", \"The node you are trying to get information isn't connected\",\n QtWidgets.QMessageBox.Abort)", "title": "" }, { "docid": "896e7e74f5e461c61eea4334428c71cd", "score": "0.585629", "text": "def showInfoWindow():\n\treturn 1", "title": "" }, { "docid": "9c1d1cb47137a6f87ee5df3f2ba40a0b", "score": "0.58555835", "text": "def cli_jira_serverinfo(ctx, health_check):\n jira_serverinfo_path = \"rest/api/2/serverInfo\"\n params = {\n 'doHealthCheck': health_check\n }\n _res = ctx.obj['connect'].get(jira_serverinfo_path, params=params, headers=json_headers, auth=True)\n ctx.obj['writer'].out(_res)", "title": "" }, { "docid": "43a8283d2d344a45c2467f00df72c5ef", "score": "0.5848617", "text": "async def server(self, ctx):\n if ctx.invoked_subcommand is None:\n\n rowcheck = await self.getserverstuff(ctx)\n\n findbots = sum(1 for member in ctx.guild.members if 
member.bot)\n\n emojilist = \"​\"\n for Emoji in ctx.guild.emojis:\n emojilist += f\"{Emoji} \"\n if len(emojilist) > 1024:\n emojilist = \"Too long!\"\n\n if rowcheck[\"embeds\"] == 0 or not permissions.can_embed(ctx):\n return await ctx.send(\n f\"```\\nServer Name: {ctx.guild.name}\\nServer ID: {ctx.guild.id}\\nMembers: {ctx.guild.member_count}\\nBots: {findbots}\\nOwner: {ctx.guild.owner}\\nRegion: {ctx.guild.region}\\nCreated At: {default.date(ctx.guild.created_at)}\\n```\\nEmojis: {emojilist}\"\n )\n\n embed = discord.Embed(colour=249_742)\n embed.set_thumbnail(url=ctx.guild.icon_url)\n embed.add_field(name=\"Server Name\", value=ctx.guild.name, inline=True)\n embed.add_field(name=\"Server ID\", value=ctx.guild.id, inline=True)\n embed.add_field(name=\"Members\", value=ctx.guild.member_count, inline=True)\n embed.add_field(name=\"Bots\", value=findbots, inline=True)\n embed.add_field(name=\"Owner\", value=ctx.guild.owner, inline=True)\n embed.add_field(name=\"Region\", value=ctx.guild.region, inline=True)\n embed.add_field(name=\"Emojis\", value=emojilist, inline=False)\n embed.add_field(\n name=\"Created\", value=default.date(ctx.guild.created_at), inline=False\n )\n await ctx.send(\n content=f\"ℹ information about **{ctx.guild.name}**\", embed=embed\n )", "title": "" }, { "docid": "02f9c59a7dff1e0781969ecc79c2c828", "score": "0.58341056", "text": "def host_show(hostname, debug):\n hostdata = host_query(hostname)\n if debug > 1:\n click.echo(pformat(hostdata))\n for host in hostdata:\n if debug > 0:\n click.echo(pformat(host))\n name = host['name'] or '?'\n for x in ['ip','mac','interface']:\n if host[x] is None:\n host[x] = '?'\n spacer = \" \" * len(\"[ {} ]\".format(name))\n click.echo(\"[ {} ] ip {} mac {} interface {}\".format(\n click.style(name, bold=True),\n click.style(host['ip'], bold=True),\n click.style(host['mac'], bold=True),\n click.style(host['interface'], bold=True)\n ))\n if not host['state']:\n states = \"\"\n else:\n states = \" \".join(sorted([x.strip() for x in host['state'].split(\"|\")]))\n click.echo(\"{} state: {}\".format(spacer,\n click.style(states, bold=True),\n ))\n if 'templates' in host and len(host['templates'])>0:\n click.echo(\"{} templates:\".format(spacer))\n for k,v in host['templates'].items():\n click.echo(\"{} {}: {}\".format(spacer, k, v or ''))\n else:\n click.echo(\"{} templates: -\".format(spacer))\n if 'variables' in host and len(host['variables'])>0:\n click.echo(\"{} variables:\".format(spacer))\n for k,v in host['variables'].items():\n click.echo(\"{} {}: {}\".format(spacer, k, v or ''))\n else:\n click.echo(\"{} variables: -\".format(spacer))\n # build hardware dict\n hardware = {}\n if 'cpu_model' in host:\n hardware['cpu'] = host['cpu_model']\n if 'ram_bytes' in host:\n try:\n hardware['ram'] = humanbytes(int(host['ram_bytes']))\n except:\n pass\n if 'serialnumber' in host:\n hardware['serialnumber'] = host['serialnumber']\n\n # print hardware stats\n if len(hardware):\n click.echo(\"{} hardware:\".format(spacer))\n for key, value in hardware.items():\n click.echo(\"{} {}: {}\".format(spacer,key,value))", "title": "" }, { "docid": "9a64794db440083d76a1c201ee0cfb55", "score": "0.58285517", "text": "def show():\n uri = cli_to_uri(['security', 'ssh_port'])\n response = requests.get(uri)\n return dict_to_tab(response)", "title": "" }, { "docid": "84d204fb0492ae6d70ce7997172ad123", "score": "0.58021814", "text": "async def info(self, ctx):\n player = self.bot.wavelink.get_player(ctx.guild.id)\n node = player.node\n\n used = 
humanize.naturalsize(node.stats.memory_used)\n total = humanize.naturalsize(node.stats.memory_allocated)\n free = humanize.naturalsize(node.stats.memory_free)\n cpu = node.stats.cpu_cores\n\n fmt = f'**WaveLink:** `{wavelink.__version__}`\\n\\n' \\\n f'Connected to `{len(self.bot.wavelink.nodes)}` nodes.\\n' \\\n f'Best available Node `{self.bot.wavelink.get_best_node().__repr__()}`\\n' \\\n f'`{len(self.bot.wavelink.players)}` players are distributed on nodes.\\n' \\\n f'`{node.stats.players}` players are distributed on server.\\n' \\\n f'`{node.stats.playing_players}` players are playing on server.\\n\\n' \\\n f'Server Memory: `{used}/{total}` | `({free} free)`\\n' \\\n f'Server CPU: `{cpu}`\\n\\n' \\\n f'Server Uptime: `{datetime.timedelta(milliseconds=node.stats.uptime)}`'\n await ctx.send(fmt)", "title": "" }, { "docid": "fa1ed411a8f9929c3197b198ac7c9336", "score": "0.579133", "text": "def HandleServers():\n servers = model_provider.GetFrontend().GetServers()\n\n accept = _GetAccept()\n if accept == RenderMode.TEXT:\n for server, roles in servers.iteritems():\n yield '%s: %s\\n' % (server, roles)\n\n else:\n yield cjson.encode(servers)", "title": "" }, { "docid": "4abfbae46a5fd1abb2d962caab8184d3", "score": "0.5790998", "text": "def do_system_info(self, args):\n sys_info = self._stc.system_info()\n for k in sys_info:\n print(' ', k, ': ', sys_info[k], sep='')", "title": "" }, { "docid": "75f8960f2b7256b24d92178fb2e88eef", "score": "0.57813555", "text": "def test_show_server_diagnostics(self):\n self.rbac_utils.switch_role(self, toggle_rbac_role=True)\n self.servers_client.show_server_diagnostics(self.server_id)", "title": "" }, { "docid": "1e973df13d674a059c72150513edba2d", "score": "0.57779866", "text": "def get(self):\n\n return \"Server active!\"", "title": "" }, { "docid": "d4a52658a39076d7f732b7a63f8f0c55", "score": "0.5767236", "text": "def _print_info(self, info: str) -> None:\n print(f'{self.env.now:.2f} | {self.address} | {info}')", "title": "" }, { "docid": "67096fc5cd28a91757840bd0acd2425d", "score": "0.5767047", "text": "def info():\n\n import jasy.core.Console as Console\n\n print(\"Konstrukteur %s is a static site generator\" % __version__)\n print(\"Visit %s for details.\" % Console.colorize(\"https://github.com/fastner/konstrukteur\", \"underline\"))\n print()", "title": "" }, { "docid": "77c6f3fa0906a3ad74fde1e657d6ccd6", "score": "0.57645744", "text": "def connect_webserver(screen, info):\n pass", "title": "" }, { "docid": "a5e8a6739a6b4d7b96d6e2ff62ac90e0", "score": "0.5763887", "text": "def show_ntp_server(self, context, cancellation_context):\n logger = get_qs_logger(log_category=\"Test\",log_group=context.resource.name)\n logger.info(\"Getting NTP server...\")\n\n api = CloudShellAPISession(host=context.connectivity.server_address,\n token_id=context.connectivity.admin_auth_token,\n domain=context.reservation.domain)\n resource = Vyos.create_from_context(context)\n ctpw = api.DecryptPassword(resource.password).Value\n\n session_types = [SSHSession(host=context.resource.address, username=resource.user, password=ctpw, port=self.port)]\n\n with self.cli.get_session(session_types, self.cliMode) as default_session:\n default_session.send_command(\"set terminal length 0\")\n default_session.send_command(\"configure\")\n out = default_session.send_command(\"show system ntp server\")\n default_session.send_command(\"exit\")\n\n out = \"Begin NTP server info:\" + '\\n' + out + '\\n' + \"End NTP server info\"\n logger.info(out)\n\n print out\n return out", "title": "" }, { 
"docid": "a745a4ab7bb66d85ae0fdd47d6e98bd4", "score": "0.5749753", "text": "def informServerImOn():\r\n try:\r\n fptr = urlopener.open(\"http://%s/iamhere?ip=%s&port=%s&id=%s\" % (server_addr, ip, listen_port, userID, ))\r\n fptr.close()\r\n except: # central sever down\r\n pass", "title": "" }, { "docid": "d2b4175c14caa49c0afa5f57ec33ab21", "score": "0.5749292", "text": "def os_services_server_tab(request, obj_id):\n server = Server.objects.get(id=obj_id)\n rows = []\n for row in json.loads(server.os_services):\n rows.append((\n row.get('Name'),\n row.get('DisplayName'), # Alternative: \"Caption\"\n row.get('State'),\n row.get('StartMode'),\n row.get('StartName'),\n row.get('PathName')\n ))\n\n return render(request, 'os_info/templates/table.html', dict(\n pagetitle='OS Services',\n intro=\"\"\"\n \"\"\",\n table_caption='Shows services for {}'.format(\n server.hostname if hasattr(server, 'hostname') else 'server'),\n column_headings=[\n 'Service Name',\n 'Service Label',\n 'Status',\n 'Startup',\n 'Account',\n 'Path'\n ],\n rows=rows,\n sort_by_column=1,\n unsortable_column_indices=[],\n ))", "title": "" }, { "docid": "4f1a479b1139fc8636f97172464368a7", "score": "0.5748139", "text": "def display_module_info(self):\n scriptpath = os.path.realpath(__file__)\n day, month, date, clock, year = time.ctime(os.path.getmtime(scriptpath)).split()\n last_modified = f'Last Modified: {date} {month} {year}'\n author_name = 'Written by Metin San'\n self.info_win.addstr(0,self.center_str(author_name), author_name)\n self.info_win.addstr(1,self.center_str(last_modified), last_modified)\n self.info_win.refresh()", "title": "" }, { "docid": "e6eb0441658f5ebb072bdffed8f1a78a", "score": "0.5745947", "text": "def show2(self):\n self.__class__(str(self), tls_session=self.tls_session).show()", "title": "" }, { "docid": "cce4cfaff761fbddfef5aada08b2ee6d", "score": "0.57319653", "text": "def info(domain_or_server, create_if_needed=True):\n domain = normalize_domain(domain_or_server)\n server_info = server = app = None\n\n if domain_or_server in smash_vars.servers:\n server = domain_or_server\n try:\n app = webfaction.get_webapps(server, domain)\n except lib.errors.LoginError:\n app = None\n if app:\n app = app[0]\n if smash_vars.verbose:\n print(\"domain\", domain, \"maps to the server\", server, \"and the app\", app)\n elif webfaction.is_webfaction_domain(domain):\n server = webfaction.get_server(domain)\n if server:\n try:\n app = webfaction.get_webapps(server, domain)\n except lib.errors.LoginError:\n app = None\n if app:\n app = app[0]\n if smash_vars.verbose:\n print(\"domain\", domain, \"maps to the server\", server, \"and the app\", app)\n elif webfaction.can_login(domain) and domain in smash_vars.servers:\n server = domain_or_server\n else:\n for server_name, s in smash_vars.servers.items():\n ds = s.get(\"domains\")\n if ds:\n for d in ds:\n if d == domain:\n server = server_name\n #if not server: #I don't think thse two lines should be here, but I'm not sure\n # server = domain_or_server\n\n if not server and app: #see if this is a subdomain of a domain we have info about\n parent = parent_domain(server)\n if parent:\n #setting create_if_needed to False. 
If it was passed in as True, it will still prompt if we would like to create\n #a new server entry as soon as we finish checking the parent domain\n info(parent, create_if_needed=False)\n\n\n if server: #this will be True unless we need to create a new server entry\n server_info = servers.get(server)\n elif create_if_needed:\n if \".\" not in domain:\n raise lib.errors.SmashException(\"Whoops {} is not a valid website.\".format(domain))\n resp = input(\"No server entries exists for the domain {}. Would you like to add one now [Yes/no] \".format(domain))\n if not resp.lower().startswith(\"n\"):\n server = servers.interactively_add_conf_entry()\n server_info = servers.get(server)\n try:\n app = webfaction.get_webapps(server, domain)[0]\n except:\n pass\n else:\n raise lib.errors.SmashException(\"Ok. Let me know if you ever do feel like providing info for {}\".format(domain))\n return server_info, server, app", "title": "" }, { "docid": "ea0fff2ff641b4b38574e33398900fed", "score": "0.57253075", "text": "def info_host(args):\n client = args.host or args.client\n config = Kconfig(client=client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pretty_print(k.info_host(), width=100)", "title": "" }, { "docid": "02d8d51acecc1d26d758642ebfbd18d3", "score": "0.57228845", "text": "def how_to_use():\n\n return render_template('info.html')", "title": "" }, { "docid": "3e6d209ab3d625fe669842adb74f2c6f", "score": "0.5716957", "text": "def info(self, target=\"\"):\n\t\tself.sendCmd(\"INFO\", target)", "title": "" }, { "docid": "9283ad4e6ba77194b900f14429b418ee", "score": "0.57162225", "text": "def show(ctx):", "title": "" }, { "docid": "5326d99d3574dbb54b405329ae0e1be2", "score": "0.5712769", "text": "def __sw_show_info_no_port_type__(self):\n #tn.set_debuglevel(9)\n capture_cmd = fos_cmd(\"switchshow\")\n if self.am_i_director:\n ras = re.compile('\\s?([0-9]{1,3})\\s+([-\\d]+)\\s+(\\d+)\\s+([-0-9abcdef]{6})\\s+([-id]{2})\\s+([-UNG12486]{2,3})\\s+([_\\w]{5,9})\\s+((FC)\\s*(?=\\\\n))')\n else:\n #ras = re.compile('\\s?([0-9]{1,3})\\s+(\\d+)\\s+([-0-9abcdef]{6})\\s+([-id]{2})\\s+([-UNG12486]{2,3})\\s+([_\\w]{5,9})\\s+((FC)\\s*(?=\\\\n))')\n ras = re.compile('\\s?([0-9]{1,3})\\s+(\\d+)\\s+([-0-9a-f]{6})\\s+([-id]{2})\\s+([-UNG12486]{2,3})\\s+([_\\w]{5,9})\\s+((FC)\\s*(?=\\\\n))')\n ras = ras.findall(capture_cmd)\n self.online_ports = ras", "title": "" }, { "docid": "63d3736c039d0c4d0b220c76f3ce72bc", "score": "0.5710751", "text": "def serverlist(self):\n return self.send_command('serverlist')", "title": "" }, { "docid": "7dfe13faae8e1f2f34edcd72a9f19203", "score": "0.57076985", "text": "def show_from_remote(self):\n log.debug(\"show from remote\")\n self.forceHide = True\n self.show()", "title": "" }, { "docid": "8fb6e527045bc072bdd510efe8c127dc", "score": "0.5701991", "text": "def showStat(self):\n self.stat.show()", "title": "" }, { "docid": "7d4d96eaf9a286a3da2fc62bb2d9ab47", "score": "0.5687566", "text": "def printInfo(self):\n print(\"-------------------\" + self.name + \"-------------------\")\n print(\" Peer ID: \" + str(self.id))\n print(\" Messages Sent: \" + str(self.numMessages))\n print(\" Pictures Sent: \" + str(self.mediaSent[\"pics\"]))\n print(\" Files Sent: \" + str(self.mediaSent[\"docs\"]))\n print(\" Links Sent: \" + str(self.mediaSent[\"links\"]))\n print(\" Average Message Length (Words): \" + str((round(self.totalLength/self.numMessages))))\n print(\" Average Characters Per Message: \" + 
str(round(self.totalCharacters/self.numMessages)))\n print(\" Average Response Time(Minutes): \" + str(self.getAverageResponseTime()))\n for word in sorted(self.wordDict.keys()):\n print(\" \" + word + \" : \" + str(self.wordDict[word][0]))\n self.printMonthInfo()\n self.printWeekdayInfo()\n self.printHourInfo()\n self.printSearchedMessages()", "title": "" }, { "docid": "f29f2c7b1905235faed6ff129e19ef12", "score": "0.56814814", "text": "def serverinfo(self) -> Dict[str, str]:\n if self._connection is None:\n raise Exception('No open connection to Apache Tomcat server.')\n\n serverinfo = {}\n url = f'{self._connection.url}/serverinfo'\n logger.debug(f'Prepared request with method GET to {url}')\n response = requests.get(url, auth=self._connection.auth, headers=self.headers, timeout=self._connection.timeout)\n response.raise_for_status()\n resp_list = response.text.split('\\n')\n\n for lines in resp_list[1:-1]:\n key, value = lines.rstrip().split(\":\", 1)\n serverinfo[key] = value.lstrip()\n return serverinfo", "title": "" }, { "docid": "481567475457ff2061caf4327d7d9c5f", "score": "0.56744754", "text": "def _show_info(self):\n if (self.__i == -1):\n print 'Input : ', self.__file_i\n print 'Output : ', self.__file_o\n print 'Frame ID : ', self.__frame_id\n print 'Child Frame ID : ', self.__child_frame_id\n print \"Loading bag file... (This could take a while. Time for a coffe?)\"\n self.__i = 0\n\n elif (self.__i > 0):\n sys.stdout.write(\"%6.1f %% \\r\" % (self.__i*100.0/self.__count))\n sys.stdout.flush()\n\n if (self.__i < self.__count):\n threading.Timer(self.REFRESH_INTERVAL, self._show_info).start()\n else:\n print", "title": "" }, { "docid": "f4b0590623d418dba3602d6f886716ad", "score": "0.56722075", "text": "def show_info(self):\r\n header = html_header()\r\n footer = html_footer()\r\n string = header\r\n\r\n heading = m.Heading(self.tr('Shakemap Grid Importer'), **INFO_STYLE)\r\n body = self.tr(\r\n 'This tool will convert an earthquake \\'shakemap\\' that is in '\r\n 'grid xml format to a GeoTIFF file. The imported file can be used '\r\n 'in InaSAFE as an input for impact functions that require and '\r\n 'earthquake layer. To use this tool effectively:'\r\n )\r\n tips = m.BulletedList()\r\n tips.add(self.tr(\r\n 'Select a grid.xml for the input layer.'))\r\n tips.add(self.tr(\r\n 'Choose where to write the output layer to.'\r\n ))\r\n tips.add(self.tr(\r\n 'Choose the interpolation algorithm that should be used when '\r\n 'converting the xml grid to a raster. 
If unsure keep the default.'\r\n ))\r\n tips.add(self.tr(\r\n 'If you want to obtain shake data you can get it for free from '\r\n 'the USGS shakemap site: '\r\n 'http://earthquake.usgs.gov/earthquakes/shakemap/list.php?y=2013'))\r\n message = m.Message()\r\n message.add(heading)\r\n message.add(body)\r\n message.add(tips)\r\n string += message.to_html()\r\n string += footer\r\n\r\n self.webView.setHtml(string)", "title": "" }, { "docid": "e8619bb8f7b8a7c3654a8c8556c13202", "score": "0.5666822", "text": "def print_info(self):", "title": "" }, { "docid": "f60975227ea3a7c9543eb4bc3ed601fe", "score": "0.56603056", "text": "def basic_site_display(self):\n self.open(FRONTEND_ADDRESS)\n self.assert_text('Hello world!')", "title": "" }, { "docid": "83488840d53b450dc156581ddf2424f3", "score": "0.565879", "text": "def displayInfo(self,event):\n \n e = self.getSelected()\n if e != None:\n self.createInfoWindow(e)", "title": "" }, { "docid": "86edec09cbe74a5b14aa70bcfa367e40", "score": "0.5657014", "text": "def test_show_fetch_from_core(self):\n response = self.app.get(url_for(controller='devices', action='show', id=241910, format='json'))\n self.assertEqual(response.status, 200)\n detail = computer_detail[241910]\n server = simplejson.loads(response.body)['rows'][0]\n self.assertEqual(server['os'], detail['os_group'])\n self.assertEqual(server['has_managed_storage'], detail['has_managed_storage'])\n self.assertEqual(server['datacenter'], detail['datacenter'])\n self.assertEqual(server['segment'], detail['segment'])\n self.assertEqual(server['icon'], config['core.url'] + '/' + detail['icon'])", "title": "" }, { "docid": "4c2fa729711f92a2a34c7a24bf0e16bd", "score": "0.56488013", "text": "def display(self, stats, refresh_time=None):\n\n stats = {\n 'system': self.stats.get_plugin('system').get_stats_display(args=self.args),\n 'uptime': self.stats.get_plugin('uptime').get_stats_display(args=self.args),\n 'cpu': self.stats.get_plugin('cpu').get_stats_display(args=self.args),\n 'load': self.stats.get_plugin('load').get_stats_display(args=self.args),\n 'mem': self.stats.get_plugin('mem').get_stats_display(args=self.args),\n 'memswap': self.stats.get_plugin('memswap').get_stats_display(args=self.args),\n 'network': self.stats.get_plugin('network').get_stats_display(args=self.args),\n 'diskio': self.stats.get_plugin('diskio').get_stats_display(args=self.args),\n 'fs': self.stats.get_plugin('fs').get_stats_display(args=self.args),\n 'raid': self.stats.get_plugin('raid').get_stats_display(args=self.args),\n 'sensors': self.stats.get_plugin('sensors').get_stats_display(args=self.args),\n 'alert': self.stats.get_plugin('alert').get_stats_display(args=self.args),\n 'processcount': self.stats.get_plugin('processcount').get_stats_display(args=self.args),\n 'monitor': self.stats.get_plugin('monitor').get_stats_display(args=self.args),\n 'processlist': self.stats.get_plugin('processlist').get_stats_display(args=self.args),\n 'docker': self.stats.get_plugin('docker').get_stats_display(args=self.args)\n }\n\n return template('base', refresh_time=refresh_time, stats=stats)", "title": "" }, { "docid": "5589df2be8d799d03516ca975493e613", "score": "0.56429577", "text": "async def info(self,ctx):\n colour = discord.Colour.from_rgb(random.randint(1,255),random.randint(1,255),random.randint(1,255))\n embed = discord.Embed(colour=colour,description=\"A bot made specifically for the HistoryWars discord!\")\n embed.set_author(name=\"HistoryWarsBot\", url=\"https://wwww.github.com/JdavisBro/historywarsbot\", 
icon_url=self.bot.user.avatar_url)\n embed.set_footer(text=ctx.author.name, icon_url=ctx.author.avatar_url)\n embed.add_field(name=\"Creators:\", value=\"JdavisBro#2640 (Raid)\\nMrEdinLaw#1058 (Leaderboard)\", inline=True)\n embed.add_field(name=\"Python Version:\", value=\"[{}](https://www.python.org)\".format(platform.python_version()), inline=True)\n embed.add_field(name=\"Discord.py Version:\", value=\"[{}](https://github.com/Rapptz/discord.py)\".format(discord.__version__), inline=True)\n await ctx.send(embed=embed)", "title": "" }, { "docid": "985dfb20de767f5d020a7c5c0d66980f", "score": "0.56381035", "text": "def show_system_info(self, context, cancellation_context):\n logger = get_qs_logger(log_category=\"Test\",log_group=context.resource.name)\n logger.info(\"Getting system info...\")\n\n api = CloudShellAPISession(host=context.connectivity.server_address,\n token_id=context.connectivity.admin_auth_token,\n domain=context.reservation.domain)\n resource = Vyos.create_from_context(context)\n ctpw = api.DecryptPassword(resource.password).Value\n\n session_types = [SSHSession(host=context.resource.address, username=resource.user, password=ctpw, port=self.port)]\n\n with self.cli.get_session(session_types, self.cliMode) as default_session:\n default_session.send_command(\"set terminal length 0\")\n default_session.send_command(\"configure\")\n out = default_session.send_command(\"show system\")\n default_session.send_command(\"exit\")\n\n out = \"Begin system info:\" + '\\n' + out + '\\n' + \"End system info\"\n logger.info(out)\n\n print out\n return out", "title": "" } ]
187d2f8ecb05b9da510a34ff96078a2c
Get the poll stats. Fetch all the answers from the JotForm API and convert them to cumulative statistics.
[ { "docid": "debe01e9e473e79dcf0176c0cffc413d", "score": "0.7560663", "text": "async def get_poll_stats(uuid: str):\n credentials = redis.get(uuid)\n if credentials is None:\n raise HTTPError(401, \"Unauthorised request.\")\n app_key, poll_id = credentials.decode(\"utf-8\").split(\n \"-\") # Get back user credentials.\n submissions = get_submissions(poll_id, app_key)\n # We now have form submissions with us.\n question_ids = get_question_ids(app_key, poll_id) # And the question IDs.\n counts = jsonable_encoder(get_answer_stats(submissions, question_ids))\n return JSONResponse(counts)", "title": "" } ]
[ { "docid": "a3a21061c83e3eccdebbeb5612841a12", "score": "0.6668814", "text": "def stats(poll_id):\n \n poll_id = int(poll_id)\n poll = Poll.get_by_id(poll_id)\n return render_template(\"stats.html\", \n choice_a=poll.choice_a, \n choice_b=poll.choice_b, \n choice_c=poll.choice_c, \n choice_d=poll.choice_d)", "title": "" }, { "docid": "293031ba3a6d6159ef92282e87aa234b", "score": "0.6552241", "text": "def get_results(poll):\n\n assert poll is not None, \"Invalid poll: None\"\n\n if not poll['closed']:\n return None\n\n results = {}\n\n # Get cached results\n results_db = get_entries('results', 'poll', poll['uid'])\n\n # If no cache, compute the results and store them\n if len(results_db) == 0:\n ballots = get_entries('ballots', 'poll', poll['uid'])\n\n # If no ballots provide, no results\n if len(ballots) == 0:\n return None\n\n # Number of ballots cast\n ballots_count = len(ballots) / len(poll['choices'])\n\n # Build data structures\n choices = {}\n results = {}\n for choice in poll['choices']:\n choices[choice['id']] = {'votes': [0] * 7}\n results[choice['id']] = {'ballots': ballots_count}\n\n # Count the number of vote for each grade for each choice\n for ballot in ballots:\n choices[ballot['choice']]['votes'][ballot['grade']] += 1\n\n # Store the count in percentage for display purposes\n for choice in choices:\n results[choice]['percentages'] = []\n for vote in choices[choice]['votes']:\n results[choice]['percentages'].append(100 * vote / ballots_count)\n\n # Transfrom the number of vote to a list of votes\n for _, choice in choices.items():\n votes = []\n for i in range(len(choice['votes'])):\n votes.extend([i] * choice['votes'][i])\n choice['votes'] = votes\n\n # Compute the median, the number of better and worse vote.\n for _, choice in choices.items():\n choice_compute(choice)\n\n # Apply the grade for each choice\n for choice in choices:\n if choices[choice]['median'] == 0:\n results[choice]['grade'] = \"To reject\"\n elif choices[choice]['median'] == 1:\n results[choice]['grade'] = \"Poor\"\n elif choices[choice]['median'] == 2:\n results[choice]['grade'] = \"Acceptable\"\n elif choices[choice]['median'] == 3:\n results[choice]['grade'] = \"Fair\"\n elif choices[choice]['median'] == 4:\n results[choice]['grade'] = \"Good\"\n elif choices[choice]['median'] == 5:\n results[choice]['grade'] = \"Very Good\"\n elif choices[choice]['median'] == 6:\n results[choice]['grade'] = \"Excellent\"\n\n if choices[choice]['better'] > choices[choice]['worse']:\n results[choice]['grade'] += \"+\"\n else:\n results[choice]['grade'] += \"-\"\n\n # Sort the vote to etablish the ranks\n ranks = rank_choices(choices, ballots_count)\n for choice in results:\n results[choice]['rank'] = ranks[choice]\n\n\n # Store the results\n results_db = []\n for choice, result in results.items():\n results_db.append((poll['uid'], choice, \";\".join([str(rank) for rank in result['rank']]) if isinstance(result['rank'], list) else str(result['rank']), result['grade'], \";\".join([str(percentage) for percentage in result['percentages']]), result['ballots']))\n\n get_db().executemany(\"INSERT INTO results (poll, choice, rank, grade, percentages, ballots) VALUES (?, ?, ?, ?, ?, ?)\", results_db)\n\n # Destroy the ballots\n get_db().execute('DELETE FROM ballots WHERE poll = ?', [poll['uid']])\n\n else:\n for result in results_db:\n results[result['choice']] = {'rank' : int(result['rank']) if ';' not in result['rank'] else [int(vote) for vote in result['rank'].split(';')], 'grade': result['grade'], 'percentages': 
[int(percentage) for percentage in result['percentages'].split(';')], 'ballots': result['ballots']}\n\n return results", "title": "" }, { "docid": "37abacd042a548b383be578b908a4ca9", "score": "0.61342233", "text": "def list_poll_responses(poll, **kwargs):\n #forceful import\n from poll.models import Poll\n to_ret = {}\n\n \"\"\"\n narrowed down to 3 districts (and up to 14 districts)\n \"\"\"\n DISTRICT = ['Kaabong', 'Kabarole', 'Kyegegwa', 'Kotido']\n if not kwargs:\n # if no other arguments are provided\n for location in Location.objects.filter(name__in=DISTRICT):\n to_ret[location.__unicode__()] = compute_average_percentage([msg.message.text for msg in poll.responses.filter(contact__in=Contact.objects.filter(reporting_location=location))])\n return to_ret\n else:\n # filter by number of weeks\n #TODO more elegant solution to coincide with actual school term weeks\n date_filter = kwargs['weeks'] #give the date in weeks\n date_now = datetime.datetime.now()\n date_diff = date_now - datetime.timedelta(weeks=date_filter)\n all_emis_reports = EmisReporter.objects.filter(reporting_location__in=[loc for loc in Locations.objects.filter(name__in=DISTRICT)])\n for location in Location.objects.filter(name__in=DISTRICT):\n to_ret[location.__unicode__()] = compute_average_percentage([msg.message.text for msg in poll.responses.filter(date__gte=date_diff, contact__in=Contact.objects.filter(reporting_location=location))])\n return to_ret", "title": "" }, { "docid": "a3d9939eccc5bbe410a7be4db3ded1f5", "score": "0.6120695", "text": "def histogram(request, pks=None):\n\n all_polls = Poll.objects.filter(type=u'n')\n pks = (pks if pks != None else request.GET.get('pks', None))\n if pks:\n items = 6\n polls = retrieve_poll(request, pks)\n responses = Response.objects.filter(poll__in=polls)\n pks = polls.values_list('pk', flat=True)\n responses = Response.objects.filter(poll__in=polls,\n poll__type=u'n')\n plottable_data = {}\n if responses:\n poll_results = {}\n poll_qns = ['Qn:' + poll.question + '<br>' for poll in\n Poll.objects.filter(pk__in=pks)]\n\n total_responses = responses.count()\n vals_list = \\\n Value.objects.filter(entity_id__in=responses).values_list('value_float'\n , flat=True)\n vals_list = sorted(vals_list)\n max = int(vals_list[-1])\n min = int(vals_list[0])\n num_list = range(min, max)\n increment = int(max / items)\n bounds = num_list[::increment]\n ranges_list = [str(a) + '-' + str(a + increment) for a in\n bounds if a < max]\n poll_results['categories'] = ranges_list\n poll_results['title'] = poll_qns\n\n for response in responses:\n name = response.poll.name\n poll_results.setdefault(name, {})\n poll_results[name].setdefault('data', {})\n if len(response.eav_values.all()) > 0:\n value = \\\n int(response.eav_values.all()[0].value_float)\n pos = bisect.bisect_right(bounds, value) - 1\n r = ranges_list[pos]\n poll_results[name]['data'].setdefault(r, 0)\n poll_results[name]['data'][r] += 1\n\n data = []\n for key in poll_results.keys():\n if key not in ['categories', 'title']:\n d = {}\n d['name'] = key\n d['data'] = poll_results[key]['data'].values()\n data.append(d)\n plottable_data['data'] = data\n plottable_data['title'] = poll_qns\n plottable_data['categories'] = ranges_list\n plottable_data['mean'] = sum(vals_list) / len(vals_list)\n plottable_data['median'] = vals_list[len(vals_list) / 2]\n return HttpResponse(mark_safe(simplejson.dumps(plottable_data)))\n\n return render_to_response('ureport/partials/viz/histogram.html',\n {'polls': all_polls},\n 
context_instance=RequestContext(request))", "title": "" }, { "docid": "da020e1dbf92d53bc1ce59ba8a91b36d", "score": "0.60470486", "text": "async def pollstatus(self):\n if not ongoingPoll():\n await ctx.send('There is no poll going on currently, sorry!')\n return\n question = fetchAttr('poll_data', 'question')\n opts = fetchAttr('poll_data', 'options')\n votesSoFar = fetchAttr('poll_data', 'votes')\n message = question + '\\n'\n for i in range(len(opts)):\n message += 'Option ' + str(i + 1) + ': ' + opts[i] + ', currently has ' + str(votesSoFar[i]) + ' votes.\\n'\n await ctx.send(message)", "title": "" }, { "docid": "c07079d9e25832de5ca6389a3ef452b7", "score": "0.5859271", "text": "def test_get_poll_results(self):\n response = self.client.get(f\"/api/poll/{self.poll.pk}/?results=true\", format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn(\"poll\", response.data)\n self.assertIn(\"vote\", response.data)\n options_data = response.data[\"poll\"][\"options\"]\n self.assertEqual(options_data[0].get(\"count_votes\"), self.options[0].count_votes())\n self.assertEqual(options_data[1].get(\"count_votes\"), self.options[1].count_votes())\n self.assertEqual(options_data[2].get(\"count_votes\"), self.options[2].count_votes())", "title": "" }, { "docid": "fbc3558330ea70d5e642d5b46388b4a9", "score": "0.58587897", "text": "def count_stats():\n\n ticket = request.args[\"ticket\"]\n if ticket not in data_manager.ticket_list:\n return 400\n period_start, period_end = (\n request.args[\"date_start\"],\n request.args[\"date_end\"],\n )\n if not (validate_data(period_start) and validate_data(period_end)):\n return 400\n dates, values = data_manager.give_data(\n ticket=ticket, start_date=period_start, end_date=period_end\n )\n fig = go.Figure()\n fig.add_trace(\n go.Scatter(\n x=list(dates), y=list(values), name=\"Real value\"\n )\n )\n fig.update_layout(\n title=go.layout.Title(text=f\"PriPre {ticket} ticket graph\"),\n yaxis_title=\"Close value\",\n xaxis_title=\"Date\",\n showlegend=True,\n legend_title_text=\"Tickets\",\n font=dict(family=\"Courier New, monospace\", size=18, color=\"Black\"),\n )\n values = pd.Series(data=values.values, index=dates)\n answer = {\n \"chart\": fig.to_dict(),\n \"stats\": {\n \"std\": values.std(),\n \"avg\": values.mean(),\n \"median\": values.median(),\n \"mode\": values.mode()[0],\n \"variants\": values.var()\n }\n }\n return json.dumps(answer)", "title": "" }, { "docid": "082030902d9d4593865ac2befc8b5135", "score": "0.5848949", "text": "def getStats():", "title": "" }, { "docid": "91809d4e58b47428b7afcde4685c131b", "score": "0.5763146", "text": "def single_pollrun(pollrun, responses, question):\n chart_type = None\n chart_data = []\n summary_table = None\n\n answers = Answer.objects.filter(response__in=responses, question=question)\n if answers:\n if question.question_type == Question.TYPE_OPEN:\n chart_type = 'open-ended'\n chart_data = word_cloud_data(answers)\n else:\n chart_type = 'bar'\n if question.question_type == Question.TYPE_NUMERIC:\n chart_data = single_pollrun_auto_categorize(answers)\n else:\n chart_data = single_pollrun_multiple_choice(answers, pollrun)\n\n _, answer_avgs, answer_stdevs, response_rates = utils.summarize_by_pollrun(\n answers, responses)\n summary_table = [\n ('Mean', answer_avgs.get(pollrun.pk, 0)),\n ('Standard deviation', answer_stdevs.get(pollrun.pk, 0)),\n ('Response rate average (%)', response_rates.get(pollrun.pk, 0)),\n ]\n\n return chart_type, chart_data, summary_table", "title": "" }, { 
"docid": "fd4ab916238974c330e2ed4eda49e909", "score": "0.5748508", "text": "def view_statistics(request):\n\n current = models.Competition.current()\n division_stats = []\n for division, division_name in home.models.DIVISIONS:\n stats = []\n subject_stats = []\n for subject, subject_name in home.models.SUBJECTS:\n question_stats_dict = {}\n for answer in models.Answer.objects.filter(\n Q(student__team__division=division) &\n Q(question__round__competition=current) &\n (Q(question__round__ref=\"subject1\") & Q(student__subject1=subject) |\n Q(question__round__ref=\"subject2\") & Q(student__subject2=subject))):\n if answer.question.number not in question_stats_dict:\n question_stats_dict[answer.question.number] = [0, 0, 0]\n if answer.value is None:\n question_stats_dict[answer.question.number][2] += 1\n if answer.value == 1:\n question_stats_dict[answer.question.number][0] += 1\n elif answer.value == 0:\n question_stats_dict[answer.question.number][1] += 1\n subject_stats.append((subject_name,) + tuple(question_stats_dict.items()))\n stats.append(list(zip(*subject_stats)))\n for round_ref in [\"team\", \"guts\"]:\n question_stats_dict = {}\n estimation_guesses = {}\n for answer in models.Answer.objects.filter(\n Q(team__division=division) &\n Q(question__round__competition=current) & Q(question__round__ref=round_ref)):\n if answer.question.type == models.ESTIMATION:\n if answer.question.number not in estimation_guesses:\n estimation_guesses[answer.question.number] = []\n estimation_guesses[answer.question.number].append(answer.value)\n continue\n if answer.question.number not in question_stats_dict:\n question_stats_dict[answer.question.number] = [0, 0, 0]\n if answer.value is None:\n question_stats_dict[answer.question.number][2] += 1\n if answer.value == 1:\n question_stats_dict[answer.question.number][0] += 1\n elif answer.value == 0:\n question_stats_dict[answer.question.number][1] += 1\n stats.append((round_ref, tuple(question_stats_dict.items())))\n if estimation_guesses:\n stats.append((round_ref + \" estimation\", tuple(estimation_guesses.items())))\n division_stats.append((division_name, stats))\n\n return render(request, \"grading/statistics.html\", {\"stats\": division_stats, \"current\": current})", "title": "" }, { "docid": "40e38124786c21c44e5559ee5e277b7c", "score": "0.5741999", "text": "def poll(self):\n # self.initialize()\n data = self.get_data()\n if data:\n self.add_metrics(data)\n #self.finish()", "title": "" }, { "docid": "16308582d51fcce14619e291c24bad0f", "score": "0.5732302", "text": "def get_stat_webpage_data(question_id):\n webpage = 'https://willyoupressthebutton.com/{0}/stats'.format(question_id)\n webpage_content = get_webpage(webpage)\n\n soup = bs(webpage_content, 'html.parser')\n\n main_container = soup.find(id='maincontainer')\n\n if main_container is None:\n raise InvalidIndex({\n \"message\":\"No question found with that index\",\n \"index\": question_id\n })\n\n stats = [stat for stat in [a for a in main_container.find(id='statsBar').children][1].children]\n\n did_press = stats[1].getText()\n did_press_count = int(did_press.split()[0])\n\n didnt_press = stats[3].getText()\n didnt_press_count = int(didnt_press.split()[0])\n\n dilemma = [a for a in main_container.find(id='dilemma').children]\n pro = dilemma[1].getText().strip()\n con = dilemma[5].getText().strip()\n\n return {\n 'link': webpage,\n 'index': question_id,\n 'pro': pro,\n 'con': con,\n 'did_press_count': did_press_count,\n 'didnt_press_count': didnt_press_count\n }", "title": "" }, { "docid": 
"fc5709a43eef82900cee4fd945c78ad8", "score": "0.5701256", "text": "def get_question_stats_and_answer_and_comments(url):\n random_headers()\n res_page = requests.get(url, headers=header)\n captcha_check(res_page.url)\n soup = BeautifulSoup(res_page.text, 'html.parser')\n dup_url = None\n question_title, question_desc, question_stats, dup_url = get_stats(soup)\n answers = [s.get_text() for s in soup.find_all(\"div\", class_=\"js-post-body\")][\n 1:] # first post is question, discard it.\n accepted_answer = soup.find_all(\"div\",class_=\"accepted-answer\")[0].find_all(\"div\",class_=\"js-post-body\")[0].get_text()\n if accepted_answer in answers:\n answers.remove(accepted_answer)\n accepted_answer = \"=============ACCEPTED_ANSWER===========\\n\" + accepted_answer + \"\\n===============ACCEPTED_ANSWER============\"\n answers.insert(0,accepted_answer)\n comments = get_comments(soup)\n if len(answers) == 0:\n answers.append('No answers for this question ...')\n return question_title, question_desc, question_stats, answers, comments, dup_url", "title": "" }, { "docid": "db94c2faa3853c03bbe4a1a4aea33842", "score": "0.56665474", "text": "def poll(self):\n data = self.get_data()\n if data:\n self.add_metrics(data)", "title": "" }, { "docid": "1d53107b673f1e080fb4d1b0ecd21383", "score": "0.56647515", "text": "def get_stats(self):\n return get_stats_for(self.get_votes(), num_votes=self.num_votes)", "title": "" }, { "docid": "4efe6c80b63aed0ec3b00521d1c1b8d6", "score": "0.5641941", "text": "def stats(self):\r\n resp = self.server.request(\"get\", \"/jobs/%s/%s/stats\" %\r\n (self.sessionid, self.name))\r\n return self.server.json_body(resp)", "title": "" }, { "docid": "e161e314235d7a8778281ab763b9443e", "score": "0.56394017", "text": "def get_data(self):\n\t\tlatest_circuits = CircuitEntry.objects.filter(Time=CircuitEntry.latest(temporary=True))\n\t\tif len(latest_circuits) == 0:\n\t\t\tself.status_comm_error()\n\t\t\treturn []\n\n\t\tgross_power_used = 0.0\n\t\tgross_energy_used = 0.0\n\t\tgross_power_produced = 0.0\n\t\tgross_energy_produced = 0.0\n\n\t\t# see mysql database or electric/fixtures/initial_data.json\n\t\t# these correspond to panel #4 channels #8, #10, #12\n\t\tsolar_circuit_ids = [92, 94, 96]\n\n\t\tfor measurement in latest_circuits:\n\t\t\tif measurement.Circuit.id in solar_circuit_ids:\n\t\t\t\tgross_power_produced += abs(measurement.Power)\n\t\t\t\tgross_energy_produced += abs(measurement.Energy)\n\t\t\telse:\n\t\t\t\tgross_power_used += abs(measurement.Power)\n\t\t\t\tgross_energy_used += abs(measurement.Energy)\n\n\t\tnet_power = gross_power_used - gross_power_produced\n\t\tnet_energy = gross_energy_used - gross_energy_produced\n\n\t\tself.status_ok()\n\t\treturn [CalculatedStats(Time=latest_circuits[0].Time,\n\t\t\tNetPower=net_power,\n\t\t\tNetEnergy=net_energy,\n\t\t\tGrossPowerUsed=gross_power_used,\n\t\t\tGrossEnergyUsed=gross_energy_used,\n\t\t\tGrossPowerProduced=gross_power_produced,\n\t\t\tGrossEnergyProduced=gross_energy_produced)]", "title": "" }, { "docid": "70d1b2c74ab4feaa4c8d7302f4d2aaed", "score": "0.5565786", "text": "def get_response_stats_data(question_id, user_response):\n webpage = 'https://willyoupressthebutton.com/{0}/'.format(question_id)\n if user_response:\n webpage += 'yes'\n else:\n webpage += 'no'\n\n webpage_content = get_webpage(webpage)\n\n soup = bs(webpage_content, 'html.parser')\n\n main_container = soup.find(id='maincontainer')\n\n if main_container is None:\n raise InvalidIndex({\n \"message\":\"No question found with that index\",\n 
\"index\": question_id\n })\n\n stats = [stat for stat in [a for a in main_container.find(id='statsBar').children][1].children]\n\n did_press = stats[1].getText()\n did_press_count = int(did_press.split()[0])\n did_press_percent = int(did_press[did_press.index('(') + 1: did_press.index(')') - 1])\n\n didnt_press = stats[3].getText()\n didnt_press_count = int(didnt_press.split()[0])\n didnt_press_percent = 100 - did_press_percent\n\n return {\n 'id': question_id,\n 'pro_count': did_press_count,\n 'con_count': didnt_press_count,\n 'pro_percent': did_press_percent,\n 'con_percent': didnt_press_percent\n }", "title": "" }, { "docid": "5d4fdab03ee2dab2b5ad751fefe70172", "score": "0.5564409", "text": "async def get_statistics(request):\n version = request.app['openapi']['info']['version']\n currency_stats = list()\n db = request.app['db']\n aws = [\n get_currency_statistics(request, currency)\n for currency in db.get_supported_currencies()\n ]\n currency_stats = await asyncio.gather(*aws)\n\n tstamp = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n return Stats(currencies=currency_stats,\n version=version,\n request_timestamp=tstamp)", "title": "" }, { "docid": "927f5d933ebeeaffa694d3fa473ae6b6", "score": "0.5540294", "text": "def responseTimes(self, choice = \"All\"):\n response_times = []\n if choice == \"All\":\n for comment in self.THREAD:\n response_times.append(comment['response'])\n if comment['replies']:\n for reply in comment['replies']:\n response_times.append(reply['response'])\n elif choice == \"Top\":\n for comment in self.THREAD:\n response_times.append(comment['response'])\n elif choice == \"Reply\":\n for comment in self.THREAD:\n if comment['replies']:\n for reply in comment['replies']:\n response_times.append(reply['response'])\n return response_times", "title": "" }, { "docid": "675a4b60acb69c8aef396a942fa75bdb", "score": "0.54783297", "text": "def points(self):\n result = list()\n # Stats\n execution_time = datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n project_name = self.context.get_meta(\"project_name\", \"None\")\n build_id = f\"{execution_time} - {project_name}\"\n test_type = self.context.get_meta(\"testing_type\", \"None\")\n jira_mapping = self.context.performers[\"reporting\"].get_module_meta(\n \"jira\", \"mapping\", dict()\n )\n results_by_severity = dict()\n for item in self.context.findings:\n if item.get_meta(\"false_positive_finding\", False) or \\\n item.get_meta(\"excluded_finding\", False):\n continue\n priority = item.get_meta(\"severity\", SEVERITIES[-1])\n if priority in jira_mapping:\n priority = jira_mapping[priority]\n if priority not in results_by_severity:\n results_by_severity[priority] = 0\n results_by_severity[priority] += 1\n results_by_severity[\"new_in_jira\"] = \\\n len(self.context.performers[\"reporting\"].get_module_meta(\n \"jira\", \"new_tickets\", list()\n ))\n results_by_severity[\"total_in_jira\"] = \\\n results_by_severity[\"new_in_jira\"] + \\\n len(self.context.performers[\"reporting\"].get_module_meta(\n \"jira\", \"existing_tickets\", list()\n ))\n results_by_severity[\"test_to_count\"] = 1\n result.append({\n \"measurement\": \"stats\",\n \"time\": execution_time,\n \"tags\": {\n \"build_id\": build_id,\n \"test_name\": test_type,\n \"type\": test_type,\n \"project\": project_name\n },\n \"fields\": results_by_severity\n })\n # Errors\n policy = self.config.get(\"policy\", {\"Blocker\": 1, \"Critical\": 5, \"Major\": 15})\n jira_tickets = list()\n 
jira_tickets.extend(self.context.performers[\"reporting\"].get_module_meta(\n \"jira\", \"new_tickets\", list()\n ))\n jira_tickets.extend(self.context.performers[\"reporting\"].get_module_meta(\n \"jira\", \"existing_tickets\", list()\n ))\n for issue in jira_tickets:\n ts = int(datetime.datetime.strptime( # pylint: disable=C0103\n issue[\"created\"], \"%Y-%m-%dT%H:%M:%S.%f%z\"\n ).timestamp())\n break_policy = \"Y\" if str(issue[\"priority\"]) in policy and \\\n ts + (policy[str(issue[\"priority\"])] * 24 * 3600) < int(time()) else \"N\"\n issue = {\n \"measurement\": \"errors\",\n \"time\": execution_time,\n \"tags\": {\n \"build_id\": build_id,\n \"description\": str(issue[\"description\"]),\n \"test_name\": test_type,\n \"type\": test_type,\n \"project\": project_name,\n \"priority\": issue[\"priority\"],\n \"created\": datetime.datetime.strptime(\n issue[\"created\"], \"%Y-%m-%dT%H:%M:%S.%f%z\"\n ).strftime(\"%d %b %Y %H:%M:%S.%f\"),\n \"link\": str(issue[\"jira_url\"])\n },\n \"fields\": {\n \"breaking_policy\": break_policy,\n \"status\": str(issue[\"status\"]),\n \"assignee\": str(issue[\"assignee\"]),\n \"quantity\": 1\n }\n }\n result.append(issue)\n # NB: not implemented in 1.0:\n # - sort_results_by_issue_type (implement with canonical issue naming)\n # - out_of_compliance_issues (implement with compliance policy)\n # Return points for InfluxDB\n return result", "title": "" }, { "docid": "c5c2fc10767c75f1f2db81b73907b6c4", "score": "0.5463494", "text": "def collect_statistics(self):\n from .quiz_stats import QuizStatistics\n return QuizStatistics.collect_statistics(self)", "title": "" }, { "docid": "48732da82aa4ce74aa83eb204d4e0b38", "score": "0.54612875", "text": "def get_stats():\n hashtag = request.form.get('ht')\n if hashtag is None or not check_hashtag(hashtag):\n return make_response(render_template(\"error.html\", message=ERROR_INVALID))\n response = requests.get(MAPREDUCE_URL + MAPREDUCE_GET + hashtag)\n if response.status_code != 200 or response.headers['content-type'].find('application/json') < 0:\n return make_response(render_template(\"error.html\", message=ERROR_SERVER))\n content = json.loads(response.content)\n if content.get('status') is not None and not content['status']:\n return jsonify({'status': False})\n try:\n data = get_graph_data(content['tweets_per_hour'])\n average_word = content['average_words']\n user_nbr = content['user_nbr']\n favorites = get_favorites(content.get('favorites'))\n except TypeError:\n return make_response(render_template(\"error.html\", message=ERROR_SERVER))\n return make_response(render_template(\"hashtag_stats.html\", data=data,\n user_nbr=user_nbr, average_word=average_word,\n favorites=favorites))", "title": "" }, { "docid": "c98fa712bc13da8e5c2021f539de3968", "score": "0.54575306", "text": "def basic_stats(self):\n sub_score = sum(x.score for x in self.submissions)\n comm_score = sum(x.score for x in self.comments)\n sub_duration = self.max_date - self.min_date\n sub_rate = (86400. * len(self.submissions) / sub_duration\n if sub_duration else len(self.submissions))\n\n # Compute comment rate\n if self.comments:\n self.comments.sort(key=lambda x: x.created_utc)\n duration = (self.comments[-1].created_utc -\n self.comments[0].created_utc)\n comm_rate = (86400. 
* len(self.comments) / duration\n if duration else len(self.comments))\n else:\n comm_rate = 0\n\n values = [('Total', len(self.submissions), len(self.comments)),\n ('Rate (per day)', '{0:.2f}'.format(sub_rate),\n '{0:.2f}'.format(comm_rate)),\n ('Unique Redditors', len(self.submitters),\n len(self.commenters)),\n ('Combined Score', sub_score, comm_score)]\n\n retval = 'Period: {0:.2f} days\\n\\n'.format(sub_duration / 86400.)\n retval += '||Submissions|Comments|\\n:-:|--:|--:\\n'\n for quad in values:\n # pylint: disable=W0142\n retval += '__{0}__|{1}|{2}\\n'.format(*quad)\n # pylint: enable=W0142\n return retval + '\\n'", "title": "" }, { "docid": "71af4ec4860c9fd166298279b776b12f", "score": "0.545548", "text": "def _gather_stats(self):\n # Set all values to zero\n self.wins = 0\n self.ties = 0\n self.losses = 0\n self.season_len = 0\n self.points = 0\n self.vs_points = 0\n self.win_percentage = 0.0\n self.point_difference = 0\n self.wins_vs_teams = []\n self.losses_vs_teams = []\n self.ties_vs_teams = []\n self.record_vs_teams = []\n self.f_record_vs_teams = []\n wins_list = []\n losses_list = []\n ties_list = []\n opponents = []\n # Gather statistics\n for g in self.season:\n # Gather the number of games won, lost, and tied\n g_result = g['result']\n opponent = g['vs']\n if opponent not in opponents:\n opponents.append(opponent)\n if g_result == 'w':\n self.wins += 1\n wins_list.append(g)\n elif g_result == 'l':\n self.losses += 1\n losses_list.append(g)\n elif g_result == 't':\n self.ties += 1\n ties_list.append(g)\n self.season_len += 1\n # Gather the number of runs scored\n g_points = g['points']\n self.points += g_points\n # Gather the number of runs scored by opponents\n g_vs_points = g['vs_points']\n self.vs_points += g_vs_points\n\n for opponent in opponents:\n self.wins_vs_teams.append(self._records_vs(wins_list, opponent))\n self.losses_vs_teams.append(self._records_vs(losses_list, opponent))\n self.ties_vs_teams.append(self._records_vs(ties_list, opponent))\n # Calculate win percentage\n try:\n self.win_percentage = self.wins / self.season_len\n except ZeroDivisionError:\n self.win_percentage = None\n\n # Calculate difference in points\n self.point_difference = self.points - self.vs_points\n\n # Calculate record against opponents\n for x in range(len(opponents)):\n self.record_vs_teams.append({opponents[x]: {'w': self.wins_vs_teams[x][opponents[x]],\n 'l': self.losses_vs_teams[x][opponents[x]],\n 't': self.ties_vs_teams[x][opponents[x]]}})\n self.f_record_vs_teams.append(\n f\"\"\"{opponents[x]}: {self.wins_vs_teams[x][opponents[x]]}-{self.losses_vs_teams[x][opponents[x]]}-{self.ties_vs_teams[x][opponents[x]]}\"\"\")", "title": "" }, { "docid": "c0b8fbeeecaec94a6bc0091fe1550c4a", "score": "0.54358995", "text": "def update(self):\n # Reset stats\n self.reset()\n\n if self.input_method == 'local':\n for k, v in iteritems(self.glances_amps.update()):\n # self.stats.append({k: v.result()})\n self.stats.append({'key': k,\n 'name': v.NAME,\n 'result': v.result(),\n 'refresh': v.refresh(),\n 'timer': v.time_until_refresh(),\n 'count': v.count(),\n 'countmin': v.count_min(),\n 'countmax': v.count_max(),\n })\n else:\n # Not available in SNMP mode\n pass\n\n return self.stats", "title": "" }, { "docid": "0a2c41a9b2007a5686c00af66bbd40b0", "score": "0.5430336", "text": "def get_stats(self):\n stats = {}\n for api_entry, api_timings in self._response_timings.items():\n stats['happybase.' 
+ api_entry] = list(api_timings)\n self._response_timings.clear()\n return stats", "title": "" }, { "docid": "467ae542adbee6153112c5793737d9cd", "score": "0.54168946", "text": "async def fetch_vote_count(self):\n\n if self.token is None:\n raise errors.NoKey(\"No API Key was passed\")\n\n resp = await self.request(\"GET\", url=f\"bot/{self.bot.user.id}/votes\", headers={\"authorization\": self.token})\n\n a = resp['current_votes']['alltime']\n m = len(resp['current_votes']['monthly'])\n\n return {\"alltime\": a, \"monthly\": m}", "title": "" }, { "docid": "862d34082acb113d6208737a439c854e", "score": "0.54117805", "text": "def fetching_latest_quiz_statistics(request_ctx, course_id, quiz_id, all_versions, **request_kwargs):\n\n path = '/v1/courses/{course_id}/quizzes/{quiz_id}/statistics'\n payload = {\n 'all_versions' : all_versions,\n }\n url = request_ctx.base_api_url + path.format(course_id=course_id, quiz_id=quiz_id)\n response = client.get(request_ctx, url, payload=payload, **request_kwargs)\n\n return response", "title": "" }, { "docid": "0443ae09532abb5d81760cdf33657038", "score": "0.5409869", "text": "def updateStats(self):\n\n for podID, server in self.servers.items():\n if server[\"enabled\"] and server[\"stats\"][\"enabled\"]:\n _ignore_scheme, hostname, _ignore_path, _ignore_query, _ignore_fragment = urlsplit(server[\"uri\"])\n data = self.readStatsSock((hostname.split(\":\")[0], server[\"stats\"][\"Port\"],), True)\n if \"Failed\" not in data:\n data = data[\"stats\"][\"5m\"] if \"stats\" in data else data[\"5 Minutes\"]\n result = (\n safeDivision(float(data[\"requests\"]), 5 * 60),\n safeDivision(data[\"t\"], data[\"requests\"]),\n safeDivision(float(data[\"slots\"]), data[\"requests\"]),\n safeDivision(data[\"cpu\"], data[\"requests\"]),\n )\n msg(type=\"sim-expired\", podID=podID, reason=result)", "title": "" }, { "docid": "8d867e53b9d661e5a074968b024f8378", "score": "0.538949", "text": "def get_attempt_stats(quiz, response):\n total_marks = 0\n correct_answer = 0\n incorrect_answer = 0\n total_number = Question.objects.filter(quiz=quiz, published=True).count()\n response_data = response.get_response()\n\n for qid in response_data:\n try:\n question = Question.objects.get(id=int(qid))\n except Question.DoesNotExists:\n # there might be other kind of data in response_data we don't care about\n continue\n question_type = QUESTION_TYPE[question.question_type]\n marks = question_type.get_marks(question, extract_response(response_data, qid))\n total_marks += marks\n if marks > 0:\n correct_answer += 1\n else:\n incorrect_answer += 1\n grade = round(total_marks / db.get_quiz_total_marks(quiz), 2)\n unanswered = total_number - (correct_answer + incorrect_answer)\n if quiz.quizsettings.showAnswersAfterAttempt:\n # Student allowed to see answer and hence the grade after attending quiz\n return dict(total_grade=grade, correct=correct_answer, incorrect=incorrect_answer, \n unanswered=unanswered, total_questions=total_number, showAnswer=True)\n return dict(total_grade='Shown after exam ends', unanswered=unanswered, total_questions=total_number, showAnswer=False)", "title": "" }, { "docid": "8e2d05a7de3071d996e6e720ff13ffb9", "score": "0.53858703", "text": "def stats(self):\n return self.connection.call('stats')", "title": "" }, { "docid": "23f2d9523eba8b6da84efe8dda55654e", "score": "0.5384794", "text": "async def fetch_poll_summary(div_id: str, course_name: str) -> List[tuple]:\n query = text(\n \"\"\"select act, count(*) from useinfo\n join (select sid, max(id) mid\n from useinfo 
where event='poll' and div_id = :div_id and course_id = :course_name group by sid) as T\n on id = T.mid group by act\"\"\"\n )\n\n async with async_session() as session:\n rows = await session.execute(\n query, params=dict(div_id=div_id, course_name=course_name)\n )\n return rows.all()", "title": "" }, { "docid": "e0b333fe68fb981826d01ddadd10bbfd", "score": "0.5375736", "text": "def stats(self):\n return self._interact_yaml('stats\\r\\n', ['OK'])", "title": "" }, { "docid": "36f2d127b5d3d6d7efeb8987ad9d63ce", "score": "0.5362324", "text": "def get_performance(self):\n result = requests.get('%s%s' % (self.api.BASE_URL, 'user/performance/'), headers=self.api.headers)\n self.api._auth_report(result)\n content = json.loads(result.content.decode(\"utf-8\"))['result']\n\n self.competitions_entered = [name for name in content.keys() if name != 'total']\n self.score_dfs = {}\n\n # make dataframes for the historical scores for each competition\n for comp_key, comp_values in content.items():\n if comp_key == 'total':\n continue\n\n self.score_dfs[comp_key] = pd.DataFrame(content[comp_key]['score_series'])\n self.score_dfs[comp_key].columns = ['Throne Score']\n self.score_dfs[comp_key].index = [datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S') for date in content[comp_key]['score_dates']]\n\n self.edge_df = pd.DataFrame.from_dict({comp_key : comp_values['edge'] for comp_key, comp_values in content.items() if comp_key != 'total'}, orient='index')\n self.edge_df.columns = ['Edge']\n\n return content", "title": "" }, { "docid": "06dcd8926801b6049f771ce9bf1b8159", "score": "0.53590804", "text": "def generateStatistics(self):\n # Remove any previously generated statistics\n self.statisticsList.delete(0, tk.END)\n \n if(not len(self.currentResults)):\n # To prevent division by zero errors, if there isn't any results, don't generate statistics by returning.\n self.statisticsList.insert(tk.END, \"No data.\")\n return\n \n # Creating total variables, which will be divided to work out averages.\n totalDuration = 0\n totalAverageAnswerTime = 0\n totalScore = 0\n totalSecondsIntoDay = 0\n for i in self.currentRecentResults:\n # For each recent result, add the variables to the totals.\n totalDuration += i[6]\n totalAverageAnswerTime += i[5]\n totalScore += i[3]\n # The fifth column is a datetime, converting it into seconds into the day is straighforward.\n totalSecondsIntoDay += i[4].hour * 3600 + i[4].minute * 60 + i[4].second\n # Add the statistics to the 'list' in the GUI.\n self.statisticsList.insert(tk.END, \"Averages for your last \" + str(len(self.currentRecentResults)) + \" quiz attempts.\")\n self.statisticsList.insert(tk.END, \"Quiz duration: \" + str(round(totalDuration / len(self.currentRecentResults), 1)) + \"s\")\n self.statisticsList.insert(tk.END, \"Time to answer: \" + str(round(totalAverageAnswerTime / len(self.currentRecentResults), 1)) + \"s\")\n # Score is calculated as a percentage.\n self.statisticsList.insert(tk.END, \"Score: \" + str(round(100 * totalScore / len(self.currentRecentResults))) + \"%\")\n averageSecondsIntoDay = int(totalSecondsIntoDay / len(self.currentRecentResults))\n # Hours into the day can be worked out by SecondsIntoDay DIV 3600 using integer division.\n # Minutes after that hour of the day can be worked out by SecondsIntoDay DIV 60 (integer division, to work out the minutes into the day),\n # then that result MOD 60 is the number of minutes into the hour it is.\n self.statisticsList.insert(tk.END, \"Time of day: \" + str(averageSecondsIntoDay // 3600) + \":\" + 
str((averageSecondsIntoDay // 60) % 60))\n self.statisticsList.insert(tk.END, \"\")\n # Adding all-time statistics for the user.\n # Adding the statistics to the end of the list in the GUI.\n self.statisticsList.insert(tk.END, \"All time statistics.\")\n self.statisticsList.insert(tk.END, \"No. of quiz attempts: \" + str(len(self.currentResults)))\n self.statisticsList.insert(tk.END, \"\")\n # Resetting the variables to be used to calculate all-time averages.\n # Average time isn't calculated for all-time, as it probably won't be any more interesting than the recent average time.\n totalDuration = 0\n totalAverageAnswerTime = 0\n totalScore = 0\n for i in self.currentResults:\n # For each result, add its variables to the totals.\n totalDuration += i[6]\n totalAverageAnswerTime += i[5]\n totalScore += i[3]\n # Then add the all-time averages to the statistics list on the GUI.\n self.statisticsList.insert(tk.END, \"All time averages.\")\n self.statisticsList.insert(tk.END, \"Quiz duration: \" + str(round(totalDuration / len(self.currentResults), 1)) + \"s\")\n self.statisticsList.insert(tk.END, \"Answer time: \" + str(round(totalAverageAnswerTime / len(self.currentResults), 1)) + \"s\")\n self.statisticsList.insert(tk.END, \"Score: \" + str(round(100 * totalScore / len(self.currentResults))) + \"%\")", "title": "" }, { "docid": "726979db9987cf49ec3dedf599ff7875", "score": "0.53316575", "text": "def evaluate(self):\n self._get_data()\n return self._score_positive(), self._score_negative(), self.response_results", "title": "" }, { "docid": "ffba0851770c676ef19293c29ade2afe", "score": "0.530948", "text": "def get_pivot_response_stats(pivot_id, pivot_type):\n cursor = connection.cursor()\n \n cursor.execute(\"\"\"\n SELECT plugin_option.plugin_id, plugin_option.value\n FROM drill_multiplechoiceresponse AS mcr\n INNER JOIN (\n SELECT pivot_qn.plugin_id, mco.id AS option_id, mco.value\n FROM (\n SELECT id, question_plugin_id AS plugin_id\n FROM drill_question\n WHERE pivot_type = \"%(pivot_type)s\"\n AND pivot_id = %(pivot_id)d\n ) AS pivot_qn\n INNER JOIN drill_multiplechoiceoption AS mco\n ON mco.question_id = pivot_qn.id\n ) AS plugin_option\n ON plugin_option.option_id = mcr.option_id\n \"\"\" % {'pivot_type': pivot_type, 'pivot_id': pivot_id})\n rows = cursor.fetchall()\n dist_map = {}\n plugin_ids_used = set(plugin_id for (plugin_id, error_value) in rows)\n for plugin_id in plugin_ids_used:\n dist_map[plugin_id] = FreqDist()\n \n for plugin_id, error_value in rows:\n dist_map[plugin_id].inc(error_value)\n \n plugin_map = drill_models.QuestionPlugin.objects.in_bulk(dist_map.keys())\n \n results = [(plugin_map[plugin_id].name, dist) \\\n for (plugin_id, dist) in dist_map.iteritems()]\n combined_dist = FreqDist()\n for name, dist in results:\n combined_dist.inc(name, dist.N())\n results[0:0] = [('By plugin type', combined_dist)]\n \n return results", "title": "" }, { "docid": "1a1a84378c4581b1c005bc7aa68d3f94", "score": "0.5306244", "text": "def analysis(request):\n\n\tserializer = QuizIDSerializer(data=request.query_params)\n\tserializer.is_valid(raise_exception=True)\n\tvalidated_data = serializer.validated_data\n\n\ttry:\n\t\t#quiz_id = Quiz.objects.get(pk=validated_data['quiz_id'])\n\t\t#student_id = request.user.student\n\t\t# responses = StudentResponse.objects.get(quiz_id__id=validated_data['quiz_id'],student_id=request.user.student)\n\t\t\n\t\tresponses = request.user.student.response_set.get(quiz_id__id=validated_data['quiz_id'])\n\t\tquiz = responses.quiz_id\n\n\t\tquestions = 
quiz.question_set.all().order_by('id') # sort options marked in order of questions\n\n\t\tmarked_responses = responses.responses.all().order_by('question__id')\n\n\t\tRES = []\n\t\t\"\"\"\n\t\tGetting the stats of the quiz. For the difficulty levels, the values are lists indicating \n\t\t[# correctly answered questions in that category, total # questions in the category]. For error labels,\n\t\tthe values are # errors of that category\n\t\t\"\"\"\n\t\tstats = {\n\t\t\t\t\t\"Easy\": [0,0], \n\t\t\t\t\t\"Medium\": [0,0], \n\t\t\t\t\t\"Hard\": [0,0], \n\t\t\t\t\t\"Misconception\": 0, \n\t\t\t\t\t\"Silly mistake\": 0, \n\t\t\t\t\t\"Unattempted\": 0, \n\t\t\t\t\t\"Unclassified\": 0, \n\t\t\t\t\t\"Chapter_Stats\":{}\n\t\t}\n\t\tdifficulty_code = dict(DIFFICULTY)\n\t\terror_code = dict(ERROR_CLASS)\n\t\ttotal_errors = 0\n\n\t\t\n\n\t\tj = 0\n\n\n\t\tfor q in questions:\n\t\t\t# opt = q.option_set.get(opt_err_label=0) # 0 means correct\n\t\t\t#increments the total number of questions for the difficulty level the question belongs to:\n\t\t\tstats[difficulty_code[q.q_difficulty]][1] += 1\n\t\t\tres = {\n\t\t\t\t\t\"q_id\" : q.id,\n\t\t\t\t\t\"q_type\" : q.q_type,\n\t\t\t\t\t\"q_text\": re.sub(r'src=\"@@PLUGINFILE@@/([^\"]+)\"',r'src=\"'+BACKEND_HOST+r'/media'+r'/quiz/'+str(quiz.id)+r'/question/'+str(q.id)+r'/\\1\"',q.q_text),\n\t\t\t\t\t\"q_weight\": q.q_weight,\n\t\t\t\t\t\"q_difficulty\": q.q_difficulty,\n\t\t\t\t\t\"solution\": q.ans_expl\n\t\t\t\t}\n\n\t\t\tmarked_opt_for_q = None\t\t\t\n\n\t\t\tif q.id == marked_responses[j].question.id:\n\t\t\t\tmarked_opt_for_q = marked_responses[j]\n\n\t\t\t\t# go to next marked option if it exists\n\t\t\t\tj += 1 if j+1 < len(marked_responses) else 0\n\t\t\t\t\n\t\t\tif q.q_type == 1: # MCQ\n\t\t\t\t# Get all the options\n\t\t\t\topts = q.option_set.all()\n\t\t\t\tchoices = []\n\t\t\t\topt_feedback = None\n\t\t\t\tmarked_opt_err_label = -1\n\n\t\t\t\tfor opt in opts:\n\t\t\t\t\tcurr_opt = {\n\t\t\t\t\t\t\t\"opt_id\" : opt.id,\n\t\t\t\t\t\t\t\"opt_text\" : re.sub(r'src=\"@@PLUGINFILE@@/([^\"]+)\"',r'src=\"'+BACKEND_HOST+r'/media'+r'/quiz/'+str(quiz.id)+r'/option/'+str(opt.id)+r'/\\1\"',opt.opt_text),\n\t\t\t\t\t\t\t\"opt_err_label\" : opt.opt_err_label,\n\t\t\t\t\t\t\t\"marked\" : False\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\tif opt == marked_opt_for_q:\n\t\t\t\t\t\tcurr_opt['marked'] = True\n\t\t\t\t\t\topt_feedback = opt.opt_feedback\n\t\t\t\t\t\tmarked_opt_err_label = opt.opt_err_label\n\n\n\t\t\t\t\tchoices.append(curr_opt)\n\n\t\t\t\tres.update({\n\t\t\t\t\t\"options\" : choices,\n\t\t\t\t\t\"mark_obtained\" : marked_opt_for_q.opt_weight * q.q_weight if marked_opt_for_q is not None else 0.0,\n\t\t\t\t\t\"opt_feedback\": opt_feedback,\n\t\t\t\t\t\"opt_err_label\": marked_opt_err_label\n\t\t\t\t\t})\n\t\t\t\tif marked_opt_err_label==0:\n\t\t\t\t\tstats[difficulty_code[q.q_difficulty]][0] += 1\n\t\t\t\telif marked_opt_err_label==4:\n\t\t\t\t\tstats[\"Misconception\"] += 1\n\t\t\t\t\tstats[\"Silly mistake\"] += 1\n\t\t\t\telse:\n\t\t\t\t\tstats[error_code[marked_opt_err_label]] += 1\n\n\n\t\t\telse: #integer type questions and one word answer type questions\n\t\t\t\tpass\n\n\t\t\tRES.append(res)\n\n\t\tstats.update({\n\t\t\t'total_marks': quiz.total_marks,\n\t\t\t'marks_obtained': responses.marks_obtained,\n\t\t\t'rank': responses.rank,\n\t\t\t})\n\n\t\tRES.append(stats)\n\t\t\"\"\"\n\t\tmulti_flag = False\t # set True if a student has marked a question bu \n\n\t\twhile i<len(questions) and j<len(marked_responses):\n\t\t\t# question not attempted\n\t\t\tif 
questions[i].id < marked_responses[j].question.id:\n\t\t\t\tif not multi_flag:\n\t\t\t\t\tres = {\"q_id\":questions[i].id,\"ans\":\"null\"}\n\t\t\t\t\tRES.append(res)\n\n\t\t\t\ti+=1\n\t\t\t\n\t\t\t# question attempted\n\t\t\telse:\n\t\t\t\tres = {\"q_id\":questions[i].id,\"ans\":marked_responses[j].opt_name}\n\t\t\t\tRES.append(res)\n\n\t\t\t\tmulti_flag = True\n\t\t\t\tj += 1\n\t\t\t\t# i not incremented to accomodate multiple correct answers\n\t\t\"\"\"\n\n\t\treturn Response(RES)\n \n\texcept ObjectDoesNotExist:\n\t\traise exceptions.NotFound(\"No entry found\")", "title": "" }, { "docid": "3d92834b599430c288321678f219b8db", "score": "0.5283129", "text": "def calculates_results_stats():\n pass", "title": "" }, { "docid": "21a207f049fa3e39d3c03dc98184b7fb", "score": "0.5275473", "text": "def get_statistics(self):\n pass", "title": "" }, { "docid": "427965de8a463fa50d50cbbe285c35d5", "score": "0.526942", "text": "def get_score(self):\n jira = JiraServer()\n jira_response = jira.make_api_call(self.query)\n return self.process_jira_response(jira_response)", "title": "" }, { "docid": "7302f0606ae930c2ccfc754fd08bbab0", "score": "0.52512497", "text": "def event_stats(self):\n stats = []\n is_penalty_event = self.is_penalty_event()\n is_second_chance_event = self.is_second_chance_event()\n if self.distance is not None:\n stats += self._get_shot_distance_stat_items()\n if self.is_heave:\n stats.append(self._get_heave_stat_item())\n\n team_ids = list(self.current_players.keys())\n opponent_team_id = team_ids[0] if self.team_id == team_ids[1] else team_ids[1]\n if self.is_made and not self.is_assisted:\n stats += self._get_unassisted_stat_items(\n is_second_chance_event, is_penalty_event\n )\n elif self.is_assisted:\n stats += self._get_assisted_stat_items(\n is_second_chance_event, is_penalty_event\n )\n elif self.is_blocked:\n stats += self._get_blocked_stat_items(\n is_second_chance_event, is_penalty_event, opponent_team_id\n )\n else:\n stats += self._get_missed_stat_items(\n is_second_chance_event, is_penalty_event\n )\n\n if self.is_made:\n # add plus minus and opponent points - used for lineup/wowy stats to get net rating\n for team_id, players in self.current_players.items():\n multiplier = 1 if team_id == self.team_id else -1\n for player_id in players:\n stat_item = {\n \"player_id\": player_id,\n \"team_id\": team_id,\n \"stat_key\": pbpstats.PLUS_MINUS_STRING,\n \"stat_value\": self.shot_value * multiplier,\n }\n stats.append(stat_item)\n if multiplier == -1:\n opponent_points_stat_item = {\n \"player_id\": player_id,\n \"team_id\": team_id,\n \"stat_key\": pbpstats.OPPONENT_POINTS,\n \"stat_value\": self.shot_value,\n }\n stats.append(opponent_points_stat_item)\n\n lineups_ids = self.lineup_ids\n for stat in stats:\n opponent_team_id = (\n team_ids[0] if stat[\"team_id\"] == team_ids[1] else team_ids[1]\n )\n stat[\"lineup_id\"] = lineups_ids[stat[\"team_id\"]]\n stat[\"opponent_team_id\"] = opponent_team_id\n stat[\"opponent_lineup_id\"] = lineups_ids[opponent_team_id]\n\n return self.base_stats + stats", "title": "" }, { "docid": "4c0b9182a2382f0d9724628bfa79bf4a", "score": "0.52404386", "text": "def get_stats(json_data):\n stats = {'title': json_data['title'], 'listing_count': json_data['stats']['listing_count'],\n 'lowest_price': json_data['stats']['lowest_price'], 'median_price': json_data['stats']['median_price'],\n 'average_price': json_data['stats']['average_price'], 'highest_price': json_data['stats']['highest_price'],\n 'announce_date': json_data['announce_date'], 
'event_time': json_data['datetime_local'],\n 'current_time': datetime.datetime.utcnow().isoformat()[:-4]}\n return stats", "title": "" }, { "docid": "5d3974bc6f130ccdb0d49a7b1527607f", "score": "0.5235246", "text": "def stats(self):\n url = \"{}/api/stats/\".format(self.host)\n res = requests.get(url)\n\n try:\n result = (res.json(), res.status_code == 200)\n except:\n result = (None, False)\n\n return result", "title": "" }, { "docid": "b46bc8eff91eeda25cf35b9bcc3d5f5f", "score": "0.52296835", "text": "def stats(self):\n return self._send_to_all( Connection.stats)", "title": "" }, { "docid": "35029ba89a4c353b7b1c6e955262f1e9", "score": "0.52222943", "text": "def stats(self, query: str):\n\n stats = []\n\n if query in ('days', 'times'):\n stats = {\n 'days': self._rental_repository.get_stats_days(),\n 'times': self._rental_repository.get_stats_times()\n }[query]\n\n stats.sort(key=lambda tup: tup[1], reverse=True)\n\n elif query in ('late', 'current'):\n stats = {\n 'current': [\n r for r in self._rental_repository.get_all()\n if r.returned_date is None\n ],\n 'late': [\n r for r in self._rental_repository.get_all()\n if r.due_date < datetime.now()\n ]\n }[query]\n\n stats.sort(key=lambda r: r.due_date, reverse=True)\n\n else:\n raise ValueError('Invalid arg for stats command')\n\n print_list(stats)", "title": "" }, { "docid": "78ccb85e7f1b99495fd3a9dc4380d853", "score": "0.5219053", "text": "async def get_votes(self) -> [int]:\n await self._ensure_user_bot()\n req = requests.get('https://primebots.it/api/' + str(self.client.user.id) + '/votes/' + self.token)\n if req.status_code == 401:\n logging.error(\"API Token non valido\")\n return\n res = req.json()\n return res['votes']", "title": "" }, { "docid": "2d94e5e24569af440a8b4cac7038d17b", "score": "0.5217834", "text": "def get(self,request,slug):\n question = get_object_or_404(Question,slug=slug)\n \n total_votes = 0\n data_list=[]\n \n for choice in question.choice_set.all():\n total_votes += choice.total_votes\n text = choice.text.replace(\"'\",\"\\\\'\")\n data_list.append((re.sub(r\"'\",\"\\\\'\",choice.text) ,choice.total_votes)) \n\n # used for render title in javascript\n jstitle = question.title.replace(\"'\",\"\\\\'\")\n \n context = {'question':question,\n 'total_votes':total_votes, \n 'jstitle':jstitle,\n 'data_list':data_list,\n 'poll_slug':slug}\n\n return render_to_response(\n 'polling/poll_results.html', \n context, \n context_instance=RequestContext(request))", "title": "" }, { "docid": "82c800ff2e48364a4426ece42c546e92", "score": "0.5217288", "text": "async def stat(request: web.Request) -> web.json_response:\n\n data = dict(request.query)\n cleaned_data = ForStat().load(data)\n result = await Views(request).track_count(**cleaned_data)\n result_response = QueryString().dump(result, many=True)\n return web.json_response(data=result_response)", "title": "" }, { "docid": "ef49c7a447996c693dffe41a5c5de6a3", "score": "0.52162087", "text": "def get_stats(self):\n raise NotImplementedError", "title": "" }, { "docid": "f8ff666f138d0d41801f97660493a6a6", "score": "0.5213719", "text": "def get_stats(self) -> Dict[str, int]:", "title": "" }, { "docid": "7a7753c6dfd7919c49a55ba3f9e68f68", "score": "0.5213178", "text": "def getStats(self):\n return self.info['statistics']", "title": "" }, { "docid": "12cf1c11ca2cf32c21be0c623216cfde", "score": "0.52046686", "text": "def GetStats(self):\n raise NotImplementedError()", "title": "" }, { "docid": "12cf1c11ca2cf32c21be0c623216cfde", "score": "0.52046686", "text": "def GetStats(self):\n 
raise NotImplementedError()", "title": "" }, { "docid": "8f42c755c226fa98110326c97f1235c5", "score": "0.5202629", "text": "def stats(self):\n return self.conn.stats_job(self.jid)", "title": "" }, { "docid": "e48d214eba4547fc35f848ebf1f11a52", "score": "0.5202468", "text": "def get_all_stats(self) -> Dict[str, Any]:\n return self.http.get(self.config.paths.stat)", "title": "" }, { "docid": "a3cbe16df453c16301b1f0315cf365de", "score": "0.5200575", "text": "def get_scores(inp, status):\n nhl_scores_url = \"https://statsapi.web.nhl.com/api/v1/schedule?startDate=%s&endDate=%s&expand=schedule.linescore,schedule.teams\"\n j = get_nhl_json(inp, nhl_scores_url)\n scores = []\n games = j['dates'][0]['games']\n for game in games:\n if game['status']['abstractGameState'] == status:\n scores.append(game)\n return scores", "title": "" }, { "docid": "c47b5fb83176af0767d50d138d1ed1d4", "score": "0.51992756", "text": "def get_statistics(self, context, view_name, output_type):\n with self._runners_pool.actual_runner(context) as runner:\n return runner.get_statistics(view_name, output_type)", "title": "" }, { "docid": "05028ca83a0a236617f504313ce66b10", "score": "0.51814246", "text": "def compute_performance(self):\n # TODO Track fixation breaks here? Also in the remote?\n\n if self.trial_data:\n data = pd.DataFrame([t for t, _ in self.trial_data])\n mean_acc = data[\"correct\"].mean()\n responses = data[\"responded\"].sum()\n return mean_acc, responses\n else:\n return None, None", "title": "" }, { "docid": "880e16ddf62d4596f5d76c74c758d11c", "score": "0.5180588", "text": "def cli_get_stats(dummy):\n def print_table(title, table):\n if table:\n print \"=\" * 20, title, \"=\" * 20\n for row in table:\n print \"\\t\".join(str(elem) for elem in row)\n\n for collection, reclist in run_sql(\"SELECT name, reclist FROM collection ORDER BY name\"):\n print \"-\" * 79\n print \"Statistic for: %s \" % collection\n reclist = intbitset(reclist)\n if reclist:\n sqlreclist = \"(\" + ','.join(str(elem) for elem in reclist) + ')'\n print_table(\"Formats\", run_sql(\"SELECT COUNT(format) as c, format FROM bibrec_bibdoc AS bb JOIN bibdocfsinfo AS fs ON bb.id_bibdoc=fs.id_bibdoc WHERE id_bibrec in %s AND last_version=true GROUP BY format ORDER BY c DESC\" % sqlreclist)) # kwalitee: disable=sql\n print_table(\"Mimetypes\", run_sql(\"SELECT COUNT(mime) as c, mime FROM bibrec_bibdoc AS bb JOIN bibdocfsinfo AS fs ON bb.id_bibdoc=fs.id_bibdoc WHERE id_bibrec in %s AND last_version=true GROUP BY mime ORDER BY c DESC\" % sqlreclist)) # kwalitee: disable=sql\n print_table(\"Sizes\", run_sql(\"SELECT SUM(filesize) AS c FROM bibrec_bibdoc AS bb JOIN bibdocfsinfo AS fs ON bb.id_bibdoc=fs.id_bibdoc WHERE id_bibrec in %s AND last_version=true\" % sqlreclist)) # kwalitee: disable=sql", "title": "" }, { "docid": "ed877aba47eb18702de277d8acac8b70", "score": "0.5173229", "text": "def getStatistics(self):\n\n self.statistics()\n return self.stats", "title": "" }, { "docid": "5b4177949c106fc64d408ba32284561c", "score": "0.5162939", "text": "async def __specialty_statistics(self):\n # Parse query params\n from_date = self.get_argument('from_date', None)\n # If there is no from_date, we get all consultations\n from_date = datetime.min if not from_date else datetime.strptime(from_date, '%d-%m-%Y')\n to_date = self.get_argument('to_date', None)\n to_date = None if not to_date else datetime.strptime(to_date, '%d-%m-%Y')\n # Retrieve statistics\n statistics = await SpecialtyStatisticsService.get_statistics(from_date, to_date)\n 
self.make_response(statistics)", "title": "" }, { "docid": "024ae1c828f0c6c628309ee311229df7", "score": "0.5151317", "text": "def get_planning_unit_answers(request, survey_slug, question_slug):\n\n def flatten_answers(pu_answers):\n return Counter([ ans['unit'] for ans in pu_answers.values(\"unit\")])\n\n\n survey = get_object_or_404(Survey, slug=survey_slug)\n if request.method == 'GET':\n \n uuid = request.GET.get('respondant', None)\n if uuid:\n if question_slug.find('*') == -1:\n pu_answers = PlanningUnitAnswer.objects.filter(response__respondant__uuid=uuid,\n response__question__slug=question_slug,\n )\n else:\n pu_answers = PlanningUnitAnswer.objects.filter(response__respondant__uuid=uuid,\n response__question__slug__contains=question_slug.replace('*', ''),\n )\n else:\n if question_slug.find('*') == -1:\n pu_answers = PlanningUnitAnswer.objects.filter(response__respondant__survey=survey,\n response__question__slug=question_slug,\n )\n else:\n pu_answers = PlanningUnitAnswer.objects.filter(response__respondant__survey=survey,\n response__question__slug__contains=question_slug.replace('*', ''),\n )\n \n if not request.user.is_authenticated():\n pu_answers = pu_answers.filter(respondant__complete=True)\n\n filter_list = []\n filters = None\n\n filters = request.GET.get('filters', None)\n\n if filters is not None:\n filter_list = simplejson.loads(filters)\n\n if filters is not None:\n merged_filtered_set = None\n for filter in filter_list:\n slug = filter.keys()[0]\n value = filter[slug]+\"areas\"\n related_questions = pu_answers.filter(related_question_slug=value)\n if merged_filtered_set is not None:\n merged_filtered_set = merged_filtered_set | related_questions\n else:\n merged_filtered_set = related_questions\n \n if merged_filtered_set is not None:\n pu_answers = merged_filtered_set\n\n pu_answers = pu_answers.distinct('response__respondant', 'unit')\n answers = flatten_answers(pu_answers)\n out = {'success': \"true\", \n 'answers': answers\n }\n\n return HttpResponse(simplejson.dumps(out))", "title": "" }, { "docid": "0c17193c59c110994f53230a01edb363", "score": "0.5146482", "text": "def poll_data():\n pos = request.args.get('pos')\n\n reply = _wrap_reply(BACKEND_SERVER.poll_data, pos)\n\n return reply", "title": "" }, { "docid": "02478b6f26612de197f20c074e79c62b", "score": "0.5135567", "text": "def display_stats():\n\n parametrs = {\n \"tickets\": data_manager.ticket_list,\n \"min_date\": data_manager.start_date,\n \"max_date\": data_manager.end_date\n }\n\n return render_template(\"stats.html\", **parametrs)", "title": "" }, { "docid": "5040c39eecd9cbd9d4a2cca80058ee88", "score": "0.51348066", "text": "async def calculate_stats(self):\n cpu_pct = (\n Decimal(self.used_resources[\"cpus\"])\n / Decimal(self.resources[\"cpus\"])\n * 100\n )\n\n ram_pct = (\n Decimal(self.used_resources[\"mem\"])\n / Decimal(self.resources[\"mem\"])\n * 100\n )\n\n self.stats = {\n \"cpu_pct\": str(round_up(cpu_pct)),\n \"ram_pct\": str(round_up(ram_pct)),\n }", "title": "" }, { "docid": "b9dd0012cd81283ec21e69e4218193d8", "score": "0.5134672", "text": "async def refresh(self):\n async with aiohttp.get('{api_url}/{poll_id}'.format(api_url=api_url, poll_id=self.id)) as strawpoll_response:\n raise_status(strawpoll_response)\n self.status_code = strawpoll_response.status\n self.response_json = await strawpoll_response.json()\n self.id = self.response_json['id']\n self.title = self.response_json['title']\n self.options = self.response_json['options']\n self.votes = self.response_json['votes']\n 
self.captcha = self.response_json['captcha']\n self.dupcheck = self.response_json['dupcheck']\n self.url = 'https://www.strawpoll.me/{id}'.format(id=self.id)\n self.results_url = 'https://www.strawpoll.me/{id}/r'.format(id=self.id)", "title": "" }, { "docid": "8ede06c4a68fd54043e94321281fb681", "score": "0.5123524", "text": "def getConfirmationStats():\n\n if request.method == 'POST':\n confirmation_arr, time_stamp = [], []\n count, count_tv, count_mattress, count_couch, count_chair, count_refrigerator, count_cart, count_clean = 0, 0, 0, 0, 0, 0, 0, 0\n confirmation_lists = mongodb.confirmation_lists\n result = []\n\n for confirmation_list in confirmation_lists.find({},{'_id':0}):\n print(confirmation_list)\n result.append(confirmation_list)\n if confirmation_list['category'] == 'tv-monitor':\n count_tv += 1\n if confirmation_list['category'] == 'couch':\n count_couch += 1\n if confirmation_list['category'] == 'mattress':\n count_mattress += 1\n if confirmation_list['category'] == 'chair':\n count_chair += 1\n if confirmation_list['category'] == 'refrigerator':\n count_refrigerator += 1\n if confirmation_list['category'] == 'shopping-cart':\n count_cart += 1\n if confirmation_list['category'] == 'clean-street':\n count_clean += 1\n count += 1\n\n confirmation_stats_arr = [['tv-monitor', count_tv],\n ['mattress', count_mattress],\n ['couch', count_couch],\n ['chair', count_chair],\n ['refrigerator', count_refrigerator],\n ['shopping-cart', count_cart],\n ['clean-street', count_clean]]\n\n json_str = json.dumps(confirmation_stats_arr)\n return json_str", "title": "" }, { "docid": "7de9bc93f2a71425523e7eaebd7016ea", "score": "0.5120832", "text": "def poll_updates(request):\n count = bot.poll_updates_and_handle()\n return HttpResponse(f\"Processed {count} update{'' if count == 1 else 's'}.\")", "title": "" }, { "docid": "7de9bc93f2a71425523e7eaebd7016ea", "score": "0.5120832", "text": "def poll_updates(request):\n count = bot.poll_updates_and_handle()\n return HttpResponse(f\"Processed {count} update{'' if count == 1 else 's'}.\")", "title": "" }, { "docid": "8ffeb7633da3637b3d39d25ce1b55d01", "score": "0.5119075", "text": "def stats(self):\n result = self._client.get(self._full_path('stats'))\n if 'list' in result:\n for stat in result['list']:\n if 'subscriptions' in stat:\n stat['subscriptions'] = [\n Subscription(self._client, **subscription) for subscription in stat['subscriptions']\n ]\n return [TagStats(**stat) for stat in result['list']]\n else:\n raise ResponseStructureError(\"list doesn't exist in response\", result)", "title": "" }, { "docid": "1d523266defb6f360721f2f1d3cab2f0", "score": "0.5113466", "text": "def get_stats(self) -> Dict[str, Any]:\n return self.http.get(\n f'{self.config.paths.index}/{self.uid}/{self.config.paths.stat}'\n )", "title": "" }, { "docid": "509d532613e5ef94d7dc577446316153", "score": "0.50973344", "text": "def gw_statistics():\n stats_dict = get_statistics()\n return render_template('stats.html',\n gameweek=stats_dict['gameweek'],\n status=\"Completed\" if stats_dict['completed'] == True else \"Ongoing\",\n data=stats_dict['data'],\n gameweeks=stats_dict['gameweek']\n )", "title": "" }, { "docid": "a147a15a2eaa1c8b84818ed567d444e5", "score": "0.5097159", "text": "def get_output_stats(self, data=None):\n if not data:\n data = self.analyze()\n requests = data['requests_count']\n time_delta = data['last_datetime'] - data['first_datetime']\n req_per_sec = str(round(requests / time_delta.seconds, 3))\n twohoundreds_avg_size = humanize(data['2XX_total_size'] 
// data['2XX_count'])\n response_status = dict_to_str(data['response_status_count'])\n return {\n 'requests': str(requests),\n 'status_count': response_status,\n 'request_per_second': req_per_sec,\n '2XX_avg_size': twohoundreds_avg_size,\n }", "title": "" }, { "docid": "32fcfb080aaab86637c0a903dba572c3", "score": "0.5083221", "text": "def get_statistics(self):\n raise NotImplementedError", "title": "" }, { "docid": "88944ddb21f9cc1cea5dfa86506c0f06", "score": "0.50716925", "text": "def stats(self):\n with self._sock_ctx() as socket:\n self._send_message('stats', socket)\n body = self._receive_data_with_prefix(b'OK', socket)\n stats = yaml_load(body)\n return stats", "title": "" }, { "docid": "f915137ab9f51a88edb5a31ade2999e4", "score": "0.5068456", "text": "def results_with_percent(self):\n\n percents = [int(float(v) / sum(self.votes) * 100) if sum(self.votes) > 0 else 0 for v in self.votes]\n return zip(self.options, self.votes, percents)", "title": "" }, { "docid": "2de5c7101494d0ca2021e13c85aa4e70", "score": "0.50619566", "text": "def Stats(self):\n response = _endpoint(self.key, self.symbol, 'stats')\n return pd.Series(response).to_frame().T", "title": "" }, { "docid": "c37877fdbc22e41862bb8f8a998cae4f", "score": "0.5057299", "text": "def numbers():\n\n # The most popular champion\n popular_champ = (\n db.session.query(ChampionData)\n .order_by(ChampionData.num_seen.desc())\n .first()\n )\n\n # Gets the most popular champions\n popular_champs = (\n db.session.query(ChampionData)\n .order_by(ChampionData.num_seen.desc())\n .limit(15)\n .all()\n )\n\n # Picks a random champion to analyze\n random_champ = (\n db.session.query(ChampionData)\n .order_by(func.rand())\n .first()\n )\n\n # Gets the champion that wins the most\n winning_champ = (\n db.session.query(ChampionData)\n .filter(ChampionData.num_seen > 10)\n .order_by(ChampionData.score.desc())\n .first()\n )\n\n # Gets the role of the champions who wins the most\n winning_champ_roles = (\n db.session.query(\n Champion.role.label(\"role\"),\n func.count(Champion.id).label(\"seen\")\n )\n .filter(Champion.champion_id == winning_champ.champion_id)\n .group_by(Champion.role).all()\n )\n\n # Stats, Date Stats, Case Study of Popular or Highest Win Rate\n stats = {\n 'stats': {\n 'match_count': Match.query.count(),\n 'popular_champ': popular_champ.get_name(),\n 'popular_champ_kda': round(popular_champ.get_kda(), 2),\n 'random_champ': random_champ.get_name(),\n 'random_champ_role': random_champ.role.capitalize(),\n 'random_champ_seen': random_champ.num_seen,\n 'average_kills': round(\n db.session.query(\n func.avg(ChampionData.kills)\n )\n .first()[0], 2\n ),\n 'average_towers': round(\n db.session.query(\n func.avg(ChampionData.tower_score)\n ).first()[0], 2\n )\n },\n\n 'champion_picks': {\n 'labels': [\n champ.get_name() + \" (\" +\n champ.role.capitalize() + \")\" for champ in popular_champs\n ],\n 'data': [champ.num_seen for champ in popular_champs],\n 'images': [champ.get_full_image() for champ in popular_champs]\n },\n\n # Time graph of pick rate over a week, group by date picked\n 'winning_champ': {\n 'name': winning_champ.get_name(),\n 'role': winning_champ.role.capitalize(),\n 'image': winning_champ.get_full_image(),\n 'seen': winning_champ.num_seen,\n 'won': winning_champ.won * 100,\n 'assists': compile_sorted_champions(\n champ.get_compiled_weights(\"assists\")\n ),\n 'kda': winning_champ.get_kda(),\n\n 'role_distribution': {\n 'labels': [\n data.role.capitalize() for data in winning_champ_roles\n ],\n 'data': [data.seen for data 
in winning_champ_roles]\n }\n }\n }\n\n return jsonify(stats)", "title": "" }, { "docid": "cf310e05fb77f629a4c110919b0e5734", "score": "0.5057088", "text": "def question_stats(data_dir):\r\n OUTPUT = \"question.stats\"\r\n count = []\r\n for qid in qa_map.keys():\r\n ans_count = len(qa_map[qid]['AnswererIdList'])\r\n count.append(ans_count)\r\n if ans_count == 0:\r\n print(\"0 answer id list\", qid)\r\n question_stats_cntr = Counter(count)\r\n\r\n with open(data_dir + OUTPUT, \"w\") as fout:\r\n for x in sorted(list(question_stats_cntr.keys())):\r\n print(\"{}\\t{}\".format(x, question_stats_cntr[x]), file=fout)\r\n print(\"Total\\t{}\".format(sum(count), file=fout), file=fout)\r\n return", "title": "" }, { "docid": "1c5eb6bd073f8b34288ea03f619e9712", "score": "0.50558263", "text": "def get_statistics(self, _):\r\n return Bcfg2.Statistics.stats.display()", "title": "" }, { "docid": "212cb5966b69f625d0ec7a333a5ddedc", "score": "0.5054232", "text": "def test_get_stats(self):\n response = self.client.get(reverse('stats'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n body = json.loads(response.content.decode())\n\n self.assertEqual(body['approvedExperimentCount'], 4)\n self.assertEqual(body['approvedFarmerCount'], 3)\n self.assertEqual(body['contactCount'], 3)", "title": "" }, { "docid": "6d5a2fca4c939bb11d74f639be72d3c4", "score": "0.5050168", "text": "def get_surveyed_facilities():\n urls = [\n 'http://ossap.formhub.org/ossap/forms/health_mopup/api',\n 'http://ossap.formhub.org/ossap/forms/health_mopup_new/api',\n 'http://ossap.formhub.org/ossap/forms/education_mopup/api',\n 'http://ossap.formhub.org/ossap/forms/education_mopup_new/api',\n 'http://ossap.formhub.org/ossap/forms/mopup_questionnaire_health_final/api',\n 'http://ossap.formhub.org/ossap/forms/mopup_questionnaire_education_final/api'\n ]\n facilities = []\n\n for url in urls:\n logging.debug('Fetching: ' + url)\n request = urllib2.Request(url)\n base64string = base64.encodestring('%s:%s' % (secrets.username, secrets.password)).replace('\\n', '')\n request.add_header('Authorization', 'Basic %s' % base64string) \n response = urllib2.urlopen(request)\n data = response.read()\n facilities += json.loads(data)\n\n output = {}\n for fac in facilities:\n lga_id = fac.get('lga', None)\n fac_id = fac.get('facility_ID', None)\n if lga_id:\n fac['id'] = fac_id.lower() if fac_id else None\n fac['name'] = fac.get('facility_name', '')\n lga_facilities = output.setdefault(lga_id, [])\n lga_facilities.append(fac)\n return output", "title": "" }, { "docid": "5d9df4a55412a4e14972bd21c2c2c4cc", "score": "0.5047964", "text": "def getstats(self, ):\n try:\n resp = self.__getcontent()\n self.__html = BeautifulSoup(resp, 'html.parser')\n total = self.__gettotalstats()\n state = self.__getstatesstat()\n time = self.__gettime()\n return {'states': state, 'total': total, 'time': time}\n except Exception as e:\n return \"Unable to Fetch Data\", e", "title": "" }, { "docid": "7cccc8ce13fe3c20695be258a704deb0", "score": "0.50457805", "text": "def GetClientStats(self, _):\n response = rdf_client_stats.ClientStats()\n for i in range(12):\n sample = rdf_client_stats.CpuSample(\n timestamp=int(i * 10 * 1e6),\n user_cpu_time=10 + i,\n system_cpu_time=20 + i,\n cpu_percent=10 + i)\n response.cpu_samples.Append(sample)\n\n sample = rdf_client_stats.IOSample(\n timestamp=int(i * 10 * 1e6),\n read_bytes=10 + i,\n write_bytes=10 + i)\n response.io_samples.Append(sample)\n\n return [response]", "title": "" }, { "docid": 
"a2c9ccb790ad9ac2006c8349739a5ca2", "score": "0.5041896", "text": "def gamestats():\n\n uuid = request.form.get('game', None)\n game = None\n\n if uuid is not None:\n game = Game.load(uuid)\n # Check whether the game is still in progress\n if (game is not None) and not game.is_over():\n # Don't allow looking at the stats in this case\n game = None\n\n if game is None:\n return jsonify(result = Error.GAME_NOT_FOUND)\n\n return jsonify(game.statistics())", "title": "" }, { "docid": "91cc360faea7fe3ec682a4f3082fa9cb", "score": "0.50325227", "text": "def poll(self):\n\n i = len(self.metrics)\n for metric, val in self.metrics.items():\n\n if --i <= 0 :\n data = self.get_data(metric, val['value'], saveLastSync = 1)\n else:\n data = self.get_data(metric, val['value'])\n\n if data:\n LOGGER.info('adding metric...')\n self.add_metrics(data, metric, val['appName'])", "title": "" }, { "docid": "e7a17fa58407cddf261c8790f5f8f7c7", "score": "0.50275254", "text": "def fetch_latest_stats(self):\n counts = namedtuple(\"Stats\", (\"node_count\",\n \"relationship_count\",\n \"property_count\"))\n uri = self.resource.metadata[\"resources\"][\"latest_data\"]\n latest_data = Resource(uri).get().content\n timestamps = latest_data[\"timestamps\"]\n data = latest_data[\"data\"]\n data = zip(\n (datetime.fromtimestamp(t) for t in timestamps),\n (counts(*x) for x in zip(\n (numberise(n) for n in data[\"node_count\"]),\n (numberise(n) for n in data[\"relationship_count\"]),\n (numberise(n) for n in data[\"property_count\"]),\n )),\n )\n return data", "title": "" }, { "docid": "712e7ecfce1c512f9562e042ba9303cb", "score": "0.50261223", "text": "def t_stats():\r\n # Query all teams' four factors stats\r\n results = session.query(Stats).all()\r\n\r\n # Create a dictionary from the row data and append to a list of all_games\r\n today_stats = []\r\n for t_stat in results:\r\n t_stat_dict = {}\r\n t_stat_dict[\"Team_abbr\"] = t_stat.Team_abbr\r\n t_stat_dict[\"Offense_eFG\"] = t_stat.Offense_eFG\r\n t_stat_dict[\"Defense_eFG\"] = t_stat.Defense_eFG\r\n t_stat_dict[\"Offense_TOV\"] = t_stat.Offense_TOV\r\n t_stat_dict[\"Defense_TOV\"] = t_stat.Defense_TOV\r\n t_stat_dict[\"Offense_ORB\"] = t_stat.Offense_ORB\r\n t_stat_dict[\"Defense_DRB\"] = t_stat.Defense_DRB\r\n t_stat_dict[\"Offense_FtFga\"] = t_stat.Offense_FtFga\r\n t_stat_dict[\"Defense_FtFga\"] = t_stat.Defense_FtFga\r\n today_stats.append(t_stat_dict)\r\n\r\n return jsonify(today_stats)", "title": "" }, { "docid": "64ad9612b74aaa739c23d2b6124ec7e2", "score": "0.502288", "text": "def _fetch_data(self):\n now = datetime.datetime.now()\n today_year = now.strftime(\"%Y\")\n today_month = now.strftime(\"%m\")\n yesterday = datetime.datetime.now() - datetime.timedelta(1)\n yesterday_year = yesterday.strftime('%Y')\n yesterday_month = yesterday.strftime('%m')\n yesterday_day = yesterday.strftime('%d')\n url = BASE_URI+API_ENDPOINT_DATA\n url += '{}/{}/{}'.format(\n yesterday_year,\n yesterday_month, self._counter_id\n )\n \n self._get_cookie()\n\n data = requests.get(url, headers=self._headers)\n\n try:\n self.state = int(float(data.json()[int(\n yesterday_day)-1][1])*1000)\n self.success = True\n self.attributes['attribution'] = \"Data provided by toutsurmoneau.fr\"\n\n except ValueError:\n raise PySuezError(\"Issue with yesterday data\")\n pass\n\n try:\n if yesterday_month != today_month:\n url = BASE_URI+API_ENDPOINT_DATA\n url += '{}/{}/{}'.format(\n today_year,\n today_month, self._counter_id\n )\n data = requests.get(url, headers=self._headers)\n\n 
self.attributes['thisMonthConsumption'] = {}\n for item in data.json():\n self.attributes['thisMonthConsumption'][item[0]] = int(\n float(item[1])*1000)\n\n except ValueError:\n raise PySuezError(\"Issue with this month data\")\n pass\n\n try:\n if int(today_month) == 1:\n last_month = 12\n last_month_year = int(today_year) - 1\n else:\n last_month = int(today_month) - 1\n last_month_year = today_year\n\n url = BASE_URI+API_ENDPOINT_DATA\n url += '{}/{}/{}'.format(\n last_month_year, last_month,\n self._counter_id\n )\n\n data = requests.get(url, headers=self._headers)\n\n self.attributes['previousMonthConsumption'] = {}\n for item in data.json():\n self.attributes['previousMonthConsumption'][item[0]] = int(\n float(item[1])*1000)\n\n except ValueError:\n raise PySuezError(\"Issue with previous month data\")\n pass\n\n try:\n url = BASE_URI+API_ENDPOINT_HISTORY\n url += '{}'.format(self._counter_id)\n\n data = requests.get(url, headers=self._headers)\n fetched_data = data.json()\n self.attributes['highestMonthlyConsumption'] = int(\n float(fetched_data[-1])*1000)\n fetched_data.pop()\n self.attributes['lastYearOverAll'] = int(\n float(fetched_data[-1])*1000)\n fetched_data.pop()\n self.attributes['thisYearOverAll'] = int(\n float(fetched_data[-1])*1000)\n fetched_data.pop()\n self.attributes['history'] = {}\n for item in fetched_data:\n self.attributes['history'][item[3]] = int(\n float(item[1])*1000)\n\n\n except ValueError:\n raise PySuezError(\"Issue with history data\")\n pass", "title": "" }, { "docid": "17729078508b28f6920ba3bc2f60ebae", "score": "0.5022304", "text": "def report(self):\n\n # Sync with the REST server\n self._sync_with_server()\n\n # Report historical progress and results assumed pending\n import matplotlib.pyplot as plt \n\n # Get outcome values and put them in order of their IDs,\n # which should be equivalent to chronological order (of suggestion time)\n ids = np.array(self._ids_to_outcome_values.keys())\n outcomes_values = np.array(self._ids_to_outcome_values.values())\n\n # Clean up nans, infs and Nones\n outcomes_values = np.array(map(lambda x: float(x) if x is not None else -np.inf, outcomes_values))\n outcomes_values[np.logical_not(np.isfinite(outcomes_values))] = -np.inf\n\n s = ids.argsort()\n ids = ids[s]\n outcome_values = outcomes_values[s]\n outcome_values = np.array([float(i) for i in outcome_values])\n if outcome_values.size == 0 or np.all(np.isinf(outcome_values)):\n print('There are no completed results to report')\n return\n\n # Plot progression\n plt.figure(1)\n plt.clf()\n y = outcome_values\n best_so_far = [ np.max(y[:(i+1)]) for i in range(len(y)) ]\n plt.scatter(range(len(y)),y,marker='x',color='k',label='Outcomes')\n plt.plot(range(len(y)),best_so_far,color='k',label='Best so far')\n plt.xlabel('Result #')\n plt.ylabel(self.outcome_name)\n plt.title('Results progression')\n plt.legend(loc=3)\n plt.draw()\n plt.ion()\n plt.show()\n \n # Plot table of results\n plt.figure(2)\n param_names = list(np.sort(self.parameters.keys()))\n col_names = ['Result #'] + param_names + [self.outcome_name]\n cell_text = []\n for nb,id in enumerate(ids):\n # Get paramater values, put in correct order and add to\n # table with corresponding outcome value\n params, values = zip(*self._ids_to_param_values[id].items())\n s = np.argsort(params)\n values = np.array(values)[s]\n outcome = self._ids_to_outcome_values[id]\n cell_text.append([str(nb+1)] + [str(v) for v in values] + [str(outcome)])\n\n if len(cell_text) > 20:\n cell_text = cell_text[-20:]\n the_table = 
plt.table(cellText = cell_text, colLabels=col_names, loc='center')\n\n ## change cell properties\n table_props=the_table.properties()\n table_cells=table_props['child_artists']\n for cell in table_cells:\n cell.set_fontsize(8)\n\n plt.axis('off')\n plt.title('Table of results')\n plt.draw()\n plt.ion()\n plt.show()", "title": "" }, { "docid": "2185bf0694f5e667d1460ee219d43f40", "score": "0.5010615", "text": "async def stats(self, ctx, battletag, comp=\"normal\"):\n battletagurl = battletag.replace('#', '-')\n url = 'https://www.overbuff.com/players/pc/' + battletagurl\n if comp == \"comp\":\n url += \"?mode=competitive\"\n async with aiohttp.get(url) as response:\n soupobject = BeautifulSoup(await response.text(), \"html.parser\")\n stats = discord.Embed()\n h1 = soupobject.find_all('h1')\n for tag in h1:\n stats.add_field(name='Tag:', value=tag.get_text(), inline=True)\n sr = soupobject.find_all('span', class_='color-stat-rating')\n try:\n stats.add_field(name='Skill Rank:', value=sr[0].get_text(), inline=True)\n except IndexError:\n stats.add_field(name=\"Skill Rank:\", value=\"User has no SR\", inline=True)\n heroes = soupobject.find_all('a', class_='color-white')\n heroranks = soupobject.find_all('span', rel='tooltip')\n mostplayed = soupobject.find_all('div', class_='player-hero')\n i = 0\n topthree = ''\n for i in range (0, 3):\n try:\n topthree += '- {0:<11} Rank: {1:>5}'.format(heroes[i].get_text(), heroranks[i+1].get_text()) + '\\n'\n except IndexError:\n topthree += 'No more heroes played.'\n break\n stats.add_field(name='Top Heroes Played:', value=topthree, inline = True)\n wins = soupobject.find_all('span', class_='color-stat-win')\n losses = soupobject.find_all('span', class_='color-stat-loss')\n total = int(int(wins[0].get_text().replace(\",\", \"\")) + int(losses[0].get_text().replace(\",\", \"\")))\n stats.add_field(name='Losses:', value=losses[0].get_text() , inline=True)\n stats.add_field(name='Total Games: ', value=str(total), inline = True)\n await self.bot.say(embed=stats)\n pass", "title": "" }, { "docid": "3de1a73dc566cf26ac44b06ae9a9ea8a", "score": "0.5008555", "text": "def get_metrics(self) -> List[Dict[str, Any]]:\n check_module(\"lpot\")\n framework = self.config.get(\"framework\", None)\n if framework is None:\n raise ClientErrorException(\"Framework not set.\")\n\n if framework == \"pytorch\":\n check_module(\"ignite\")\n else:\n check_module(framework)\n from lpot.metric.metric import framework_metrics\n\n help_dict = load_help_lpot_params(\"metrics\")\n if framework == \"onnxrt\":\n raw_metric_list = list(\n framework_metrics.get(\"onnxrt_qlinearops\")().metrics.keys(),\n )\n else:\n raw_metric_list = list(framework_metrics.get(framework)().metrics.keys())\n raw_metric_list += [\"custom\"]\n metrics_updated = update_metric_parameters(raw_metric_list)\n for metric, value in metrics_updated.copy().items():\n if isinstance(value, dict):\n for key in value.copy().keys():\n help_msg_key = f\"__help__{key}\"\n metrics_updated[metric][help_msg_key] = help_dict.get(\n metric,\n {},\n ).get(help_msg_key, \"\")\n metrics_updated[f\"__help__{metric}\"] = help_dict.get(\n f\"__help__{metric}\",\n \"\",\n )\n return self._parse_help_in_dict(metrics_updated)", "title": "" }, { "docid": "13496ce7cbf289136b8e55c456b75344", "score": "0.50078154", "text": "def evaluate(self):\n self.stat = {l: [] for l in self.labels}\n self.stat['others'] = []\n for record in self.stmt_data.iterrows():\n desc = re.sub('\\d', '', record[1]['description'])\n val, percent, accuracy = 
self._get_category(desc)\n # print(percent, val, record)\n if accuracy: self._update_stat(val, (record[0], record[1]['description']))\n else: self.stat['others'].append((record[0], record[1]['description']))", "title": "" }, { "docid": "8b2692ca4844107dd9da8cbdcaae29b3", "score": "0.5004159", "text": "def stats(self):\n pass", "title": "" }, { "docid": "0738a7b41008be4e6eebebedd5e8bc9b", "score": "0.50039274", "text": "def get_statistics():\n doc_stats = solr.statistics.docs()\n n_keywords = solr.statistics.keywords()\n n_keyword_models = solr.statistics.keywordmodel()\n return jsonify(\n n_total_docs=doc_stats.n_total,\n n_tagged_docs=doc_stats.n_tagged,\n n_untagged_docs=doc_stats.n_untagged,\n uploaded_last_7_days=doc_stats.last_7_days,\n uploaded_last_4_weeks=doc_stats.last_4_weeks,\n uploaded_last_12_months=doc_stats.last_12_months,\n uploaded_all_years=doc_stats.all_years,\n n_keywords=n_keywords,\n n_keyword_models=n_keyword_models,\n )", "title": "" }, { "docid": "209bbb51d58d84979bfd92b486bb472c", "score": "0.500075", "text": "def fetch_data(exp, version=None):\n if exp not in (1, 2):\n raise ValueError('exp must be 1 or 2.')\n\n if version is None:\n version = 'c1.1' if exp is 1 else 'c2.1'\n\n exp_data = get_data(version, '../experiment/data')\n\n pdf = exp_data['participants'].set_index('pid')\n complete = pdf.completed\n pdf = pdf.loc[complete]\n if 'variance' in pdf:\n pdf.variance = pdf.variance.replace(2442, 'decreasing').replace(2424, 'increasing')\n else:\n pdf['variance'] = 'constant'\n\n mdf = exp_data['mouselab-mdp'].set_index('pid').loc[complete]\n\n def extract(q):\n return list(map(int, q['click']['state']['target']))\n\n mdf['clicks'] = mdf.queries.apply(extract)\n mdf['n_clicks'] = mdf.clicks.apply(len)\n mdf['thinking'] = mdf['rt'].apply(get(0, default=0))\n mdf['variance'] = pdf.variance\n\n tdf = mdf.query('block == \"test\"').copy()\n tdf.trial_index -= tdf.trial_index.min()\n tdf.trial_index = tdf.trial_index.astype(int)\n tdf.trial_id = tdf.trial_id.astype(int)\n\n # pdf['total_time'] = exp_data['survey'].time_elapsed / 60000\n\n pdf['n_clicks'] = tdf.groupby('pid').n_clicks.mean()\n pdf['score'] = tdf.groupby('pid').score.mean()\n pdf['thinking'] = mdf.groupby('pid').thinking.mean()\n\n def excluded_pids():\n sdf = exp_data['survey-multi-choice'].set_index('pid').loc[complete]\n responses = pd.DataFrame(list(sdf.responses), index=sdf.index)\n grp = responses.groupby(lambda pid: pdf.variance[pid])\n correct = grp.apply(lambda x: x.mode().iloc[0]) # assume the most common answer is correct\n errors = correct.loc[pdf.variance].set_index(pdf.index) != responses\n fail_quiz = errors.sum(1) > 1\n no_click = mdf.query('block == \"train_inspector\"').groupby('pid').n_clicks.sum() == 0\n return fail_quiz | no_click\n\n pdf['excluded'] = excluded_pids()\n tdf = tdf.loc[~pdf.excluded]\n print(f'Excluding {pdf.excluded.sum()} out of {len(pdf)} partipicants')\n\n def get_env(row):\n row.state_rewards[0] = 0\n sigma = 5 if row.variance == 'constant' else 4\n\n return make_env(0, sigma,\n scaling_factors=SCALING[row.variance],\n ground_truth=row.state_rewards)\n tdf['env'] = tdf.apply(get_env, axis=1)\n\n def unroll(df):\n for pid, row in df.iterrows():\n env = row.env\n env.reset()\n for a in [*row.clicks, env.term_action]:\n yield {'pid': pid, 'trial_index': row.trial_index, 'trial_id': row.trial_id,\n 'state': env._state, 'action': a}\n env.step(a)\n return {\n 'participants': pdf,\n 'trials': tdf,\n 'unrolled': pd.DataFrame(unroll(tdf)),\n\n }", "title": "" }, { 
"docid": "0a8ad7d36de67db0d30114e93651d307", "score": "0.49989972", "text": "def getLocalStatistics(_current_response):\n try:\n logger.info('getLocalStatistics: Calculating local statistics.')\n\n #set up temporary window size\n window_tmp = WINDOW_SIZE\n\n response_size = len(_current_response)\n no_pos_in_window = no_neg_in_window = 0\n\n local_true_positives = local_false_positives = local_true_negatives = local_false_negatives = 0\n\n\n #define starting point\n if response_size < WINDOW_SIZE:\n window_tmp = response_size\n\n start = response_size - 1 - window_tmp\n\n #get local statistics for window\n for dct in _current_response[start:]:\n predicted_energy_in = dct['statistics']['global_statistics']['predicted_energy_in']\n predicted_energy_out = dct['statistics']['global_statistics']['predicted_energy_out']\n\n if predicted_energy_in == 1:\n local_true_positives += 1\n\n elif predicted_energy_in == -1:\n local_false_positives += 1\n\n if predicted_energy_out == 1:\n local_true_negatives += 1\n\n elif predicted_energy_out == -1:\n local_false_negatives += 1\n\n local_no_pos = local_true_positives + local_true_positives\n local_no_neg = local_true_negatives + local_false_negatives\n\n if local_no_pos:\n no_pos_in_window = float(local_true_positives) / float(local_no_pos)\n\n if local_no_neg:\n no_neg_in_window = float(local_true_negatives) / float(local_no_neg)\n\n return {\n 'local_statistics':\n {\n 'local_relative_positives': no_pos_in_window,\n 'local_relative_negatives': no_neg_in_window\n }\n }\n\n except:\n raise", "title": "" }, { "docid": "6af2725177476a3db61e5ab3d51701f4", "score": "0.49965128", "text": "def stats(username, location):\n\n # Grabs necessary information for analysis.\n user_id = get_user_id(username, location)\n\n query = (\n db.session.query(PlayerData)\n .filter_by(player_id=user_id, location=location)\n .all()\n )\n\n has_ranked = True\n\n # Calculates the time in which we should reset the data.\n cache_difference = (\n datetime.datetime.now() - datetime.timedelta(minutes=CACHE_LIMIT)\n )\n\n # If we have data, and it's old, we reset the user's data.\n if len(query) > 0 and query[0].updated < cache_difference:\n LOGGING.push(\"*'\" + username + \"'* has old data. Resetting stats.\")\n\n reset_stats(username, user_id, location)\n query = []\n\n # If we don't have data we get new data.\n if len(query) == 0:\n try:\n LOGGING.push(\n \"*'\" + username + \"'* from @'\" + location +\n \"'@ does not exist in the database. Creating model.\"\n )\n\n session = RiotSession(API_KEY, location)\n has_ranked = build_stats(username, location, session, user_id)\n\n query = (\n PlayerData.query\n .filter_by(player_id=user_id, location=location)\n .all()\n )\n\n # TODO(Make this a more descriptive error.)\n except KeyError:\n abort(429)\n\n # TODO(Restructure this so it doesn't make multiple query requests.)\n\n # Sets up data for analysis.\n full_stats = {'scores': []}\n\n # Fills the user specific statistics\n fill_user_stats(full_stats, username, user_id, location)\n\n # Gives popular counters for general roles for quick display\n full_stats['popular_counters'] = [\n popular_counters(\"TOP\"),\n popular_counters(\"MIDDLE\"),\n popular_counters(\"BOTTOM\"),\n popular_counters(\"JUNGLE\")\n ]\n\n # If we don't have data we don't call analyze_player.\n full_stats['analyzed_player'] = (\n analyze_player(user_id, location) if has_ranked\n else \"No analysis available.\"\n )\n\n # Returns a json of all the stats needed for display\n return jsonify(full_stats)", "title": "" } ]
2f856edd516ea41b56043c18b779c784
Method used for merging the transactions together. A single PayPal payment is often translated into multiple transactions (e.g. conversion to a foreign currency adds two extra transactions). As this is redundant information for us, we'd like to merge these into a single record.
[ { "docid": "9e04f7b2e1c283576dec06245eac7afb", "score": "0.5725072", "text": "def combine_transactions(trx_data):\n result = {}\n bar = ChargingBar('3/3 Merging trx ', max=len(trx_data.keys()), suffix = '%(percent).1f%% - %(eta)ds remaining')\n for i in sorted(trx_data.keys()):\n # First create groups of transactions based on the time in which they occurred\n timestamp = trx_data[i][0] + 'T' + trx_data[i][1]\n if timestamp not in result.keys():\n # The first transaction we came across at this time\n result[timestamp] = trx_data[i]\n else:\n # Another transaction from this time already exist and they both need to be merged\n result[timestamp] = merge_transactions(result[timestamp], trx_data[i])\n\n bar.next()\n\n bar.finish()\n return result", "title": "" } ]
[ { "docid": "32b3fdd5a2519df39368ae454f3f83c8", "score": "0.6524009", "text": "def merge_transactions(base, new):\n\n result = base\n\n # Check the fields one-by-one\n for i in range(max(len(base), len(new))):\n # Generic element handling\n try:\n if result[i] == \"\":\n # In case of an empty data cell, use the data from the 'new' transaction\n result[i] = new[i]\n continue\n except IndexError:\n if len(result) < len(new):\n # In case there's more elements in the new trx, copy them into the result\n result.append(new[i])\n continue\n\n # Specific element handling\n if i == 4:\n # Processing Currency\n if result[i] != BASE_CURRENCY:\n # We filter out non-BASE_CURRENCY values\n result[i] = new[i]\n # If we update the currency, we need to update the price as well\n result[i+1] = new[i+1]\n if i == 5:\n # Processing Value\n if new[i-1] != BASE_CURRENCY:\n # In case the new transaction is in foreign currency we don't mess with the value\n continue\n if (result[i] != new[i]):\n if abs(float(result[i])) == abs(float(new[i])):\n # This means that one transaction is to charge the account, the other is the debit\n # We are interested in the debit (i.e. <0)\n if float(result[i]) < 0:\n continue\n else:\n result[i] = new[i]\n else:\n print(\"WARN: Unexpected situation occurred while merging transactions\\n{}\\n{}\\n\".format(result, new), file=sys.stderr)\n print(\"WARN: Transaction has been ignored\\n\")\n\n return result", "title": "" }, { "docid": "b8a2225467deca9a2c53f8b2f702a170", "score": "0.587501", "text": "def transactify(transactions):", "title": "" }, { "docid": "8b4a451dd67f414f7a45840860c17ad3", "score": "0.5699397", "text": "def merge_transactions(\n self, dest: Transaction, source: Transaction, keep_dest_accounts: bool\n ) -> Transaction:\n dest_posting_comments = deepcopy(dest.postings[0].meta)\n source_posting_comments = deepcopy(source.postings[0].meta)\n source.postings[0].meta.clear()\n\n # clear and use new_entry\n if not keep_dest_accounts:\n dest.postings.clear()\n\n dest.postings.extend(source.postings)\n\n # restore and merge comments\n for comment in dest_posting_comments.values():\n self.add_posting_comment(dest, comment)\n for comment in source_posting_comments.values():\n self.add_posting_comment(dest, comment)\n\n for comment in source.meta.values():\n self.add_transaction_comment(dest, comment)\n return dest", "title": "" }, { "docid": "e9be9ee4faa91b75fe4f0f52b7585c15", "score": "0.5648792", "text": "def transform_txn_for_ledger(txn):\n return txn", "title": "" }, { "docid": "e64975356ab3c576864f082ebaf279e6", "score": "0.54528105", "text": "def transaction(self):\n\n # TODO: refactor. 
Ok, it's late and I don't have time to rewrite all the old code.\n # so I'll just gently wrap it in a new function.\n trs = [Transaction.from_v1(tr) for tr in self._transactions()]\n tr = trs[0]\n if len(trs) == 2:\n tr.transfers = {**tr.transfers, **trs[1].transfers}\n return tr", "title": "" }, { "docid": "a13b4349e2873557ef0157956056f047", "score": "0.5376048", "text": "def _amount_all(self):\n for order in self:\n total_second = 0.0\n total_main = 0.0\n total_money = 0.0\n for line in order.sale_account_detail:\n total_second += line.second_unit_number\n total_main += line.main_unit_number\n total_money += line.money\n order.update({\n 'total_second_number': total_second,\n 'total_main_number': total_main,\n 'total_money': total_money,\n })", "title": "" }, { "docid": "5ddd191e0c4de5e67803e0939a605ad1", "score": "0.52762854", "text": "def preprocces_transaction(self, buyer, seller):\n\n buyer.quantity -= 1\n seller.quantity -= 1\n self.surplus += buyer.valuation - seller.valuation\n self.transactions.append(buyer.bid)\n buyer.profits.append(buyer.valuation - buyer.bid)\n seller.profits.append(buyer.bid - seller.valuation)\n buyer.bid = 1\n seller.bid = 200\n self.agents = [agent for agent in self.agents if agent not in (buyer, seller)]", "title": "" }, { "docid": "dbce3ee411640526b37a0d8b68b86e84", "score": "0.52018", "text": "def new_transaction(self, sender, recipient, amount):\n new_transaction = {\n\n }\n\n\n return new_transaction", "title": "" }, { "docid": "b7f0a8061d1144a1333bb0329f1c8087", "score": "0.5187081", "text": "def _amount_all(self):\n voucher_obj = self.env['account.voucher'] \n for order in self:\n amount_untaxed = amount_deposit = amount_tax = 0.0\n for pline in order.reservation_package_line:\n amount_untaxed += pline.price_subtotal\n amount_tax += pline.price_tax\n for rline in order.reservation_room_line:\n amount_untaxed += rline.price_subtotal\n amount_tax += rline.price_tax\n for sline in order.reservation_service_line:\n amount_untaxed += sline.price_subtotal\n amount_tax += sline.price_tax\n for deposit in voucher_obj.search([('partner_id','=',order.partner_id.id)]):\n if deposit.state == 'draft':\n amount_deposit += deposit.amount\n order.update({\n 'amount_deposit': amount_deposit,\n 'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),\n 'amount_tax': order.pricelist_id.currency_id.round(amount_tax),\n 'amount_total': amount_untaxed + amount_tax,\n })", "title": "" }, { "docid": "f80e570cb6b7ee2f58ce455e0294522c", "score": "0.5153631", "text": "def test_double_save_paypal(self):\n extra_data = {\n \"response\": {\n \"transactions\": (\n {\n \"related_resources\": (\n {\n \"sale\": {\n \"transaction_fee\": {\n \"value\": \"0.34\",\n },\n },\n },\n ),\n },\n ),\n },\n }\n p = models.Payment(\n variant=\"paypal\", total=Decimal(\"10.00\"), extra_data=json.dumps(extra_data)\n )\n p.save()\n p.save()\n rp = models.Payment.objects.get()\n self.assertEqual(rp.transaction_fee, Decimal(\"0.34\"))", "title": "" }, { "docid": "9a71048faf2de85ee6b2e3d33dd7bf09", "score": "0.51502514", "text": "def processing_transactions():\n transactions = Transaction.objects.exclude(status__in=['start', 'done']).order_by('created')\n counter = 0\n\n for tran in transactions.iterator():\n try:\n with transaction.atomic():\n rate = ExchangeRate.objects.filter(currency=tran.currency, created__lte=tran.created)\\\n .order_by('-created').first()\n if not rate:\n continue\n \n usd = math.floor(100 * tran.amount / rate.rate)\n tran_amount = math.floor(tran.amount * 
tran.currency.fractional)\n oper = _create_operation(tran, usd, tran_amount)\n \n wallet_to = tran.wallet_to\n wallet_from = None\n \n if tran.operation == 'TRANSFER':\n wallet_from = tran.wallet_from\n wallet_from_amount = -1 * _get_wallet_amount(tran, wallet_from, usd, tran_amount)\n _create_wallet_hist(tran, wallet_from, wallet_to, oper, wallet_from_amount)\n _inc_wallet_balance(wallet_from, wallet_from_amount)\n \n wallet_to_amount = _get_wallet_amount(tran, wallet_to, usd, tran_amount)\n _create_wallet_hist(tran, wallet_to, wallet_from, oper, wallet_to_amount)\n _inc_wallet_balance(wallet_to, wallet_to_amount)\n \n tran.status = 'done'\n tran.save()\n counter += 1\n except Exception as err:\n logging.warning(err)\n return '{} transactions processed'.format(counter)", "title": "" }, { "docid": "9d69272af50f3f3f1991c8dc6f7d27b5", "score": "0.5143318", "text": "def _amount_all2(self):\n if not self.early_payment_discount:\n self.early_payment_disc_total = self.amount_total\n self.early_payment_disc_tax = self.amount_tax\n self.early_payment_disc_untaxed = self.amount_untaxed\n else:\n cur = self.pricelist_id.currency_id\n val = val1 = 0\n for line in self.order_line:\n if line.product_id and line.product_id.without_early_payment:\n val1 += line.price_subtotal\n val += self._amount_line_tax(line)\n else:\n val1 += line.price_subtotal * \\\n (1.0 - (float(self.early_payment_discount or 0.0)) /\n 100.0)\n val += self._amount_line_tax(line) * \\\n (1.0 - (float(self.early_payment_discount or 0.0)) /\n 100.0)\n self.early_payment_disc_tax = cur.round(val)\n self.early_payment_disc_untaxed = cur.round(val1)\n self.early_payment_disc_total = cur.round(val+val1)\n self.total_early_discount = self.early_payment_disc_untaxed - \\\n self.amount_untaxed", "title": "" }, { "docid": "28b6ca15fce817d85894aab1916f8106", "score": "0.51289475", "text": "def _amount_all(self):\n res = {}\n ut_obj = self.env['l10n.ut']\n for iwdl_brw in self.browse(self.ids):\n # Using a clousure to make this call shorter\n f_xc = ut_obj.sxc(\n iwdl_brw.invoice_id.company_id.currency_id.id,\n iwdl_brw.invoice_id.currency_id.id,\n iwdl_brw.islr_wh_doc_id.date_uid)\n\n res[iwdl_brw.id] = {\n 'amount': (iwdl_brw.base_amount * (iwdl_brw.retencion_islr / 100.0)) or 0.0,\n 'currency_amount': 0.0,\n 'currency_base_amount': 0.0,\n }\n for xml_brw in iwdl_brw.xml_ids:\n res[iwdl_brw.id]['amount'] = xml_brw.wh\n res[iwdl_brw.id]['currency_amount'] = f_xc(\n res[iwdl_brw.id]['amount'])\n res[iwdl_brw.id]['currency_base_amount'] = f_xc(\n iwdl_brw.base_amount)\n #pass\n #return res", "title": "" }, { "docid": "36d5f783523374bf977f133560be482d", "score": "0.51194155", "text": "def _transactions(self):\n\n trs = Transactions()\n\n # ----- invoice transaction\n\n trs.append({'date': self.date,\n 'description': 'Invoice '+self.id,\n 'amount': self.amount,\n 'from': self['from'],\n 'to': self['to']})\n\n # ----- tax transaction\n if self.tax:\n params = {'invoice_id': self.id, 'ext_name': self.ext_name}\n accounts = utils.invoice_accounts(\n self.prefix, params, 'tax_accounts')\n\n trs.append({'date': self.date,\n 'description': 'Tax for invoice '+self.id,\n 'amount': self.tax,\n **accounts})\n\n return trs", "title": "" }, { "docid": "306add8f341f826de579138f172bdf76", "score": "0.50973797", "text": "def serialize(self):\n return {\n \"id\": self.id,\n \"currency\": self.currency,\n \"owner\": self.owner.serialize,\n \"balance\": self.balance,\n \"transactions\": [tx.serialize for tx in 
self.sent_transactions.union(self.received_transaction).order_by(Transaction.at.desc())]\n }", "title": "" }, { "docid": "c8aea55af5b075f7c76e7ed27040308e", "score": "0.50799274", "text": "def merge_orders(self, cr, uid, ids, context=None):\n order_obj = self.pool.get('purchase.order')\n proc_obj = self.pool.get('procurement.order')\n mod_obj =self.pool.get('ir.model.data')\n if context is None:\n context = {}\n result = mod_obj._get_id(cr, uid, 'purchase', 'view_purchase_order_filter')\n id = mod_obj.read(cr, uid, result, ['res_id'])\n\n allorders = order_obj.do_merge(cr, uid, context.get('active_ids',[]), context)\n for new_order in allorders:\n proc_ids = proc_obj.search(cr, uid, [('purchase_id', 'in', allorders[new_order])], context=context)\n for proc in proc_obj.browse(cr, uid, proc_ids, context=context):\n if proc.purchase_id:\n proc_obj.write(cr, uid, [proc.id], {'purchase_id': new_order}, context)\n \n cr.execute(\"\"\"update purchase_order_line set procurement_id = null\n where order_id in %s\"\"\",(tuple(allorders[new_order]),))\n\n return {\n 'domain': \"[('id','in', [\" + ','.join(map(str, allorders.keys())) + \"])]\",\n 'name': _('Purchase Orders'),\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'purchase.order',\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'search_view_id': id['res_id']\n }", "title": "" }, { "docid": "131c8f9b1099f230f759eb6c72374771", "score": "0.50755316", "text": "def insert_data_in_payment(self):\n try:\n try:\n \n # create connection\n con = connect_db()\n # cursor\n cur = con.cursor()\n\n # -------*******------- Insert Data in 'payment' table -------*******-------\n # Field Count => 4\n cur.execute(\n \"INSERT INTO payment(user_email, tran_id, transaction_log, total_amt) values(%s,%s,%s,%s) RETURNING id;\",\n (\n self.event_body.get(\"user_email\", None),\n self.event_body.get(\"tran_id\", None),\n self.event_body.get(\"transaction_log\", None),\n float(self.event_body.get(\"amount\", None)) if not self.event_body.get(\"amount\", None) == \"\" else 0.0\n )\n )\n # Commit the payment transaction\n con.commit()\n\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed to insert data in 'payment' table!\"\n )\n )\n\n try:\n \n # -------*******------- Insert Data in 'payment_details' table -------*******-------\n # get inserted Payment row\n inserted_payment_row = cur.fetchone()\n # get PAYMENT ID\n # payment_id = inserted_payment_row['id']\n payment_id = inserted_payment_row[0]\n\n # Field Count => 21\n cur.execute(\n \"INSERT INTO payment_details(payment_id, amount, card_type, store_amount, card_no, bank_tran_id, status, tran_date, currency, card_issuer, card_brand, card_issuer_country, card_issuer_country_code, store_id, verify_sign, verify_key, cus_fax, currency_type, currency_amount, currency_rate, base_fair) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);\",\n (\n inserted_payment_row, \n float(self.event_body.get(\"amount\", None)) if not self.event_body.get(\"amount\", None) == \"\" else 0.0,\n self.event_body.get(\"card_type\", None),\n float(self.event_body.get(\"store_amount\", \"\")) if not self.event_body.get(\"store_amount\", \"\") == \"\" else 0.0,\n self.event_body.get(\"card_no\", None),\n self.event_body.get(\"bank_tran_id\", None),\n self.event_body.get(\"status\", None),\n self.event_body.get(\"tran_date\", None),\n self.event_body.get(\"currency\", None),\n 
self.event_body.get(\"card_issuer\", None),\n self.event_body.get(\"card_brand\", None),\n self.event_body.get(\"card_issuer_country\", None),\n self.event_body.get(\"card_issuer_country_code\", None),\n self.event_body.get(\"store_id\", None),\n self.event_body.get(\"verify_sign\", None),\n self.event_body.get(\"verify_key\", None),\n self.event_body.get(\"cus_fax\", None),\n self.event_body.get(\"currency_type\", None),\n float(self.event_body.get(\"currency_amount\", None)) if not self.event_body.get(\"currency_amount\", None) == \"\" else 0.0,\n float(self.event_body.get(\"currency_rate\", None)) if not self.event_body.get(\"currency_rate\", None) == \"\" else 0.0,\n float(self.event_body.get(\"base_fair\", None)) if not self.event_body.get(\"base_fair\", None) == \"\" else 0.0\n )\n )\n\n # Commit the payment transaction\n con.commit()\n\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed to insert data in 'payment_details' table!\"\n )\n )\n\n # ------------------- *** validate structure and finalize response *** -------------------\n result = {\n \"statusCode\": 200,\n \"body\": {\n \"responseData\": {\n \"paymentID\": payment_id\n }\n }\n }\n finalized_response = finalize_response(response=result)\n \n # return finalized_response\n return finalized_response\n\n except Exception as E:\n raise Exception(\n get_exception_message(\n Ex=E, file=__file__, parent=inspect.stack()[0][3], line=inspect.stack()[\n 0][2],\n msg=\"Failed to insert data in Database!\"\n )\n )", "title": "" }, { "docid": "9d6c50eee9b446b4692f88d011a5b11c", "score": "0.50637966", "text": "def norm_transactions(clean_trans_df, clean_port_df):\n # creates the transaction data\n transactions_data = transactions(clean_trans_df)\n \n # all offer affected transactions\n inf_discount = influenced_discount(clean_trans_df, clean_port_df)\n inf_bogo = influenced_bogo(clean_trans_df, clean_port_df)\n inf_informational = influenced_informational(clean_trans_df, clean_port_df)\n \n # combine all the influenced transcations \n inf_trans = inf_informational.append(inf_discount.append(inf_bogo))\n \n # drop to have the same columns as all transactions \n inf_trans = inf_trans[['person', 'transaction_time', 'spend']]\n \n # remove offer related transactions\n norm_trans = pd.concat([transactions_data, inf_trans]).drop_duplicates(keep=False)\n \n return norm_trans", "title": "" }, { "docid": "b411494c2f32b8e2caef63dac9b678e1", "score": "0.5063595", "text": "def issue_payment(self):\r\n self.total = (self.salary / 24)\r\n for entry in self.receipts:\r\n self.total += (entry * (self.commission_rate/100))\r\n self.receipts = []\r\n return self.total", "title": "" }, { "docid": "c3b537d95b96f3c3b0a3cbef01757392", "score": "0.5040347", "text": "def payment_total(self):\n total = 0\n for x in Payment.query.all():\n total += x.amount\n\n return total", "title": "" }, { "docid": "6c2402771c6fa813530b421ab832698d", "score": "0.5029317", "text": "def post(self):\n for rec in self:\n amt = 0 \n\n if not rec.name:\n # Use the right sequence to set the name\n if rec.payment_type == 'transfer':\n sequence_code = 'account.payment.transfer'\n else:\n if rec.partner_type == 'customer':\n if rec.payment_type == 'inbound':\n sequence_code = 'account.payment.customer.invoice'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.customer.refund'\n if rec.partner_type == 'supplier':\n if rec.payment_type == 'inbound':\n 
sequence_code = 'account.payment.supplier.refund'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.supplier.invoice'\n rec.name = self.env['ir.sequence'].with_context(ir_sequence_date=rec.payment_date).next_by_code(sequence_code)\n if not rec.name and rec.payment_type != 'transfer':\n raise UserError(_(\"You have to define a sequence for %s in your company.\") % (sequence_code,))\n\n rec.name = rec.journal_id.code + \"/\" + rec.currency_option + \"/\" + rec.name\n if rec.invoice_lines:\n \n for line in rec.invoice_lines:\n amt += line.allocation\n if rec.amount < amt:\n raise ValidationError((\"Payment amount must be greater then or equal to '%s'\") %(amt))\n if rec.amount > amt:\n for line in rec.invoice_lines:\n line.allocation = line.allocation + (rec.amount - amt)\n break\n\n result = super(AccountPayment,self).post()\n if result:\n debit_line_vals = []\n credit_line_vals = []\n data_final = []\n\n destination_move_line_id = self.env['account.move.line'].search([\n ('name', '=', rec.name),\n ('move_id.journal_id','=',rec.destination_journal_id.id)\n ])\n\n print (\"Destination Move Id \", destination_move_line_id.move_id.name)\n if destination_move_line_id:\n move_id = destination_move_line_id.move_id\n # move_id.button_cancel()\n \n for line in move_id.line_ids:\n line.remove_move_reconcile()\n\n if line.debit > 0:\n debit_line_vals = (0,0,{\n 'name': line.name,\n 'journal_id': line.journal_id.id,\n 'date': line.date,\n 'credit': 0,\n 'debit': line.debit * rec.currency_rate,\n 'partner_id': line.partner_id.id,\n 'account_id': line.account_id.id,\n 'payment_id': rec.id\n })\n data_final.append(debit_line_vals)\n\n elif line.credit > 0:\n credit_line_vals = (0,0,{\n 'name': line.name,\n 'journal_id': line.journal_id.id,\n 'date': line.date,\n 'debit': 0,\n 'credit': line.credit * rec.currency_rate,\n 'partner_id': line.partner_id.id,\n 'account_id': line.account_id.id,\n 'payment_id': rec.id\n })\n data_final.append(credit_line_vals)\n\n rec.write({'state':'draft'})\n new_move_id = self.env['account.move'].create({\n 'ref': move_id.name,\n 'journal_id': move_id.journal_id.id,\n 'date': move_id.date,\n 'narration': move_id.name,\n 'line_ids': data_final\n })\n\n new_move_id.post()\n\n # Delete old Move\n move_id.button_cancel()\n move_id.unlink()\n rec.write({'state':'posted'})\n\n return True", "title": "" }, { "docid": "3ec5ed9977dd7a38414f938a7bccfd9d", "score": "0.5029102", "text": "def match_transaction_with_payout(transaction):\n from .models import RemoteDocdataPayout, RemoteDocdataPayment\n\n full_description = ''.join([getattr(transaction, 'description{0}'.format(i)) for i in range(1, 7)])\n\n # Matching...\n # Try to match checking to savings\n if transaction.sender_account == 'NL45RABO0132207044' and \\\n transaction.counter_account in ['NL38RABO1513237977', '1513237977']:\n transaction.status = transaction.IntegrityStatus.Valid\n transaction.category_id = 4\n transaction.save()\n return\n\n # Try to match savings to checking\n if transaction.sender_account == 'NL38RABO1513237977' and \\\n transaction.counter_account in ['NL45RABO0132207044', '0132207044']:\n transaction.status = transaction.IntegrityStatus.Valid\n transaction.category_id = 5\n transaction.save()\n return\n\n # Figure out possible invoice references\n possible_invoice_refences = [\n getattr(transaction, 'description{0}'.format(i)).split(' ')[0].lower() for i in range(1, 5)\n ]\n\n # Try to match Project Payout\n # Built query for filtering transactions on invoice reference\n qs_filter = 
Q()\n for ref in possible_invoice_refences:\n if len(ref) > 0:\n qs_filter |= Q(invoice_reference=ref)\n\n if qs_filter:\n try:\n project_payout = ProjectPayout.objects.get(qs_filter)\n if project_payout.amount_payable == transaction.amount:\n transaction.status = transaction.IntegrityStatus.Valid\n else:\n transaction.status = transaction.IntegrityStatus.AmountMismatch\n transaction.status_remarks = '{0} != {1}'.format(project_payout.amount_payable, transaction.amount)\n\n transaction.payout = project_payout\n transaction.category_id = 1\n transaction.save()\n return\n except ProjectPayout.DoesNotExist:\n pass\n except ProjectPayout.MultipleObjectsReturned:\n logger.critical('Multiple project payouts with matching references are found: {0}'.format(', '.join(possible_invoice_refences)))\n\n # Try to match Docdata Payout\n match = re.search('pop\\d+t', full_description)\n if match:\n invoice_reference = match.group()\n try:\n remote_payout = RemoteDocdataPayout.objects.get(payout_reference=invoice_reference)\n if remote_payout.payout_amount == transaction.amount:\n transaction.status = transaction.IntegrityStatus.Valid\n else:\n transaction.status = transaction.IntegrityStatus.AmountMismatch\n transaction.status_remarks = '{0} != {1}'.format(remote_payout.payout_amount, transaction.amount)\n\n transaction.remote_payout = remote_payout\n transaction.category_id = 2\n transaction.save()\n return\n except RemoteDocdataPayout.DoesNotExist:\n logger.warning('No remote Docdata payout found for reference: {0}'.format(invoice_reference))\n except RemoteDocdataPayout.MultipleObjectsReturned:\n logger.critical('Multiple Docdata payouts with matching triple deal reference are found: {0}'.format(\n invoice_reference))\n\n # Try to match Docdata Payment\n match = re.search('pid\\d+t', full_description)\n if match:\n tdr = match.group()\n try:\n remote_payment = RemoteDocdataPayment.objects.get(triple_deal_reference=tdr)\n if remote_payment.amount_collected == transaction.amount:\n transaction.status = transaction.IntegrityStatus.Valid\n else:\n transaction.status = transaction.IntegrityStatus.AmountMismatch\n transaction.status_remarks = '{0} != {1}'.format(remote_payment.amount_collected, transaction.amount)\n\n transaction.remote_payment = remote_payment\n transaction.category_id = 6\n transaction.save()\n return\n except RemoteDocdataPayment.DoesNotExist:\n logger.warning('No remote Docdata payment found for triple deal reference: {0}'.format(tdr))\n except RemoteDocdataPayment.MultipleObjectsReturned:\n logger.critical('Multiple Docdata payments with matching triple deal reference are found: {0}'.format(tdr))\n\n transaction.status = transaction.IntegrityStatus.UnknownTransaction\n transaction.remote_payment = None\n transaction.remote_payout = None\n transaction.payout = None\n transaction.category_id = None\n transaction.save()", "title": "" }, { "docid": "431f8a7a0b59c9245bddd83f79dc8d67", "score": "0.50224507", "text": "def __insert_transaction(self, transactions):\n INSERT_TRANS = \"INSERT INTO staging_transactions (trans_type, transaction_date, post_date, ref_number, account_number, amount, DESCRIPTION) VALUES (?, ?, ?, ?, ?, ?, ?)\"\n\n for _, trans in transactions.items():\n trans = [self.__flatten_transaction(tran) for tran in trans]\n self.cursor.executemany(INSERT_TRANS, trans)", "title": "" }, { "docid": "35af6e89532fbd7f406465d97f88606f", "score": "0.50094736", "text": "def consolidate_result(self, results):\n return results", "title": "" }, { "docid": 
"b87759d9476b917cf036f893740139c0", "score": "0.49833766", "text": "def transactions(data):\n transactions_df = data[data['transaction'] == 1]\n transactions_df = transactions_df[['person','time','id']]\n transactions_df.columns = ['person','transaction_time','spend']\n \n return transactions_df", "title": "" }, { "docid": "37642d149c890d5d01ccdb2428b6c2ee", "score": "0.49266425", "text": "def merge_non_transaction_entries(imported_entries, enhanced_transactions):\n enhanced_entries = []\n enhanced_transactions_iter = iter(enhanced_transactions)\n for entry in imported_entries:\n # pylint: disable=isinstance-second-argument-not-valid-type\n if isinstance(entry, Transaction):\n enhanced_entries.append(next(enhanced_transactions_iter))\n else:\n enhanced_entries.append(entry)\n\n return enhanced_entries", "title": "" }, { "docid": "78f0289ba76ad812c27bd8e986dc5143", "score": "0.49237102", "text": "def purchase_made(request):\n\n # multiplier = Instrument.objects.values_list('multiplier')[11][0]\n # print(multiplier)\n if request.method == 'POST' and request.user.is_authenticated():\n quantity = request.POST.get('quantity')\n price = request.POST.get('price')\n symbol = request.POST.get('symbol')\n multiplier = request.POST.get('multiplier')\n if (int(quantity)!=0):\n t = Transaction()\n t.user = request.user\n t.symbol = symbol\n t.quantity = int(quantity)\n t.price = float(price)\n z=Instrument()\n z.multiplier = int(multiplier)\n t.transaction_amount= z.multiplier*t.quantity*t.price\n t.save()\n # transaction_list = Transaction.objects.values('symbol').annotate(total=Sum('transaction_amount'))\n transaction_list = Transaction.objects.filter(user=request.user.id).values('symbol').annotate(total=Sum('transaction_amount'))\n TransactionSummary.objects.filter(user=request.user.id).delete()\n for item in range(len(transaction_list)):\n y=TransactionSummary()\n y.user=request.user\n y.symbol=transaction_list[item]['symbol']\n y.symbol_total=transaction_list[item]['total']\n y.absolute_symbol=abs(y.symbol_total)\n y.save()\n print(transaction_list)\n # print(type(transaction_list))\n # print(transaction_list[0]['symbol'])\n return JsonResponse({'success':'true'})\n return", "title": "" }, { "docid": "b7287605b0e45681fa047ace5462f918", "score": "0.49161172", "text": "def _process_transactions(self, response):\n\n if not response.ok:\n raise exceptions.BadRequest('TODO: SOMETHING FUCKED UP')\n\n response_json = response.json()\n\n transactions = [\n {\n '_id': transaction['_id'],\n '_account': transaction['_account'],\n 'date': self._process_transaction_date(transaction['date']),\n 'amount': transaction['amount'],\n 'name': self._process_transaction_name(transaction['name']),\n 'pending': transaction['pending']\n }\n for transaction\n in response_json['transactions']\n ]\n\n return transactions", "title": "" }, { "docid": "a45170267d1d003087b5fa466469de0e", "score": "0.490086", "text": "def fetch_accountTransactions(accountNum):\n ## Search for any transactions that has originated from or destined to that account. If\n ## no transactions are found, simply return an empty string\n try:\n transactions = Transaction.objects.filter(Q(transaction_origin=Account.objects.get(account_number=accountNum)) | Q(transaction_destination=Account.objects.get(account_number=accountNum)))\n except Transaction.DoesNotExist:\n transactions = []\n ## For each transaction found, add a new dict to transactions_modified to display information\n ## to the user. 
M2M fields transaction_origin and transaction_destination hasve to be converted\n ## to string to show user \n transactions_modified = []\n for i in range(len(transactions)):\n transactions_modified.append({})\n transactions_modified[i][\"transaction_id\"] = transactions[i].transaction_id\n transactions_modified[i][\"transaction_amount\"] = \"${:,.2f}\".format(transactions[i].transaction_amount)\n transactions_modified[i][\"transaction_time\"] = transactions[i].transaction_time\n transactions_modified[i][\"transaction_name\"] = transactions[i].transaction_name\n transactions_modified[i][\"transaction_origin\"] = list(transactions[i].transaction_origin.all())[0].account_number\n transactions_modified[i][\"transaction_destination\"] = list(transactions[i].transaction_destination.all())[0].account_number\n transactions_modified[i][\"transaction_origin_balance\"] = \"${:,.2f}\".format(transactions[i].transaction_origin_balance)\n transactions_modified[i][\"transaction_destination_balance\"] = \"${:,.2f}\".format(transactions[i].transaction_destination_balance)\n\n ## Search for any wire transactions for that account\n try:\n wire_transactions = WireTransaction.objects.filter(transaction_origin=Account.objects.get(account_number=accountNum))\n except WireTransaction.DoesNotExist:\n wire_transactions = []\n ## Repeat same process for adding dict element to a list\n wire_transactions_modified = []\n for i in range(len(wire_transactions)):\n wire_transactions_modified.append({})\n wire_transactions_modified[i][\"transaction_id\"] = wire_transactions[i].transaction_id\n wire_transactions_modified[i][\"transaction_amount\"] = \"${:,.2f}\".format(wire_transactions[i].transaction_amount)\n wire_transactions_modified[i][\"transaction_time\"] = wire_transactions[i].transaction_time\n wire_transactions_modified[i][\"transaction_name\"] = wire_transactions[i].transaction_name\n wire_transactions_modified[i][\"transaction_origin\"] = list(wire_transactions[i].transaction_origin.all())[0].account_number\n wire_transactions_modified[i][\"transaction_origin_balance\"] = \"${:,.2f}\".format(wire_transactions[i].transaction_origin_balance)\n\n ## Add any wire transactions found to the original transactions_modified list \n transactions_modified.extend(wire_transactions_modified)\n\n ## Sort the transactions by the time it was processed, starting from latest first\n transactions_modified.sort(key = lambda x: (x[\"transaction_time\"]), reverse=True)\n\n \n return transactions_modified", "title": "" }, { "docid": "694241e6bc12850742a3f713dc5e61c8", "score": "0.48892254", "text": "def consolidate_trades(self):\n\n assert len(self._strategies), 'No strategies to consolidate'\n\n self._trades = {}\n for strategy in self._strategies:\n for k, v in strategy.trades.items():\n if k not in self._trades:\n self._trades[k] = {}\n self._trades[k]['contract'] = strategy.contracts[k]\n if 'quantity' in self._trades[k]:\n self._trades[k]['quantity'] += v\n else:\n self._trades[k]['quantity'] = v\n if 'source' in self._trades[k]:\n self._trades[k]['source'][strategy.name] = v\n else:\n self._trades[k]['source'] = {strategy.name: v}\n\n self._trades = {k: v for k, v in self._trades.items() if v['quantity'] != 0}", "title": "" }, { "docid": "2874f5cabfbc5adf27a96944593e17d4", "score": "0.48814476", "text": "def _combine_employee_group_pay(cls, records):\n employee_cache = {}\n for record in records:\n employee_id = record.get(\"employee_id\")\n if employee_id not in employee_cache:\n employee_cache[employee_id] = record\n else:\n 
employee_record = employee_cache.get(employee_id)\n employee_record[\"start_of_period\"] = \\\n min(employee_record[\"start_of_period\"],\n record[\"start_of_period\"])\n employee_record[\"end_of_period\"] = \\\n max(employee_record[\"end_of_period\"],\n record[\"end_of_period\"])\n employee_record[\"amount_paid\"] += record[\"amount_paid\"]\n employee_ids = sorted(employee_cache.keys())\n return [\n employee_cache.get(employee_id) for employee_id in employee_ids]", "title": "" }, { "docid": "d5fabd9080fb3e7fdfeae2d9152c6a05", "score": "0.48780742", "text": "def translate(self, data):\n\n #: Paypal attribute names indexed by corresponding PR names.\n mapping = {\n 'transaction_id' : 'TRANSACTIONID',\n 'card_type' : 'CREDITCARDTYPE',\n 'invoice_number' : 'INVNUM',\n 'first_name' : 'FIRSTNAME',\n 'address_label' : 'STREET',\n 'country' : 'COUNTRYCODE',\n 'last_name' : 'LASTNAME',\n 'card_number' : 'ACCT',\n 'sales_tax' : 'TAXAMT',\n 'exp_date' : 'EXPDATE',\n 'ip' : 'IPADDRESS',\n 'state' : 'STATE',\n 'amount' : 'AMT',\n 'city' : 'CITY',\n 'cvv2' : 'CVV2',\n 'zip' : 'ZIP',\n }\n \n ret = {}\n for atr in data:\n d = data[atr]\n if d is not None:\n ret[mapping[atr]] = d\n return ret", "title": "" }, { "docid": "c4e4f9d291634ad56c47cb42fde86354", "score": "0.48616725", "text": "def _amount_all(self):\n res = {}\n ut_obj = self.env['l10n.ut']\n for ret_line in self.browse(self.id):\n f_xc = ut_obj.sxc(\n ret_line.invoice_id.company_id.currency_id.id,\n ret_line.invoice_id.currency_id.id,\n ret_line.islr_wh_doc_id.date_uid)\n res[ret_line.id] = {\n 'amount_islr_ret': 0.0,\n 'base_ret': 0.0,\n 'currency_amount_islr_ret': 0.0,\n 'currency_base_ret': 0.0,\n }\n #for line in ret_line.iwdl_ids:\n # res[ret_line.id]['amount_islr_ret'] += line.amount\n # res[ret_line.id]['base_ret'] += line.base_amount\n # res[ret_line.id]['currency_amount_islr_ret'] += \\\n # f_xc(line.amount)\n # res[ret_line.id]['currency_base_ret'] += f_xc(line.base_amount)\n iwdl_local = self.env['islr.wh.doc.line'].search([('islr_wh_doc_id', '=', ret_line.islr_wh_doc_id.id)])\n for line in iwdl_local:\n res[ret_line.id]['amount_islr_ret'] += line.base_amount * line.retencion_islr / 100\n res[ret_line.id]['base_ret'] += line.base_amount\n res[ret_line.id]['currency_amount_islr_ret'] += \\\n f_xc(line.base_amount * line.retencion_islr / 100)\n res[ret_line.id]['currency_base_ret'] += f_xc(line.base_amount)\n return res", "title": "" }, { "docid": "5769eed8ab69d70bf671cdd5a01655ad", "score": "0.4856353", "text": "def __reduce__(self):\n return _make_money, (self._currency_code, Decimal.__str__(self))", "title": "" }, { "docid": "4d06d6fcb81538983bdbb8bd7a36d097", "score": "0.4854114", "text": "def transactions (date=False, current=False):\n\n if current:\n transactions = get_transactions(current=True)\n\n if not date:\n transactions = get_transactions(report_date)\n else:\n transactions = get_transactions(date)\n\n total = 0\n with open (homepath + \"trans_amounts.txt\", \"w\") as fh:\n for t in transactions:\n if t[\"tenders\"][0][\"type\"] == \"CARD\":\n amount = int(t[\"tenders\"][0][\"amount_money\"][\"amount\"])\n total += amount\n fh.write(\"%s\\n\" % amount)", "title": "" }, { "docid": "36fd172fc2d49c911cda4fcb107fa522", "score": "0.48465487", "text": "def to_accounts(self):\n df = self.to_df()\n acc = df.set_index('id')['amount'] # account series\n tax = pd.Series([df.tax[df.tax > 0].sum(),\n df.tax[df.tax < 0].sum()],\n index=['tax.to_receive', 'tax.to_pay'])\n return pd.concat((acc, tax))", "title": "" }, { "docid": 
"bee5cd28e245222edfe60e3c8a0da8e8", "score": "0.48428786", "text": "def _amount_all(self):\n #PHMTB4 add amount_undiscounted, amount_discount\n for order in self:\n amount_untaxed = amount_tax = 0.0\n amount_discount = 0.0\n amount_undiscounted = 0.0\n for line in order.order_line:\n amount_untaxed += line.price_subtotal \n amount_undiscounted += line.price_unit * line.product_uom_qty\n \n #amount_discount += (line.price_unit - line.price_reduce) * (line.product_uom_qty)\n amount_discount += (line.price_subtotal - line.price_unit * line.product_uom_qty)\n # FORWARDPORT UP TO 10.0\n if order.company_id.tax_calculation_rounding_method == 'round_globally':\n price = line.price_unit * (1 - (line.discount or 0.0) / 100.0)\n taxes = line.tax_id.compute_all(price, line.order_id.currency_id, line.product_uom_qty, product=line.product_id, partner=line.order_id.partner_id)\n amount_tax += sum(t.get('amount', 0.0) for t in taxes.get('taxes', []))\n else:\n amount_tax += line.price_tax\n order.update({\n 'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),\n 'amount_tax': order.pricelist_id.currency_id.round(amount_tax),\n 'amount_total': amount_untaxed + amount_tax,\n #PHMTB4\n 'amount_total_discount': order.pricelist_id.currency_id.round(amount_discount),\n 'amount_total_base' : order.pricelist_id.currency_id.round(amount_undiscounted)\n })", "title": "" }, { "docid": "02e24329a8d0b81dfcd09868d2362cb4", "score": "0.48421907", "text": "def process_transactions(self):\n\n now = datetime.now(pytz.utc)\n for transaction in Transaction.objects.filter(pending=True):\n if (now - transaction.timestamp).total_seconds() > settings.TIP_EXPIRY:\n transaction.from_user.balance += transaction.amount\n transaction.pending = False\n transaction.from_user.save()\n transaction.save()\n logger.info('Refunded pending tip, from_user: %s, to_user: %s, amt: %s',\n transaction.from_user.user_id,\n transaction.to_user_temp_id,\n transaction.amount.quantize(Decimal('0.00')))\n tasks.send_notify_of_refund.delay(transaction.from_user,\n transaction.to_user_temp_id,\n transaction.amount)\n else:\n try:\n to_user = User.objects.get(user_id=transaction.to_user_temp_id)\n to_user.balance += transaction.amount\n to_user.save()\n transaction.to_user = to_user\n transaction.pending = False\n transaction.accepted = True\n transaction.save()\n logger.info('Completed pending tip, from_user: %s, to_user: %s, amt: %s',\n transaction.from_user.user_id,\n to_user.user_id,\n transaction.amount.quantize(Decimal('0.00')))\n tasks.send_tip_success.delay(transaction.from_user.user_id, to_user.user_id, transaction.amount)\n except User.DoesNotExist:\n pass", "title": "" }, { "docid": "204114b6bbf9e6de878b04dbcd5111cc", "score": "0.4822463", "text": "def _record_payment(self, data, amount=None, state=\"PAID\"):\n if not self.cart:\n return\n if not amount:\n amount = data[\"latest-charge-amount\"]\n transaction_id = data[\"google-order-number\"]\n pending = self.cart.payments.filter(state=\"PENDING\", transaction_id=transaction_id)\n if pending:\n pending[0].state = \"PAID\"\n pending[0].save()\n return pending[0]\n else:\n payment = self._create_payment(amount, transaction_id, state)\n payment.save()\n return payment", "title": "" }, { "docid": "bedad047eb5ba6730d95a967e11a2333", "score": "0.48085684", "text": "def process(self):\n return self.to_df().groupby(['account']).sum()['amount']", "title": "" }, { "docid": "e6baa8079dfed02814dba729896bdaf9", "score": "0.47973895", "text": "def process_payment(self):\n print(\"Please 
insert your payment.\")\n for coin in self.COIN_VALUES:\n self.payment += int(input(f\"How many {coin}?: \")) * self.COIN_VALUES[coin]\n return self.payment", "title": "" }, { "docid": "bb582925e86ae54ba19d9d0007a7cb01", "score": "0.4785093", "text": "def all(cls):\n cls.logger.info('Processing all Payments')\n return cls.query.all()", "title": "" }, { "docid": "633379f0c24a2562c5eb3b8b2a2457ed", "score": "0.4771961", "text": "def add_transaction( recipient, sender=owner, amount=1.0): #last_transaction=[1] is not removed as it is a required param\n # sender and amount are optin args, as the values are already specified, so recipient is placed first\n transaction = {\n 'sender': sender, \n 'recipient': recipient, \n 'amount': amount\n }\n pending_transactions.append(transaction)", "title": "" }, { "docid": "fed28e1623cd090cfe3e95cf0a2aaac1", "score": "0.47717285", "text": "def _amount_all(self):\n for order in self:\n amount_untaxed = amount_tax = amount_discount = 0.0\n for line in order.order_line:\n amount_untaxed += line.price_subtotal\n amount_tax += line.price_tax\n amount_discount += (line.product_uom_qty * line.price_unit * line.discount) / 100\n order.update({\n 'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),\n 'amount_tax': order.pricelist_id.currency_id.round(amount_tax),\n 'amount_total': amount_untaxed + amount_tax + self.adjustment_amount_total\n })", "title": "" }, { "docid": "21c8b4a337b6fbb35937132718c04dc4", "score": "0.47657847", "text": "def post(self):\n p_trans_account = self.request.get('trans_account')\n p_trans_purpose_category = self.request.get('trans_purpose_category')\n p_information = self.request.get('information')\n p_payee = self.request.get('payee')\n p_trans_type = self.request.get('trans_type')\n p_description = self.request.get('description')\n p_amount = Decimal(self.request.get('amount'))\n p_trans_date1 = datetime.strptime(self.request.get('trans_date'), '%Y-%m-%d')\n p_trans_date = date(p_trans_date1.year, p_trans_date1.month, p_trans_date1.day)\n\n trans_account = TransAccount.get(p_trans_account)\n trans_purpose_category = TransPurposeCategory.get(p_trans_purpose_category)\n payee = Payee.get(p_payee)\n \n p_amount = handle_amount(p_amount, p_trans_type)\n \n trans = TransItem(parent=None, trans_account = trans_account,\n trans_purpose_category = trans_purpose_category,\n information = p_information,\n payee = payee,\n trans_type = p_trans_type,\n description = p_description,\n amount = p_amount,\n trans_date = p_trans_date)\n \n trans_account.balance = trans_account.balance + trans.amount\n\n trans_account.put()\n trans.put()\n\n self.redirect('/trans_list')", "title": "" }, { "docid": "e914898369b5840b40b3200cc8a81dce", "score": "0.47604513", "text": "def new_transaction(self, sender, recipient, amount):\n transaction = {\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount,\n }\n self.pending_transactions.append(transaction)\n\n return transaction", "title": "" }, { "docid": "18255a55b9ccd5f0bcef8379cf911ef1", "score": "0.4753281", "text": "def merge_pairs(self):\r\n curr = self._first\r\n while curr is not None and curr.next is not None:\r\n curr.item += curr.next.item\r\n curr.next = curr.next.next\r\n curr = curr.next", "title": "" }, { "docid": "c8a8f8d30de26b3122efe71d756872fb", "score": "0.47405946", "text": "def get_values(self, trans_actions, trans_price, trans_balance, str_action_buy, str_action_sell,\n str_action_update):\n if trans_actions[0] != str_action_buy:\n raise RuntimeError(\"First 
investment transaction must be a buy. Cannot calculate investment-values.\")\n # Check the individual transactions for updates in price, and update the value according to the balance.\n # If no price-updates are given, the last value is used.\n trans_value = []\n for idx, action in enumerate(trans_actions):\n if action == str_action_buy or action == str_action_sell or action == str_action_update:\n trans_value.append(trans_balance[idx] * trans_price[idx])\n else:\n trans_value.append(trans_value[-1])\n\n return trans_value", "title": "" }, { "docid": "b436e74d2ff54aeb3acb4676a31ad086", "score": "0.47400868", "text": "def add_transaction(self, sender, recipient, amount):\n # create a dictionary for a transaction\n transaction = {}\n transaction['tid'] = self.transaction_sequence\n transaction['sender'] = sender\n transaction['recipient'] = recipient\n transaction['amount'] = amount\n # add the transaction to the ledger in the correct place\n self.all_transactions[self.block_count]['transactions'].append(\n transaction)\n # confirming transaction\n self.transaction_confirmed_count += 1\n # increment the unique tid\n self.transaction_sequence += 1\n # keep track of the value total added\n self.transaction_values.append(amount)\n # keep track of the entire flow of money (including cancels)\n self.transaction_values_with_cancels.append(amount)\n self.record_transaction_per_user(sender, recipient, amount)\n self.record_highest_lowest_balance(sender)\n self.record_highest_lowest_balance(recipient)", "title": "" }, { "docid": "856aab4e61b3132c03799e0355550ad9", "score": "0.47349286", "text": "def update_payment_from_webhook(cls, payload):\n try:\n webhook_response = payload['payment']['entity']\n obj = cls.get_active_objects(order_id=webhook_response['order_id'])\n if obj.exists():\n if webhook_response['method'] == 'card':\n obj.update(\n payment_id=webhook_response['id'],\n method=webhook_response['method'],\n card_id=webhook_response['card_id'],\n cname=webhook_response['card']['name'],\n last4 = webhook_response['card']['last4'],\n cnetwork = webhook_response['card']['network'],\n ctype = webhook_response['card']['type'],\n bank = webhook_response['bank'],\n wallet = webhook_response['wallet'],\n vpa = webhook_response['vpa'],\n email = webhook_response['email'],\n contact = webhook_response['contact'],\n notes = webhook_response['notes']\n )\n else:\n obj.update(\n payment_id=webhook_response['id'],\n method=webhook_response['method'],\n card_id=webhook_response['card_id'],\n bank=webhook_response['bank'],\n wallet=webhook_response['wallet'],\n vpa=webhook_response['vpa'],\n email=webhook_response['email'],\n contact=webhook_response['contact'],\n notes=webhook_response['notes']\n )\n obj = obj[0]\n if webhook_response['status'] == CONST_PAYMENT_STATUS_CAPTURED:\n obj.status = CONST_PAYMENT_STATUS_CAPTURED\n obj.save()\n elif webhook_response['status'] == CONST_PAYMENT_STATUS_AUTHORIZED and (obj.status == CONST_PAYMENT_STATUS_CREATED\n or obj.status == CONST_PAYMENT_STATUS_FAILED):\n obj.status = CONST_PAYMENT_STATUS_AUTHORIZED\n obj.save()\n elif webhook_response['status'] == CONST_PAYMENT_STATUS_FAILED and (obj.status == CONST_PAYMENT_STATUS_CREATED\n or obj.status == CONST_PAYMENT_STATUS_AUTHORIZED):\n obj.status = CONST_PAYMENT_STATUS_FAILED\n obj.save()\n except Exception as err:\n send_error_report_email(str(traceback.format_exc()), inspect.stack()[0][3])", "title": "" }, { "docid": "a02884b54468d475dc68467053b3c314", "score": "0.47216836", "text": "def combine_data(current_holdings_df, 
api_data_return):\n\n combined_holdings_dict = api_data_return\n \n for keys, values in current_holdings_df.items():\n\n for key_part, value_part in values.items():\n temp = []\n if key_part == 'purchase_date' or key_part == 'book_price' or key_part == 'purchase_currency' or key_part == 'qty':\n \n for i in value_part.values():\n temp.append(i)\n \n combined_holdings_dict[keys][key_part] = temp\n \n if key_part == 'purchase_date':\n temp_FX = []\n\n for i in value_part.values():\n date_obj = i.to_pydatetime()\n USD_CAD_purchase = CurrencyRates()\n USD_CAD_purchase = USD_CAD_purchase.get_rate('USD', 'CAD', date_obj)\n temp_FX.append(USD_CAD_purchase)\n \n combined_holdings_dict[keys]['fx_USDCAD_purchase'] = temp_FX\n \n return combined_holdings_dict", "title": "" }, { "docid": "26f0132462a8b57c7ac69a0b636180e9", "score": "0.4710216", "text": "def issue_payment(self):\r\n total = 0\r\n for entry in self.time_cards:\r\n total += self.Hourly_pay * entry\r\n self.time_cards = []\r\n return total", "title": "" }, { "docid": "6ead24bb5f225458277bfbe540772c01", "score": "0.47049943", "text": "def _credit_payments_info(self):\n operations = len(self.credit_transactions)\n checksum = self.total_credit_transactions\n\n return sepa_export_credit.credit_payments_info(\n operations, checksum, self.settings['DEBTOR'],\n self.total_credit_transactions, self.description)", "title": "" }, { "docid": "4576cb012f38c535f92b095ce9591a33", "score": "0.4704749", "text": "def reverse_payment(self, user_id, amount, currency, merchant_id):", "title": "" }, { "docid": "0956b70eff930a43b8678b4b4edbbf67", "score": "0.47014067", "text": "def make_payment(self, amount):\n pp = PaylineProcessor()\n result, transaction, message = pp.make_wallet_payment(self.wallet_id,\n amount)\n if result:\n self.transaction_set.create(amount=amount,\n transaction_id=transaction)\n return (result, message)", "title": "" }, { "docid": "efd1426a3a7d86e52e5eacdafab96540", "score": "0.46999037", "text": "def merged(self):\n return RecordStoreSummary(mtb.utils.flatten(self.values()), merge=True)", "title": "" }, { "docid": "201ac9cc77b497465a90b34d0edbb8bd", "score": "0.46952492", "text": "def post(self):\n for rec in self:\n #~ if rec.state != 'draft':\n #~ raise UserError(_(\"Only a draft payment can be posted.\"))\n\n if any(inv.state != 'open' for inv in rec.invoice_ids):\n raise ValidationError(_(\"The payment cannot be processed because the invoice is not open!\"))\n\n # keep the name in case of a payment reset to draft\n if not rec.name:\n # Use the right sequence to set the name\n if rec.payment_type == 'transfer':\n sequence_code = 'account.payment.transfer'\n else:\n if rec.partner_type == 'customer':\n if rec.payment_type == 'inbound':\n sequence_code = 'account.payment.customer.invoice'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.customer.refund'\n if rec.partner_type == 'supplier':\n if rec.payment_type == 'inbound':\n sequence_code = 'account.payment.supplier.refund'\n if rec.payment_type == 'outbound':\n sequence_code = 'account.payment.supplier.invoice'\n rec.name = self.env['ir.sequence'].with_context(ir_sequence_date=rec.payment_date).next_by_code(sequence_code)\n if not rec.name and rec.payment_type != 'transfer':\n raise UserError(_(\"You have to define a sequence for %s in your company.\") % (sequence_code,))\n\n # Create the journal entry\n amount = rec.amount * (rec.payment_type in ('outbound', 'transfer') and 1 or -1)\n move = rec._create_payment_entry(amount)\n persist_move_name = 
move.name\n\n # In case of a transfer, the first journal entry created debited the source liquidity account and credited\n # the transfer account. Now we debit the transfer account and credit the destination liquidity account.\n \n if rec.payment_type == 'transfer':\n transfer_credit_aml = move.line_ids.filtered(lambda r: r.account_id == rec.company_id.transfer_account_id)\n transfer_debit_aml = rec._create_transfer_entry(amount)\n (transfer_credit_aml + transfer_debit_aml).reconcile()\n persist_move_name += self._get_move_name_transfer_separator() + transfer_debit_aml.move_id.name\n\n rec.write({'state': 'posted', 'move_name': persist_move_name})\n self.action_payment()\n return True", "title": "" }, { "docid": "bea29914e2fd3ff66b2983fe53dce2c9", "score": "0.46943602", "text": "def merge(payload1, payload2):\n\n for item in payload2:\n if item not in payload1:\n payload1.append(item)\n\n return payload1", "title": "" }, { "docid": "878dd66800e30c3ed40137a6c1d0324c", "score": "0.46900675", "text": "def add_transaction(self, transaction):\n transaction['Date'] = transaction['Date'].date()\n\n if isinstance(transaction['Amount'], basestring) and transaction['Amount'].startswith(\"=\"):\n transaction['Amount'] = eval(transaction['Amount'][1:])\n\n transaction['Amount'] = monetize(transaction['Amount'])\n self.transactions.append(transaction)", "title": "" }, { "docid": "90048e89dcb138b2d4d71e975dd5322d", "score": "0.46855134", "text": "def make_transaction_frame(transactions):\n\n transaction_list = []\n for dt in transactions.index:\n txns = transactions.loc[dt]\n if len(txns) == 0:\n continue\n\n for txn in txns:\n txn = map_transaction(txn)\n transaction_list.append(txn)\n df = pd.DataFrame(sorted(transaction_list, key=lambda x: x['dt']))\n df['txn_dollars'] = -df['amount'] * df['price']\n\n df.index = list(map(pd.Timestamp, df.dt.values))\n return df", "title": "" }, { "docid": "f3d7c52bc1c5b6449f79474e136479eb", "score": "0.46709263", "text": "def merge(self, merge_with_id):\n params = base.get_params(None, locals())\n url = '{0}/merge'.format(self.get_url())\n return http.Request('POST', url, params), parsers.parse_json", "title": "" }, { "docid": "0c3eb85aeaef428eef07d9cd5f3cfa02", "score": "0.46688193", "text": "def calculate(self):\n\n from numpy import ppmt, ipmt, arange\n periods = arange(self.mperiods) + 1\n principal_repayments = ppmt(rate=self.rate, per=periods, nper=self.mperiods, pv=self.summa)\n interest_payments = ipmt(rate=self.rate, per=periods, nper=self.mperiods, pv=self.summa)\n\n date = self.start_date\n\n percent_payment = 0 # (self.summa * self.yrate / 12)\n debt_payment = 0\n rest_payment = self.summa + abs(interest_payments.sum())\n rest_payment_wo_percent = self.summa\n\n percent_payments = OrderedDict({date: percent_payment})\n debt_payments = OrderedDict({date: debt_payment})\n rest_payments = OrderedDict({date: rest_payment})\n rest_payments_wo_percents = OrderedDict({date: rest_payment_wo_percent})\n\n for i in range(self.mperiods):\n date = lastDayNextMonth(date)\n\n percent_payment = interest_payments[i]\n debt_payment = principal_repayments[i]\n rest_payment -= abs(percent_payment) + abs(debt_payment)\n rest_payment_wo_percent -= abs(debt_payment)\n\n if rest_payment < 0.01:\n rest_payment = 0\n\n if rest_payment_wo_percent < 0.01:\n rest_payment_wo_percent = 0\n\n percent_payments[date] = percent_payment\n debt_payments[date] = debt_payment\n rest_payments[date] = rest_payment\n rest_payments_wo_percents[date] = rest_payment_wo_percent\n\n 
self.percent_payments = percent_payments\n self.debt_payments = debt_payments\n self.rest_payments = rest_payments\n self.rest_payments_wo_percents = rest_payments_wo_percents", "title": "" }, { "docid": "2207c7b6f744ff99cf0b8238c76d5b1b", "score": "0.46584246", "text": "def __build_tx(self, tx):\n return Transaction(\n tx['sender'],\n tx['recipient'],\n tx['amount'],\n tx['signature']\n )", "title": "" }, { "docid": "c49395550ea3806716bb61b486dac5ec", "score": "0.4646898", "text": "def mergemarkers(self, transaction, data):\n version, markers = _readmarkers(data)\n return self.add(transaction, markers)", "title": "" }, { "docid": "9d024830ff6ef55e683bd5ab1c3c97b8", "score": "0.46463335", "text": "def pay_transform(self, pay_filepath, claims_filepath):\n\n # clean and transform pay table\n pay = pd.read_csv(pay_filepath, usecols=['PayDate', 'PatNum', 'PayAmt'])\n pay = pay[pay['PayDate'] != '2020-12-22']\n grouped_pay = pay.groupby(\"PatNum\", as_index=False)['PayAmt'].sum()\n\n # clean and transform claims table\n claims = pd.read_csv(claims_filepath, engine='python', error_bad_lines=False,\n usecols=['PatNum', 'DateReceived', 'InsPayAmt'])\n claims = claims[claims['DateReceived'] != '0001-01-01']\n claims.loc[17482, 'InsPayAmt'] = 754\n claims.drop('DateReceived', axis=1, inplace=True)\n grouped_claims = claims.groupby('PatNum', as_index=False).sum()\n\n # merge tables and create \"TOTAL\" for further use\n merged = grouped_claims.merge(grouped_pay)\n merged['Total'] = merged['InsPayAmt'] + merged['PayAmt']\n merged = merged.loc[:, ['PatNum', 'Total']]\n\n #merged.to_csv('../data/model/total.csv', index=False)\n return merged", "title": "" }, { "docid": "ecfd31515bbe28e4e434de491d3cbec8", "score": "0.46430418", "text": "def _log_totals(self):\n\n for sales_person, sales_credit in self._totals_by_sales_person.items():\n single_line = self._get_result_line('TOTAL BY SALES PERSON',\n sales_person=sales_person,\n sales_credit=sales_credit)\n self._trade_results.append(single_line)\n\n for portfolio, sales_credit in self._totals_by_portfolio.items():\n single_line = self._get_result_line('TOTAL BY PORTFOLIO',\n portfolio=portfolio,\n sales_credit=sales_credit)\n self._trade_results.append(single_line)\n\n for portfolio, sc_data in (self._totals_by_portfolio_and_sales_person\n .items()):\n for sales_person, sales_credit in sc_data.items():\n single_line = self._get_result_line(('TOTAL BY PORTFOLIO'\n '& SALES PERSON'),\n portfolio=portfolio,\n sales_person=sales_person,\n sales_credit=sales_credit)\n self._trade_results.append(single_line)\n\n for ins_type, sc_data in (self._totals_by_ins_type_and_sales_person\n .items()):\n for sales_person, sales_credit in sc_data.items():\n single_line = self._get_result_line(('TOTAL BY INSTRUMENT TYPE'\n ' & SALES PERSON'),\n ins_type=ins_type,\n sales_person=sales_person,\n sales_credit=sales_credit)\n self._trade_results.append(single_line)", "title": "" }, { "docid": "325ec7e9ecf939162b91fdd32fec10e6", "score": "0.462219", "text": "def add_transaction(self, transaction):\n transaction = {\n 'sender': transaction['sender'],\n 'recipient': transaction['recipient'],\n 'amount': transaction['amount'],\n 'time': str(datetime.utcnow())\n }\n self.pending_transactions.append(transaction)\n previous_block = self.previous_block\n\n response = {\n 'block_index': previous_block['index'] + 1,\n 'transaction': transaction, \n }\n return response", "title": "" }, { "docid": "388666cbe8c8f907b58e63fc8be00592", "score": "0.46203074", "text": "def 
get_successful_payments(self):\n return self.get_queryset().filter(status__in=[self.model.PAYMENT_STATUS_PAID])", "title": "" }, { "docid": "603612520fd1ac024f4c53fae268500a", "score": "0.46073586", "text": "def get_transactions(self):\n my_list = self.transactions\n print('\\n'.join(map(str, my_list)))", "title": "" }, { "docid": "08f57ac14f8f9316c4dcbfd9a17c174d", "score": "0.45920342", "text": "def add_turbulence(self, data):\n if data.empty: return data\n\n df = data.copy()\n df.reset_index(drop=True, inplace=True)\n turbulence_index = self.calcualte_turbulence(df)\n df = df.merge(turbulence_index, on='timestamp')\n df = df.sort_values(['timestamp','entity_id']).reset_index(drop=True)\n return df", "title": "" }, { "docid": "9d9cf627a5b3c1aacedcbc92cd0996e1", "score": "0.4590208", "text": "def MergeSimuRecord(all_simu_records):\n result = []\n tmp = []\n for peer_info in all_simu_records:\n if peer_info[0] != ';':\n tmp.append(ElementToFloat(peer_info))\n else:\n result.append(tmp)\n tmp = []\n return result", "title": "" }, { "docid": "50ea251e75d682fb0d30caafcf818b83", "score": "0.458753", "text": "def transaction(self):\n from Acquire.Accounting import Transaction as _Transaction\n\n if self.is_null():\n return _Transaction()\n else:\n return _Transaction(self.value(),\n \"Refund for transaction %s\"\n % self.transaction_uid())", "title": "" }, { "docid": "0eba47a23cf3f65fc9b4530aa4b0fcb0", "score": "0.4586215", "text": "def get_next_transaction_from_state(self, save_transaction=True):\n trans = None\n # The starting state of our transaction\n if self.status == self.STATUS_PROCESSING and self.location == self.LOCATION_ORIGIN:\n trans = Transaction(item=self.item, status=self.STATUS_PROCESSING, location=self.LOCATION_ROUTABLE)\n\n # If we are successful with the destination, put it in to a success status, else, error\n elif self.status == self.STATUS_PROCESSING and self.location == self.LOCATION_ROUTABLE:\n trans = Transaction(item=self.item, status=self.STATUS_COMPLETED, location=self.LOCATION_DESTINATION)\n\n # We are fixing this transaction, so move it back into processing so we can try again\n elif self.status == self.STATUS_FIXING and self.location == self.LOCATION_ROUTABLE:\n trans = Transaction(item=self.item, status=self.STATUS_PROCESSING, location=self.LOCATION_ROUTABLE)\n\n # We are refunding this transaction, so move it back to origin and mark it refunded\n elif self.status == self.STATUS_REFUNDING and self.location == self.LOCATION_ROUTABLE:\n trans = Transaction(item_id=self.item.id, status=self.STATUS_REFUNDED, location=self.LOCATION_ORIGIN)\n\n if trans and save_transaction is True:\n trans.save()\n return trans", "title": "" }, { "docid": "eabb69835aee4913cea1aa8fb749c42b", "score": "0.45815152", "text": "def populate_sec_transactions(self):\r\n while len(transactions) <= 20:\r\n address1 = wallets[random.randint(0, 9)]\r\n address2 = wallets[random.randint(0, 9)]\r\n while address2 == address1:\r\n address2 = wallets[random.randint(0, 9)]\r\n value = random.randint(0, 5)\r\n\r\n transaction = OrderedDict({'sender_address': address1.address,\r\n 'recipient_address': address2.address,\r\n 'value': value})\r\n transaction_verification = self.verify_transaction_signature(address1.address, address1.sign(transaction),\r\n transaction)\r\n if transaction_verification:\r\n self.transactions.append(transaction)\r\n print(\"Transaction %d added\" % len(transactions))\r\n else:\r\n print(\"Transaction %d failed\" % len(transactions))\r\n\r\n print(\"Twenty transactions added to 
Transaction pool..\")", "title": "" }, { "docid": "90f54d5b76b3e9020140681b44246782", "score": "0.45726606", "text": "def transaction(\n meta: Meta,\n date: datetime.date,\n flag: Flag,\n payee: str | None,\n narration: str,\n tags: TagsOrLinks,\n links: TagsOrLinks,\n postings: list[Posting],\n) -> Transaction:\n return data.Transaction( # type: ignore[return-value]\n meta,\n date,\n flag,\n payee,\n narration,\n tags,\n links,\n postings, # type: ignore[arg-type]\n )", "title": "" }, { "docid": "5b232fc83cbfc11a79086c2b51458908", "score": "0.45723155", "text": "def user_transactions(profile, transactions):\n # list of consumers in the transaction data\n consumers = transactions.groupby('person').sum().index\n\n # calculate the total transaction values for a consumer\n consumer_spend = transactions.groupby('person')['spend'].sum().values\n\n # calculate the number of transactions per consumer\n consumer_trans = transactions.groupby('person')['spend'].count().values\n\n # create a dataframe with spend info per consumer\n consumer_data = pd.DataFrame(consumer_trans, index=consumers, columns=['total transactions'])\n\n # add the total transaction column\n consumer_data['total spend'] = consumer_spend \n \n # average spend per transaction \n consumer_data['spend per trans'] = consumer_data['total spend']/consumer_data['total transactions']\n \n # average spend per day\n consumer_data['spend per day'] = consumer_data['total spend']/30\n \n # combine profile and transaction data\n consumer_profile = profile.merge(consumer_data, on=['person']).fillna(0)\n \n # I will take the last date the final day data has been collected\n final_date = consumer_profile['member joined'].max()\n \n # membership length in weeks\n consumer_profile['membership length'] = [round((final_date - x).days / 7,0) for x in consumer_profile['member joined']]\n\n return consumer_profile", "title": "" }, { "docid": "944c75a4ba7d0802f3e17ed984bcd3b8", "score": "0.45703897", "text": "def create_transaction(self, sender, recipient, amount):\n transaction = TRANSACTION(sender, recipient, amount)\n self.current.append(transaction)\n logging.info(\"Transaction successfully added. Number of Transactions now pending are %d \" % len(self.current))\n return transaction", "title": "" }, { "docid": "0c3e3a9e13dcf6e937faa6b72be56628", "score": "0.45700797", "text": "def merge(self):\n raise NotImplementedError", "title": "" }, { "docid": "99361c7996bb2fd283c278005bf58299", "score": "0.45683402", "text": "def total_balance(transactions):\n total = 0\n for t in transactions:\n total += float(t['Amount'])\n return total", "title": "" }, { "docid": "09f033e281d29c1a4266959ff28c9064", "score": "0.4568261", "text": "def transaction_list_from_db(user_id, account_id, cur, payee_list_dict, \n category_list_dict, account_list_dict):\n trans_list_dict = []\n cur.execute(\n \"\"\"SELECT * FROM transactions WHERE user_id = ? AND (account_id = ? OR transf_to_account_id = ?) 
\n ORDER BY date\"\"\", (user_id, account_id, account_id))\n trans_db = cur.fetchall()\n if not trans_db:\n return False\n total = 0\n for tran in trans_db:\n tran_dict = {}\n tran_dict[\"transaction_id\"] = tran[\"transaction_id\"]\n tran_dict[\"date\"] = tran[\"date\"]\n account_id_db = tran[\"account_id\"]\n transf_to_account_id = tran[\"transf_to_account_id\"]\n if transf_to_account_id == int(account_id):\n for account in account_list_dict:\n if account[\"account_id\"] == account_id_db:\n account_name = account[\"account_name\"]\n tran_dict[\"payee_name\"] = f\"Transfer from: {account_name}\"\n tran_dict[\"amount\"] = tran[\"amount\"]\n elif transf_to_account_id:\n for account in account_list_dict:\n if account[\"account_id\"] == transf_to_account_id:\n account_name = account[\"account_name\"]\n tran_dict[\"payee_name\"] = f\"Transfer to: {account_name}\"\n tran_dict[\"amount\"] = tran[\"amount\"] * -1\n else:\n tran_dict[\"payee_id\"] = tran[\"payee_id\"]\n for pay in payee_list_dict:\n if pay[\"payee_id\"] == tran[\"payee_id\"]:\n tran_dict[\"payee_name\"] = pay[\"payee_name\"]\n tran_dict[\"category_id\"] = tran[\"category_id\"]\n for cat in category_list_dict:\n if cat[\"category_id\"] == tran[\"category_id\"]:\n tran_dict[\"category_name\"] = cat[\"category_name\"]\n tran_dict[\"amount\"] = tran[\"amount\"]\n tran_dict[\"account_id\"] = tran[\"account_id\"]\n total = round(float(total + tran_dict[\"amount\"]), 2)\n tran_dict[\"user_id\"] = tran[\"user_id\"]\n trans_list_dict.append(tran_dict)\n return [trans_list_dict, total]", "title": "" }, { "docid": "893bcc851a3383e0af6990cad6ef31fd", "score": "0.45681724", "text": "def merge(orders, products):\n res = []\n for row in orders:\n for row1 in products:\n if row['product_id'] == row1['product_id']:\n batch_dict = {**row, **row1}\n res.append(batch_dict)\n return res", "title": "" }, { "docid": "37580fcee0a966d683ff2be711f446f2", "score": "0.4552214", "text": "def merge(self, another_bill):\n if not self.id:\n logger.debug('trying to merge into a bill with id=None, title=%s',\n self.title)\n self.save()\n if not another_bill.id:\n logger.debug('trying to merge a bill with id=None, title=%s',\n another_bill.title)\n another_bill.save()\n\n if self is another_bill:\n logger.debug('abort merging bill %d into itself' % self.id)\n return\n logger.debug('merging bill %d into bill %d' % (another_bill.id,\n self.id))\n\n other_kp = KnessetProposal.objects.filter(bill=another_bill)\n my_kp = KnessetProposal.objects.filter(bill=self)\n if my_kp and other_kp:\n logger.debug('abort merging bill %d into bill %d, because both '\n 'have KPs' % (another_bill.id, self.id))\n return\n\n for pv in another_bill.pre_votes.all():\n self.pre_votes.add(pv)\n for cm in another_bill.first_committee_meetings.all():\n self.first_committee_meetings.add(cm)\n if not self.first_vote and another_bill.first_vote:\n self.first_vote = another_bill.first_vote\n for cm in another_bill.second_committee_meetings.all():\n self.second_committee_meetings.add(cm)\n if not self.approval_vote and another_bill.approval_vote:\n self.approval_vote = another_bill.approval_vote\n for m in another_bill.proposers.all():\n self.proposers.add(m)\n for pp in another_bill.proposals.all():\n pp.bill = self\n pp.save()\n if other_kp:\n other_kp[0].bill = self\n other_kp[0].save()\n\n bill_ct = ContentType.objects.get_for_model(self)\n Comment.objects.filter(content_type=bill_ct,\n object_pk=another_bill.id).update(\n object_pk=self.id)\n for v in 
voting.models.Vote.objects.filter(content_type=bill_ct,\n object_id=another_bill.id):\n if voting.models.Vote.objects.filter(content_type=bill_ct,\n object_id=self.id,\n user=v.user).count() == 0:\n # only if this user did not vote on self, copy the vote from\n # another_bill\n v.object_id = self.id\n v.save()\n for f in Follow.objects.filter(content_type=bill_ct,\n object_id=another_bill.id):\n try:\n f.object_id = self.id\n f.save()\n except IntegrityError: # self was already being followed by the\n # same user\n pass\n for ti in TaggedItem.objects.filter(content_type=bill_ct,\n object_id=another_bill.id):\n if ti.tag not in self.tags:\n ti.object_id = self.id\n ti.save()\n for ab in another_bill.agendabills.all():\n try:\n ab.bill = self\n ab.save()\n except IntegrityError: # self was already in this agenda\n pass\n for be in another_bill.budget_ests.all():\n try:\n be.bill = self\n be.save()\n except IntegrityError: # same user already estimated self\n pass\n another_bill.delete()\n self.update_stage()", "title": "" }, { "docid": "019da226704986bfb9ef3c23b9a718de", "score": "0.45483857", "text": "def additional_action_done(self):\n for rec in self:\n user_email_list = []\n user = self.env['res.users']\n if rec.new_connection_id:\n history_id = self.env['connection.history'].search([\n ('additional_connection_id', '=', rec.id)], limit=1)\n if history_id:\n history_id.state = 'done'\n rec.new_connection_id.cylinder_qty += rec.qty\n rec.new_connection_id.security_deposit_amount += \\\n rec.security_deposit_amount\n rec.state = 'done'\n # Create Payment for security deposit and post\n self.env['account.payment'].create({\n 'partner_type': 'customer',\n 'payment_type': 'inbound',\n 'amount': rec.security_deposit_amount or 0.0,\n 'journal_id': self.env['account.journal'].search([\n ('company_id', '=', self.env.company.id),\n ('type', '=', 'cash')], limit=1).id,\n 'payment_method_id': self.env.ref(\n \"account.account_payment_method_manual_in\").id,\n 'partner_id': rec.new_connection_id.partner_id.id,\n 'communication':\n 'Security Deposit for Additional Connection ' +\n str(rec.new_connection_id.number),\n 'company_id': rec.company_id.id,\n 'currency_id': rec.currency_id.id,\n 'new_connection_id': rec.new_connection_id.id,\n }).post()\n # Send Approved Email notification for Sale users\n from_mail = user.browse(self._uid) and user.login or ''\n if rec.new_connection_id.user_id and \\\n rec.new_connection_id.user_id.login:\n user_email_list.append(rec.new_connection_id.user_id.login)\n account_grp = self.env.ref(\"account.group_account_manager\")\n # List of users which have account group assign\n for user in account_grp.users:\n if user.partner_id.email not in user_email_list:\n user_email_list.append(user.partner_id.email\n if user.partner_id.email\n else '')\n email_template = self.env.ref(\n 'big_new_registration.'\n 'email_additional_connection_approve_big')\n if email_template and user_email_list:\n user_email = ','.join(user_email_list)\n email_template.sudo().write({\n 'email_from': from_mail,\n 'email_to': user_email\n })\n email_template.send_mail(self.id, force_send=True)\n message = \"Additional Connection Status : \" \\\n \"Approved by Sales Point -> Approved by BIG\"\n rec.new_connection_id.message_post(body=message)\n return True", "title": "" }, { "docid": "4e93f85b7d69e16533de500c99cd0e5b", "score": "0.45408815", "text": "def new_transaction(self, owner, receiver, amount, drug_id):\n self.current_transactions.append({\n 'owner': owner,\n 'receiver': receiver,\n 'amount': 
amount,\n 'drug_id': drug_id\n \n })", "title": "" }, { "docid": "819104e2a9ae80c056246b1ff7b63946", "score": "0.45367426", "text": "def _post_tx(self, transaction, endpoint):\n\n headers = {\n 'content-type': 'application/vnd+fetch.transaction+json',\n }\n\n # format the URL\n url = format_contract_url(self.host, self.port, self.API_PREFIX, endpoint)\n\n # make the request\n r = self._session.post(url, data=transaction, headers=headers)\n success = 200 <= r.status_code < 300\n\n if not success:\n raise ApiError(\n 'Unable to fulfil transaction request {}.{}. Status Code {}'.format(self.API_PREFIX, endpoint,\n r.status_code))\n\n # parse the response\n response = r.json()\n\n # attempt to extract out the submitting transaction hash\n tx_list = response.get('txs', [])\n if len(tx_list):\n return tx_list[0]", "title": "" }, { "docid": "60a2033b22c388585e24f7c511133cc1", "score": "0.45288405", "text": "def _merge(accumulator):\n return Comment(txt = '\\n'.join(comm.txt for comm in accumulator),\n meta = MetaComment(lo = accumulator[0].meta.lo,\n hi = accumulator[-1].meta.hi,\n typ = accumulator[0].meta.typ))", "title": "" }, { "docid": "63017bf260f3ce61b0fe3fc32d422f07", "score": "0.45228392", "text": "def apply_transactions(\n self, transactions: List[TransactionMessage]\n ) -> \"OwnershipState\":\n new_state = copy.copy(self)\n for tx_message in transactions:\n new_state._update(tx_message)\n\n return new_state", "title": "" }, { "docid": "7d1521ec3217248647852f83078f574f", "score": "0.45179093", "text": "def adopt(self):\n for sent in self.all():\n sent.adopt()", "title": "" }, { "docid": "00c17e3028ba1ce61040536adf3f5a4b", "score": "0.4517472", "text": "def merge_all(self, tokens, emitter):\n if len(tokens) == 1:\n if len(tokens[0]) > 0:\n for tok in tokens[0]:\n other_con = into_connection(tok)\n self.merge_with(other_con, emitter)", "title": "" }, { "docid": "1fcaf1cefff1936e47429f1cef2f0a26", "score": "0.45134994", "text": "def merge(self):\n raise NotImplementedError('To be implemented')", "title": "" }, { "docid": "f5f29caac18ece7ca0a565330406dec5", "score": "0.4510011", "text": "async def prepair_transaction(self, messages: List[Message], memo: str):\n\n # Account data need to create a raw transaction.\n account = await self.get_account()\n\n # Getting chain id using tendermint cliend.\n chain_id = await self.get_chain_id()\n\n # Calculating the transaction fee.\n fee = Fee(\n gas_limit=self.max_gas,\n amount=[Coin(denom=\"ubnt\", amount=str(int(self.max_gas * self.gas_price)))],\n )\n\n # Creating the raw transaction.\n tx = Transaction(\n account=account,\n messages=messages,\n sign_mode=SIGN_MODE_DIRECT,\n privkey=self.wallet.private_key,\n fee=fee,\n memo=memo,\n chain_id=chain_id,\n )\n\n # Calculating the raw transaction bytes.\n raw_bytes = tx.create(sign_mode_handler=DirectSignModeHandler())\n\n # Signing the raw transaction bytes offline.\n signed_tx = tx.sign(raw_bytes)\n self.logger.debug(\"signed transaction, signed_tx=%s\", signed_tx)\n return signed_tx", "title": "" }, { "docid": "d62888e051aebe0a8eb05d667e2d1ce7", "score": "0.45078188", "text": "def calculate(self):\n date = self.start_date\n\n percent_payment = 0# (self.summa * self.yrate / 12)\n debt_payment = 0\n rest_payment = self.total_debt\n rest_payment_wo_percent = self.summa\n\n percent_payments = OrderedDict({date: percent_payment,})\n debt_payments = OrderedDict({date: debt_payment,})\n rest_payments = OrderedDict({date: rest_payment,})\n rest_payments_wo_percents = OrderedDict({date: 
rest_payment_wo_percent,})\n\n for i in range(0, self.mperiods):\n date = lastDayNextMonth(date)\n\n percent_payment = rest_payment_wo_percent * self.yrate / 12\n debt_payment = self.mpayment - percent_payment\n rest_payment -= self.mpayment\n rest_payment_wo_percent -= debt_payment\n\n if rest_payment < 0.01:\n rest_payment = 0\n\n if rest_payment_wo_percent < 0.01:\n rest_payment_wo_percent = 0\n\n percent_payments[date] = percent_payment\n debt_payments[date] = debt_payment\n rest_payments[date] = rest_payment\n rest_payments_wo_percents[date] = rest_payment_wo_percent\n\n self.percent_payments = percent_payments\n self.debt_payments = debt_payments\n self.rest_payments = rest_payments\n self.rest_payments_wo_percents = rest_payments_wo_percents", "title": "" }, { "docid": "c1757ed1593646f69062b4fce8dc1902", "score": "0.45034164", "text": "def remove_garbage(transactions):\n garbage = re.compile(r'\\w*[#@.\\d]\\w*|\\b(USD)\\b|\\s+$|')\n\n for trans in transactions:\n trans['Company'] = garbage.sub('', trans['Company'])\n trans['Company'] = re.sub(' +', ' ', trans['Company']).rstrip()\n\n return transactions", "title": "" }, { "docid": "9a44788f005e5fd904bb085b7fa98a05", "score": "0.44987327", "text": "def transfer_funds(self, from_user, to_user, amt):\n from_user.balance -= amt\n from_user.save()\n # because some oddballs like to tip themselves\n to_user = User.objects.get(id=to_user.id)\n to_user.balance += amt\n to_user.save()\n trans = Transaction(\n from_user=from_user,\n to_user=to_user,\n amount=amt,\n pending=False,\n accepted=True\n )\n trans.save()\n logger.info('Moved coin: %s', trans)\n return trans", "title": "" }, { "docid": "bf0637c9ff195107125d27ac5b7670ae", "score": "0.44969976", "text": "def merge_cash_bundles(db, p_id):\n return db.one(\"\"\"\n LOCK TABLE cash_bundles IN EXCLUSIVE MODE;\n WITH regroup AS (\n SELECT owner, origin, wallet_id, sum(amount) AS amount, max(ts) AS ts\n FROM cash_bundles\n WHERE owner = %s\n AND disputed IS NOT TRUE\n AND locked_for IS NULL\n GROUP BY owner, origin, wallet_id\n HAVING count(*) > 1\n ),\n inserted AS (\n INSERT INTO cash_bundles\n (owner, origin, amount, ts, wallet_id)\n SELECT owner, origin, amount, ts, wallet_id\n FROM regroup\n RETURNING *\n ),\n deleted AS (\n DELETE\n FROM cash_bundles b\n USING regroup g\n WHERE b.owner = g.owner\n AND b.origin = g.origin\n AND b.disputed IS NOT TRUE\n AND b.locked_for IS NULL\n AND b.wallet_id = g.wallet_id\n RETURNING b.*\n )\n SELECT (SELECT json_agg(d) FROM deleted d) AS before\n , (SELECT json_agg(i) FROM inserted i) AS after\n \"\"\", (p_id,))", "title": "" }, { "docid": "f640e0ad342dfc3fc9c1ecfd326eee29", "score": "0.44872737", "text": "def update_wallets(self):\n\n # reinitialize walletList before adding to it\n self.walletList = {}\n\n # accumulate balances\n for i in self.blockchain:\n for j in i.data['transactions']:\n if j['from'] == self.hash:\n # if balance exists, already, add to it, else set it\n try:\n self.walletList[j['to']] += j['value']\n except KeyError:\n self.walletList[j['to']] = j['value']\n\n # go through current transactions\n for i in self.transactions:\n # if balance exists, already, add to it, else set it\n try:\n self.walletList[i.recipient] += i.value\n except KeyError:\n self.walletList[i.recipient] = i.value", "title": "" }, { "docid": "6171536bbbb14fa904e26c58f5c8685e", "score": "0.44837183", "text": "def combine_data(self):\n format = '%Y-%m-%d'\n self.stock_data['Date'] = pd.to_datetime(self.stock_data['Date'], format=format)\n merge_data = 
pd.merge(self.sentiment_data, self.stock_data, on='Date')\n merge_data.fillna(inplace=True, value=0)\n\n return merge_data", "title": "" }, { "docid": "773e901eabcc2f42d49b638578cee073", "score": "0.4481442", "text": "def transaction(buyer, seller, stock, amount_of_product, amount_of_money):\n seller.sell(stock, amount_of_product, amount_of_money)\n buyer.buy(stock, amount_of_product, amount_of_money)", "title": "" }, { "docid": "674760199703b6f96442b3a6a9f95dc7", "score": "0.4477619", "text": "def to_records(self):\n return [{'date': self.date, 'account': acct, 'amount': amount} for acct, amount in self.transfers.items()]", "title": "" }, { "docid": "74f7f30262fe6a37cd1f2503159cb7ed", "score": "0.44747686", "text": "def record_transaction_per_user(self, sender, recipient, amount):\n if sender not in self.transactions_by_user:\n self.initialize_user_data(sender)\n if recipient not in self.transactions_by_user:\n self.initialize_user_data(recipient)\n # add to the sender totals\n self.transactions_by_user[sender]['sent_transactions'] += 1\n self.transactions_by_user[sender]['sent_totals'] += amount\n sender_balance = self.transactions_by_user[sender]['running_balance']\n if len(sender_balance) == 0:\n self.transactions_by_user[sender]['running_balance'].append(\n -amount)\n else:\n # get the last balance and subtract the amount from the running total\n sender_current_balance = self.transactions_by_user[sender]['running_balance'][-1]\n self.transactions_by_user[sender]['running_balance'].append(\n sender_current_balance - amount)\n # add to the recipient totals\n self.transactions_by_user[recipient]['received_transactions'] += 1\n self.transactions_by_user[recipient]['received_totals'] += amount\n receiver_balance = self.transactions_by_user[recipient]['running_balance']\n if len(receiver_balance) == 0:\n self.transactions_by_user[recipient]['running_balance'].append(\n amount)\n else:\n # get the last balance and add the amount to the running total\n receiver_current_balance = self.transactions_by_user[recipient]['running_balance'][-1]\n self.transactions_by_user[recipient]['running_balance'].append(\n receiver_current_balance + amount)", "title": "" } ]
e1ab27fa6412f3971e20f851c2ccf3ef
Give the user the option of automatically copying an encrypted/decrypted string to the Windows clipboard if Windows is the OS running on the user's computer.
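For reference, a minimal Python 3 sketch of the behaviour this query describes, modelled on the win32clipboard-based positive passage below; the function name and prompt wording are illustrative assumptions:

    import platform

    def copy_result_to_clipboard(cipher_text):
        # Only offer the clipboard option when the OS is Windows.
        if platform.system() != "Windows":
            print(cipher_text)
            return cipher_text
        choice = input("Store output in clipboard? (y/n): ")
        if choice.lower().startswith("y"):
            import win32clipboard  # provided by the pywin32 package
            win32clipboard.OpenClipboard()
            win32clipboard.EmptyClipboard()
            win32clipboard.SetClipboardText(cipher_text)
            win32clipboard.CloseClipboard()
        else:
            print(cipher_text)
        return cipher_text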
[ { "docid": "57b0ac6d36cf54d1601e78d07775b737", "score": "0.79971385", "text": "def clipboard(cipher_text):\n\n frame = str(inspect.stack())\n\n if platform.system() == 'Windows':\n if 'multiple_encryption' not in frame:\n import win32clipboard\n print cipher_text\n store_clip = raw_input('\\nStore output in clipboard? (y/n): ')\n if store_clip.startswith('y'):\n win32clipboard.OpenClipboard()\n win32clipboard.EmptyClipboard()\n win32clipboard.SetClipboardText(cipher_text)\n win32clipboard.CloseClipboard()\n else:\n return cipher_text\n elif platform.system() == 'Linux':\n print cipher_text", "title": "" } ]
[ { "docid": "8a5b87d7917bdaa8c4f4fbab9968cf45", "score": "0.6691027", "text": "def copy_to_clipboard(text):\n # try windows first\n try:\n import win32clipboard\n\n win32clipboard.OpenClipboard()\n win32clipboard.EmptyClipboard()\n win32clipboard.SetClipboardText(text)\n win32clipboard.CloseClipboard()\n return True\n except:\n pass\n # try xclip\n try:\n import subprocess\n\n p = subprocess.Popen(['xclip', '-selection', 'c'], stdin=subprocess.PIPE)\n p.stdin.write(text)\n p.stdin.close()\n retcode = p.wait()\n return True\n except:\n pass\n # try pbcopy (Os X)\n try:\n import subprocess\n\n p = subprocess.Popen(['pbcopy'], stdin=subprocess.PIPE)\n p.stdin.write(text)\n p.stdin.close()\n retcode = p.wait()\n return True\n except:\n pass\n # try os /linux\n try:\n import subprocess\n\n p = subprocess.Popen(['xsel'], stdin=subprocess.PIPE)\n p.stdin.write(text)\n p.stdin.close()\n retcode = p.wait()\n return True\n except:\n pass\n # try pygtk\n try:\n # Code from\n # http://www.vector-seven.com/2007/06/27/\n # passing-data-between-gtk-applications-with-gtkclipboard/\n import pygtk\n\n pygtk.require('2.0')\n import gtk\n # get the clipboard\n clipboard = gtk.clipboard_get()\n # set the clipboard text data\n clipboard.set_text(text)\n # make our data available to other applications\n clipboard.store()\n except:\n return False", "title": "" }, { "docid": "1999ed0deb2e652dff4ad5227a37c90a", "score": "0.66775334", "text": "def unsupported():\n print(\"Platform not supported for autoclearing clipboard.\")\n print(\n \"⚠️\\ Your password will be on the clipboard until you clear it manually!\")\n raise SystemExit(1)", "title": "" }, { "docid": "e5fff6c9fcda3fedfabf1fc010c4199b", "score": "0.6346173", "text": "def paste():\n ctypes.windll.user32.OpenClipboard(0)\n pcontents = ctypes.windll.user32.GetClipboardData(_CF_TEXT)\n data = ctypes.c_char_p(pcontents).value\n #ctypes.windll.kernel32.GlobalUnlock(pcontents)\n ctypes.windll.user32.CloseClipboard()\n return data.decode('utf-8')", "title": "" }, { "docid": "e8b0820dd7b580a960de3561a04b1e29", "score": "0.63368773", "text": "def copy_to_clipboard(self, text):\n self.logger.debug(\"copy_to_clipboard\")\n if platform.system() == \"Windows\":\n win32clipboard.OpenClipboard()\n win32clipboard.EmptyClipboard()\n win32clipboard.SetClipboardText(text)\n win32clipboard.CloseClipboard()\n else:\n clipboard.copy(text)", "title": "" }, { "docid": "6d3900691801e9c0e4cac83faa2460d0", "score": "0.6206263", "text": "def copy(string):\n GMEM_DDESHARE = 0x2000\n ctypes.windll.user32.OpenClipboard(0)\n ctypes.windll.user32.EmptyClipboard()\n hCd = ctypes.windll.kernel32.GlobalAlloc(GMEM_DDESHARE, len(string.encode('utf-8')) + 1)\n pchData = ctypes.windll.kernel32.GlobalLock(hCd)\n ctypes.cdll.msvcrt.strcpy(ctypes.c_char_p(pchData), string.encode('utf-8'))\n ctypes.windll.kernel32.GlobalUnlock(hCd)\n ctypes.windll.user32.SetClipboardData(_CF_TEXT, hCd)\n ctypes.windll.user32.CloseClipboard()", "title": "" }, { "docid": "26327dfa2f26214296ec8d6daa52568f", "score": "0.6126891", "text": "def copy_to_clipboard(what: str) -> str:\n # Primary + clipboard\n xsel_proc = subprocess.Popen(['xsel', '-pbi'], stdin=subprocess.PIPE)\n xsel_proc.communicate(bytes(what, encoding='utf-8'))\n return what", "title": "" }, { "docid": "d09e50bc79bf0d6d1a02dda5f362cb48", "score": "0.612153", "text": "def set_clipboard(text):\n\n xsel_proc = Popen(['xsel', '-bi'], stdin=PIPE)\n xsel_proc.communicate(bytes(text, 'utf-8'))", "title": "" }, { "docid": "7c9e5e8a851edc65662cad6aaa0061e7", 
"score": "0.6092396", "text": "def set_clipboard(data):\n\n encoded_str = unify(data).encode('utf-8')\n scpt_str = 'set the clipboard to \"{0}\"'.format(applescriptify(encoded_str))\n run_applescript(scpt_str)", "title": "" }, { "docid": "13e665fa176f1976025553324f5cf9d9", "score": "0.60423607", "text": "def test_getClipboard(self):\n text = \"Lorem Ipsum\"\n pyperclip.copy(text)\n self.assertEqual(getClipboard(), text)", "title": "" }, { "docid": "54d0c3ec0ebcf6d36615479e372ed67f", "score": "0.6009742", "text": "def test_noClipboard(self):\n noText = \"\"\n pyperclip.copy(noText)\n self.assertIsNone(getClipboard())", "title": "" }, { "docid": "ea36f27b5e994f6f2dc271024a7bb3b2", "score": "0.5970908", "text": "def setClipboard(myText):\n\ttry:\n\t\tmyClipboard = NSPasteboard.generalPasteboard()\n\t\tmyClipboard.declareTypes_owner_([NSStringPboardType], None)\n\t\tmyClipboard.setString_forType_(myText, NSStringPboardType)\n\t\treturn True\n\texcept Exception as e:\n\t\tprint(e)\n\t\timport traceback\n\t\tprint(traceback.format_exc())\n\t\treturn False", "title": "" }, { "docid": "2270ce982fb84d3d347b8edaf7d75528", "score": "0.59489733", "text": "def set_clipboard(data): \n scpt = \"\"\"\n set the clipboard to \"{0}\"\n \"\"\".format(_applescriptify(data))\n subprocess.call(['osascript', '-e', scpt])", "title": "" }, { "docid": "8da05f9b111adb1d959609fc42955a15", "score": "0.5925775", "text": "def get_copy_to_clipboard():\r\n\r\n return getattr(settings, \"WAGTAIL_CODE_BLOCK_COPY_TO_CLIPBOARD\", True)", "title": "" }, { "docid": "5901c6d4471ca0ca2ddf8d1bc251db1d", "score": "0.5924029", "text": "def OpenClipboard(owner=None):\n\thandle_nonzero_success(windll.user32.OpenClipboard(owner))", "title": "" }, { "docid": "cf4c021797e33060e30f6f6101debbab", "score": "0.5887179", "text": "def main():\n\n chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\\\n + '!@&$%^*-()+'\n\n pass_amount = int(input('Please enter how many passwords would you like: '))\n pass_length = int(input('Length of the password(s): '))\n\n password_list = []\n for i in range(pass_amount):\n password = \"\"\n for i in range(pass_length):\n password += random.choice(chars) # Get a random letter.\n password_list.append(password)\n\n print('------------------------------')\n for i in range(len(password_list)):\n print('Password:', password_list[i])\n print('------------------------------')\n if pass_amount == 1:\n try:\n import pyperclip\n pyperclip.copy(password)\n print(\"Success. Password available in clipboard.\")\n except ModuleNotFoundError as a:\n print(\"Can't copy password to clipboard. Missing modules.\")\n module = str(a).split()[3]\n print(\"Missing module:\", module)\n print(\"Use: 'pip install \" + module)", "title": "" }, { "docid": "20e39980e263303906346fe31818f26f", "score": "0.5861881", "text": "def test_pyperclipModule(self):\n randomText = \"Lorem Ipsum Dolor Sit Amet\"\n pyperclip.copy(randomText)\n copiedText = pyperclip.paste()\n self.assertEqual(copiedText, randomText)", "title": "" }, { "docid": "2e6b80c46394270e3c68c3b3b092b77a", "score": "0.5847506", "text": "def windowsss():\n print(\"We noticed you are on Windows 🤮. \", end='')\n print(\"Please switch to something more decent. 
:p\")\n print(\n \"⚠️\\ Your password will be on the clipboard until you clear it manually!\")\n raise SystemExit(1)", "title": "" }, { "docid": "dabf8f8f29f19b2aebb2c9a0fa3c2c7f", "score": "0.58426136", "text": "def SetClipboard(content):\n\n cb = file( \"/dev/clipboard\", \"w\" )\n cb.write(content)\n cb.close()", "title": "" }, { "docid": "e9cc47ab9bf5b895fd8da82bc840e879", "score": "0.5824485", "text": "def _to_pbcopy(txt: str, pbcopy: bool) -> None:\n txt = txt.rstrip(\"\\n\")\n if not pbcopy:\n print(txt)\n return\n if hsinte.is_running_on_macos():\n # -n = no new line\n cmd = f\"echo -n '{txt}' | pbcopy\"\n hsinte.system(cmd)\n print(f\"\\n# Copied to system clipboard:\\n{txt}\")\n else:\n _LOG.warning(\"pbcopy works only on macOS\")", "title": "" }, { "docid": "aec763924bac53b8b1eb1f36aed953c4", "score": "0.5737337", "text": "def SetClipboardText(text):\n data_o = wx.TextDataObject()\n data_o.SetText(text)\n if wx.TheClipboard.IsOpend() or wx.TheClipboard.Open():\n wx.TheClipboard.SetData(data_o)\n wx.TheClipboard.Close()", "title": "" }, { "docid": "939778e57dc0ae39b4a2757ed549f3aa", "score": "0.5701459", "text": "def copy_clipboard(name,flag):\n\tnew=\"\"\n\tfor i in name:\n\t\tif i==\".\":\n\t\t\tbreak\n\t\tnew+=i\n\tif flag==1:\n\t\tstri=\"xclip -in -selection c \"+new+\".html\"\n\telse:\n\t\tstri=\"xclip -in -selection c \"+new+\".rtf\"\n\tos.system(stri)", "title": "" }, { "docid": "aba54f465abb56c64813b1bcc114bab0", "score": "0.5641562", "text": "def to_clipboard(img: bytes) -> None:\n cmd = [\"xclip\", \"-selection\", \"clipboard\", \"-t\", \"image/png\"]\n try:\n subprocess.run(cmd, input=img)\n except FileNotFoundError:\n sys.stderr.write(\"We can't find {} in your $PATH\\n\".format(cmd[0]))\n sys.exit(1)", "title": "" }, { "docid": "aa9cfb693bd1fe5c40668a204b225b50", "score": "0.5628487", "text": "async def set_clipboard(self, s: str, *, blocking: bool = True) -> Union[None, AsyncFutureResult[None]]:\n args = [s]\n return await self._transport.function_call('AHKSetClipboard', args, blocking=blocking)", "title": "" }, { "docid": "7202cd2906e176765339c4e54b97613e", "score": "0.55932784", "text": "def pastePlain(self):\n text = QtGui.QApplication.clipboard().mimeData().text()\n if text and self.hasFocus():\n self.insertPlainText(text)", "title": "" }, { "docid": "ac689eb22d6f648ccb7b73fe5b18aea7", "score": "0.55714494", "text": "def copy(self) -> None:\n text, success = self.download(formatText=True)\n if success:\n clip_board = QApplication.clipboard()\n clip_board.setText(text)", "title": "" }, { "docid": "efb1edae91d0124f4403ccc5f5489a75", "score": "0.55471367", "text": "def paste_from_clipboard(self):\n self.logger.debug(\"paste_from_clipboard\")\n if platform.system() == \"Windows\":\n win32clipboard.OpenClipboard()\n if win32clipboard.IsClipboardFormatAvailable(win32clipboard.CF_TEXT):\n text = win32clipboard.GetClipboardData()\n else:\n text = None\n win32clipboard.CloseClipboard()\n return text\n else:\n return clipboard.paste()", "title": "" }, { "docid": "c19cedb570ea30c49e3dcbbc9516a2ae", "score": "0.5527079", "text": "def emacs_draft_clipboard():\n actions.user.emacs_draft_run(lisp_edit_clipboard)", "title": "" }, { "docid": "2f96c31ee16ae870e7038b5a2c2b574f", "score": "0.551352", "text": "def show_user_result(result_string):\n droid.dialogCreateAlert(\"Endecryptor Result\", result_string)\n droid.dialogSetPositiveButton(\"Copy\")\n droid.dialogSetNegativeButton(\"Exit\")\n droid.dialogShow()\n if droid.dialogGetResponse().result[\"which\"] == \"positive\":\n 
droid.setClipboard(result_string)\n droid.makeToast(\"Copied to Clipboard.\")", "title": "" }, { "docid": "9feae48007b74767502dea25666bbce2", "score": "0.5505484", "text": "def copy_command():\n global text\n global saved\n x = text.selection_get()\n saved = x", "title": "" }, { "docid": "1ed67603d20c6f402f0760679a15c5f1", "score": "0.5502604", "text": "def encrypt_decrypt_prompt(self):\n\n self.option = raw_input('Encrypt/Decrypt (e/d)?: ')\n if self.check_option():\n return self.option\n else:\n return self.encrypt_decrypt_prompt()", "title": "" }, { "docid": "dad61793d7981169b6c3ba4f66d3a21e", "score": "0.5482059", "text": "def Copy(self):\n text = self.GetSelectedText()\n data_o = wx.TextDataObject()\n data_o.SetText(text)\n if wx.TheClipboard.IsOpened() or wx.TheClipboard.Open():\n wx.TheClipboard.SetData(data_o)\n wx.TheClipboard.Flush()\n wx.TheClipboard.Close()", "title": "" }, { "docid": "b041a8cf99d82d32a0235b3dae56509b", "score": "0.5472072", "text": "def generate_password():\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n symbols = ['!', '#', '$', '%', '&', '*']\n\n # Get a random amount of characters of each type\n password_letters = [choice(letters) for _ in range(randint(8, 10))]\n password_numbers = [choice(numbers) for _ in range(randint(3, 7))]\n password_symbols = [choice(symbols) for _ in range(randint(6, 8))]\n\n # Concatenate the different types\n password_list = password_letters + password_numbers + password_symbols\n\n # Shuffle the password\n shuffle(password_list)\n\n # Join the list of characters, insert new password to GUI, and copy to clipboard\n password = \"\".join(password_list)\n password_entry.delete(0, END)\n password_entry.insert(0, password)\n pyperclip.copy(password)", "title": "" }, { "docid": "47ccc0dceb062a11e019bf197facb162", "score": "0.54494447", "text": "def get_clipboard():\n\n return os.popen('xsel').read()", "title": "" }, { "docid": "580dff4d465c29dd73c3191033daac58", "score": "0.54191244", "text": "def clipboard_copy(self):\n text = self.ui.textBrowser.toPlainText()\n self.clipboard.setText(text, QClipboard.Clipboard)", "title": "" }, { "docid": "aaf8125d1bd6f2acabee28d1e5a1ec46", "score": "0.539326", "text": "def copySelected(self) -> None:\n text, success = self.download(allData=False, formatText=True)\n if success:\n clip_board = QApplication.clipboard()\n clip_board.setText(text)", "title": "" }, { "docid": "70c3e009c870076f92451f86087e80c2", "score": "0.5367837", "text": "def send_to_clipboard(self, clip_type, data):\n win32clipboard.OpenClipboard()\n win32clipboard.EmptyClipboard()\n win32clipboard.SetClipboardData(clip_type, data)\n win32clipboard.CloseClipboard()", "title": "" }, { "docid": "269ce28e3d38da2f233b203dd49fd5fd", "score": "0.5318778", "text": "def caesar(input_file, output_file, key, decrypt):\r\n if input_file:\r\n text = input_file.read()\r\n else:\r\n # if user wants to encrypt text, we don't want to have this show up in the command history\r\n text = click.prompt('Please enter a text', hide_input=not decrypt)\r\n\r\n if decrypt:\r\n key = -key\r\n\r\n ciphertext = encrypt(text, key)\r\n\r\n if output_file:\r\n output_file.write(ciphertext)\r\n else:\r\n click.echo(ciphertext)", "title": "" }, { "docid": 
"717e9b5bc867c3909ec7cf1e114bd295", "score": "0.5288389", "text": "def win_paste(event):\n pass", "title": "" }, { "docid": "81e670df464c1d4baa91e5046dcee0f6", "score": "0.5288383", "text": "def copyISIN(self, event):\r\n if wx.TheClipboard.Open():\r\n wx.TheClipboard.SetData(wx.TextDataObject(self.clickedISIN))\r\n wx.TheClipboard.Close()", "title": "" }, { "docid": "da408856cdf21a236830348cff13c0c9", "score": "0.52503073", "text": "def copy(self):\n self.text_buffer.copy_clipboard(gtk.Clipboard())", "title": "" }, { "docid": "5f14d3017ffa8370cf70e350080b0fd6", "score": "0.5215054", "text": "def copy_keys(self):\n self.clipboard = core.copy_keys()", "title": "" }, { "docid": "35f7740d6c10917f32402f9bcb3cdca4", "score": "0.520918", "text": "def test04_copy_code():\n base.info(\"Add Python code to code input.\")\n code_input = base.find_element(driver, \"code_input\")\n code_input.send_keys(python_script)\n\n base.info(\"Click on submit button \")\n base.find_element(driver, \"submit_button\").click()\n base.info(\"Click on copy code button.\")\n base.find_element(driver, \"copy_code\").click()\n\n base.info(\" Check that code copied successfully.\")\n copied_code = pyperclip.paste()\n assert CODE_FIRST_LINE in copied_code", "title": "" }, { "docid": "143ffa486bdae9bca1f5e62f2b83d8d7", "score": "0.5203483", "text": "def paste_from_clipboard(self, element):\n self.element.clear()\n self.element.send_keys(keys.Keys.CONTROL, 'v')\n element.click()\n element = self._driver.find_element(*self._locator)\n self.text = element.get_attribute(\"value\")", "title": "" }, { "docid": "c06a3d99726bec43183960426bf32adb", "score": "0.5194503", "text": "def generate_password():\n size_input = size_entry.get()\n if size_input.strip().isdigit():\n size = int(size_input.strip())\n else:\n size = 16\n\n chars = (\n string.ascii_uppercase\n + string.ascii_lowercase\n + string.digits\n + string.punctuation\n )\n chars = shuffle_string(chars)\n\n new_password = \"\"\n while len(new_password) < size:\n new_password += chars[random.randint(0, len(chars) - 1)]\n\n new_password = shuffle_string(new_password)\n\n update_file(new_password)\n pyperclip.copy(new_password)\n\n password_dest.delete(0, END)\n password_dest.insert(0, new_password)\n\n print_strength(new_password)\n\n Label(root, text=\"Updated passwords.txt\\nCopied to Clipboard :)\", bg=\"white\").place(\n relx=0.5, rely=0.6, anchor=CENTER\n )", "title": "" }, { "docid": "a2cdfa781fa0535936a8c9e142f53697", "score": "0.5152303", "text": "def canPaste(self): #$NON-NLS-1$\r", "title": "" }, { "docid": "135fa54a4b5dc3a76f1f1a8b63cb0246", "score": "0.51423264", "text": "def on_copy_path_to_clipboard_button_callback(self):\n node = hou.pwd()\n\n # get the path depending if in full or proxy mode:\n render_path = self.__get_render_path(node)\n\n # use Qt to copy the path to the clipboard:\n from sgtk.platform.qt import QtGui\n QtGui.QApplication.clipboard().setText(render_path)", "title": "" }, { "docid": "dc68289e91265071fc0ef461296af913", "score": "0.5137429", "text": "def paste():", "title": "" }, { "docid": "560f12d7f48ba2f7846d1caf1f32c95c", "score": "0.51363206", "text": "def onCopyInfo(self, evt):\n\t\tinfo = dabo.ui.getSystemInfo(\"string\")\n\t\tappdoc = self.getAppSpecificString()\n\t\tself.Application.copyToClipboard(\"\\n\\n\".join((info, appdoc)))", "title": "" }, { "docid": "aa6db9be6463fde0797ab24c7b354339", "score": "0.51218104", "text": "def CanPaste(self):\n if not wx.TheClipboard.Open():\n return False\n 
r=wx.TheClipboard.IsSupported(wx.DataFormat(wx.DF_FILENAME))\n wx.TheClipboard.Close()\n return r", "title": "" }, { "docid": "a14a712a892bd1afe1c6846831f372dd", "score": "0.5116549", "text": "def copy_to_clipboard(obj, clipboard):\r\n\t## support copying text to clipboard\r\n\t## as well as files in both the general uri-list representation\r\n\t## and in nautilus' file copy clipboard type\r\n\ttarget_ids = (uri_id, text_id, nautilus_id) = (80, 81, 82)\r\n\tnautilus_target = 'x-special/gnome-copied-files'\r\n\r\n\t# udata is the data dict\r\n\tdef store(clipboard, sdata, info, udata):\r\n\t\tif info == uri_id:\r\n\t\t\tsdata.set_uris(udata[uri_id])\r\n\t\tif info == text_id:\r\n\t\t\tsdata.set_text(udata[text_id])\r\n\t\tif info == nautilus_id:\r\n\t\t\tstr_data_format = 8\r\n\t\t\tsdata.set(nautilus_target, str_data_format, udata[nautilus_id])\r\n\tdef clear(clipboard, udata):\r\n\t\tpass\r\n\r\n\ttargets = []\r\n\tdata = {}\r\n\ttry:\r\n\t\turilist = obj.get_urilist_representation()\r\n\texcept AttributeError:\r\n\t\tpass\r\n\telse:\r\n\t\tif urilist:\r\n\t\t\ttargets = gtk.target_list_add_uri_targets(targets, uri_id)\r\n\t\t\ttargets.append((nautilus_target, 0, nautilus_id))\r\n\t\t\tdata[uri_id] = urilist\r\n\t\t\tdata[nautilus_id] = 'copy\\n' + '\\n'.join(urilist)\r\n\r\n\ttry:\r\n\t\ttext = obj.get_text_representation()\r\n\texcept AttributeError:\r\n\t\tpass\r\n\telse:\r\n\t\ttargets = gtk.target_list_add_text_targets(targets, text_id)\r\n\t\tdata[text_id] = text\r\n\tif data:\r\n\t\tclipboard.set_with_data(targets, store, clear, data)\r\n\t\t# store all targets\r\n\t\tclipboard.set_can_store(targets)\r\n\t\treturn True\r\n\treturn False", "title": "" }, { "docid": "f6d50e6228c92cb23b9e22ad3fdd8a26", "score": "0.5103011", "text": "def generate_password():\n letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',\n 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',\n 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']\n\n # The program generates a random length for the password between 12 and 18 characters\n # But just change the range if you would like\n password_letters = [choice(letters) for char in range(randint(8, 10))]\n password_symbols = [choice(symbols) for symbol in range(randint(2, 4))]\n password_numbers = [choice(numbers) for number in range(randint(2, 4))]\n password_list = password_letters + password_symbols + password_numbers\n shuffle(password_list) # Nice way to change the order, useful to check out if you've never used it before\n\n password = \"\".join(password_list) # Joins together all the symbols in the password_list to a string\n\n password_input.delete(0, END) # Clears the password_input field in the GUI\n password_input.insert(0, password)\n pyperclip.copy(password) # Copies the password to your clipboard if you want to use it right away", "title": "" }, { "docid": "62b968fc545961ca19934f8a06de4479", "score": "0.5069248", "text": "def from_clipboard():\n s = pyperclip.paste()\n return from_bmatrix(s)", "title": "" }, { "docid": "4770e831d654e2580b073177ad423c6e", "score": "0.5068591", "text": "def getpass(self, prompt, default=None):\n password = click.prompt(prompt, hide_input=True, default=default)\n\n # https://github.com/softlayer/softlayer-python/issues/1436\n # click.prompt uses python's getpass() in the background\n # 
https://github.com/python/cpython/blob/3.9/Lib/getpass.py#L97\n # In windows, shift+insert actually inputs the below 2 characters\n # If we detect those 2 characters, need to manually read from the clipbaord instead\n # https://stackoverflow.com/questions/101128/how-do-i-read-text-from-the-clipboard\n if password == 'àR':\n # tkinter is a built in python gui, but it has clipboard reading functions.\n # pylint: disable=import-outside-toplevel\n from tkinter import Tk\n tk_manager = Tk()\n password = tk_manager.clipboard_get()\n # keep the window from showing\n tk_manager.withdraw()\n return password", "title": "" }, { "docid": "9670134565130dd6fef8746dbed1a224", "score": "0.50325596", "text": "def decrypt(ctx: click.Context, text: str) -> None:\n tcrypt = ctx.obj[\"tcrypt\"]\n\n if not text:\n text = get_stream()\n\n println(tcrypt.decrypt(text))", "title": "" }, { "docid": "d92454cad981e7fe2424b6f5352f3c11", "score": "0.502184", "text": "def clear_clipboard(self):\n self.logger.debug(\"clear_clipboard\")\n if platform.system() == \"Windows\":\n win32clipboard.OpenClipboard()\n win32clipboard.EmptyClipboard()\n win32clipboard.CloseClipboard()\n else:\n clipboard.copy(\"\")", "title": "" }, { "docid": "60e4ad24baadb121532bb298f6904c83", "score": "0.50101763", "text": "def PasswordInClearText(self) -> str:", "title": "" }, { "docid": "295ff719cad226712104d74c5ca1cd3c", "score": "0.5001278", "text": "def decrypt(self):\n\n if self.ui.radio_button_text_source.isChecked():\n self.decrypt_text()\n else:\n self.decrypt_file()", "title": "" }, { "docid": "37c918cf5878b28de5698b466e6487ca", "score": "0.49898326", "text": "def copy_to_clipboard(self):\n clipboard = QtGui.QApplication.clipboard()\n pixmap = QtGui.QPixmap.grabWidget( self.canvas )\n clipboard.setPixmap( pixmap )", "title": "" }, { "docid": "b1ded3830f43d7512d1ae317061c5375", "score": "0.4985541", "text": "def store_clipboard(db_manager):\n user_clipboard_type = clipboard.get_clipboard_type()\n if user_clipboard_type is None:\n # If we can't support this clipboard, show an error (will stop after dialog closed)\n ui.show_unsupported_clipboard_warning()\n sys.exit()\n user_clipboard_contents = clipboard.get_clipboard()\n user_clipboard_preview = clipboard.get_clipboard_preview()\n current_clipboard_id = db_manager.current_clipboard\n db_manager.set_clipboard(current_clipboard_id, user_clipboard_type, user_clipboard_contents, user_clipboard_preview)", "title": "" }, { "docid": "4efead11180ae8339d7709d935f3e191", "score": "0.4976822", "text": "def encrypt(self):\n\n if self.ui.radio_button_text_source.isChecked():\n self.encrypt_text()\n else:\n self.encrypt_file()", "title": "" }, { "docid": "67d30b1a00d3a0a6a390eace05fae4d9", "score": "0.49714556", "text": "def paste(self):\n self.text_buffer.paste_clipboard(gtk.Clipboard(), None, True)", "title": "" }, { "docid": "49a925469e46c98b58486716da067176", "score": "0.49558985", "text": "def test_copy_paste(self):\n self.component.set_selection(1, 2)\n self.component.copy()\n self.set_cursor_position(self.widget, 0)\n self.component.paste()\n self.assertEqual(self.get_value(self.widget), u'babc')", "title": "" }, { "docid": "b730fd5f08a5134e4c72c2853b41aa1a", "score": "0.49540475", "text": "def copy_with_confirmation(from_path, to_path):\n from_path = Path(from_path)\n to_path = Path(to_path)\n\n if to_path.exists():\n print(f\"Are you sure you want to overwrite {to_path}?\")\n confirmation = input()\n if confirmation.lower() in [\"y\", \"yes\"]:\n shutil.copy(from_path, to_path)\n else:\n 
shutil.copy(from_path, to_path)", "title": "" }, { "docid": "25eae308836053fed0e58f4f240950b8", "score": "0.49512678", "text": "def encrypt(ctx: click.Context, text: str) -> None:\n tcrypt = ctx.obj[\"tcrypt\"]\n\n if not text:\n text = get_stream()\n\n println(tcrypt.encrypt(text))", "title": "" }, { "docid": "1c8c4a6b88e6c3e40ac27370d9142250", "score": "0.49511757", "text": "def get_clipboard(self):\n wc.OpenClipboard()\n url = wc.GetClipboardData(win32con.CF_TEXT)\n wc.CloseClipboard() # it must be close, else we will get the same url from clipboard\n return url", "title": "" }, { "docid": "6e54569d616df1985244d143bc8b40c3", "score": "0.49469337", "text": "def share_copy(self):\n self.oap.tap(BTN['share'])\n time.sleep(0.3)\n # 左拉滑动\n self.oap.swap([910, 1748], [351, 1734])\n self.oap.tap(BTN['copy2'])\n time.sleep(0.5)\n # adb shell am broadcast -a clipper.get\n command = r'adb shell am broadcast -a clipper.get >> data.txt'\n system(command)", "title": "" }, { "docid": "292f3090c5870d2b990cc8453cef3288", "score": "0.49450704", "text": "def Copy(self, events=None):\r\n\r\n \"\"\" METHOD 1\"\"\"\r\n main_text.event_generate(\"<<Copy>>\")\r\n\r\n \"\"\" METHOD 2\"\"\"\r\n # selected_text = main_text.selection_get()\r\n # main_text.clipboard_clear()\r\n # main_text.clipboard_append(selected_text)\r", "title": "" }, { "docid": "dc49ef074634b8b7eef6260e5c83d716", "score": "0.49359542", "text": "def string_secret_save(string_plain, path, box):\n string_cipher = string_encrypt(string_plain, box, encoder=Base64Encoder)\n files.string_save(string_cipher)", "title": "" }, { "docid": "a23b116b93c776d0d31c3a88a05f9141", "score": "0.49337375", "text": "def on_actioncopy_triggered(self):\n print(\"你点击了 copy\")", "title": "" }, { "docid": "92d31e08ce31c6c00f2e124d6a8557d4", "score": "0.4932918", "text": "async def get_clipboard(self, *, blocking: bool = True) -> Union[str, AsyncFutureResult[str]]:\n return await self._transport.function_call('AHKGetClipboard', blocking=blocking)", "title": "" }, { "docid": "fa2318d5b10ddd10908e419fe287c521", "score": "0.49313134", "text": "def cmd_password():", "title": "" }, { "docid": "cc03c8594ad110a1e8ea614f81a49f99", "score": "0.4902098", "text": "def cut_keys(self):\n self.clipboard = core.cut_keys()", "title": "" }, { "docid": "13d184e3dc7f6fae617d4b3ff14de027", "score": "0.48874918", "text": "def on_control_c(self, event=None):\n\n def copy_text(choice_data: CloppyChoiceMadeEventData):\n if choice_data.choice == 'Yes':\n root: tk.Tk = self.master\n root.clipboard_clear()\n root.clipboard_append(self.get_selected_text())\n\n cloppy_yesno(\n self,\n \"It looks like you're trying to copy some text.\\n\"\n \"You might not have highlighted the correct section to copy.\\n\"\n \"Perhaps it'd be a good idea to double check now.\",\n copy_text\n ).show()\n\n return 'break'", "title": "" }, { "docid": "c57389868c41aee313272ff43bd3e115", "score": "0.48868445", "text": "def can_crack(self, pwd):", "title": "" }, { "docid": "721875f9fa37ec618efd5c11378f1a5d", "score": "0.48861235", "text": "def paste_keys(self):\n core.paste_keys(self.clipboard)", "title": "" }, { "docid": "d8e0c3f2a2f109ad47c1eab25eb4aa39", "score": "0.48771575", "text": "def get_clipboard_data(self):\n win32clipboard.OpenClipboard()\n d = win32clipboard.GetClipboardData(win32clipboard.CF_UNICODETEXT)\n win32clipboard.CloseClipboard()\n return d", "title": "" }, { "docid": "b6f8a59bff82fbfa1d32a81d2aafc5d7", "score": "0.48663312", "text": "def on_copy(self):\n\n self.master.editor_window.on_control_c()", 
"title": "" }, { "docid": "13f7b879780d52968a4848fa066eb0d0", "score": "0.48618853", "text": "def paste(self): #$NON-NLS-1$\r", "title": "" }, { "docid": "35ee6cbbb11fe50363012f8335c67fcb", "score": "0.48531017", "text": "def _obscurepass(self, line):\n if line.find(self._sshpass) != -1:\n try:\n hide = [line.find(self._sshpass), line.find(self._sshpass) + len(self._sshpass)]\n return line[:hide[0]] + \"***SECRET***\" + line[hide[1]:]\n except:\n return line\n else:\n return line", "title": "" }, { "docid": "6da088c419b169be1e07b2ee39abc7c0", "score": "0.48409355", "text": "def emacs_draft_selection(copy_or_cut: str = \"default\"):\n assert copy_or_cut in [\"copy\", \"cut\", \"default\"]\n if copy_or_cut == \"default\": copy_or_cut = setting_copy_or_cut.get()\n assert copy_or_cut in [\"copy\", \"cut\"]\n global source_window\n source_window = ui.active_window()\n clip.set_text(\"\")\n actions.edit.copy() if copy_or_cut == \"copy\" else actions.edit.cut()\n clip.await_change(old=\"\")\n #print(f\"***** clip.text() = {clip.text()!r} *****\")\n if clip.text(): actions.user.emacs_draft_clipboard()\n else: actions.user.emacs_draft_empty()", "title": "" }, { "docid": "0a50bc681910fee702895f45ea3bf569", "score": "0.481851", "text": "def paste_command():\n global text\n global saved\n index = text.index(INSERT)\n text.insert(index, saved)", "title": "" }, { "docid": "c37e3942433165063c6532f29ae858e7", "score": "0.4805247", "text": "def decrypt(self,input_text):\r\n self.text_shift = (-1 * self.text_shift) # Apply the opposite shift by *(-1) ...\r\n output_text = self.encrypt_raw(input_text) # ... then call the encrypt again with new shift.\r\n return output_text", "title": "" }, { "docid": "00ce4b3f61df4952bb637f0a51743415", "score": "0.48005444", "text": "def GetClipboardText(self):\n text_obj = wx.TextDataObject()\n rtext = \"\"\n if wx.TheClipboard.IsOpend() or wx.TheClipboard.Open():\n if wx.TheClipboard.GetData(text_obj):\n rtext = text_obj.GetText()\n wx.TheClipboard.Close()\n return rtext", "title": "" }, { "docid": "4bfc957e443b2085bf7bdc43ee221df9", "score": "0.4791615", "text": "def copy_to_clipboard(self):\n try:\n s = [\"\\t\".join([f\"{name}\" for name in self.columns])]\n for row in self.rows:\n s.append(\"\\t\".join((str(i) for i in row)))\n s = \"\\n\".join(s)\n pyperclip.copy(s)\n except MemoryError:\n raise MemoryError(\"Cannot copy to clipboard. 
Select slice instead.\")", "title": "" }, { "docid": "13e15e761e831c1c58a3e4c5d666cb26", "score": "0.47900182", "text": "def copySel(self):\n QtGui.QApplication.clipboard().setText(self.serialize(useSelection=True))", "title": "" }, { "docid": "066dd0c44a7cf8ff44bf20cc5c1e2b58", "score": "0.47857523", "text": "def cut(self):\n self.text_buffer.cut_clipboard(gtk.Clipboard(), True)", "title": "" }, { "docid": "0dcd42b5a8089ff2a5208d78d7046448", "score": "0.47795695", "text": "def encryption_input_from_console():\n raw_message=input(\"Please enter your message: \")\n uppercase_raw_message=raw_message.upper()\n message_into_values=message_convertor_to_values(uppercase_raw_message)\n\n k=int(input(\"Please enter the value of encryptor: \"))\n\n encrypted_values=encryption(k,message_into_values)\n\n encrypted_message=translation_of_message(encrypted_values)\n print(\"\".join(encrypted_message))", "title": "" }, { "docid": "35e5a038834ffce0fdb3433bd7557604", "score": "0.477741", "text": "def copyToClipboard(self):\r\n clipboard = \"\"\r\n\r\n for r in range(self.rowCount()):\r\n for c in range(self.columnCount()):\r\n idx = self.index(r, c)\r\n clipboard += str(self.data(idx, role=Qt.DisplayRole))\r\n if c != (self.columnCount() - 1):\r\n clipboard += \"\\t\"\r\n clipboard += \"\\n\"\r\n\r\n # copy to the system clipboard\r\n sys_clip = QtWidgets.QApplication.clipboard()\r\n sys_clip.setText(clipboard)", "title": "" }, { "docid": "189a24519d96bb280044d4c6f33933ab", "score": "0.4770043", "text": "def _copy_result(result, html=False):\n\n (copy_from, extra_spacing,) = (result.char, ' ',)\n if html:\n copy_from = cgi.escape(copy_from).encode(\n 'ascii', 'xmlcharrefreplace'\n ).decode('ascii')\n extra_spacing = ''\n\n sys.stdout.write((\n 'copying {fore.GREEN}{copy_from}{fore.RESET}{extra_spacing} '\n 'to clipboard... 
'\n ).format(**locals(), **COLOR))\n pyperclip.copy(copy_from)\n sys.stdout.write((\n '{style.BRIGHT}{fore.GREEN}✓{style.RESET_ALL}\\n'\n ).format(**COLOR))", "title": "" }, { "docid": "62900be85cb8c5a420892b4ba1d698af", "score": "0.47388858", "text": "def prompt_for_password(prompt_text):\n return getpass.getpass(prompt_text)", "title": "" }, { "docid": "c177d14528fa66daafe7ecb3b077bed3", "score": "0.473758", "text": "def paste(self, *_args):\n text = self.service_path_fld.selection_get(selection='CLIPBOARD')\n self.service_path_fld.insert('insert', text)", "title": "" }, { "docid": "cdab6d127a2e4d08cb222b83bc06e271", "score": "0.47300997", "text": "def encrypt(event=None): # event is passed by binders.\n global identity\n global myTmpDir\n global privKey\n global pubKey\n global mod\n\n msg = inputText.get(\"1.0\",tkinter.END)[:-1]\n outText.delete('1.0', tkinter.END)\n\n # create file\n f = open(myTmpDir + \"locEnc\" + str(identity) + \".bin\",\"w+\")\n f.close()\n\n f = open(myTmpDir + 'pt' + str(identity) + '.bin','w')\n f.write(msg)\n f.close()\n\n command = \"rsa.exe e \" + myTmpDir + \"pt\" + str(identity) + \".bin \"+ myTmpDir + \"locEnc\" + str(identity) + \".bin \" + mod + \" \" + pubKey\n print(\"command encrypt: \", command, \"\\n\")\n os.popen(command)\n time.sleep(1)\n\n locEncFileName = myTmpDir + \"locEnc\" + str(identity) + \".bin\"\n print(locEncFileName)\n\n ctP = open(locEncFileName, \"rb\")\n readFile = ctP.read()\n ctP.close()\n\n print(bytes(readFile))\n # Convert to hex representation\n digest = base64.encodestring(bytes(readFile))\n\n outText.insert(tkinter.END, digest)", "title": "" }, { "docid": "7a435458f719b88f7bf6863a5522ea78", "score": "0.4723832", "text": "def encryptText(data, gui):\n password = gui.passwordEntry.get().strip('\\n')\n # binary data to be encrypted\n pbdata = str.encode(data)\n\n # input plaintext binary stream\n fIn = io.BytesIO(pbdata)\n\n # initialize ciphertext binary stream\n fCiph = io.BytesIO()\n\n # encrypt stream\n pyAesCrypt.encryptStream(fIn, fCiph, password, bufferSize)\n\n return str(fCiph.getvalue())", "title": "" }, { "docid": "7ed3fa9a04e556bf10f226ea8dd349e8", "score": "0.47189394", "text": "def decrypt(event=None): # event is passed by binders.\n global identity\n global myTmpDir\n global privKey\n global pubKey\n global mod\n\n msg = inputText.get(\"1.0\",tkinter.END)\n outText.delete('1.0', tkinter.END)\n\n # create file\n f = open(myTmpDir + \"ptSender\" + str(identity) + \".bin\", \"w+\")\n f.close()\n\n decB64Msg = base64.decodestring(str.encode(msg))\n\n f = open(myTmpDir + 'ct' + str(identity) + '.bin','wb')\n f.write(decB64Msg)\n f.close()\n\n command = \"rsa.exe d \" + myTmpDir + \"ct\" + str(identity) + \".bin \" + myTmpDir + \"ptSender\" + str(identity) + \".bin \" + mod + \" \" + privKey\n print(\"command decrypt: \", command, \"\\n\")\n os.popen(command)\n time.sleep(1)\n\n with open(myTmpDir + \"ptSender\" + str(identity) + \".bin\", \"rb\") as f:\n readFile = f.read()\n f.close()\n # Convert to hex representation\n decMsg = bytes(readFile)\n\n outText.insert(tkinter.END, decMsg)", "title": "" }, { "docid": "5b140908bab568a0f1be96332528e25a", "score": "0.4718859", "text": "def run_brute_force_decrypt_dialog():\n text = input(\"Please enter a text to decrypt: \")\n\n for key in range(0, 26):\n print(f\"Key #{key}: {decrypt(text,key)}\")", "title": "" }, { "docid": "a465e2d6106c66ccab6b146385f0c12c", "score": "0.47105467", "text": "def mouse_click(x=\"\", y=\"\", left_or_right=\"left\", 
single_double_triple=\"single\", copyToClipBoard_Yes_No=\"no\"):\n try:\n if not x and not y:\n x_y = str(gui_get_any_input_from_user(\"X,Y co-ordinates to perform Mouse (Left) Click. Ex: 200,215\"))\n if \",\" in x_y:\n x, y = x_y.split(\",\")\n x = int(x)\n y = int(y)\n else:\n x = int(x_y.split(\" \")[0])\n y = int(x_y.split(\" \")[1])\n\n copiedText = \"\"\n time.sleep(1)\n\n if x and y:\n if single_double_triple.lower() == \"single\" and left_or_right.lower() == \"left\":\n pg.click(x,y)\n elif single_double_triple.lower() == \"double\" and left_or_right.lower() == \"left\":\n pg.doubleClick(x,y)\n elif single_double_triple.lower() == \"triple\" and left_or_right.lower() == \"left\":\n pg.tripleClick(x,y)\n elif single_double_triple.lower() == \"single\" and left_or_right.lower() == \"right\":\n pg.rightClick(x,y)\n time.sleep(1) \n\n if copyToClipBoard_Yes_No.lower() == \"yes\":\n kb.press_and_release(\"ctrl+c\")\n time.sleep(1)\n copiedText = clipboard.paste().strip()\n time.sleep(1)\n \n time.sleep(1) \n return copiedText\n except Exception as ex:\n print(\"Error in mouseClick=\"+str(ex))", "title": "" }, { "docid": "ccbb9928f849d5dd4caa5adf7955db0e", "score": "0.47079134", "text": "def on_control_v(self, event=None):\n\n def paste_text(choice_data: CloppyChoiceMadeEventData):\n if choice_data.choice == 'Yes':\n root: tk.Tk = self.master\n try:\n self.set_selected_text(\n root.clipboard_get(),\n set_selected=False\n )\n\n except tk.TclError:\n pass\n\n cloppy_yesno(\n self,\n \"It looks like you're trying to paste some text.\\n\"\n \"The text you're pasting could be replacing other important text.\",\n paste_text\n ).show()\n\n return 'break'", "title": "" }, { "docid": "c5907d87559ebbf1f5b2dd2c6d7e5e31", "score": "0.46874964", "text": "def prompt_encrypt(self):\n print(\"Please copy/paste key and secret from exchange and\")\n print(\"then provide a password to encrypt them.\")\n print(\"\")\n\n key = input(\" key: \").strip()\n secret = input(\" secret: \").strip()\n while True:\n password1 = getpass.getpass(\" password: \").strip()\n if password1 == \"\":\n print(\"aborting\")\n return\n password2 = getpass.getpass(\"password (again): \").strip()\n if password1 != password2:\n print(\"you had a typo in the password. try again...\")\n else:\n break\n\n hashed_pass = hashlib.sha512(password1.encode(\"utf-8\")).digest()\n crypt_key = hashed_pass[:32]\n crypt_ini = hashed_pass[-16:]\n aes = AES.new(crypt_key, AES.MODE_OFB, crypt_ini)\n\n # since the secret is a base64 string we can just just pad it with\n # spaces which can easily be stripped again after decryping\n print(len(secret))\n secret += \" \" * (16 - len(secret) % 16)\n print(len(secret))\n secret = base64.b64encode(aes.encrypt(secret)).decode(\"ascii\")\n\n self.config.set(\"api\", \"secret_key\", key)\n self.config.set(\"api\", \"secret_secret\", secret)\n self.config.save()\n\n print(\"encrypted secret has been saved in %s\" % self.config.filename)", "title": "" }, { "docid": "98eadefc1ffadd53a691dcb161c4a12a", "score": "0.46821305", "text": "def passwordbox(d):\n while True:\n # 'insecure' keyword argument only asks dialog to echo asterisks when\n # the user types characters. 
Not *that* bad.\n (code, password) = d.passwordbox(\"Password:\", insecure=True)\n if handle_exit_code(d, code) == d.DIALOG_OK:\n break\n return password", "title": "" }, { "docid": "9d654ad66660b9ec5a42596bfd59b0dc", "score": "0.46752864", "text": "def copy_to_clipboard(self):\n # Getting clipboard\n clipboard = QGuiApplication.clipboard()\n\n # Saving image to a path\n try:\n path = os.path.join(os.path.expanduser(r\"~\\Documents\"), \"image.png\")\n self.canvas.figure.savefig(path, dpi=150, transparent=False)\n pixmap = QPixmap()\n # Loading image as pixmap and saving to clipboard\n if pixmap.load(path):\n clipboard.setImage(pixmap.toImage())\n self.message_handler.raise_success(\n \"Copiado com sucesso para a área de transferência!\"\n )\n os.remove(path)\n except Exception as error:\n self.message_handler.raise_error(\n f\"Erro copiar para a área de transferência, contatar os desenvolvedores.\\n{error}\"\n )", "title": "" } ]
30cc9bea2320336e2024d02064a23c56
Return a 0/day average if the task(s) do not exist
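A minimal sketch of the contract this query describes, consistent with the test in the positive passage below; the helper name and the completed_at field are assumed for illustration:

    from datetime import date

    def average_completed_tasks(tasks):
        # Fall back to "0/day" when the user has no completed tasks at all.
        completed = [t for t in tasks if t.get("completed_at")]
        if not completed:
            return "0/day"
        first = min(t["completed_at"] for t in completed)
        days = max((date.today() - first).days, 1)  # avoid division by zero on day one
        return "{}/day".format(len(completed) // days)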
[ { "docid": "ac21eeb1e1ca9948918a76a50b87232d", "score": "0.67772514", "text": "def test_average_completed_report_with_not_existing_tasks(self):\n self.create_test_user()\n token = self.client.post(self.login_url, self.user_login_data, format=\"json\").data['token']\n\n res = self.client.get(self.average_completed_report_url,\n **{'HTTP_AUTHORIZATION':'token '+token})\n\n self.assertEqual(res.data['Average completed tasks'], '0/day')\n self.assertEqual(res.status_code, 201)", "title": "" } ]
[ { "docid": "a45ccb2b23a9377058f43a9de041582c", "score": "0.6399138", "text": "def get_average(days):\n average = 0\n for i in range(days):\n average = average + workloads[-1-i]\n average = average / days\n return average", "title": "" }, { "docid": "334f04a4879085f0a7589afe6cb9096c", "score": "0.61084497", "text": "def time_mean(df, island, frequency, task):\n if (task == IDLE):\n return 0\n df = df[df[ISLAND] == island]\n df = df[df[FREQ] == frequency]\n df = df[df[TASK] == task]\n return df[TIME].mean()", "title": "" }, { "docid": "52c2830ccda4d05433c7ec22377dc802", "score": "0.6073382", "text": "def test_average_completed_report_with_existing_task(self):\n self.create_test_user()\n token = self.client.post(self.login_url, self.user_login_data, format=\"json\").data['token']\n\n self.client.post(self.task_create_url, self.task_data,\n **{'HTTP_AUTHORIZATION':'token '+token})\n\n self.client.post(self.task_create_url, self.task_data2,\n **{'HTTP_AUTHORIZATION':'token '+token})\n\n res = self.client.get(self.average_completed_report_url,\n **{'HTTP_AUTHORIZATION':'token '+token})\n\n self.assertEqual(res.data['Average completed tasks'], '1/day')\n self.assertEqual(res.status_code, 201)", "title": "" }, { "docid": "39e33b9b4075919e246de22370cb356c", "score": "0.6020792", "text": "def test_daily_average(self):\n \n d1=[1.0]*800+[2.0]*400+[1.0]*400\n d2=[1.0]*800+[2.0]*400+[1.0]*400\n data=numpy.array([d1,d2])\n data=numpy.transpose(data)\n data[336,:]=numpy.nan\n st=datetime.datetime(year=1990,month=2,day=3,hour=11, minute=15)\n delta=time_interval(minutes=15)\n test_ts=rts(data,st,delta,{})\n nt3=daily_average(test_ts)", "title": "" }, { "docid": "1b604951e2fbc5e87bead0bb2950116b", "score": "0.5957909", "text": "def average(numbers):\n return None", "title": "" }, { "docid": "10ccb77c437eebe08acff838d7791f5f", "score": "0.5889117", "text": "def CalculateTaskTotals():\n print \"*** Calculate Task Totals ***\"\n\n task_events=Event_4698.objects.all()\n tasks=[]\n for te in task_events:\n tasks.append(te.task)\n\n fanalysis= Counter(tasks)\n for k, v in fanalysis.items():\n k.count=v\n k.save()\n\n\n tasks = Task.objects.all()\n tasknames =[]\n for t in tasks:\n tasknames.append(t.taskname)\n uniquetasknames=list(set(tasknames))\n ngram_weights= calculate_weights(2, uniquetasknames)\n tasks = Task.objects.all()\n for t in tasks:\n t.ngramscore=score(2, t.taskname, ngram_weights)\n t.save()", "title": "" }, { "docid": "a2c9eed71e8b51c607f6953c3b5db46d", "score": "0.5884039", "text": "def __assemble_tasks_results(self):\n results_tuples = self.get_required_results_from_all_tasks()\n # calculating the average over all result items\n self._average_result = np.mean(results_tuples, axis=0).tolist()\n self._standard_deviation = np.std(results_tuples, axis=0).tolist()", "title": "" }, { "docid": "dda5493b929f634e3e96875dcc72fb0d", "score": "0.57503873", "text": "def average_run(self):\n\t\truns = Runs()\n\t\tfor goalruns in self._wggr:\n\t\t\truns.extend(goalruns.runs)\n\t\treturn runs.average()", "title": "" }, { "docid": "803408c9c685607568e2ed7650c2fc0b", "score": "0.5739305", "text": "def eta(self):\n # finished tasks * overlap\n finished_tasks = Task.objects.filter(project=self.id, is_labeled=True)\n # one could make more than need to overlap\n min_n_finished_annotations = sum([ft.overlap for ft in finished_tasks])\n\n annotations_unfinished_tasks = Annotation.objects.filter(\n task__project=self.id, task__is_labeled=False, ground_truth=False, result__isnull=False).count()\n\n # get minimum 
remain annotations\n total_annotations_needed = self.get_total_possible_count\n annotations_remain = total_annotations_needed - min_n_finished_annotations - annotations_unfinished_tasks\n\n # get average time of all finished TC\n finished_annotations = Annotation.objects.filter(\n Q(task__project=self.id) & Q(ground_truth=False), result__isnull=False).values('lead_time')\n avg_lead_time = finished_annotations.aggregate(avg_lead_time=Avg('lead_time'))['avg_lead_time']\n\n if avg_lead_time is None:\n return None\n return avg_lead_time * annotations_remain", "title": "" }, { "docid": "caa209f81f79f326f4cb85ddefce121e", "score": "0.57081217", "text": "def compute_average(n):\n data = []\n start = time()\n for k in range(n):\n data.append(None)\n end = time()\n return (end - start )/ n", "title": "" }, { "docid": "7a79bf93f1082117576274d1576d6ad4", "score": "0.56944907", "text": "def get_avg_temperature_day_time(self, active_sensors, day, time):\r\n found = 0\r\n sum = 0.0\r\n if self._data_set is None:\r\n return None\r\n elif len(active_sensors) == 0:\r\n return None\r\n else:\r\n for i in range(0, len(self._data_set)):\r\n if (self._data_set[i][2] in active_sensors) and (self._data_set[i][0] == day) and (\r\n self._data_set[i][1] == time):\r\n found += 1\r\n sum += self._data_set[i][3]\r\n return sum/found", "title": "" }, { "docid": "6c798407f64c623ee0c423c0a1d1dde0", "score": "0.5680199", "text": "def avg(self):\n return sum(self.times) / len(self.times)", "title": "" }, { "docid": "de29f1955a7473b379eb447262f73b16", "score": "0.56692576", "text": "def avg_proj(sheet, row):\n sum = 0\n num = 0\n for colx in range(4, sheet.ncols, 2):\n sum = sum + find_worker_mark(sheet, row, colx)\n if (find_worker_mark(sheet, row, colx) != 0):\n num += 1\n avg = sum / num\n return avg", "title": "" }, { "docid": "b0aecac6995fcee029c1369746504a3b", "score": "0.5660519", "text": "def avg_trash_cycle(self):\n\t\tself._load_data()\n\t\treturn self._get_avg_time(\"starting time\", \"time taken out\")", "title": "" }, { "docid": "d8e0389484eb1548af3b769e9e92c899", "score": "0.56546164", "text": "def avg(self):\n return sum(self.times)/len(self.times)", "title": "" }, { "docid": "1b47ded228d8ef7ea3d4c182b4f5ef00", "score": "0.5650122", "text": "def average(qT, t):\n\tn_time = len(t) - 1\n\t# Finding the first value to take into account.\n\ti_deb = 0\n\twhile i_deb < len(t) and t[i_deb] < pPP.mean_begin_time:\n\t\ti_deb += 1\n\tif i_deb > n_time - 1:\n\t\tprint('WARNING average: End of simulation before start of averaging.')\n\t\tprint('WARNING average: Taking only last profile.')\n\t\ti_deb = n_time - 1\n\t# Initialisation\n\tq = qT[i_deb]\n\t# Averaging\n\ti = i_deb + 1\n\twhile i < n_time + 1 and t[i] < pPP.mean_end_time:\n\t\tq += qT[i]\n\t\ti += 1\n\tq /= (i - i_deb)\n\treturn q", "title": "" }, { "docid": "8883ee48b07a7a58f9eecd24b354a534", "score": "0.56499565", "text": "def average(graphs):\n n_time = 0\n tnt_time = 0\n for graph in graphs:\n n_time += graph.naive_time\n tnt_time += graph.tar_troj_time\n n_time /= len(graphs)\n tnt_time /= len(graphs)\n return n_time, tnt_time", "title": "" }, { "docid": "ac60adaaac474835d83aa5d7482221a9", "score": "0.5644896", "text": "def average(LIST,d=5):\r\n\tstart_time = ct()\r\n\ttry:\r\n\t\t_temp_average = round(sum(LIST)/len(LIST),d)\r\n\t\ttotal_time = ct() - start_time\r\n\t\tlog(\"log.txt\",[\"Average\",total_time])\r\n\t\treturn _temp_average\r\n\texcept:\r\n\t\tlog(\"log.txt\",[\"Average\",\"FAILED\"])\r\n\t\treturn 0", "title": "" }, { "docid": 
"d13e61e31ead0125f803dd2ebf045315", "score": "0.56366146", "text": "def combine_task_stats_mean(self):\n self.stats_good_op_comb = self.combine_task_norm_method(self.stats_good_op).mean(axis=0)", "title": "" }, { "docid": "41dab0865dbe00aa71bec431ccb0e962", "score": "0.56135625", "text": "def get_tasks_total(tasks):\n return sum([task.total() for task in tasks])", "title": "" }, { "docid": "9466bdd7c12e8d247f763382c8f0b36e", "score": "0.56120276", "text": "def get_avg_temperature_day_time(self, active_sensors, day, time):\n if self._data_set is None:\n return None\n else:\n return 0", "title": "" }, { "docid": "8ad0d20e69e0b2b7cab5797149980035", "score": "0.5608513", "text": "def average(t):\n def sum_helper(t):\n total, count = t.label, 1\n for b in t.branches:\n t, c = sum_helper(b)\n total += t\n count += c\n return total, count\n total, count = sum_helper(t)\n return total / count", "title": "" }, { "docid": "148641652673057ce827f5c6c047d61e", "score": "0.5597161", "text": "def compute_average(n):\n data = []\n start = time()\n for k in range(n):\n data.append(None)\n end = time()\n return (end - start) / n", "title": "" }, { "docid": "148641652673057ce827f5c6c047d61e", "score": "0.5597161", "text": "def compute_average(n):\n data = []\n start = time()\n for k in range(n):\n data.append(None)\n end = time()\n return (end - start) / n", "title": "" }, { "docid": "86ba27b573a67f38cd74292ee0db9a4d", "score": "0.5595723", "text": "def compute_average(n):\n data = []\n start = time.time()\n for k in range(n):\n data.append(None)\n end = time.time()\n return (end - start) / n", "title": "" }, { "docid": "46db0f061a5a03cd43b10d83213a0d35", "score": "0.5594759", "text": "def task3a(G):\n average = average/len(all_paths)\n average = 0\n for el in all_paths:\n average += el\n return average\n all_paths = find_shortest_paths(G, True)", "title": "" }, { "docid": "fc4768af632a6e348629cbfc15d25c11", "score": "0.5588304", "text": "def avg(lis, exception=0.0):\n lis = [item for item in lis if item is not None]\n if len(lis) == 0:\n return exception\n else:\n return sum(lis) / len(lis)", "title": "" }, { "docid": "bf0a77fc9ed176ad61116a9afa2f175f", "score": "0.5585687", "text": "def all_scheduled():\n sum_cores = 0.\n sum_sfs = 0.\n for i, n in enumerate(used):\n if n != 0:\n sum_cores += resource_configs[i].cores * n\n sum_sfs += resource_configs[i].speed_factor * n\n if int(sum_cores) != len(tasks):\n return MAX_VALUE\n else:\n # print('Possible solution', sum_cores)\n # for i, n in enumerate(used):\n # if n != 0:\n # print(resource_configs[i], 'x', n, '(index {})'.format(i))\n # print()\n return 0. 
+ 1./sum_sfs", "title": "" }, { "docid": "f0d00b354423419ce84a22fa856f2594", "score": "0.55789155", "text": "def avg_worker_mark(sheet, name):\n avg = 0\n sum = 0\n num = 0\n colx = find_colx(sheet, name)\n if (colx != 0):\n for i in range(1, sheet.nrows):\n sum += find_worker_mark(sheet, i, colx)\n num = find_sum_project(sheet, colx)\n if (num != 0):\n avg = sum / num\n return avg", "title": "" }, { "docid": "03976d46b88700647e2aef9930e937d8", "score": "0.5575015", "text": "def calc_avgtime(urls_count, urls_resptime):\n for url, count in urls_count.items():\n if count == 1:\n continue\n urls_resptime[url] //= count", "title": "" }, { "docid": "5a3de0fdc3221b8181d1222107f26807", "score": "0.556286", "text": "def average(t):\n def sum_helper(t):\n total, count = t.label, 1\n for b in t.branches:\n total, count = total + sum_helper(b)[0], count + sum_helper(b)[1]\n\n return total, count\n total, count = sum_helper(t)\n return total / count", "title": "" }, { "docid": "3bc6d115564f3d7b2a12377929ea5c0a", "score": "0.5561096", "text": "def average_month(array):\n\n if type(array) is str:\n array = json.loads(array)\n\n known_numbers = list(filter(lambda x: not math.isnan(x), array))\n\n if len(known_numbers) < len(array) / 2:\n return -1, \"insufficient data\"\n else:\n return 0 if len(known_numbers) < len(array) else 1, sum(known_numbers) / len(known_numbers)", "title": "" }, { "docid": "6bf38344c17223f9ddec9b375f6e8a8a", "score": "0.55566823", "text": "def average_duration_of_employment():\n return 2", "title": "" }, { "docid": "03c8fa3536d6309c10bae8e39de16f10", "score": "0.5554009", "text": "def calculate_stats(self,is_keep_data = False):\n for task in self.tasks:\n task.calc_stats(is_keep_data = is_keep_data)", "title": "" }, { "docid": "20a257b62ef94475bee8b2cf9ea4c9d2", "score": "0.5551863", "text": "def get_average(ds):\n return ds.mean(dim=\"time\", skipna=True, keep_attrs=True)", "title": "" }, { "docid": "256b3b89268625566db7001485d70093", "score": "0.5516864", "text": "def daily_average(self):\n if len(self.vals) < 1:\n return self.curr_val\n\n return statistics.mean(self.vals)", "title": "" }, { "docid": "6c6c13aa6510ede1e603c0421c987141", "score": "0.5515754", "text": "def calc_mean(self):\n logging.info(\"The calculation of temporal mean is finished!\")", "title": "" }, { "docid": "6da76021e1107b83cb505bfbb03cd40c", "score": "0.5510021", "text": "def average_ring_task(hp, conds, reps):\n total_trials = conds * reps\n\n fix_start, fix_end = get_fix_start_ends(hp, conds)\n angles_loc = np.random.uniform(0, 2*np.pi, size=(2, conds))\n\n trials = []\n\n for c in range(conds):\n task = Trial(hp, n_trials=reps)\n \n task_vals = one_hot(hp['n_tasks'], task_id_map['average_ring'])\n task.put('x', 'task', 'write', ix=[None,None], vals=task_vals)\n\n v_fix = np.zeros((hp['n_steps']))\n v_fix[fix_start[c]:fix_end[c]] = 1\n task.put('x', 'fix', 'write', ix=[None,None], vals=v_fix)\n task.put('y', 'fix', 'write', ix=[None,None], vals=v_fix)\n\n ang1 = angles_loc[0,c]\n ang2 = angles_loc[1,c]\n avg_ang = (ang1 + ang2) / 2\n if abs(ang1 - ang2) > np.pi:\n avg_ang = (avg_ang + np.pi) % (2 % np.pi)\n\n # features stimulus and response values\n x_ring1 = task.get_ring_vec(hp['n_in_ring'], ang1, 1, trial_hp['ang_var'])\n x_ring2 = task.get_ring_vec(hp['n_in_ring'], ang2, 1, trial_hp['ang_var'])\n y_id = np.round(avg_ang / (2 * np.pi) * hp['n_out_ring']).astype(np.int) % hp['n_out_ring']\n y_ring = one_hot(hp['n_out_ring'], y_id)\n task.put('x', 'ring', 'write', ix=[fix_start[c],fix_end[c]], 
vals=x_ring1)\n task.put('x', 'ring', 'append', ix=[fix_start[c],fix_end[c]], vals=x_ring2)\n task.put('y', 'ring', 'write', ix=[fix_end[c],None], vals=y_ring)\n\n trials.append(task)\n\n return trials", "title": "" }, { "docid": "2ec783b1b3d6be113beb60a71af4cfff", "score": "0.5495658", "text": "def get_avg_temperature_day_time(self, active_sensors, day, time):\n if self._data_set is None or active_sensors == []:\n return None\n else:\n temp_data = [k[3] for k in self._data_set if day == k[0] and time == k[1] and k[2] in\n active_sensors]\n if len(temp_data) > 0:\n return round(convert_units((sum(temp_data) / len(temp_data)), current_unit), 1)\n else:\n return None", "title": "" }, { "docid": "7fa41462890adf6613ff3fe6660042b3", "score": "0.5491756", "text": "def average(values):\n\n try:\n return float(sum(values) / len(values))\n except ZeroDivisionError:\n return 0", "title": "" }, { "docid": "100e13f6c32515419cb95bdd1364a134", "score": "0.5479099", "text": "def get_average(user_id, method):\n import numpy as np\n if not isinstance(method, list):\n method = [method]\n procs = ProcessedImage.objects.proc(user_id, method)\n t_av = np.mean([proc.timeToProcess for proc in procs])\n if np.isnan(t_av):\n return 'User has not used this operation.'\n else:\n return t_av", "title": "" }, { "docid": "e9900318d91a8c13931d090e9ccd8e29", "score": "0.5468353", "text": "def average(seq: list[Optional[float]]) -> float:\n total, count = 0.0, 0\n for v in filter(None, seq):\n total += v\n count += 1\n return total / count", "title": "" }, { "docid": "2c26e0de5d0f08b32ae94d30aa68eacc", "score": "0.5464825", "text": "def avg_time(self, datetimes):\n epoch = datetime.utcfromtimestamp(0)\n dt1 = (datetimes[0] - epoch).total_seconds()\n dt2 = (datetimes[1] - epoch).total_seconds()\n dt1_dt2 = (dt1 + dt2) / 2\n dt1_dt2_datetime = datetime.utcfromtimestamp(dt1_dt2)\n return dt1_dt2_datetime\n # return datetimes[0]", "title": "" }, { "docid": "e6feedfd0f7d0d8da9a7aa083a3f1c4c", "score": "0.5456072", "text": "def getAverageValuesOfProcesses(processesList:list):\n\n #logic of this function is to sort all the processes by arrival time and execute them one-by-one\n #we will calculate all necessary data for each process in for loop\n \n currentTimeCPU = 0\n averageWaitingTime = 0\n averageTurnaroundTime = 0\n \n #I used lambda function to sort dictionaries by arrival time for easier processing \n processesList = sorted(processesList, key = lambda i: i[\"Arrival Time\"])\n \n for process in range(len(processesList)):\n if processesList[process][\"Arrival Time\"] > currentTimeCPU:\n currentTimeCPU = (processesList[process][\"Arrival Time\"] + processesList[process][\"Burst Time\"])\n else:\n currentTimeCPU += processesList[process][\"Burst Time\"]\n\n processesList[process][\"Completion Time\"] = currentTimeCPU\n \n processesList[process][\"Turnaround Time\"] = (\n processesList[process][\"Completion Time\"] - processesList[process][\"Arrival Time\"])\n\n processesList[process][\"Waiting Time\"] = (\n processesList[process][\"Turnaround Time\"] - processesList[process][\"Burst Time\"])\n\n averageWaitingTime += processesList[process][\"Waiting Time\"]\n averageTurnaroundTime += processesList[process][\"Turnaround Time\"]\n\n try:\n averageWaitingTime /= len(processesList)\n averageTurnaroundTime /= len(processesList)\n except ZeroDivisionError:\n print(\"There are no processes to process!\")\n\n return (averageWaitingTime, averageTurnaroundTime)", "title": "" }, { "docid": "698d661510064a38fd51599fd4d3800a", "score": 
"0.54508054", "text": "def get_average(totals_in_seconds):\n return sum(totals_in_seconds) / len(totals_in_seconds)", "title": "" }, { "docid": "ae4a58b5d65fe524802540e957031c79", "score": "0.54491997", "text": "def average(T):\n s = 0\n for num in T:\n s = s + num\n \n return s / len(T)", "title": "" }, { "docid": "48649d14d6647f929bbae48fe970d7c0", "score": "0.5443821", "text": "def test_average_single_none(self):\n\n self.assertEqual(0.0, average([None]))", "title": "" }, { "docid": "c6ff2c7830936d9dab8e7ec190e1d047", "score": "0.5441879", "text": "def find_average():\n number_list = []\n print(\"Enter the numbers you want to find the average of. \"\n \"When you want to stop, enter done\")\n while True:\n try:\n user_input = input(\"Enter number : \")\n if user_input.lower() == \"done\":\n if len(number_list) > 0:\n return sum(number_list)/len(number_list)\n else:\n return \"Nothing to average!\"\n else:\n number_list.append(float(user_input))\n except:\n print(\"That's not a valid number, try again!\")", "title": "" }, { "docid": "d9f18e5b4fec6a69e25b287e73d81127", "score": "0.54414356", "text": "def average_duration(results):\n dates = []\n for result in results:\n created = datetime.strptime(getattr(result, 'created').split('.')[0], '%Y-%m-%dT%H:%M:%S')\n resolved = datetime.strptime(getattr(result, 'resolutiondate').split('.')[0], '%Y-%m-%dT%H:%M:%S')\n dates.append(resolved - created)\n return '{0} days'.format((builtins.sum(dates, timedelta(0)) / len(dates)).days)", "title": "" }, { "docid": "43c9e0239beb0b552b6df9fd0b08ddd2", "score": "0.5433715", "text": "def hourly_avg_dynamic(station_number, day):\n sql = \"select * from availability where station_number = %s;\"\n engine = scraper.connect_db(\"DublinBikeProjectDB.cun91scffwzf.eu-west-1.rds.amazonaws.com\", \"3306\", \"DublinBikeProjectDB\", \"theForkAwakens\", \"db_password.txt\")\n station_details = engine.execute(sql, station_number).fetchall()\n engine.dispose() \n hours_bikes = []\n hours_stands = []\n avg_bikes = []\n avg_stands = []\n \n for i in range(25):\n hours_bikes.append([0, 0])\n hours_stands.append([0, 0])\n \n for station in station_details:\n num_bikes = station[\"bikes_available\"]\n num_stands = station[\"bike_stands_available\"]\n #working out which hour we are dealing with\n last_update = station[\"last_updated\"]\n dtime = scraper.datetime_formatter(last_update)\n hour = int(dtime[1][11:13])\n \n hours_bikes[hour][0] += num_bikes\n hours_bikes[hour][1] += 1\n \n hours_stands[hour][0] += num_stands\n hours_stands[hour][1] += 1\n \n for hour in hours_bikes:\n if hour[0] > 0 and hour[1] > 0:\n avg_bikes_hour = int(round((hour[0]/hour[1]), 0))\n avg_bikes.append(avg_bikes_hour)\n \n for hour in hours_stands:\n if hour[0] > 0 and hour[1] > 0:\n avg_stands_hour = int(round((hour[0]/hour[1]), 0))\n avg_stands.append(avg_stands_hour)\n\n return avg_bikes, avg_stands", "title": "" }, { "docid": "8e97734bcdb2ba247117f81d2f3e892e", "score": "0.54301447", "text": "def avg_xp_per_task(self):\n return self.slayer_xp // self.tasks", "title": "" }, { "docid": "ccfb38b7a57b0c76384cb70923f90ba2", "score": "0.5427184", "text": "def average(numbers):\n\n total_sum = reduce(lambda x, y: x+y, numbers, 0) # Accumulate all the integers. 
Initialize with Zero in case I get an empty list\n qty_numbers = len(numbers)\n if qty_numbers == 0:\n average_total= None # Return a None, so that the user can handle the return.\n else:\n average_total = float(total_sum) / qty_numbers # COnvert to float to be able to handle float numbers or results. Divide by total numbers.\n\n return average_total", "title": "" }, { "docid": "0bc5b2f504cc32a0aed49304cd9318d4", "score": "0.5421586", "text": "def mean(items):\r\n return float(sum(items)) / len(items) if len(items) > 0 else 0", "title": "" }, { "docid": "3e925dd812ea069167e6e8ac34da662e", "score": "0.54069364", "text": "def _get_averages_over_time(all_ratings):\n\n TUPLE_DATETIME_FORMAT = 'YYYY-MM-DD-HH'\n start_time = arrow.get(all_ratings[0][0], TUPLE_DATETIME_FORMAT )\n end_time = arrow.utcnow()\n\n averages_by_hour = dict()\n\n rating_idx = 0\n current_count = 0\n current_sum = 0\n for current_hour, next_hour in arrow.Arrow.interval('day', start_time, end_time):\n while rating_idx < len(all_ratings):\n rating = all_ratings[rating_idx]\n\n rating_created = arrow.get(rating[0], TUPLE_DATETIME_FORMAT)\n if rating_created > next_hour:\n if current_count:\n averages_by_hour[current_hour.format(MODEL_DATE_FORMAT_ARROW)] = {\n 'rating_average': current_sum/current_count,\n 'rating_count': current_count\n }\n break\n\n #keep on iterating through the ratings\n current_count += rating.rating_count\n current_sum += rating.rating_sum\n\n rating_idx += 1\n\n return averages_by_hour", "title": "" }, { "docid": "0f7996000fb9023a32951fc03226d7a4", "score": "0.5404775", "text": "def calculate_avg():\r\n for game_name in fixtures:\r\n # Lets look in each game\r\n for game_results in fixtures[game_name]:\r\n # Each \"game_results\" contains each possible outcome (1,2,x)\r\n counter = 0\r\n sum = 0\r\n for website in fixtures[game_name][game_results]:\r\n odd = fixtures[game_name][game_results][website]\r\n # Turning the odd from str to float\r\n if \"/\" in odd:\r\n odds = int(odd.split(\"/\")[0]) / int(odd.split(\"/\")[1])\r\n counter += 1\r\n sum += float(odds)\r\n\r\n old_average = 0\r\n new_average = float(sum / counter)\r\n fixtures[game_name][game_results][\"average\"] = new_average\r\n if game_name in old_averages:\r\n if game_results in old_averages[game_name]:\r\n if \"average\" in old_averages[game_name][game_results]:\r\n old_average = fixtures[game_name][game_results][\"average\"]\r\n else:\r\n old_averages[game_name] = {}\r\n old_averages[game_name][game_results] = {}\r\n old_averages[game_name][game_results][\"average\"] = new_average\r\n\r\n if old_average > 0:\r\n # Alert if the odds changed in 20% to any direction.\r\n if new_average * 0.8 > old_average or new_average * 1.2 < old_average:\r\n print(\"ALERT - ABNORMAL BEHAVIOR\")\r\n fixtures[game_name][game_results][\"alert\"] = f\"The game's ratio\" \\\r\n f\" changed dramatically,\" \\\r\n f\" by more than 20%!\"", "title": "" }, { "docid": "57cb01ebb8160eb8a942bed9ece1d870", "score": "0.54033667", "text": "def average_day(minimum, maximum):\n\n if type(minimum) is str:\n minimum = float(minimum)\n\n if type(maximum) is str:\n maximum = float(maximum)\n\n if math.isnan(minimum) or math.isnan(maximum):\n return -1, \"insufficient data\"\n else:\n return 0, (minimum + maximum) / 2", "title": "" }, { "docid": "63fd5f8382c54a7de65ae012229694b8", "score": "0.53895146", "text": "def safe_mean(array):\n total_items = 0\n total_value = 0\n for i in range(0, len(array)):\n if not math.isnan(i):\n total_items += 1\n total_value += array[i]\n 
return total_value / total_items", "title": "" }, { "docid": "5d0d0fc2030f02ea954804215e9cfced", "score": "0.53869194", "text": "def test_average_two_with_none(self):\n\n self.assertEqual(7.5, average([7.5, None]))", "title": "" }, { "docid": "7766202de4b5c6be41b732915aa5c910", "score": "0.53857636", "text": "def get_average(differences):\n sum = timedelta(days=0)\n for difference in differences:\n sum += difference\n avg = (sum.total_seconds()/60/60/24)/len(differences)\n return avg", "title": "" }, { "docid": "2126fcd15d92347cd79a16e899d5e659", "score": "0.5384181", "text": "def _cal_avg(self) -> float:\r\n if self._leaf_count != 0:\r\n return self._leaf_sum / self._leaf_count\r\n else:\r\n return 0.0", "title": "" }, { "docid": "2126fcd15d92347cd79a16e899d5e659", "score": "0.5384181", "text": "def _cal_avg(self) -> float:\r\n if self._leaf_count != 0:\r\n return self._leaf_sum / self._leaf_count\r\n else:\r\n return 0.0", "title": "" }, { "docid": "5fc7e95a12e153ea3eb18918e547c4e1", "score": "0.53738594", "text": "def task_acc(scores, preds):\n gt = get_task_gt(tag_file)\n combined = np.array([[task, preds[vid_id]] for vid_id, task in gt.items()], dtype=int)\n acc = sum(combined[:, 0] == combined[:, 1]) / combined.shape[0]\n print ('Prediction Accuracy: %.4f' % (acc * 100))", "title": "" }, { "docid": "4ff86a75c215c6c0d5072076814f02c7", "score": "0.5372331", "text": "def getAverage(self):\n if self.count == 0:\n return 0\n return self.total/self.count", "title": "" }, { "docid": "fa1817a024d5f82324d086e3398ba267", "score": "0.5372227", "text": "def task_plannedhrs(self,cr,uid,ids,context={},arg=None,obj=None):\n result = {}\n for f in self.browse(cr, uid,ids): \n sql=\"\"\"select COALESCE(sum(activity_time),0) from project_task_work where task_id=\"\"\"+str(f.id)\n cr.execute(sql)\n res=cr.fetchone()\n result[f.id] = float(res[0])\n \n return result", "title": "" }, { "docid": "5854f7b5c2f486ebd2768d9e66cc7279", "score": "0.5371268", "text": "def average(self):\n s = [v[0] for v in self if v[0] is not None]\n if s:\n return sum(s) / len(s)\n else:\n return None", "title": "" }, { "docid": "81b721c5be5a08f4ab103559384140ab", "score": "0.53671724", "text": "def _get_average_temperature():\n t1 = redis_client.current_t1_temperature\n t2 = redis_client.current_t2_temperature\n if t1 and t2:\n return (t1 + t2)/2", "title": "" }, { "docid": "091406e06444c1c21aa8d1b8e008a7c4", "score": "0.53613997", "text": "def avg_cost(self, data, result):\n num = len(data)\n total_cost = 0\n for f in range(num):\n total_cost += self.cost(data[f], result[f])\n return total_cost/num", "title": "" }, { "docid": "c41598c442dbb36cc9b6eba94b352845", "score": "0.5357289", "text": "def fawg(dictionary):\t\r\n\tlist_to_compute = condition(dictionary, \"broken\")\r\n\tsumm = 0 \r\n\tfor i in range(len(list_to_compute)):\r\n\t\tsumm += list_to_compute[i]\r\n\tarithmetic_mean = summ/len(list_to_compute)\r\n\treturn arithmetic_mean", "title": "" }, { "docid": "8e1ad198ff6a4081f4cbf065c71021e4", "score": "0.535567", "text": "def average(self, period):\n\n # Get the necessary bits of time.\n daily_inc = int(24.0 / self._inc) # get model steps per day\n monthly_inc = daily_inc * 30 # get model steps per 30 days (~month)\n nt = self.time.datetime.shape[0]\n dnt = self.time.datetime[::daily_inc].shape[0]\n # Monthly increment might mean we end up trying to skip over more days than we have in the input. 
So,\n # set the number of montly times to be 1 (only one month of data) or the number of times when subsampled at\n # the monthly increment, whichever is larger.\n mnt = np.max((self.time.datetime[::monthly_inc].shape[0], 1))\n nx = self.u_diff.shape[-1]\n\n # Prepare the arrays if we're doing averaging\n if period == 'daily':\n self.daily = type('daily', (object,), {})()\n if self._noisy:\n print('Compute daily residuals')\n for var in ('u_diff', 'v_diff'):\n datetime = [] # has to be a list so we can insert a datetime object.\n daily = np.empty((dnt, nx))\n # This could be done with a neat reshaping, but I can't be bothered to figure it out, so we'll just\n # do it the old-fashioned way.\n for ti, t in enumerate(np.arange(0, nt, daily_inc).astype(int)):\n daily[ti, :] = np.median(getattr(self, var)[t:t + daily_inc, :], axis=0)\n datetime.append(self.time.datetime[np.min((t, nt - 1))])\n if 'instantaneous' in self._max_speed:\n daily = self._clip(daily, self._max_speed['instantaneous'])\n setattr(self.daily, var, daily)\n setattr(self.daily, 'datetime', np.asarray(datetime))\n\n # Now create the speed and direction arrays.\n setattr(self.daily, 'speed', np.sqrt(getattr(self.daily, 'u_diff')**2 + getattr(self.daily, 'v_diff')**2))\n setattr(self.daily, 'direction', np.rad2deg(np.arctan2(getattr(self.daily, 'u_diff'), getattr(self.daily, 'v_diff'))))\n\n # Make the components after we've clipped so our plots look nice.\n self.daily.u_res = np.sin(np.deg2rad(self.daily.direction)) * self.daily.speed\n self.daily.v_res = np.cos(np.deg2rad(self.daily.direction)) * self.daily.speed\n\n elif period == 'monthly':\n self.monthly = type('monthly', (object,), {})()\n if self._noisy:\n print('Compute monthly residuals')\n for var in ('u_diff', 'v_diff'):\n datetime = [] # has to be a list so we can insert a datetime object.\n monthly = np.empty((mnt, nx))\n # This could be done with a neat reshaping, but I can't be bothered to figure it out, so we'll just\n # do it the old-fashioned way.\n for ti, t in enumerate(np.arange(0, nt / 60 / 30, monthly_inc).astype(int)):\n monthly[ti, :] = np.median(getattr(self, var)[t:t + monthly_inc, :], axis=0)\n datetime.append(self.time.datetime[np.min(((t + monthly_inc) // 2, nt - 1))]) # mid-point\n setattr(self.monthly, var, monthly)\n setattr(self.monthly, 'datetime', np.asarray(datetime))\n\n # Now create the speed and direction arrays.\n setattr(self.monthly, 'speed', np.sqrt(getattr(self.monthly, 'u_diff')**2 + getattr(self.monthly, 'v_diff')**2))\n setattr(self.monthly, 'direction', np.rad2deg(np.arctan2(getattr(self.monthly, 'u_diff'), getattr(self.monthly, 'v_diff'))))\n\n if 'monthly' in self._max_speed:\n if self._noisy:\n print('Capping monthly residuals to {} m/s'.format(self._max_speed['monthly']))\n self.monthly.speed = self._clip(self.monthly.speed, self._max_speed['monthly'])\n # Make the components after we've clipped so our plots look nice.\n self.monthly.u_res = np.sin(np.deg2rad(self.monthly.direction)) * self.monthly.speed\n self.monthly.v_res = np.cos(np.deg2rad(self.monthly.direction)) * self.monthly.speed\n\n if period == 'monthly':\n # We need to add a pseudo-time dimension to the monthly data so we can still use the plot_var function.\n if np.ndim(self.monthly.speed) == 1:\n self.monthly.speed = self.monthly.speed[np.newaxis, :]\n self.monthly.direction = self.monthly.direction[np.newaxis, :]\n self.monthly.u_res = self.monthly.u_res[np.newaxis, :]\n self.monthly.v_res = self.monthly.v_res[np.newaxis, :]", "title": "" }, { "docid": 
"bf6fb380e2b7e4e2dc04ec215f173501", "score": "0.5351332", "text": "def __average(self):\n return sum(self.dtime_log) / len(self.dtime_log)", "title": "" }, { "docid": "e5df20808dffb84c1756e1b2cfab66fd", "score": "0.53503466", "text": "def estimated_total_tasks(self):\n return self.tasks_to_level_99() + self.tasks", "title": "" }, { "docid": "61de5888c1f93edb64e7bd39e88cb4ad", "score": "0.5345149", "text": "def test_calculate_average_tempo_failure(self):\n training = self._create_sample_training(\n user=self.user,\n name=\"test training1\",\n distance=7,\n time_in_seconds=1800,\n )\n\n training.calculate_average_tempo()\n wrong_tempo = 28\n\n self.assertNotEqual(training.avg_tempo, wrong_tempo)", "title": "" }, { "docid": "bef7418d3f08f02f33f1f3b0066b3659", "score": "0.53440154", "text": "def forecast_average(self,models='all',exclude=None,metric='mape',call_me='average',test_length='max'):\n if models == 'all':\n avg_these_models = [e for e in list(getattr(self,metric).keys()) if (e != call_me) & (not e is None)]\n elif isinstance(models,list):\n avg_these_models = models[:]\n elif isinstance(models,str):\n if models.startswith('top_'):\n ordered_models = [e for e in self.order_all_forecasts_best_to_worst(metric) if (e != call_me) & (not e is None)]\n avg_these_models = [m for i, m in enumerate(ordered_models) if (i+1) <= int(models.split('_')[1])]\n else:\n raise ValueError(f'argument in models parameter not recognized: {models}')\n\n if not exclude is None:\n if not isinstance(exclude,list):\n raise TypeError(f'exclude must be a list type or None, not {type(exclude)} type')\n else:\n avg_these_models = [m for m in avg_these_models if m not in exclude]\n \n if len(avg_these_models) == 0:\n print('no models found to average')\n return None\n\n if test_length == 'max':\n for i, m in enumerate(avg_these_models):\n if i == 0:\n test_length = self.info[m]['holdout_periods']\n else:\n test_length = min(test_length,self.info[m]['holdout_periods'])\n else:\n assert isinstance(test_length,int), f'test_length must be an int, not {type(test_length)}'\n assert test_length >= 1, 'test_length must be at least 1'\n\n self.mape[call_me] = 1\n self.forecasts[call_me] = [None]*self.forecast_out_periods\n\n self.info[call_me] = self._get_info_dict()\n self.info[call_me]['holdout_periods'] = test_length\n self.info[call_me]['test_set_actuals'] = self.y[-(test_length):]\n\n forecasts = pd.DataFrame()\n test_set_predictions_df = pd.DataFrame()\n for m in avg_these_models:\n test_set_predictions_df[m] = self.info[m]['test_set_predictions'][-(test_length):]\n forecasts[m] = self.forecasts[m]\n \n self.info[call_me]['model_form'] = 'Average of ' + str(len(avg_these_models)) + ' models: ' + ', '.join(avg_these_models)\n self.info[call_me]['test_set_predictions'] = test_set_predictions_df.mean(axis=1).to_list()\n self._metrics(call_me)\n self.forecasts[call_me] = forecasts.mean(axis=1).to_list()", "title": "" }, { "docid": "a38db2aba536ebc1fe324ca437b9e1f6", "score": "0.53420776", "text": "def average_agg(default=0.0):\n\n return lambda items: (sum(items) / len(items)) if len(items) > 0 else default", "title": "" }, { "docid": "7c76bd680874a057b25738e0ff738ea3", "score": "0.53361887", "text": "def test_lambdas_average_zeros(self):\n\n size = 6\n start = (long(time.time()) / 60) * 60\n end = start + size\n times = range(start, end)\n\n values1 = [0, 100, 20, 30, 0, 10]\n\n def compute_avg(values):\n \"\"\"Compute the average\"\"\"\n values = [value for value in values if value != 0]\n return sum(values) / 
float(len(values))\n\n timeserie1 = 'timeserie-1' + constants.CHAR_AGG + aggregations.AGGREGATION_AVG_ZERO\n\n expected_data = {\n timeserie1: [[times[cur], float(values1[cur])] for cur in range(size)]\n }\n\n expected_data_minutes = {\n timeserie1: [[start, compute_avg(values1)]]\n }\n\n event = {\n timeserie1: zip(times, values1),\n }\n\n self.generic_test(event, start, end, expected_data, expected_data_minutes)", "title": "" }, { "docid": "da0793e850ddcdabeca6c1d989f73ddb", "score": "0.5333746", "text": "def safe_average(elements):\n return safe_divide(sum(elements), len(elements))", "title": "" }, { "docid": "79d8ad4790f5056b86d84121cdfeccf6", "score": "0.5330121", "text": "def average(self):\n try:\n average = (self.overall_points + self.overall_bonuses)/self.overall_runs\n except ZeroDivisionError:\n average = 0\n return average", "title": "" }, { "docid": "4d79e40bf383c77a1959d37fb0db0bb3", "score": "0.5328026", "text": "def add_averages(self):\n all_top_keys = self._top_tree().keys()\n for top_key in all_top_keys:\n # sort dates\n current_dates = self._top_tree()[top_key]\n current_dates_list = sorted(current_dates, reverse=False)\n number_seen = 0\n running_total = 0\n prev_total = 0\n # loop through dates and calculate running average\n for current_date in current_dates_list:\n current_node = self._top_tree()[top_key][current_date]\n running_total += prev_total\n prev_total = current_node.get_total_entries()\n if number_seen >= 1:\n current_node.set_running_average(myround(running_total / number_seen))\n number_seen += 1\n else:\n number_seen += 1\n self._update_node_to_tree(top_key, current_date, current_node)", "title": "" }, { "docid": "0aea87f30df14eef24e962d713c11f04", "score": "0.5325565", "text": "def mean(t: tuple, ignore_empty: bool = False) -> float:\n tuple_check(t)\n count = 0\n total = 0\n for val in t:\n if ignore_empty: \n if val == 0 or val == None:\n continue\n else:\n total += val\n count += 1\n else:\n total += val\n count += 1 \n count = count or 1 # in case t is an empty vector\n return total/count", "title": "" }, { "docid": "5765cf2f302d9a7a10fb91af31863497", "score": "0.53135455", "text": "def mean(self, days, account=None):\n start = datetime.datetime.now() - datetime.timedelta(days)\n amounts = self.transactions(account=account, start=start)['Amount']\n amounts = filter(lambda x: x < 0, amounts) # ignore deposits\n return float(sum(amounts)) / days", "title": "" }, { "docid": "e7296deea0ccd1749f01e3c21a8f375c", "score": "0.5309955", "text": "def task_imp(pattern, task, data):\n i, j = data.get_trim_idx(pattern)\n # TODO - adopt to the scenario where \"counts\" importance scores are not present\n return OrderedDict([\n # 1. aggregate across channels (sum)\n # 2. aggregate accross examples\n # 3. 
trim to the pattern\n (\"profile\", data.get_imp(pattern, task, 'profile').sum(axis=-1).mean(axis=0)[i:j].mean()),\n (\"counts\", data.get_imp(pattern, task, 'counts').sum(axis=-1).mean(axis=0)[i:j].mean()),\n ])", "title": "" }, { "docid": "be955f24ef7339af7397fddf97177e62", "score": "0.5302984", "text": "def get_rainfall_average(self):\n total_rainfall = 0\n counter = 0\n while counter < self.get_number_days():\n total_rainfall = total_rainfall + self._ndays_weather[counter].get_rainfall()\n counter = counter + 1\n rainfall_average = total_rainfall / counter\n return rainfall_average", "title": "" }, { "docid": "be955f24ef7339af7397fddf97177e62", "score": "0.5302984", "text": "def get_rainfall_average(self):\n total_rainfall = 0\n counter = 0\n while counter < self.get_number_days():\n total_rainfall = total_rainfall + self._ndays_weather[counter].get_rainfall()\n counter = counter + 1\n rainfall_average = total_rainfall / counter\n return rainfall_average", "title": "" }, { "docid": "a2e073239a6817e689ad95af6b9c9971", "score": "0.52982455", "text": "def check_day(timetable):\n score_day = 0\n \n for day in range(5):\n day_check = {}\n for timeslot in range(5):\n for activity in timetable[timeslot][day]:\n name = activity.name\n group_id = int(activity.group_id)\n \n try:\n counters = day_check[name]\n counters[group_id] = counters[group_id] + 1\n day_check[name] = counters\n except KeyError:\n no_groups = courses[name].no_groups\n counters = np.zeros(no_groups+1)\n counters[group_id] = 1\n day_check[name] = counters\n\n for key in day_check.keys():\n counters = day_check[key]\n if counters[0] > 1:\n score_day -= (10*(counters[0] - 1))\n no_groups = courses[key].no_groups\n \n for group_id in range(1, len(counters)):\n if counters[0] > 0 and counters[group_id] > 0:\n score_day -= (10./no_groups*counters[group_id])\n elif counters[0] == 0 and counters[group_id] > 1:\n score_day -= (10./no_groups*(counters[group_id] - 1))\n \n return score_day", "title": "" }, { "docid": "901a213ce1de18974154a7ca2e40c692", "score": "0.52981085", "text": "def get_processing_time_avg(jobs: list):\n\n\t\ttotal_processing_time = 0\n\t\tfor job in jobs:\n\t\t\ttotal_processing_time += job._processing_time\n\t\t\t\n\t\treturn total_processing_time/len(jobs)", "title": "" }, { "docid": "84fcfb66ada62a3cee951010f4809c1d", "score": "0.5297674", "text": "def average(self):\n totals = self.counts.aggregate(total=Sum(\"count\"), seconds=Sum(\"seconds\"))\n if totals[\"seconds\"] is None or totals[\"total\"] is None:\n return 0\n\n average = float(totals[\"seconds\"]) / totals[\"total\"]\n return average", "title": "" }, { "docid": "b79dbac6c34f1a2708db0aad91ec80a5", "score": "0.5294692", "text": "def test_report_default_average(report):\n assert 0 == report.avg()", "title": "" }, { "docid": "294ab1915937c49ace8068eb1b26e86f", "score": "0.5291609", "text": "def get_timebox_estimate_count(self) -> int:\n total = 0\n for task in self.tasks:\n if task[\"tb_estimate\"]:\n total += task[\"tb_estimate\"]\n return total", "title": "" }, { "docid": "f96d07357012530913b8c432700b4758", "score": "0.5282953", "text": "def _mean(self):\n try:\n return (self.sum / self.count)\n except:\n return 0", "title": "" }, { "docid": "741d1507aad06acb9a4d1897ab9f2ba4", "score": "0.5282438", "text": "def avg(tup):\n result = 0\n if len(tup) == 0:\n return float(result)\n else:\n for x in tup:\n result = result + x\n return result/len(tup)", "title": "" }, { "docid": "84c514fcce6889ca23b2d2e10b6578cc", "score": "0.5281026", "text": "def 
get_average_import_time() -> float:\n last_week = timezone.now() - datetime.timedelta(days=7)\n recent_avg = (\n models.ImportJob.objects.filter(created_date__gte=last_week, complete=True)\n .exclude(status=\"stopped\")\n .annotate(\n runtime=ExpressionWrapper(\n F(\"updated_date\") - F(\"created_date\"),\n output_field=fields.DurationField(),\n )\n )\n .aggregate(Avg(\"runtime\"))\n .get(\"runtime__avg\")\n )\n\n if recent_avg:\n return recent_avg.total_seconds()\n return None", "title": "" }, { "docid": "d2a5feb8742bbff317aacb89925e0e5e", "score": "0.52801096", "text": "def daily_avg_dynamic(station_number, day):\n engine = scraper.connect_db(\"DublinBikeProjectDB.cun91scffwzf.eu-west-1.rds.amazonaws.com\", \"3306\", \"DublinBikeProjectDB\", \"theForkAwakens\", \"db_password.txt\")\n sql = \"select bikes_available, bike_stands_available from availability where station_number = %s and day = %s;\"\n results = engine.execute(sql,station_number,day).fetchall()\n bikes = []\n bike_stands = []\n \n for row in results:\n bikes.append(row[\"bikes_available\"])\n bike_stands.append(row[\"bike_stands_available\"])\n\n avg_bikes = int(round((sum(bikes)/len(bikes)), 0))\n avg_bike_stands = int(round((sum(bike_stands)/len(bike_stands)), 0))\n \n engine.dispose()\n return avg_bikes, avg_bike_stands", "title": "" }, { "docid": "d459331380fddf19074759ad3ffa2306", "score": "0.52748054", "text": "def Average(lst): \n if len(lst)!=0:\n return sum(lst) / len(lst) \n else:\n return(0)", "title": "" }, { "docid": "9f6513b7bafc1292423c93fcc14aecd5", "score": "0.5272187", "text": "def avg(iterable):\n items = [item for item in iterable if item is not None]\n if len(items) == 0:\n return None\n return float(sum(items)) / len(items)", "title": "" }, { "docid": "9f6513b7bafc1292423c93fcc14aecd5", "score": "0.5272187", "text": "def avg(iterable):\n items = [item for item in iterable if item is not None]\n if len(items) == 0:\n return None\n return float(sum(items)) / len(items)", "title": "" }, { "docid": "afee28fd0be01b022ae20f93f68445be", "score": "0.5271105", "text": "def avg(dat):\n\ttry:\n\t\treturn sum(dat)/len(dat)\n\texcept ZeroDivisionError:\n\t\treturn 0.0", "title": "" }, { "docid": "2bae05013ec9f2b95a2d896120d49292", "score": "0.52705556", "text": "def average_duration(results: Tuple[TestResult]) -> int:\n # Find whether this test has ever been successful.\n if any(not t.failed for t in results):\n candidates = filter(lambda t: not t.failed, results)\n else:\n candidates = results\n\n # Find the average duration over all executions.\n total_duration = reduce(\n lambda acc, r: (acc[0] + r.duration, acc[1] + 1),\n candidates,\n (0, 0)\n )\n\n return total_duration[0] / total_duration[1]", "title": "" }, { "docid": "9271caad78f8c14ea798c99113dc2911", "score": "0.52674055", "text": "def get_avg_arithmetical(lst):\n if check_all_numbers(lst):\n return sum(lst) / len(lst)\n return None", "title": "" }, { "docid": "ac31d08dce782ff56d763cd2629f6579", "score": "0.5265613", "text": "def get_task_total_charged_hrs(self,cr,uid,ids,context={},arg=None,obj=None):\n result = {}\n for f in self.browse(cr, uid,ids): \n sql=\"\"\"select COALESCE(sum(hours),0) from project_task_work where task_id=\"\"\"+str(f.id)\n cr.execute(sql)\n res=cr.fetchone()\n result[f.id] = float(res[0])\n return result", "title": "" }, { "docid": "fe238a3a5dc9fb7fea0f2178c5196b5f", "score": "0.526451", "text": "def calculate_mean(data: []) -> float:\n try:\n return round(sum(data) / len(data), 2)\n except ZeroDivisionError:\n return 0.0", 
"title": "" } ]
bda7477a24eb68e08cb740b5646017e8
Get the value of GPIO_DSn
[ { "docid": "a54678832faf27520d90b3ba3965f6b4", "score": "0.762052", "text": "def DS(self):\n val = self.chip[\"GPIOConfig_DS\"][\"GPIO_DS<8:0>\"]\n if val & (1<<self.n)>0:\n return 1\n return 0", "title": "" } ]
[ { "docid": "8cc13613535efa6b50a0e48ee178d9ca", "score": "0.71249497", "text": "def get_digital(self):\n [error,digital_value] = self.controller.GPIODigitalGet(self.input_digital_channel)\n return digital_value", "title": "" }, { "docid": "38ae247f1193754b27cd48cbdb120c5b", "score": "0.66686857", "text": "def DS(self, value):\n tmp = self.chip[\"GPIOConfig_DS\"][\"GPIO_DS<8:0>\"]\n mask = 1<<self.n\n tmp &= (0xFFFF ^ mask)\n if value > 0:\n tmp |= mask\n self.chip[\"GPIOConfig_DS\"][\"GPIO_DS<8:0>\"] = tmp", "title": "" }, { "docid": "742a55a7966713faf176c65af5a1b95e", "score": "0.65826094", "text": "def di( self, port ):\n assert( port in ( 1, 2 ) )\n self.protocol.gpio( port=port )\n value = ImpinjR2KReader.analyze_data( 'DATA' )( lambda x, y : y )( self, None )\n return value[0] if port == 1 else value[1]", "title": "" }, { "docid": "26005974b03ff39932e7153eb7416672", "score": "0.6471843", "text": "def digitalRead(self, pin):\n return (self.sendCommand('r', pin, 1))[0]", "title": "" }, { "docid": "2dba13ac53d2d832a704ce5acbad47e8", "score": "0.6452712", "text": "def get_digital_output(self, pin):\n return rpi_gpio.RaspberryPiDigitalOutput(pin)", "title": "" }, { "docid": "8637d8c0461fe30a23e22782977e1c60", "score": "0.62205756", "text": "def digital_read(self, pin):\r\n return (self.digital_input_data[pin >> 3] >> (pin & 0x07)) & 0x01", "title": "" }, { "docid": "272e7e80994e0a82c86af0027e0f4cf0", "score": "0.6199846", "text": "def getDigitalPinValue(self, pin):\n\n value = self.get_pin_value_service(pin, \"digital\")\n return value.data", "title": "" }, { "docid": "1e7b9d3eab4dd6a8c41f79985bf8e1f9", "score": "0.61964935", "text": "def _get_DIO_values(self, print_vals=False):\n dio_pins = c_uint16()\n # fetch digital IO information from the device\n dwf.FDwfDigitalIOStatus(self.interface_handler)\n # read state of all pins, regardless of output enable\n dwf.FDwfDigitalIOInputStatus(self.interface_handler, byref(dio_pins))\n if print_vals:\n print \"Digital IO Pins: \" + binary_num_str(dio_pins.value) + \"\\n\"\n return dio_pins.value", "title": "" }, { "docid": "ad4ecea57695c3210a710268ccfc67f1", "score": "0.6123439", "text": "def value(self):\n if self._direction == digitalio.Direction.OUTPUT:\n return self._value\n return self._seesaw.digital_read(self._pin)", "title": "" }, { "docid": "146d08b44b94dab43b8a1657b5a5a180", "score": "0.6097254", "text": "def get_digital_input(self, pin):\n return rpi_gpio.RaspberryPiDigitalInput(pin)", "title": "" }, { "docid": "9ec6772f6e4a20dd75230ee080dff4f4", "score": "0.6091821", "text": "def digital_read(pin_number):\r\n emulator_parts.request_digtial_read = True\r\n global rpi_emulator\r\n value = rpi_emulator.emu_screen.input_pins[pin_number-1].value\r\n emulator_parts.request_digtial_read = False\r\n\r\n rpi_emulator.emu_screen.queue_draw()\r\n return value", "title": "" }, { "docid": "64ea012d90bc90c16717b30600579eae", "score": "0.59712964", "text": "def read(self):\n return self._dev.value\n # test\n # return '0'", "title": "" }, { "docid": "995b907ebfae27911b45bed08283eb8c", "score": "0.58921754", "text": "def gpio(self):\n return self._gpio[0]", "title": "" }, { "docid": "f1ac7f172899bb47278f90412d5c1d25", "score": "0.58726084", "text": "def value(self):\n return self.port.device.get()", "title": "" }, { "docid": "746ac5e374ad3581de3d662d68cc3200", "score": "0.5832174", "text": "def getValue(self):\n if GPIO.input(self.dataPin):\n return self.__MAX\n else:\n return self.__MIN", "title": "" }, { "docid": "c7990b34ed3b6f300223fb6be1d30ece", 
"score": "0.578788", "text": "def spi_read( self ):\n\t\td = 0\n\n\t\tfor i in range( 7, -1, -1 ):\n\t\t\tself.clk_pin.value( 0 )\n\t\t\tsleep_ms( 1 )\n\n\t\t\tif self.data_pin.value():\n\t\t\t\t# set the bit to 0 no matter what\n\t\t\t\td = d | (1 << i)\n\n\t\t\tself.clk_pin.value( 1 )\n\t\t\tsleep_ms( 1 )\n\n\t\treturn d", "title": "" }, { "docid": "65c281f87b3b29280f37026aa1a4bb4d", "score": "0.5783263", "text": "def on(self):\n return GPIO.input(self.dataPin)", "title": "" }, { "docid": "cebacbd5ddae8d00d3f5925c8a16138d", "score": "0.57822937", "text": "def getJD(self):\r\n self.update()\r\n JD = self.STAT_jd\r\n print(JD)\r\n return JD", "title": "" }, { "docid": "60932b2a05d249bb9921c4caac35f581", "score": "0.56953675", "text": "def get_sd(self):\n return self.sday", "title": "" }, { "docid": "356a880ec6bec697a38d6cb56b0f9a4b", "score": "0.569014", "text": "def sd(self):\n return self[\"sd\"]", "title": "" }, { "docid": "67f99256fe6462ebd0324662178b293e", "score": "0.56621313", "text": "def dni(self) -> int:\n return self._dni", "title": "" }, { "docid": "cd4e51395472fe78074658223b436592", "score": "0.5651221", "text": "def get_value(self):\n return self.ipcon.send_request(self, IO4.FUNCTION_GET_VALUE, (), '', 'B')", "title": "" }, { "docid": "eda966c4515f65e1fb71eaebfb48f43f", "score": "0.5638606", "text": "def read(self):\n self.io_type = self.gpio_lib.IN\n value = self.gpio_lib.input(self.pin_name)\n pub.sendMessage(\"write msg\", author = self.pin_name, msg = value)\n return value", "title": "" }, { "docid": "e3e2fa2fe6e73471b5508c46c6037be0", "score": "0.5588738", "text": "def GetSensorData(self):\n self.downlink.WriteCommand(\"d\")\n return self.downlink.ReadReply(2)", "title": "" }, { "docid": "ba4d01d68d4e408f152ced3199b659a6", "score": "0.5571732", "text": "def readGPIO(self, pin):\n GPIO.setup(pin, GPIO.OUT)\n return GPIO.input(pin)", "title": "" }, { "docid": "9785512478b7d6e11a393dc47426f396", "score": "0.55451155", "text": "def get_sdv(self):\n return self.sdv", "title": "" }, { "docid": "a9791353011ccf502adb44a073e57261", "score": "0.5540797", "text": "def get_D(self):\n \n return self.D", "title": "" }, { "docid": "8d035a1ef143058547f866284452a47a", "score": "0.54789233", "text": "def read(self):\n if self.is_disposed:\n raise ObjectDisposedException(\"PiFaceGpioDigital\")\n\n if self.inner_pin.value < self.GPIO_B_OFFSET:\n return self.__read(self.REGISTER_GPIO_A)\n\n return self.__read(self.REGISTER_GPIO_B)", "title": "" }, { "docid": "5978ab6addc335e8486882c156dbe2cb", "score": "0.54744184", "text": "def get_direction(gpio_pin):\n direction = ctypes.c_uint8(0)\n ret = _LIB.gpio_get_direction(gpio_pin, ctypes.byref(direction))\n if ret < 0:\n raise Exception(\"gpio get direction failed\")\n return direction.value", "title": "" }, { "docid": "7738cf38ce91abee46940b20bfb3f493", "score": "0.54508626", "text": "def PortDelayValue(self):\r\n\t\treturn self._get_attribute('portDelayValue')", "title": "" }, { "docid": "65b52d8ffc16b423cdb359faf1ccde49", "score": "0.54499793", "text": "def PortDelayValue(self):\n\t\treturn self._get_attribute('portDelayValue')", "title": "" }, { "docid": "65b52d8ffc16b423cdb359faf1ccde49", "score": "0.54499793", "text": "def PortDelayValue(self):\n\t\treturn self._get_attribute('portDelayValue')", "title": "" }, { "docid": "65b52d8ffc16b423cdb359faf1ccde49", "score": "0.54499793", "text": "def PortDelayValue(self):\n\t\treturn self._get_attribute('portDelayValue')", "title": "" }, { "docid": "429b3bc967d457c03af272206fbdbc0d", "score": 
"0.5449892", "text": "def read(self):\n if self.mode == UNAVAILABLE:\n raise IOError(\"Cannot read pin {0}\".format(self.__str__()))\n return self.value", "title": "" }, { "docid": "9c0822fcd2df048d845e2265951ed50e", "score": "0.5447525", "text": "def readPortValue(self, addr):\n # TODO: to be implemented\n portValue = 0\n return portValue", "title": "" }, { "docid": "979890c0161e1b730c3e7177c0d20be0", "score": "0.5398617", "text": "def dx(self):\n return self[\"dx\"]", "title": "" }, { "docid": "8ae5085404acf638841e2bf078e6c315", "score": "0.5393605", "text": "def DuidStepValue(self):\n if self.force_auto_sync:\n self.get('DuidStepValue')\n return self._DuidStepValue", "title": "" }, { "docid": "102a91f4b4b9218e1043ddd9f23c01fa", "score": "0.53600806", "text": "def ReadField(self):\n self.write('MEAS?')\n self.read() # device Echos command\n field=self.read() # but we want the result so we return the second read\n return float(field.split(' ')[0]) # Pull only the numeric value", "title": "" }, { "docid": "aeef2f6a86c3a77aa0a7521eee8f1655", "score": "0.532019", "text": "def meas_dc_voltage(self):\r\n return self.u_dc0", "title": "" }, { "docid": "aeef2f6a86c3a77aa0a7521eee8f1655", "score": "0.532019", "text": "def meas_dc_voltage(self):\r\n return self.u_dc0", "title": "" }, { "docid": "810f2e1fc422d3cfba3d0d3f7c0697d7", "score": "0.53174794", "text": "def target_dpus(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"target_dpus\")", "title": "" }, { "docid": "b334de1dbb38cea1a1eceb423d8b870e", "score": "0.5309844", "text": "def sysfs_read(self, inp):\n try:\n with open(inp, \"r\") as f:\n str_val = f.readline().rstrip(\"\\n\")\n if str_val.find(\"0x\") == -1:\n val = int(str_val, 10)\n else:\n val = int(str_val, 16)\n return val\n except Exception:\n return None", "title": "" }, { "docid": "3e257a9a586ce091ce4094b04c297a44", "score": "0.5292646", "text": "def get_SDR_Address(self):\n return self.SDR_Address", "title": "" }, { "docid": "1474c75ac50218a9d83756405191a7fb", "score": "0.52916133", "text": "def read(self):\n # Read value\n try:\n buf = os.read(self._fd, 8)\n except OSError as e:\n raise LEDError(e.errno, \"Reading LED brightness: \" + e.strerror)\n\n # Rewind\n try:\n os.lseek(self._fd, 0, os.SEEK_SET)\n except OSError as e:\n raise LEDError(e.errno, \"Rewinding LED brightness: \" + e.strerror)\n\n return int(buf)", "title": "" }, { "docid": "1474c75ac50218a9d83756405191a7fb", "score": "0.52916133", "text": "def read(self):\n # Read value\n try:\n buf = os.read(self._fd, 8)\n except OSError as e:\n raise LEDError(e.errno, \"Reading LED brightness: \" + e.strerror)\n\n # Rewind\n try:\n os.lseek(self._fd, 0, os.SEEK_SET)\n except OSError as e:\n raise LEDError(e.errno, \"Rewinding LED brightness: \" + e.strerror)\n\n return int(buf)", "title": "" }, { "docid": "de032e3c6c532167a58e393abc9d817d", "score": "0.5287876", "text": "def gpio_read(handle, gpio):\n return _u2i(_lgpio._gpio_read(handle&0xffff, gpio))", "title": "" }, { "docid": "50159a81580f640496d07b2b877e210b", "score": "0.5263467", "text": "def value(self):\n return _get_bit(self._aw.inputs, self._pin)", "title": "" }, { "docid": "2eb3734342f35a3e0a26f03728d8ca61", "score": "0.52594125", "text": "def get_value(self):\n return self.sensor.is_pressed", "title": "" }, { "docid": "e0a950e3fc45baef17982e7e8463de41", "score": "0.52555054", "text": "def raw_to_val(self, raw_val):\n port = (raw_val & 0xf0) >> 4\n pin = raw_val & 0x0f\n if port == 0:\n return 'unconfigured'\n ans = '{:c}{}'.format(0x40 + port, pin)\n 
return ans", "title": "" }, { "docid": "31b65c5e98cc05e051cc88d9c65e35d0", "score": "0.52364427", "text": "def digitalWrite(self, pin, value):\n if value == 'HIGH':\n self.sendCommand('h', pin, 0)\n elif value == 'LOW':\n self.sendCommand('l', pin, 0)\n else:\n raise Exception(\"Value\" + value + \" not understood\")", "title": "" }, { "docid": "92bb52d0c36694bd63d488fb75ce36c5", "score": "0.52300185", "text": "def value(self):\n # get() always returns a list with a single entry\n return self.port.device.get()[0]", "title": "" }, { "docid": "defd51809c069d4e7dd7a8d57a773b70", "score": "0.52107406", "text": "def target_dpus(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"target_dpus\")", "title": "" }, { "docid": "730e0622f6f620a69b5584dd3daf73a9", "score": "0.51761526", "text": "def get_dbg_value(self, dbg_val_id):\n if dbg_val_id is not None:\n return self.dbg_values[dbg_val_id]\n\n return None", "title": "" }, { "docid": "b3e6a5eea4417c35d0d7deeabd3810a4", "score": "0.5173155", "text": "def D(self):\n return self.gate_opts['max_bond']", "title": "" }, { "docid": "5c19c4c94ec430eb9b6902b4dcb9c5b6", "score": "0.5171636", "text": "def value(self, val=None):\n # Digital In / Out\n if self._mode in (Pin.IN, Pin.OUT):\n if val is not None:\n if val == self.LOW:\n self._value = val\n mypin = mraa.Gpio(self.id)\n mypin.write(0)\n elif val == self.HIGH:\n self._value = val\n mypin = mraa.Gpio(self.id)\n mypin.write(1)\n else:\n raise RuntimeError(\"Invalid value for pin\")\n return None\n return mraa.Gpio.read(mraa.Gpio(self.id))\n # Analog In\n if self._mode == Pin.ADC:\n if val is None:\n # Read ADC here\n mypin = mraa.Aio(self.id)\n mypin.read()\n return mypin.read()\n # read only\n raise AttributeError(\"'AnalogIn' object has no attribute 'value'\")\n # Analog Out\n if self._mode == Pin.DAC:\n \"\"\"if val is None:\n # write only\n raise AttributeError(\"unreadable attribute\")\n # Set DAC here\n mypin = mraa.Aio(self.id)\n mypin.setBit(val)\"\"\"\n raise AttributeError(\n \"AM65xx doesn't have an DAC! 
No Analog Output possible!\"\n )\n raise RuntimeError(\n \"No action for mode {} with value {}\".format(self._mode, val)\n )", "title": "" }, { "docid": "e2bcd008c743d1e0dfc147faf1e079c5", "score": "0.5166182", "text": "def OUT(self):\n val = self.chip[\"GPIOOutData\"][\"GPIO_OUT_SPI<8:0>\"]\n if val & (1<<self.n)>0:\n return 1\n return 0", "title": "" }, { "docid": "2188b222ce0c6b7aa0af5dbd4652ce72", "score": "0.51488525", "text": "def get_value(self):\n return self.sensor.raw", "title": "" }, { "docid": "9cedf7c67ee801e6ef185deedf563e65", "score": "0.5142049", "text": "def get_voltage(pin): # helper\n return (pin.value * 3.3) / 65536", "title": "" }, { "docid": "35bee1d442a5a80331d1a5ae7ddce8c2", "score": "0.51337487", "text": "def getdkdT(self, T=295, P=0.1):\n dkdT = 0 # zero when conductivity is a constant in temperature\n return dkdT", "title": "" }, { "docid": "5774597949c888cac0e9298165422a02", "score": "0.5130272", "text": "def get_value(self):\n now = time.monotonic()\n if now + 0.5 < max(self._last_range_change, self.switcher._start_switch) + self.pause:\n return None\n result = self.get_param(f'RDGR{self.channel}')\n if self.autorange:\n self.fix_autorange()\n if now + 0.5 > self._last_range_change + self.pause:\n rng = int(max(self.minrange, self.range)) # convert from enum to int\n if self.status[1] == '':\n if abs(result) > self.RES_SCALE[rng]:\n if rng < 22:\n rng += 1\n else:\n lim = 0.2\n while rng > self.minrange and abs(result) < lim * self.RES_SCALE[rng]:\n rng -= 1\n lim -= 0.05 # not more than 4 steps at once\n # effectively: <0.16 %: 4 steps, <1%: 3 steps, <5%: 2 steps, <20%: 1 step\n elif rng < self.MAX_RNG:\n rng = min(self.MAX_RNG, rng + 1)\n if rng != self.range:\n self.write_range(rng)\n self._last_range_change = now\n return result", "title": "" }, { "docid": "4b9c9546bc8808339e365d5d237ea378", "score": "0.511538", "text": "def get_port(self):\n return self._parent.get_pin_port(self._line)", "title": "" }, { "docid": "e34e4bd61c923539fd3631bc68fd26e7", "score": "0.5115135", "text": "def fctl_d(self,):\n return 1 & (self._fctl >> HeyMacFrame.FCTL_D_SHIFT)", "title": "" }, { "docid": "a8fe6409a700c0776e7cfc38d33d4453", "score": "0.51007426", "text": "def reg_read(self, addr):\n byte_addr = addr << 2;\n msg_bytes = self._unpack_as_bytes([0xABCDB001, byte_addr])\n self.comm.write(bytearray(msg_bytes))\n rx_str = \"\"\n for i in xrange(4):\n ch = self.comm.read(1)\n if len(ch) > 0:\n rx_str += ch\n else:\n util.error(\"Did not get response from FPGA while reading address 0x%x\\n\"\n \"Please check the *clock speed* is correct in the setup script \"\n \"and that the *UART pins* are assigned.\" % addr)\n rx_data = struct.unpack(\"I\", rx_str)\n return rx_data[0]", "title": "" }, { "docid": "d4de5ab163134361f87da406f4c5a73f", "score": "0.5098857", "text": "def kdc_port(self):\n return self._kdc_port", "title": "" }, { "docid": "d305e87536ee1b7baf15f5391070bde7", "score": "0.50986075", "text": "def GetAttribute(self):\n return _pcbnew.D_PAD_GetAttribute(self)", "title": "" }, { "docid": "d78ef30b28e87180f9dac8922859240f", "score": "0.5098284", "text": "def duty_cycle_sp(self):\n self._duty_cycle_sp, value = self.get_attr_int(self._duty_cycle_sp, 'duty_cycle_sp')\n return value", "title": "" }, { "docid": "d78ef30b28e87180f9dac8922859240f", "score": "0.5098284", "text": "def duty_cycle_sp(self):\n self._duty_cycle_sp, value = self.get_attr_int(self._duty_cycle_sp, 'duty_cycle_sp')\n return value", "title": "" }, { "docid": "829b23d370f096349f0923d44f46589c", "score": 
"0.50921065", "text": "def getPerDofVariable(self, *args):\n return _openmm.CustomIntegrator_getPerDofVariable(self, *args)", "title": "" }, { "docid": "a2e03d2235e7347f346c99985297f99b", "score": "0.5088066", "text": "def voltage(self):\n return float(self.query(\"MEAS:DC?\"))", "title": "" }, { "docid": "b8a4be00ec53ca71393b00c4f8376642", "score": "0.50848603", "text": "def _state(self):\n if self.index in range(0,14):\n raw_value = self.iop.read_cmd(iop_const.ARDUINO_DIO_BASEADDR + \\\n iop_const.ARDUINO_DIO_DATA_OFFSET)\n return (raw_value >> self.index) & 0x1\n else:\n raw_value = self.iop.read_cmd(iop_const.ARDUINO_AIO_BASEADDR + \\\n iop_const.ARDUINO_AIO_DATA_OFFSET)\n return (raw_value >> (self.index-14)) & 0x1", "title": "" }, { "docid": "8e01710e03dfa4565c1d7d6e39099dfb", "score": "0.5077588", "text": "def diode_reading(self):\n if self.read_data < 1:\n return '{:0.3f} mV'.format(self.read_data * 1e3)\n elif 1 < self.read_data < 1e3:\n return '{:0.3f} V'.format(self.read_data)\n elif 1e3 < self.read_data < 1e6:\n return '{:0.3f} kV'.format(self.read_data / 1e3)\n else:\n return '{:0.3f} MV'.format(self.read_data / 1e6)", "title": "" }, { "docid": "9c0a59af371224368e890f3c59a91b64", "score": "0.5074993", "text": "def rdn(self):\n return self[0].value", "title": "" }, { "docid": "6617fbc6a4b94932b72b99775edae488", "score": "0.50741595", "text": "def read_pullup():\r\n port, data = read(INPUT_PULLUP)\r\n return data", "title": "" }, { "docid": "ee045cd508ef87fc0ce3de52ef1422b9", "score": "0.5043322", "text": "def getDigitalSensors(self):\n self.port.write(\"getdigitalsensors\\n\")\n self.readResponseAndUpdateState()\n return [self.state[\"LSIDEBIT\"],self.state[\"RSIDEBIT\"],self.state[\"LFRONTBIT\"],self.state[\"RFRONTBIT\"]]", "title": "" }, { "docid": "a7ad61f54f8cbccb59b434cfdf8b85ed", "score": "0.50423574", "text": "def set_digital(self,value):\n [error,returnString] = self.controller.GPIODigitalSet(self.output_digital_channel,self.digital_channel,value)\n self.digital_value = value\n return error", "title": "" }, { "docid": "9f22985b9bbe00595fcc09b52b9779e7", "score": "0.50319225", "text": "def get_dx(self):\n\n tmp = self.dx\n self.dx = 0\n return tmp", "title": "" }, { "docid": "f9e64ee88145d37de8d1fdbd893cc86f", "score": "0.50305516", "text": "def ret(self):\n return DbgDword(GetRegValue('ESP'))", "title": "" }, { "docid": "b7ee787123a5d65527db2b3d984c939c", "score": "0.5029181", "text": "def sdsrc(self):\n return self[\"sdsrc\"]", "title": "" }, { "docid": "022f196445c6029b63afdc5fec7919f0", "score": "0.50257975", "text": "def getval(self):\n return self.pc", "title": "" }, { "docid": "bef505efe938154005bdd32161dada40", "score": "0.501809", "text": "def ldcs(self, address):\n self.logger.info(\"LDCS from 0x{0:02X}\".format(address))\n self.updi_phy.send([constants.UPDI_PHY_SYNC, constants.UPDI_LDCS | (address & 0x0F)])\n response = self.updi_phy.receive(1)\n if len(response) != 1:\n # Todo - flag error\n return 0x00\n return response[0]", "title": "" }, { "docid": "5ce5dc0f9bdab1b743e81a9094a56e11", "score": "0.49934077", "text": "def nc_get_datum_variable(self, ref):\n return ref.nc_get_datum_variable(default=None)", "title": "" }, { "docid": "bea7bd311bf100a385afec9cf12bb391", "score": "0.49914765", "text": "def duty_cycle(self):\n self._duty_cycle, value = self.get_attr_int(self._duty_cycle, 'duty_cycle')\n return value", "title": "" }, { "docid": "bea7bd311bf100a385afec9cf12bb391", "score": "0.49914765", "text": "def duty_cycle(self):\n self._duty_cycle, value = 
self.get_attr_int(self._duty_cycle, 'duty_cycle')\n return value", "title": "" }, { "docid": "44fa6d5ce059e7ab9dbcd1623f0077c5", "score": "0.49895477", "text": "def get_debug():\n return config_dict['debug']", "title": "" }, { "docid": "7122a691428da2ea168e438aebc15352", "score": "0.49841422", "text": "def getPerDofVariableByName(self, name):\n return _openmm.CustomIntegrator_getPerDofVariableByName(self, name)", "title": "" }, { "docid": "367641eba47c4d985f87402e0bf6e618", "score": "0.49693063", "text": "def get_pin(self, pin):\n assert 0 <= pin <= 15\n return DigitalInOut(pin, self)", "title": "" }, { "docid": "bec84e80cecae69390ea773bf386f10d", "score": "0.49621925", "text": "def duty(self):\n return float(self.ask('FUNCTION:SQUARE:DCYCLE?'))", "title": "" }, { "docid": "e99e358a0c0038f9d0d068a70754b790", "score": "0.4954396", "text": "def dstrange(self):\n fpi = self._get('fpi')\n # dm_heavy = self._get('dm_heavy')\n # return 2. * mu * dm_heavy / (8. * np.pi**2. * fpi**2.)\n dMK2 = self._get('dMK2')\n return dMK2 / (8. * np.pi**2. * fpi**2.)", "title": "" }, { "docid": "d0f90d566d99a3434744924733d4c30e", "score": "0.49487215", "text": "def read(self):\n self.adc_lib.read(\n self.pin_name) # According to the Internet, we have to do this twice\n raw_value = self.adc_lib.read(self.pin_name)\n norm_value = self._normalize_voltage(raw_value)\n pub.sendMessage(\"write msg\", author = self.pin_name, msg = norm_value)\n return norm_value", "title": "" }, { "docid": "125866cdb056ff8b8c82aa1e98794b8e", "score": "0.4945145", "text": "def position_d(self):\n self._position_d, value = self.get_attr_int(self._position_d, 'hold_pid/Kd')\n return value", "title": "" }, { "docid": "a4019b80004282aded0fe4502d1a20ef", "score": "0.49379453", "text": "def digital_read(pin_number):\r\n current_pin_values = read_input()\r\n pin_bit_mask = get_pin_bit_mask(pin_number)\r\n\r\n result = current_pin_values & pin_bit_mask\r\n\r\n # is this correct? -thomas preston\r\n if result:\r\n return 1\r\n else:\r\n return 0", "title": "" }, { "docid": "c33a3c2a2cce38dd0b525904dc49e0dd", "score": "0.49361667", "text": "def get_discid(disc):\n return read(disc.devpath)", "title": "" }, { "docid": "578942b712fa67e6606869297cf0c7bb", "score": "0.49276507", "text": "def D(self):\n return self._D", "title": "" }, { "docid": "0202c49be4576de64d52612685ef4a1b", "score": "0.49200612", "text": "def input(pin):\n id = _GetValidId(pin)\n if id not in _ExportedIds or _ExportedIds[id] != IN:\n raise WrongDirectionException\n with open('/sys/class/gpio/gpio%s/value'%id, 'r') as f:\n return f.read(1) == '1'", "title": "" }, { "docid": "e0f886a5ebf8e8490345e33bcaba35f4", "score": "0.4919389", "text": "def antenna(self) -> int:", "title": "" }, { "docid": "58cb720168d777735bb75e638dc25376", "score": "0.49106106", "text": "def input(self, pin):\n if pin not in self.pin_modes:\n raise RuntimeError(\"Pin mode must be set before setting value.\")\n\n if self.pin_modes[pin] is not self.IN:\n raise RuntimeError(\"Pin mode must be StubGPIO.IN to get value.\")\n\n if pin in self.values:\n return self.values[pin]\n return self.LOW", "title": "" }, { "docid": "352e831a9e3db536fcd6e185b8d58766", "score": "0.490999", "text": "def get_adc_temp(fpga,katadc_n):\n if not katadc_n in [0,1]: raise RuntimeError(\"katadc_n must be 0 or 1. 
Please select your ZDok port.\")\n hb=iic_read_register(fpga,katadc_n,0x4C,0x01)\n lb=iic_read_register(fpga,katadc_n,0x4C,0x11)\n return numpy.int8(hb)+numpy.uint8(lb)/float(256)", "title": "" }, { "docid": "061ea849c0f7a46d7354f0231d859010", "score": "0.49076003", "text": "def state(self):\n if self.is_disposed:\n raise ObjectDisposedException(\"PiFaceGpioDigital\")\n\n if self.inner_pin.value < self.GPIO_B_OFFSET:\n result = self.__get_state_a()\n else:\n result = self.__get_state_b()\n\n return result", "title": "" }, { "docid": "93afb472b1cfebbb0dcfd6ab57ce4028", "score": "0.49030006", "text": "def digital_write(pin_number, value):\r\n if VERBOSE_MODE:\r\n emulator_parts.emu_print(\"digital write start\")\r\n\r\n global rpi_emulator\r\n if value >= 1:\r\n rpi_emulator.emu_screen.output_pins[pin_number-1].turn_on()\r\n else:\r\n rpi_emulator.emu_screen.output_pins[pin_number-1].turn_off()\r\n\r\n rpi_emulator.emu_screen.queue_draw()\r\n\r\n if VERBOSE_MODE:\r\n emulator_parts.emu_print(\"digital write end\")", "title": "" }, { "docid": "b80f54243fc4675a8eaceb0102e39071", "score": "0.49011877", "text": "def ev_find_reg_value(self, *args):\n return _ida_idp.IDP_Hooks_ev_find_reg_value(self, *args)", "title": "" } ]
0ed61c7c3a1069b92b788921b076cd49
Execute a training step of the network
[ { "docid": "014643795bd534fce953ef32ea38328a", "score": "0.0", "text": "def RunTrainingStep(self, _input_data):\n\n data = []\n label = []\n for batch in _input_data:\n data.append(batch['data'])\n label.append(batch['label'])\n\n pred, loss, _ = (self.tf_session.run([self.prediction, self.cross_entropy, self.train],\n feed_dict={self.network_layers[0].x: data, self.truth: label}))\n \n self.AddTrainingResult(loss, len(_input_data))\n\n return loss", "title": "" } ]
[ { "docid": "45e18b87448924a6c7959ae9e64a3345", "score": "0.78879696", "text": "def training_step(self, *args, **kwargs):", "title": "" }, { "docid": "eb0802a9681d93c4fa95c1b9150625c2", "score": "0.7701176", "text": "def train(self):\n print(\"Training not yet implemented.\")", "title": "" }, { "docid": "f6e7e4f3878add0f051410311eee0bd2", "score": "0.76536506", "text": "def train_iteration(self) -> None:\n pass", "title": "" }, { "docid": "33f9f34876dc51af4a411501a7605e8b", "score": "0.75187343", "text": "def train(self):\n\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.7497195", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.7497195", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.7497195", "text": "def train(self):\n pass", "title": "" }, { "docid": "56dffde8ddee837c275c009d87f70f54", "score": "0.74356675", "text": "def train():\n pass", "title": "" }, { "docid": "85def1939fcd30f8f9e56a8d9a36b540", "score": "0.74114704", "text": "def training():\n pass", "title": "" }, { "docid": "031dddd899a984b9fa20cb8ad6f970ef", "score": "0.7381095", "text": "def train(self) -> None:\n pass # [You can insert code here.]", "title": "" }, { "docid": "79b30daf198a494b117cd33912aaf254", "score": "0.73495084", "text": "def train_model(self):\n training_iterations = self.hyperparams.get('training_steps', 1)\n for step in range(training_iterations):\n self.execute_training_step()", "title": "" }, { "docid": "8106f83f3e244d277a86e2a32e8447d9", "score": "0.73443204", "text": "def train(self) -> None:\n pass", "title": "" }, { "docid": "e4684b122d630d6918c0127b6ae7ef0b", "score": "0.7318951", "text": "def train_epoch(self):\n self.flags.train = True\n if self.epoch == (self.opt.nepoch - 1):\n # Flag last epoch\n self.flags.build_website = True\n\n self.log.reset()\n if not self.opt.no_learning:\n self.network.train()\n else:\n self.network.eval()\n self.learning_rate_scheduler()\n self.reset_iteration()\n for i in range(self.opt.loop_per_epoch):\n self.train_loop()", "title": "" }, { "docid": "3a9b54484f9f9018b2c1f1036c74ace4", "score": "0.7297736", "text": "def train(starting_epoch=0):\n global JUMP\n\n model = create_model() # Creates an object of Model class\n\n if starting_epoch: # In case starting_epoch is Non-zero\n model = load_model_weight(model, 'model_weights.pkl')\n \n (x_train, y_train, x_valid, y_valid, x_test, y_test) = load_data()\n print (\"Training Data Shape: \", x_train.shape)\n print (\"Testing Data Shape: \", x_test.shape)\n\n for i in range(starting_epoch, 300000, JUMP): # The paper trained to 300000 \n model = trainer(model,\n x_train,\n y_train,\n x_valid,\n y_valid,\n initial_epoch=i)\n #try:\n # save_model_weight(model, 'model_weights.pkl')\n #except:\n # print (\"Cannot save the model\")\n evaluate(model=model, x_test=x_test, y_test=y_test)", "title": "" }, { "docid": "6e53d57dc1968743b8dc08436858a0f3", "score": "0.7289709", "text": "def network_training(ds):\n print \"network training ...\"\n tries = 2\n bias = True\n fast = False\n previous_error = 100\n epochs = 60\n layer_dim = 1\n for _ in xrange(tries):\n print \" try: %4d\" % _\n train_ds, test_ds = ds.splitWithProportion(0.7)\n try_net = buildNetwork(train_ds.indim, int(train_ds.indim*layer_dim), train_ds.outdim, hiddenclass=SigmoidLayer, outclass=SoftmaxLayer, bias=bias, fast=fast)\n trainer = BackpropTrainer(try_net, train_ds)\n trainer.trainEpochs(epochs)\n for mod 
in try_net.modules:\n print \"Module:\", mod.name\n if mod.paramdim > 0:\n print \"--parameters:\", mod.params\n for conn in try_net.connections[mod]:\n print \"-connection to\", conn.outmod.name\n if conn.paramdim > 0:\n print \"-parameters\", conn.params\n if hasattr(try_net, \"recurrentConns\"):\n print \"Recurrent connections\"\n for conn in try_net.recurrentConns: \n print \"-\", conn.inmod.name, \" to\", conn.outmod.name\n if conn.paramdim > 0:\n print \"- parameters\", conn.params\n trnresult = percentError(trainer.testOnClassData(), train_ds['class'])\n tstresult = percentError(trainer.testOnClassData(dataset=test_ds ), test_ds['class'])\n #print test_ds['target']\n print \"epoch: %4d\" % trainer.totalepochs, \\\n \" train error: %5.2f%%\" % trnresult, \\\n \" test error: %5.2f%%\" % tstresult\n if tstresult < previous_error:\n net = try_net\n previous_error = tstresult\n NetworkWriter.writeToFile(net, \"net.xml\")\n layer_dim = layer_dim * 2", "title": "" }, { "docid": "13fbf5e75183ab90d502d03c1095377e", "score": "0.7289548", "text": "def train_step(self):\n index_mask = np.random.choice(self._x_train.shape[0], self._batch_size)\n x_batch = self._x_train[index_mask]\n y_batch = self._y_train[index_mask]\n\n self._optimizer.update(x_batch, y_batch)\n loss = self._network.getLoss(x_batch, y_batch)\n self._batch_loss_list.append(loss)\n if self._verbose: print('Train batch loss: ', str(loss))\n\n self._current_iter += 1\n if self._current_iter % self._iter_per_epoche == 0:\n self._current_epoche += 1\n\n x_train_sample, y_train_sample = self._x_train, self._y_train\n x_test_sample, y_test_sample = self._x_test, self._y_test\n\n train_loss = self._network.getLoss(x_train_sample, y_train_sample)\n test_loss = self._network.getLoss(x_test_sample, y_test_sample)\n self._train_loss_list.append(train_loss)\n self._test_loss_list.append(test_loss)\n if self._verbose: print('=== Epoche', str(self._current_epoche), ': train loss:', str(train_loss), ': test loss:', str(test_loss))", "title": "" }, { "docid": "e498c9a6e41e81124b4cf09ac81ca90b", "score": "0.7282458", "text": "def train(self) -> None:\n\n pass", "title": "" }, { "docid": "760d97a1ab51dcfdf3471e640ae8e2e6", "score": "0.7278371", "text": "def train(self):\n done = False\n obs = env.reset()\n while not done:\n\n # self.eps_value = self.eps_value_list[eps_counter]\n action = self.select_action(obs,evaluation_mode=False)\n next_obs, reward, done, info = env.step(action)\n self.replay_memory.push(obs,action,next_obs,reward,done)\n if (self.num_steps>self.min_replay_size) and (self.num_steps%50 == 0):\n self.update()\n obs = next_obs\n self.num_steps += 1", "title": "" }, { "docid": "1a05e7675a3958d7513aad8e0de729eb", "score": "0.72723305", "text": "def run():\n print \"No training required...\"", "title": "" }, { "docid": "616dcff6eec4e549342268ce1cfb980b", "score": "0.7271205", "text": "def train(self):", "title": "" }, { "docid": "616dcff6eec4e549342268ce1cfb980b", "score": "0.7271205", "text": "def train(self):", "title": "" }, { "docid": "c7c23e2feab8ea28a5794f28b7ec9db9", "score": "0.7259496", "text": "def train(self):\n # TODO: Implement the back-propagation algorithm outlined in Figure 18.24 (page 734) in AIMA 3rd edition.\n # Only parts of the algorithm need to be implemented since we are only going for one hidden layer.\n\n # Line 6 in Figure 18.24 says \"repeat\".\n # We are going to repeat self.epochs times as written in the __init()__ method.\n\n # Line 27 in Figure 18.24 says \"return network\". 
Here you do not need to return anything as we are coding\n # the neural network as a class\n delta = 0\n for i in range (self.epochs):\n print(self.x_train)\n for j in range(np.array(self.x_train.shape[0])):\n if (1 == 0):\n print(\"hei\")\n pass", "title": "" }, { "docid": "4c3c59d9e66e53f7f3eb23c2dda324a4", "score": "0.72499263", "text": "def train(self, training=True):\n pass", "title": "" }, { "docid": "41170514fb0014a054a33c26a977573b", "score": "0.72369534", "text": "def train(self):\n self.training = True\n self.validating = False\n self.val_retraining = False", "title": "" }, { "docid": "ee81bfc21aacc85684146870931a2d87", "score": "0.7182985", "text": "def train(self, num_epochs):\r\n pass", "title": "" }, { "docid": "90b3d13e890ff9df4d119709fa4302eb", "score": "0.7174323", "text": "def train_network(self):\n batch = self.memory.sample(self.batch_size)\n inputs = np.array([b[\"state\"] for b in batch])\n actions = np.array([b[\"action\"] for b in batch])\n rewards = np.array([b[\"reward\"] for b in batch])\n next_inputs = np.array([b[\"next_state\"] for b in batch])\n\n actions_one_hot = np.eye(self.action_space_size)[actions]\n\n next_qvalues = np.squeeze(self.target_network.model(next_inputs))\n targets = rewards + self.discount * np.amax(next_qvalues, axis=-1)\n\n self.online_network.train_step(inputs, targets, actions_one_hot)", "title": "" }, { "docid": "10519bed4e7e92fbac7ac440bf1d495c", "score": "0.7157696", "text": "def train(self):\n self.print(\"Log dir: {}\".format(self.logdir))\n # Calculate total step\n self.n_train = len(self.trainset)\n self.steps_per_epoch = np.ceil(\n self.n_train / self.batch_size).astype(np.int32)\n self.verbose = min(self.verbose, self.steps_per_epoch)\n self.n_steps = self.max_epochs * self.steps_per_epoch\n # calculate model parameters memory\n para = sum([np.prod(list(p.size())) for p in self.net.parameters()])\n memory = para * 4 / 1000 / 1000\n self.print('Model {} : params: {:4f}M'.format(self.net._get_name(), memory))\n self.print('###### Experiment Parameters ######')\n for k, v in self.params.items():\n self.print('{0:<22s} : {1:}'.format(k, v))\n self.print(\"{0:<22s} : {1:} \".format('trainset sample', self.n_train))\n # GO!!!!!!!!!\n start_time = time.time()\n self.train_total_time = 0\n self.time_sofar = 0\n for epoch in range(self.start_epoch, self.max_epochs + 1):\n # Decay Learning Rate\n self.scheduler.step()\n # Train one epoch\n total_loss = self.train_epoch(epoch)\n torch.cuda.empty_cache()\n # Evaluate the model\n if self.eval_freq and epoch % self.eval_freq == 0:\n acc = self.eval(epoch)\n torch.cuda.empty_cache()\n self.print(\"Finished training! 
Best epoch {} best acc {}\".format(self.best_epoch, self.best_acc))\n self.print(\"Spend time: {:.2f}h\".format((time.time() - start_time) / 3600))", "title": "" }, { "docid": "86b5bd89ad809dc9028df021820567ed", "score": "0.71401083", "text": "def on_train_step_end(self):", "title": "" }, { "docid": "c4e52bb81c4652704d59f6f5fc092023", "score": "0.7134784", "text": "def train(self, training_data):\n pass", "title": "" }, { "docid": "7efe3502ccf2e31d54693f3f5e7a4857", "score": "0.71305126", "text": "def train(self, dataset):\n iteration = 1\n while iteration < 15: # TODO: ADD AUTO STOP FUNCTIONALITY\n print(\"ITERATION %d\" % iteration)\n print(\"weightt at this iteration:\", self.weightls)\n for row in dataset:\n rsum = self.weightls[0] * 1 # bias, w0 * 1, the interception\n # print(\"current row -> \", row)\n # print((iteration, rsum))\n for i in range(len(row) - 1):\n # print(\"weight * xi: \", \" \", self.weightls[i+1], \" * \", row[i])\n rsum += self.weightls[i+1] * row[i]\n t = row[-1] - self.activation_func(rsum) # acutal - predicted\n if t != 0:\n self.update_weight(row, t)\n # print(\"t != 0\")\n\n iteration += 1\n time.sleep(0.5)", "title": "" }, { "docid": "57635c60559c92a04a4cb535625b87cd", "score": "0.7128246", "text": "def step(model):\n global trainx, trainy\n print('\\tTraining model with hyperparams: ', model[1])\n model[0].train(trainx, trainy)", "title": "" }, { "docid": "8ef5344512d22972b76ff244fa677a62", "score": "0.70925283", "text": "def train(self):\n raise NotImplementedError(\"Train method not implemented\")", "title": "" }, { "docid": "27cba51149897cda8fc90d8c42df25b1", "score": "0.7091035", "text": "def train(self):\n raise NotImplementedError", "title": "" }, { "docid": "4fd3b782ced16256b145d0fc2cd861b5", "score": "0.70871305", "text": "def train(self, network, iterations: int, dataset: [np.array, np.array]) -> None:\n ...", "title": "" }, { "docid": "83278e9c8c35ff8269d7e744825d73eb", "score": "0.70814633", "text": "def train_step(self, dataset: TrainDataset, eval_dataset: EvaluationDataset, *args, **kwargs):\n pass", "title": "" }, { "docid": "2428380a3e585c0e4a9df839814e56fd", "score": "0.70649415", "text": "def train(self, **kwargs):\n pass", "title": "" }, { "docid": "2428380a3e585c0e4a9df839814e56fd", "score": "0.70649415", "text": "def train(self, **kwargs):\n pass", "title": "" }, { "docid": "4c73e3928ce4c14941626f7bb3508838", "score": "0.7035765", "text": "def on_step_training(self, state):\n pass", "title": "" }, { "docid": "9c8e9a56ac6cf184c7965cbf117683d5", "score": "0.70246565", "text": "def train_all(self):\n self.train_inner()", "title": "" }, { "docid": "210ceca6374801fb5e3a1932b8aca4be", "score": "0.70152277", "text": "def train(self,\n batch_size=128,\n epochs=2,\n learning_rate=1\n ):", "title": "" }, { "docid": "7b8c3db6f33f991a995e64158a8d85d3", "score": "0.7004122", "text": "def _training_step(self, batch) -> dict:\n pass", "title": "" }, { "docid": "e12ad72e839ad3ccd3bcaeda9c10871e", "score": "0.6999807", "text": "def train(self, iterations, verbose=False):\n for i in range(iterations):\n self.feedForward()\n self._backPropagate(0.5)\n\n if verbose:\n error = self.totalError()\n print(\"It {} error = {:.2f}\".format(i, error))", "title": "" }, { "docid": "00f18c1463cc8f58dc853d043495915c", "score": "0.6989651", "text": "def train(self) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "00f18c1463cc8f58dc853d043495915c", "score": "0.6989651", "text": "def train(self) -> None:\n raise NotImplementedError", "title": "" }, { 
"docid": "24bad5b8ed4f3d9f717f595af09ac44e", "score": "0.6979436", "text": "def run():\n trainingdata = make_dataset()\n trained = training(trainingdata)\n test(trained)", "title": "" }, { "docid": "5edc6244da6c0e07561732a22355df4a", "score": "0.6968603", "text": "def test_train(self):\n model = default_N4()\n data_folder = os.path.dirname(os.path.abspath(__file__)) + '/../trace/isbi/'\n data_provider = DPTransformer(data_folder, 'train.spec')\n\n learner.train(model, data_provider, data_folder, n_iterations=10)", "title": "" }, { "docid": "7d0769ad0843a3b4c9a5c3e8ea421275", "score": "0.69664055", "text": "def train(self):\n # load the most recent checkpoint\n if self.resume:\n self.load_checkpoint(best=False)\n\n print(\"\\n[*] Train on {} samples, validate on {} samples\".format(\n self.num_train, self.num_valid)\n )\n\n for epoch in range(self.start_epoch, self.epochs):\n self.curr_epoch = epoch\n print(\n '\\nEpoch: {}/{} - LR: {:.6f}'.format(\n epoch + 1, self.epochs, self.lr)\n )\n\n # train for 1 epoch\n train_loss, train_acc, glimpses = self.train_one_epoch(epoch)\n # evaluate on validation set\n valid_loss, valid_acc, val_glimpses = self.validate(epoch)\n # # reduce lr if validation loss plateaus\n # self.scheduler.step(valid_loss)\n\n is_best = valid_acc > self.best_valid_acc\n msg1 = \"train loss: {:.3f} - train acc: {:.3f} - train glm {:.3f}\"\n msg2 = \"- val loss: {:.3f} - val acc: {:.3f} - val glm {:.3f}\"\n if is_best:\n self.counter = 0\n msg2 += \" [*]\"\n msg = msg1 + msg2\n print(msg.format(train_loss, train_acc, glimpses, valid_loss, valid_acc, val_glimpses))\n wandb.log({\n \"test_accuracy\": valid_acc,\n \"train_accuracy\": train_acc,\n 'train_glimpses': glimpses,\n 'val_glimpses': val_glimpses\n })\n\n # check for improvement\n if not is_best:\n self.counter += 1\n if self.counter > self.train_patience:\n print(\"[!] 
No improvement in a while, stopping training.\")\n return\n self.best_valid_acc = max(valid_acc, self.best_valid_acc)\n # self.save_checkpoint(\n # {'epoch': epoch + 1,\n # 'model_state': self.model.state_dict(),\n # 'optim_state': self.optimizer.state_dict(),\n # 'best_valid_acc': self.best_valid_acc,\n # }, is_best\n # )\n # decay\n # for param_group in self.optimizer.param_groups:\n # old_lr = param_group['lr']\n # param_group['lr'] = param_group['lr']*0.98\n # print(f\"Reducing LR from {old_lr} to {old_lr*0.98}\")", "title": "" }, { "docid": "7154cba57ce0cd07f22c2691c549edac", "score": "0.69555604", "text": "def train(X_train, Y_train, X_valid, Y_valid, layer_sizes,\n activations, alpha, iterations, save_path=\"/tmp/model.ckpt\"):", "title": "" }, { "docid": "49830a699e0de9a5d173ef6c9289a0f7", "score": "0.6948445", "text": "def train(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "5402cf405f6650d4eb3e4e30352fbb03", "score": "0.69430566", "text": "def _train(self, epochs, **kwargs):\n return", "title": "" }, { "docid": "20f12d7176e3912a1c61e276b3d3c365", "score": "0.6939519", "text": "def train(args):\n\n # Setup experiment ID\n exp_ID = \"%s_depth_%s_opt_%s_drop_%s_bn_%s\" % (args.model,\n args.n_inner_layers + 2,\n args.optimizer,\n args.dropout,\n args.batchnorm)\n\n ###########################\n # Data and normalization\n ###########################\n\n # Get data\n train_set = dset.MNIST(root=\".\", train=True)\n test_set = dset.MNIST(root=\".\", train=False)\n\n train_data = train_set.train_data.numpy().astype(np.float32)\n test_data = test_set.test_data.numpy().astype(np.float32)\n\n # Flatten\n train_data = np.reshape(train_data, (-1, 784))\n test_data = np.reshape(test_data, (-1, 784))\n\n # Stack\n data = np.concatenate((train_data, test_data), axis=0)\n # Scaler\n scaler = StandardScaler()\n scaler.fit(data)\n\n # Normalize data\n train_data = scaler.transform(train_data).astype(np.float32)\n test_data = scaler.transform(test_data).astype(np.float32)\n\n train_target = train_set.train_labels.numpy()\n test_target = test_set.test_labels.numpy()\n\n ###########################\n # Neural Net\n ###########################\n\n if args.hidden_dim == -1:\n hidden_dim = 784\n\n if args.model == \"RELUNet\":\n model = models.RELUNet(args.n_inner_layers, 784, hidden_dim, 10, dropout=args.dropout, batchnorm=args.batchnorm)\n elif args.model == \"SELUNet\":\n model = models.SELUNet(args.n_inner_layers, 784, hidden_dim, 10, dropout=args.dropout)\n\n if args.optimizer == \"Adam\":\n optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)\n elif args.optimizer == \"SGD\":\n optimizer = optim.SGD(model.parameters(), lr=args.learning_rate)\n\n loss_fn = torch.nn.CrossEntropyLoss(size_average=True)\n\n if args.use_cuda:\n model = model.cuda()\n loss_fn = loss_fn.cuda()\n\n # Get a list of batch idxs\n n_samples = train_target.shape[0]\n num_batches = n_samples // args.batch_size\n list_batches = np.array_split(np.arange(n_samples), num_batches)\n\n # Initialize train loss to monitor\n train_loss = np.inf\n # Dict to save losses\n d_loss = {\"train_loss\": []}\n\n for e in tqdm(range(args.nb_epoch), desc=\"Training\"):\n\n # List of train losses for current epoch\n list_train_loss = []\n\n for batch_idxs in tqdm(list_batches, desc=\"Epoch: %s, TR loss: %.2g\" % (e, train_loss)):\n\n optimizer.zero_grad()\n\n # Start and end index\n start, end = batch_idxs[0], batch_idxs[-1]\n\n # Get the data\n X = train_data[start: end + 1]\n y = train_target[start: end + 1]\n\n # Wrap 
to tensor\n X = torch.FloatTensor(X)\n y = torch.LongTensor(y)\n\n if args.use_cuda:\n X = X.cuda()\n y = y.cuda()\n\n # Wrap to variable\n X = Variable(X)\n y = Variable(y)\n\n # Forward pass\n y_pred = model(X, training=True)\n loss = loss_fn(y_pred, y)\n\n # Backward pass\n loss.backward()\n optimizer.step()\n\n list_train_loss.append(loss.cpu().data.numpy()[0])\n\n # Update train loss\n train_loss = np.mean(list_train_loss)\n d_loss[\"train_loss\"].append(float(train_loss))\n\n # Save\n with open(\"results/%s.json\" % exp_ID, \"w\") as f:\n json.dump(d_loss, f)", "title": "" }, { "docid": "17d9bbd64e947724d5a9eab41f198619", "score": "0.6937822", "text": "def train_step(self, model, data, device):\n pass", "title": "" }, { "docid": "190032ad993d2fffc63d6d172361aa53", "score": "0.6935639", "text": "def train_epoch(self):\n n_evals = rl_training.remaining_evals(\n self._trainer.step,\n self._epoch,\n self._train_steps_per_epoch,\n self._supervised_evals_per_epoch)\n for _ in range(n_evals):\n self._trainer.train_epoch(\n self._train_steps_per_epoch // self._supervised_evals_per_epoch,\n self._supervised_eval_steps)", "title": "" }, { "docid": "a14ebbbe7e9e88b4f6b50accc8c64903", "score": "0.6931989", "text": "def train(self):\n ##################\n # YOUR CODE HERE #\n ##################\n # Initialize the environment and state\n state = self.env.reset()\n t_r = 0\n for t in count():\n if t > 10000000:\n break\n # Select and perform an action\n if t> self.learning_start:\n action = self.make_action(state,False)\n else:\n action = random.randrange(4)\n next_state, reward, done, _ = self.env.step(action)\n #print(\"Step {}, reward {}\".format(t,reward))\n t_r += reward\n reward = Tensor([reward])\n if not done:\n # Store the transition in memory\n self.memory.push(torch.from_numpy(state).permute(2,0,1).unsqueeze(0), LongTensor([[action]]),\\\n torch.from_numpy(next_state).permute(2,0,1).unsqueeze(0), reward)\n else:\n self.memory.push(torch.from_numpy(state).permute(2,0,1).unsqueeze(0), LongTensor([[action]]),\\\n None, reward)\n \n if done:\n print(\"Done at time {}, total reward {}\".format(t,t_r))\n self.reward_queue.append(t_r)\n if len(self.reward_queue) > 30:\n self.reward_queue.popleft()\n t_r = 0\n next_state = self.env.reset()\n # Move to the next state\n state = next_state\n\n # Perform one step of the optimization (on the target network)\n if t > self.learning_start and t % self.learning_freq == 0:\n self.optimize_model()\n\n if (t+1)%self.save_every == 0:\n torch.save({'Q':self.Q.state_dict(),'target_Q':self.target_Q.state_dict(),'opt':self.opt.state_dict(),'reward_queue':self.reward_queue}\\\n ,'duel_{}.tar'.format(t+1))", "title": "" }, { "docid": "d896f935ca9064828890f7b5073d535f", "score": "0.69234926", "text": "def _step(self):\n # Make a minibatch of training data\n num_train = self.X_train.shape[0]\n batch_mask = np.random.choice(num_train, self.batch_size)\n X_batch = self.X_train[batch_mask]\n y_batch = self.y_train[batch_mask]\n\n # Compute loss and gradient\n loss, grads = self.model.loss(X_batch, y_batch)\n self.loss_history.append(loss)\n\n # Perform a parameter update\n for p, w in self.model.params.items():\n dw = grads[p]\n config = self.optim_configs[p]\n next_w, next_config = self.update_rule(w, dw, config)\n self.model.params[p] = next_w\n self.optim_configs[p] = next_config", "title": "" }, { "docid": "1892fe19932355b1bd1bdd0c4d5c4a28", "score": "0.6914408", "text": "def train_model(self):", "title": "" }, { "docid": "1892fe19932355b1bd1bdd0c4d5c4a28", 
"score": "0.6914408", "text": "def train_model(self):", "title": "" }, { "docid": "7598d69271805ba74572dca2a51557a1", "score": "0.6902967", "text": "def train_next(self):\n # NB self.iters_per_sess_run = 1 if not using infeeds\n if self.iters < self.max_iter:\n\n if is_nearest_multiple(self.iters, self.iters_per_sess_run, self.n_batch_freq_val) and \\\n self.experiment.data_meta['validation_size'] > 0 and \\\n self.experiment.config.validation and \\\n self.iters != 0 and \\\n self.max_iter - self.iters > self.iters_per_sess_run:\n # Evaluate model on validation set\n self.validation()\n\n if is_nearest_multiple(self.iters, self.iters_per_sess_run, int(self.max_iter / 20)) and \\\n self.iters != 0 and \\\n self.config.get('save_checkpoints', True): # Don't save CP on first or last iteration\n\n # Checkpoint session - overwrite previous\n self.save_checkpoint(timestep=-1)\n\n if is_nearest_multiple(self.iters, self.iters_per_sess_run, self.n_batch_freq_te) and \\\n self.experiment.config.testing and \\\n self.iters != 0 and \\\n self.max_iter - self.iters > self.iters_per_sess_run:\n # Don't do on penultimate iteration - will do testing after training anyway\n self.test()\n\n if is_nearest_multiple(self.iters, self.iters_per_sess_run, N_BATCH_PRINT_LOSS):\n self.train_time = 0.\n\n # Do training update and increment global step, time it\n t_before = time.time()\n train_out = self.train()\n t_after = time.time()\n self.train_time += t_after - t_before\n\n if is_nearest_multiple(self.iters, self.iters_per_sess_run, N_BATCH_PRINT_LOSS):\n # Print training progress and save to file\n tr_out_labelled = dict(zip(self.train_output_labels, train_out))\n self.experiment.log.info(self.train_update_str(n_iter=self.iters,\n tr_out=tr_out_labelled,\n time_diff=self.train_time))\n record_tr = {'n_iters': self.iters,\n 'train output': tr_out_labelled,\n 'seconds_taken': self.train_time}\n self.experiment.save_record(record_tr, scope='train_speed')\n\n return True\n else:\n if self.config.get('save_checkpoints', True):\n self.save_checkpoint(timestep=self.iters)\n\n return False", "title": "" }, { "docid": "4d6b3a40f84dcb046678adb611a2779c", "score": "0.69026494", "text": "def retrain_net_main(_):\n \n tr_files = training_file()\n flags.parse_and_retrieve(tr_files)\n retrainer.retrain_net(tr_files)", "title": "" }, { "docid": "2313eea73d4fc9f51178fe554f39703a", "score": "0.69018716", "text": "def train():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-ng', dest='Ng', default=32, type=int,\n help='N_g as defined in the original paper')\n parser.add_argument('-nd', dest='Nd', default=64, type=int,\n help='N_d as defined in original paper')\n parser.add_argument('-cd', '--cond_dim', default=128, type=int,\n help='dimension of final conditioning variable')\n parser.add_argument('-zd', '--noise_dim', type=int, default=100,\n help='noise dimension')\n parser.add_argument('-dvc', '--device', type=str, default='cuda:0',\n help='device to run on')\n parser.add_argument('--restore', default=False, action='store_true',\n help='restore checkpoint with the same hyperparameters')\n parser.add_argument('-dd', '--dataset_dir', type=str, required=True,\n help='dataset root directory')\n parser.add_argument('-id', '--image_dir', type=str, required=True,\n help='image directory wrt dataset dir')\n parser.add_argument('-ed', '--emb_dir', type=str, required=True,\n help='embedding directory wrt dataset dir')\n parser.add_argument('-avc', '--available_classes', type=str, default=None,\n help='txt to choose 
subset of classes')\n parser.add_argument('-bs', '--batch_size', type=int, default=64,\n help='batch size')\n parser.add_argument('--glr', type=float, default=2e-4,\n help='generator learning rate')\n parser.add_argument('--dlr', type=float, default=2e-4,\n help='discriminators learning rate')\n parser.add_argument('--kl_coef', type=float, help='coefficient of KLD loss')\n parser.add_argument('--uncond_coef', type=float, default=0,\n help='coefficient of unconditional losses')\n parser.add_argument('--wrong_coef', type=float, default=0.5,\n help='coefficient of discriminator fake input losses')\n parser.add_argument('--aux_coef', type=float, default=0.1,\n help='coefficient of classification loss')\n parser.add_argument('--epochs', type=int, default=600,\n help=('number of training epochs. can be used'\n ' with restore to further train a network'))\n parser.add_argument('-lfp', dest='label_flip_prob', type=float, default=0.,\n help='prob of switching real and fake labels during a batch')\n parser.add_argument('-ld', '--log_dir', type=str, required=True,\n help='root directory of logs')\n parser.add_argument('-v', '--verbose', default=False, action='store_true',\n help='whether to print error metrics during training')\n parser.add_argument('-md', '--model_dir', type=str, required=True,\n help='directory of model')\n parser.add_argument('-ci', '--checkpoint_interval', type=int, default=30,\n help='per how many epochs to save model checkpoint')\n\n\n args = parser.parse_args()\n\n gan_kw = dict(Ng=args.Ng, Nd=args.Nd, cond_dim=args.cond_dim,\n noise_dim=args.noise_dim, device=args.device,\n restore=args.restore)\n dataset_kw = dict(dataset_dir=args.dataset_dir, image_dir=args.image_dir,\n embedding_dir=args.emb_dir, batch_size=args.batch_size,\n available_classes=args.available_classes)\n try:\n kl_coef = args.kl_coef\n except AttributeError:\n kl_coef = 2 / gan_kw['cond_dim']\n\n optim_kw = dict(glr=args.glr, dlr=args.dlr, beta_1=0.5, kl_coef=kl_coef,\n uncond_coef=args.uncond_coef, wrong_coef=args.wrong_coef,\n aux_coef=args.aux_coef,\n epochs=args.epochs, label_flip_prob=args.label_flip_prob)\n log_kw = dict(log_dir=args.log_dir, n_samples=6, verbose=args.verbose,\n model_dir=args.model_dir, checkpoint_interval=args.checkpoint_interval)\n\n trainer = StackGANv2Trainer(gan_kw, dataset_kw, optim_kw, log_kw)\n trainer()", "title": "" }, { "docid": "0e7c457bc700d80e7f1072eb3bc2f12d", "score": "0.6897139", "text": "def train_start(self):\n self.img_enc.train()\n self.txt_enc.train()\n self.sim_enc.train()", "title": "" }, { "docid": "bf7f710cb8c02ab3359d55c1e51a6380", "score": "0.68930787", "text": "def finish_train(trainer):\n set_eval_mode(trainer)", "title": "" }, { "docid": "d76eee0c061e4b6854f72713668e4531", "score": "0.6882276", "text": "def set_train(self):\n self.bpNet.train()", "title": "" }, { "docid": "9d750c061d66e18f1bc666330c806fe8", "score": "0.6877971", "text": "def step_training(self):\n # init meters for loss value and metric values\n loss_meter, metric_meters = self.get_init_meters()\n\n # whether or not to record visual summary\n enable_write_figures = bool(\n self.current_epoch % self.configs.SUMM.FIGURE.TRAIN_PATIENCE == (\n self.configs.SUMM.FIGURE.TRAIN_PATIENCE - 1))\n\n # get data loader for training\n epoch_progress = self.get_progress_bar(\n iterable=self.train_loader,\n desc='Training epoch {}'.format(self.current_epoch),\n remain_on_screen=False,\n )\n\n # turn on training mode for model\n self.model.train()\n\n for sample_batch in epoch_progress:\n images, targets 
= sample_batch\n images = images.to(self.device)\n targets = targets.to(self.device)\n\n # model outputs inactivated logits\n logits = self.model(images)\n prob_maps = torch.sigmoid(logits)\n binary_maps = self.get_binary_maps(prob_maps)\n analytic_maps = self.get_analytic_maps(binary_maps, targets)\n\n # criterion inputs inactivated logits\n current_loss = self.criterion(logits, targets)\n\n if torch.isnan(current_loss):\n LOGGER.warning('Criterion (%s) reached NaN during validation '\n 'at training epoch %d, training step %d',\n self.configs.LOSS.LOSS_NAME,\n self.current_epoch, self.current_step)\n continue\n\n # update model\n self.optimizer.zero_grad()\n current_loss.backward()\n self.optimizer.step()\n\n # update meters\n loss_meter.accumulate(current_loss.item())\n current_metrics = self.get_metrics(\n prob_maps, binary_maps, targets)\n metric_meters.accumulate(current_metrics)\n\n # record visual summary\n if enable_write_figures:\n self.write_figures('train', **{\n 'images': images,\n 'targets': targets,\n # 'masks': masks,\n 'probability_maps': prob_maps,\n 'binary_maps': binary_maps,\n 'analytic_maps': analytic_maps,\n })\n\n # cyclic learning rate steps every batch instead of epoch\n if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.CyclicLR):\n self.lr_scheduler.step()\n\n self.current_step += 1\n\n epoch_progress.close()\n\n return loss_meter, metric_meters", "title": "" }, { "docid": "c8966edeca9372dd791f00ad64a6ac91", "score": "0.6875389", "text": "def learn(self):\n\n self.loss_value = float('inf') # loss value after the last iteration\n \n iteration = self.args.next_iteration if self.args.load_model else 1\n \n # load model and check if examples are ready\n if self.args.load_model:\n modelIteration = self.args.next_iteration - 1 \n if os.path.isfile(os.path.join(self.args.load_folder_file[0], getCheckpointFile(modelIteration))):\n self.nnet.load_checkpoint(self.args.load_folder_file[0], getCheckpointFile(modelIteration))\n else:\n self.nnet.load_checkpoint(self.args.checkpoint, getCheckpointFile(modelIteration))\n # check examples file\n examplesFile = os.path.join(self.args.load_folder_file[0], getCheckpointFile(modelIteration)+\".examples.mini\")\n if os.path.isfile(examplesFile):\n print(\"skip first Self-Play because examples file exists:\", examplesFile)\n self.skipFirstSelfPlay = True\n else:\n examplesFile = os.path.join(self.args.checkpoint, getCheckpointFile(modelIteration)+\".examples.mini\")\n if os.path.isfile(examplesFile):\n print(\"skip first Self-Play because examples file exists:\", examplesFile)\n self.skipFirstSelfPlay = True\n else:\n self.skipFirstSelfPlay = False\n\n for i in range(iteration, iteration+self.args.numIters):\n # bookkeeping\n print('------ITER ' + str(i) + '------' + ', start at ' + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n \n # examples of the iteration\n if not self.skipFirstSelfPlay or i>iteration:\n print('Self-Play of ITER ' + str(i))\n start = time.time()\n\n iterationTrainExamples = self.selfPlay.executeEpisodes(self.game, self.nnet, self.args, i)\n \n elapsed = time.time() - start\n print(\"all episodes took \", elapsed, \"s\")\n print(\"total examples: \", len(iterationTrainExamples))\n \n # backup examples to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i-1, iterationTrainExamples)\n \n #ask_for_continue(\"\\nSelf play finished, continue? 
[y|n]\\n\")\n\n if self.args.skipArena:\n print('Optimize of ITER ' + str(i) + ', start at ' + datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.optimize(i)\n self.compare_networks(i)\n else:\n print('Optimize_and_Evaluate of ITER ' + str(i))\n self.optimize_and_evaluate(i)\n\n check_stop_condition()\n # final competition\n # ...", "title": "" }, { "docid": "b2b66f06a6bc6d2ddd7c95c66ca9c539", "score": "0.6875028", "text": "def train_step(x_batch, y_batch):\r\n feed_dict = {\r\n cnn.input_x: x_batch,\r\n cnn.input_y: y_batch,\r\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob,\r\n }\r\n _, step, loss = sess.run([train_op, global_step, cnn.loss], feed_dict)", "title": "" }, { "docid": "4a0cdcaca22539df608e4ba45ab65035", "score": "0.68689007", "text": "def run_training(self, save_fct: Callable) -> None:\n if self.run_for_epochs is None or self.run_for_epochs > 0:\n for src, trg in self.next_minibatch():\n if self.dev_zero:\n self.checkpoint_and_save(save_fct)\n self.dev_zero = False\n with utils.ReportOnException({\"src\": src, \"trg\": trg, \"graph\": utils.print_cg_conditional}):\n dy.renew_cg(immediate_compute=settings.IMMEDIATE_COMPUTE, check_validity=settings.CHECK_VALIDITY)\n with self.train_loss_tracker.time_tracker:\n event_trigger.set_train(True)\n loss_expr = self.training_step(src, trg)\n loss, loss_value = loss_expr.compute()\n self.backward(loss, self.dynet_profiling)\n self.update(self.trainer)\n self.train_loss_tracker.report(loss_value, trg)\n if self.checkpoint_needed():\n self.checkpoint_and_save(save_fct)\n if self.should_stop_training(): break", "title": "" }, { "docid": "954730f3c93cc064ee06ae17cee2e178", "score": "0.68669844", "text": "def run(self):\n self.preprocess()\n self.train_all()", "title": "" }, { "docid": "a949db246cead7c24586d1fc2bc03e5f", "score": "0.6860227", "text": "def train(self) -> None:\n self.sess.run(self.train_op)", "title": "" }, { "docid": "90178be9aef5b7673134293280934158", "score": "0.6859336", "text": "def __call__(self):\n self.set_gpu()\n self.logger.info(\"\\U0001F3C3 beginning with training\")\n run_experiment(self.config)\n self.logger.info(\"\\U0001F3C1 training complete\")", "title": "" }, { "docid": "53dadb995cf5d61f47e392e6b06467f7", "score": "0.68539405", "text": "def train(self, model):\r\n pass", "title": "" }, { "docid": "a8ccf003b957448564a669ec5eb5ec43", "score": "0.68537307", "text": "def iterate_train(self):", "title": "" }, { "docid": "bfd9dc78b55404cf5e5ee9fbe882fffc", "score": "0.68534315", "text": "def run_training(self):\n\n self.logger.info(\"Running training...\")\n\n for episodes_rewards in self.algorithm.train():\n model_score = self.get_model_score(episodes_rewards)\n episode = len(episodes_rewards)\n\n if len(episodes_rewards) % 1000 == 0:\n print('@metric score %d %f' % (len(episodes_rewards), model_score))\n\n if self.should_save_model(model_score, episode):\n self.model_save(model_score, episode)\n\n self.update_episode_count(len(episodes_rewards))\n\n self.update_episode_count(len(episodes_rewards), force=True)\n self.model_save(model_score, episode)\n\n self.logger.info(\"Training has finished successfully\")", "title": "" }, { "docid": "97b214bd250c397f05e31ce6bd0c3f1c", "score": "0.6845571", "text": "def on_train_begin(self):", "title": "" }, { "docid": "1a10af8f00159e040461f586fd801092", "score": "0.6840541", "text": "def _post_training(self):\n pass", "title": "" }, { "docid": "d1fdfc380091ec44e3dd935da9bde0ad", "score": "0.6826358", "text": "def train(self):\n assert self.setup_params is not 
None\n self.mode = \"train\"", "title": "" }, { "docid": "918068b8d39f844d42d1b5ee9fff7c3b", "score": "0.68184006", "text": "def train(self, epochs=100, verbose=0):\n for i in range(epochs):\n self.feedforward()\n self.backprop()\n if verbose > 1:\n print(self.loss())", "title": "" }, { "docid": "89c3e2d11977e077159f479b87a50e9c", "score": "0.681252", "text": "def run(self):\n # build model\n self.build()\n # initialize\n self.initialize()\n\n # model\n self.train()", "title": "" }, { "docid": "61fab695390ed94100c58311b4b1a6e0", "score": "0.68070096", "text": "def main():\n\n parser = argparse.ArgumentParser(\n add_help=True,\n description=\"This file trains a new neural network on the given dataset.\",\n )\n parser.add_argument(\n \"data_dir\",\n help=\"data directory containing data for training\",\n action=\"store\",\n type=check_dir_validity,\n )\n parser.add_argument(\n \"--save_dir\",\n action=\"store\",\n default=\"./\",\n dest=\"save_dir\",\n help=\"directory to save model checkpoints. Expects full path, e.g. /path/to/dir/ without trailing '/'. By default it is stored in the current directory\",\n type=check_dir_validity,\n )\n parser.add_argument(\n \"--arch\",\n action=\"store\",\n default=\"vgg13\",\n dest=\"arch\",\n help=\"architecture to use as base for model training. Valid values can be found at https://pytorch.org/docs/stable/torchvision/models.html\",\n )\n parser.add_argument(\n \"--learning_rate\",\n dest=\"learning_rate\",\n type=float,\n default=0.001,\n action=\"store\",\n help=\"learning rate for the optimizer\",\n )\n parser.add_argument(\n \"--hidden_units\",\n dest=\"hidden_units\",\n type=int,\n default=512,\n action=\"store\",\n help=\"amount of hidden units to use for classifier\",\n )\n parser.add_argument(\n \"--epochs\",\n action=\"store\",\n dest=\"epochs\",\n default=1,\n help=\"amount of training runs\",\n )\n parser.add_argument(\n \"--gpu\",\n action=\"store_true\",\n default=False,\n dest=\"gpu\",\n help=\"enables training on gpu to increase performance\",\n )\n\n args = parser.parse_args()\n\n data_preparation = DataPreparation()\n data_preparation.prepare_training_data(args.data_dir)\n model_wrapper = ImageModelWrapper()\n model_wrapper.init_model(\n args.arch, int(args.hidden_units), float(args.learning_rate)\n )\n\n train(model_wrapper, data_preparation, int(args.epochs), args.gpu)\n\n model_wrapper.save(\n args.save_dir, int(args.epochs), data_preparation.class_to_idx\n )", "title": "" }, { "docid": "45ae5a7bc036ad589ac35d7eed22d17c", "score": "0.6806989", "text": "def train(self) -> None:\n begin = time.time()\n self.setup()\n previous = None\n for trainer in self.training_configs.values():\n self.current_subset = trainer.subset\n if previous is not None:\n self.current_model = IvectorExtractorModel(previous.exported_model_path)\n os.makedirs(trainer.working_log_directory, exist_ok=True)\n self.current_model.export_model(trainer.working_directory)\n self.set_current_workflow(trainer.identifier)\n trainer.train()\n previous = trainer\n logger.info(f\"Completed training in {time.time()-begin} seconds!\")", "title": "" }, { "docid": "f3cc722a860f829ab0fad37d2080d10c", "score": "0.68069714", "text": "def train_step(x_batch, y_batch, train_entity):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.input_entity: train_entity,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob,\n cnn.embedding_placeholder: embedding\n }\n _, step, summaries, _, loss, accuracy, pred= sess.run( \\\n [train_op, global_step \\\n , train_summary_op, 
cnn.embedding_init \\\n , cnn.loss, cnn.accuracy \\\n , cnn.predictions \\\n ], \\\n feed_dict)\n y_true = np.argmax(y_batch, 1)\n #print 'y_true:{}'.format(y_true)\n #print 'pred:{}'.format(pred)\n acc = metrics.precision_score(y_true, pred, average=\"micro\")\n recall = metrics.recall_score(y_true, pred, average=\"micro\")\n f1_score = metrics.f1_score(y_true, pred, average=\"micro\")\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}, recal {:g}, f1 {:g}\".format( \\\n time_str, step, loss, acc, recall, f1_score))\n sys.stdout.flush()\n train_summary_writer.add_summary(summaries, step)", "title": "" }, { "docid": "a677c2cb1fda1ab3364432bda94ec17a", "score": "0.680105", "text": "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n while True:\n err = 0\n for x, y in dataset.iterate_once(1):\n pred = self.get_prediction(x)\n\n if nn.as_scalar(y) != pred:\n self.w.update(x, -1*pred)\n err = err + 1\n if err == 0:\n break", "title": "" }, { "docid": "2f2cc667be02a51572a15082817e9b0b", "score": "0.6800669", "text": "def train(self, state, td_error):\n pass", "title": "" }, { "docid": "3ea7d78796d5de24aa502fb7d9a80352", "score": "0.6798321", "text": "def run_training():\n # TODO: argparse hyparparms\n try:\n execute_pipeline(\n (format_source_data, train_model),\n source_path=source_data_path,\n training_data_path=source_data_path,\n model_save_path=model_path\n )\n except Exception as e:\n with open(os.path.join(output_path, 'failure'), 'w') as f:\n f.write(f'Exception raised during training\\n\\n{str(e)}\\n{traceback.format_exc()}')\n raise e", "title": "" }, { "docid": "93c9f9711d81dcd744a88f49196982ca", "score": "0.67967767", "text": "def train(self):\n net = self.Model().cuda()\n net = nn.DataParallel(net, self.config.gpu_id)\n if self.config.pretrain:\n net = self.config.load_function(self.config.pretrained_model_dir,\n net)\n print 'loading data...'\n train_loader = data.DataLoader(self.train_dataset,\n batch_size=self.config.batch_size,\n shuffle=True)\n val_loader = data.DataLoader(self.val_dataset,\n batch_size=1,\n shuffle=False)\n print 'Data loaded!'\n\n stage = 0\n global_step = 0\n max_validation_acc = 0\n best_epoch = -1\n tb_writer = SummaryWriter(log_dir=self.config.tb_log_path)\n for e in range(self.config.epoch_num):\n if e in self.config.decay_points:\n stage += 1\n lr = self.config.learning_rates[stage]\n optimizer = optim.SGD(params=self.config.parameters_func(net, lr),\n lr=lr, momentum=self.config.momentum,\n weight_decay=self.config.weight_decay)\n\n # train for an epoch\n net.train()\n info = '------EPOCH %i START------' % e\n self.logger.log_to(info, self.config.logger_alias)\n global_step = self.train_an_epoch(net, optimizer, train_loader, tb_writer, e, global_step)\n info = '------EPOCH %i COMPLETE------' % e\n self.logger.log_to(info, self.config.logger_alias)\n self.logger.flush(self.config.logger_alias)\n\n # do validation\n net.eval()\n val_acc = self.validate(net, val_loader, e, tb_writer, global_step)\n if max_validation_acc < val_acc:\n max_validation_acc = val_acc\n best_epoch = e\n\n #save model\n self.save_model(net, e)\n # training complete\n info = '==========Training Complete=========='\n self.logger.log_to(info, self.config.logger_alias)\n info = 'Best accuracy is %.3f, at epoch %i' % (max_validation_acc, best_epoch)\n self.logger.log_to(info, self.config.logger_alias)", "title": "" }, { "docid": "cfaf68fda7ba922e77918e07028c3774", "score": "0.6786937", "text": "def train(self):\n self.print_log('Training 
for %d epochs' % self.flags['num_epochs'])\n \n tf_inputs = (self.x['TRAIN'], self.im_dims['TRAIN'], self.gt_boxes['TRAIN'])\n \n self.step += 1\n for self.epoch in trange(1, self.flags['num_epochs']+1, desc='epochs'):\n train_order = randomize_training_order(len(self.names['TRAIN'])) \n for i in tqdm(train_order):\n feed_dict = create_feed_dict(flags['data_directory'], self.names['TRAIN'], tf_inputs, i)\n # Run a training iteration\n if self.step % (self.flags['display_step']) == 0:\n # Record training metrics every display_step interval\n summary = self._record_train_metrics(feed_dict)\n self._record_training_step(summary)\n else: \n summary = self._run_train_iter(feed_dict)\n self._record_training_step(summary) \n \n ## Epoch finished\n # Save model \n if self.epoch % cfg.CHECKPOINT_RATE == 0: \n self._save_model(section=self.epoch)\n # Perform validation\n if self.epoch % cfg.VALID_RATE == 0: \n self.evaluate(test=False)", "title": "" }, { "docid": "fe71f9990020961a82dfcc0c5ac3656c", "score": "0.6785018", "text": "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, loss, accuracy = sess.run(\n [train_op, global_step, cnn.loss, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n if step == 601:\n return 0\n else:\n return 1", "title": "" }, { "docid": "29043a6b55c3f3dff1e246d3d5c99cdd", "score": "0.6784557", "text": "def train(self):\n for it in range(self.max_EM_iter):\n log_likelihood_prev = self.log_likelihood\n self.M_step()\n self.E_step()\n logging.info('log likelihood of iteration {0}: {1:.4f}'.format(it, self.log_likelihood))\n # Plot training process\n # plt.scatter([it],[log_likelihood_prev])\n # plt.pause(0.5)\n if abs(self.log_likelihood - log_likelihood_prev) < self.EM_tol and it > 20:\n break\n self.trained = True", "title": "" }, { "docid": "ab024f8f0e746147a767e8063aa44059", "score": "0.67819643", "text": "def train(*args, **kwargs):\n pass", "title": "" }, { "docid": "26b7aeae08c4ef380eb972387f1c194f", "score": "0.67760175", "text": "def train(self,train_data):\n print('model trained')", "title": "" }, { "docid": "e8fb57751da1300cb5772ed5af01e3ef", "score": "0.677579", "text": "def training(d):\n n = buildNetwork(d.indim, 4, d.outdim,recurrent=True)\n t = BackpropTrainer(n, d, learningrate = 0.01, momentum = 0.99, verbose = True)\n for epoch in range(0,1000):\n t.train()\n return t", "title": "" }, { "docid": "5700f1fbaa6f1af68a9810942a34e9ec", "score": "0.67731875", "text": "def train(self) -> None:\n\n # read the parameter file and check whether the required parameters are defined\n params = read_parameter_file(self.parameter_file)\n assert 'training' in params, \"Parameter file needs to contain the key 'training', \" \\\n \"in which the training configurations are defined \"\n # extract the training settings\n num_dis_iter = params['training']['num_dis_iter']\n num_gen_iter = params['training']['num_gen_iter']\n num_epochs = params['training']['num_epochs']\n lr_gen = params['training']['lr_gen']\n lr_dis = params['training']['lr_dis']\n k_dis = params['training']['k_dis']\n k_gen = params['training']['k_gen']\n\n # perform the training", "title": "" }, { "docid": "16149b6ce109b4aaab3d49bfe3d65157", "score": "0.6768789", "text": "def train(self, data):\n\t\treturn", "title": "" }, { "docid": "16ff5c317917b3dbba96525285b2ed76", "score": "0.6768485", 
"text": "def setup_training(self):\n\n self.step_counter = 0\n self.steps_before_replay = 8\n self.num_rounds = 100\n #Create new folder with time step for test run\n self.directory = create_folder()\n\n '''\n #Load expert data as demonstrations for training\n directory_expert = \"test_data/\"\n start_of_filename = \"coin\"\n init_states, action_names, new_states, rewards = load_data(directory_expert, start_of_filename)\n\n for i in range(self.qlearner.memory_size):\n self.qlearner.transitions.append([init_states[i], action_names[i], new_states[i], rewards[i], False, 1, True])\n '''", "title": "" }, { "docid": "47660c6c5b3367ca6dca7ff118843ea9", "score": "0.67659396", "text": "def train_start(self, opt):\n self.clip_enc.train()\n self.txt_enc.train()\n self.vid_seq_enc.train()\n self.txt_seq_enc.train()\n if opt.reconstruct_loss:\n self.vid_seq_dec.train()\n self.txt_seq_dec.train()\n if opt.lowest_reconstruct_loss:\n self.clip_seq_dec.train()\n self.sent_seq_dec.train()", "title": "" }, { "docid": "3cd175104e9d03999bac290d2307445e", "score": "0.676586", "text": "def train(self, train_data, *args, **kwargs):\n\t\tpass", "title": "" }, { "docid": "1b1b09239faad9f2eb8d032ae185d462", "score": "0.67591345", "text": "def _do_training(self, iteration, batch):\n\n feed_dict = self._get_feed_dict(iteration, batch)\n self._sess.run(self._training_ops, feed_dict)\n\n if iteration % self._target_update_interval == 0:\n # Run target ops here.\n self._sess.run(self._target_ops)", "title": "" }, { "docid": "b8ac52c88f5e6eb5bed5c91ca6061b89", "score": "0.67519724", "text": "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n # _, step, summaries, loss, accuracy = sess.run(\n # [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],\n # feed_dict)\n _, step, summaries, loss = sess.run([train_op, global_step, train_summary_op,\n cnn.loss], feed_dict)\n time_str = datetime.datetime.now().isoformat()\n # print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n if step % FLAGS.evaluate_every == 0:\n print(\"{}: step {}, loss {:g}\".format(time_str, step, loss))\n train_summary_writer.add_summary(summaries, step)", "title": "" }, { "docid": "64448afadf127ae4b1236145459ce89d", "score": "0.67517644", "text": "def train_step(loss):\n\traise NotImplementedError\n\treturn train_op", "title": "" }, { "docid": "be873c0095458cbbd487f432dbd7ecef", "score": "0.6750425", "text": "def train(train, test, LR):\n \n loss = {}\n accuracy = {}\n train_losses = []\n train_acc = []\n test_losses = []\n test_acc = []\n used_early_stopping = False\n \n try:\n model = Resnet().to(DEVICE)\n model.load_state_dict(torch.load(CHECKPOINT))\n \n except Exception as e:\n print(\"No saved model found!\")\n model = Resnet().to(DEVICE)\n \n optimizer = torch.optim.Adam(model.parameters(), lr=LR, betas=(0.9, 0.999))\n criterion = torch.nn.BCELoss()\n early_stop = EarlyStopping(patience=PATIENCE)\n \n for epoch in range(EPOCHS):\n\n\n print(\"Running Epoch {}\".format(epoch+1))\n\n epoch_train_loss, train_accuracy, epoch_test_loss, test_accuracy = train_loop( model, train, test, criterion, optimizer)\n train_losses.append(epoch_train_loss)\n train_acc.append(train_accuracy)\n test_losses.append(epoch_test_loss)\n test_acc.append(test_accuracy)\n \n early_stop(epoch_test_loss, model)\n \n if early_stop.early_stop:\n print(\"Early stopping\")\n os.rename(EARLY_STOPPING_PATH, FINAL_OUTPUT)\n 
used_early_stopping = True\n break\n \n print(\"Training loss: {0:.4f} Train Accuracy: {1:0.2f}\".format(epoch_train_loss, train_accuracy))\n print(\"Val loss: {0:.4f} Val Accuracy: {1:0.2f}\".format(epoch_test_loss, test_accuracy))\n print(\"--------------------------------------------------------\")\n \n if epoch%5==0:\n torch.save(model.state_dict(), \"temp_model.pth\")\n os.rename(\"temp_model.pth\", CHECKPOINT)\n \n print(\"Training done!\")\n \n loss['train'] = train_losses\n loss['test'] = test_losses\n accuracy['train'] = train_acc\n accuracy['test'] = test_acc\n \n return loss, accuracy, model, used_early_stopping", "title": "" }, { "docid": "d1bce73e84c37fb7f77563be299f07c4", "score": "0.6746053", "text": "def train_step(self):\n\n for i in range(self.train_data_length):\n if i % 10000 == 0:\n print(i)\n\n # forward\n start = time.time()\n o = self.forward(i)\n\n #print(\"Time of forward: {}s\\n\".format(time.time() - start))\n error = self.error(o.asarray(), self.Y[i])\n\n # backward\n start = time.time()\n self.backward(error, i)\n #print(\"Time of backward: {}s\\n\".format(time.time() - start))\n #input()", "title": "" } ]
d7574399af854a5dcf2ec9f029cf30f0
Command launched when 'Duplicate' QMenuItem is triggered. Duplicate cache version
[ { "docid": "769d74ecd271bda126770aa98dee5905", "score": "0.69807845", "text": "def on_miDuplicateCache(self):\n #-- Check --#\n if not os.path.exists(os.path.normpath(self.pItem.cacheFullPath)):\n raise IOError, \"!!! Cache file not found: %s !!!\" % self.pItem.cacheFullPath\n #-- Duplicate Version --#\n print \"\\n#===== Duplicate Cache File =====#\"\n print 'Duplicate %s' % self.pItem.cacheFileName\n cachePath = '/'.join(self.pItem.cacheFullPath.split('/')[:-2])\n cacheVersion = self.pItem.cacheFullPath.split('/')[-2]\n deCmds.duplicateCacheVersion(cachePath, cacheVersion)\n #-- Refresh Ui --#\n self.pWidget.rf_cacheList()\n self.pWidget.lastVersionItem._widget.on_cacheAssigned()", "title": "" } ]
[ { "docid": "41371b400127f7426b313b81795478c3", "score": "0.6907194", "text": "def on_duplicateSelected():\n toolBoxCmds.duplicateSelected()", "title": "" }, { "docid": "40f1f0a1823da38422e53dadfa4d6dcc", "score": "0.6071569", "text": "def copyCommand(self):\n\n selection = self.selectedIndexes()\n\n if selection:\n rows = [index.row() for index in selection]\n columns = [index.column() for index in selection]\n if len(rows) == 4:\n model = self.proxyModel.sourceModel()\n row = rows[3]\n column = columns[3]\n command = model.dataset.data[row][column].cell.strip()\n job = model.dataset.data[row][JobHistoryKey.Status].obj\n QApplication.clipboard().setText(command)\n self.parent.pasteCommandSignal.emit(command)\n if job is not None:\n self.parent.updateAlgorithmSignal.emit(job.algorithm)", "title": "" }, { "docid": "1e2690119b31bd0623ef15b82533ce8f", "score": "0.58980995", "text": "def on_miArchiveCache(self):\n self.cdArchive = pQt.ConfirmDialog(\"Archive seelcted cache versions ?\", ['Archive'], [self.archiveCache])\n self.cdArchive.exec_()", "title": "" }, { "docid": "dc674f6708b42b71ac80dec31acb79b2", "score": "0.5711299", "text": "def on_miDeleteCache(self):\n self.cdDelCache = pQt.ConfirmDialog(\"Delete selected versions ?\", ['Ok'], [self.deleteCache])\n self.cdDelCache.exec_()", "title": "" }, { "docid": "6dcad3a48c5f2819382160fb109cf09d", "score": "0.54042405", "text": "def Duplicate(self, *args, **kwargs):\n payload = { \"Arg1\": self.href }\n for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]\n for item in kwargs.items(): payload[item[0]] = item[1]\n return self._execute('duplicate', payload=payload, response_object=None)", "title": "" }, { "docid": "3de538d7049dd6d4884946e4a1105c0d", "score": "0.52238", "text": "def DuplicateItems(self):\n payload = { \"Arg1\": self }\n return self._execute('duplicateItems', payload=payload, response_object=None)", "title": "" }, { "docid": "81fb19f130c355e2b488453326b0fc9b", "score": "0.5197283", "text": "def contextMenuEventOriginal(self, event):\n\n index = self.indexAt(event.pos())\n row = self.proxyModel.mapToSource(index).row()\n contextMenu = QMenu(self)\n menuItems = {}\n\n for item in [\"Copy\", \"Delete\", \"Remove\"]: # Build menu first\n menuItems[item] = contextMenu.addAction(item)\n\n selection = contextMenu.exec_(event.globalPos()) # Identify the selected item\n\n if selection == menuItems[\"Copy\"]: # Specify what happens for each item\n self.copySelection()\n elif selection == menuItems[\"Remove\"]:\n self.proxyModel.filterConditions[\"Remove\"].append(row)\n self.proxyModel.setFilterFixedString(\"\")\n elif selection == menuItems[\"Delete\"]:\n self.deleteSelectedRows()", "title": "" }, { "docid": "9293295dc62c831a76600f561702c9b9", "score": "0.5189916", "text": "def process_command(self, sender, cmd, args):", "title": "" }, { "docid": "1bbca65024acfd9e1c9771fa35fe5fc5", "score": "0.5122015", "text": "def duplicate(self):\n\t\traise NotImplementedError", "title": "" }, { "docid": "838bf75bb0bf32803f213ea95b2f8588", "score": "0.51158684", "text": "def store(self):\n cmd = self.command()\n if cmd is not None:\n with auto_dupl_on(self.astergui().study().activeCase):\n cmd.rename(self._name.text())\n wid = self._viewByPath(ParameterPath(cmd))\n if wid is not None:\n cmd.init(wid.view().itemValue())", "title": "" }, { "docid": "ff1f5b8eef1f88db6233613e7701e7f1", "score": "0.50177705", "text": "def on_appendToCache(self):\n assigned = self.cacheList.assignedVersionItem\n last = self.cacheList.lastVersionItem\n if 
assigned is not None and last is not None:\n #-- Check assigned chached is last version --#\n if not assigned.cacheFileName == last.cacheFileName:\n mess = \"!!! Assigned version %r doesn't match with last version %r !!!\" % (assigned.cacheVersion,\n last.cacheVersion)\n raise ValueError, mess\n #-- Append To Cache --#\n print \"\\n#===== Append To Cache =====#\"\n cacheItem = assigned\n infoDict = cacheItem._widget.infoDict\n cachePath = os.path.dirname(os.path.normpath(cacheItem.cacheFullPath))\n cacheFile = os.path.basename(os.path.normpath(cacheItem.cacheFullPath)).split('.')[0]\n if infoDict['cacheType'] == 'nCloth':\n deCmds.appendToNCacheFile(cachePath, cacheFile, cacheItem.cacheNodeName, deCmds.getCurrentFrame(),\n self.endTime, self.mainUi.displayState, infoDict['cacheModeIndex'],\n backup=self.mainUi.backupState)\n else:\n raise AttributeError, \"!!! Append To Cache works only with clothNodes !!!\"", "title": "" }, { "docid": "3477529a2c0bef43b86e553de1d422c2", "score": "0.50047654", "text": "def __pushActTriggered(self):\n itm = self.logTree.selectedItems()[0]\n if not itm.data(0, self.__incomingRole):\n rev = itm.text(self.RevisionColumn).strip().split(\":\", 1)[0]\n if rev:\n self.vcs.hgPush(self.repodir, rev=rev)\n self.on_refreshButton_clicked(\n addNext=self.initialCommandMode == \"outgoing\")", "title": "" }, { "docid": "2f59f5c3ed0272791d474f6f73a03b0a", "score": "0.5003406", "text": "def OnDuplicateNode(self, event):\n self.DuplicateNode(self._active_node)", "title": "" }, { "docid": "009dfaaeb19e2b82e985da503f229a55", "score": "0.5002657", "text": "def cmd_dup(stack: FifthStack, args_str: str) -> None:\n a = stack.pop()\n stack.push(a)\n stack.push(a)\n return None", "title": "" }, { "docid": "12df67c8b4daf71560f8596835e1c566", "score": "0.49980277", "text": "def dup(self_update=False, snapshot=None):\n cmd = [\"transactional-update\"]\n cmd.extend(_global_params(self_update=self_update, snapshot=snapshot))\n cmd.append(\"dup\")\n return _cmd(cmd)", "title": "" }, { "docid": "768b911bdb7bfc87271eac676cc376d1", "score": "0.49930748", "text": "def custom_menu(self, modelIndex, selected_path, QPos):\n if self.file_manager.type(modelIndex) != \"preset File\":\n return\n self.pubMenu = QtWidgets.QMenu()\n parentPosition = self.file_view.viewport().mapToGlobal(QtCore.QPoint(0, 0))\n menu_item_01 = self.pubMenu.addAction(\"Publish Preset\")\n menu_item_01.setToolTip(\"Publish Preset\")\n menu_item_01.triggered.connect(self.publish_preset)\n\n user_dir = preset_share_utils.get_user_dir(getpass.getuser())\n user_dir = user_dir.replace(\"\\\\\", \"/\")\n if preset_share_utils.in_directory(selected_path, user_dir):\n menu_item_02 = self.pubMenu.addAction(\"Delete Preset\")\n self.pubMenu.insertSeparator(menu_item_02)\n menu_item_02.triggered.connect(self.delete_selected_preset)\n\n self.pubMenu.move(parentPosition + QPos)\n self.pubMenu.show()", "title": "" }, { "docid": "82f8afb6eaf725b92d193e1cdc3e1d2a", "score": "0.49794155", "text": "def add(self, name, command):", "title": "" }, { "docid": "aa71843840be2cd5adfec4dbad9fb08e", "score": "0.49789703", "text": "def __bookmarkActTriggered(self):\n if len([itm for itm in self.logTree.selectedItems()\n if not itm.data(0, self.__incomingRole)]) == 1:\n itm = self.logTree.selectedItems()[0]\n rev, changeset = (\n itm.text(self.RevisionColumn).strip().split(\":\", 1)\n )\n bookmark, ok = QInputDialog.getText(\n self,\n self.tr(\"Define Bookmark\"),\n self.tr('Enter bookmark name for changeset \"{0}\":').format(\n changeset),\n 
QLineEdit.Normal)\n if ok and bool(bookmark):\n self.vcs.hgBookmarkDefine(\n self.repodir, revision=\"rev({0})\".format(rev),\n bookmark=bookmark)\n self.on_refreshButton_clicked()", "title": "" }, { "docid": "06ede35e8f23000dcfb351de7989cd68", "score": "0.49759877", "text": "def copy(self):\n\n self._generic('IDM_COPY')", "title": "" }, { "docid": "d4e762b528300e373859eb824dafe7eb", "score": "0.4964532", "text": "def duplicate(client, slug, prefix=None):\n\n # Copped this logic directly from Redash.duplicate_dashboard\n current_dashboard = client.dashboard(slug)\n new_dash_name = \"Copy of: {}\".format(current_dashboard[\"name\"])\n new_dashboard = client.create_dashboard(new_dash_name)\n\n if current_dashboard[\"tags\"]:\n client.update_dashboard(\n new_dashboard[\"id\"], {\"tags\": current_dashboard[\"tags\"]}\n )\n\n # Widgets can hold text boxes or visualizations. Filter out text boxes.\n # I use a dictionary here because it de-duplicates query IDs\n queries_to_duplicate = {\n widget[\"visualization\"][\"query\"][\"id\"]: widget[\"visualization\"][\"query\"]\n for widget in current_dashboard.get(\"widgets\", [])\n if \"visualization\" in widget\n }\n\n # Fetch full query details for the old query IDs\n # Duplicate the query and store the result\n old_vs_new_query_pairs = [\n {\n \"old_query\": client._get(f\"api/queries/{old_query.get('id')}\").json(),\n \"new_query\": client.duplicate_query(\n old_query.get(\"id\"), new_name=\" \".join([prefix + old_query.get(\"name\")])\n ),\n }\n for old_query in queries_to_duplicate.values()\n ]\n\n # Compare old visualizations to new ones\n # Create a mapping of old visualization IDs to new ones\n old_viz_vs_new_viz = {\n old_viz.get(\"id\"): new_viz.get(\"id\")\n for pair in old_vs_new_query_pairs\n for old_viz in pair[\"old_query\"].get(\"visualizations\")\n for new_viz in pair[\"new_query\"].get(\"visualizations\")\n if old_viz.get(\"options\") == new_viz.get(\"options\")\n }\n\n # This is a version of the same logic from Redash.duplicate_dashboard\n # But it substitutes in the new visualiation ID pointing at the copied query.\n for widget in current_dashboard[\"widgets\"]:\n visualization_id = None\n if \"visualization\" in widget:\n visualization_id = old_viz_vs_new_viz.get(widget[\"visualization\"][\"id\"])\n client.create_widget(\n new_dashboard[\"id\"], visualization_id, widget[\"text\"], widget[\"options\"]\n )\n\n return new_dashboard", "title": "" }, { "docid": "21bfe9987ac3de1978af601787d1415e", "score": "0.49604604", "text": "def setupBrowserMenu(browser):\r\n a = QAction(\"Bulk-add Kanji-Cards\", browser)\r\n browser.connect(a, SIGNAL(\"triggered()\"), lambda e=browser: onRegenerate(e))\r\n browser.form.menuEdit.addSeparator()\r\n browser.form.menuEdit.addAction(a)", "title": "" }, { "docid": "2018ef15cae066d3bbf29f293eeb4f7a", "score": "0.4955932", "text": "def _context_menu_make(self, pos):\n menu = super(FrontendWidget, self)._context_menu_make(pos)\n for before_action in menu.actions():\n if before_action.shortcut().matches(QtGui.QKeySequence.Paste) == \\\n QtGui.QKeySequence.ExactMatch:\n menu.insertAction(before_action, self._copy_raw_action)\n break\n return menu", "title": "" }, { "docid": "d96c8cd8c48445b9756abeb778538f39", "score": "0.49502826", "text": "def add_unique(self, *args):\n return _ida_graph.screen_graph_selection_base_t_add_unique(self, *args)", "title": "" }, { "docid": "c5fe1a9ba51c7c1395b90b19655a4739", "score": "0.4931238", "text": "def duplicateStandin(self, *args):\n standIn = 
self.getStandInFromSelection()\n newStandin = standIn.duplicate()", "title": "" }, { "docid": "8fa1bec72d8e719a7e0a4ecec4bb434a", "score": "0.4901077", "text": "def __bookmarkMoveActTriggered(self):\n if len([itm for itm in self.logTree.selectedItems()\n if not itm.data(0, self.__incomingRole)]) == 1:\n itm = self.logTree.selectedItems()[0]\n rev, changeset = (\n itm.text(self.RevisionColumn).strip().split(\":\", 1)\n )\n bookmarksList = self.vcs.hgGetBookmarksList(self.repodir)\n bookmark, ok = QInputDialog.getItem(\n self,\n self.tr(\"Move Bookmark\"),\n self.tr('Select the bookmark to be moved to changeset'\n ' \"{0}\":').format(changeset),\n [\"\"] + bookmarksList,\n 0, False)\n if ok and bool(bookmark):\n self.vcs.hgBookmarkMove(\n self.repodir, revision=\"rev({0})\".format(rev),\n bookmark=bookmark)\n self.on_refreshButton_clicked()", "title": "" }, { "docid": "d29c593b4043fef4a468595e2b25d871", "score": "0.48985678", "text": "def handle_custom_commands(self, entry):\n return False", "title": "" }, { "docid": "412321c0a0d4dca61eb72517981a48f7", "score": "0.48933396", "text": "def ui_command_copy(self, name='@prompt', desc='@prompt'):\n name = self.ui_eval_param(name, 'string', '@prompt')\n desc = self.ui_eval_param(desc, 'string', '@prompt')\n newBpDescription = \"Created by {} as a copy of blueprint '{}'\".format(user, self.bpName)\n # Prompt for a blueprint name if necessary\n if name == '@prompt':\n b = raw_input(c.CYAN(\"\\nEnter a unique name for your new blueprint [{}]: \".format(self.bpName)))\n if len(b):\n newBpName = b\n else:\n newBpName = self.bpName\n elif name == '@auto':\n newBpName = ravello_sdk.new_name(rCache.get_bps(myOrgOnly=True), self.bpName + '_')\n else:\n newBpName = name\n # Prompt for description if necessary\n if desc == '@prompt':\n d = raw_input(c.CYAN(\"\\nOptionally enter a description for your new blueprint [{}]: \".format(newBpDescription)))\n if len(d):\n newBpDescription = d\n elif desc == '@auto':\n pass\n else:\n newBpDescription = desc\n # Create request dictionary and post new bp\n req = {\n \"blueprintId\": self.bpId,\n \"blueprintName\": newBpName,\n \"description\": newBpDescription,\n \"offline\": True,\n }\n try:\n newBp = rClient.create_blueprint(req)\n except:\n print(c.red(\"\\nProblem creating new blueprint!\\n\"))\n raise\n print(c.green(\"\\nSUCCESS! 
New blueprint '{}' created!\".format(newBpName)))\n # Add new bp to directory tree\n Bp(newBp, rootNode.get_child('blueprints'))\n print()", "title": "" }, { "docid": "1e42905f3ea0abffd97359f1131ddc01", "score": "0.4871508", "text": "def _replicate(self):\n return f'{self.version()} {self.short_version_hash()}'", "title": "" }, { "docid": "f0320bac02e46ee4041d4eb6f502e2b6", "score": "0.48475376", "text": "def launch_menu():", "title": "" }, { "docid": "63e20b6b00cf33f94d928a904b634d4b", "score": "0.48431996", "text": "def dup(self):\n self._add_instruction(Opcode.DUP)", "title": "" }, { "docid": "8e17e5e7f71f175940e94f8dd867d33a", "score": "0.48342356", "text": "def eventFilter(self, source, event):\n if event.type() == QEvent.ContextMenu:\n menu = QtWidgets.QMenu()\n menu.addAction(AddRowToListAction(self))\n menu.exec_(event.globalPos())\n return True\n return super().eventFilter(source, event)", "title": "" }, { "docid": "b181f46144935598340d205345618a31", "score": "0.48330116", "text": "def OnCopy(self, event):\n self.statusBar.SetStatusText('Copied selected text', 0)\n self.scriptCtrl.Copy()", "title": "" }, { "docid": "b6e056cec9edd440eb28f913d44f629c", "score": "0.48323214", "text": "def onCopy(self, widget):\n msg = _(\"Please enter the new group name:\")\n old_name = self.keygroup\n for grp in self.gpgconf.groups:\n if grp[0] == self.keygroup:\n name = self.getGroupName(msg)\n if name:\n n = len(self.store1)\n self.store1.append([ name, len(grp[1]) ])\n self.gpgconf.groups.append([ name, list(grp[1]) ])\n self.select.select_path(n)\n self.keygroup = name\n self.save.set_sensitive(False)\n self.ok.set_sensitive(True)\n break", "title": "" }, { "docid": "0f81695a7333a55e5290a8902be2665c", "score": "0.48127168", "text": "def _push(self, path):\n self.cache.push(path)\n self._combo_box.setCurrentIndex(0)", "title": "" }, { "docid": "0db306d40d2d1de842ce0448f17cca2e", "score": "0.4801096", "text": "def duplicated(self):\n raise NotImplementedError('To be implemented')", "title": "" }, { "docid": "e3165c07c96ffaeaf9ae511d9b477c12", "score": "0.48001036", "text": "def do_unique(self,args):\n if not self.ctlr.unique_cmd(args):\n self._invalid(\"unique\")", "title": "" }, { "docid": "abb533a0d3b4f146d18160621023dcb5", "score": "0.47776484", "text": "def _get_modified_command(self, command: CommandItem) -> CommandItem:\n self.channel.print(\"Existing command: {}\", command.command)\n new_command = self.channel.input(\"Enter new command (TAB to input old):\", command.command)\n if not new_command:\n self.channel.print(\"No change :(\")\n return command\n return command.get_modified(new_command)", "title": "" }, { "docid": "ae8cf6fa83ad91f4e2e83b9e36ee61fc", "score": "0.47611898", "text": "def command(self):\n return", "title": "" }, { "docid": "e0bf17886ac8ca0b22e641f124b847eb", "score": "0.4757458", "text": "def on_actionVersion_triggered(self):\n dialog = QMessageBox.information(self, \"Copyright\", \n QApplication.translate(\"MainWindow\", \"版权所有:\\nCopyright @ Jensen 2012.12\"\n , None, QApplication.UnicodeUTF8), QMessageBox.Ok)", "title": "" }, { "docid": "b1f710599a1e911921a6af381e275bcc", "score": "0.47509944", "text": "def CLICommand(self) -> str:", "title": "" }, { "docid": "b1f710599a1e911921a6af381e275bcc", "score": "0.47509944", "text": "def CLICommand(self) -> str:", "title": "" }, { "docid": "8e805ad4a1997e1e0a58827bd0338c84", "score": "0.47442517", "text": "def on_miAssignToSel(self):\n #-- Check --#\n if not os.path.exists(os.path.normpath(self.pItem.cacheFullPath)):\n raise 
IOError, \"!!! Cache file not found: %s !!!\" % self.pItem.cacheFullPath\n #-- Assign Version --#\n print \"\\n#===== Assign Cache File =====#\"\n print 'Assign %s' % self.pItem.cacheFileName\n cachePath = '/'.join(self.pItem.cacheFullPath.split('/')[:-1])\n cacheFile = self.pItem.cacheFullPath.split('/')[-1]\n cacheNode = deCmds.assignCacheFileToSel(cachePath, cacheFile)\n print \"// Result: New cacheFile node ---> %s\" % cacheNode", "title": "" }, { "docid": "c6db821c7665553242711baf13f332ff", "score": "0.47384992", "text": "def link_copy_action():\n self.root.clipboard_clear()\n self.root.clipboard_append(self.video_link_box.get())", "title": "" }, { "docid": "734299326103a183fb0048b567fee629", "score": "0.47337195", "text": "def on_actionModification_2_triggered(self):\n # TODO: not implemented yet\n raise NotImplementedError", "title": "" }, { "docid": "1c3d638e7fd662dff6021f10ce1bd56f", "score": "0.47310266", "text": "def perform_copy_QAction(cls, src_action, dest_parent=None, debug_print=False):\n if debug_print:\n print(f'perform_copy_QAction(src_action: {src_action}, src_action.parent(): {src_action.parent()}, dest_parent: {dest_parent})')\n new_action = QtWidgets.QAction(dest_parent)\n new_action.setIcon(src_action.icon())\n new_action.setText(src_action.text())\n new_action.setObjectName(src_action.objectName())\n new_action.setActionGroup(src_action.actionGroup())\n new_action.setEnabled(new_action.isEnabled())\n return new_action", "title": "" }, { "docid": "b634765409279ae5ca1cd34ffe9313cb", "score": "0.4717901", "text": "def setupAction_ZRdestroy(self, *args):\n #======================================================================\n # Action: Accept; discard; Browse...\n #======================================================================\n self.connect(\n self.pushButton_2, SIGNAL(\"clicked()\"), self.completeSelect)\n self.connect(\n self.pushButton, SIGNAL(\"clicked()\"), self.abortSelect)\n self.connect(\n self.tableWidget, SIGNAL(\"itemClicked(QTableWidgetItem*)\"), self.handleItemClicked)\n #Keep next line for reference : New style for managing handler\n #self.tableWidget.itemClicked.connect(self.handleItemClicked)\n\n #Make reference to QGIS main objects\n self.mapinstance = QgsMapLayerRegistry.instance()\n self.qproject = QgsProject.instance()\n #Retrieve the project Path from the QgsProject instance\n try:\n project_path = self.qproject.homePath()\n #Rebuild the current folder path\n shelve_path = os.path.join(\n str(project_path), \"current/shelf_PP.db\")\n current_path = os.path.join(\n str(project_path), \"current\")\n tmp_path = os.path.join(\n str(project_path), \"shelve.db\") \n\n #======================================================================\n #Rebuild Oproject instance from shelf\n #======================================================================\n \n shelve_p = shelve.open(shelve_path, writeback=True)\n self.oproject = shelve_p['object']\n\n Global.setOproject(self.oproject)\n self.loaded = True\n\n #======================================================================\n # make local reference on the vector Layer\n #======================================================================\n layers = QgsMapLayerRegistry.instance().mapLayers().keys()\n for layer in layers:\n if QgsMapLayerRegistry.instance().mapLayer(layer).name() != 'ZR':\n continue\n else:\n #make temporary reference on the displayed ZR layer\n self.ZRlayertmp = QgsMapLayerRegistry.instance().mapLayer(layer)\n break\n\n\n 
#======================================================================\n # Populate the tableWidget and give checkboxes\n #======================================================================\n #Make reference to QGIS main objects\n self.mapinstance = QgsMapLayerRegistry.instance()\n self.qproject = QgsProject.instance()\n #Retrieve the project Path from the QgsProject instance\n\n project_path = self.qproject.homePath()\n #Rebuild the current folder path\n current_path = os.path.join(\n str(project_path), \"current\")\n\n ###########################################\n #make connection to the underlying database\n ###########################################\n uri = QgsDataSourceURI()\n vlayerpath = os.path.join(current_path, \n \"ZR.sqlite\")\n uri.setDatabase(vlayerpath)\n schema = ''\n table = 'ZR'\n geom_column = 'geom'\n uri.setDataSource(schema, table, geom_column)\n \n vlayer = QgsVectorLayer(uri.uri(),\n 'ZR', 'spatialite')\n self.ZRlayer = vlayer\n self.ZRlayer.selectAll()\n numberRows = int(self.ZRlayer.selectedFeatureCount())\n self.ZRlayer.invertSelection()\n\n\n ###########################################\n #Count features (number of ZR)\n # numberRows = int(self.ZRlayer.featureCount())\n print numberRows\n if numberRows ==0:\n return\n ###########################################\n #set the fidlist;\n indexmax = -1 \n for feature in self.ZRlayer.getFeatures():\n if feature.id() > indexmax:\n indexmax = feature.id()\n\n\n for i in range(indexmax):\n self.fidlist.append(False)\n\n ###########################################\n #Prepare an additional column for description\n ###########################################\n numberColumns = 3\n \n self.tableWidget.setRowCount(numberRows)\n self.tableWidget.setColumnCount(numberColumns)\n ###########################################\n #prepare value to be inserterted within the Qtablewidget\n ###########################################\n self.tab = []\n iterator = self.ZRlayer.getFeatures()\n idx = self.ZRlayer.fieldNameIndex('description')\n idx2 = vlayer.fieldNameIndex('indexZR')\n \n for feat in iterator:\n \n attrs = feat.attributes()[idx]\n fid = feat.attributes()[idx2]\n fid2 = int(feat.id())\n self.tab.append([fid2, fid, attrs])\n\n ###########################################\n #fill the table\n ###########################################\n for rowNumber in range(numberRows):\n for columnNumber in range(numberColumns):\n item = QTableWidgetItem(\"{0} \".format(self.tab[rowNumber][columnNumber]))\n if columnNumber == 0:\n item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)\n item.setCheckState(QtCore.Qt.Unchecked)\n\n self.tableWidget.setItem(rowNumber, columnNumber, item)\n \n #self.tableWidget.setItem(rowNumber, columnNumber, item)\n ###########################################\n #reset table geometry\n ###########################################\n self.tableWidget.resizeColumnsToContents()\n self.tableWidget.verticalHeader().setVisible(False)\n self.tableWidget.setHorizontalHeaderLabels([' ','index ZR', 'description'])\n except:\n self.loaded = False", "title": "" }, { "docid": "6b873ac4f2b92f93676e6eba9f4a6615", "score": "0.4716109", "text": "def on_tagAsOk(self):\n self.cdTagCache = pQt.ConfirmDialog(\"Tag selected version as 'Ok' ?\", ['Tag As Ok'], [self.tagCacheVersion])\n self.cdTagCache.exec_()", "title": "" }, { "docid": "fd99caf1fc0aa93ef45b8b2ac8bd892c", "score": "0.47063133", "text": "def right_click(self, eventbox, event):\n menu = Gtk.Menu()\n self.copy = Gtk.MenuItem(\"Copy\")\n self.paste = 
Gtk.MenuItem(\"Paste\")\n menu.append(self.paste)\n menu.append(self.copy)\n\n self.copy.connect(\"activate\", self.copy_text)\n self.paste.connect(\"activate\", self.paste_text)\n\n self.copy.show()\n self.paste.show()\n menu.popup(None, None, None, None, event.button, event.time)", "title": "" }, { "docid": "79b30e123dd524c41ec2ee708d98de32", "score": "0.46902856", "text": "def constructing_menu(self):\n self.parent.add_command(label=self.name, command=self.save_as)", "title": "" }, { "docid": "d49ed99b9e1fe13e50fbb2eecf7c920a", "score": "0.46815246", "text": "def new_cmd(self):\n self._add_variables_to_history()\n self._reset_variables()", "title": "" }, { "docid": "b5bd5e6c7dcab4ad2eea4dd0afff06b3", "score": "0.46796992", "text": "def addNewAlgArgument(self):\n\t\tcurrList = self.newAlgArgsModel.stringList()\n\t\tcurrList.append(self.selectedArg)\n\t\tself.newAlgArgsModel = QStringListModel(QStringList(currList))\n\t\tself.newAlgArgsListView2.setModel(self.newAlgArgsModel)", "title": "" }, { "docid": "9bf10b39cbced8390dff794fc12eff87", "score": "0.4672232", "text": "def Shortcut(self) -> str:", "title": "" }, { "docid": "9bf10b39cbced8390dff794fc12eff87", "score": "0.4672232", "text": "def Shortcut(self) -> str:", "title": "" }, { "docid": "8baa66014dc4c095d22c7698c7e1faac", "score": "0.4663717", "text": "def on_cacheNodeSingleClick(self):\n selCacheItems = self.twCaches.selectedItems()\n if len(selCacheItems) == 1:\n if selCacheItems[0]._widget.infoDict is not None:\n self.cacheInfo.cacheItem = selCacheItems[0]\n self.cacheInfo.clearAll()\n self.cacheInfo.rf_widget()", "title": "" }, { "docid": "9b72ce3679514d69331b4741a1dde0c4", "score": "0.46619028", "text": "def test_dups1(self):\n\n # XXX dp: \"duplicates\" is an odd name for this routine.\n\n self.m1.set_content(\"\"\"\\\ndir mode=0755 owner=root group=sys path=bin\ndir mode=0755 owner=root group=sys path=bin\ndir mode=0755 owner=root group=sys path=bin\ndir mode=0755 owner=root group=sys path=usr\ndir mode=0755 owner=root group=root path=usr\ndir mode=0755 owner=bin group=sys path=usr\n \"\"\")\n\n acount = 0\n for kv, actions in self.m1.duplicates():\n self.assertEqual(kv, ('dir', 'usr'))\n for a in actions:\n acount += 1\n #print(\" {0} {1}\".format(kv, a))\n self.assertEqual(acount, 3)", "title": "" }, { "docid": "40274ffb1cde7a3c0a22578bcfb4cc5f", "score": "0.4661644", "text": "def ui_command_redeploy(self):\n if not self.confirm_app_is_published():\n return\n try:\n rClient.redeploy_vm(self.appId, self.vmId)\n except:\n print(c.red(\"\\nProblem redploying VM!\\n\"))\n raise\n print(c.yellow(\"\\nVM was destroyed and is being re-published from most recent library state!\"))\n print(\"FQDN should stay the same; VNC URL will change; ssh host key might change\\n\")\n rCache.purge_app_cache(self.appId)", "title": "" }, { "docid": "93c89519a45fcddff121c8cf4eb26c4d", "score": "0.46611497", "text": "def slot_actionNew(self):\n \"\"\"\n QMessageBox.question(\n QWidget,\n QString,\n QString,\n QMessageBox.StandardButtons buttons=QMessageBox.Ok,\n QMessageBox.StandardButton defaultButton=QMessageBox.NoButton\n ) -> QMessageBox.StandardButton\n \"\"\"\n button = QMessageBox().question(\n self,\n \"Warning!\",\n \"Do you want to save this file before create a new one?\",\n QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel,\n QMessageBox.Yes\n )\n if button == QMessageBox.Cancel:\n return\n if button == QMessageBox.Yes:\n self.slot_actionSave_as()\n self.filename = \"\"\n self.scene.clear()", "title": "" }, { "docid": 
"39c8556489b5040cb8939d6a10bdf5da", "score": "0.46584526", "text": "def command_added(self, deputy_obj, cmd_obj):\n return", "title": "" }, { "docid": "f02d65b67c73fb9c3ee5b6d5c596c28d", "score": "0.4646259", "text": "def new_postproc_version_from_existing(self):\n\n new_version = deepcopy(self.versions[self.version_selected])\n self.versions.append(new_version)\n self.number_versions += 1\n self.version_selected = self.number_versions\n return new_version", "title": "" }, { "docid": "f02d806dc6401c793b92298542b9cb56", "score": "0.46393433", "text": "def menuItem(*args):\n\toptionsWindow()", "title": "" }, { "docid": "f02d806dc6401c793b92298542b9cb56", "score": "0.46393433", "text": "def menuItem(*args):\n\toptionsWindow()", "title": "" }, { "docid": "4f1728be6e4da5271d56c5377b411402", "score": "0.4637212", "text": "def save_and_push_command():\n return Command().command(_save_and_push)", "title": "" }, { "docid": "5fb4278324a32c35a4edbbb21343e26f", "score": "0.46334055", "text": "def add(self, cmd):\n #self.ordered_history[time.time()] = {'cmd': cmd}\n cmd['timestamp'] = time.time()\n self.ordered_history.append(cmd)", "title": "" }, { "docid": "3cdc8f8b575ca8fd8c6409c45aaac38e", "score": "0.46326897", "text": "def command(self):", "title": "" }, { "docid": "3fef32c29780312d234609bee85b2f17", "score": "0.4631785", "text": "def resolve_duplicate(self, task, found_duplicates):\n log.warn(u\"This {0} is already in the library!\",\n (u\"album\" if task.is_album else u\"item\"))\n\n if config['import']['quiet']:\n # In quiet mode, don't prompt -- just skip.\n log.info(u'Skipping.')\n sel = u's'\n else:\n # Print some detail about the existing and new items so the\n # user can make an informed decision.\n for duplicate in found_duplicates:\n print_(u\"Old: \" + summarize_items(\n list(duplicate.items()) if task.is_album else [duplicate],\n not task.is_album,\n ))\n\n print_(u\"New: \" + summarize_items(\n task.imported_items(),\n not task.is_album,\n ))\n\n sel = ui.input_options(\n (u'Skip new', u'Keep both', u'Remove old')\n )\n\n if sel == u's':\n # Skip new.\n task.set_choice(importer.action.SKIP)\n elif sel == u'k':\n # Keep both. 
Do nothing; leave the choice intact.\n pass\n elif sel == u'r':\n # Remove old.\n task.should_remove_duplicates = True\n else:\n assert False", "title": "" }, { "docid": "c3181241aa6f097e9ee0f0fe8f87b808", "score": "0.46257728", "text": "def customContextMenu(self, event):\n selectedObjects = event.source().selectedH5Nodes(ignoreBrokenLinks=False)\n menu = event.menu()\n\n if not menu.isEmpty():\n menu.addSeparator()\n\n for obj in selectedObjects:\n h5 = obj.h5py_object\n\n name = obj.name\n if name.startswith(\"/\"):\n name = name[1:]\n if name == \"\":\n name = \"the root\"\n\n action = qt.QAction(\"Show %s\" % name, event.source())\n action.triggered.connect(lambda: self.displayData(h5))\n menu.addAction(action)\n\n if silx.io.is_dataset(h5):\n action = qt.QAction(\"Use as a new custom signal\", event.source())\n action.triggered.connect(lambda: self.useAsNewCustomSignal(h5))\n menu.addAction(action)\n\n if silx.io.is_group(h5) and silx.io.nxdata.is_valid_nxdata(h5):\n action = qt.QAction(\"Use as a new custom NXdata\", event.source())\n action.triggered.connect(lambda: self.useAsNewCustomNxdata(h5))\n menu.addAction(action)\n\n if silx.io.is_file(h5):\n action = qt.QAction(\"Close %s\" % obj.local_filename, event.source())\n action.triggered.connect(lambda: self.__treeview.findHdf5TreeModel().removeH5pyObject(h5))\n menu.addAction(action)\n action = qt.QAction(\"Synchronize %s\" % obj.local_filename, event.source())\n action.triggered.connect(lambda: self.__synchronizeH5pyObject(h5))\n menu.addAction(action)", "title": "" }, { "docid": "a4183b2205fef3c234baae1fefb1da65", "score": "0.46246752", "text": "def test_insert_duplicate_file_names(self):\n fake_hash = 'asdf'\n items = self.__build_items()\n item = items[0]\n\n self.assertTrue(self.catalogue.add_item(item=item))\n item['source_hash'] = fake_hash\n\n self.assertTrue(self.catalogue.add_item(item=item))\n return fake_hash", "title": "" }, { "docid": "e9ac62c7a9ae4c0012d0e224f9114417", "score": "0.4619878", "text": "def copyWidget(widget):\n\t\treturn PMFileNameWidget(widget.getName(), widget.filename, widget.mode, widget.browser, widget.postional, widget.initdefault, widget.checkfileexist, infolabels=False)", "title": "" }, { "docid": "ce72412636679937d217ee4d40b90bcb", "score": "0.46158594", "text": "def add_clone_resource_arg(parser):\n concept_parsers.ConceptParser.ForResource(\n '--clone',\n get_order_resource_spec(),\n 'The order to clone.',\n prefixes=True,\n required=False).AddToParser(parser)", "title": "" }, { "docid": "582d892450a29a14723991f640edaeef", "score": "0.46154988", "text": "def perform_copy_QMenu(cls, src_menu, action_parent, menu_parent, debug_print=False):\n assert src_menu.parent() != menu_parent, \"Expect that src_menu's parent isn't already dest_parent, but it already seems to be. 
\"\n if debug_print:\n print(f'perform_copy_QMenu(src_menu: {src_menu}, src_menu.parent(): {src_menu.parent()}, menu_parent: {menu_parent})')\n new_menu = QtWidgets.QMenu(menu_parent) # dest_parent: self.menubar\n new_menu.setIcon(src_menu.icon())\n new_menu.setTitle(src_menu.title())\n new_menu.setObjectName(src_menu.objectName())\n new_menu.setEnabled(src_menu.isEnabled())\n \n # new_menu.setActionGroup(src_menu.actionGroup())\n\n old_children_items = src_menu.children()\n new_children_items = []\n new_actions = []\n for a_child in old_children_items:\n if isinstance(a_child, QtWidgets.QMenu):\n # it's a submenu\n # new_children_items.append(cls.perform_copy_QMenu(a_child, dest_parent=new_menu, debug_print=debug_print))\n child_submenu, child_children_items, child_actions = cls.perform_copy_QMenu(a_child, action_parent=action_parent, menu_parent=new_menu, debug_print=debug_print)\n new_children_items.append(child_submenu)\n new_children_items.extend(child_children_items)\n new_actions.extend(child_actions)\n \n elif isinstance(a_child, QtWidgets.QAction):\n # it's a sub-action\n # new_actions.append(cls.perform_copy_QAction(a_child, dest_parent=new_menu, debug_print=debug_print))\n new_actions.append(cls.perform_copy_QAction(a_child, dest_parent=action_parent, debug_print=debug_print))\n \n return new_menu, new_children_items, new_actions", "title": "" }, { "docid": "8fa8340047e05b80ff066cb7b70fc11e", "score": "0.46011093", "text": "def selected_item(self,event):\n global selected_file, selected_file_to_paste\n\n if not self.sender().isFlat() :\n self.CheckFileExe(self.sender().text())\n dragged_item.append(self.sender())\n selected_file = self.sender().objectName()\n if not os.path.isfile(selected_file):\n selected_file_to_paste = selected_file\n self.sender().setFlat(False)\n if len(dragged_item)>1:\n dragged_item[-2].setFlat(True)", "title": "" }, { "docid": "98db91fe2ab1a65e0d6a8ab8c54352e3", "score": "0.4589644", "text": "def run (args):\n cmds.init.require_init()\n (options, args) = optargs (args)\n History = repo.history.load()\n print History.tip()", "title": "" }, { "docid": "da7c1d6ee1a3be46fb169df56b7159ae", "score": "0.4586333", "text": "def on_bt_ma_primer_clicked(self, widget, *args):\n\t\tself.primera_fitxa(\"ma\")", "title": "" }, { "docid": "1efd33592e450a63213fc3963ed9f1cd", "score": "0.45822185", "text": "def handleCopyLink(self, boolean):\n clipboard = QtGui.QApplication.clipboard()\n a_note = self.noteFromProxyIndex(self.right_clicked)\n clipboard.setText(\"<note_link><split>\" + a_note.getName() + \"<split>\" + a_note.getLink() + \"<split></note_link>\")", "title": "" }, { "docid": "edbd21a8c722101aaf818d8b159ae1c7", "score": "0.45804292", "text": "def on_cacheNodeDoubleClick(self):\n selCacheItems = self.twCaches.selectedItems()\n if len(selCacheItems) == 1:\n selCacheItems[0]._widget.on_cacheAssigned()", "title": "" }, { "docid": "3a5cc86c3613376668093812d42c67d4", "score": "0.45774204", "text": "def cli_update():", "title": "" }, { "docid": "baec95215269ba9ce8bd0dc2a801abd3", "score": "0.45711648", "text": "def test_duplicate(logger):\n duplicate = actions.duplicate.DuplicateAction()\n assert str(duplicate) == \"duplicate\", \"Duplicate returned incorrect string representation: %s\" % str(duplicate)\n\n packet = layers.packet.Packet(IP(src=\"127.0.0.1\", dst=\"127.0.0.1\")/TCP(sport=2222, dport=3333, seq=100, ack=100, flags=\"S\"))\n packet1, packet2 = duplicate.run(packet, logger)\n assert id(packet1) != id(packet2), \"Duplicate aliased packet objects\"\n 
duplicate.mutate()", "title": "" }, { "docid": "4500646a7bc0a64a4b6f60917a6e65a5", "score": "0.45710704", "text": "def copy(self):\n data = {'id' : self.id,\n 'application_id' : self.application_id,\n 'name' : self.name,\n 'description' : self.description,\n 'options' : [option.to_dict() for option in self.options]}\n return ApplicationCommand(state=self._state, guild=self.guild, data=data)", "title": "" }, { "docid": "8f7c1757e1e794c54e6d76bdc4c48a41", "score": "0.45631808", "text": "def clone(context, request):\n if request.has_permission('create'):\n return {\n 'name': 'clone',\n 'title': 'Clone',\n 'profile': '/profiles/{ti.name}.json'.format(ti=context.type_info),\n 'href': '{item_uri}#!clone'.format(item_uri=request.resource_path(context)),\n }", "title": "" }, { "docid": "de85d10b9c48a2de26268e512616d2e8", "score": "0.4558552", "text": "def TDF_CopyTool_Copy(*args):\n return _TDF.TDF_CopyTool_Copy(*args)", "title": "" }, { "docid": "5003537cb6a18fb640ba23f219d96a72", "score": "0.45544004", "text": "def menuItem(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "78e1ec6ae841d733aeded88eba470ab4", "score": "0.4553222", "text": "def test_same_name(self):\n command_line = self._MENU + [self._POOLNAME, self._FSNAME, self._FSNAME]\n self.check_error(StratisCliNoChangeError, command_line, _ERROR)", "title": "" }, { "docid": "233ec16fd8abb47fecdcbf7cf15e886a", "score": "0.455225", "text": "def clone_options(self, command):\n super().clone_options(command)\n self.target_image = command.target_image", "title": "" }, { "docid": "9c55be03b741213e5681bcae03087a61", "score": "0.45519733", "text": "def __pushAllActTriggered(self):\n self.vcs.hgPush(self.repodir)\n self.on_refreshButton_clicked()", "title": "" }, { "docid": "7e14fda1666197071cc9650ca972a595", "score": "0.45497704", "text": "def addProcess(process):\n if process not in admin.processes:\n admin.processes.append(process)\n #signal.send('admin.processes')", "title": "" }, { "docid": "7b0a38ae66b7f88f425bd9fb5d79941c", "score": "0.4547573", "text": "def add(self, cmd: ghidra.framework.cmd.Command) -> None:\n ...", "title": "" }, { "docid": "9c505b33fd30fd3d3a87f584f8e24fac", "score": "0.45414853", "text": "def __insertHistory(self, cmd):\n self.setCursorPosition(self.prline, self.prcol)\n self.setSelection(self.prline, self.prcol,\n self.prline, self.lineLength(self.prline))\n self.removeSelectedText()\n self.__insertText(cmd)", "title": "" }, { "docid": "df02b0bbd0425b97b8a59bebe2ff27f2", "score": "0.45360968", "text": "def dup2(self):\n self._add_instruction(Opcode.DUP2)", "title": "" }, { "docid": "6432e088978a692158c37853f2c99d91", "score": "0.45324138", "text": "def __initActionsMenu(self):\n self.__actionsMenu = QMenu()\n self.__actionsMenu.setTearOffEnabled(True)\n self.__actionsMenu.setToolTipsVisible(True)\n \n self.__graftAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"vcsGraft.png\"),\n self.tr(\"Copy Changesets\"), self.__graftActTriggered)\n self.__graftAct.setToolTip(self.tr(\n \"Copy the selected changesets to the current branch\"))\n \n self.__mergeAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"vcsMerge.png\"),\n self.tr(\"Merge with Changeset\"), self.__mergeActTriggered)\n self.__mergeAct.setToolTip(self.tr(\n \"Merge the working directory with the selected changeset\"))\n \n self.__phaseAct = self.__actionsMenu.addAction(\n self.tr(\"Change Phase\"), self.__phaseActTriggered)\n self.__phaseAct.setToolTip(self.tr(\n \"Change the phase of the selected revisions\"))\n 
self.__phaseAct.setWhatsThis(self.tr(\n \"\"\"<b>Change Phase</b>\\n<p>This changes the phase of the\"\"\"\n \"\"\" selected revisions. The selected revisions have to have\"\"\"\n \"\"\" the same current phase.</p>\"\"\"))\n \n self.__tagAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"vcsTag.png\"), self.tr(\"Tag\"),\n self.__tagActTriggered)\n self.__tagAct.setToolTip(self.tr(\"Tag the selected revision\"))\n \n self.__closeHeadsAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"closehead\"), self.tr(\"Close Heads\"),\n self.__closeHeadsActTriggered)\n self.__closeHeadsAct.setToolTip(self.tr(\"Close the selected heads\"))\n \n self.__switchAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"vcsSwitch.png\"), self.tr(\"Switch\"),\n self.__switchActTriggered)\n self.__switchAct.setToolTip(self.tr(\n \"Switch the working directory to the selected revision\"))\n \n self.__actionsMenu.addSeparator()\n \n self.__bookmarkAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"addBookmark.png\"),\n self.tr(\"Define Bookmark...\"), self.__bookmarkActTriggered)\n self.__bookmarkAct.setToolTip(\n self.tr(\"Bookmark the selected revision\"))\n self.__bookmarkMoveAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"moveBookmark.png\"),\n self.tr(\"Move Bookmark...\"), self.__bookmarkMoveActTriggered)\n self.__bookmarkMoveAct.setToolTip(\n self.tr(\"Move bookmark to the selected revision\"))\n \n self.__actionsMenu.addSeparator()\n \n self.__pullAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"vcsUpdate.png\"), self.tr(\"Pull Changes\"),\n self.__pullActTriggered)\n self.__pullAct.setToolTip(self.tr(\n \"Pull changes from a remote repository\"))\n self.__lfPullAct = self.__actionsMenu.addAction(\n self.tr(\"Pull Large Files\"), self.__lfPullActTriggered)\n self.__lfPullAct.setToolTip(self.tr(\n \"Pull large files for selected revisions\"))\n self.__fetchAct = self.__actionsMenu.addAction(\n self.tr(\"Fetch Changes\"), self.__fetchActTriggered)\n self.__fetchAct.setToolTip(self.tr(\n \"Fetch changes from a remote repository\"))\n \n self.__actionsMenu.addSeparator()\n \n self.__pushAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"vcsCommit.png\"),\n self.tr(\"Push Selected Changes\"), self.__pushActTriggered)\n self.__pushAct.setToolTip(self.tr(\n \"Push changes of the selected changeset and its ancestors\"\n \" to a remote repository\"))\n self.__pushAllAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"vcsCommit.png\"),\n self.tr(\"Push All Changes\"), self.__pushAllActTriggered)\n self.__pushAllAct.setToolTip(self.tr(\n \"Push all changes to a remote repository\"))\n \n self.__actionsMenu.addSeparator()\n \n self.__bundleAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"vcsCreateChangegroup.png\"),\n self.tr(\"Create Changegroup\"), self.__bundleActTriggered)\n self.__bundleAct.setToolTip(self.tr(\n \"Create a changegroup file containing the selected changesets\"))\n self.__bundleAct.setWhatsThis(self.tr(\n \"\"\"<b>Create Changegroup</b>\\n<p>This creates a changegroup\"\"\"\n \"\"\" file containing the selected revisions. If no revisions\"\"\"\n \"\"\" are selected, all changesets will be bundled. If one\"\"\"\n \"\"\" revision is selected, it will be interpreted as the base\"\"\"\n \"\"\" revision. 
Otherwise the lowest revision will be used as\"\"\"\n \"\"\" the base revision and all other revision will be bundled.\"\"\"\n \"\"\" If the dialog is showing outgoing changesets, all\"\"\"\n \"\"\" selected changesets will be bundled.</p>\"\"\"))\n self.__unbundleAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"vcsApplyChangegroup.png\"),\n self.tr(\"Apply Changegroup\"), self.__unbundleActTriggered)\n self.__unbundleAct.setToolTip(self.tr(\n \"Apply the currently viewed changegroup file\"))\n \n self.__actionsMenu.addSeparator()\n \n self.__gpgSignAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"changesetSign.png\"),\n self.tr(\"Sign Revisions\"), self.__gpgSignActTriggered)\n self.__gpgSignAct.setToolTip(self.tr(\n \"Add a signature for the selected revisions\"))\n self.__gpgVerifyAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"changesetSignVerify.png\"),\n self.tr(\"Verify Signatures\"), self.__gpgVerifyActTriggered)\n self.__gpgVerifyAct.setToolTip(self.tr(\n \"Verify all signatures there may be for the selected revision\"))\n \n self.__actionsMenu.addSeparator()\n \n self.__stripAct = self.__actionsMenu.addAction(\n UI.PixmapCache.getIcon(\"fileDelete.png\"),\n self.tr(\"Strip Changesets\"), self.__stripActTriggered)\n self.__stripAct.setToolTip(self.tr(\n \"Strip changesets from a repository\"))\n \n self.__actionsMenu.addSeparator()\n \n self.__selectAllAct = self.__actionsMenu.addAction(\n self.tr(\"Select All Entries\"), self.__selectAllActTriggered)\n self.__unselectAllAct = self.__actionsMenu.addAction(\n self.tr(\"Deselect All Entries\"),\n lambda: self.__selectAllActTriggered(False))\n \n self.actionsButton.setIcon(\n UI.PixmapCache.getIcon(\"actionsToolButton.png\"))\n self.actionsButton.setMenu(self.__actionsMenu)", "title": "" }, { "docid": "8854ca2d24a36dd40d3e159eaa26a114", "score": "0.4527079", "text": "def _script_from_history(self, line):\n return ''", "title": "" }, { "docid": "d7eb4494f180e59c3ed0ea19a4bb06bd", "score": "0.4524771", "text": "def add_command(self, name, func=None, shortcut=None):\n action = GraphAction(name, self._graph.viewer())\n action.graph = self._graph\n if LooseVersion(QtCore.qVersion()) >= LooseVersion('5.10'):\n action.setShortcutVisibleInContextMenu(True)\n if shortcut:\n action.setShortcut(shortcut)\n if func:\n action.executed.connect(func)\n qaction = self.qmenu.addAction(action)\n return NodeGraphCommand(self._graph, qaction)", "title": "" }, { "docid": "1d98cf9fe7a7138b253fee832132138f", "score": "0.45158693", "text": "def duplicate_paths(self, action, manifest, engine, pkglint_id=\"001\"):\n\n self.dup_attr_check([\"file\", \"license\"], \"path\", self.ref_paths,\n self.processed_paths, action, engine,\n manifest.get_all_variants(), msgid=pkglint_id)", "title": "" }, { "docid": "03eb95af8ce4f7204113204b1582ff47", "score": "0.4514079", "text": "def add_command(self, name, cmd):\n\n action = QAction(name, self, triggered=cmd)\n self.addAction(action)\n return action", "title": "" }, { "docid": "c9fa1fd7a31fb38595d0b998fe05336f", "score": "0.45099378", "text": "def __contextMenuCopyPathToClipboard(self):\n if self.contextMenuEditor:\n fn = self.contextMenuEditor.getFileName()\n if fn:\n cb = QApplication.clipboard()\n cb.setText(fn)", "title": "" }, { "docid": "c3c317377733266bb69d9eaa6f7a03fb", "score": "0.44969213", "text": "def __AddUndoCommand__(self, command):\r\n self.undo_stack.append(command)", "title": "" }, { "docid": "3ae7eec9214e79f4156e5ed9548e5851", "score": "0.44926047", "text": 
"def duplicate(self, item_path, workspace, user):\n # checking if the file requested exists\n full_path = self.get_path(filename=item_path,\n workspace=workspace)\n if not os.path.exists(full_path):\n return (constant.NOT_FOUND, '(%s) does not exits' % item_path)\n\n # is a file or folder ?\n if os.path.isfile(full_path):\n return self.duplicate_file(file_path=full_path)\n \n return self.duplicate_folder(folder_path=full_path)", "title": "" }, { "docid": "a9b3e876c809ba411ea33f2da2f8f69e", "score": "0.4492259", "text": "def update(self):\n sleep(2)\n while True:\n self._shell.cmd('sh /data/security/shell/copy')\n break", "title": "" }, { "docid": "b0bd7e87055e24a02c42bb3ac9985f30", "score": "0.44883534", "text": "def deleteNewAlgArgument(self):\n\t\tcurrList = self.newAlgArgsModel.stringList()\n\t\tif self.selectedModifArgIndex != -1:\n\t\t\tcurrList.takeAt(self.selectedModifArgIndex)\n\t\t\tself.newAlgArgsListView2.model().setStringList(currList)\n\t\t\tself.newAlgArgOpGroupBox.setEnabled(False)\n\t\t\tself.selectedModifArgIndex = -1", "title": "" }, { "docid": "176ae738025403d43a86a2ff59d250f6", "score": "0.44868857", "text": "def menu(self, *args):\n pass", "title": "" }, { "docid": "dc3c35850857760b4ac413f940b3dfd0", "score": "0.44848737", "text": "def shortcut(chrome, url, icon, name):\n template = Template(DESKTOP_TEMPLATE.read_text())\n rendered = template.render(\n url=url,\n icon=icon,\n chrome=chrome,\n name=name,\n )\n\n filename = name.replace(\" \", \"_\").lower()\n\n Path(\n f\"~/.local/share/applications/{filename}.desktop\"\n ).expanduser().write_text(rendered)\n\n click.secho(f\"A desktop entry has been added for {url}\", fg=\"green\")", "title": "" } ]
fbb66c295298d642ea346b8998a7d514
TCPSocket specifies an action involving a TCP port.
[ { "docid": "ea1836355d8f29e6c48cc2de789a993b", "score": "0.7387273", "text": "def tcp_socket(self) -> 'outputs.TCPSocketActionResponse':\n return pulumi.get(self, \"tcp_socket\")", "title": "" } ]
[ { "docid": "fb0171d918a4881bd87aa287bf0ed002", "score": "0.6811422", "text": "def open_tcp_port(ec2_resource, port, vpc_id):\n try:\n vpc = ec2_resource.Vpc(id=vpc_id)\n defaultSg = list(vpc.security_groups.all())[0]\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(port),\n ToPort=int(port)\n )\n except Exception as e:\n print(e)", "title": "" }, { "docid": "c59403d4e5283f093dfa1034a76b9ed9", "score": "0.65056944", "text": "def init_tcp_conn(target: str, port: int) -> socket.socket:\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.connect((target, port))\n return conn", "title": "" }, { "docid": "7bec9c2f751a2174ab0aa3fc64a8894b", "score": "0.64622724", "text": "def open_tcp_ports(ec2, myClusterProps, DWH_PORT):\n try:\n vpc = ec2.Vpc(id=myClusterProps['VpcId'])\n defaultSg = list(vpc.security_groups.all())[0]\n print(defaultSg)\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name, \n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)", "title": "" }, { "docid": "630dbcf436f265117fe4434c3c9266e6", "score": "0.63231856", "text": "def listen_tcp(self, port=DEFAULT_TCP_PORT):\n self.__clean__()\n self.run_cmd(['tcpip', port])\n return self.__output", "title": "" }, { "docid": "7f8ad9152f8aeee4cbc4dc3eb45f3efc", "score": "0.6304194", "text": "def open_tcp_socket(port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('0.0.0.0', port))\n s.listen(0)\n\n conn, addr = s.accept()\n\n print '[+] reverse shell connection received from %s' % str(addr)\n data = conn.recv(4096)\n print data\n\n return conn", "title": "" }, { "docid": "2a8eabdca94832c5c459c29a52f55afd", "score": "0.62676847", "text": "def sock_tcp_conn(ipaddr, ipport):\n global readsocks, waitsocks, deftimeout\n buf = 2048\n addr = (ipaddr, ipport)\n\n mysock = socket(AF_INET, SOCK_STREAM)\n # Temporarily commented. 
Will be tested on VHT test bed before deletion\n mysock.settimeout(deftimeout)\n try:\n mysock.connect(addr)\n except:\n exc_info = sys.exc_info()\n wfa_sys_exit(\"Control Network Timeout - IP-%s:%s REASON = %s\" %(ipaddr,ipport,exc_info[1]))\n\n readsocks.append(mysock)\n # Add the descriptor to select wait\n waitsocks.append(mysock)\n return mysock", "title": "" }, { "docid": "7f099f077bfe2b8131c6d0a951f69ae4", "score": "0.6250033", "text": "async def tcp(self, ctx, address: str, port: int) -> None:\n timeout = get_config(\"timeout\")\n address = escape_mentions(address)\n\n conn = asyncio.open_connection(address, port)\n try:\n reader, writer = await asyncio.wait_for(conn, timeout)\n await ctx.send(f\"Connection established on {address}:{port}\")\n writer.close()\n await writer.wait_closed()\n except asyncio.TimeoutError:\n await ctx.send(f\"Request timed out after {timeout} seconds\")\n except ConnectionRefusedError:\n await ctx.send(f\"Could not establish a connection to {address}:{port}\")", "title": "" }, { "docid": "f72bd19e906fb220d1191ec5d718de33", "score": "0.62200016", "text": "def unused_tcp_port() -> int:\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n sock.bind((\"\", 0))\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return cast(int, sock.getsockname()[1])", "title": "" }, { "docid": "9913ce5ef14e594e1ff1c1ced54a9ee5", "score": "0.59342617", "text": "def __init__(self, protocol, from_port, to_port, address=None, group=None, group_name=None):\n self.protocol = protocol or \"tcp\"\n self.from_port = from_port\n self.to_port = to_port\n self.address = address\n self.group = group\n self.group_name = group_name", "title": "" }, { "docid": "fcf88993a26fb146f32e1fc78e453fb1", "score": "0.59126407", "text": "def tcp_connect(host: str, port: int) -> socket:\n sock = socket(AF_INET, SOCK_STREAM)\n sock.connect((host, port))\n return sock", "title": "" }, { "docid": "da37c2a7c17aabd94cf55079376e95f0", "score": "0.5905992", "text": "def unused_tcp_port():\n with closing(socket.socket()) as sock:\n sock.bind((pytest.saltyrtc.host, 0))\n return sock.getsockname()[1]", "title": "" }, { "docid": "e287af2f115d11ca949ddb325f082938", "score": "0.5873835", "text": "def do_socket_logic():\n pass", "title": "" }, { "docid": "9620d44664309873f65f79fcc1547bd8", "score": "0.5841808", "text": "def get_tcp_port(self):\r\n try:\r\n port = int(self.tcp_port_combobox.currentText())\r\n except Exception as err:\r\n logger.error(\"Error setting TCP Port: \", err)\r\n self.tcp_port_combobox.setCurrentIndex(0)\r\n return \"55056\"\r\n\r\n return str(port)", "title": "" }, { "docid": "381d93aa45d2a44f362e623395488ab3", "score": "0.5826469", "text": "def set_socket(self, host=\"\", port=0, listen_n=5):\n #initializing\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n dest = (host, port)\n #binding\n self.sock.bind(dest)\n #listening\n self.sock.listen(listen_n)\n\n return self.sock.getsockname()", "title": "" }, { "docid": "cb9880e522933f14c346a470bed61e72", "score": "0.5821775", "text": "def open_socket_accept(\n port: int,\n gen_param_name: MaybeSequence[str],\n gen_param_value: MaybeSequence[Union[float, int, str]]\n) -> HHandle:\n with HalconOperator(343) as proc:\n proc.set_input_tuple(0, port)\n proc.set_input_tuple(1, gen_param_name)\n proc.set_input_tuple(2, gen_param_value)\n proc.init_oct(0)\n proc.execute()\n accepting_socket = proc.get_output_tuple_s(0)\n return accepting_socket # type: ignore", "title": "" }, { "docid": 
"a443115ed21b3b3ab63fcd644a873db9", "score": "0.5812719", "text": "def socket_init(self):\n self.socket = socket.socket(\n socket.AF_INET,\n socket.SOCK_STREAM,\n socket.IPPROTO_TCP)", "title": "" }, { "docid": "0a155a020753f3dd3055743c81a73b7e", "score": "0.58104354", "text": "def __init__(self, ip:str, port:int):\n self.addr = (ip, port)\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "title": "" }, { "docid": "38a7a5ef1171d128fbd46fdd7a705705", "score": "0.57982105", "text": "def connect(self, ip, port):\n self.socket.connect((ip, port))", "title": "" }, { "docid": "bc3be679006bbdbc51e548ee7d43fb44", "score": "0.57903844", "text": "def unused_tcp_port():\n with contextlib.closing(socket.socket()) as sock:\n sock.bind(('127.0.0.1', 0))\n return sock.getsockname()[1]", "title": "" }, { "docid": "fee548c3fdd6c4e1add134030c1fa008", "score": "0.57806873", "text": "def connScan(tgtHost, tgtPort):\n\ttry:\n\t\tconnSkt = socket(AF_INET, SOCK_STREAM)\n\t\tconnSkt.connect((tgtHost, tgtPort))\n\n\t\tprint '[+]%d/tcp open'% tgtPort\n\t\tprint '[+]' + 'Connection successful\\n'\n\t\n\t# if it encounters an error, it will print the error and show that \n\t# the port is closed \n\texcept Exception as e:\n\t\tprint(e)\n\t\tprint '[-]%d/tcp closed'% tgtPort\t\n\t\n\t# ends the program and closes the connection\n\tfinally:\n\t\tconnSkt.close()", "title": "" }, { "docid": "52cabf277de39f832fd27364833c2374", "score": "0.5775819", "text": "def bind_port(self, context):\n pass", "title": "" }, { "docid": "ef99ab0ad909b7f755ede132bb47a5a3", "score": "0.5737731", "text": "def __init__(self, port, debug=True):\r\n\t\tself.TCP_PORT = port\r\n\t\tself.debug = debug", "title": "" }, { "docid": "ca0d146eac736393c57944ddb66d81ac", "score": "0.5734161", "text": "def protocol(self):\n return 'TCP'", "title": "" }, { "docid": "8ba12603a21447ca646d1656d838c114", "score": "0.5725548", "text": "def Open(self, hostname, port):", "title": "" }, { "docid": "8cf4e0b135716a54bcead2e838610cec", "score": "0.57115406", "text": "def random_port(tcp=True):\n # Port 0 will allocate an ephemeral port\n socktype = socket.SOCK_STREAM if tcp else socket.SOCK_DGRAM\n s = socket.socket(socket.AF_INET, socktype)\n s.bind((\"\", 0))\n addr, port = s.getsockname()\n s.close()\n return port", "title": "" }, { "docid": "2c361901709a839d5ec1d9c4586b012a", "score": "0.56897837", "text": "def get_socket(node, prt, prt_kind='TCP'):\n socket = Toolkit(node=node)\n result = socket.check_socket(port=prt, kind=prt_kind)\n print('Type=[{}] Socket=[{}]:[{}] Status=[{}]'.format(\n prt_kind, node, prt, result))", "title": "" }, { "docid": "1bd4869da6bf19856120b332e8ee745b", "score": "0.5687892", "text": "async def tcp_listener(application: Application, hostname: str, host: str, port: int):\n\n async def handle_connection(reader, writer):\n data = await reader.readline()\n writer.write(await application.dispatch(hostname, port, data))\n writer.write_eof()\n\n await asyncio.start_server(handle_connection, host, port)", "title": "" }, { "docid": "f0b2042b4c5c60b660b78a115a9af8d8", "score": "0.5678651", "text": "def send_to( self, message, host, port, user_additional_info = {},\n tcp = True ):\n \n factory = ClientFactory( self, message, user_additional_info )\n if tcp:\n reactor.connectTCP( host, port, factory )\n else:\n reactor.connectUDP( host, port, factory )", "title": "" }, { "docid": "78bd353764f4e009d5cd27e610cf00e9", "score": "0.5640606", "text": "def TestPortOpen(ip, port):\n s = socket.socket(socket.AF_INET, 
socket.SOCK_STREAM)\n try:\n s.settimeout(1)\n s.connect((ip, int(port)))\n s.shutdown(TIMEOUT)\n return True\n except:\n return False", "title": "" }, { "docid": "c57f8a6bf12d4263ae909c87b6920d22", "score": "0.5626735", "text": "def connect(self, target_ip, target_port):\n raise NotImplementedError()", "title": "" }, { "docid": "a070d87355d3f40b2a1b5f316d561105", "score": "0.5616814", "text": "def get_free_tcp_port():\n tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n tcp.bind((\"\", 0))\n arr_port = tcp.getsockname()\n tcp.close()\n return arr_port[1]", "title": "" }, { "docid": "f58b5199f6845b4029331d49481df0ad", "score": "0.56078774", "text": "def get_tcp_server_for_port(self):\n server = socketserver.TCPServer(\n ('', self.port), http.server.SimpleHTTPRequestHandler,\n )\n\n return server", "title": "" }, { "docid": "f5e289e88fd12834ced47fc0b4869a22", "score": "0.5602182", "text": "def open_socket_connect(\n host_name: str,\n port: int,\n gen_param_name: MaybeSequence[str],\n gen_param_value: MaybeSequence[Union[float, int, str]]\n) -> HHandle:\n with HalconOperator(342) as proc:\n proc.set_input_tuple(0, host_name)\n proc.set_input_tuple(1, port)\n proc.set_input_tuple(2, gen_param_name)\n proc.set_input_tuple(3, gen_param_value)\n proc.init_oct(0)\n proc.execute()\n socket = proc.get_output_tuple_s(0)\n return socket # type: ignore", "title": "" }, { "docid": "1ebbd385db2c394c497a54f434ef0338", "score": "0.55906177", "text": "def TCP_ConnectToServer(self, IP, port, timeOut):\n if type(IP) is not str:\n raise TypeError\n if type(port) is not int:\n raise TypeError\n\n socketId = 0\n if (SlitSim.__nbSockets < self.MAX_NB_SOCKETS):\n while (SlitSim.__usedSockets[socketId] == 1 and socketId < self.MAX_NB_SOCKETS):\n socketId += 1\n if (socketId == self.MAX_NB_SOCKETS):\n return -1\n else:\n return -1\n\n SlitSim.__usedSockets[socketId] = 1\n SlitSim.__nbSockets += 1\n try:\n pass\n # SlitSim.__sockets[socketId] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # SlitSim.__sockets[socketId].settimeout(timeOut)\n # SlitSim.__sockets[socketId].connect((IP, port))\n # SlitSim.__sockets[socketId].setblocking(1)\n except socket.error:\n return -1\n\n return socketId", "title": "" }, { "docid": "3690c1a36a404155484fb40f0242c301", "score": "0.55771893", "text": "def socketBinding(port):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(port)\n s.listen(1)\n clientSocket, address = s.accept()\n print(\"Socket bind completed\")\n print(f\"Connection from {address} has been established\")\n return clientSocket\n except socket.error as msg:\n print(msg)\n #return clientSocket", "title": "" }, { "docid": "770c97c8c38cc9cd72752de411e29627", "score": "0.55765736", "text": "def _openTcpSession(self, sIpAddr, uPort = None, fReversedSetup = False, cMsIdleFudge = 0):\n self.oCv.acquire();\n if self.oTxsSession is None:\n reporter.log2('_openTcpSession: sIpAddr=%s, uPort=%d, fReversedSetup=%s' % \\\n (sIpAddr, uPort if uPort is not None else 0, fReversedSetup));\n self.sIpAddr = sIpAddr;\n self.oTxsSession = txsclient.openTcpSession(self.cMsTimeout, sIpAddr, uPort, \\\n fReversedSetup, cMsIdleFudge);\n self.oTxsSession.setTaskOwner(self);\n else:\n self.sNextIpAddr = sIpAddr;\n reporter.log2('_openTcpSession: sNextIpAddr=%s' % (sIpAddr,));\n self.oCv.release();\n return None;", "title": "" }, { "docid": "e1629bb9333d82ef0c55cb507f1e8184", "score": "0.5534811", "text": "def check_if_port_isopen(ip_address: str, 
port: int):\n try:\n socket_object = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)\n socket_object.connect((ip_address, port))\n return True\n except Exception as var:\n return False", "title": "" }, { "docid": "06f6c2a5f813206bff7a45705d10ce4b", "score": "0.55344427", "text": "def socket_accept_connect(\n accepting_socket: HHandle,\n wait: str\n) -> HHandle:\n with HalconOperator(341) as proc:\n proc.set_input_tuple(0, accepting_socket)\n proc.set_input_tuple(1, wait)\n proc.init_oct(0)\n proc.execute()\n socket = proc.get_output_tuple_s(0)\n return socket # type: ignore", "title": "" }, { "docid": "934bda28d9df43bf1445f1aa6cc83097", "score": "0.5529849", "text": "def connection(self):\n address = (self.target_ip, self.target_port)\n print(\"build connection\")\n state = self.tcpCliSock.connect(address)\n print(\"{}\".format(state))", "title": "" }, { "docid": "17f99a4da2c9aff15e3f04180062bb14", "score": "0.5517254", "text": "def __init__(self, ip, port):\n\n self.host = ip\n self.port = port\n self.sock = socket.socket()\n\n # setsockopt allows for more flexible socket binding\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n self.sock.bind((self.host, self.port)) \n print(\"Bounded on {}:{}\".format(self.host, self.port))", "title": "" }, { "docid": "6c5bf2da4a82ec1c4240d48ba1fc947e", "score": "0.55110204", "text": "def openTcpSession(cMsTimeout, sHostname, uPort = None, fReversedSetup = False, cMsIdleFudge = 0):\n reporter.log2('openTcpSession(%s, %s, %s, %s, %s)' % \\\n (cMsTimeout, sHostname, uPort, fReversedSetup, cMsIdleFudge));\n try:\n oTransport = TransportTcp(sHostname, uPort, fReversedSetup);\n oSession = Session(oTransport, cMsTimeout, cMsIdleFudge);\n except:\n reporter.errorXcpt(None, 15);\n return None;\n return oSession;", "title": "" }, { "docid": "3ab7512e9e058f9145a55ab5a0e678cd", "score": "0.5492841", "text": "def tcp_server_on(host: str, port: int) -> \"NetworkJSONStream\":\n njs = NetworkJSONStream(host, port)\n njs.sock.bind((host, port))\n njs.sock.listen(0)\n conn, _ = njs.sock.accept()\n njs.conn = conn\n return njs", "title": "" }, { "docid": "37a9516583128161fffacb6bc1474f93", "score": "0.548368", "text": "def doPort(self, param):\n\n\t\tif not self.isAuth:\n\t\t\treturn \"530 User is not logged in.\"\n\n\t\t# TODO Check if value are integers\n\n\t\tparams = param.split(\",\")\n\t\taddr = \"\"\n\n\t\tif len(params) == 6:\n\t\t\tfor arg in params[:4]:\n\t\t\t\t# Validate Address\n\t\t\t\tself.debug(\"PORT - PARAM\", arg)\n\t\t\t\tif int(arg) < 0 or int(arg) > 255:\n\t\t\t\t\treturn \"501 Address is invalid\"\n\t\t\t\telse:\n\t\t\t\t\taddr += str(arg) + \".\"\n\n\t\t\taddr = addr[:-1]\n\n\t\t\tp1 = int(params[4])\n\t\t\tp2 = int(params[5])\n\n\t\t\t# TODO Check if this is a valid port\n\n\t\t\tdataport = (p1 * 256) + p2\n\n\t\t\tself.debug(\"port - address\", addr)\n\t\t\tself.debug(\"port - dataport\", dataport)\n\n\t\t\tself.dataAddr = addr\n\t\t\tself.dataPort = dataport\n\n\t\t\tself.createDatasock(dataport, addr=addr)\n\n\t\t\t# return self.createDatasock(dataport, addr=addr)\n\t\t\treturn \"200 Port Command.\"\n\t\telse:\n\t\t\treturn \"501 Invalid arguement\"", "title": "" }, { "docid": "05b940fdd9eeca8d2951ec9679f07ecd", "score": "0.54628175", "text": "def port_open(host: str, port: int) -> bool:\n a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n location = (host, port)\n result_of_check = a_socket.connect_ex(location)\n a_socket.close()\n\n if result_of_check == 0:\n return True\n\n return False", 
"title": "" }, { "docid": "ab2430ce97064203dd067a7897b1a3f4", "score": "0.54578996", "text": "def listen(cls,port,verbose=None):\n lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n lsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n lsock.bind((\"127.0.0.1\", port))\n lsock.listen(1)\n (csock, address) = lsock.accept()\n return SockIo(csock,verbose)", "title": "" }, { "docid": "b9d411217fb97f6f700b2ab29a94bef3", "score": "0.54507995", "text": "def check_socket(port):\n import socket\n from contextlib import closing\n\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n try:\n sock.bind(('', port))\n except:\n #Port is not open\n res = False\n else:\n #Port is open\n res = True\n\n return res", "title": "" }, { "docid": "66b29c765f8cf564758173e5b27e03c2", "score": "0.5442868", "text": "def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePostStartTcpSocketPortArgs']:\n return pulumi.get(self, \"port\")", "title": "" }, { "docid": "1000111c03b28770184be770a1a3fafd", "score": "0.54182833", "text": "def handle_connection(socket):", "title": "" }, { "docid": "3b2b9959c278dd62e2f1416d86f5f9b2", "score": "0.5397981", "text": "def next(self, app, port_base=None):\n if port_base is not None:\n port = port_base\n else:\n port = 0\n\n for i in range(1, 100):\n s = None\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', port))\n port = s.getsockname()[1]\n s.close()\n except Exception:\n if s is not None:\n s.close()\n if port_base is not None:\n port += 1\n continue\n raise\n if self.cluster.tcp_ports.get(port, None) is not None:\n port += 1\n continue\n self.cluster.tcp_ports[port] = app\n return port\n\n raise Exception((\"Could not allocate port (port_base=%s) \"\n \"in 100 attempts\") % port_base)", "title": "" }, { "docid": "d7cb1666e579647b8ff0af3b80740699", "score": "0.5396025", "text": "def Start(self, hostname, port):", "title": "" }, { "docid": "a2da70743119efa76782853155a9a6da", "score": "0.538891", "text": "def openconnection(self, response=None):\n\t\tif not self.socket.datasocket:\n\t\t\t\"\"\"Creates a datasocket with definded port {default 20} if none exist\"\"\"\n\t\t\tself.socket.datasocket = ClientSocket(sys.argv[1], sys.argv[2], self.port)", "title": "" }, { "docid": "2fccdaf8abfecafff396d4c70c191926", "score": "0.5379375", "text": "def createDatasock(self, port, addr=None):\n\n\t\tif addr is None:\n\t\t\taddr = self.ADDR[0]\n\t\tif not self.isPassive:\n\t\t\ttry:\n\t\t\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t\t\tsock.connect((addr, port))\n\n\t\t\t\t# save socket\n\t\t\t\tself.datasock = sock\n\t\t\t\tself.isDataPort = True\n\n\t\t\t\treturn \"200 Data connection established.\"\n\n\t\t\texcept socket.error:\n\t\t\t\treturn \"501 Unable to create dataport at given address\"\n\n\t\telse:\n\t\t\tself.datasock, _ = self.serversock.accept()\n\n\t\t\treturn \"225 Data connection is open.\"", "title": "" }, { "docid": "8af96b0dbe303f5409ab774001b1404c", "score": "0.5379368", "text": "def ConnectTCPIP(self, ipaddress, ipport=50000):\n self._host = ipaddress\n self._port = ipport\n debug('PISocket.connect: open connection to %s:%s', self._host, self._port)\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self._host, self._port))\n self._socket.setblocking(0)\n self._socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) # disable Nagle algorithm\n self._connected = True\n self.flush()", "title": "" }, { "docid": 
"521af2a20139857cfa13762cf23201c8", "score": "0.53781986", "text": "def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePostStartTcpSocketPortArgs']:\n return pulumi.get(self, \"port\")", "title": "" }, { "docid": "71e8921ea057be108056ef78acc7151a", "score": "0.5377033", "text": "def _get_tcp(self):\n return self.__tcp", "title": "" }, { "docid": "68c3d83a10d0413b525239ffd09ec84b", "score": "0.5376204", "text": "def send_tcp(sock, what, expiration=None):\n\n if isinstance(what, dns.message.Message):\n what = what.to_wire()\n l = len(what)\n # copying the wire into tcpmsg is inefficient, but lets us\n # avoid writev() or doing a short write that would get pushed\n # onto the net\n tcpmsg = struct.pack(\"!H\", l) + what\n _wait_for_writable(sock, expiration)\n sent_time = time.time()\n _net_write(sock, tcpmsg, expiration)\n return (len(tcpmsg), sent_time)", "title": "" }, { "docid": "ae4263929a577a55ec1e34c4ed961e56", "score": "0.53687984", "text": "def send_msg(sock, msg, ip, port):\n\n sock.sendto(msg, (ip, port))", "title": "" }, { "docid": "217f69a2e6837afb92c5cb19d09abbae", "score": "0.53625727", "text": "def __init__(self, ip_address, port, calling_method):\n\n\t\tself.ip_address = ip_address\n\t\tself.port = port\n\t\tself.calling_method = calling_method\n\n\t\tself.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "title": "" }, { "docid": "4512aad5df960340c586fba31b4cf82e", "score": "0.5333273", "text": "def on_connection(self, socket):", "title": "" }, { "docid": "cfd2858d17bb46eafcd959a7f0dbaa0e", "score": "0.5324515", "text": "def tcp_packet(self, setup, direction, ptfadapter, ip_version,\n src_ip=None, dst_ip=None, proto=None, sport=0x4321, dport=0x51, flags=None):\n src_ip = src_ip or DEFAULT_SRC_IP[ip_version]\n dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)\n if ip_version == \"ipv4\":\n pkt = testutils.simple_tcp_packet(\n eth_dst=setup[\"destination_mac\"][direction][self.src_port],\n eth_src=ptfadapter.dataplane.get_mac(0, 0),\n ip_dst=dst_ip,\n ip_src=src_ip,\n tcp_sport=sport,\n tcp_dport=dport,\n ip_ttl=64\n )\n\n if proto:\n pkt[\"IP\"].proto = proto\n else:\n pkt = testutils.simple_tcpv6_packet(\n eth_dst=setup[\"destination_mac\"][direction][self.src_port],\n eth_src=ptfadapter.dataplane.get_mac(0, 0),\n ipv6_dst=dst_ip,\n ipv6_src=src_ip,\n tcp_sport=sport,\n tcp_dport=dport,\n ipv6_hlim=64\n )\n\n if proto:\n pkt[\"IPv6\"].nh = proto\n\n if flags:\n pkt[\"TCP\"].flags = flags\n\n return pkt", "title": "" }, { "docid": "c300fc922ea901fcb0a54bcc9f4bd073", "score": "0.53236043", "text": "def port(self):\n if 'port' not in self.state:\n #sock = socket.socket()\n #sock.bind(('', 0))\n #self.state['port'] = sock.getsockname()[1]\n #sock.close()\n self.state['port']=8000\n return self.state['port']", "title": "" }, { "docid": "c4e0ff6b01cdb45f5d9fe25b33f8b99f", "score": "0.5323065", "text": "def test_tcp_port(host, port=23, timeout=5, check_result=False,\n expected_result=''):\n try:\n t = telnetlib.Telnet(host, port, timeout)\n if check_result:\n result = t.read_some()\n t.close()\n return result.startswith(expected_result)\n except (socket.timeout, socket.error):\n return False\n\n t.close()\n return True", "title": "" }, { "docid": "749765e7eb27464eedd4e8bbe035e83b", "score": "0.53210473", "text": "def set_port(self, port):\n self.port = port", "title": "" }, { "docid": "8b39d7c95a10b9691040fac8bb734c19", "score": "0.532083", "text": "def test_ipc_tcp(self):\n server = ipc.Server(\n address=(self.host, 
self.port),\n callback=self.process_server_request,\n )\n ipc_thread = threading.Thread(target=server.run, daemon=False)\n ipc_thread.start()\n time.sleep(1) # let server start properly\n\n # Verify the IPC server works in this mode\n is_open = port_open(self.host, self.port)\n user_input = [\n {\n \"class\": \"Event\",\n \"args\": \"test\",\n \"kwargs\": {\"key\": \"socket\", \"value\": \"true\"},\n }\n ]\n objects = ipc.Message.deserialize(user_input)\n with ipc.Client((self.host, self.port)) as client:\n response = client.send(objects)\n is_active = self.process_client_response(response)\n\n server.shutdown()\n ipc_thread.join()\n\n self.assertEqual(True, is_open)\n self.assertEqual(True, is_active)", "title": "" }, { "docid": "e3e9062c68a90a7f9560ee39c7198bcf", "score": "0.531783", "text": "def __init__(self, host: str, port: int) -> None:\n self.host = host\n self.port = port\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.conn: Optional[socket.socket] = None", "title": "" }, { "docid": "758f39acaf856e012dd3eca7f47bc279", "score": "0.53153086", "text": "def raw_send_tcp(fd, int_src_ip, int_dst_ip, src_port, dst_port, seq_no, ack_no, flags, window_size=1024, payload=b'', options=b''):\n hed_len = 5 + (len(options) // 4)\n empty = 0\n flags_line = ((hed_len <<12) & 0xf000) | ((empty <<9) & 0x0e00) | (flags.asbyte & 0x01ff)\n chk_sum = 0x0000\n urg_ptr = 0x0000\n segment = struct.pack(\n '!HHIIHHHH',\n src_port,\n dst_port,\n seq_no,\n ack_no,\n flags_line,\n window_size,\n chk_sum,\n urg_ptr\n ) + options + payload\n # segment = fix_checksum_tcp(segment, int_src_ip, int_dst_ip)\n pseudohdr = struct.pack('!IIHH', int_src_ip, int_dst_ip, 0x0006, len(segment))\n seg = bytearray(segment)\n seg[16:18] = b'\\x00\\x00'\n checksum = calc_checksum_tcp(pseudohdr + seg)\n seg[16:18] = struct.pack('!H', checksum)\n segment = bytes(seg)\n # return bytes(seg)\n print(\n 'TCP ENVIADO-->\\t',\n '\\n\\tsrc_ip : src_port => %s (%s) :\\t%d' % (hex(int_src_ip), int2strIP(int_src_ip), src_port),\n '\\n\\tdst_ip : dst_port => %s (%s) :\\t%d' % (hex(int_dst_ip), int2strIP(int_dst_ip), dst_port),\n '\\n\\tseq_no | ack_no => %s\\t|\\t%s' % (hex(seq_no), hex(ack_no)),\n '\\n\\thed_len | flags | window_size =>', hex(hed_len), '|', str(flags), '|', hex(window_size),\n '\\n\\tchecksum | urg_ptr =>', hex(checksum), '|', hex(urg_ptr),\n '\\n\\toptions =>', options.hex(),\n '\\n\\tpayload =>', payload.hex(),\n )\n send_ip(\n fd=fd,\n msg=segment,\n protocol=TCP,\n int_src_ip=int_src_ip,\n int_dst_ip=int_dst_ip\n )", "title": "" }, { "docid": "d25a3dc381064388e470f294d1a838fb", "score": "0.5309898", "text": "def accept(self, port):\n #should take care of bind, listen, and accept\n\n #sys.stderr.write(\"Called Connect\")\n if self.connection_state == __CONNECTION_OPEN__:\n sys.stderr.write(\"CONNECTION ALREADY EXISTS\")\n print(\"ACCEPT\")\n\n self.port = port\n try:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.socket.bind(('', self.port))\n except socket.error:\n raise MP2SocketError\n ''' wait for initial connection and log address of sender '''\n initial_handshake, self.target = self.socket.recvfrom(16)\n initial_ack = MSG()\n initial_ack.decode_packet(initial_handshake)\n\n\n ''' send response ack'''\n response_ack = MSG(__ACK__, self.seq_number, self.rwnd)\n response_ack.encode_packet()\n self.socket.sendto(response_ack.packet, self.target)\n ''' get local port'''\n self.rcv_buffer = RCV_BUFFER()\n 
self.connection_state = __CONNECTION_OPEN__\n self.socket_type = __SERVER__\n\n\n return self.target", "title": "" }, { "docid": "1bc78dced8343284edfa01c9f64b64e4", "score": "0.5309462", "text": "def init_tcp(self):\n server = DConnectThreadingTCPServer(self, ('0.0.0.0', self.conf['port']), DConnectHandler)\n return server", "title": "" }, { "docid": "75773992752bbae150eb781805519f45", "score": "0.5304171", "text": "def openPort(self, port_name=\"COM7\"):", "title": "" }, { "docid": "a9fecf61209360383fa288119f8d54dd", "score": "0.52946085", "text": "def create_socket(host, port):\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((host, port))\n sock.listen(5)\n sock.settimeout(PLAYER_SIGNUP_DURATION)\n\n return sock", "title": "" }, { "docid": "97e6b16b4e0c44027f21a9b83a80370f", "score": "0.5292776", "text": "def listen(self, port, address=None):\r\n\t\traise NotImplementedError", "title": "" }, { "docid": "4a7f9abe7b27070fe953f3f3d7ca7810", "score": "0.5290682", "text": "def doConnect(self, (host, port)):\r\n\t\traise NotImplementedError", "title": "" }, { "docid": "b6a617ed0f2ed84023b404c39e8029b4", "score": "0.5285737", "text": "def sendTCP(msgs,ip,port):\n\tsym=Symbol(messages=msgs)\n\treceived_datas=list()\n\tchannel = TCPClient(remoteIP=str(ip), remotePort=port)\n\tchannel.open()\n\tfor field in sym.fields:\n\t\tfor b in field.getValues():\n\t\t\tchannel.write(b)\n\t\t\treceived_datas.append(channel.read())\n\tchannel.close()\n\treturn received_datas", "title": "" }, { "docid": "7203e2796b577e6549718e267d7f7d8c", "score": "0.52845085", "text": "def bindSocket(sock):\n # Server on localhost\n host = \"127.0.0.1\"\n # Get port number from user\n host_port = int(raw_input(\"Enter port:\"))\n # Bind socket to given host and port\n try:\n sock.bind((host, host_port))\n except socket.error, msg:\n print \"Error binding socket to given host: \"+str(msg)\n sys.exit()\n print \"Binding established!\"", "title": "" }, { "docid": "9b9456db5160ac44235d1ac599321ad2", "score": "0.5284369", "text": "def create_connection_via_tcp(self):\n # create tcp connection\n tcp_connection = socket(AF_INET, SOCK_STREAM)\n tcp_connection.settimeout(WAIT_TIME)\n tcp_connection.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n # bind socket with addr\n tcp_connection.bind(self.addr)\n # st listing to clients\n tcp_connection.listen()\n self.udp_lock.release() # udp can start running\n self.tcp_lock.acquire() # tcp need to be realse by udp\n self.tcp_lock.release() # free for next run\n while self.end_time > time(): # run on global time\n try:\n tcp_connection.settimeout(self.end_time - time())\n connection_socket, addr = tcp_connection.accept()\n self.connected_client_socket.append(connection_socket)\n Thread(target=self.when_client_connected, args=(connection_socket, addr,)).start()\n except timeout:\n break\n tcp_connection.close()", "title": "" }, { "docid": "0abc16cba64c7b4bcc247b74a2a1dfae", "score": "0.5278671", "text": "def open_ports(self):\n\n # STEP 3: Open an incoming TCP port to access the cluster ednpoint\n try:\n vpc = self.ec2.Vpc(id=self.clusterProperties['VpcId'])\n defaultSg = list(vpc.security_groups.all())[0]\n print(defaultSg)\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(self.DWH_PORT),\n ToPort=int(self.DWH_PORT)\n )\n except Exception as e:\n print(e)\n\n print(\"Ports of virtual private cloud open for incoming and outgoing traffic\")", 
"title": "" }, { "docid": "c0da171d90ccd268e9aa0dbd256b18e2", "score": "0.52779746", "text": "def tcp_scan(ip, startPort, endPort):\n\n for port in range (startPort, endPort + 1):\n\n #Create TCP socket\n tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n #TCP connection returns an exception (aka != 0)\n if tcp.connect_ex((ip, port)):\n\n pass\n\n #TCP connection returns no exceptions (0)\n else:\n\n print('[+] %s:%d/TCP Open' % (ip, port))\n\n #Close TCP connection\n tcp.close()", "title": "" }, { "docid": "cd47c7b986ab42e1eda6cdec29391cbd", "score": "0.5270157", "text": "def open_socket(self):\r\n\t\ttry:\r\n\t\t\tself.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\t\tself.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\r\n\t\t\tself.server.bind((self.host,self.port))\r\n\t\t\tself.server.listen(5)\r\n\t\t\tself.server.setblocking(0)\r\n\t\texcept socket.error, (value,message):\r\n\t\t\tif self.server:\r\n\t\t\t\tself.server.close()\r\n\t\t\tprint \"Could not open socket: \" + message\r\n\t\t\tsys.exit(1)", "title": "" }, { "docid": "0bcc91345cf6c526c31d110d296a9667", "score": "0.5269014", "text": "def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLifecyclePreStopTcpSocketPortArgs']:\n return pulumi.get(self, \"port\")", "title": "" }, { "docid": "ca1177eedb852c5732e0daddd0fa8b65", "score": "0.52682596", "text": "def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLifecyclePreStopTcpSocketPortArgs']:\n return pulumi.get(self, \"port\")", "title": "" }, { "docid": "e3ecf6a664e19653f0c0d716648e5653", "score": "0.5266918", "text": "def test_connectTCP(self):\n class RecordDataProtocol(Protocol):\n\n def dataReceived(self, data):\n self.data = data\n proto = RecordDataProtocol()\n factory = ClientFactory()\n factory.protocol = lambda: proto\n reactor = MemoryReactor()\n logged = loggedReactor(reactor)\n logged.connectTCP('192.168.1.2', 1234, factory, 21, '127.0.0.2')\n [(host, port, factory, timeout, bindAddress)] = reactor.tcpClients\n self.assertEqual('192.168.1.2', host)\n self.assertEqual(1234, port)\n self.assertIsInstance(factory, _TrafficLoggingFactory)\n self.assertEqual(21, timeout)\n self.assertEqual('127.0.0.2', bindAddress)\n\n # Verify that the factory and protocol specified are really being used\n protocol = factory.buildProtocol(None)\n protocol.makeConnection(None)\n protocol.dataReceived(\"foo\")\n self.assertEqual(proto.data, \"foo\")", "title": "" }, { "docid": "6bc1be339dcde1e9af73415f1f7fd91b", "score": "0.5263176", "text": "def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecContainersLivenessProbeTcpSocketPortArgs']:\n return pulumi.get(self, \"port\")", "title": "" }, { "docid": "c9957ce05689ff07e5ae7a5c291002f1", "score": "0.5262879", "text": "def connect(self):\n self.socket.connect((\"localhost\",self.PORT_NUM))", "title": "" }, { "docid": "6612390b85255cd8c900896aeac0ef6c", "score": "0.52534497", "text": "def port(self) -> pulumi.Input['ScaledObjectSpecJobTargetRefTemplateSpecInitContainersLivenessProbeTcpSocketPortArgs']:\n return pulumi.get(self, \"port\")", "title": "" }, { "docid": "6ae0992c71c65906cea19ddf4cbf2213", "score": "0.5241176", "text": "def openconnection(self, destip, destport, localip, localport, timeout):\n\n # Call the next layer of openconnection.\n repy_socket = self.get_next_shim_layer().openconnection(destip, destport, localip, localport, timeout)\n\n return HideSizeSocket(repy_socket, self)", "title": "" }, { "docid": 
"3980880d4628a5f97a441b828fcdaa89", "score": "0.5237145", "text": "def tcp(q, where, timeout=None, port=53, af=None, source=None, source_port=0,\n one_rr_per_rrset=False, ignore_trailing=False):\n\n wire = q.to_wire()\n (af, destination, source) = _destination_and_source(af, where, port,\n source, source_port)\n s = socket_factory(af, socket.SOCK_STREAM, 0)\n begin_time = None\n received_time = None\n try:\n expiration = _compute_expiration(timeout)\n s.setblocking(0)\n begin_time = time.time()\n if source is not None:\n s.bind(source)\n _connect(s, destination)\n send_tcp(s, wire, expiration)\n (r, received_time) = receive_tcp(s, expiration, one_rr_per_rrset,\n q.keyring, q.mac, ignore_trailing)\n finally:\n if begin_time is None or received_time is None:\n response_time = 0\n else:\n response_time = received_time - begin_time\n s.close()\n r.time = response_time\n if not q.is_response(r):\n raise BadResponse\n return r", "title": "" }, { "docid": "bbb7d222abf1738b336612e42a0b965b", "score": "0.52369875", "text": "def tryOpenTcpSession(cMsTimeout, sHostname, uPort = None, fReversedSetup = False, cMsIdleFudge = 0):\n try:\n oTransport = TransportTcp(sHostname, uPort, fReversedSetup);\n oSession = Session(oTransport, cMsTimeout, cMsIdleFudge, fTryConnect = True);\n except:\n reporter.errorXcpt(None, 15);\n return None;\n return oSession;", "title": "" }, { "docid": "96824d9eab41f9fdc302e469a99e231c", "score": "0.5231343", "text": "def is_port_open(self, port):\n sock = socket()\n sock.settimeout(3)\n try:\n sock.connect((self.ip_address, port))\n return True\n except ConnectionError:\n return False", "title": "" }, { "docid": "91c5e240e06bb60d08ffb0e88ea8d362", "score": "0.52296484", "text": "def server_bind(self):\n TCPServer.server_bind(self)\n _, self.server_port = self.socket.getsockname()[:2]", "title": "" }, { "docid": "53f207761c6792a35dbf04957d83726b", "score": "0.5215154", "text": "def __init__(self, port : int):\n self.port = port\n self.sock = socket(AF_INET,SOCK_DGRAM) # UDP\n self.sock.bind(('', self.port))", "title": "" }, { "docid": "83113175068feafa9aa886d299b0d0af", "score": "0.5214368", "text": "def __init__(self, port):\n try:\n self.port = int(port)\n except ValueError:\n print port\n self.port = socket.getservbyname(port)", "title": "" }, { "docid": "0b3748548f018755d12b9c6b86666a1e", "score": "0.51889807", "text": "def test_port(host, port):\n curr_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n try:\n curr_socket.connect((host, port))\n return 1\n except socket.gaierror as g_e:\n print('No such host found: %s: \"%s\"' % (host, str(g_e)))\n return 0\n except socket.timeout as t_e:\n print('Couln\\'t connect to host in time: \"%s\"' % (str(t_e)))\n return 0\n except socket.error as e_e:\n print('Error: \"%s\" occured.' 
% (str(e_e)))\n return 0", "title": "" }, { "docid": "568a5b76499a53fcd653bc1aecf0d1d6", "score": "0.5184586", "text": "def connect(addr, port):\n conn = socket.create_connection((addr, port))\n return conn", "title": "" }, { "docid": "f6daf4569b89153fb9eaf1b7ccf54363", "score": "0.5179292", "text": "def connect(self, host, port):\n self.sock.connect((host, int(port)))", "title": "" }, { "docid": "6cf357ad9bd73eca05722319fcf7221d", "score": "0.5170057", "text": "def create_listening_socket(port):\n try:\n new_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n new_socket.bind((IP, port))\n new_socket.listen(1)\n except IOError:\n new_socket = None\n \n return new_socket", "title": "" }, { "docid": "7662270e7b1b0ebc3b596379ec6f21de", "score": "0.51652676", "text": "def free_port():\n free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n free_socket.bind(('0.0.0.0', 0))\n free_socket.listen(5)\n port = free_socket.getsockname()[1]\n free_socket.close()\n return port", "title": "" }, { "docid": "02e9bc861461c8f8ac58dd8c6b56524e", "score": "0.516274", "text": "def __init__(self):\r\n\r\n host = 'localhost'\r\n port = 8787\r\n self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n self.sock.connect((host,port))", "title": "" }, { "docid": "45ebc472022c9f3f15b8d1daf0c24d02", "score": "0.51625115", "text": "def setUdpJoinerPort(self, portNumber):", "title": "" }, { "docid": "b53f7797bb868713f1b425c3f3999e20", "score": "0.51576644", "text": "def ctcp ( self, destination, command, message = None):\n command = command.upper()\n if message == None:\n self.send ( 'PRIVMSG {} :\\x01{}\\x01'.format( destination, command ) )\n else:\n self.send ( 'PRIVMSG {} :\\x01{} {}\\x01'.format( destination, command, message ) )", "title": "" }, { "docid": "1f2ed38559114d91069a86a6e2cf54af", "score": "0.5150463", "text": "def start_socket(self, ip_address_server: str, port_server: int, password: Optional[str] = \"\") -> None:\n self.port = port_server\n self.ip_address = ip_address_server\n self.socket.bind((ip_address_server, port_server))\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.hash_password = hashlib.sha1(bytes(password, \"utf8\")).hexdigest()\n self.start()", "title": "" } ]
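Aside: the passages in the record above probe TCP ports in several slightly different ways (connect_ex checks, bind-to-port-0 tricks, telnet probes). The following is a minimal, self-contained sketch of the two most common patterns — testing whether a port answers and asking the OS for a free ephemeral port. It is illustrative only; the host and port values are assumptions and are not taken from any passage above.

import socket

def is_tcp_port_open(host: str, port: int, timeout: float = 3.0) -> bool:
    """Return True if a TCP connection to (host, port) succeeds within timeout."""
    # connect_ex returns 0 on success instead of raising, which keeps the check simple
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(timeout)
        return sock.connect_ex((host, port)) == 0

def find_free_port() -> int:
    """Ask the OS for an ephemeral port by binding to port 0, then release it."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind(("", 0))
        return sock.getsockname()[1]

if __name__ == "__main__":
    print(is_tcp_port_open("127.0.0.1", 22))   # example host/port, purely illustrative
    print(find_free_port())

Using connect_ex rather than connect avoids wrapping the probe in try/except, and binding to port 0 delegates the "which port is free" race to the operating system instead of scanning a range by hand.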
1d6071a8d7ae909fcf0d44cb3e19d560
Creates a tree representation as a set of nodes and a mapping from nodes to children Also creates two vptrees for Euclidean and assisting metrics, used for nearest neighbour queries
[ { "docid": "24194cd4d239b01000f1816a0e4d2fbd", "score": "0.6277342", "text": "def __init__(self, root, space, assisting_dist_fn = lambda a, b: np.linalg.norm(a.pos - b.pos)):\n self.root = root\n self.nodes = set()\n self.edges = {}\n self.back_edges = {}\n self.node_amount = 0\n self.space = space\n self.euclidean_vpt = DynamicVPTree(lambda a, b: np.linalg.norm(a.pos - b.pos))\n self.assisting_vpt = DynamicVPTree(assisting_dist_fn)\n self.add_node(root)", "title": "" } ]
[ { "docid": "4bc6e510335ff66acf124c19b506fc82", "score": "0.6659278", "text": "def generate_tree(self):\n calculate_criterion = None\n if(self.criterion == \"entropy\"):\n calculate_criterion = calculate_entropy\n elif self.criterion == \"accuracy\":\n calculate_criterion = calculate_accuracy\n self.root = create_tree(self.df,self.attr_list,self.target,1,self.maxlevel,calculate_criterion)\n return self.print_stats(self.df)", "title": "" }, { "docid": "8ea96dd096735ed31efeca6eacbd71fe", "score": "0.6617935", "text": "def make_tree(distances, method):\n return hierarchy.linkage(distances, method=method)", "title": "" }, { "docid": "7514c8e7fb06b7851f72ca11d06c0a6c", "score": "0.65008026", "text": "def build_tree(self):\n c = tf.math.softplus(self.c)\n items = self.get_all_items()\n tot_levels = self.tot_levels\n nodes = [self.get_all_nodes_level(l) for l in range(tot_levels)]\n # closest_node_to_items is a numpy array of size (n_items, )\n # holding the parent node index from one level up.\n closest_node_to_items = (sum(\n self.nodes_per_level[:tot_levels - 1]) + tf.math.argmin(\n hyp_utils.hyp_distance_all_pairs(items, nodes[tot_levels - 1], c),\n axis=1)).numpy()\n # closest_node_to_nodes is a numpy array of size (n_tot_nodes, )\n # holding the parent node index from one level up. Root index is -1.\n closest_node_to_nodes = -np.ones(sum(self.nodes_per_level))\n for l in range(1, tot_levels):\n first_ind = sum(self.nodes_per_level[:l])\n last_ind = sum(self.nodes_per_level[:l + 1])\n closest_node_to_nodes[first_ind:last_ind] = (\n sum(self.nodes_per_level[:l - 1]) + tf.math.argmin(\n hyp_utils.hyp_distance_all_pairs(nodes[l], nodes[l - 1], c),\n axis=1)).numpy()\n self.tree = tree_utils.build_tree(closest_node_to_items,\n closest_node_to_nodes,\n self.nodes_per_level)", "title": "" }, { "docid": "417b131ed2f07cdd8ff6567b676ce356", "score": "0.63986343", "text": "def construct_tree(self):\n out = []\n for node in self.walk():\n if node.parent:\n start = (node.parent.x, node.parent.y)\n end = (node.x, node.y)\n tup = (node.name, start, end)\n out.append(tup)\n return out", "title": "" }, { "docid": "a6a15553aa32e1700b94e7cba7cb2ef8", "score": "0.62317646", "text": "def example_tree():\n tree = DiGraph()\n tree.add_nodes_from(['S', 'NP-1', 'N-1', 'Jeff', 'VP', 'V', 'ate', 'NP-2', 'D',\n 'the', 'N-2', 'apple'])\n tree.add_edges_from([('S', 'NP-1'), ('NP-1', 'N-1'), ('N-1', 'Jeff'),\n ('S', 'VP'), ('VP', 'V'), ('V', 'ate'),\n ('VP', 'NP-2'), ('NP-2', 'D'), ('D', 'the'),\n ('NP-2', 'N-2'), ('N-2', 'apple')])\n return tree", "title": "" }, { "docid": "cdc7efa8e42e82dad3744884432285b8", "score": "0.6194422", "text": "def _create_tree_from_edges(edges):\n tree = {}\n for v1, v2 in edges:\n tree.setdefault(v1, []).append(v2)\n tree.setdefault(v2, []).append(v1)\n return tree", "title": "" }, { "docid": "df6d4dd626cfadfae612e9f62e71bb3f", "score": "0.61813855", "text": "def _build_tree(self):\n if len(self.ranges) == 0:\n return\n self.top_node = self.CertificateNode(self.center)\n for r in self.ranges:\n self._insert(r.start, self.top_node)\n self._insert(r.finish, self.top_node)\n\n for r in sorted(self.ranges, key=lambda x: x.datesort()):\n nodes = self.find_nodes_by_range(r.start, r.finish)\n for n in nodes:\n n.owner = r.owner\n n.factor = r.factor", "title": "" }, { "docid": "5ddda1cb017b9b8984bbd98845568500", "score": "0.6169927", "text": "def build_tree(self):\n self.root = 0\n self.nodes = {}\n self.links = {}\n for line in self.dot_string:\n line = line.rstrip()\n if '->' in line:\n # It's 
a link\n (from_node, to_node) = self.parse_link(line)\n try:\n f = int(from_node)\n t = int(to_node)\n except ValueError:\n continue\n if f in self.links:\n self.links[f].append(t)\n if (self.links[f][0] > self.links[f][1]):\n print('ouch')\n sys.exit(-1)\n else:\n self.links[f] = [t]\n else:\n # It's a node\n if ' [label=\"' not in line:\n \"\"\" If the line does not contain [label=\" it means is not a line\n representing a node\n \"\"\"\n continue\n (type, node_id, (f, v)) = self.parse_node(line)\n try:\n node_id = int(node_id)\n except ValueError:\n print('Error converting node_id... Please check', node_id)\n continue\n if type == 'node':\n try:\n feature_id = int(f)\n feature_value = float(v)\n except ValueError:\n continue\n if node_id in self.nodes:\n print('Duplicate node', node_id)\n sys.exit(-1)\n self.nodes[node_id] = (type, feature_id, feature_value)\n elif type == 'leaf':\n try:\n label_value = int(v)\n except ValueError:\n continue\n if node_id in self.nodes:\n print('Duplicate node', node_id)\n sys.exit(-1)\n self.nodes[node_id] = (type, label_value)\n else:\n print('Unexpected error')\n sys.exit(-1)", "title": "" }, { "docid": "af3ba2730c0ef2148851ed0e5cac822d", "score": "0.6150103", "text": "def build_subtrees(mapped_points, params):\n\n def create_combiner(point):\n ids = [point.id]\n X = [point.x]\n return DataMatrix(ids, X)\n\n def merge_value(mat, point):\n mat.ids.append(point.id)\n mat.X.append(point.x)\n return mat\n\n def merge_combiners(mat1, mat2):\n mat1.ids.extend(mat2.ids)\n mat1.X.extend(mat2.X)\n return mat1\n\n def build_subtree(mat):\n subtree = kdt.KDTBuilder(\n np.asarray(mat.X),\n np.asarray(mat.ids),\n maxleaf=params.maxnode,\n ).build_tree()\n return subtree\n\n subtrees = mapped_points.combineByKey(\n create_combiner,\n merge_value,\n merge_combiners\n ).mapValues(build_subtree)\n\n return subtrees", "title": "" }, { "docid": "a928b13b46a9864d4c8fd1cde29a1447", "score": "0.61460924", "text": "def build_tree(self):\n\n if self.stores is not None:\n stores_ecef = []\n for store in self.stores:\n store_ecef = geodetic2ecef(\n float(store[STORE_FIELDS['LATITUDE']]),\n float(store[STORE_FIELDS['LONGITUDE']])\n )\n store['ecef'] = store_ecef\n stores_ecef.append(store_ecef)\n self.tree = KDTree(numpy.array(stores_ecef))", "title": "" }, { "docid": "c5c60d4c709b5b0ea704ee928acf64d1", "score": "0.6132797", "text": "def build_tree_map(df, \n average_score = 0.5, \n maxdepth = None, \n column_nm = {\n 'id':'id',\n 'label':'labels',\n 'parent':'parent',\n 'value':'value',\n 'color':'color'\n },\n value_name = '# docs',\n color_name = 'Avg. 
Similarity'):\n if isinstance(df,list):\n pass\n elif isinstance(df,pd.core.frame.DataFrame):\n df=[df]\n else:\n print('df of not expected format')\n\n # Assert mandatory columns are present in dataframe\n for (i, df_i) in enumerate(df):\n for m in column_nm:\n assert(column_nm[m] in df_i.columns)\n\n fig = make_subplots(1, len(df), specs=[[{\"type\": \"domain\"}]*len(df)],)\n\n for (i, df_all_trees) in enumerate(df):\n fig.add_trace(go.Treemap(\n ids=df_all_trees[column_nm['id']],\n labels=df_all_trees[column_nm['label']],\n parents=df_all_trees[column_nm['parent']],\n values=df_all_trees[column_nm['value']],\n branchvalues='total',\n marker=dict(\n colors=df_all_trees[column_nm['color']],\n colorscale='RdBu',\n cmid=average_score),\n hovertemplate='<b>%{label} </b> <br> '+value_name+': %{value}<br>'+color_name+': %{color:.2f}',\n name=''\n ), 1, i+1)\n if maxdepth:\n if maxdepth < 2:\n print('try maxdepth > 1')\n fig.update_traces(maxdepth=maxdepth)\n fig.update_layout(margin=dict(t = 30, b = 10, r = 10, l = 10))\n #uniformtext_minsize=12, uniformtext_mode='show')\n fig.show()", "title": "" }, { "docid": "27e8e46309cfe4a013b202f418b27bef", "score": "0.6110824", "text": "def _build_tree(self):\n while len(self.min_heap) > 1:\n left = heapq.heappop(self.min_heap)\n right = heapq.heappop(self.min_heap)\n parent = self.merge(left, right)\n\n heapq.heappush(self.min_heap, parent)\n\n # No need for frequencies in a tree, thus only names are being added.\n self.tree[parent[1]] = [left[1], right[1]]", "title": "" }, { "docid": "5adaf6a8eaee7e093783b974918f29c4", "score": "0.61049724", "text": "def _build_tree(self, X, y, feature_names, depth, sample_weights):\n mytree = dict()\n # YOUR CODE HERE\n # TODO: Use `_choose_best_feature` to find the best feature to split the X. 
Then use `_split_dataset` to\n # get subtrees.\n # Hint: You may find `np.unique` is useful.\n # begin answer\n # Todo prune, early stop\n if depth <= self.max_depth and X.shape[0] >= self.min_samples_leaf:\n fea_idx, best_thresh = self._choose_best_feature(X, y, sample_weights)\n fea_name = feature_names[fea_idx]\n sub_fea_names =feature_names[:fea_idx] + feature_names[fea_idx+1:]\n if self.continuous:\n mytree[(fea_name, best_thresh)] = {}\n for c_idx in range(2):\n sub_X, sub_y, sub_sample_weights = self._split_dataset(X, y, fea_idx, best_thresh, sample_weights, c_idx)\n if len(sub_y) > 0:\n mytree[(fea_name, best_thresh)][c_idx] = self._build_tree(sub_X, sub_y, sub_fea_names, depth+1, sub_sample_weights)\n else:\n mytree[fea_name] = {}\n fea_set = np.unique(X[:, fea_idx])\n for i in range(len(fea_set)):\n sub_X, sub_y, sub_sample_weights = self._split_dataset(X, y, fea_idx, fea_set[i], sample_weights)\n mytree[fea_name][fea_set[i]] = self._build_tree(sub_X, sub_y, sub_fea_names, depth+1, sub_sample_weights)\n else:\n mytree = self.majority_vote(y, sample_weights)\n # end answer\n return mytree", "title": "" }, { "docid": "ca36fff783da365b4d94d0ab63dd88e2", "score": "0.60191315", "text": "def construct_tree(D, sequences):\n\n clusters = [Node(name) for name in sequences]\n distances = {(c1, c2): D[c1.label, c2.label] for c1 in clusters\n for c2 in clusters}\n\n while len(clusters) > 1:\n cx, cy = min(((c1, c2) for c1 in clusters for c2 in clusters if c1 != c2),\n key=lambda x: score(x[0], x[1], distances, clusters))\n new_cluster = Node(cx.label + ',' + cy.label)\n new_cluster.add(cx, cy)\n\n clusters.remove(cx)\n clusters.remove(cy)\n\n for cz in clusters:\n new_distance = (distances[cx, cz] + distances[cy, cz]\n - distances[cx, cy]) / 2\n distances[new_cluster, cz] = new_distance\n distances[cz, new_cluster] = new_distance\n\n clusters.append(new_cluster)\n\n write_dot(clusters[0])\n\n return clusters[0]", "title": "" }, { "docid": "ca1aa72308e6d2142b290f845b31020f", "score": "0.6002931", "text": "def create_tree(tuples: Iterable[Relation]) -> DefaultDict[T, List[T]]:\n # convert to dict\n tree: DefaultDict[T, List[T]] = defaultdict(list)\n for pair in tuples:\n child, father = pair\n if father:\n tree[father].append(child)\n\n return tree", "title": "" }, { "docid": "d55f965e7bd0c6796aadddb6726c8e72", "score": "0.5988668", "text": "def build_tree(self) -> None:\n # First initialize the dominance links and then populate them\n for t in self.types:\n t['parentNode'] = None\n t['childNodes'] = []\n for t in self.types:\n parentName = t['parent']\n if parentName is not None and parentName in self.types_idx:\n parentNode = self.types_idx.get(parentName)\n t['parentNode'] = parentNode\n parentNode.setdefault('childNodes', []).append(t)", "title": "" }, { "docid": "9332843f00cd318e163dffcaf461d2f1", "score": "0.5987696", "text": "def make_children(self):\r\n \r\n S, xj = self.best_split()\r\n \r\n # May want to put this in __init__\r\n if S is None and xj is None:\r\n self.lhs = None\r\n self.rhs = None\r\n return \r\n \r\n \r\n left_rows = list(self.x_train[self.x_train[xj] <= S].index.values)\r\n left_x = self.x_train.loc[left_rows, :]\r\n left_y = self.y_train.loc[left_rows, :]\r\n \r\n right_rows = list(self.x_train[self.x_train[xj] > S].index.values)\r\n right_x = self.x_train.loc[right_rows, :]\r\n right_y = self.y_train.loc[right_rows, :]\r\n \r\n if left_y.shape[0] == 0 or right_y.shape[0] == 0:\r\n return \r\n \r\n else:\r\n # Make lhs and rhs nodes (children) \r\n self.lhs = 
Node(left_x, left_y, self.criterion, self.path + [xj, ' <= ', str(S)], self.nrow, S, xj)\r\n self.rhs = Node(right_x, right_y, self.criterion, self.path + [xj, ' > ', str(S)], self.nrow, S, xj)\r\n \r\n return", "title": "" }, { "docid": "d5d7ebf58d1dbf8ea836a546118393ce", "score": "0.5947557", "text": "def build_tree(records, symptoms):\r\n root = node_generator(symptoms, 0, [], records)\r\n return root", "title": "" }, { "docid": "a67eaf09b29e3ee8c16e9440e1d3cca2", "score": "0.59221405", "text": "def hash_trees(tree):\n (vertex, edge, ordering, labels) = tree\n v_ordered = sorted(list(vertex), key=lambda x: (ordering[x], labels[x]))\n vertices_hash_map = dict()\n vertices = dict()\n for v in v_ordered:\n if v not in edge or len(edge[v]) == 0:\n if labels[v] not in vertices_hash_map:\n vertices_hash_map[labels[v]] = list()\n vertices_hash_map[labels[v]].append(v)\n vertices[v] = [0, 1, str(labels[v])]\n else:\n neighbors_ids = []\n d = 0\n for n in edge[v]:\n d += 1 + vertices[n][0]\n neighbors_ids.append(vertices[n][2])\n\n ID = str(labels[v])+'('+(','.join(neighbors_ids))+')'\n\n vertices[v] = [d, 1, ID]\n if ID not in vertices_hash_map:\n vertices_hash_map[ID] = list()\n vertices_hash_map[ID].append(v)\n\n return (vertices, vertices_hash_map, v_ordered)", "title": "" }, { "docid": "46ba123690b058855604f2b0f115d048", "score": "0.5902018", "text": "def compute_taxonomy_tree(otu_defs):\n tree_root = AttributeTree()\n taxa_levels = otus.TAXONOMY_LEVELS\n tree_root.set_attribute('node_level', 'root')\n tree_root.set_attribute('node_name', 'root')\n for otu_def in otu_defs:\n current_node = tree_root\n for taxa_level in taxa_levels:\n taxa_name = otu_def.get_value(taxa_level)\n existing_child = current_node.find_child('node_name',taxa_name)\n if existing_child is None:\n new_node = AttributeTree()\n new_node.set_attribute('node_level', taxa_level)\n new_node.set_attribute('node_name', taxa_name)\n current_node.add_child(new_node)\n current_node = new_node\n else: \n current_node = existing_child\n current_level = current_node.get_attribute('node_level')\n assert(current_level == taxa_level)\n #add the otu as if it is a level below species\n species_node = current_node\n otu_name = otu_def.get_value('otu_name')\n existing_otu_node = species_node.find_child('node_name', otu_name)\n if existing_otu_node is None:\n otu_idx = otu_def.get_value('index_within_tornado_run')\n otu_node = AttributeTree()\n otu_node.set_attribute('node_level', 'otu_name')\n otu_node.set_attribute('node_name', otu_name)\n otu_node.set_attribute('otu_index', otu_idx)\n species_node.add_child(otu_node)\n _set_node_idxs(tree_root)\n #logger.pretty_print_jdata(tree_root.to_jdata())\n return tree_root", "title": "" }, { "docid": "df7011256d9fa685f37c8cccc977ce4f", "score": "0.59014285", "text": "def _mk_root(self):\n\n class _node:\n def __init__(self, parent=None, title=\"\"):\n if (isinstance(parent, _node)):\n parent = parent._n\n\n self._n = estimate.Node(\n parent = parent,\n title = title\n )\n\n if (parent is not None):\n parent.append(self._n)\n\n def estimate(self, numbers):\n assert self._n.is_role()\n assert self._n.parent() is not None\n self._n.estimate(None, numbers)\n self._n.parent().estimate(self._n.title(), numbers)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n with _node(parent=None, title=None) as root:\n\n with _node(parent=root, title=\"C. 1st node\") as n1:\n\n with _node(parent=n1, title=\"ZA. 
1st subnode\") as n1_1:\n\n with _node(parent=n1_1, title=\"subnode A\") as n1_1a:\n with _node(parent=n1_1a, title=\"(role1)\") as role: role.estimate((1,2,3))\n with _node(parent=n1_1a, title=\"(role2)\") as role: role.estimate((2,3,4))\n with _node(parent=n1_1a, title=\"(role3)\") as role: role.estimate((3,4,5))\n\n with _node(parent=n1_1, title=\"subnode B\") as n1_1b:\n with _node(parent=n1_1b, title=\"(role2)\") as role: role.estimate((1,2,3))\n with _node(parent=n1_1b, title=\"(role3)\") as role: role.estimate((2,3,4))\n with _node(parent=n1_1b, title=\"(role4)\") as role: role.estimate((3,4,5))\n\n with _node(parent=n1, title=\"YA. 2nd subnode\") as n1_2:\n\n with _node(parent=n1_2, title=\"subnode A\") as n1_2a:\n with _node(parent=n1_2a, title=\"(role3)\") as role: role.estimate((1,2,3))\n with _node(parent=n1_2a, title=\"(role4)\") as role: role.estimate((2,3,4))\n with _node(parent=n1_2a, title=\"(role5)\") as role: role.estimate((3,4,5))\n\n with _node(parent=n1_2, title=\"subnode B\") as n1_2b:\n with _node(parent=n1_2b, title=\"(role4)\") as role: role.estimate((1,2,3))\n with _node(parent=n1_2b, title=\"(role5)\") as role: role.estimate((2,3,4))\n with _node(parent=n1_2b, title=\"(role1)\") as role: role.estimate((3,4,5))\n\n with _node(parent=n1, title=\"XA. 3rd subnode\") as n1_3:\n\n with _node(parent=n1_3, title=\"subnode A\") as n1_3a:\n with _node(parent=n1_3a, title=\"(role5)\") as role: role.estimate((1,2,3))\n with _node(parent=n1_3a, title=\"(role1)\") as role: role.estimate((2,3,4))\n with _node(parent=n1_3a, title=\"(role2)\") as role: role.estimate((3,4,5))\n\n with _node(parent=n1_3, title=\"subnode B\") as n1_3b:\n with _node(parent=n1_3b, title=\"(role1)\") as role: role.estimate((1,2,3))\n with _node(parent=n1_3b, title=\"(role2)\") as role: role.estimate((2,3,4))\n with _node(parent=n1_3b, title=\"(role3)\") as role: role.estimate((3,4,5))\n\n with _node(parent=root, title=\"B. 2nd node\") as n2:\n\n with _node(parent=n2, title=\"BZ. 1st subnode\") as n2_1:\n with _node(parent=n2_1, title=\"(role21)\") as role: role.estimate((1,2,3))\n\n with _node(parent=n2, title=\"BY. 2nd subnode\") as n2_2:\n with _node(parent=n2_2, title=\"(role22)\") as role: role.estimate((1,2,3))\n\n with _node(parent=n2, title=\"BX. 3rd subnode\") as n2_3:\n with _node(parent=n2_3, title=\"(role23)\") as role: role.estimate((1,2,3))\n\n with _node(parent=root, title=\"A. 
3rd node\") as n3:\n\n with _node(parent=n3, title=\"3) 1st subnode\") as n3_1:\n with _node(parent=n3_1, title=\"subnode A\") as n3_1a:\n with _node(parent=n3_1a, title=\"(role32)\") as role: role.estimate((1,2,3))\n with _node(parent=n3_1, title=\"subnode B\") as n3_1b:\n with _node(parent=n3_1b, title=\"(role33)\") as role: role.estimate((1,2,3))\n\n with _node(parent=n3, title=\"2) 2nd subnode\") as n3_2:\n with _node(parent=n3_2, title=\"subnode A\") as n3_2a:\n with _node(parent=n3_2a, title=\"(role31)\") as role: role.estimate((1,2,3))\n with _node(parent=n3_2, title=\"subnode B\") as n3_2b:\n with _node(parent=n3_2b, title=\"(role33)\") as role: role.estimate((1,2,3))\n\n with _node(parent=n3, title=\"1) 3rd subnode\") as n3_3:\n with _node(parent=n3_3, title=\"subnode A\") as n3_3a:\n with _node(parent=n3_3a, title=\"(role31)\") as role: role.estimate((1,2,3))\n with _node(parent=n3_3, title=\"subnode B\") as n3_3b:\n with _node(parent=n3_3b, title=\"(role32)\") as role: role.estimate((1,2,3))\n\n # just a strange thing with deep hierarchy too\n with _node(parent=root, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"(role)\") as role: role.estimate((0,0,0))\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"(role)\") as role: role.estimate((0,0,0))\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"...\") as n:\n with _node(parent=n, title=\"(role)\") as role: role.estimate((0,0,0))\n \n\n return root._n", "title": "" }, { "docid": "2b35003ccefd300fde3da8b76f2f5de0", "score": "0.5887609", "text": "def __init__(self, nodes = []):\n self.__adjacency_list = {} # maps parent -> set of (child, label) pairs\n for n in nodes:\n self.__adjacency_list[n] = HashSet()\n self.__label_map = {} # maps label -> set of (parent, child) pairs", "title": "" }, { "docid": "cb4c19f90e5b138f05738587cb454dcc", "score": "0.5882052", "text": "def tree_growth(self, records):\n # Your code here\n # Hint-1: Test whether the stopping criterion has been met by calling function stopping_cond()\n # Hint-2: If the stopping criterion is met, you may need to create a leaf node\n # Hint-3: If the stopping criterion is not met, you may need to create a\n # TreeNode, then split the records into two parts and build a\n # child node for each part of the subset\n\n # stopping criterion 1: out of data\n if len(records) == 0:\n return TreeNode()\n # len(rows) is the number of units in a set\n current_score = self.entropy(records)\n\n # Set up some variables to track the best criteria\n best_gain = 0.0\n best_criteria = None\n best_sets = None\n\n column_count = len(records[0]) - 1\n # count the # of attributes/columns.\n # It's -1 because the last one is the target attribute and it does not count.\n\n for col in range(0, column_count):\n # Generate the list of all possible different values in the considered column\n # Added for debugging\n global column_values\n column_values = {}\n for row in records:\n column_values[row[col]] = 1\n # Now try dividing the rows up for each value in this column\n 
for value in column_values.keys():\n # the 'values' here are the keys of the dictionary\n (set1, set2) = self.divideset(records, col, value)\n # define set1 and set2 as the 2 children set of a division\n\n # Information gain\n p = float(len(set1)) / len(records)\n # p is the size of a child set relative to its parent\n\n gain = current_score - p * self.entropy(set1) - (1 - p) * self.entropy(set2)\n\n if gain > best_gain and len(set1) > 0 and len(set2) > 0:\n # set must not be empty\n best_gain = gain\n best_criteria = (col, value)\n best_sets = (set1, set2)\n\n # Create the sub branches: right branch and left branch\n if best_gain > 0:\n trueBranch = self.tree_growth(best_sets[0])\n falseBranch = self.tree_growth(best_sets[1])\n return TreeNode(col=best_criteria[0], value=best_criteria[1],\n tb=trueBranch, fb=falseBranch)\n else:\n # stopping criterion 2: when the gain is not going up\n return TreeNode(results=self.uniquecounts(records))", "title": "" }, { "docid": "12865e1a45db966fce306ce1b27636c0", "score": "0.5872629", "text": "def new_from_tree(cls,t):\n newT=cls.__new__(cls) #Do not call the __init__() because we will fill the args by hand\n newT.tokens=t.tokens\n newT.childs=defaultdict(lambda:set())\n for tok,s in t.childs.iteritems():\n if tok in t.ready_nodes:\n newT.childs[tok]=s # this one is ready, no need to copy\n else:\n newT.childs[tok]=s.copy()\n newT.ready_nodes=t.ready_nodes.copy() # this needs to be copied\n newT.govs=t.govs.copy()\n newT.deps=t.deps[:]\n newT.dtypes=t.dtypes.copy()\n newT.root=t.root\n newT.projective_order=t.projective_order #no need to copy this one\n newT.ready=t.ready\n return newT", "title": "" }, { "docid": "11c41a4752a853d9410b2bd4f82753b2", "score": "0.5871484", "text": "def build_tree():\n \n root = TreeNode(1)\n root.left = TreeNode(2)\n root.right = TreeNode(3)\n root.left.left = TreeNode(4)\n root.left.right = TreeNode(5)\n return root", "title": "" }, { "docid": "826a57ac1c8d6460f0592629b6395941", "score": "0.5864981", "text": "def create_tree(variables, values, offset, default_lefts=None, tree_title='tree'):\n tree = ROOT.TTree('tree', tree_title)\n\n variables_array = ROOT.std.vector('int')()\n values_array = ROOT.std.vector('float')()\n offset_array = array('f', [0.])\n default_left_array = ROOT.std.vector('bool')()\n\n tree.Branch('vars', 'vector<int>', ROOT.AddressOf(variables_array))\n tree.Branch('values', 'vector<float>', ROOT.AddressOf(values_array))\n\n if offset is not None:\n tree.Branch('offset', offset_array, 'offset/F')\n else:\n offset = [0.] 
* len(values)\n\n if default_lefts is not None:\n tree.Branch('default_left', 'vector<bool>', default_left_array)\n else:\n default_lefts = [True] * len(variables)\n\n for var, val, off, dl in zip(variables, values, offset, default_lefts):\n variables_array.clear()\n values_array.clear()\n default_left_array.clear()\n\n for v in val:\n values_array.push_back(v)\n for v in var:\n variables_array.push_back(v)\n if dl is not True:\n for v in dl:\n default_left_array.push_back(v)\n\n offset_array[0] = off\n\n tree.Fill()\n return tree", "title": "" }, { "docid": "239ea3960bb81bfaf3b9b42456a41f72", "score": "0.5852034", "text": "def CreateTreesForOptimization(mva_method=\"\",fileout=\"OptimizeTrees.root\"):\n\n\n if (len(mva_method)<=0) : return None\n results=[]\n if os.path.exists(mva_method+\"_result.gz\"):\n f=open(mva_method+\"_result\"+\".gz\",'rb')\n data=zlib.decompress(f.read())\n results=pickle.loads(data)\n f.close()\n \n if os.path.exists(\"grid/\"+mva_method+\"_result.gz\"):\n f=open(mva_method+\"_result\"+\".gz\",'rb')\n data=zlib.decompress(f.read())\n results+=pickle.loads(data)\n f.close()\n\n results=sorted(results,key=lambda x: x[2] ,reverse=True)\n results=[x for x in results if not x[0] == \"000000\"]\n results=[x for x in results if not x[2] == 0] # additional filtering\n\n file=ROOT.TFile(fileout,\"RECREATE\")\n tree=ROOT.TTree(\"optimization\",\"optimization\")\n vars=PrintVariableNames(mva_method,True)\n container={}\n for var in vars:\n container.update({var:array.array('f',[0])})\n tree.Branch(var,container[var],var+\"/F\");\n\n container.update({\"result\":array.array('f',[0])}) \n tree.Branch(\"result\",container[\"result\"],\"result/F\");\n\n for res in results:\n varmap=res[1]\n separarion=res[2]\n for i,j in varmap.items(): \n try:\n container[i][0]=j \n except KeyError:\n pass\n\n container[\"result\"][0]=separarion\n tree.Fill()\n\n tree.Write()\n file.Close()\n\n return", "title": "" }, { "docid": "63a7f64fcd11c1cd88e59e067dcfcf9c", "score": "0.58482504", "text": "def tree_fasttree(self):\n sh.FastTreeMP('-fastest' ,'-out', self.fasttree_tree, '-nt', self.mothur_aligned)", "title": "" }, { "docid": "49ebbe8ad10bd6a5dbcf13aae5a12745", "score": "0.5841543", "text": "def __init__(self, nodes = []):\r\n self._adjacency_list = {} # maps parent -> set of (child, label) pairs\r\n for n in nodes:\r\n self._adjacency_list[n] = set()\r\n self._label_map = {} # maps label -> set of (parent, child) pairs\r", "title": "" }, { "docid": "9811c0ff7f335dbd35904106229a7087", "score": "0.5831913", "text": "def _make_leaf_dist(self):", "title": "" }, { "docid": "6b51f50c7d0ee437d8cce318c88a26a0", "score": "0.5825731", "text": "def generate_treemap(self, rect):\n if self._subtrees == []:#leaf\n return [(rect,self.colour)]\n else:\n rects = []\n move = 0\n for subtree in self._subtrees:\n rate = float(subtree.data_size)/self.data_size#the rate of subtree in its parent\n x,y,width,height = rect#unpack input rect\n if width > height:\n x = x +move#update x\n width = round(rate*width)#update width\n rects.extend(subtree.generate_treemap((x,y,width,height)))#implement treemap algorithm\n move += width#update move\n else:\n y = y +move#update y\n height = round(rate*height)#update height\n rects.extend(subtree.generate_treemap((x,y,width,height)))#implement treemap algorithm\n move += height#update move\n return rects", "title": "" }, { "docid": "9ecc75f4130f4a9c1d2846d0d6d1f654", "score": "0.5812372", "text": "def generatetree(pred):\n tree = {}\n for i, p in enumerate(pred):\n if p == 
-1:\n # root node\n tree[i] = [i]\n continue\n idx = p\n path = [idx]\n while idx >= 0:\n nextnode = pred[idx]\n idx = nextnode\n if idx >= 0:\n path.append(nextnode)\n tree[i] = path\n return tree", "title": "" }, { "docid": "666c8f50361e3354a7785ceca2b6a178", "score": "0.58120066", "text": "def setUp(self):\n arr = np.array(([1] * 5) + ([2] * 5))\n ndarr = np.array(([1, 2, 3] * 5) + ([2, 2, 3] * 5)).reshape(10, 3)\n self.tree = CHAID.Tree(ndarr, arr)", "title": "" }, { "docid": "c23280da275cbfcb49a4b1cd4457c231", "score": "0.58107394", "text": "def transformed(tree):\n t = {}\n cols = set()\n for name, table in tree['columns'].iteritems():\n for column in table:\n cols.add(name + '.' + column)\n filters = frozenset(map(hashable, tree['filters']))\n t['columns'] = frozenset(cols)\n t['filters'] = filters\n t['operator'] = tree['operator']\n t['total'] = tree['total']\n children = []\n for child in tree['children']:\n children.append(transformed(child))\n t['children'] = children\n return t", "title": "" }, { "docid": "3d3f24eb6511e4c91175103f42663190", "score": "0.5803691", "text": "def _create_tree(self, newick_info):\n t = ete.Tree(newick_info, format=1) # see ete/coretype/TreeNode doc for format\n\n ts = ete.TreeStyle()\n ts.show_leaf_name = False\n ts.force_topology = True\n ts.show_scale = False\n ts.legend = None\n ts.complete_branch_lines_when_necessary = True\n ts.extra_branch_line_type = 1 # 0 solid, 1 dashed, 2 dotted\n ts.extra_branch_line_color = \"black\"\n ts.draw_guiding_lines = True\n tree_face = ete.TextFace(\"Significant organisms phylogenetic relationship\", fsize=100, bold=True)\n ts.title.add_face(tree_face, column=0)\n\n ns = ete.NodeStyle()\n ns[\"hz_line_type\"], ns[\"vt_line_type\"] = 0, 0\n ns[\"hz_line_color\"], ns[\"vt_line_color\"] = \"black\", \"black\"\n ns[\"hz_line_width\"], ns[\"vt_line_width\"] = 2, 2\n ns[\"size\"] = 0\n\n for node in t.traverse():\n node.set_style(ns)\n if not node.is_leaf():\n face = ete.TextFace(node.name, fsize=40, penwidth=10, fgcolor=\"blue\")\n node.add_face(face, 1)\n else:\n face = ete.TextFace(node.name, fsize=65, penwidth=10)\n node.add_face(face, 1, \"aligned\")\n destination = os.path.join(os.getcwd(), \"src\", \"data\", \"visualizations\", \"phylogeny\", \"sig_orgs_phylo.pdf\")\n t.render(destination, tree_style=ts, dpi=500)", "title": "" }, { "docid": "050bff026f2c6b868f5851013b4e917f", "score": "0.58009845", "text": "def _extract_tree(self):\n bfs = deque([])\n free = np.ones(self._vertices_num, bool)\n # consider multi-component graph\n for i in range(self._vertices_num):\n if free[i]:\n free[i] = False\n bfs.append(self._graph.vs[i])\n self._inference_route.append(self._graph.vs[i])\n # set root parent itself makes it easier when dealing root\n self._graph.vs[i]['parent'] = i\n self._component_root.append(i)\n\n while len(bfs):\n parent = bfs.popleft()\n ne_list = parent.neighbors()\n for ne in ne_list:\n if free[ne.index]:\n free[ne.index] = False\n bfs.append(ne)\n self._inference_route.append(ne)\n ne['parent'] = parent.index", "title": "" }, { "docid": "0194fcee363dab55b030b7da6bd83a9b", "score": "0.5782339", "text": "def build_tree(self, root: DTNode) -> None:\n print(\"\\n\\nnew node!!! 
depth: %d, current size: %d\" % (root.depth, root.leftGroup[1].size + root.rightGroup[1].size))\n if root.depth >= self.params[\"max_tree_depth\"] or root.leftGroup[1].size + root.rightGroup[1].size <= self.params['min_sample_split']:\n self.set_leaf(root)\n return\n \n # left\n if root.leftGroup[0].size != 0 and root.leftGroup[1].size != 0:\n left_feature_index, left_feature_value, left_groups = self.split_node(root.leftGroup[0], root.leftGroup[1])\n root.leftChild = DTNode(left_feature_index, left_feature_value, deepcopy(left_groups), root, root.depth+1,\n left_groups[0][1].size + left_groups[1][1].size)\n self.build_tree(root.leftChild)\n\n # right\n if root.rightGroup[0].size != 0 and root.rightGroup[1].size != 0:\n right_feature_index, right_feature_value, right_groups = self.split_node(root.rightGroup[0], root.rightGroup[1])\n root.rightChild = DTNode(right_feature_index, right_feature_value, deepcopy(right_groups), root, root.depth+1,\n right_groups[0][1].size + right_groups[1][1].size)\n self.build_tree(root.rightChild)\n\n del root.leftGroup, root.rightGroup", "title": "" }, { "docid": "25b6d8eabdd7c5ed4fe5b9acc8e408c1", "score": "0.5778617", "text": "def generate_treemap(self, rect):\n # return empty list if tree is empty\n treemap = []\n if self._root is None:\n return []\n\n # return rect and the colour of the rectangle for a tree w/just root\n if self._subtrees is None:\n treemap.append((rect, self.colour))\n return treemap\n\n # recursion\n height = rect[3]\n width = rect[2]\n x = rect[0]\n y = rect[1]\n for subtree in self._subtrees:\n percent = int((subtree.data_size/self.data_size)*100)\n if width < height:\n temp_width = int(width*(percent/100))\n temp = (x, y, temp_width, height)\n treemap.append((temp, subtree.colour))\n x += width\n subtree.generate_treemap(temp)\n elif height >= width:\n temp_height = int(height*(percent/100))\n temp = (x, y, width, temp_height)\n y += height\n treemap.append((temp, subtree.colour))\n subtree.generate_treemap(temp)", "title": "" }, { "docid": "19806eca445e39016800112e3401b2ae", "score": "0.5766866", "text": "def ctree():\n\treturn defaultdict(ctree)", "title": "" }, { "docid": "74d62bc2b7d26bf259ce90e6f1499939", "score": "0.57662874", "text": "def __init__(self,n):\n self.parent=dict([])\n self.parent['node_vol']=-1*np.ones([3,3],dtype=int)\n \n self.me=dict([])\n self.me['node_vol']=-1*np.ones([3,3],dtype=int)\n self.me['node_power']=-1*np.ones([3,1],dtype=int)\n self.me['branch_cur']=-1*np.ones([3,3],dtype=int)\n self.me['branch_power']=-1*np.ones([3,3],dtype=int)\n \n self.children=dict([])\n for childID in n.neighbor.children:\n self.children[childID]=dict([])\n self.children[childID]['branch_cur']=-1*np.ones([3,3],dtype=int)\n self.children[childID]['branch_power']=-1*np.ones([3,3],dtype=int)", "title": "" }, { "docid": "7fcf8774e10012b9ff2cece8d48d154c", "score": "0.5766245", "text": "def generate_treemap(self, rect):\n if self._subtrees == []:#current tree is leaf,return the input value\n rects = [(rect,self.colour)]\n return rects\n else:\n rects = []\n move = 0#how long next subtree should move \n for subtree in self._subtrees:\n rate = float(subtree.data_size)/self.data_size#the rate of subtree in its parent\n x,y,width,height = rect#unpack input rect\n if width > height:\n x = x +move#update x\n width = round(rate*width)#update width\n rects.extend(subtree.generate_treemap((x,y,width,height)))#implement treemap algorithm\n move += width#update move\n else:\n y = y +move#update y\n height = round(rate*height)#update height\n 
rects.extend(subtree.generate_treemap((x,y,width,height)))#implement treemap algorithm\n move += height#update move\n return rects", "title": "" }, { "docid": "ff9b7eb8683c347836b5aa7be933cc9c", "score": "0.57657284", "text": "def make(cls, nodepath, extras ):\n node = cls(nodepath, extras )\n if node.index == 0:\n cls.root = node\n\n cls.registry.append(node)\n cls.idlookup[node.id] = node \n\n # a list of nodes for each pv.id, need for a list is not so obvious, maybe GDML PV identity bug ?\n pvid = node.pv.id\n if pvid not in cls.pvlookup:\n cls.pvlookup[pvid] = []\n cls.pvlookup[pvid].append(node) \n\n # list of nodes for each lv.id, need for a list is obvious\n lvid = node.lv.id\n if lvid not in cls.lvlookup:\n cls.lvlookup[lvid] = []\n cls.lvlookup[lvid].append(node) \n\n \n cls.lookup[node.digest] = node \n cls.created += 1\n\n parent = cls.lookup.get(node.parent_digest)\n node.parent = parent\n if parent is None:\n if node.id == \"top.0\":\n pass\n elif node.id == \"top.1\":\n log.info(\"root node name %s indicates have parsed twice \" % node.id )\n else:\n log.fatal(\"failed to find parent for %s (failure expected only for root node)\" % node )\n assert 0\n else:\n parent.children.append(node) \n\n if cls.created % 1000 == 0:\n log.debug(\"make %s : [%s] %s \" % ( cls.created, id(node), node ))\n return node", "title": "" }, { "docid": "63d3bf2dcfd86ac97e51e73f0fb4ece3", "score": "0.57634395", "text": "def test_score_tree(self):\n # set RankNames and RankNameScores\n # if name in RankNames, check score, look at tips, etc\n t_str = StringIO(u\"(((a,b),(c,d))e,(f,g),h)i;\")\n t = TreeNode.read(t_str)\n t.RankNames = ['i', None, None, None] # 1.0 * 6\n t.RankNameScores = [1.0, None, None, None]\n t.children[0].RankNames = [None, 'e', 'foo', None] # 0.5 * 3, 0.6 * 3\n t.children[0].RankNameScores = [None, 0.5, 0.6, None]\n t.children[0].children[0].RankNames = [None] * 7\n t.children[0].children[1].RankNames = [None] * 7\n t.children[1].RankNames = [None] * 7\n t.children[1].RankNameScores = [None] * 7\n tips = list(t.tips())\n tips[0].Consensus = [None] * 7\n tips[1].Consensus = [1, 3, None, None]\n tips[2].Consensus = [2, 4, 5, None]\n tips[3].Consensus = [None, 1, None, None]\n tips[4].Consensus = [None, 1, None, None]\n tips[5].Consensus = [2, None, 3, None]\n tips[6].Consensus = [None, 4, None, None]\n decorate_ntips(t)\n exp = ((1.0 * 6) + (0.5 * 3) + (0.6 * 3)) / (6 + 3 + 3)\n obs = score_tree(t)\n self.assertEqual(obs, exp)", "title": "" }, { "docid": "19719fb423ca137896db8d67bf1ddbb6", "score": "0.57542104", "text": "def construct_tree(*karg):\n res = karg[0]\n for d in karg[1:]:\n res = treefy(res, d)\n return res", "title": "" }, { "docid": "27001e59cea5c8c25f6a29fff1d75a8a", "score": "0.57527506", "text": "def create_data_for_tree(univ, major, skills_employer_tree, univ_major_map, major_code, employer_second_degree_tree):\n if os.path.isfile(\"static/treegraph.json\"):\n os.remove(\"static/treegraph.json\")\n\n univ = str(univ).lower()\n\n result = {}\n result[\"name\"] = \"Me\"\n result[\"children\"] = []\n if univ in univ_major_map:\n indices = []\n if major:\n print major_code[major]\n if major in major_code:\n if major_code[major] in univ_major_map[univ]:\n indices.append(univ_major_map[univ][major_code[major]])\n else:\n for key in univ_major_map[univ]:\n if len(indices) < 8:\n indices.append(univ_major_map[univ][key])\n temp = {}\n\n for index in indices:\n if str(index) in skills_employer_tree:\n for i in range(0, 
len(skills_employer_tree[str(index)][\"children\"])):\n if skills_employer_tree[str(index)][\"children\"][i][\"name\"] in temp:\n pass\n else:\n temp [skills_employer_tree[str(index)][\"children\"][i][\"name\"]] = 1\n i = 0\n\n for key in temp:\n new = []\n if key.lower() in employer_second_degree_tree:\n for x in employer_second_degree_tree[key.lower()]:\n if x not in temp:\n if len(new) < 51:\n new.append({\"name\": x.strip('\\t').strip('\\n').strip() , \"children\": []})\n result[\"children\"].append({\"name\" : key.title(), \"children\" : new })\n i += 1\n\n # j = json.dumps(result, indent=4, separators=(',', ': '))\n # f = open(\"static/treegraph.json\", \"w\")\n # print >> f, j\n # f.close()\n return result", "title": "" }, { "docid": "d720519821925db56c2cf815fc1ffb00", "score": "0.57503814", "text": "def _build_tree(\n self, span_scores: Any, seq_length: int, gold_proto_node: Optional[ProtoNode]\n ) -> Tuple[ProtoNode, torch.Tensor]:\n raise NotImplementedError", "title": "" }, { "docid": "0c8998f8190a83fba71c73d55bffc6de", "score": "0.57456094", "text": "def _create_trees(self):\n\n self.trees = [self.grow(self.min_depth, self.max_depth)\n for _ in range(self.n_agents)]\n\n self.best_tree = copy.deepcopy(self.trees[0])\n\n logger.debug('Depth: [%d, %d] | Terminals: %d | Function: %s.',\n self.min_depth, self.max_depth, self.n_terminals, self.functions)", "title": "" }, { "docid": "3fc78cf37302deb4d866daefb88dcdb1", "score": "0.57389736", "text": "def _inspect_tree(self) -> Tuple[Dict[int, List[int]], Dict[int, List[int]]]:\n tree = self.model.tree_\n parents = dict()\n value_leaves = defaultdict(list)\n leaves = list()\n\n def recurse(node: int, depth: int, parent: int):\n parents[node] = [parent, *parents.get(parent, [])]\n if tree.children_left[node] != tree.children_right[node]:\n recurse(tree.children_left[node], depth + 1, node)\n recurse(tree.children_right[node], depth + 1, node)\n else:\n val = tree.value[node][0].argmax()\n value_leaves[val].append(node)\n leaves.append(node)\n\n recurse(0, 0, None)\n\n parents = {k: v[:-1] for k, v in parents.items() if k in leaves}\n\n return parents, value_leaves", "title": "" }, { "docid": "6801b88ccbb289df245c462130756c47", "score": "0.57363254", "text": "def __init__(self, nodes = []):\r\n self._adjacency_list = {} # maps parent -> set of child objects\r\n for n in nodes:\r\n self._adjacency_list[n] = set()\r\n self._label_map = {} # maps label -> set of (parent, child) pairs\r\n self._edge_map = {} # maps (parent, child) pair -> label\r", "title": "" }, { "docid": "31f5700d7a348dfe836f3c190f6299fe", "score": "0.57346994", "text": "def translate_tree(self, root):\n def extract_fields(node):\n if isinstance(node, logical.Relation):\n fnames = self.schema.stats[str(node.name)][1].keys()\n fields = map(lambda x: logical.Field.from_components(x, str(node.name)), fnames)\n return set(fields)\n elif isinstance(node, logical.Selection):\n return extract_fields(node.children[0])\n elif isinstance(node, logical.CartesianProduct) or isinstance(node, logical.NaturalJoin):\n return extract_fields(node.children[0]) | extract_fields(node.children[1])\n else:\n raise NotImplementedError()\n\n def aux(parent, node):\n if isinstance(node, logical.Relation):\n return plan.Relation(parent, str(node.name), self.schema.relations[str(node.name)])\n elif isinstance(node, logical.Projection):\n projection = plan.Projection(parent, node.fields)\n for c in node.children:\n aux(projection, c)\n return projection\n elif isinstance(node, logical.Selection):\n 
selection = plan.Selection(parent, node.conds)\n for c in node.children:\n aux(selection, c)\n return selection\n elif isinstance(node, logical.CartesianProduct):\n join = plan.CartesianProduct(parent)\n for c in node.children:\n aux(join, c)\n return join\n elif isinstance(node, logical.ThetaJoin):\n join = plan.NLJoin(parent, node.conds) # default is Nested loop join\n for c in node.children:\n aux(join, c)\n return join\n elif isinstance(node, logical.NaturalJoin):\n fs_left, fs_right = map(extract_fields, node.children)\n # fns_left = {x.name for x in fs_left}\n # fns_right = {x.name for x in fs_right}\n # common_attr_names = fns_left & fns_right\n\n conds = []\n for f_left in fs_left:\n for f_right in fs_right:\n if f_left.name == f_right.name:\n conds.append(logical.Comparison(f_left, f_right, '='))\n break\n\n join = plan.NLJoin(parent, conds)\n for c in node.children:\n aux(join, c)\n else:\n raise NotImplementedError()\n\n return aux(None, root)", "title": "" }, { "docid": "02f87e3a4476d9490d43ae4d0325895d", "score": "0.5729669", "text": "def main():\n tree = defaultdict(set)\n vertex_count, edge_count = input().split()\n vertex_count = int(vertex_count)\n edge_count = int(edge_count)\n for _ in range(edge_count):\n v1, v2 = input().split()\n v1 = int(v1)\n v2 = int(v2)\n tree[v1].add(v2)\n tree[v2].add(v1)\n trim_tree(tree)\n print(prune_tree(tree))", "title": "" }, { "docid": "8f77a9450bd87b9bf8a98f8975f0ae3a", "score": "0.57263565", "text": "def test_tree():\n node1 = Node_Situation(9) #fizz\n node2 = Node_Situation(4) #fizz\n node3 = Node_Situation(10) #buzz\n node4 = Node_Situation(15) #fizzbuzz\n\n node1.child.append(node2)\n node1.child.append(node3)\n node1.child.append(node4)\n\n temp = Tree_situation(node1, node2)\n\n return temp", "title": "" }, { "docid": "122529fb3ab512597b03861ca3370f62", "score": "0.5725581", "text": "def generatetree(self, train_data, train_labels, score=None):\n if score==None:\n score=self.entropy\n\n if train_data.shape[0]==0:\n return DTNode()\n\n current_score=score(train_labels)\n\n best_gain=0.0\n best_attr_idx=None\n best_val=None\n\n # find best split accross all attributes and values\n for attr_idx in range(train_data.shape[1]):\n\n # unique values of this attribute in training data\n attrib_vals=np.unique(train_data[:,attr_idx])\n\n for val in attrib_vals:\n (tb_data, tb_labels, fb_data, fb_labels)=self.split_data(train_data, train_labels, attr_idx, val)\n\n # compute information gain\n p=float(tb_labels.shape[0])/train_data.shape[0]\n gain=current_score - p* score(tb_labels) - (1-p)*score(fb_labels)\n\n if gain>best_gain and tb_labels.shape[0]>0 and fb_labels.shape[0]>0:\n best_gain=gain\n best_attr_idx=attr_idx\n best_val=val\n best_tb=(tb_data, tb_labels)\n best_fb=(fb_data, fb_labels)\n\n if best_gain>0.0:\n tb_node=self.generatetree(best_tb[0], best_tb[1])\n fb_node=self.generatetree(best_fb[0], best_fb[1])\n return DTNode(tb_node=tb_node, fb_node=fb_node, attr_idx=best_attr_idx, val=best_val)\n\n else:\n ra,rb=np.unique(train_labels, return_counts=True)\n res= np.vstack((ra,rb)).tolist()\n return DTNode(results=res)", "title": "" }, { "docid": "4209ad1e5b2c50fab161ef0cdbf26df1", "score": "0.5723341", "text": "def _build_tree(self):\n self.clear()\n categories = set()\n node_types = {}\n for name, node_ids in self._factory.names.items():\n for nid in node_ids:\n categories.add('.'.join(nid.split('.')[:-1]))\n node_types[nid] = name\n\n category_items = {}\n for category in sorted(categories):\n if category in 
self._custom_labels.keys():\n label = self._custom_labels[category]\n else:\n label = '- {}'.format(category)\n cat_item = BaseNodeTreeItem(self, [label], type=TYPE_CATEGORY)\n cat_item.setFirstColumnSpanned(True)\n cat_item.setFlags(QtCore.Qt.ItemIsEnabled)\n self.addTopLevelItem(cat_item)\n cat_item.setExpanded(True)\n category_items[category] = cat_item\n\n for node_id, node_name in node_types.items():\n category = '.'.join(node_id.split('.')[:-1])\n category_item = category_items[category]\n\n item = BaseNodeTreeItem(category_item, [node_name], type=TYPE_NODE)\n item.setToolTip(0, node_id)\n\n category_item.addChild(item)", "title": "" }, { "docid": "a61d87576510394f63d22b55f2532a13", "score": "0.5722529", "text": "def _build_hierplane_tree(self, tree: Tree, index: int, is_root: bool) -> JsonDict:\n children = []\n for child in tree:\n if isinstance(child, Tree):\n # If the child is a tree, it has children,\n # as NLTK leaves are just strings.\n children.append(self._build_hierplane_tree(child, index, is_root=False))\n else:\n # We're at a leaf, so add the length of\n # the word to the character index.\n index += len(child)\n\n label = tree.label()\n span = \" \".join(tree.leaves())\n hierplane_node = {\"word\": span, \"nodeType\": label, \"attributes\": [label], \"link\": label}\n if children:\n hierplane_node[\"children\"] = children\n # TODO(Mark): Figure out how to span highlighting to the leaves.\n if is_root:\n hierplane_node = {\n \"linkNameToLabel\": LINK_TO_LABEL,\n \"nodeTypeToStyle\": NODE_TYPE_TO_STYLE,\n \"text\": span,\n \"root\": hierplane_node,\n }\n return hierplane_node", "title": "" }, { "docid": "92a2ee119d1b68c1d988fe94872be594", "score": "0.572147", "text": "def embed(self):\n tree = self.tree\n k = self.k\n epsilon = self.epsilon\n is_weighted = self.is_weighted\n\n coords = zeros(tree.number_of_nodes(), 2)\n \n root_children = list(tree.successors(0))\n d = len(root_children) \n if self.tau == None:\n tau = self.compute_tau()\n else:\n tau = self.tau\n\n \n #lengths of unweighted edges\n edge_lengths = list(map(self.euc_to_hyp_dist, ones(d, 1) * tau))\n \n #lengths of weighted edges\n if is_weighted:\n k = 0\n for child in root_children:\n weight = tree[0][child]['weight']\n edge_lengths[k] = self.euc_to_hyp_dist(fmul(tau, weight))\n k += 1\n # queue containing the nodes whose children we're placing\n q = []\n \n #place children of the root\n for i in range(d):\n coords[root_children[i], 0] = fmul(edge_lengths[i], cos(i * 2 * pi / d))\n coords[root_children[i], 1] = fmul(edge_lengths[i], sin(i * 2 * pi / d))\n \n q.append(root_children[i])\n \n while len(q) > 0:\n #pop the node whose children we're placing off the queue\n h = q.pop(0)\n \n children = list(tree.successors(h))\n parent = list(tree.predecessors(h))[0]\n num_children = len(children)\n \n for child in children:\n q.append(child)\n \n #lengths of unweighted edges\n edge_lengths = list(map(self.euc_to_hyp_dist, ones(num_children, 1) * tau))\n \n #lengths of weighted edges\n if is_weighted:\n k = 0\n for child in children:\n weight = tree[h][child]['weight']\n edge_lengths[k] = self.euc_to_hyp_dist(fmul(tau, weight))\n k += 1\n \n if num_children > 0:\n R = self.add_children(coords[parent, :], coords[h, :], edge_lengths)\n for i in range(num_children):\n coords[children[i], :] = R[i, :]\n \n return coords", "title": "" }, { "docid": "faaf6e950eb666575c6405b38f638cbf", "score": "0.57138836", "text": "def build_tree(dependencies):\n tree = collections.defaultdict(lambda: [])\n for rel, parent, child in 
dependencies:\n tree[parent].append(child)\n return tree", "title": "" }, { "docid": "cc0ef33c73b3ea42679716d28a60c034", "score": "0.5705206", "text": "def make_tree_dict(discovered, node_list, tree_dict):\n for i in range(1, len(node_list) + 1): # O(n)\n v = node_list[-i]\n e = discovered[v]\n if e is not None:\n u = e.opposite(v)\n if tree_dict[u]._left is None:\n tree_dict[u]._left = v\n else:\n tree_dict[u]._right = v\n tree_dict[v]._parent = u", "title": "" }, { "docid": "aad6ae99f47db6e6b54725fa19281e53", "score": "0.57028943", "text": "def __init__(self):\n self.varMap = {}\n self.edges = []\n self.rootNodes = []", "title": "" }, { "docid": "6e96a7bb3727dff7e97d65d7f10a8d7f", "score": "0.5681768", "text": "def gather_tree(values, parents):\n res = tf.py_func(func=gather_tree_py, inp=[values, parents], Tout=values.dtype)\n res.set_shape(values.get_shape().as_list())\n return res", "title": "" }, { "docid": "bc7f51ab1f80ef3956f1dcaddb43a352", "score": "0.5680426", "text": "def __init__(self,rules):\n self.tree = {} # Chart of tree IDs\n self.nodes = {} # Maps each ID to a Node\n self.parentOf = {} # parentOf[x] == x's parent in tree\n self.func = None\n self.definefunc()\n self.head = None # Root of tree\n self.nodesvisited = {} # Maps all nodes to 1?\n # Apply all of the operations in \"rules\"\n for rule in rules:\n r,arg = rule.split('(')\n args = arg[:-1].split(',')\n self.func[r](args,r)\n # Creates wildcard Node for any missing referenced nodes in tree\n for h in self.nodesvisited:\n if not self.nodes.has_key(h):\n self.nodes[h] = Node(h,\"ANY\")\n # Define the tree's root\n for n in self.nodes:\n if not self.parentOf.has_key(n):\n self.head = n", "title": "" }, { "docid": "94f369d5539f18e07682e1fb8ebe4a40", "score": "0.5665672", "text": "def buildStateTree(self, state):\n\n # loop all nodes\n for n in self.nodeList:\n # find real references for children codes if they exist\n if n.leftCode in self.nodeDict:\n n.left = self.nodeDict[ n.leftCode ]\n n.left.parent = n\n # else, the child it has to be a leaf with a model name \n else:\n leaf = self.Node()\n leaf.code = n.leftCode\n leaf.macroName = n.leftCode.strip( '\"' )\n leaf.parent = n\n leaf.tree = self\n self.leavesList.append(leaf)\n self.leavesDict[ leaf.macroName ] = leaf\n n.left = leaf\n #print 'Added leaf ' + leaf.code\n \n # find real references for children codes if they exist\n if n.rightCode in self.nodeDict:\n n.right = self.nodeDict[ n.rightCode ] \n n.right.parent = n\n # else, the child it has to be a leaf with a model name \n else: \n leaf = self.Node()\n leaf.code = n.rightCode\n leaf.macroName = n.rightCode.strip( '\"' )\n leaf.parent = n\n leaf.tree = self\n self.leavesList.append(leaf)\n self.leavesDict[ leaf.macroName ] = leaf\n n.right = leaf\n #print 'Added leaf ' + leaf.code", "title": "" }, { "docid": "3149be9f8abfe2b6cb327357aa56adb5", "score": "0.5661555", "text": "def make_matrix(treefile, outputf):\n\n tree = Tree(treefile, quoted_node_names=True, format=1)\n\n leaves = tree.get_leaves()\n paths = {x:set() for x in leaves}\n\n # get the paths going up the tree\n # we get all the nodes up to the last one and store them in a set\n sys.stderr.write(\"Precalculating distances\\n\")\n for n in leaves:\n if n.is_root():\n continue\n movingnode = n\n while not movingnode.is_root():\n paths[n].add(movingnode)\n movingnode = movingnode.up\n\n # now we want to get all pairs of nodes using itertools combinations. 
We need AB AC etc but don't need BA CA\n\n leaf_distances = {x.name:{} for x in leaves}\n\n\n sys.stderr.write(\"Iterating over the leaves\\n\")\n sys.stderr.write(\"THere are {} leaves\\n\".format(len(leaves)))\n combi = combinations(leaves, 2)\n combidef = int(len(list(combi))/500);\n sys.stderr.write(\"There are {} combinations. Each dot is {} combinations\\n\".format(len(list(combi)), combidef))\n c=0\n cc=0\n for (leaf1, leaf2) in combi:\n if (c % combidef) == 0:\n if cc == 5:\n sys.stdout.write(\" \")\n cc=0\n sys.stdout.write(\".\")\n cc+=1\n c+=1\n\n # figure out the unique nodes in the path\n uniquenodes = paths[leaf1] ^ paths[leaf2]\n distance = sum(x.dist for x in uniquenodes)\n leaf_distances[leaf1.name][leaf2.name] = leaf_distances[leaf2.name][leaf1.name] = distance\n\n sys.stdout.write(\"\\n\")\n\n allleaves = sorted(leaf_distances.keys())\n\n with open(outputf, 'w') as out:\n out.write(\"\\t\".join([\"\"] + allleaves) + \"\\n\")\n for n in allleaves:\n out.write(n + \"\\t\")\n for m in allleaves:\n if m == n:\n out.write(\"0\\t\")\n else:\n out.write(\"{}\\t\".format(leaf_distances[n][m]))\n out.write(\"\\n\")", "title": "" }, { "docid": "92282f07df9c107a118b302a1a7b0024", "score": "0.56608087", "text": "def build_tree(self):\n self.select_root()\n self.genera_arbol(self.rootNode)", "title": "" }, { "docid": "a35bcc3ae9ad33eedda5c7e6a57e6935", "score": "0.56593126", "text": "def build_tree(df, attribute_list, attribute_value):\n # create new node\n node = Node(df, attribute_value)\n\n # if all labels are equal return node\n if node.only_one_label():\n return node\n\n # check if attributes left\n if len(attribute_list) > 0:\n # get best attribute and store in current node\n best_attribute = get_best_attribute(node.get_content(), attribute_list)\n node.set_split_attribute(best_attribute)\n # get subsets from the split\n subsets = get_subsets(node.get_content(), best_attribute)\n # create new nodes and store in children of current node\n children = []\n for subset in subsets:\n # remove current attribute so it will not be used in the subtree\n attribute_list.remove(best_attribute)\n # create a subtree and add it to children list of current tree\n children.append(build_tree(subset, attribute_list, subset[best_attribute][0]))\n # add it back again so it can be used in other branches\n attribute_list.append(best_attribute)\n node.set_children(children)\n\n return node", "title": "" }, { "docid": "3a185bea80cd258fb93cd97cd27e5f56", "score": "0.56543034", "text": "def create_tree(matrix):\n node_list = []\n node_index = 0\n for node in matrix:\n # create a Node object for each entry in the matrix\n cur_node = Node(node_index)\n node_list.append(cur_node)\n child_index = 0\n # check to see if each entry has children. 
if so, then add the data to\n # the corresponding Node object\n for child in node:\n if child == 1:\n if child_index <= node_index:\n cur_node.left = child_index\n elif child_index > node_index:\n cur_node.right = child_index\n\n child_index += 1\n node_index += 1\n\n return node_list", "title": "" }, { "docid": "c753575b9c74bf9d6e70e02a8706783a", "score": "0.5638595", "text": "def gather_tree(ids, parents):\n helper = LayerHelper('gather_tree', **locals())\n check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree')\n check_variable_and_dtype(parents, 'parents', ['int32', 'int64'],\n 'gather_tree')\n out = helper.create_variable_for_type_inference(dtype=ids.dtype)\n\n helper.append_op(\n type=\"gather_tree\",\n inputs={\"Ids\": ids,\n \"Parents\": parents},\n outputs={\"Out\": out})\n\n return out", "title": "" }, { "docid": "74a41d09ca99308a2b16bcd9b06fbcd3", "score": "0.56346697", "text": "def tree2(idtype):\n g = dgl.graph(([], [])).astype(idtype).to(F.ctx())\n g.add_nodes(5)\n g.add_edges(2, 4)\n g.add_edges(0, 4)\n g.add_edges(4, 1)\n g.add_edges(3, 1)\n g.ndata[\"h\"] = F.tensor([0, 1, 2, 3, 4])\n g.edata[\"h\"] = F.randn((4, 10))\n return g", "title": "" }, { "docid": "eefd40c9936a822512df5a4f763db034", "score": "0.5633507", "text": "def TreeGenerate(self,dataset,attributes,depth,cnt_leaves=0,root=None):\n catagory = dataset[\"salary\"].unique()\n node = Node() if root == None else root # better used for validation indexing\n cnt_leaves += 1\n\n # 1) All samples in `dataset` belongs to the same catagory\n if len(catagory) == 1:\n node.setLeaf(catagory[0],cnt_leaves)\n return node\n\n # 2) `attributes` is empty, or the values of `dataset` on `attributes` are the same\n if len(attributes) == 0 or np.array([len(dataset[a].unique()) == 1 for a in attributes]).all() == True:\n node.setLeaf(dataset[\"salary\"].value_counts().argmax(),cnt_leaves)\n return node\n\n \"\"\"The general case\"\"\"\n # without partition\n node.setLeaf(dataset[\"salary\"].value_counts().argmax(),cnt_leaves)\n acc_without_partition = self.validation()\n\n # with partition\n # find the attribute with greatest information gain\n max_gain = (-0x3f3f3f3f,None)\n for a in attributes:\n gain = information_gain(dataset,a,self.attr_dict[a])\n if gain[0] > max_gain[0]:\n a_best, max_gain = a, gain\n num_leaves = 0\n # make branches\n if self.attr_dict[a_best]: # discrete\n num_leaves = len(self.train_set[a_best].unique())\n for av in self.train_set[a_best].unique(): # be careful, not dataset!\n Dv = dataset[dataset[a_best] == av]\n cnt_leaves += 1\n leafnode = Node()\n if len(Dv) == 0:\n leafnode.setLeaf(dataset[\"salary\"].value_counts().argmax(),cnt_leaves)\n else:\n leafnode.setLeaf(Dv[\"salary\"].value_counts().argmax(),cnt_leaves)\n node.setBranch(a_best,av,leafnode)\n else: # continuous\n num_leaves = 2\n for flag in [\"Smaller\",\"Bigger\"]:\n Dv = dataset[dataset[a_best] < max_gain[1]] if flag == \"Smaller\" else dataset[dataset[a_best] >= max_gain[1]]\n cnt_leaves += 1\n leafnode = Node()\n if len(Dv) == 0:\n leafnode.setLeaf(dataset[\"salary\"].value_counts().argmax(),cnt_leaves)\n else:\n leafnode.setLeaf(Dv[\"salary\"].value_counts().argmax(),cnt_leaves)\n node.setBranch(a_best,flag,leafnode,branch_value=max_gain[1])\n acc_with_partition = self.validation()\n\n # pre-pruning (to make sure it has generated sufficient nodes, depth is set here)\n if depth > 5 and acc_without_partition >= acc_with_partition:\n cnt_leaves -= num_leaves\n print(\"Prune at {}: {} (without) >= {} 
(with)\".format(a_best,acc_without_partition,acc_with_partition))\n logger.info(\"Prune at {}: {} (without) >= {} (with)\".format(a_best,acc_without_partition,acc_with_partition))\n node.setLeaf(dataset[\"salary\"].value_counts().argmax())\n return node\n elif depth > 5:\n print(a_best,acc_without_partition,acc_with_partition)\n\n # true partition (branching makes more gains)\n if self.attr_dict[a_best]: # discrete\n for av in self.train_set[a_best].unique(): # be careful, not dataset!\n Dv = dataset[dataset[a_best] == av]\n # 3) `Dv` is empty, which can not be partitioned\n if len(Dv) != 0:\n node.setBranch(a_best,av,self.TreeGenerate(Dv,attributes[attributes != a_best],depth+1,cnt_leaves))\n else: # continuous\n for flag in [\"Smaller\",\"Bigger\"]:\n Dv = dataset[dataset[a_best] < max_gain[1]] if flag == \"Smaller\" else dataset[dataset[a_best] >= max_gain[1]]\n if len(Dv) != 0:\n node.setBranch(a_best,flag,self.TreeGenerate(Dv,attributes,depth+1,cnt_leaves),branch_value=max_gain[1])\n return node", "title": "" }, { "docid": "09edaca063792c94f3151fccced3731a", "score": "0.5630759", "text": "def create_tree(self):\n path = os.path.join(os.getcwd(), \"src\", \"data\", \"phylogeny\", \"sig_orgs_phyliptree.phy\")\n with open(path, \"r\") as f:\n newick_info = f.read()\n self._create_tree(newick_info)", "title": "" }, { "docid": "1d138b5bf90433e7fababbf1f9f89c76", "score": "0.5619234", "text": "def build_kdtree_nodes(vertexes, indexes, depth=0):\n if not indexes:\n return\n\n # Select axis based on depth so that axis cycles through all valid values\n split_axis = depth % 3\n\n # Sort point list and choose median as pivot element\n indexes.sort(key=lambda v: vertexes[v][split_axis])\n median = len(indexes) / 2 # choose median\n\n # Create node and construct subtrees\n node = Node(indexes[median], split_axis)\n node.left_child = build_kdtree_nodes(vertexes, indexes[0:median], depth + 1)\n node.right_child = build_kdtree_nodes(vertexes, indexes[median+1:], depth + 1)\n\n return node", "title": "" }, { "docid": "b4adf77164c95c94ec2bd9d09dd0b111", "score": "0.5612856", "text": "def __build_tree__(self, features, classes, depth=30):\r\n\r\n # p = sum(classes)\r\n # n = len(classes) - p\r\n #\r\n # if n == 0:\r\n # decision_tree_root = DecisionNode(None, None, None, 1)\r\n #\r\n # elif p == 0:\r\n # decision_tree_root = DecisionNode(None, None, None, 0)\r\n #\r\n # elif depth > 0:\r\n # best_feature = -1\r\n # max_gini_gain = -1\r\n # alpha = 0\r\n #\r\n # feature_class0 = []\r\n # feature_class1 = []\r\n # feature_class2 = []\r\n # feature_class3 = []\r\n # feature_class4 = []\r\n # feature_class5 = []\r\n # # feature_class6 = []\r\n # # feature_class7 = []\r\n # # feature_class8 = []\r\n #\r\n #\r\n # i = 0\r\n # for x in features:\r\n # feature_class0.append([x[0], classes[i]])\r\n # feature_class1.append([x[1], classes[i]])\r\n # feature_class2.append([x[2], classes[i]])\r\n # feature_class3.append([x[3], classes[i]])\r\n # feature_class4.append([x[4], classes[i]])\r\n # feature_class5.append([x[5], classes[i]])\r\n # # feature_class6.append([x[6], classes[i]])\r\n # # feature_class7.append([x[7], classes[i]])\r\n # # feature_class8.append([x[8], classes[i]])\r\n # i += 1\r\n #\r\n # sorted_feature_class0 = sorted(feature_class0)\r\n # sorted_feature_class1 = sorted(feature_class1)\r\n # sorted_feature_class2 = sorted(feature_class2)\r\n # sorted_feature_class3 = sorted(feature_class3)\r\n # sorted_feature_class4 = sorted(feature_class4)\r\n # sorted_feature_class5 = sorted(feature_class5)\r\n 
# # sorted_feature_class6 = sorted(feature_class6)\r\n # # sorted_feature_class7 = sorted(feature_class7)\r\n # # sorted_feature_class8 = sorted(feature_class8)\r\n #\r\n # sortedclasses0 = [x[1] for x in sorted_feature_class0]\r\n # sortedclasses1 = [x[1] for x in sorted_feature_class1]\r\n # sortedclasses2 = [x[1] for x in sorted_feature_class2]\r\n # sortedclasses3 = [x[1] for x in sorted_feature_class3]\r\n # sortedclasses4 = [x[1] for x in sorted_feature_class4]\r\n # sortedclasses5 = [x[1] for x in sorted_feature_class5]\r\n # # sortedclasses6 = [x[1] for x in sorted_feature_class6]\r\n # # sortedclasses7 = [x[1] for x in sorted_feature_class7]\r\n # # sortedclasses8 = [x[1] for x in sorted_feature_class8]\r\n #\r\n # sortedfeature0 = [x[0] for x in sorted_feature_class0]\r\n # sortedfeature1 = [x[0] for x in sorted_feature_class1]\r\n # sortedfeature2 = [x[0] for x in sorted_feature_class2]\r\n # sortedfeature3 = [x[0] for x in sorted_feature_class3]\r\n # sortedfeature4 = [x[0] for x in sorted_feature_class4]\r\n # sortedfeature5 = [x[0] for x in sorted_feature_class5]\r\n # # sortedfeature6 = [x[0] for x in sorted_feature_class6]\r\n # # sortedfeature7 = [x[0] for x in sorted_feature_class7]\r\n # # sortedfeature8 = [x[0] for x in sorted_feature_class8]\r\n #\r\n # for i in range(1, len(classes)):\r\n #\r\n # current_classes = [sortedclasses0[:i], sortedclasses0[i:]]\r\n # gain = gini_gain(sortedclasses0, current_classes)\r\n # if gain >= max_gini_gain:\r\n # max_gini_gain = gain\r\n # best_feature = 0\r\n # alpha = sortedfeature0[i]\r\n #\r\n # current_classes = [sortedclasses1[:i], sortedclasses1[i:]]\r\n # gain = gini_gain(sortedclasses1, current_classes)\r\n # if gain >= max_gini_gain:\r\n # max_gini_gain = gain\r\n # best_feature = 1\r\n # alpha = sortedfeature1[i]\r\n #\r\n # current_classes = [sortedclasses2[:i], sortedclasses2[i:]]\r\n # gain = gini_gain(sortedclasses2, current_classes)\r\n # if gain >= max_gini_gain:\r\n # max_gini_gain = gain\r\n # best_feature = 2\r\n # alpha = sortedfeature2[i]\r\n #\r\n # current_classes = [sortedclasses3[:i], sortedclasses3[i:]]\r\n # gain = gini_gain(sortedclasses3, current_classes)\r\n # if gain >= max_gini_gain:\r\n # max_gini_gain = gain\r\n # best_feature = 3\r\n # alpha = sortedfeature3[i]\r\n #\r\n # current_classes = [sortedclasses4[:i], sortedclasses4[i:]]\r\n # gain = gini_gain(sortedclasses4, current_classes)\r\n # if gain >= max_gini_gain:\r\n # max_gini_gain = gain\r\n # best_feature = 4\r\n # alpha = sortedfeature4[i]\r\n #\r\n # current_classes = [sortedclasses5[:i], sortedclasses5[i:]]\r\n # gain = gini_gain(sortedclasses5, current_classes)\r\n # if gain >= max_gini_gain:\r\n # max_gini_gain = gain\r\n # best_feature = 5\r\n # alpha = sortedfeature5[i]\r\n\r\n # current_classes = [sortedclasses6[:i], sortedclasses6[i:]]\r\n # gain = gini_gain(sortedclasses6, current_classes)\r\n # if gain >= max_gini_gain:\r\n # max_gini_gain = gain\r\n # best_feature = 6\r\n # alpha = sortedfeature6[i]\r\n #\r\n # current_classes = [sortedclasses7[:i], sortedclasses7[i:]]\r\n # gain = gini_gain(sortedclasses7, current_classes)\r\n # if gain >= max_gini_gain:\r\n # max_gini_gain = gain\r\n # best_feature = 7\r\n # alpha = sortedfeature7[i]\r\n #\r\n # current_classes = [sortedclasses8[:i], sortedclasses8[i:]]\r\n # gain = gini_gain(sortedclasses8, current_classes)\r\n # if gain >= max_gini_gain:\r\n # max_gini_gain = gain\r\n # best_feature = 8\r\n # alpha = sortedfeature8[i]\r\n\r\n # decision_tree_root = DecisionNode(None, None, 
lambda y: y[best_feature] <= alpha)\r\n #\r\n # left_features = []\r\n # right_features = []\r\n # left_classes = []\r\n # right_classes = []\r\n # for j in range(len(features)):\r\n # if features[j][best_feature] <= alpha:\r\n # left_features.append(features[j])\r\n # left_classes.append(classes[j])\r\n # else:\r\n # right_features.append(features[j])\r\n # right_classes.append(classes[j])\r\n #\r\n # decision_tree_root.left = self.__build_tree__(left_features, left_classes, depth - 1)\r\n # decision_tree_root.right = self.__build_tree__(right_features, right_classes, depth - 1)\r\n #\r\n # else:\r\n # if p > n:\r\n # decision_tree_root = DecisionNode(None, None, None, 1)\r\n # else:\r\n # decision_tree_root = DecisionNode(None, None, None, 0)\r\n #\r\n # return decision_tree_root\r", "title": "" }, { "docid": "65817e0da15a74874da429273db014c5", "score": "0.56101954", "text": "def _gen_tree_impl(p_tree, p_config):\r\n if p_tree.level >= p_config.max_depth: #abort if too low in the tree...\r\n return p_tree\r\n new_rooms = _split_room(p_tree.val, p_config) #...or splitting is not possible\r\n if not new_rooms:\r\n return p_tree\r\n \r\n first_room, second_room = new_rooms\r\n \r\n left = bintree() #set new room attributes\r\n right = bintree()\r\n left.val = first_room\r\n right.val = second_room\r\n p_tree.set_left(left)\r\n p_tree.set_right(right)\r\n \r\n _gen_tree_impl(p_tree.left, p_config) #recurse for new child rooms\r\n _gen_tree_impl(p_tree.right, p_config)", "title": "" }, { "docid": "aaa3ec98805fc79ce836e2bb8ddb3ce8", "score": "0.560775", "text": "def generate_tree( spec, tokeniser ):\n\t\ttree = {}\n\t\tdef add_to_tree( statement ):\n\t\t\tnode = tree\n\t\t\tfor token in statement.chla():\n\t\t\t\tif token not in node:\n\t\t\t\t\tnode[token] = {}\n\t\t\t\tnode = node[token]\n\t\t\tif node.get( Keywords.LEAF ):\n\t\t\t\traise ValueError( \"two statements parse to same value:\\n1.\\t{0}\\n2.\\t{1}\".format( node.get( Keywords.LEAF ).chla(), statement.chla() ) )\n\t\t\tnode[ Keywords.LEAF ] = statement \n\n\t\tGenerator.generate( spec, tokeniser, add_to_tree )\n\t\t\n\t\treturn tree", "title": "" }, { "docid": "ce2a5ce7daed3062f9b0b7330dba4ce6", "score": "0.56056666", "text": "def gen_samples(trees, labels, vectors, vector_lookup):\n\n # encode labels as one-hot vectors\n label_lookup = {label: _onehot(i, len(labels)) for i, label in enumerate(labels)}\n # print vector_lookup\n\n for tree in trees:\n\n nodes = []\n children = []\n label = label_lookup[tree['label']]\n\n queue = [(tree['tree'], -1)]\n # print queue\n while queue:\n # print \"############\"\n node, parent_ind = queue.pop(0)\n # print node\n # print parent_ind\n node_ind = len(nodes)\n # print \"node ind : \" + str(node_ind)\n # add children and the parent index to the queue\n queue.extend([(child, node_ind) for child in node['children']])\n # create a list to store this node's children indices\n children.append([])\n # add this child to its parent's child list\n if parent_ind > -1:\n children[parent_ind].append(node_ind)\n \n #n = str(node['node'])\n n = int(node['node'])-1\n look_up_vector = vector_lookup[n]\n nodes.append(vectors[int(n)])\n # print \"children list length: \" + str(len(children))\n yield (nodes, children, label)", "title": "" }, { "docid": "7354acaa83139262b48d48baa2351f44", "score": "0.56032807", "text": "def tree_view():\n db, cur = db_conn()\n\n view = \"\"\"CREATE VIEW category_tree AS\n SELECT root.CategoryName as level_1,\n down1.CategoryName as level_2,\n down2.CategoryName as level_3,\n 
down3.CategoryName as level_4,\n down4.CategoryName as level_5,\n down5.CategoryName as level_6\n FROM categories AS root\n LEFT JOIN categories AS down1\n ON down1.CategoryParentID = root.CategoryID\n LEFT JOIN categories AS down2\n ON down2.CategoryParentID = down1.CategoryID\n LEFT JOIN categories AS down3\n ON down3.CategoryParentID = down2.CategoryID\n LEFT JOIN categories AS down4\n ON down4.CategoryParentID = down3.CategoryID\n LEFT JOIN categories AS down5\n ON down5.CategoryParentID = down4.CategoryID\n WHERE root.CategoryParentID = -1\n ORDER BY level_1, level_2, level_3, level_4, level_5, level_6;\"\"\"\n\n cur.execute(view)\n db.commit()\n db.close()", "title": "" }, { "docid": "b2843858dd4c8a0c9b1491406eb4a019", "score": "0.5600586", "text": "def layout_tree(self, tree_orientation=\"down\", tree_root=None,\n dim=2, **options):\n if dim != 2:\n raise ValueError('only implemented in 2D')\n\n from sage.graphs.all import Graph\n if not Graph(self).is_tree():\n raise RuntimeError(\"Cannot use tree layout on this graph: \"\n \"self.is_tree() returns False.\")\n\n try:\n emb = self.get_embedding()\n use_embedding = True\n except ValueError:\n use_embedding = False\n\n n = self.order()\n vertices = self.vertices()\n\n if tree_root is None:\n root = self.center()[0]\n else:\n root = tree_root\n\n pos = {}\n\n # The children and parent of each vertex\n if not use_embedding:\n children = {root: self.neighbors(root)}\n else:\n children = {root: emb[root]}\n parent = {u: root for u in children[root]}\n\n # stack[i] is the list of children of stick[i] which have not been given\n # a position yet.\n stack = [list(children[root])]\n stick = [root]\n\n # obstruction[y] is the smallest value of x to which a vertex at height\n # y can be assigned. All vertices at height y which have already been\n # assigned are on the left of (x-1,y).\n obstruction = [0.0] * self.num_verts()\n\n if tree_orientation in ['down', 'left']:\n o = -1\n elif tree_orientation in ['up', 'right']:\n o = 1\n else:\n raise ValueError('orientation should be \"up\", \"down\", \"left\" or \"right\"')\n\n def slide(v, dx):\n \"\"\"\n shift the vertex ``v`` and its descendants to the right by ``dx``\n\n Precondition: ``v`` and its descendants have already had their\n positions computed.\n \"\"\"\n level = [v]\n while level:\n nextlevel = []\n for u in level:\n x, y = pos[u]\n x += dx\n obstruction[y] = max(x + 1, obstruction[y])\n pos[u] = x, y\n nextlevel += children[u]\n\n level = nextlevel\n\n while stack:\n C = stack[-1]\n\n # If all the children of stick[-1] have been given a position\n if not C:\n p = stick.pop()\n stack.pop()\n cp = children[p]\n y = o * len(stack)\n\n if not cp:\n # If p has no children, we draw it at the leftmost position\n # which has not been forbidden\n x = obstruction[y]\n pos[p] = x, y\n else:\n # If p has children, we put v on a vertical line going\n # through the barycenter of its children\n x = sum([pos[c][0] for c in cp]) / len(cp)\n pos[p] = x, y\n ox = obstruction[y]\n if x < ox:\n slide(p, ox - x)\n x = ox\n\n # If the vertex to the right of p has not children, we want it\n # at distance 1 from p\n obstruction[y] = x + 1\n\n # Otherwise, we take one of the children and add it to the\n # stack. 
Note that this vertex is removed from the list C.\n else:\n t = C.pop()\n\n pt = parent[t]\n\n if not use_embedding:\n ct = [u for u in self.neighbors(t) if u != pt]\n else:\n ct = emb[t]\n idx = ct.index(pt)\n ct = ct[idx + 1:] + ct[:idx]\n\n children[t] = ct\n for c in ct:\n parent[c] = t\n\n stack.append(list(ct))\n stick.append(t)\n\n if tree_orientation in ['right', 'left']:\n return {p: (py, px) for p, (px, py) in pos.iteritems()}\n\n return pos", "title": "" }, { "docid": "60659d350740c90c0c948ccb00042729", "score": "0.5600021", "text": "def test_score_tree(self):\n # set RankNames and RankNameScores\n # if name in RankNames, check score, look at tips, etc\n t_str = \"(((a,b),(c,d))e,(f,g),h)i;\"\n t = DndParser(t_str)\n t.RankNames = ['i',None,None,None] # 1.0 * 6\n t.RankNameScores = [1.0,None,None,None]\n t.Children[0].RankNames = [None,'e','foo',None] # 0.5 * 3, 0.6 * 3\n t.Children[0].RankNameScores = [None, 0.5, 0.6, None]\n t.Children[0].Children[0].RankNames = [None] * 7\n t.Children[0].Children[1].RankNames = [None] * 7\n t.Children[1].RankNames = [None] * 7\n t.Children[1].RankNameScores = [None] * 7\n tips = t.tips()\n tips[0].Consensus = [None] * 7\n tips[1].Consensus = [1,3,None,None]\n tips[2].Consensus = [2,4,5,None]\n tips[3].Consensus = [None,1,None,None]\n tips[4].Consensus = [None,1,None,None]\n tips[5].Consensus = [2,None,3,None]\n tips[6].Consensus = [None,4,None,None]\n decorate_ntips(t)\n exp = ((1.0 * 6) + (0.5 * 3) + (0.6 * 3)) / (6 + 3 + 3)\n obs = score_tree(t)\n self.assertEqual(obs, exp)", "title": "" }, { "docid": "ff8737dd1fefa7e37f92dcc99a40dec8", "score": "0.5599189", "text": "def build_tree_hash(tree):\n tree_hash = {}\n\n def per_node(node):\n label_id = build_label(node)\n tree_hash[label_id] = node\n\n for c in node['children']:\n per_node(c)\n if tree:\n per_node(tree)\n return tree_hash", "title": "" }, { "docid": "066a4435b5503884aba7233caa82a05c", "score": "0.5598946", "text": "def tree1(idtype):\n g = dgl.graph(([], [])).astype(idtype).to(F.ctx())\n g.add_nodes(5)\n g.add_edges(3, 1)\n g.add_edges(4, 1)\n g.add_edges(1, 0)\n g.add_edges(2, 0)\n g.ndata[\"h\"] = F.tensor([0, 1, 2, 3, 4])\n g.edata[\"h\"] = F.randn((4, 10))\n return g", "title": "" }, { "docid": "f0ab3ec8730aaf7c9f9908bb4b1de997", "score": "0.5597938", "text": "def generate(points):\r\n if len(points) == 0:\r\n return None\r\n\r\n # VERTICAL=1 is the initial division\r\n tree = KDTree()\r\n tree.root = generateSubTree(1, 2, points, 0, len(points)-1)\r\n propagate(tree.root, maxRegion)\r\n return tree", "title": "" }, { "docid": "b36b3111c5b8bbafc12811acb29ef9b1", "score": "0.5587122", "text": "def create_graph(self):\n try:\n stats_dict = pstats.Stats(self.pstats_file).__dict__[\"stats\"]\n except ValueError:\n print_incomptable_msg(self.pstats_file)\n raise\n list_roots = []\n\n # We iterate through each function/node in our stats dict\n for dst_module_data, dst_stats in stats_dict.items():\n dst_name = dst_module_data[NameData.FNCNAME]\n dst_hnode = self._create_node_and_row(dst_module_data, dst_name, stats_dict)\n\n # get all parents of our current destination node\n # create source nodes and link with destination node\n srcs = self._get_src(dst_stats)\n if srcs == {}:\n list_roots.append(dst_hnode)\n else:\n for src_module_data in srcs.keys():\n src_name = src_module_data[NameData.FNCNAME]\n\n if src_name is not None:\n src_hnode = self._create_node_and_row(\n src_module_data, src_name, stats_dict\n )\n dst_hnode.add_parent(src_hnode)\n src_hnode.add_child(dst_hnode)\n\n 
return list_roots", "title": "" }, { "docid": "9485c694dd128d8eac0a2517ee6da784", "score": "0.5580661", "text": "def build_kdtree(X, relative_scales=None,**kwargs):\n\n offset = np.mean(X, axis=0)\n\n if relative_scales is None:\n # Whiten the data.\n relative_scales = np.ptp(X, axis=0)\n \n X_norm = (X - offset)/relative_scales\n\n kdt_kwds = dict(leaf_size=40, metric=\"minkowski\")\n kdt_kwds.update(kwargs)\n kdt = neighbours.KDTree(X_norm, **kdt_kwds)\n\n return (kdt, relative_scales, offset)", "title": "" }, { "docid": "2550ea4de41fb521007d3f2fb67a0409", "score": "0.557979", "text": "def generate_nodes(operands, operators):\n commutative_operators = ['+', '*']\n if operators[2] in commutative_operators and already_used(operands, operators):\n return\n\n node = Node( # Left oriented tree.\n operator=operators[0],\n right=Node(operand=operands[0]),\n left=Node(\n operator=operators[1],\n right=Node(operand=operands[1]),\n left=Node(\n operator=operators[2],\n right=Node(operand=operands[2]),\n left=Node(operand=operands[3])\n )\n )\n )\n yield node\n\n swap_sides(node.left) # Half left oriented tree.\n if node.left.operator not in commutative_operators:\n yield node\n\n swap_sides(node) # Right oriented tree.\n if node.left.operator not in commutative_operators:\n yield node\n\n swap_sides(node.right) # Half right oriented tree.\n if node.left.operator not in commutative_operators:\n yield node\n\n node.left, node.right.left = node.right.left, node.left # Half right and half left oriented.\n if node.operator not in commutative_operators or node.left.operator not in commutative_operators or \\\n node.right.operator in commutative_operators:\n yield node", "title": "" }, { "docid": "a1661b71270dd869f79891ed84140c48", "score": "0.5571809", "text": "def construct_evaltree(self):\n raise NotImplementedError(\"construct_evaltree(...) 
is not implemented!\")", "title": "" }, { "docid": "9071a4af82cbdb882342a234648cbef5", "score": "0.5567376", "text": "def build_kdtree(vertexes):\n return build_kdtree_nodes(vertexes, range(len(vertexes)))", "title": "" }, { "docid": "2dcdf51ee0968fa49e3fcd4c594b666c", "score": "0.55577296", "text": "def featurize_add_minimal_tree_nodes(self):\n\t\tfor doc_i, doc in enumerate(self.docs):\n\t\t\tfor tt_i, tt in enumerate(doc.two_tokens):\n\t\t\t\t# split clears up instances like Arizona_Rattlers, which \n\t\t\t\t# are tow words in the parsed sentences\n\t\t\t\ttoken1 = tt.token1.split(\"_\")\n\t\t\t\ttoken2 = tt.token2.split(\"_\")\n\t\t\t\tin_between_words = self.get_in_between_words_and_pos(doc, tt)[0]\n\t\t\t\tsubtree_string = token1 + in_between_words + token2\n\t\t\t\ttt_sent_tree = doc.parses[tt.sent_offset1]\n\t\t\t\ttt_subtree = self.get_subtree_between_words(tt_sent_tree, subtree_string)\n\n\t\t\t\tif isinstance(tt_subtree, nltk.tree.Tree):\n\t\t\t\t\ttt_subtree_labels = self.get_tree_labels(tt_subtree)\n\t\t\t\t\ttt_subtree_labels = '_'.join(tt_subtree_labels)\n\t\t\t\telse:\n\t\t\t\t\ttt_subtree_labels = 'no_comm_subtree'\n\t\t\t\tself.rel_inst_list[doc_i][tt_i].features.append('subtree_node_labels__'+tt_subtree_labels)", "title": "" }, { "docid": "fa01381fec6fe9eb0753102858411293", "score": "0.55571055", "text": "def make_layout(self, horizontal=True):\n self.max_depth = self.get_depth()\n leafs = self.get_leafs()\n self.max_width = len(leafs)\n x_options = np.linspace(0, 1, self.max_depth)\n y_options = np.linspace(0, 1, self.max_width)\n pos = {self.starting_node.node_id: [x_options[0], None]}\n layers = [[self.starting_node]]\n for i in range(self.max_depth - 1):\n next_layer = []\n for node in layers[i]:\n next_layer += node.get_children()\n for node in next_layer:\n pos[node.node_id] = [x_options[i + 1], None]\n layers.append(next_layer)\n for i, leaf in enumerate(leafs):\n pos[leaf.node_id][1] = y_options[i]\n parent = leaf.get_parent()\n while parent:\n pos[parent.node_id][1] = y_options[i]\n parent = parent.get_parent()\n if horizontal:\n return {key: np.array(val, dtype=float) for key, val in pos.items()}\n else:\n return {key: np.array([1, 0])-np.array(val[::-1], dtype=float) for key, val in pos.items()}", "title": "" }, { "docid": "5b7c4cd2847b4348aab20fea76a4d155", "score": "0.55563784", "text": "def create_tree(self, split_node_fn):\n # A list of nodes that need to be evaluated.\n node_stack = [self.root]\n\n while len(node_stack) != 0:\n node = node_stack.pop(-1)\n split = split_node_fn(node.x, node.y, node.dim, node.dim)\n if split and node.dim != self.min_node_dim:\n # Split the node into 4 children.\n split_dim = node.dim / 2\n tl_node = QuadTreeNode(\n node.x, node.y, split_dim, node, True)\n tr_node = QuadTreeNode(\n node.x + split_dim, node.y, split_dim, node, True)\n bl_node = QuadTreeNode(\n node.x, node.y + split_dim, split_dim, node, True)\n br_node = QuadTreeNode(\n node.x + split_dim, node.y + split_dim, split_dim, node, True)\n # Update the node being evaluated.\n node.set_children(tl_node, tr_node, bl_node, br_node)\n node.set_is_leaf(False)\n # Evaluate the split nodes soon.\n node_stack.append(tl_node)\n node_stack.append(tr_node)\n node_stack.append(bl_node)\n node_stack.append(br_node)", "title": "" }, { "docid": "bdb0b3d48a6416957605238f204d51bf", "score": "0.55561", "text": "def construct_graph(self):\n self.validate_tree()\n self._construct_tags()\n self.validate_tree()\n self._construct_types()\n self.validate_tree()\n 
self._construct_clones()\n self.validate_tree()\n self._construct_outer_namespaces()\n self.validate_tree()", "title": "" }, { "docid": "5168ef85731d53466f8de05d78a4918b", "score": "0.55482996", "text": "def __init__(\n self, GM: DiGraphMatcher, G1_node: XModelNode = None, G2_node: XModelNode = None\n ):\n self.GM = GM\n\n # Initialize the last stored node pair.\n self.G1_node = None\n self.G2_node = None\n self.depth = len(GM.core_1)\n\n if G1_node is None or G2_node is None:\n # Then we reset the class variables\n GM.core_1 = {}\n GM.core_2 = {}\n GM.in_1 = {}\n GM.in_2 = {}\n GM.out_1 = {}\n GM.out_2 = {}\n\n # Watch out! G1_node == 0 should evaluate to True.\n if G1_node is not None and G2_node is not None:\n # Add the node pair to the isomorphism mapping.\n GM.core_1[G1_node] = G2_node\n GM.core_2[G2_node] = G1_node\n\n # Store the node that was added last.\n self.G1_node = G1_node\n self.G2_node = G2_node\n\n # Now we must update the other four vectors.\n # We will add only if it is not in there already!\n self.depth = len(GM.core_1)\n\n # First we add the new nodes...\n for vector in (GM.in_1, GM.out_1):\n if G1_node not in vector:\n vector[G1_node] = self.depth\n for vector in (GM.in_2, GM.out_2):\n if G2_node not in vector:\n vector[G2_node] = self.depth\n\n # Now we add every other node...\n\n # Updates for T_1^{in}\n new_nodes = set()\n for node in GM.core_1:\n new_nodes.update(\n [\n predecessor\n for predecessor in GM.G1.predecessors(node)\n if predecessor not in GM.core_1\n ]\n )\n for node in new_nodes:\n if node not in GM.in_1:\n GM.in_1[node] = self.depth\n\n # Updates for T_2^{in}\n new_nodes = set()\n for node in GM.core_2:\n new_nodes.update(\n [\n predecessor\n for predecessor in GM.G2.predecessors(node)\n if predecessor not in GM.core_2\n ]\n )\n for node in new_nodes:\n if node not in GM.in_2:\n GM.in_2[node] = self.depth\n\n # Updates for T_1^{out}\n new_nodes = set()\n for node in GM.core_1:\n new_nodes.update(\n [\n successor\n for successor in GM.G1.successors(node)\n if successor not in GM.core_1\n ]\n )\n for node in new_nodes:\n if node not in GM.out_1:\n GM.out_1[node] = self.depth\n\n # Updates for T_2^{out}\n new_nodes = set()\n for node in GM.core_2:\n new_nodes.update(\n [\n successor\n for successor in GM.G2.successors(node)\n if successor not in GM.core_2\n ]\n )\n for node in new_nodes:\n if node not in GM.out_2:\n GM.out_2[node] = self.depth", "title": "" }, { "docid": "d2a272f5bedf3eae3fd0399c0765c4f6", "score": "0.5544365", "text": "def test_create_decision_per_leafs():\n # data creation\n n = 200\n min_size_leaf = 1\n\n X = np.random.uniform(size = (n, 510), low = -1,high = 1)\n y = 10 * np.sin(np.pi * X[:,0]*X[:,1]) + 20 * ( X[:,2] - .5)**2 +\\\n 10 * X[:,3] + 5 * X[:,4] + np.random.normal(size = n)\n\n rf_class = sklearn.ensemble.RandomForestRegressor(n_estimators = 2,\n min_samples_leaf = min_size_leaf)\n random_forest = rf_class.fit(X = X,\n y = y.ravel())\n\n tree = random_forest.estimators_[0]\n\n v_leaf, v_all = smooth_rf.create_decision_per_leafs(tree)\n\n assert v_all.shape[0] == v_all.shape[1] and \\\n v_all.shape[1] == v_leaf.shape[1], \\\n \"number of nodes in tree not preserved in output matrices shapes\"\n\n assert v_leaf.shape[0] == \\\n np.sum(tree.tree_.children_right == -1), \\\n \"number of leaves doesn't matrix ouput matrix shape (v_leaf)\"\n\n\n # static check\n\n # tree structure:\n # ~upper: left, lower: right~\n # |--1\n # -0-|\n # | |--3\n # |-2-|\n # | |--5\n # |-4-|\n # |--6\n\n # creating desired structure\n class 
inner_fake_tree():\n def __init__(self, cl, cr):\n self.children_left = cl\n self.children_right = cr\n\n class fake_tree():\n def __init__(self, cl, cr):\n self.tree_ = inner_fake_tree(cl, cr)\n self.__class__ = sklearn.tree.tree.DecisionTreeRegressor\n\n children_left = np.array([2,-1,4,-1,6,-1,-1], dtype = np.int)\n children_right = np.array([1,-1,3,-1,5,-1,-1], dtype = np.int)\n\n test = fake_tree(children_left,children_right)\n\n\n v_all_should = np.array([[1,0,0,0,0,0,0],\n [1,1,0,0,0,0,0],\n [1,0,1,0,0,0,0],\n [1,0,1,1,0,0,0],\n [1,0,1,0,1,0,0],\n [1,0,1,0,1,1,0],\n [1,0,1,0,1,0,1]],dtype = np.int)\n\n v_leaf_should = v_all_should[test.tree_.children_left == -1,:]\n\n v_leaf_static, v_all_static = smooth_rf.create_decision_per_leafs(test)\n\n if type(v_all_static) == scipy.sparse.coo.coo_matrix:\n v_all_static = v_all_static.todense()\n if type(v_leaf_static) == scipy.sparse.coo.coo_matrix:\n v_leaf_static = v_leaf_static.todense()\n\n assert np.all(v_all_should == v_all_static), \\\n \"static test failed to reproduce correct solutions, (v_all)\"\n assert np.all(v_leaf_should == v_leaf_static), \\\n \"static test failed to reproduce correct solutions, (v_leaf)\"", "title": "" }, { "docid": "c2805614962f815347b5558e5db64443", "score": "0.554282", "text": "def buildChildrenMap(self):\n self.mapChildren = {}\n for nodeId in self.mapParent:\n parentId = self.mapParent[nodeId]\n self.mapChildren.setdefault(parentId, []).append(nodeId)", "title": "" }, { "docid": "a9e38ef732ba6e3dff820ca0aa31550a", "score": "0.553929", "text": "def construct(p_idx, near_p_ds, far_p_ds, i):\n #assert all(d <= child_d[i] for (k, d) in near_p_ds)\n #assert all(child_d[i] < d <= child_d[i + 1]\n # for (k, d) in far_p_ds)\n\n if len(near_p_ds) + len(far_p_ds) <= self.leafsize:\n idx = [ii for (ii, d) in itertools.chain(near_p_ds,\n far_p_ds)]\n radius = max(d for (ii, d) in itertools.chain(near_p_ds,\n far_p_ds,\n [(0.0, -np.inf)]))\n #print(\"Building level %d leaf node for p_idx=%d with %s\"\n # % (i, p_idx, str(idx)))\n node = CoverTree._LeafNode(idx, p_idx, radius)\n return node, []\n else:\n # Remove points very near to p, and as many as possible of\n # those that are just \"near\"\n nearer_p_ds, so_so_near_p_ds = split_with_dist(\n child_d[i - 1], child_d[i], near_p_ds)\n p_im1, near_p_ds = construct(p_idx, nearer_p_ds,\n so_so_near_p_ds, i - 1)\n\n # If no near points remain, p_i would only have the\n # trivial child p_im1. 
Skip directly to p_im1 in the\n # explicit representation\n if not near_p_ds:\n #print(\"Passing though level %d child node %s \"\n # \"up to level %d\" % (i - 1, str(p_im1), i))\n return p_im1, far_p_ds\n else:\n # near_p_ds now contains points near to p at level i,\n # but not descendants of p at level i-1.\n #\n # Make new children of p at level i from each one until\n # none remain\n children = [p_im1]\n while near_p_ds:\n q_idx, _ = random.choice(near_p_ds)\n\n near_q_ds, far_q_ds = split_without_dist(\n q_idx, child_d[i - 1], child_d[i], near_p_ds)\n near_q_ds2, far_q_ds2 = split_without_dist(\n q_idx, child_d[i - 1], child_d[i], far_p_ds)\n near_q_ds += near_q_ds2\n far_q_ds += far_q_ds2\n\n #assert not (set(i for (i,d) in near_q_ds) &\n # set(i for (i,d) in far_q_ds))\n #assert not (set(i for (i,d) in near_q_ds+far_q_ds) &\n # set(i for (i,d) in far_p_ds))\n\n q_im1, unused_q_ds = construct(\n q_idx, near_q_ds, far_q_ds, i - 1)\n\n children.append(q_im1)\n\n # TODO: Figure out an effective way of not having\n # to recalculate distances to p\n new_near_p_ds, new_far_p_ds = split_without_dist(\n p_idx, child_d[i], child_d[i + 1], unused_q_ds)\n near_p_ds += new_near_p_ds\n far_p_ds += new_far_p_ds\n\n p_i = CoverTree._InnerNode(p_idx, i, heir_d[i], children)\n #print(\"Creating level %d inner node with %d children, \"\n # \"remaining points = %s\" %\n # (i, len(p_i.children), str(far_p_ds)))\n return p_i, far_p_ds", "title": "" }, { "docid": "86930033c85711a098a510006e227a9e", "score": "0.55342615", "text": "def build_genus_tree(tax_tab, **kw):\n kw.setdefault('other_idxs', {'rank':-1})\n kw.setdefault('sep', '*')\n kw.setdefault('field_constructors',\n [int, str, int, int, get_rank_number])\n kw.setdefault('ignore', lambda x: x[-1] == 6)\n return tree_from_tab(tax_tab, 0,1,2, **kw)", "title": "" }, { "docid": "266bb5ee7555a692265c23108ab7ca1e", "score": "0.5533038", "text": "def _build_tree(self, data, parent=None):\n\n Y = data[:, 0]\n nsamples = np.bincount(Y)\n\n # if data is \"pure\" i.e has examples of a single class\n # then return a leaf node predicting that class\n if len(set(Y)) <= 1:\n return Node(parent, nsamples)\n\n # Find the attribute that maximizes the gain\n gain, split_attr = self._best_attribute(data)\n\n # print(\"Splitting on:\", ATTRIBUTES[split_attr],\n # \"Data len \", data[:, split_attr])\n\n # Split if gain is positive\n # Does this is result in pre-pruned trees?\n if gain > 0:\n\n this_node = Node(parent, nsamples, split_attr, children=[])\n\n # There's some code duplicacy here - _best_attribute has exact same line\n binarize = not self.binarize_median and attribute_is_numerical(split_attr)\n\n if binarize:\n this_node.median_value = np.median(data[:, split_attr])\n\n partitions = partition(data[:, split_attr], binarize).items()\n\n # This handles the case when splitting on attributes after binarizing\n # there may be a single partition which is not pure (wrt class label)\n # NOTE: Not too sure about this.\n if binarize and len(partitions) == 1:\n return this_node\n\n # Create children of this node\n for val, part in partitions:\n\n child = self._build_tree(data[part], parent=this_node)\n child.split_value = val\n\n this_node.children.append(child)\n\n return this_node\n\n # Otherwise create a leaf node that predicts the majority class\n else:\n return Node(parent, nsamples)", "title": "" }, { "docid": "d2a5c40d0c28851373ab77266c328c72", "score": "0.55314696", "text": "def tree_to_nodes(tree):\n def get_node(tree_node):\n return Node(\n tree_node.value,\n 
{n: get_node(tree.nodes[n]) for n in tree_node.children}\n )\n return get_node(tree.root)", "title": "" }, { "docid": "80e3f8abddaec86e58a421213699a90c", "score": "0.5530057", "text": "def gather_tree_py(values, parents):\n beam_length = values.shape[0]\n num_beams = values.shape[1]\n res = np.zeros_like(values)\n res[-1, :] = values[-1, :]\n for beam_id in range(num_beams):\n parent = parents[-1][beam_id]\n for level in reversed(range(beam_length - 1)):\n res[level, beam_id] = values[level][parent]\n parent = parents[level][parent]\n return np.array(res).astype(values.dtype)", "title": "" }, { "docid": "1c5d003ed4ca80dcefcc079926f961bc", "score": "0.55258864", "text": "def _generate_tree(self, X_train, y_train, X_val, y_val, depth=0, removed_features=[]):\n\n X_train = np.copy(X_train)\n y_train = np.copy(y_train)\n X_val = np.copy(X_val)\n y_val = np.copy(y_val)\n\n X_values = []\n X_counts = []\n for i in range(X_train.shape[1]):\n Xi_values, Xi_counts = np.unique(X_train[:, i], return_counts=True)\n if is_number(Xi_values[0]):\n Xi_values = Xi_values.astype('float')\n X_values.append(Xi_values)\n X_counts.append(Xi_counts)\n\n y_values, y_counts = np.unique(y_train, return_counts=True)\n mode = y_values[np.argmax(y_counts)]\n\n # Out of recursion cases return a leaf node.\n # 1. There is only one class.\n # 2. There is no valid feature, i.e. all values are the same for samples, or the left feature set is empty.\n # 3. Maximum tree depth is reached.\n valid_features = [i for i in range(X_train.shape[1]) if len(X_values[i]) > 1 and i not in removed_features]\n print('-'*20)\n print('valid features')\n print(valid_features)\n print('-'*20)\n if any((len(np.unique(y_train)) == 1, len(valid_features) == 0, depth >= self.max_depth)):\n return Node(mode, feature=None, threshold='', val=y_val)\n\n # Select the best feature. 
threshold = '' if the feature is categorical.\n best_feature, threshold = self._select_feature(X_train, y_train, X_values, X_counts, y_counts, valid_features)\n print('best_feature')\n print(best_feature)\n print('-'*20)\n root = Node(mode, feature=best_feature, threshold=threshold, val=y_val)\n\n # Branching.\n x = X_train[:, best_feature]\n x_val = X_val[:, best_feature]\n if is_number(x[0]):\n x = x.astype('float')\n x_val = x_val.astype('float')\n binary_dict = {'<=': operator.__le__, '>': operator.__gt__}\n for name, operation in binary_dict.items():\n train_indices = np.where(operation(x, threshold))\n X_train_branch = X_train[train_indices]\n y_train_branch = y_train[train_indices]\n val_indices = np.where(operation(x_val, threshold))\n X_val_branch = X_val[val_indices]\n y_val_branch = y_val[val_indices]\n if X_train_branch.size == 0:\n # Generate a leaf node that inherits its parent value.\n Node(mode, parent=root, feature=None, threshold=f'{name}{threshold}', val=y_val_branch)\n else:\n branch = self._generate_tree(X_train_branch, y_train_branch, X_val_branch, y_val_branch, depth=depth+1, removed_features=removed_features)\n branch.parent = root\n branch.threshold = f'{name}{threshold}'\n else:\n for e in X_values[best_feature]:\n train_indices = np.where(x == e)\n X_train_branch = X_train[train_indices]\n y_train_branch = y_train[train_indices]\n val_indices = np.where(x_val == e)\n X_val_branch = X_val[val_indices]\n y_val_branch = y_val[val_indices]\n if X_train_branch.size == 0:\n # Generate a leaf node that inherits its parent value.\n Node(mode, parent=root, feature=None, threshold=e, val=y_val_branch)\n else:\n # Remove the column of categorical best feature.\n removed_features.append(best_feature)\n branch = self._generate_tree(X_train_branch, y_train_branch, X_val_branch, y_val_branch, depth=depth+1, removed_features=removed_features)\n branch.parent = root\n branch.threshold = e\n return root", "title": "" }, { "docid": "8f44c63924c99066b80de08724a5eaf1", "score": "0.5525428", "text": "def gen_fast_samples(trees, labels, vectors, vector_lookup):\n\n print(\"number of trees : \" + str(len(trees)))\n # encode labels as one-hot vectors\n label_lookup = {label: _onehot(i, len(labels)) for i, label in enumerate(labels)}\n # print vector_lookup\n # print vectors\n for tree in trees:\n\n nodes = []\n children = []\n label_one_hot = label_lookup[tree['label']]\n \n queue = [(tree['tree'], -1)]\n # print queue\n while queue:\n # print \"############\"\n node, parent_ind = queue.pop(0)\n # print node\n # print parent_ind\n node_ind = len(nodes)\n # print \"node ind : \" + str(node_ind)\n # add children and the parent index to the queue\n queue.extend([(child, node_ind) for child in node['children']])\n # create a list to store this node's children indices\n children.append([])\n # add this child to its parent's child list\n if parent_ind > -1:\n children[parent_ind].append(node_ind)\n \n node_index = int(node['node'])\n # print \"node : \" + str(node_index)\n\n # print vectors[node_index]\n # look_up_vector = vector_lookup[int(node)]\n # print \"vector look up : \" + str(look_up_vector)\n nodes.append(vectors[node_index])\n # print \"children list length: \" + str(len(children))\n yield (nodes, children, label_one_hot)", "title": "" } ]
ff56ebfcdc84a9c0a7c66a26c96c92f3
Yield items for which the indices match
[ { "docid": "c86d89c67dc21a52ec5d455e38133ce9", "score": "0.7073103", "text": "def _yield_subset(iterable, indices):\n if not indices:\n return\n remaining = sorted(indices, reverse=True)\n cur = remaining.pop()\n for idx, item in tqdm(enumerate(iterable)):\n if idx == cur:\n yield item\n if remaining:\n cur = remaining.pop()\n else:\n return", "title": "" } ]
[ { "docid": "7ac2b4fa3302390e27de2e0f054f793f", "score": "0.69100356", "text": "def select_indices(self, indices):\n indices = copy(indices) # passed indices is a reference, need own copy to modify\n for idx, record in enumerate(self):\n if idx in indices:\n yield record\n indices.remove(idx)\n\n # stopping condition\n if not indices:\n break", "title": "" }, { "docid": "e3e05028013a92010705374c57e9189e", "score": "0.6864556", "text": "def iter_matched_indices(self) -> Generator:\n return (\n self._iter_indices_1_seq()\n if self.num_seqs == 1\n else self._iter_indices_2_seq()\n )", "title": "" }, { "docid": "241b7c3fe6e39c7e444ad2e7878baaef", "score": "0.6139824", "text": "def __filter_sorted_index_list(\n self, sorted_idx: List[np.ndarray], remaining_idx: List[Tuple[int, int]]\n ) -> List[np.ndarray]:\n sorted_idx_filtered = []\n for idx_tuple in sorted_idx:\n remaining = False\n for i, j in enumerate(idx_tuple):\n if i in remaining_idx and j in remaining_idx[i]:\n remaining = True\n break\n if remaining:\n sorted_idx_filtered.append(idx_tuple)\n return sorted_idx_filtered", "title": "" }, { "docid": "00196a6876a758fcc69c418fe3a0d917", "score": "0.6037143", "text": "def non_redundant_index_generator(self):\n # In the present case, we simply rely on Manifold.index_generator:\n for ind in self.manifold.index_generator(self.nid):\n yield ind", "title": "" }, { "docid": "a7270588f7bf8abac0dd7122eb7c2f44", "score": "0.5872336", "text": "def connected(self, *idx): # TODO\n assert(len(idx) == 1)\n it = np.nditer(self.weights_[...,idx[0]], flags=['multi_index'])\n pidx = (self.idx_[0]-1,)\n while not it.finished:\n yield pidx + it.multi_index[:-1]\n it.iternext()", "title": "" }, { "docid": "399c1f71a90381096203635ab36577ff", "score": "0.5805829", "text": "def neighbors_matching(self, item, n=12, check=None):\n\n for item in self.nearest_matching(self.vec(item), n, check):\n yield item", "title": "" }, { "docid": "49d427d21d061592d36b09ab773b7565", "score": "0.58020824", "text": "def __contradicts(self, M: List[Dict[int, int]], idx_tuple: List[int]) -> bool:\n for i, index in enumerate(idx_tuple):\n existing_idx = [idx[i] for idx in M]\n if index in existing_idx:\n return True\n return False", "title": "" }, { "docid": "593de20de06a1d200750b36dd1edc08d", "score": "0.5782576", "text": "def indices(self) -> list:\n ...", "title": "" }, { "docid": "5dd5a5394c4e8325d4cdc95993e01b08", "score": "0.57335544", "text": "def all(self):\n for idx in self.__indexes:\n yield IDMixin(self.__population, idx)", "title": "" }, { "docid": "6c8d081cf65cbe70a90f34ec38e0f1d2", "score": "0.5717195", "text": "def search_vectors(self):\n unique_cell_dimensions = set(self._target_unit_cell.parameters()[:3])\n for i, direction in enumerate(self.search_directions):\n for l in unique_cell_dimensions:\n yield direction * l", "title": "" }, { "docid": "14f29be5b9ed3904b42fdb3a5e355991", "score": "0.5714759", "text": "def __iter__(self):\n for idx in self._values:\n yield idx", "title": "" }, { "docid": "10703ea614dafb045c40c8f84460e8ae", "score": "0.56955326", "text": "def neighbors(index, words):\n for j in range(len(words)):\n if j != index:\n yield j", "title": "" }, { "docid": "862358d8766fb49c560295bf1036fbb7", "score": "0.56557876", "text": "def __iter__(self):\n for idx in self.__indexes:\n yield IDMixin(self.__population, idx)", "title": "" }, { "docid": "8540ab5535f7091d567667dcd7773686", "score": "0.564651", "text": "def match_indices(self,f):\n matched_vertices = list()\n tol = 1E-6\n matchedIndices = list()\n for 
i in range(self.nvertex):\n for j in range(f.nvertex):\n dx = abs(self.x[i] - f.x[j])\n dy = abs(self.y[i] - f.y[j])\n dz = abs(self.z[i] - f.z[j])\n if (dx<tol and dy<tol and dz<tol and (j not in matched_vertices)):\n matchedIndices.append([i,j])\n matched_vertices.append(j) # This vertex has been matched, remove from list\n break # each index can only have a single match\n return matchedIndices", "title": "" }, { "docid": "230c46c28488800baabdf83aa9130e4f", "score": "0.5636526", "text": "def __star(self, node, link_inner_index):\r\n for link in self.A:\r\n if self.N[node] == link[link_inner_index]:\r\n yield link", "title": "" }, { "docid": "08b91971da87713fcb4078cbff5008d4", "score": "0.56069535", "text": "def common_inds(arrs):\n\tvals = common_vals(arrs)\n\treturn [find(arr, vals) for arr in arrs]", "title": "" }, { "docid": "f582f2bc1f3764797f760423cd8d392f", "score": "0.56008524", "text": "def __iter__(self):\n for indprev, indnow in zip(self.sparse.indptr, self.sparse.indptr[1:]):\n yield list(zip(self.sparse.indices[indprev:indnow], self.sparse.data[indprev:indnow]))", "title": "" }, { "docid": "52e1fd51e2c7c553ca3f2c734ca4b661", "score": "0.5584126", "text": "def search_idx( self, frag_idx, s_l):\n\t\tsmiles_map = list()\n\t\texclusive_map = list()\n\t\tfor s in s_l:\n\t\t\tfrag_map = self.search( s)\n\t\t\tsmiles_map.append(frag_map[ frag_idx] == True)\n\t\t\texclusive_map.append( sum( frag_map))\n\n\t\treturn smiles_map, exclusive_map", "title": "" }, { "docid": "7ff50d9ae1039b183963ee859d99ba6b", "score": "0.55827695", "text": "def items(self, j=None):\n if j == None:\n return [(k, self.__dict__[k]) for k in self.indices]\n elif type(j) == int:\n if 0 <= j <= self.J:\n return [(k, self.__dict__[k]) for k in self.indices if sum(k) == j]\n else:\n raise IndexError\n else:\n raise TypeError", "title": "" }, { "docid": "afa2526987cefa6d83bb344287ab82b2", "score": "0.5553996", "text": "def selection_iterator(iterable, ids):\n ids = sorted(ids)\n counter = 0\n for id, item in enumerate(iterable):\n if id == ids[counter]:\n yield item\n counter += 1\n if counter == len(ids):\n break", "title": "" }, { "docid": "f2ce1e2c0965eb9d4ab6080027490e59", "score": "0.55418795", "text": "def all_pairs(nodes):\n for i, u in enumerate(nodes):\n for j, v in enumerate(nodes):\n if i < j:\n yield u, v", "title": "" }, { "docid": "f09e081152c217d37c039fcba35c0912", "score": "0.5518375", "text": "def _intersect_idxs(idxs):\n if len(idxs) == 1:\n return idxs[0]\n elif len(idxs) == 0:\n return pd.Index([])\n else:\n return idxs[0].intersection(_intersect_idxs(idxs[1:]))", "title": "" }, { "docid": "a15290b1dc8a69bb033c24fda576e9c4", "score": "0.55082256", "text": "def project(vecs, index):\n for vec in vecs:\n yield vec[index]", "title": "" }, { "docid": "f4439987529ec38423f1631bd9b25476", "score": "0.5507702", "text": "def get_indices(values, listlike):\n\n return [i for i, l in zip(count(), listlike) if l in values]", "title": "" }, { "docid": "7d7a02b3053d8ad4cb86719174e78e53", "score": "0.5471021", "text": "def _gen_matches(search, conn, target_units, source_units, stoplist_set,\n features_size):\n target_feature_matrix, target_breaks = _construct_unit_feature_matrix(\n target_units, stoplist_set, features_size)\n for hits2positions in gen_hits2positions(search, conn,\n target_feature_matrix,\n target_breaks, source_units,\n stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v)\n for k, v in hits2positions.items() if len(v) >= 2\n }\n for (t_ind, s_ind), positions in 
overhits2positions.items():\n yield (t_ind, s_ind, positions)", "title": "" }, { "docid": "bf55c52e460fcf5b7a2749fd762332b1", "score": "0.54694223", "text": "def find_distribution_match_in_one_hand(deal, indexes, pattern):\n if a_wildcard_b(summary_of_distribution(deal[indexes[0]]),pattern) == pattern:\n return (indexes[0], indexes[1])\n if a_wildcard_b(summary_of_distribution(deal[indexes[1]]),pattern) == pattern:\n return (indexes[1], indexes[0])\n return False", "title": "" }, { "docid": "9be9f8476e84387ff967f645fab19072", "score": "0.5461056", "text": "def matched_entity_finder(iterator, data_list):\n matched_items = []\n check_element = data_list[iterator][1]\n for i, element in enumerate(data_list):\n for j, sub_element in enumerate(element):\n if sub_element == check_element:\n print(\"%s in %s\" % (check_element, element))\n matched_items.append(i)\n else:\n # print(\"BOO: %s\" % j)\n None\n print(matched_items)\n return matched_items", "title": "" }, { "docid": "48b071e64105e997c916de1b8b1ec3aa", "score": "0.5449023", "text": "def process_2indices(self, index1, index2, pairwise_unit):\n\t\tif self.debug_process_2indices:\n\t\t\tprint \"\\t\\t##Enter process_2indices()\"\n\t\t#compute three distances for these two indices\n\t\tgo_id1 = index1[-1]\n\t\tgo_id2 = index2[-1]\n\t\tif go_id1 == go_id2:\n\t\t\t#indices pointing to same go id, stop!\n\t\t\treturn\n\t\tif self.debug_process_2indices:\n\t\t\tprint \"index of %d: %s\\n\"%(go_id1, repr(index1)) \n\t\t\tprint \"index of %d: %s\\n\"%(go_id2, repr(index2))\n\n\t\tdepth1 = len(index1)\n\t\tdepth2 = len(index2)\n\t\tif depth1 <= depth2:\n\t\t\tmin_depth = depth1\n\t\t\tfixed_index = index1\n\t\t\t#convert to list because we want to use .index() method\n\t\t\tdynamic_index = list(index2)\n\t\t\tindex_set_form = Set(index2)\n\t\telse:\n\t\t\tmin_depth = depth2\n\t\t\tfixed_index = index2\n\t\t\tdynamic_index = list(index1)\n\t\t\tindex_set_form = Set(index1)\n\t\t#find the length of the longest common head sequence\n\t\tlca_fixed_index = 0\n\t\tlca_dynamic_index = 0\n\t\tran = range(min_depth)\n\t\t#start from the end\n\t\tran.reverse()\n\t\tfor i in ran:\n\t\t\tif fixed_index[i] in index_set_form:\n\t\t\t\tlca_fixed_index = i\n\t\t\t\tlca_dynamic_index = dynamic_index.index(fixed_index[i])\n\t\t\t\tbreak\n\t\t#i points to the first different column\n\t\tlca = fixed_index[lca_fixed_index]\n\t\t\n\t\t#add the lca to the common_ancestor set of this pair\n\t\tpairwise_unit.common_ancestor_set.add(lca)\n\n\t\tdistance1_from_lca = len(fixed_index) - (lca_fixed_index+1)\n\t\tdistance2_from_lca = len(dynamic_index) - (lca_dynamic_index+1)\n\t\traw_distance = distance1_from_lca + distance2_from_lca\n\t\tlee_distance = 15 - (min(lca_fixed_index, lca_dynamic_index) +1)\n\t\tjasmine_distance = min(distance1_from_lca, distance2_from_lca)\n\t\t\n\t\tif raw_distance < pairwise_unit.raw_distance:\n\t\t\tpairwise_unit.raw_distance = raw_distance\n\t\t\tif self.debug_process_2indices:\n\t\t\t\tprint \"raw_distance replaced\"\n\t\tif lee_distance < pairwise_unit.lee_distance:\n\t\t\tpairwise_unit.lee_distance = lee_distance\n\t\t\tif self.debug_process_2indices:\n\t\t\t\tprint \"lee_distance replaced\"\n\t\tif jasmine_distance < pairwise_unit.jasmine_distance:\n\t\t\tpairwise_unit.jasmine_distance = jasmine_distance\n\t\t\tif self.debug_process_2indices:\n\t\t\t\tprint \"jasmine_distance replaced\"\n\n\t\tif self.debug_process_2indices:\n\t\t\tprint \">%d %d: %d(raw) %d(lee) %d(jasmine) %s(lca)\\n\"%(go_id1, go_id2, raw_distance, 
\\\n\t\t\t\tlee_distance, jasmine_distance, lca)\n\t\t\tprint \"Corresponding GO acc: %s and %s:\\n\"%(self.go_id2acc[go_id1], self.go_id2acc[go_id2])\n\t\t\tprint \"\\tOne lowest common ancestor: %s(%s)\\n\"%(lca, self.go_id2acc[lca])\n\t\t\tprint \"\\t\\t##Leave process_2indices()\"", "title": "" }, { "docid": "d83602b629f0a6a314ed2006fb7328a0", "score": "0.5443816", "text": "def index_in_grandparent(self, indices):\n return [self.__indexes[index] for index in indices]", "title": "" }, { "docid": "753de7455d471c064d537103f5d40ee2", "score": "0.5419731", "text": "def test_index(self):\n self.assertEqual(('c', 'd', 'e'), yourtest.partial_index(2, 'a', 'b', 'c', 'd', 'e'))\n self.assertEqual((6, 7, 8, 9), yourtest.partial_index(5, 1, 2, 3, 4, 5, 6, 7, 8, 9))", "title": "" }, { "docid": "5e83814999274e31b7e6ccbb8f069743", "score": "0.54147476", "text": "def filter_with_filters(rows, filters):\n for row in rows:\n ddict = DotDict(row)\n flist = [(f, ddict.get(f)) for f in filters]\n for idx in flist:\n yield idx", "title": "" }, { "docid": "2765314b1cead4687d76a7669b8745d0", "score": "0.5398051", "text": "def all_index(self,ent,x):\n saida = self.__index(ent,x,0)\n for i in saida:\n return i", "title": "" }, { "docid": "638d47bacfefd436a08e91484a04ab28", "score": "0.53912914", "text": "def test_build_range_indexing():\n builds = [\"abc\", \"zyx\", \"foo\", \"bar\"]\n build_range = BuildRange(builds)\n for k1, k2 in zip(build_range, builds):\n assert k1 == k2\n for index, build in enumerate(builds, start=0):\n assert build_range.index(build) == index", "title": "" }, { "docid": "bdfcb360d249b3cb151e8341e8eb2f6d", "score": "0.5388594", "text": "def findAsIndices(self, start, end, offset=0):\n i = offset\n pt = self.items[i]\n pts = []\n indices = []\n while pt.time<start:\n i+=1\n pt = self.items[i]\n while pt.time<end:\n indices.append(i)\n pts += [pt]\n i+=1\n if i>=len(self.items):\n break\n pt = self.items[i]\n return indices", "title": "" }, { "docid": "7f9c2111b2a1cbceb0bec7ca8fb7bacd", "score": "0.5388513", "text": "def target_indices(self, indices):\n\n relative = [] # indices in the instance lists\n for i, (s, _, o) in enumerate(self.triples):\n relative.append((indices[i].index(s), indices[i].index(o)))\n\n absolute = [] # indices in the flattened list\n count = 0\n for i, (s, o) in enumerate(relative):\n absolute.append((s + count, o + count))\n count += len(indices[i])\n\n return relative, absolute", "title": "" }, { "docid": "472157306ad8e1673feb0da0980750fa", "score": "0.5380627", "text": "def sorted_idx_iter(self, types: List[int]) -> Iterable[int]:\n if types:\n idx_iters = [self.idx[key] for key in types if key in self.idx]\n else:\n idx_iters = [val for key, val in self.idx.items()]\n\n # Use the heapq.merge function to return sorted iterator of file indices\n return heapq.merge(*idx_iters)", "title": "" }, { "docid": "bc83b60cb334b4db275d4dd4331d0b27", "score": "0.5361057", "text": "def _find_duplicates(indices):\n\n indices_organized = dict()\n skip_bibs = []\n for k, v in indices.iteritems():\n d = []\n for n, m in indices.iteritems():\n if m is not None and m == v:\n d.append(n)\n indices_organized[v] = d\n\n dups = dict()\n for k, v in indices_organized.iteritems():\n if len(v) > 1:\n dups[k] = v\n skip_bibs.extend(v)\n return dups, sorted(skip_bibs)", "title": "" }, { "docid": "f161fc1dadb26c5450bff890f4ee16e1", "score": "0.53465444", "text": "def iterate_mb_idxs(data_length, minibatch_size, shuffle=False):\n if shuffle:\n indexes = np.arange(data_length)\n 
np.random.shuffle(indexes)\n for start_idx in range(0, data_length - minibatch_size + 1, minibatch_size):\n batch = slice(start_idx, start_idx + minibatch_size)\n if shuffle:\n batch = indexes[batch]\n yield batch", "title": "" }, { "docid": "2e5779653487b31824e5a8e18ba01ebb", "score": "0.5334865", "text": "def get_matching_indexes(self, possible_hash, possible_range):\n matches = [\n index\n for index in self.iter_query_indexes()\n if index.hash_key in possible_hash\n ]\n range_matches = [\n index for index in matches if index.range_key in possible_range\n ]\n if range_matches:\n return range_matches\n return matches", "title": "" }, { "docid": "80c500ba38082983144155f6e3b67542", "score": "0.5333845", "text": "def extended_partition(outcomes, indices, part, ctr):\n return frozenset(frozenset(o for o in outcomes if ctr(o[i] for i in indices) in p) for p in part)", "title": "" }, { "docid": "1993a8968a668d61893db154b513e181", "score": "0.5331605", "text": "def take(items, indices):\n try:\n return [items[index] for index in indices]\n except TypeError:\n return items[indices]", "title": "" }, { "docid": "76056b2f1f93e74814cf50b71eb8f946", "score": "0.5320165", "text": "def _get_indexes(obj: DataArray | Dataset, key: Hashable) -> list[Hashable]:\n return [k for k in _get_all(obj, key) if k in obj._indexes]", "title": "" }, { "docid": "eb888a649951175828b0ec60e3280fab", "score": "0.5311824", "text": "def _write_index_items_conditional(self, items):\n for item in items:\n self._write_index_item_conditional(item)", "title": "" }, { "docid": "caf0b256a2f9fd5606b454e52012757c", "score": "0.52895576", "text": "def generate_matches(self, nodes: List[NL]) -> Iterator[Tuple[int, _Results]]:\n r: _Results = {}\n if nodes and self.match(nodes[0], r):\n yield 1, r", "title": "" }, { "docid": "817b82cb9b6eb2b81b5bce9f8d25d474", "score": "0.5287231", "text": "def indices(x, y):\n return [(i, j) for i in range(x) for j in range(y)]", "title": "" }, { "docid": "2d75e89e00ee52ae6dce2369cd42b23e", "score": "0.52855736", "text": "def iter_edges(self):\r\n \r\n for idx, edges in enumerate(self.edges):\r\n for edge in edges:\r\n if edge.v1.id == idx: # condition to ensure that we return each edge only once\r\n yield edge", "title": "" }, { "docid": "2bfd732c155b04b2e6609bf6c252aab6", "score": "0.52842975", "text": "def __contains__(self, item):\n indices = self.__indexes(item)\n all_set = all(index in self.bitvector for index in indices)\n return all_set", "title": "" }, { "docid": "98bdbf801f2e32f2e0435ede190ac82d", "score": "0.52732235", "text": "def match(self, *args, **kwargs):\n adherence = self.according_to_x_set\n vertices = [self.x_vertices, self.y_vertices][adherence]\n for vertex in vertices:\n self.evaluate_match(vertex, *args, **kwargs)\n return self.matches", "title": "" }, { "docid": "bf2b8f1e120a3033dcc5361eb61c7993", "score": "0.52712506", "text": "def test_indices_from_overall_to_file_specific(self):\n\n these_indices_by_file = (\n feature_vectors._indices_from_overall_to_file_specific(\n OVERALL_INDICES, NUM_OBJECTS_BY_FILE))\n\n self.assertTrue(len(these_indices_by_file) == len(INDICES_BY_FILE))\n\n num_files = len(INDICES_BY_FILE)\n for i in range(num_files):\n self.assertTrue(numpy.array_equal(\n these_indices_by_file[i], INDICES_BY_FILE[i]))", "title": "" }, { "docid": "37b807752897d42cc62fc7d86676b881", "score": "0.5269037", "text": "def contain(self, contains_index):\n ret = []\n for pair in self.edge_table:\n if pair[0] >= contains_index:\n continue\n if pair[1] < contains_index:\n 
continue\n for index in range(0, len(self.edge_table[pair]) - 1):\n ret.append(pair + (index,))\n return ret", "title": "" }, { "docid": "614617bd0673158684ac426603edb17c", "score": "0.52613926", "text": "def aaindex(seq):\n ix = []\n for a in seq:\n if a in d1_to_index:\n ix.append(d1_to_index[a])\n return ix", "title": "" }, { "docid": "0685ab72f395ea7a80f1130ccb23c351", "score": "0.5260794", "text": "def gen_idx():\n for i, geo in enumerate(geos):\n yield (i, geo.bounds, None)", "title": "" }, { "docid": "84a4e2704bc164c83318303f12a7ceb4", "score": "0.5257315", "text": "def combinations_with_replacement(iterable, r):\r\n pool = tuple(iterable)\r\n n = len(pool)\r\n for indices in product(range(n), repeat=r):\r\n if all( x<=y for x,y in pairwise(indices) ): #sorted(indices) == list(indices):\r\n yield tuple(pool[i] for i in indices)", "title": "" }, { "docid": "0876ccd64399ae34a0eb38c7457050a6", "score": "0.5253012", "text": "def non_redundant_index_generator(self):\n si = self.manifold.sindex\n imax = self.manifold.dim - 1 + si\n ind = [si for k in range(self.nid)]\n ind_end = [si for k in range(self.nid)]\n ind_end[0] = imax+1\n while ind != ind_end:\n ordered = True\n for isym in self.sym:\n for k in range(len(isym)-1):\n if ind[isym[k+1]] < ind[isym[k]]:\n ordered = False\n break \n for isym in self.antisym:\n for k in range(len(isym)-1):\n if ind[isym[k+1]] <= ind[isym[k]]:\n ordered = False\n break\n if ordered:\n yield tuple(ind)\n ret = 1\n for pos in range(self.nid-1,-1,-1):\n if ind[pos] != imax:\n ind[pos] += ret\n ret = 0\n elif ret == 1:\n if pos == 0:\n ind[pos] = imax + 1 # end point reached\n else:\n ind[pos] = si\n ret = 1", "title": "" }, { "docid": "4b26593ec476ca9d68db0253973e473e", "score": "0.5251204", "text": "def iter_left_indexes(dims):\n arg = [py3range(dim) for dim in dims]\n for idxs in product(*arg):\n yield idxs", "title": "" }, { "docid": "39e49336ff08854f8a2f6062a50cceca", "score": "0.52507913", "text": "def test_indices(self, indices):\n return self.test_data[indices], self.test_label[indices]", "title": "" }, { "docid": "848294455ac94554c54b5f4caf5ad387", "score": "0.5247983", "text": "def test_list_indices(self):\n\n result = dict_filter(self.simple_dict, {\n 'c': [1, 0, -1, 2, -2]\n })\n\n self.assertEqual(result, {'c': [4, 3, 5, 5, 4]})", "title": "" }, { "docid": "ac458516f324e14a70a7bd182e5d614e", "score": "0.52475077", "text": "def _neighbors(self, objects):\n doubleprime = self._extents.doubleprime\n\n minimal = ~objects\n\n for add in self._Extent.atomic(minimal):\n objects_and_add = objects | add\n extent, intent = doubleprime(objects_and_add)\n if extent & ~objects_and_add & minimal:\n minimal &= ~add\n else:\n yield extent, intent", "title": "" }, { "docid": "9adc50c3554ab210c460904ea7caa263", "score": "0.5241219", "text": "def distribute(bmu_idx: Iterable[int], n_units: int\n ) -> Dict[int, List[int]]:\n unit_matches = {i:[] for i in range(n_units)}\n for data_idx, bmu in enumerate(bmu_idx):\n unit_matches[bmu].append(data_idx)\n return unit_matches", "title": "" }, { "docid": "e978601258ee7f73f9ccf79f0311a15e", "score": "0.5237702", "text": "def __iter__(self):\n self._reindex()\n return (x[0] for x in self._index.values())", "title": "" }, { "docid": "bef7b3b4277d0777ef2925040c1fdfcb", "score": "0.5234648", "text": "def index_transform(self, indices):\n # I need a way to find out the dimension of an element of the codomain\n codomain_element_size = self.basis.domain_element_sizes()[1]\n index_set = []\n for idx_set in grouper(indices, 
codomain_element_size):\n index_set.append(\n self.basis.rev(idx_set[0] if len(idx_set) == 1 else idx_set))\n\n return tuple(index_set)", "title": "" }, { "docid": "de98ff6d40c854faccc38e48dfc4f5dd", "score": "0.5234249", "text": "def fragmentate(self, give_only_index=False):\n list_fragment_indices = []\n still_to_check = set(self.index)\n while still_to_check != set([]):\n indices = self.connected_to(\n pick(still_to_check),\n give_only_index=True)\n still_to_check = still_to_check - indices\n list_fragment_indices.append(indices)\n\n if give_only_index:\n value_to_return = list_fragment_indices\n else:\n value_to_return = [self[indices, :] for\n indices in list_fragment_indices]\n return value_to_return", "title": "" }, { "docid": "8109997fdcf32b50e147906132209132", "score": "0.5226218", "text": "def _indices_in_constraints(self, name, loc):\n cons = {'initial': self._initial_boundary_constraints,\n 'final': self._final_boundary_constraints,\n 'path': self._path_constraints}\n\n all_flat_idxs = set()\n\n for con in cons[loc]:\n if con['name'] != name:\n continue\n\n flat_idxs = get_constraint_flat_idxs(con)\n duplicate_idxs = all_flat_idxs.intersection(flat_idxs)\n if duplicate_idxs:\n s = {'initial': 'initial boundary', 'final': 'final boundary', 'path': 'path'}\n raise ValueError(f'Duplicate constraint in phase {self.pathname}. '\n f'The following indices of `{name}` are used in '\n f'multiple {s[loc]} constraints:\\n{duplicate_idxs}')\n\n all_flat_idxs.update(flat_idxs)\n\n return all_flat_idxs", "title": "" }, { "docid": "08ca389315001120f9dcc0babefe8ac6", "score": "0.5218791", "text": "def __getitem__(self, indices):\n\n return self.Index(indices)", "title": "" }, { "docid": "4d98fd4333210adffb8556a328e43364", "score": "0.52152675", "text": "def pairs(self):\n for i in xrange(len(self)):\n yield (self._first_index[i], self._second_index[i])", "title": "" }, { "docid": "9370a2ff3e51acf814efd8b6fe6f761c", "score": "0.5205744", "text": "def _iterative_matches(self, nodes) -> Iterator[Tuple[int, _Results]]:\n nodelen = len(nodes)\n if 0 >= self.min:\n yield 0, {}\n\n results = []\n # generate matches that use just one alt from self.content\n for alt in self.content:\n for c, r in generate_matches(alt, nodes):\n yield c, r\n results.append((c, r))\n\n # for each match, iterate down the nodes\n while results:\n new_results = []\n for c0, r0 in results:\n # stop if the entire set of nodes has been matched\n if c0 < nodelen and c0 <= self.max:\n for alt in self.content:\n for c1, r1 in generate_matches(alt, nodes[c0:]):\n if c1 > 0:\n r = {}\n r.update(r0)\n r.update(r1)\n yield c0 + c1, r\n new_results.append((c0 + c1, r))\n results = new_results", "title": "" }, { "docid": "8643b06f18451718a9e4de30942ebf60", "score": "0.52045095", "text": "def _get_valid_indices(amps, freqs):\n return index_filter(amps).intersection(index_filter(freqs))", "title": "" }, { "docid": "74d6c72dcac11e4a9bba4583037de718", "score": "0.520355", "text": "def index(self, *args, **kwargs):\n x = None\n start = 0\n end = self.len\n if len(args) == 0:\n return\n\n if len(args) >= 1:\n x = args[0]\n\n if len(args) >= 2:\n if type(args[1]) != int:\n raise TypeError\n if args[1] < 0 or args[1] > self.len:\n raise IndexError\n start = args[1]\n\n if len(args) >= 3:\n if type(args[2]) != int:\n raise TypeError\n if args[2] < 0 or args[2] > self.len:\n raise IndexError\n end = args[2]\n\n curE = self.Front()\n i = 0\n while i < start:\n curE = curE.Next()\n i += 1\n while i < end:\n if x == curE.value:\n return i\n curE = 
curE.Next()\n i += 1\n\n raise ValueError", "title": "" }, { "docid": "17ff3a95b68b51bc9714b71e2b4cee08", "score": "0.51889217", "text": "def combinations(iterable, r):\r\n pool = tuple(iterable)\r\n n = len(pool)\r\n for indices in permutations(range(n), r):\r\n if all( x<=y for x,y in pairwise(indices) ): #sorted(indices) == list(indices):\r\n yield tuple(pool[i] for i in indices)", "title": "" }, { "docid": "7037c879b4ad32eb7b04da4a0c9ab5be", "score": "0.5182607", "text": "def _batched_extract_indices(batch, indices):\n upper = tf.shape(indices, out_type=indices.dtype)[0]\n iota = tf.range(upper, dtype=indices.dtype)\n return tf.gather_nd(batch, tf.stack([iota, indices], axis=1))", "title": "" }, { "docid": "d8f7f0925fcccb606723b335eafbf264", "score": "0.517975", "text": "def indices(a, func): \n return [i for (i, val) in enumerate(a) if func(val)]", "title": "" }, { "docid": "c9915d2b18a9ecf313f41d75f59de13e", "score": "0.5177299", "text": "def find_matches(sh_ids, mg_ids):\n\n # Keep this two nested loops to find multiple duplicates,\n # that will mean data are wrong\n for sh in sh_ids:\n for mg in mg_ids:\n if sh.id == mg.uuid:\n m = {'uuid' : sh.uuid, 'people_id' : mg.id}\n yield m", "title": "" }, { "docid": "88e482660e155a32a0f295216b8e370e", "score": "0.5173621", "text": "def _select_indexes_uar(orig_size: int, new_size: int) -> np.ndarray:\n shuffled = np.arange(orig_size)\n np.random.shuffle(shuffled)\n keep_idx = np.zeros_like(shuffled, dtype=np.bool)\n for i in np.arange(new_size):\n keep_idx[shuffled[i]] = True\n return keep_idx", "title": "" }, { "docid": "eaaa5f47b72df2548f2aea73790d7361", "score": "0.5170974", "text": "def find_duplicate(self, index_only=False, values_only=False):\n dup = []\n idd = []\n for i in range(len(self.data)):\n if self.data[i] in self.data[i + 1 :]:\n if self.data[i] not in dup:\n dup.append(self.data[i])\n idd.append(i)\n if index_only:\n return idd\n elif values_only:\n return dup\n else:\n return list(zip(idd, dup))", "title": "" }, { "docid": "f696d99d3eefdbf52c4b95b5bafd12de", "score": "0.5166638", "text": "def flat_idxs(self, i_lst, j_lst, k_lst):\n return np.ravel_multi_index((i_lst, j_lst, k_lst), self.v.shape)", "title": "" }, { "docid": "9083b7e03897739fa36d5365be9f837b", "score": "0.516032", "text": "def findUnknowns(grid):\n def allIndices(lst, item):\n yield from [i for i, x in enumerate(lst) if x == item]\n for row in grid:\n for col in allIndices(row, 0):\n yield [grid.index(row), col]", "title": "" }, { "docid": "2b2dda3e695e5dd68e6aa4a06d25c708", "score": "0.5151677", "text": "def see (self, index):\r\n for l in self.lists:\r\n l.see (index)", "title": "" }, { "docid": "0ec7444e21555df26682779322ab70d9", "score": "0.5148596", "text": "def find_key(self, keys, targ):\n if isinstance(targ, dict):\n for k, v in targ.items():\n for key in keys:\n if k == key:\n yield [[k], v]\n for path, vn in self.find_key(keys, v):\n yield [[k, *path], vn]\n if isinstance(targ, list):\n for i, v in enumerate(targ):\n for path, vn in self.find_key(keys, v):\n yield [[i, *path], vn]", "title": "" }, { "docid": "a1a457bcfb19a8cfca13cb6c413c5c8d", "score": "0.51425767", "text": "def filteredItems(self):\n indexes = [self.mapToSource( self.proxy.index(r, c) ) for r in xrange(self.proxy.rowCount()) for c in xrange(self.proxy.columnCount())]\n items = [self.model.item( mi ) for mi in indexes]\n uitems = set()\n return [i for i in items if i not in uitems and not uitems.add(i)]", "title": "" }, { "docid": "001b57da843cda66dde98005c1ce1eea", "score": 
"0.5141489", "text": "def __iter__( self ):\n\t\tfor field in self.fields:\n\t\t\tyield field\n\t\t\tfor item in field:\n\t\t\t\tyield item\n\t\tfor index in self.indices:\n\t\t\tindex.table = self.name\n\t\t\tyield index\n\t\tfor constraint in self.constraints:\n\t\t\tyield constraint", "title": "" }, { "docid": "d047f78b669c8cec97f4b654d828c2bf", "score": "0.51376635", "text": "def select_parts_batch(self, args):\n index_file = args[2]\n with open(index_file) as f:\n indexes = f.read().split()\n for index in indexes:\n self.select_parts_single(index)", "title": "" }, { "docid": "5e3d61e8a1fdf0911e7b6605d15c900d", "score": "0.5132376", "text": "def tie_index_match(self):\n tie_indx = numpy.full(self.data['tie_index'].shape, -1, dtype=int)\n indx = self.data['tie_index'] > 0\n tie_indx[indx] = [numpy.where(self.data['index'] == i)[0][0]\n for i in self.data['tie_index'][indx]]\n return tie_indx", "title": "" }, { "docid": "43058a9d423bfd79f6d5218310659f18", "score": "0.5131967", "text": "def itertests(self, es, index, weights = None):\n raise NotImplementedError\n yield", "title": "" }, { "docid": "50470e65e4415684c3954afe941f8037", "score": "0.5119157", "text": "def getindexedvals(inputlist,indexlist):\r\n outputlist=[inputlist[i] for i in indexlist]\r\n return outputlist", "title": "" }, { "docid": "bfc3504f524a1c4ddc8061b029f665d7", "score": "0.51170546", "text": "def split_by_index(self, *index: int) -> Sequence[target_type]:\n index = sorted(set(index))\n ret = []\n tmp = []\n count = 0\n for o in self.__obj:\n if count in index:\n ret.append(tmp)\n tmp = [o]\n else:\n tmp.append(o)\n count += 1\n ret.append(tmp)\n return ret", "title": "" }, { "docid": "a5e055541b1e3d0d73a9475d9240cadf", "score": "0.5116301", "text": "def contains_indices(self):\n return False", "title": "" }, { "docid": "08a19cd3cdccbae0c4e0739cc3c5fe42", "score": "0.51044255", "text": "def puzzle_indices(self):\n for i in range(self.m):\n for j in range(self.n):\n cell = self.cells[i][j]\n yield i, j, cell", "title": "" }, { "docid": "cb11d3c2eb09ea1e76a9df8cb6658b1f", "score": "0.5104038", "text": "def index_based(index,value,input_list):\n \n if index in input_list:\n return True\n else:\n return False", "title": "" }, { "docid": "63296f679ef5d8b431e00f10610f1f3c", "score": "0.5092264", "text": "def find_indexes(graph, edge_index):\n aux = graph.edge_index.T\n indexes = []\n for j in range(len(aux)):\n indexes.append(np.nonzero([bool(torch.all(i)) for i in edge_index.T == aux[j]])[0][0])\n values = np.zeros(len(edge_index.T)).astype(int)\n values[indexes] = np.ones(len(indexes))\n return values", "title": "" }, { "docid": "e44aca1f8a7a182a4dfdbe9a63887a80", "score": "0.50888145", "text": "def indices(self) -> List[SymIndex]:\r\n indices_trans = self.indices_trans\r\n arrows_trans = self.arrows_trans\r\n \r\n cumul_grouping = np.insert(np.cumsum(self.index_groups),0,0)\r\n indices = [0]*self.ndim\r\n for n in range(self.ndim):\r\n curr_group = slice(cumul_grouping[n],cumul_grouping[n+1])\r\n # Note: arrow for combined indices taken from the first index in the group.\r\n indices[n] = SymTensor.fuse_indices(indices_trans[curr_group], (arrows_trans[curr_group] != arrows_trans[cumul_grouping[n]]))\r\n return indices", "title": "" }, { "docid": "1034e4e74b3129dab4ebdfc0ed639974", "score": "0.5075979", "text": "def iterate_patches(\n claims: pandas.DataFrame,\n) -> Iterator[Tuple[int, Tuple[slice, slice]]]:\n for _, claim in claims.iterrows():\n index = (slice(claim.x_start, claim.x_stop),\n slice(claim.y_start, 
claim.y_stop))\n yield claim.claim_id, index", "title": "" }, { "docid": "266d9397d8118a0256d7d26abb486b3b", "score": "0.5073652", "text": "def get_odd_indices(items):\n\n return items[1::2]", "title": "" }, { "docid": "d5dcc7c1ab27e1285e36000a84c55868", "score": "0.50688136", "text": "def iterfinds(self, key, yes={}, no={}):\n \n #iterate over list of all subelements given by key\n for subelement in self.__gen_dict_value(key, self):\n match = True\n \n #iterate over all key, value pairs in yes\n for yes_key, yes_value in yes.iteritems():\n key_match = False\n \n #iterate over list of all values associated with kwarg_key in the subelement\n for value in self.__gen_dict_value(yes_key, subelement):\n if value == yes_value:\n key_match = True\n break\n \n #if a kwarg_key-kwarg_value match is not found, then the subelement is not a match\n if not key_match:\n match = False\n break\n \n #iterate over all key, value pairs in no\n if match:\n for no_key, no_value in no.iteritems():\n key_match = True\n \n #iterate over list of all values associated with kwarg_key in the subelement\n for value in self.__gen_dict_value(no_key, subelement):\n if value == no_value:\n key_match = False\n break\n \n #if a kwarg_key-kwarg_value match is not found, then the subelement is not a match\n if not key_match:\n match = False\n break\n \n #if match is still true, yield subelement\n if match:\n yield subelement", "title": "" }, { "docid": "6d5500adf1405453f1c4672dea09f9b9", "score": "0.5066287", "text": "def iter_serp_items(self):\n\n for key, value in self.search_results.items():\n if isinstance(value, list):\n for i, item in enumerate(value):\n if isinstance(item, dict) and item['link']:\n yield (key, i)", "title": "" }, { "docid": "edc5dd80b0640ac658b97bb15fa817e1", "score": "0.5064735", "text": "def map_(index: int):", "title": "" }, { "docid": "31895c5e9d4d46b312d247bc3bdd5046", "score": "0.5064302", "text": "def test_multi_index(self):\n self.index.delete_instance()\n\n indexes = [Index.create(name='idx-%s' % i) for i in range(3)]\n document = Document.create(content='hueybear')\n for index in indexes:\n index.index(\n document.content,\n document)\n\n self.assertEqual(Document.select().count(), 1)\n self.assertEqual(Index.select().count(), 3)\n self.assertEqual(IndexDocument.select().count(), 3)\n query = (IndexDocument\n .select(Index.name, IndexDocument.document)\n .join(Index)\n .order_by(Index.name)\n .dicts())\n idx_doc_data = [idx_doc for idx_doc in query]\n self.assertEqual(idx_doc_data, [\n {'document': document.get_id(), 'name': 'idx-0'},\n {'document': document.get_id(), 'name': 'idx-1'},\n {'document': document.get_id(), 'name': 'idx-2'},\n ])", "title": "" }, { "docid": "4bfd7ea1dfefc03e810ae56bc0e7d532", "score": "0.50615853", "text": "def _separable_pair(name, index):\r\n items = index.itervalues()\r\n reference = next(items)[name]\r\n\r\n return all([item[name] == reference for item in items])", "title": "" }, { "docid": "3ddaea19bd715de2d9f86de5cb40dcb9", "score": "0.5059448", "text": "def _tag_similarity_generator():\n \n # the artists that were tagged at least once\n qs_tagged_artists = Artist.objects.filter(artisttag__isnull=False).distinct()\n count = qs_tagged_artists.count()\n \n for a1, count in zip( qs_tagged_artists[1:].iterator(), \\\n range(1, count)):\n\n # obtain only first count entities\n for a2 in qs_tagged_artists[:count].iterator():\n if a1.artisttag_set.filter(\n tag__id__in=a2.artisttag_set.values_list('tag__id')\n ).distinct().count() > 45:\n yield (a1, a2)", "title": 
"" }, { "docid": "0e405579097953fe9642494b4e2a447e", "score": "0.50589097", "text": "def index (self, index):\r\n for l in self.lists:\r\n l.index (index)", "title": "" }, { "docid": "ef53355d5fe5037825cb9ff19a44a156", "score": "0.50553995", "text": "def generateIndices(self):\n size = 1\n for dim in self.dims:\n size *= dim\n for i in range(size):\n idx = ()\n j = i\n for ni in reversed(self.dims):\n idx = (j % ni,) + idx\n j = int(j / ni)\n yield idx", "title": "" } ]
9afcd2fe6299150fc06b983f24909787
Successful job submit with incorrect image link
[ { "docid": "6e4bfc6a94d06c4c548f190db9772e1e", "score": "0.61634725", "text": "def test_submit_successful_bad_link(self):\n payload = json.dumps({\n \"count\": 2,\n \"visits\": [\n {\n \"store_id\": \"S00339218\",\n \"image_url\": [\n \"https://www.gstatic.com/webp/galleryy/2.jpg\", # incorrect link\n \"https://www.gstatic.com/webp/gallery/3.jpg\"\n ],\n \"visit_time\": \"2020-09-02T00:00:53\"\n },\n {\n \"store_id\": \"S01408764\",\n \"image_url\": [\n \"https://www.gstatic.com/webp/gallery/3.jpg\"\n ],\n \"visit_time\": \"2020-09-02T00:00:56\"\n }\n ]\n })\n r = self.app.post('http://localhost:5000/api/submit',\n headers={\"Content-Type\": \"application/json\"}, data=payload)\n self.assertEqual(unicode, type(r.json['job_id'])) # Should return a job id\n self.assertEqual(r.status_code, 201) # Should return status 201\n\n msg = \"STATUS Check - <Success> - link|job success|status failed\"\n table = [[str(msg)]]\n output = tabulate(table, tablefmt='grid')\n print(output)\n print(r.json)\n print(r.status_code)\n print(\"\")", "title": "" } ]
[ { "docid": "d7ad10b1cdc5147deb72870a688c0aaf", "score": "0.66116846", "text": "def saveimage(self, info=False):\r\n request_image_content = requests.get(self.job_content)\r\n if request_image_content.status_code == 404:\r\n print(\"Нет подключения к https://elksale.xyz/\")\r\n return\r\n\r\n with open(self.job + \".png\", \"wb\") as file:\r\n file.write(request_image_content.content)\r\n file.close()\r\n if info:\r\n print('Сохранение: \"' + self.image_name() + '\" завершено.')\r\n return", "title": "" }, { "docid": "5dbb392ad32186b9945f79fbedd5f0bd", "score": "0.65932643", "text": "def test_softwareimages_post(self):\n pass", "title": "" }, { "docid": "a2655276d4a66e9526d038fd90261ea5", "score": "0.65842366", "text": "def test_image_post(self):\n pass", "title": "" }, { "docid": "8232a06331e98fb93b2239e3e1fa4982", "score": "0.63092446", "text": "def test_anon_create_job(self, mock_handle_job):\n\n response = self.client.post(self.image_url)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "76d6caed99a80b96ee6d7481347602c6", "score": "0.6270274", "text": "def test_image_ingestion_succeeds(self):\n req = {\n \"model\": \"image\",\n \"action\": \"INGEST_UPSTREAM\",\n \"callback_url\": bottle_url,\n }\n res = requests.post(f\"{ingestion_server}/task\", json=req)\n stat_msg = \"The job should launch successfully and return 202 ACCEPTED.\"\n self.assertEqual(res.status_code, 202, msg=stat_msg)\n\n # Wait for the task to send us a callback.\n assert self.__class__.cb_queue.get(timeout=120) == \"CALLBACK!\"", "title": "" }, { "docid": "5073acea738ff7d84f784bb90094d25d", "score": "0.6181062", "text": "def post(self, request, intern_id):\n try:\n intern = Intern.objects.get(pk=intern_id)\n image = request.FILES[\"image\"]\n\n serializer = JobSerializer(data=request.data)\n if serializer.is_valid():\n serializer.validated_data[\"intern\"] = intern\n serializer.validated_data[\"job_logo\"] = upload_image(image)\n serializer.save()\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors)\n except Exception as e:\n return Response({\"exception\": f\"{e}\"}, status=status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "f252fed682e3eeddc8e2857ce027d049", "score": "0.6149725", "text": "def post_image_to_image_url(post_package):\n\n status = True\n\n duplicate_text_head = 'Destination path'\n duplicate_text_tail = 'already exists'\n\n #image POST also needs:\n #'deployment': deployment ID number\n post_data = {'deployment': post_package['deployment']}\n params = dict(username=post_package['username'], api_key=post_package['user_apikey'])\n\n url = urlparse.urljoin(server_root, post_package['image_object_api_path'])\n if os.path.isfile(os.path.join(post_package['deployment_path'], post_package['image_name'])):\n image_file = {'img': open(os.path.join(post_package['deployment_path'], post_package['image_name']), 'rb')}\n else:\n print 'FAILED: expect image missing at', os.path.join(post_package['deployment_path'], post_package['image_name'])\n return False\n\n r = requests.post(url, files=image_file, params=params, data=post_data)\n\n if duplicate_text_head in r.text and duplicate_text_tail in r.text:\n # this image already exists, we have nothing to do.\n status = True\n elif not (r.status_code == requests.codes.created):\n print 'FAILED: Server returned', r.status_code\n print 'MESSAGE: Full message from server follows:'\n print r.text\n status = False\n\n return status", "title": "" }, { "docid": 
"db42ccdaf3da60bfbab40ed0330b7dd3", "score": "0.6133388", "text": "def test_upload_innocent(self) -> None:\n self.helper.upload_media(\n self.upload_resource, SMALL_PNG, tok=self.tok, expect_code=200\n )", "title": "" }, { "docid": "db42ccdaf3da60bfbab40ed0330b7dd3", "score": "0.6133388", "text": "def test_upload_innocent(self) -> None:\n self.helper.upload_media(\n self.upload_resource, SMALL_PNG, tok=self.tok, expect_code=200\n )", "title": "" }, { "docid": "6afb8c88343b9528c7044d49e1f8629c", "score": "0.60878694", "text": "def image(update: Update, context: CallbackContext) -> None:\n url = update.message.text\n url = format_url(url)\n if validate_url(url):\n update.message.reply_text('Processing your request. This may take up to a minute. Please wait.')\n file_name = make_image(url)\n update.message.reply_photo(photo=open(file_name, 'rb'))\n else:\n update.message.reply_text(\"Please try a valid website address (e.g: www.google.com, https://www.google.com)\")", "title": "" }, { "docid": "83abce25735a9344fb741168f2ecd209", "score": "0.60734695", "text": "def verify_images():\n pass", "title": "" }, { "docid": "aafe4e5a3a743110e3e649c1fce2aea3", "score": "0.6065185", "text": "def test_invalid_image_url(self):\n self.invoice_data[\"image_url\"] = \"jgjgjgj\"\n self.invoice_data[\"order_id\"] = self.details_dict['order_id']\n response = self.query_with_token(\n self.access_token,\n upload_invoice.format(**self.invoice_data))\n expected_message = \"[Errno 2] No such file or directory: 'jgjgjgj'\"\n self.assertEqual(\n expected_message,\n response[\"errors\"][0][\"message\"])", "title": "" }, { "docid": "8cf3921db952b6a659fe1a6e80c429ec", "score": "0.6036606", "text": "def success(image_url):\n try:\n return render_template(\"success.html\",image_url=\"https://vmoksha-ali.s3.amazonaws.com/best-ugadi-wishes-and-messages.jpg\")\n except Exception as e:\n log.error(colored(str(e),\"red\"))", "title": "" }, { "docid": "37a6a1a028a7ec44f9fb5ccf5d2ec5ab", "score": "0.6036525", "text": "def update_image(signal, job_id):\n if signal == \"DONE\":\n job = Queue(connection=conn).fetch_job(job_id)\n img = job.result[1]\n image_filename = job.result[2]\n if img:\n img.to_file(image_directory+image_filename)\n print(image_filename)\n return app.get_asset_url(image_filename)\n return app.get_asset_url('blank.png')", "title": "" }, { "docid": "ca53e5f096f74114a6c49c7dd3f655bc", "score": "0.60067207", "text": "def test_for_invalid_image(self):\n\t\turl = image_upload_url(self.recipe.id)\n\t\tres = self.client.post(url, {'image':'notimage'}, format='multipart')\n\n\t\tself.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "7deb2a747023a944dab89da84f0095b3", "score": "0.60048294", "text": "def test_get_images_google_images_post(self):\n pass", "title": "" }, { "docid": "4eaca50201b6dd4b38e2981be168da28", "score": "0.6002332", "text": "def test_post_409(self, rnd_img):\n rnd_img['content'].seek(0)\n self.client.put('/ldp/post_409', data=rnd_img['content'], headers={\n 'Content-Disposition' : 'attachment; filename={}'.format(\n rnd_img['filename'])})\n assert self.client.post('/ldp/post_409').status_code == 409", "title": "" }, { "docid": "2e8c905df54987f67d243e9305d287a5", "score": "0.59976465", "text": "def submit():", "title": "" }, { "docid": "27f64c51d1c939250125a59574572cb7", "score": "0.59869945", "text": "def _upload(self, image_path):\n self._show_message(\"Uploading..\", \"\")\n return self.__imgur.upload_image(\n image_path, description=\"Uploaded with imgur-shot 
tool \"\n \"(https://github.com/poxip/imgur-shot)\"\n )", "title": "" }, { "docid": "28b738e252d029d0a29353048cf4ad35", "score": "0.5973857", "text": "def test_upload_style_image_wrong_input(self):\n self.client.force_authenticate(self.user2)\n\n img_file = generate_image_file(\"test2\")\n orig_img = Images.objects.get(id=1)\n data = {\n \"img_name\": \"test_img2\",\n \"styled_image\": img_file,\n \"original_image\": orig_img.id,\n }\n response = self.client.post(self.url_styled, data, format=\"multipart\")\n # Try stylizing original image which owner is user1\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "9354e61bcb70c7765daf5569496d2e57", "score": "0.5966317", "text": "def notify_bad_image(self, **kwargs):\n pass", "title": "" }, { "docid": "58b08ccb28dbfe79d2e9074db2b69706", "score": "0.5947266", "text": "def test_png_url(self):\n link = 'https://upload.wikimedia.org/'\n link += 'wikipedia/commons/f/fc/MP_sounds.png'\n bot = UploadRobot(url=[link], target_site=self.get_site(),\n **self.params)\n bot.run()", "title": "" }, { "docid": "1c3e7c4915b529ae4daf89e1f56cc308", "score": "0.5946019", "text": "def test_upload_image(self):\n pass", "title": "" }, { "docid": "b50dfc7c84c22ebc568acd5a87a82b61", "score": "0.59275687", "text": "def test_upload_invalid_image(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "2f4dfdd3f46d8cbbd0934f72b683c52b", "score": "0.5927388", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {'image':'notimage'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "a6282dbe013a5732cdfc4db252de9ab7", "score": "0.59204996", "text": "def test_upload_avatar_image_bad_request(self):\n url = image_upload_url(self.portfolio.id)\n res = self.client.post(url, {'avatar': 'notimage'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "6a65b295144b578a07eccf41cadd7a98", "score": "0.5915614", "text": "def test_users_id_image_post(self):\n pass", "title": "" }, { "docid": "37541e3e7ac30730285a6b6bb4142bc6", "score": "0.59152126", "text": "def test_upload_image_invalid(self):\n url = upload_image_url(self.recipe.id)\n res = self.client.post(url, {'image': 'not image'}, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "f9b65ef787034ee383d831ef292b27e3", "score": "0.5912487", "text": "def test_upload_invalid_image_to_recipe(self):\n url = image_upload_url(self.recipe.id)\n payload = {'image': 'not-image'}\n response = self.client.post(url, payload, format='multipart')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "ba0e0aee12d36fb4956a6a86618699f2", "score": "0.59011775", "text": "def doIBPost(url, payload, name=None, email=None, subject=None, message=None, success=None):\n\n \n payload = prepPayload(payload)\n\n # Get and approve mime type\n mimeType = mimetypes.guess_type(payload.name)[0]\n allowedTypes = ['image/png','image/gif','image/jpeg']\n if mimeType not in allowedTypes:\n raise TypeError('Only PNG, GIF and JPG allowed.')\n\n\n\n\n # Make sure we have all the neaded values\n if not name: name = genRandomText(2)\n if not subject: subject = 
genRandomText()\n if not email:\n email = '%s@%s.%s' % tuple(genRandomText(2).split()+[random.choice(['com','net','org']),])\n if not message: \n message = genRandomText(random.randint(20, 45))+(random.randint(45, 65)*'\\n')+genRandomText(random.randint(35, 55))+(random.randint(45, 65)*'\\n')+genRandomText(random.randint(20, 45))\n\n\n\n br = mechanize.Browser()\n br.set_proxies({\"http\" : \"http://localhost:8118\"})\n\n\n br.open(url)\n\n b1 = br.response()\n br.select_form(nr=0)\n try:\n br.form['name']=name\n except ControlNotFoundError:\n pass\n try:\n br.form['email']=email\n except ControlNotFoundError:\n pass\n\n br.form['subject']=subject\n br.form['message']=message\n br.form.add_file(payload, mimeType, payload.name.split(\"/\")[-1])\n\n assert br.viewing_html()\n #print '\\ntitle\\n',br.title()\n #print '\\nurl\\n',b1.geturl()\n #print '\\ninfo\\n',b1.info()\n #print '\\nhtml\\n',b1.read()\n #print '\\nform\\n',br.form\n\n\n b2 = br.submit(nr=0)\n\n assert br.viewing_html()\n result = b2.read()\n #print '\\ntitle\\n',br.title()\n #print '\\nurl\\n',b2.geturl()\n #print '\\ninfo\\n',b2.info()\n #print '\\nhtml\\n',result\n \n\n \n if not success:\n return result\n else:\n if success in result:\n return True\n else:\n return False", "title": "" }, { "docid": "aff4fec298146c7d83768dfd41225178", "score": "0.58964235", "text": "def test_img_upload_bad_req(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "a77c2ecc210661ed9c5542e4b83e20b8", "score": "0.5894491", "text": "def _prepare_image(sconnect, nodeid, build, verbose):\n verbose(\"Requesting image of type \" + build, end=\": \")\n sconnect.post(\"/node/{}/prepare_image\".format(nodeid), data={\"type\": build})\n # In case there is no config for the appliance, the build will fail.\n # Checking the status when build fails will result in error 500.\n # Checking immediately, the explanation will have no error reason.\n # By delaying after the build, we can get correct error on check.\n time.sleep(0.5)\n verbose(\"Done.\")", "title": "" }, { "docid": "36bad34f90e5da14f9f72bac40f246eb", "score": "0.5893537", "text": "def test_upload_invalid_image_to_synthesize(self):\n url = image_upload_url(self.synthe.id)\n res = self.client.post(url, {'image': 'invalid image'}, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "dbafbb7f42d330259511f85a9241269d", "score": "0.5892227", "text": "def test_post_snapshot_no_ref_result_ko(self):\n with open('snapshotServer/tests/data/engie.png', 'rb') as fp:\n response = self.client.post(reverse('uploadStepRef'), data={'stepResult': self.sr_ko.id, 'image': fp})\n self.assertEqual(response.status_code, 200, 'status code should be 201: ' + str(response.content))\n time.sleep(0.5) # wait field computing\n \n uploaded_reference = StepReference.objects.filter(testCase=self.tcs1.testCase, testStep__id=1, version=Version.objects.get(pk=1), environment=TestEnvironment.objects.get(id=1)).last()\n self.assertIsNone(uploaded_reference, \"the uploaded snapshot should not be recorded\")\n\n self.assertFalse(os.path.isfile(os.path.join(self.reference_dir, 'engie.png')))", "title": "" }, { "docid": "2f2f09cd57457dcac8eb207cb08dd3c3", "score": "0.5872811", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {'image': 
'notimage'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "2f2f09cd57457dcac8eb207cb08dd3c3", "score": "0.5872811", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "b333aa29eaa0c899867fd957d4a502fb", "score": "0.58576596", "text": "def test_user_invalid_create_job(self, mock_handle_job):\n\n self.client.force_authenticate(self.user)\n response = self.client.post(self.image_url, data={'job_type': 'invalid_job'})\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "89a77bd4f50e00099e12d1958a4523d2", "score": "0.5830838", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n payload = {'image': 'image-sample'}\n res = self.client.post(url, payload, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "b45e8a1d33a118d251440f123c525342", "score": "0.5814305", "text": "def test_for_uploading_image_to_recipe(self):\n\t\turl = image_upload_url(self.recipe.id)\n\t\twith tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n\t\t\timg = Image.new('RGB', (10, 10))\n\t\t\timg.save(ntf, format='JPEG')\n\t\t\tntf.seek(0)\n\t\t\tres = self.client.post(url, {'image': ntf}, format='multipart')\n\t\t\n\t\tself.recipe.refresh_from_db()\n\t\tself.assertEqual(res.status_code, status.HTTP_200_OK)\n\t\tself.assertIn('image', res.data)\n\t\tself.assertTrue(os.path.exists(self.recipe.image.path))", "title": "" }, { "docid": "8907fcd826fd5fa9292a1ccb21ccf19f", "score": "0.5809405", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "8907fcd826fd5fa9292a1ccb21ccf19f", "score": "0.5809405", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "2c08d1fc1537bc99207915d87d1129a9", "score": "0.58085227", "text": "def test_insert_image(self):\n img = settings.STATIC_ROOT + \"/rejected\" + '.jpeg'\n info = ImageProcessorModel(photo=img, image_verified=True, image_rejected=False)\n self.assertEqual(info.photo, img)\n self.assertEqual(info.image_verified, True)\n self.assertEqual(info.image_rejected, False)", "title": "" }, { "docid": "12556413774e14984eb7046424979f8f", "score": "0.5802388", "text": "def test_submit_missing_surl(self):\n self.setup_gridsite_environment()\n self.push_delegation()\n job = {'transfers': [{'destinations': ['root://dest.ch/file']}]}\n\n self.app.put(url=\"/jobs\",\n params=json.dumps(job),\n status=400)\n\n job = {'transfers': [{'source': 'root://source.es/file'}]}\n\n error = self.app.put(\n url=\"/jobs\",\n content_type='application/json',\n params=json.dumps(job),\n status=400\n ).json\n\n self.assertEquals(error['status'], '400 Bad Request')\n self.assertEquals(error['message'], 'No transfers or namespace operations specified')", "title": "" }, { "docid": "7a44d00d5ba2712d9847fefafb703081", "score": "0.5800745", "text": "def test_submit_function(self):\n\n data = {\n 
'parent' : None,\n 'template' : None,\n 'title' : 'hc unit - do submit function: %d' % time.time(),\n 'pagetext' : \"test page text\\n[[Image(%s)]]\" % (IMAGENAME),\n 'upload' : IMAGEPATH,\n 'access' : 'Wiki page anyone can edit',\n 'lockpage' : False,\n 'tags' : ['hubcheck','hc'],\n 'summary' : 'test summary text',\n }\n self.po.populate_form(data)\n self.po.submit_form()", "title": "" }, { "docid": "4532fc4f747f96944dae7fed37a985ff", "score": "0.5791471", "text": "def UploadURLImage():\r\n return render_template('UploadURLImage.html')", "title": "" }, { "docid": "9c0f1d9d03482439fe9bd4fbf7b7ffc1", "score": "0.5789489", "text": "def submit_image(self):\r\n self.step_2 = LabelFrame(self.root, text=\" 2. Upload Your Image: \", bg=\"SteelBlue2\")\r\n self.step_2.grid(row=3, columnspan=7, sticky='W', padx=20, pady=20, ipadx=5, ipady=5)\r\n self.upload_image()", "title": "" }, { "docid": "d04f74d428c41feca006476955ccb0c1", "score": "0.5785538", "text": "def save_fail_img(self):\n self.save_img(\"Filemanager\")", "title": "" }, { "docid": "9478527817729ff78f3e6c4ff156eb19", "score": "0.5785267", "text": "def test_anon_get_jobs(self):\n\n response = self.client.get(self.image_url)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "273968299bc0a1fb408ad40ca5729ac6", "score": "0.5778738", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url, {\"image\": \"no image\"}, format=\"multipart\")\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "2c12fc7bbfe7199b8f8c0455f0ffbf3f", "score": "0.5777744", "text": "def test_url_upload_succeeds(self):\n self.client.ik_request.request = MagicMock(\n return_value=get_mocked_success_resp()\n )\n resp = self.client.upload(file=\"example.com/abc.jpg\", file_name=self.filename)\n self.assertIsNone(resp[\"error\"])\n self.assertIsNotNone(resp[\"response\"])", "title": "" }, { "docid": "ae72b98bb4e0851e08ad97bab9db6fa5", "score": "0.5777287", "text": "def _upload_image(self, filepath):\n\n \"\"\"\n TODO: figure out how to send the image from filepath to\n pasteall.org server\n \"\"\"\n return None", "title": "" }, { "docid": "e468f000245016c97f92fe50a23e5936", "score": "0.5774323", "text": "def test_publishing_publicly_visible_image_post_with_new_hashtag_should_use_image(self):\n user = make_user()\n\n headers = make_authentication_headers_for_user(user)\n\n image = Image.new('RGB', (100, 100))\n tmp_file = tempfile.NamedTemporaryFile(suffix='.jpg')\n image.save(tmp_file)\n tmp_file.seek(0)\n\n hashtag_name = make_hashtag_name()\n\n post_text = '#%s' % hashtag_name\n\n post = user.create_public_post(text=post_text, image=ImageFile(tmp_file), is_draft=True)\n\n url = self._get_url(post=post)\n\n response = self.client.post(url, **headers, format='multipart')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n post = Post.objects.get(pk=post.pk)\n\n self.assertEqual(post.status, Post.STATUS_PROCESSING)\n\n # Run the process handled by a worker\n get_worker('high', worker_class=SimpleWorker).work(burst=True)\n\n post.refresh_from_db()\n\n self.assertEqual(post.status, Post.STATUS_PUBLISHED)\n\n hashtag = Hashtag.objects.get(name=hashtag_name)\n self.assertTrue(hashtag.has_image())", "title": "" }, { "docid": "45fa16d7e3af1e3ebf76755007c5176f", "score": "0.5771844", "text": "def test_user_create_job_diff_user(self, mock_handle_job):\n\n self.image.user_id = self.admin.id\n self.image.save()\n\n 
self.client.force_authenticate(self.user)\n response = self.client.post(self.image_url)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "953a21870a0e48da48a6d34f49f37388", "score": "0.57662314", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n res = self.client.post(url,\n {\"image\": \"notanimage\"},\n format=\"multipart\")\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "76810f588460a99144772989a6a1c4fa", "score": "0.5762286", "text": "def test_classify_no_image(client):\n # Since the API checks if the image is there before anything else,\n # we don't need to include anything with the request\n res = client.post(\"/classify\", data=dict())\n # Check if the correct error code is returned\n assert b\"No image submitted\" in res.data", "title": "" }, { "docid": "520b957a210d91eac5c64efa98d1449e", "score": "0.5729533", "text": "def test_post_snapshot_no_ref(self):\n with open('snapshotServer/tests/data/replyDetection.json.png', 'rb') as fp:\n response = self.client.post(reverse('uploadStepRef'), data={'stepResult': self.sr1.id, 'image': fp})\n self.assertEqual(response.status_code, 201, 'status code should be 201: ' + str(response.content))\n time.sleep(1) # wait field computing\n \n uploaded_reference = StepReference.objects.filter(testCase=self.tcs1.testCase, testStep__id=1, version=Version.objects.get(pk=1), environment=TestEnvironment.objects.get(id=1)).last()\n self.assertIsNotNone(uploaded_reference, \"the uploaded snapshot should be recorded\")\n \n # check computing has been done\n self.assertIsNotNone(uploaded_reference.field_detection_data)\n self.assertIsNotNone(uploaded_reference.field_detection_date)\n self.assertEqual(uploaded_reference.field_detection_version, 'afcc45')\n \n self.assertTrue(os.path.isfile(os.path.join(self.reference_dir, 'replyDetection.json.png')))", "title": "" }, { "docid": "bbc51819ad48832c785342089e11c3a2", "score": "0.5725684", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.recipe.id)\n payload = {'image': 'notAnImage'}\n res = self.client.post(url, payload, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "cc0165f90f0649d0291a03ab30ea2254", "score": "0.572203", "text": "async def check_image(ctx: commands.Context, bot: commands.AutoShardedBot, title: str, name: str, link: str = \"\") -> bool:\n if not link:\n link = name\n if (name[-3:] in [\"png\", \"jpg\", \"gif\"] or\n name[-4:] == \"jpeg\"):\n return True\n else:\n em = discord.Embed(title=\"Error\",\n description=\"An invalid image was used.\"\n \"The supported formats are `png`, `jpg`, `jpeg` & `gif`\",\n colour=discord.Colour.red())\n await ctx.send(embed=em)\n return False", "title": "" }, { "docid": "93fa38682336c49cc66c0f24a95cea7e", "score": "0.5718513", "text": "def test_query_thumbnail_job_exist(self):\n source = {'key': self.key}\n resp = self.client.create_thumbnail_job(self.pipeline_name, source)\n nose.tools.assert_is_not_none(resp.job_id)\n job_id = ''\n if self.PY3:\n job_id = resp.job_id\n else:\n job_id = resp.job_id.encode(encoding='UTF-8')\n\n resp_query = self.client.get_thumbnail_job(job_id)\n nose.tools.assert_equal(job_id, resp_query.job_id)", "title": "" }, { "docid": "f1c303e37c32716d1aee322686783773", "score": "0.5708953", "text": "def test_upload_image_to_recip(self):\n url = image_upload_url(self.recipe.id)\n with 
tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(url, {'image': ntf}, format='multipart')\n\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "title": "" }, { "docid": "e7ed49a01b42449732c1a4bbb3eff718", "score": "0.5704766", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.spot.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "3f3af37d1c9015ae11b4cda7282a9071", "score": "0.56929183", "text": "def test_image_upload_and_download(self) -> None:\n self.login(self.TEST_LEARNER_EMAIL)\n user_id = user_services.get_user_id_from_username('testlearneruser')\n csrf_token = base.CsrfTokenManager.create_csrf_token(user_id)\n\n with utils.open_file(\n os.path.join(feconf.TESTS_DATA_DIR, 'img.png'),\n 'rb', encoding=None\n ) as f:\n raw_image = f.read()\n with self.swap(self, 'testapp', self.testapp):\n response_dict = self.post_json(\n '/mock_upload/exploration/0', {'filename': 'test.png'},\n csrf_token=csrf_token,\n upload_files=[('image', 'unused_filename', raw_image)]\n )\n filename = response_dict['filename']\n self.logout()", "title": "" }, { "docid": "e0945758eba642e694755baa6ff343e2", "score": "0.5691178", "text": "def storeUploadImage(url, inUser, msg):\n img = Image()\n #img.author = inUser\n img.authorID = inUser._id\n #img.created_at = datetime.now()\n #img.locLabel = user.locLabel\n img.url = url\n setupImageLocation(img, msg) \n LoadedImage.loadedImages.append(img)\n img.save()\n return img", "title": "" }, { "docid": "e4aca960934a5c96432f6703be633975", "score": "0.5686368", "text": "def ImageUrl():\r\n\r\n # Fetch the Image from the Provided URL\r\n url = request.form['url']\r\n filename = url.split('/')[-1]\r\n if allowed_file(filename):\r\n urllib.request.urlretrieve(url, f\"static/{filename}\")\r\n\r\n results = image_preprocessing(filename)\r\n if results is None:\r\n return render_template('Error.html')\r\n else:\r\n\r\n img_preds = results[0]\r\n frame = results[1]\r\n faces_detected = results[2]\r\n\r\n results2 = predictions_results(img_preds, frame, faces_detected, filename)\r\n full_filename = os.path.join(app.config['UPLOAD_FOLDER'], filename)\r\n\r\n return render_template('UploadURLImage.html', user_image=full_filename,\r\n number_of_face=\"Number of faces detected: {}\".format(results2[0]),\r\n no_mask_face=\"No face mask count: {}\".format(results2[1]),\r\n correct_mask_face=\"Correct face mask count: {}\".format(results2[2]),\r\n incorrect_mask_face=\"Incorrect face mask count: {}\".format(results2[3]))", "title": "" }, { "docid": "d9f64fe11e57feb12e7d258100910562", "score": "0.56796294", "text": "def save_this_url(self, event, *args, **kwargs):\n\n # Get the url object\n url = event.url\n ustr = str(url)\n # If not image, save always\n if ('/poster' not in ustr):\n return False\n\n if url.is_document() or url.is_cgi() or (url.is_image() and re.search('[/]t_[^_]+_[^_.]*.jpg',ustr)):\n \n return True\n print 'rejecting ', ustr\n return False", "title": "" }, { "docid": "380d87a0f9a038b9842f7b147584bafc", "score": "0.5677725", "text": "def submit(self, script, working_directory):\n pass", "title": "" }, { "docid": "dd210666bde91169584ce6370276bf02", "score": "0.5677659", "text": "def 
test_ContentImage(self):\n\n self.assertEqual(build_func(ContentImage), LuigiStatusCode.SUCCESS)", "title": "" }, { "docid": "2de1561e22df8ac04f4174222124d9a9", "score": "0.5671251", "text": "def _UploadErrorImagesToCloudStorage(cls, image_name, screenshot, ref_img):\n machine_name = re.sub(r'\\W+', '_',\n cls.GetParsedCommandLineOptions().test_machine_name)\n upload_dir = '%s_%s_telemetry' % (\n cls.GetParsedCommandLineOptions().build_revision, machine_name)\n base_bucket = '%s/runs/%s' % (error_image_cloud_storage_bucket, upload_dir)\n image_name_with_revision = '%s_%s.png' % (\n image_name, cls.GetParsedCommandLineOptions().build_revision)\n cls._UploadBitmapToCloudStorage(\n base_bucket + '/gen', image_name_with_revision, screenshot,\n public=True)\n if ref_img is not None:\n cls._UploadBitmapToCloudStorage(\n base_bucket + '/ref', image_name_with_revision, ref_img, public=True)\n diff_img = image_util.Diff(screenshot, ref_img)\n cls._UploadBitmapToCloudStorage(\n base_bucket + '/diff', image_name_with_revision, diff_img,\n public=True)\n print ('See http://%s.commondatastorage.googleapis.com/'\n 'view_test_results.html?%s for this run\\'s test results') % (\n error_image_cloud_storage_bucket, upload_dir)", "title": "" }, { "docid": "bf62cdb6b7268d9f6e7d194af5289645", "score": "0.56687623", "text": "def test_user_valid_create_job(self, mock_handle_job):\n\n self.client.force_authenticate(self.user)\n response = self.client.post(self.image_url)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n mock_handle_job.assert_called()", "title": "" }, { "docid": "a1f069a67288f10bd31300eb04906f48", "score": "0.5668627", "text": "def create_job(form, request, image):\n job = Job()\n job.owner = request.user\n formatted = format_form(form.cleaned_data)\n formatted.pop('kliko_name') # inserted by rodrigues\n job.config = json.dumps(formatted)\n job.name = form.data['kliko_name']\n job.image = image\n job.save()\n\n # Create the placeholder for container IO\n try:\n tempdir = tempfile.mkdtemp(dir=os.path.realpath(settings.MEDIA_ROOT),\n prefix=str(job.id) + '-')\n\n # Nginx container runs as unprivileged\n os.chmod(tempdir, 0o0755)\n\n input_ = os.path.join(tempdir, 'input')\n output = os.path.join(tempdir, 'output')\n param_files = os.path.join(tempdir, 'param_files')\n\n os.mkdir(input_)\n os.mkdir(output)\n os.mkdir(param_files)\n\n except Exception as e:\n return False, \"Can't setup working directory:\\n%s\" % str(e)\n\n for fieldname, data in request.FILES.items():\n filename = request.FILES[fieldname].name\n with open(os.path.join(param_files, filename), 'wb+') as destination:\n for chunk in data.chunks():\n destination.write(chunk)\n\n job.results_dir = os.path.basename(tempdir)\n job.save()\n schedule_job(job, request)\n return True, \"\"", "title": "" }, { "docid": "36b8a551277eb8c8a8ff740c51ffb9b8", "score": "0.5665858", "text": "def test_action_flow(hostname, large_file):\n with open(large_file, 'r') as f:\n resp = requests.post(hostname + '/images',\n data={'user_id': 'test-user-{}'.format(uuid.uuid4())},\n files={'file': ('bridge.jpeg', f)})\n\n assert resp.status_code == 200, 'Error uploading an image; this should not fail: {}'.format(resp.content)\n data = resp.json()\n\n assert data.get('id'), 'Uploaded image did not respond with an ID'\n assert data.get('location'), 'Uploaded image did not respond with a location'\n\n # Current actions on the image should only be 'upload'\n resp = requests.get(hostname + '/image/{}'.format(data['id']))\n assert 
resp.json()['actions'] == ['upload']\n\n # Resize the image and check that the actions now include a resize\n resp = requests.put(hostname + '/image/{}'.format(data['id']), data={'action': 'resize', 'size': '50,50'})\n assert resp.status_code == 200\n resp = requests.get(hostname + '/image/{}'.format(data['id']))\n assert resp.json()['actions'] == ['upload', 'resize']\n\n # Crop the image and check that the actions include a crop\n resp = requests.put(hostname + '/image/{}'.format(data['id']), data={'action': 'crop', 'box': '0,50,900,200'})\n assert resp.status_code == 200\n resp = requests.get(hostname + '/image/{}'.format(data['id']))\n assert resp.json()['actions'] == ['upload', 'resize', 'crop']\n\n # Transcode the image, check that transcode is now in actions\n resp = requests.put(hostname + '/image/{}'.format(data['id']), data={'action': 'transcode', 'extension': 'png'})\n assert resp.status_code == 200\n resp = requests.get(hostname + '/image/{}'.format(data['id']))\n assert resp.json()['actions'] == ['upload', 'resize', 'crop', 'transcode']\n\n # Clean up test data and delete the image\n requests.delete(hostname + '/image/{}'.format(data['id']))", "title": "" }, { "docid": "fc627a9971b2ee31ca4fad069f9d2563", "score": "0.56644666", "text": "def test_submit_no_files(self):\n self.setup_gridsite_environment()\n self.push_delegation()\n\n job = {'files': []}\n\n error = self.app.put(\n url=\"/jobs\",\n params=json.dumps(job),\n status=400\n ).json\n\n self.assertEqual(error['status'], '400 Bad Request')\n self.assertEqual(error['message'], 'No valid pairs available')", "title": "" }, { "docid": "71967c80641a958d36ac6ed0a10cc94e", "score": "0.5660585", "text": "def test_upload_ephemeral_image(self):\n pass", "title": "" }, { "docid": "a282a57b97768688364d87f6916b6fca", "score": "0.5659505", "text": "def main():\n parser = OptionParser()\n parser.add_option('-f', '--al-file', dest='al_file', type='string', default=None,\n help=\"al file from bad worker to resubmit\")\n parser.add_option('-i', '--input-file', dest='input_file', type='string', default=None,\n help=\"input file in new hits directory to be resubmitted\")\n (opts, args) = parser.parse_args()\n assert opts.al_file is not None and opts.input_file is not None\n\n parse = al.parse(opts.al_file)\n images = []\n for p in parse:\n images.append(p.imageName)\n\n with open(opts.input_file) as fh:\n lines = fh.readlines()\n\n new_lines = ['urls\\n']\n for line in lines:\n for image in images:\n if line.find(image) != -1:\n new_lines.append(line)\n break\n\n with open(opts.input_file, 'w') as fh:\n fh.writelines(new_lines)", "title": "" }, { "docid": "aa0557ef766768d5ac7874fa0075da24", "score": "0.5656996", "text": "def _accept_image_upload_alert_(self):\n alert = self._wait_alert_()\n return alert is not None", "title": "" }, { "docid": "4b2f040c73d028582a5769040ae81a5d", "score": "0.5649231", "text": "def postJob(self):", "title": "" }, { "docid": "1caaff5c33af44868ec4b4315336ace4", "score": "0.56404394", "text": "def test_upload_valid_image(self): \n url = reverse('mfwgallery:post')\n image = SimpleUploadedFile(\"image.jpg\", \"file_content\", content_type=\"image/jpeg\")\n data = {\"description\": \"This is a valid image\", \"image\": image}\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n # Not working because auto_now_add\n # self.assertEqual(response.data, {\"nick\": \"tester\", \"description\": \"This is a valid image\", \"url\": \"http://i.imgur.com/YnFMv0i.png\", 
\"image\": \"images/YnFMv0i.png\", \"thumbnail\": \"images/thumbs/YnFMv0i_uu.png\"})", "title": "" }, { "docid": "118c4735900bf4ca80ab47af868c860f", "score": "0.5637344", "text": "def test_classify_wrong_image(client):\n # Start time, player_id and user doesn't need to be valid, since the error is\n # supposed to be caught before these are used\n time = 0\n player_id, user = \"\", \"\"\n # Submit answer with the given parameters and get results\n res = classify_helper(\n client, cfg.API_PATH_DATA, cfg.API_IMAGE1, time, player_id, user\n )\n assert b\"415 Unsupported Media Type\" in res.data", "title": "" }, { "docid": "25fed658c6857e9fca0a399839abb122", "score": "0.56251806", "text": "def test_upload_image_recipe(self):\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(url, {'image': ntf}, format='multipart')\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "title": "" }, { "docid": "2c824914d3b74fd05e8c3f07f6391419", "score": "0.56202173", "text": "def test_upload_live_photo(self):\n pass", "title": "" }, { "docid": "f10637f8f11903b088b478e8d1fb4780", "score": "0.5620174", "text": "def test_upload_image(self):\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10,10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(url, {'image':ntf}, format='multipart')\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "title": "" }, { "docid": "4a8ed77e4b7c1a6582e4712bf6169bd0", "score": "0.5619311", "text": "def test_image_upload(self):\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format='JPEG')\n ntf.seek(0)\n res = self.client.post(url, {'image': ntf}, format('multipart'))\n\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "title": "" }, { "docid": "5cdee96223c2b699b5ccf558ab938e11", "score": "0.561427", "text": "def check_and_save(image_name):\n received_sections = db_access.get_received_sections_count(image_name)\n total_sections = db_access.get_total_sections_count(image_name)\n \n if received_sections != total_sections:\n return None\n \n sections = db_access.get_image_sections(image_name)\n image_text = \"\".join([section[0] for section in sections])\n \n time_stamp = time.strftime(\"%Y%m%d%H%M%S\", time.gmtime())\n with open('output/%s-%s.jpg' % (image_name, time_stamp), 'wb') as image:\n\tbinary = base64.b64decode(image_text)\n image.write(binary)\n \n db_access.delete_image_and_info(image_name)\n return(\"Done Processing Image: %s\" % (image_name))", "title": "" }, { "docid": "80901a459103ab169a19cd6ff1465917", "score": "0.5614113", "text": "def upload_workflow_image(workflow):\n allowed = lambda x: x.rsplit('.', 1)[1].lower() in ('jpg', 'jpeg', 'dng')\n file = request.files['file']\n if not allowed(file.filename):\n abort(500, 'Only JPG or DNG files are permitted')\n save_path = workflow.path/'raw'\n if not save_path.exists():\n save_path.mkdir()\n 
if file and allowed(file.filename):\n filename = secure_filename(file.filename)\n file.save(unicode(save_path/filename))\n return \"OK\"", "title": "" }, { "docid": "3e9a0d7a83774152e2919ab4fc1969df", "score": "0.56130797", "text": "def execute(self):\n\t\timg = self.__img\n\n\t\tif is_null_or_whitespace(img):\n\t\t\traise ValueError('Blog image is required, currently supporting only Unsplash.')\n\n\t\tregex = re.compile(\n\t\t\t\t\tr'^(?:http|ftp)s?://' # http:// or https://\n\t\t\t\t\tr'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' #domain...\n\t\t\t\t\tr'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n\t\t\t\t\tr'(?::\\d+)?' # optional port\n\t\t\t\t\tr'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n\t\tresult = re.match(regex, img)\n\n\t\tif result is None :\n\t\t\traise ValueError('Invalid blog image url.')\n\n\t\tif \"unsplash.com/photos/\" not in img:\n\t\t\traise ValueError('Invalid blog image url, currently supporting only Unsplash images.')\n\n\t\treturn 'Validation: Image \"%s\" is valid.' % img", "title": "" }, { "docid": "40c8a0c9efcf58bad02e39a402cb056c", "score": "0.5613058", "text": "def _submit(client_payload, answer_file_path, context):\n file_key = client_payload[\"file_key\"]\n _update_job_event(\n context,\n job_info_template(\n context, \"Grading Submission....\")\n )\n\n if \"round\" not in client_payload.keys():\n raise Exception(\"\"\"\n The round parameter has not been specified. Please upgrade your\n crowdai client to atleast version 1.0.21 by :\n pip install -U crowdai\n\n and then update your submission code by following the latest instructions\n from :\n https://github.com/crowdAI/ieee_investment_ranking_challenge-starter-kit#submission-of-predicted-file-to-crowdai\n \"\"\")\n\n round_id = client_payload[\"round\"]\n assert round_id in config.crowdai_round_id_map.keys(), \\\n \"Unknown Round ID Passed. 
Allowed values : {}\".format(\n str(config.crowdai_round_id_map.keys())\n )\n crowdai_round_id = config.crowdai_round_id_map[round_id]\n\n _payload = {}\n _meta = {}\n _meta['file_key'] = file_key\n _payload[\"meta\"] = json.dumps(_meta)\n _payload[\"challenge_round_id\"] = crowdai_round_id\n submission_id = report_to_crowdai(\n context,\n _payload,\n submission_id=False,\n status='submitted')\n print(\"Submission id : \", submission_id)\n try:\n localfilepath = download_file(context, file_key)\n _client_payload = {}\n _client_payload[\"submission_file_path\"] = localfilepath\n\n _result_object = config.evaluator._evaluate(\n client_payload=_client_payload,\n round_indicator=round_id,\n _context=context)\n print _result_object\n _payload = _result_object\n report_to_crowdai(\n context,\n _payload,\n submission_id=submission_id,\n message = \"graded successfully\",\n status='graded')\n # Clean up file if possible\n os.remove(localfilepath)\n return _result_object\n except Exception as e:\n # Report to crowdAI\n if \"meta\" in _payload.keys():\n del _payload[\"meta\"]\n report_to_crowdai(\n context,\n _payload,\n submission_id=submission_id,\n status='failed',\n message=str(e)\n )\n # raise the exception again\n # so that it can be handled further down the chain\n raise e", "title": "" }, { "docid": "61ccd07dbb2cd80d2bc3ff495c7fba4f", "score": "0.5601048", "text": "def test_upload_image(self):\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix='.jpg') as image_file:\n img = Image.new('RGB', (10, 10))\n img.save(image_file, format='JPEG')\n image_file.seek(0)\n payload = {'image': image_file}\n res = self.client.post(url, payload, format='multipart')\n\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "title": "" }, { "docid": "acacb82c02d3b71cda394280e95d669f", "score": "0.5597166", "text": "def upload(submission, upload_path):\n print(\"Check uploaded log\")\n uploaded_url = uploaded_log_exists(upload_path)\n if uploaded_url:\n return uploaded_url\n\n permalink = \"https://www.reddit.com\" + submission.permalink\n\n try:\n print(\"Uploading via lew.la\")\n uploaded_url = upload_via_lewla(permalink)\n if is_url_valid(uploaded_url):\n return uploaded_url\n except Exception as e:\n print(e)\n\n try:\n print(\"Uploading via Ripsave\")\n uploaded_url = upload_via_ripsave(permalink, submission)\n if is_url_valid(uploaded_url):\n return uploaded_url\n except Exception as e:\n print(e)\n\n return uploaded_url", "title": "" }, { "docid": "6c7e21137d7cfc84c2b46a6dae0f7d45", "score": "0.55971265", "text": "def cp_image(self):\n self.download_image()\n self.tag_downloaded_image()\n self.upload_image()", "title": "" }, { "docid": "a305741c6fad547b19b2d9b5c1cbbc64", "score": "0.559635", "text": "def uploadImage(self) -> str:\n if not self.created:\n raise errors.FileNotCreated(\"File Not created\")\n try:\n # Upload files to S3\n client.upload_file(\"tempHolding/{}\".format(self.fileName),\n self.aws_bucket,\n \"TurnipBot/predictions/{}\".format(self.fileName),\n ExtraArgs={'ACL': 'public-read'})\n os.remove(\"tempHolding/{}\".format(self.fileName)) # remove temp file\n return \"{}/TurnipBot/predictions/{}\".format(self.CDNLink, self.fileName)\n except be.ClientError as e:\n os.remove(\"tempHolding/Graph{}\".format(self.fileName))\n raise errors.AWSError(e)\n except Exception as e:\n os.remove(\"tempHolding/Graph{}\".format(self.fileName))\n 
raise errors.AWSError(e)", "title": "" }, { "docid": "685d566c2b16f9ade7eb9ad70c3d3e39", "score": "0.55924636", "text": "def submit(self):\n pass", "title": "" }, { "docid": "34c4e6db0c0c7d0d3995d7aa214c8bd8", "score": "0.55885", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.content.uuid)\n res = self.api_client.post(\n url,\n {'image': 'notanimage'},\n format='multipart'\n )\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "d5d128f83da44f75b66e28c5aee093c1", "score": "0.55883384", "text": "def upload_image():\n\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n flash('No image selected for uploading')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n #print('upload_image filename: ' + filename)\n flash('Image successfully uploaded!')\n \"\"\" run recommend_cuis and render_home with result variables\"\"\"\n results = clean_results(recommend_cuis(filename))\n return render_template('predict.html', filename=filename, results=results)\n else:\n flash('Allowed image types are - png, jpg, jpeg')\n return redirect(request.url)", "title": "" }, { "docid": "b613ff95ccc13cecc2e85121cdde231c", "score": "0.55801004", "text": "def test_empty_image_url(self):\n self.invoice_data[\"image_url\"] = \"\"\n self.invoice_data[\"order_id\"] = self.details_dict['order_id']\n response = self.query_with_token(\n self.access_token,\n upload_invoice.format(**self.invoice_data))\n expected_message = \"Invoice file field cannot be blank!\"\n self.assertEqual(\n expected_message,\n response[\"errors\"][0][\"message\"])", "title": "" }, { "docid": "3d6f59012a31ef41e0cd25ff75e55756", "score": "0.5578187", "text": "def test_admin_create_job(self, mock_handle_job):\n\n self.client.force_authenticate(self.admin)\n response = self.client.post(self.image_url)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n mock_handle_job.assert_called()", "title": "" }, { "docid": "6340ef10a3af661dc333fe17837c8120", "score": "0.55777526", "text": "def test_upload_image_to_recipe(self):\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix=\".jpg\") as ntf:\n img = Image.new(\"RGB\", (10, 10))\n img.save(ntf, format=\"JPEG\")\n ntf.seek(0)\n res = self.client.post(url, {\"image\": ntf}, format=\"multipart\")\n\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn(\"image\", res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "title": "" }, { "docid": "6340ef10a3af661dc333fe17837c8120", "score": "0.55777526", "text": "def test_upload_image_to_recipe(self):\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix=\".jpg\") as ntf:\n img = Image.new(\"RGB\", (10, 10))\n img.save(ntf, format=\"JPEG\")\n ntf.seek(0)\n res = self.client.post(url, {\"image\": ntf}, format=\"multipart\")\n\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn(\"image\", res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "title": "" }, { "docid": "1dca6af59f2fd8eafbc2c630adf504e7", "score": "0.55753446", "text": "def test_image_add_admin(self):\n self.client.force_authenticate(user=self.admin)\n 
data={\"url\":\"https://images.daznservices.com/di/library/GOAL/67/ed/thomas-partey-arsenal-2020_1a1h8hhdz7xw611hds7hueajs4.jpg?t=2129690261&quality=100\"}\n response=self.client.post('/images/add/',data)\n self.assertEqual(response.status_code,status.HTTP_201_CREATED)", "title": "" }, { "docid": "d1d35d7bda09a4006056caa26090ff6f", "score": "0.5574281", "text": "def test_upload_image_bad_request(self):\n url = image_upload_url(self.product.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "00b80095539539827a0c76bf5ff319f2", "score": "0.5567585", "text": "def test_upload_image_to_recipe(self):\n url = image_upload_url(self.recipe.id)\n with tempfile.NamedTemporaryFile(suffix=\".jpg\") as ntf:\n img = Image.new('RGB', (10, 10))\n img.save(ntf, format=\"JPEG\")\n ntf.seek(0)\n res = self.client.post(url, {'image': ntf}, format='multipart')\n\n self.recipe.refresh_from_db()\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('image', res.data)\n self.assertTrue(os.path.exists(self.recipe.image.path))", "title": "" } ]
7341d6aa13b1cc286ce04d5827817308
Dump the value for database storage.
[ { "docid": "b577095ab886f50393e79e4e1d752c46", "score": "0.0", "text": "def db_value(self, val: 'Optional[Union[str, IPAddress]]') -> 'Optional[int]': # pylint: disable=inconsistent-return-statements\n if val is not None:\n if isinstance(val, str):\n val = ipaddress.ip_address(val)\n return int(val) # type: ignore[arg-type]\n return None", "title": "" } ]
[ { "docid": "58aad9c81ff71b941b78cdf189ef4a8e", "score": "0.8058299", "text": "def dump(self, value):\n return", "title": "" }, { "docid": "58aad9c81ff71b941b78cdf189ef4a8e", "score": "0.8058299", "text": "def dump(self, value):\n return", "title": "" }, { "docid": "7d78c4412fdfe2e65d54910b5c97bb4d", "score": "0.72222567", "text": "def get_values_to_dump(self):\n return 0", "title": "" }, { "docid": "f99fad0af6d7383be5be186724086c53", "score": "0.7075634", "text": "def dump(self):\r\n data = self.dump_data()\r\n store[self.key] = data", "title": "" }, { "docid": "d02468596fc90fd6b1229a86d7c2fc47", "score": "0.700473", "text": "def dump(self):\n pass", "title": "" }, { "docid": "1a7223474ce7f259bb8e65cc209458ae", "score": "0.6998508", "text": "def dump(self) -> str:", "title": "" }, { "docid": "60bf149e047f8c4f9ff03ff1d5dd9b05", "score": "0.6986296", "text": "def to_database(self, value):\n if value is None and self.has_default:\n return self.get_default()\n return value", "title": "" }, { "docid": "70c666fbb24955a5fadff95c1069bde9", "score": "0.68173593", "text": "def dump(self) -> retval:\n ...", "title": "" }, { "docid": "ed5a10c80788b0ed1559961d468b4df1", "score": "0.6812896", "text": "def toStorageValue():", "title": "" }, { "docid": "02615dcd05ab5ba6b5e229ba057d0393", "score": "0.67910254", "text": "def db_value(self, value):\n if value is None:\n return None\n\n serialized_state = base64.b64encode(pickle.dumps(value)).decode(\"ascii\")\n return serialized_state", "title": "" }, { "docid": "22dc6d5aa2ce3bc1e516ccd93208e7bb", "score": "0.67773825", "text": "def dump(self):\n raise NotImplementedError", "title": "" }, { "docid": "a627091d0e3527e8b32013b1a0b3fcea", "score": "0.6771704", "text": "def _dump_bs4_value(value):\n return str(value)", "title": "" }, { "docid": "7b1958bd1ff9f9aa871f18d4f455f376", "score": "0.676124", "text": "def _dump_pickle_value(value):\n return pickle.dumps(value)", "title": "" }, { "docid": "e3c303337437e0729a2be0fa600aa705", "score": "0.65281695", "text": "def db_value(self, value: 'Any') -> 'Optional[bytes]': # pylint: disable=inconsistent-return-statements\n if value is not None:\n value = pickle.dumps(value)\n return super().db_value(value)", "title": "" }, { "docid": "21a5443c972e1a54bfb6f70cd1729d57", "score": "0.6419501", "text": "def _dump_bs4(value):\n return str(value)", "title": "" }, { "docid": "a53e43c82f10a1ad5b64aa053ffc6516", "score": "0.63743377", "text": "def dumps(value):\n if isinstance(value, asyncpg.Record):\n value = dict(value)\n return pickle.dumps(value)", "title": "" }, { "docid": "3f7669ac17f01b2e69f648926edea81c", "score": "0.6299847", "text": "def db(self):\n return str(self._db)", "title": "" }, { "docid": "382cb3d09403b1afbc5661c21db0ffc1", "score": "0.6294782", "text": "def dump_DB(self):\n\t\tprint 'Dump data base....'\n\t\tstream = open(self.DB_file, 'w')\n\t\tpickle.dump(self.DB, stream)\n\t\tstream.close()\n\t\t#return ", "title": "" }, { "docid": "3800aa075ba933f50abcfab445e8c7a7", "score": "0.6274904", "text": "def dump(self): # pragma: no cover\n raise NotImplementedError()", "title": "" }, { "docid": "4d6a76e3ce0ff010f8e1bcb915934929", "score": "0.6250882", "text": "def serialize(self, value):\n return value", "title": "" }, { "docid": "4d6a76e3ce0ff010f8e1bcb915934929", "score": "0.6250882", "text": "def serialize(self, value):\n return value", "title": "" }, { "docid": "5bf5c5da4ac7074a30b9b26532a7be38", "score": "0.6231838", "text": "def dump(self, destination='db'):\n data = self.schema().dump(self).data\n 
data['id'] = data.pop('_key')\n return data", "title": "" }, { "docid": "c8e9b25687555622ce7ab04d2b9186e0", "score": "0.6206808", "text": "def dump(self, outfile):\n db_dict = {\n 'meta_prints': self.meta_prints,\n 'content_prints': self.content_prints,\n 'series_id': self.series_id,\n }\n msgpack.dump(db_dict, outfile)", "title": "" }, { "docid": "88b07073f24a6a51b2d5c690195a14ef", "score": "0.61572754", "text": "def write(self, value):\n return value", "title": "" }, { "docid": "f741f111c4ca585bbd15aab32f14749a", "score": "0.61461127", "text": "def __str__(self):\n return repr(self.value)", "title": "" }, { "docid": "f741f111c4ca585bbd15aab32f14749a", "score": "0.61461127", "text": "def __str__(self):\n return repr(self.value)", "title": "" }, { "docid": "962b0b6d60b8ecababf12885ff6792d5", "score": "0.61360556", "text": "def dump_qvalues(self, force=False):\r\n if force:\r\n print(\"******** Saving Q-table(%d keys) to local file... ********\" % len(self.qvalues.keys()))\r\n fil = open(\"q_values.json\", \"w\")\r\n json.dump(self.qvalues, fil)\r\n fil.close()\r\n print(\"******** Q-table(%d keys) updated on local file ********\" % len(self.qvalues.keys()))", "title": "" }, { "docid": "de7fc10940c0395b22d2ffac055a673a", "score": "0.6113299", "text": "def write(self, value):\n return value", "title": "" }, { "docid": "de7fc10940c0395b22d2ffac055a673a", "score": "0.6113299", "text": "def write(self, value):\n return value", "title": "" }, { "docid": "de7fc10940c0395b22d2ffac055a673a", "score": "0.6113299", "text": "def write(self, value):\n return value", "title": "" }, { "docid": "de7fc10940c0395b22d2ffac055a673a", "score": "0.6113299", "text": "def write(self, value):\n return value", "title": "" }, { "docid": "de7fc10940c0395b22d2ffac055a673a", "score": "0.6113299", "text": "def write(self, value):\n return value", "title": "" }, { "docid": "de7fc10940c0395b22d2ffac055a673a", "score": "0.6113299", "text": "def write(self, value):\n return value", "title": "" }, { "docid": "de7fc10940c0395b22d2ffac055a673a", "score": "0.6113299", "text": "def write(self, value):\n return value", "title": "" }, { "docid": "de7fc10940c0395b22d2ffac055a673a", "score": "0.6113299", "text": "def write(self, value):\n return value", "title": "" }, { "docid": "de7fc10940c0395b22d2ffac055a673a", "score": "0.6113299", "text": "def write(self, value):\n return value", "title": "" }, { "docid": "cbfc09a43d41d097b5ba2a72af21aed6", "score": "0.61018085", "text": "def __str__(self):\n\n return repr(self.value)", "title": "" }, { "docid": "4d268794bdad82b90017573ae9643466", "score": "0.60986525", "text": "def stringify(self, value):\n return repr(value)", "title": "" }, { "docid": "d63b5931ad50335df775b00849a43cee", "score": "0.6088348", "text": "def dump(self):\n return self.core.dump()", "title": "" }, { "docid": "16a033fba7b600dbb1d7e6d4828241b7", "score": "0.6085403", "text": "def write(self, value):\r\n return value", "title": "" }, { "docid": "ec60528e2890e1ce9336c790bea94068", "score": "0.60719293", "text": "def write(self, value):", "title": "" }, { "docid": "c84a53937c99d2b84f218641adb3a059", "score": "0.60533965", "text": "def toPersistenceFormat(self, value):\r\n \r\n self = self # silent pylint\r\n return value", "title": "" }, { "docid": "28cbaa16279e80926550cb9ecaf57641", "score": "0.6049075", "text": "def serialize_value(self, value):\n return value", "title": "" }, { "docid": "8d0c26071a0ed0818b2ae75d597417a2", "score": "0.6048115", "text": "def __str__(self):\n ldump = []\n try:\n for k, v in 
self.items():\n ldump.append((k, v))\n ldump.sort(key=lambda t: t[0])\n ldump = [str(i) for i in ldump]\n return \"BackingStore: [{}]\".format(\", \".join(ldump))\n except BStoreClosedError:\n return \"BackingStore: closed\"", "title": "" }, { "docid": "988ccbaa438079acdb155ba959d3edb3", "score": "0.60408574", "text": "def to_str(self):\n # type: () -> str\n return pprint.pformat(self.value)", "title": "" }, { "docid": "124c300bc7e141c52d56499591305479", "score": "0.6021871", "text": "def _serialize(cls, value):\n return value", "title": "" }, { "docid": "171184f90d29d400439e62b700b33d61", "score": "0.6016454", "text": "def save_value(self, value):\n self.dual_variables[0].save_value(value)", "title": "" }, { "docid": "4fd93ab469154b485b092c36c083f9b8", "score": "0.6013662", "text": "def dump(self) -> bytes:\n return self.as_bytes()", "title": "" }, { "docid": "67b70280de6aa08f36a905ce577a3fa7", "score": "0.5998885", "text": "def _dump_csv_value(value):\n buff = io.StringIO()\n writer = csv.writer(buff, quotechar='\"', quoting=csv.QUOTE_ALL)\n writer.writerows(value)\n buff.seek(0)\n value = buff.read()\n buff.close()\n return value", "title": "" }, { "docid": "648ba135d902a787c90a16f8213f70ae", "score": "0.59966755", "text": "def _dump_plist_value(value):\n if hasattr(plistlib, 'dumps'):\n return plistlib.dumps(value)\n try:\n return plistlib.writePlistToString(value).encode('utf-8')\n except AttributeError:\n return plistlib.writePlistToBytes(value)", "title": "" }, { "docid": "4073eb6365f22e5b52891ecc46c5d251", "score": "0.59952545", "text": "def debug(self, value):\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(value)", "title": "" }, { "docid": "f50395e8dcf0490fa310322907108621", "score": "0.59813136", "text": "def dump(self):\n pickle.dump( self,open(self.dumpf,'w'), True )", "title": "" }, { "docid": "7935ee779343b58d7ac1184013326d05", "score": "0.5966167", "text": "def _dump(self, value_dict: dict, file_path: str,\n file_format: str) -> None:\n with open(file_path, 'a+') as f:\n dump(value_dict, f, file_format=file_format)\n f.write('\\n')", "title": "" }, { "docid": "a49e013ba753cfcb3db8ceca10d503cc", "score": "0.5965843", "text": "def dump(self): # real signature unknown; restored from __doc__\n return \"\"", "title": "" }, { "docid": "b74538ec54b114d8aad0cb10174120b9", "score": "0.5940354", "text": "def dump_file(fname, value):\n with open(fname, 'w') as out:\n out.write(str(value))", "title": "" }, { "docid": "efda5fb791de55a7d7b196db93766065", "score": "0.59314626", "text": "def __str__(self):\n return str(self.__value)", "title": "" }, { "docid": "bfdd5ffe3705fff63625ecc439e733c6", "score": "0.5915344", "text": "def __str__(self):\n return str(self.value)", "title": "" }, { "docid": "bfdd5ffe3705fff63625ecc439e733c6", "score": "0.5915344", "text": "def __str__(self):\n return str(self.value)", "title": "" }, { "docid": "bfdd5ffe3705fff63625ecc439e733c6", "score": "0.5915344", "text": "def __str__(self):\n return str(self.value)", "title": "" }, { "docid": "e5144ec9c574cd7984d2fbf53457dfc8", "score": "0.59087557", "text": "def dump(self):\n for key in self.__data.keys():\n print()\n print(\"----- %s -----\" % key)\n print(self.getDataFrame(key))", "title": "" }, { "docid": "c1a4521aec0052032f6d7d1aff53c89c", "score": "0.5907035", "text": "def dump_data(self):\r\n return {}", "title": "" }, { "docid": "7d8aae7c8676160e6c2f27eea9eee6f7", "score": "0.59065425", "text": "def __str__(self):\n return self._value_", "title": "" }, { "docid": "422d2ff68417ee9aacbed02ed3d48e4d", 
"score": "0.58901775", "text": "def dumpdb(database):\n for i in database:\n (s, r) = database[i]\n print(\"%s, %d , %d \" % (i, s, r))", "title": "" }, { "docid": "2b5613d8ddbd6358d5377df54145d287", "score": "0.58886635", "text": "def to_db(self,**kwds):\n return(sub_to_db(self,**kwds))", "title": "" }, { "docid": "ca0e9b117f00e8e89a9f141174234c20", "score": "0.58876044", "text": "def dump_q_values(self):\n if self.gameCNT % self.DUMPING_N == 0:\n fil = open('qvalues.json', 'w')\n json.dump(self.q_values, fil)\n fil.close()\n print('Q-values updated on local file.')", "title": "" }, { "docid": "eb7289d075006ee7ab5db4907de9f629", "score": "0.5886371", "text": "def test_core_save_stored_value_v1(self):\n pass", "title": "" }, { "docid": "ef1e4cb47a2245729ac53bfefca22fd2", "score": "0.58855474", "text": "def __str__(self) -> str:\r\n return str(self._value)", "title": "" }, { "docid": "3762728a46ec34c8e8ec63e60ad73bdd", "score": "0.58850515", "text": "def __str__(self):\r\n return str(self.value)", "title": "" }, { "docid": "3762728a46ec34c8e8ec63e60ad73bdd", "score": "0.58850515", "text": "def __str__(self):\r\n return str(self.value)", "title": "" }, { "docid": "3762728a46ec34c8e8ec63e60ad73bdd", "score": "0.58850515", "text": "def __str__(self):\r\n return str(self.value)", "title": "" }, { "docid": "7a8ed16f38bfba88af00f34506d062da", "score": "0.58794194", "text": "def dump(self):\n self.open()\n if self.verbose:\n print \"INFO: Dump SQLite database \" + self.sql.path\n return subprocess.check_output([\"sqlite3\", self.sql.path, \".dump\"])", "title": "" }, { "docid": "a0a58ed55d8e0c15e49761007c99af26", "score": "0.5873896", "text": "def __dump__(args):\n\n d = data()\n if args.cls: d = {args.cls: d[args.cls]}\n\n output = sys.stdout\n if args.selftest:\n from bob.db.base.utils import null\n output = null()\n\n for k, v in d.items():\n for array in v:\n s = ','.join(['%.1f' % array[i] for i in range(array.shape[0])] + [k])\n output.write('%s\\n' % (s,))\n\n return 0", "title": "" }, { "docid": "0602377ac7ba484f1c1b88ccab3c299a", "score": "0.58713865", "text": "def dump(self, path):\n data = self.to_dic()\n save_dict(data, path)", "title": "" }, { "docid": "a0e42faecf6e53a9f9c03cb519254489", "score": "0.58692455", "text": "def data(self):\r\n return str(self)", "title": "" }, { "docid": "59aeb2f32809c3a3de95a878b138f2f2", "score": "0.5843663", "text": "def __str__(self) -> str:\n return str(self.value)", "title": "" }, { "docid": "59aeb2f32809c3a3de95a878b138f2f2", "score": "0.5843663", "text": "def __str__(self) -> str:\n return str(self.value)", "title": "" }, { "docid": "59aeb2f32809c3a3de95a878b138f2f2", "score": "0.5843663", "text": "def __str__(self) -> str:\n return str(self.value)", "title": "" }, { "docid": "ade3a9362342e8a6cfb9797910bc27b2", "score": "0.58345526", "text": "def __str__(self):\n return str(self.save())", "title": "" }, { "docid": "de51a29b2484930c3519253a15551be3", "score": "0.58327436", "text": "def print_result(self, value: Any) -> None:\n if isinstance(value, Mapping):\n if self.varpath == '.':\n value = toml.dumps(value)\n else:\n value = toml.dumps({self.varpath: value})\n lines = []\n for line in value.strip().split('\\n'):\n if not line.startswith('['):\n lines.append(line)\n else:\n lines.append(line.replace('\"', ''))\n value = '\\n'.join(lines)\n print(value, flush=True)", "title": "" }, { "docid": "62bfbac37da689c0c3e32ddaa7b524af", "score": "0.582031", "text": "def dump(self, data=None):\n return _dump(data or self)", "title": "" }, { "docid": 
"23442ba4a7de9448dde2c8c300182664", "score": "0.58159244", "text": "def __repr__(self):\n return str([{key: self.db[key]} for key in self.db.keys()])", "title": "" }, { "docid": "b150f36da793ca152c855d4c6c2d4ce3", "score": "0.5790559", "text": "def Value(self) -> str:", "title": "" }, { "docid": "b150f36da793ca152c855d4c6c2d4ce3", "score": "0.5790559", "text": "def Value(self) -> str:", "title": "" }, { "docid": "b150f36da793ca152c855d4c6c2d4ce3", "score": "0.5790559", "text": "def Value(self) -> str:", "title": "" }, { "docid": "b150f36da793ca152c855d4c6c2d4ce3", "score": "0.5790559", "text": "def Value(self) -> str:", "title": "" }, { "docid": "b150f36da793ca152c855d4c6c2d4ce3", "score": "0.5790559", "text": "def Value(self) -> str:", "title": "" }, { "docid": "b150f36da793ca152c855d4c6c2d4ce3", "score": "0.5790559", "text": "def Value(self) -> str:", "title": "" }, { "docid": "b150f36da793ca152c855d4c6c2d4ce3", "score": "0.5790559", "text": "def Value(self) -> str:", "title": "" }, { "docid": "b150f36da793ca152c855d4c6c2d4ce3", "score": "0.5790559", "text": "def Value(self) -> str:", "title": "" }, { "docid": "b150f36da793ca152c855d4c6c2d4ce3", "score": "0.5790559", "text": "def Value(self) -> str:", "title": "" }, { "docid": "b150f36da793ca152c855d4c6c2d4ce3", "score": "0.5790559", "text": "def Value(self) -> str:", "title": "" }, { "docid": "9043826b1dd99f8e5f491712e8a70855", "score": "0.57861644", "text": "def __str__(self) -> str:\n return str(self.serialize())", "title": "" }, { "docid": "a29df2aa7d1ed9e17ea1be4e448bc62b", "score": "0.57737744", "text": "def dump(self):\n # dump key information\n dump = [\n \"\",\n \"----------Metadata Info for Dataset:%s----------------\" % self.dataset_name,\n \"Total Training Triples :%s\" % self.kg_meta.tot_train_triples,\n \"Total Testing Triples :%s\" % self.kg_meta.tot_test_triples,\n \"Total validation Triples :%s\" % self.kg_meta.tot_valid_triples,\n \"Total Entities :%s\" % self.kg_meta.tot_entity,\n \"Total Relations :%s\" % self.kg_meta.tot_relation,\n \"---------------------------------------------\",\n \"\",\n ]\n self._logger.info(\"\\n\".join(dump))\n return dump", "title": "" }, { "docid": "92aa758e25b304ebf5f181d3b4247835", "score": "0.576751", "text": "def Dump(self, path):\n _ecvl.Dataset.Dump(self, path)", "title": "" }, { "docid": "a93b171ae91bcb4e2a90c442b7e8230e", "score": "0.5765132", "text": "def _dump(self):\n\n print 'Lexicon'\n for lang in self.getLexicon().getLanguages():\n print lang\n for k,v in self.getLexicon()._words[lang].items():\n print repr(k), v\n\n print\n\n print '-'*80\n\n print 'Storage'\n for field in self.fields:\n S = self.getStorage(field)\n\n for k, v in S._wid2doc.items():\n print k, list(v)", "title": "" }, { "docid": "b46f6b2237cccaafe9772698b396ef80", "score": "0.57535505", "text": "def __str__(self) -> str:\n info = self.export()\n for key, value in info.items():\n if isinstance(value, Decimal):\n info[key] = str(value)\n return json.dumps(info)", "title": "" }, { "docid": "ebc31c7bd4f9f34007a3042b9f7f209c", "score": "0.5752177", "text": "def __str__(self):\n return self.dumps()", "title": "" }, { "docid": "64876c8f17e0d5e35f5909b42d999179", "score": "0.57462734", "text": "def serialize_value(self, value):\n return self.field.prepare_value(value)", "title": "" }, { "docid": "dbceeb2693f316a9265a8539aaa374bd", "score": "0.57432085", "text": "def __str__(self) -> str:\n return self.value", "title": "" }, { "docid": "fa17fd88a8975b25fcd7abe0918d3331", "score": "0.57382214", "text": "def 
dump(self):\n # dump key information\n dump = [\n \"\",\n \"----------Metadata Info for Dataset:%s----------------\" % self.dataset.name,\n \"Total Training Triples :%s\" % self.train_triples_count,\n \"Total Testing Triples :%s\" % self.test_triples_count,\n \"Total validation Triples :%s\" % self.valid_triples_count,\n \"Total Entities :%s\" % self.entity_count,\n \"Total Relations :%s\" % self.relation_count,\n \"---------------------------------------------\",\n \"\",\n ]\n return dump", "title": "" }, { "docid": "ef3f64f5f609461c4f65978f86dac773", "score": "0.57270485", "text": "def __repr__(self):\n return str(self.value)", "title": "" } ]
4262ba2169fb9af58bdba7293847d736
Edit an entire list. Danger Will Robinson! Only staff members should be allowed to access this view.
[ { "docid": "fc5bb10fae3692184a40801cd44250b3", "score": "0.7460019", "text": "def edit_list(request, list_id):\t\n\tif request.user.is_staff:\n\t\tcan_edit = 1\n\n # Get this list's object (to derive list.name, list.id, etc.)\n\tlist = get_object_or_404(List, pk=list_id)\n\n\tif list.team.uuid == request.user.profile.team.uuid or request.user.is_staff:\n\t\tauth_ok = 1\n\t\tif request.method == 'POST':\n\t\t\t\n\t\t\tform = EditListForm(request.POST, instance=list)\n\t\t\tif form.is_valid():\n\t\t\t\ttry:\n\t\t\t\t\tform.save()\n\t\t\t\t\treturn HttpResponseRedirect('/todolist') \n\t\t\t\texcept IntegrityError:\n\t\t\t\t\tmessages.error(request,\n\t\t\t\t\t\t\"There was a problem saving the list. \"\n\t\t\t\t\t\t\"Most likely a list with the same name in the same team already exists.\")\n\t\telse:\t\t\t\n\t\t\tform = EditListForm(instance=list)\n\t\t\t\t\t\n\t\t\tif task.due_date:\n\t\t\t\tthedate = task.due_date\n\t\t\telse:\n\t\t\t\tthedate = datetime.datetime.now()\n\telse:\t\t\n\t\tmessages.info(request, \"You do not have permission to view/edit this list.\")\n\n\treturn render_to_response('todolist/edit_list.html', locals(), context_instance=RequestContext(request))", "title": "" } ]
[ { "docid": "c70565b6317d6080ce923f2da04457d2", "score": "0.69857556", "text": "def edit_list(name):\n user = session['username']\n description = ''\n\n user_lists = list_object.show_lists(user)\n\n for s_list in user_lists:\n if s_list['name'] == name:\n description = s_list['description']\n\n if request.method == 'POST':\n old_name = name\n new_name = request.form['name']\n description = request.form['description']\n status = list_object.update_list(old_name, new_name, description, user)\n\n if status == list_object.shopping_list:\n response = \"\" + name + \" shopping list successfully updated\"\n flash(response)\n\n return redirect(url_for('dashboard'))\n\n flash(status)\n return redirect(url_for('dashboard'))\n\n return render_template(\"edit_shopping_list.html\", name=name, description=description)", "title": "" }, { "docid": "a29ff8ab63a7d18b81c7895c6e511459", "score": "0.6962629", "text": "def edit_list_view(request, listId):\n if not request.method == \"POST\":\n return JsonResponse({\"status\": 405, \"error\": \"The only allowed methods for this endpoint are POST.\"}, status=405)\n\n try:\n ls = get_object_or_404(List, id=listId)\n except Http404:\n return JsonResponse({\"status\": 404, \"error\": \"The list you are trying to retrieve/modify does not exist!\"}, status=404)\n\n title = request.POST.get(\"title\")\n order = request.POST.get(\"order\")\n\n if title:\n ls.title = title\n\n if order:\n try:\n order = int(order)\n except ValueError:\n return JsonResponse({\"status\": 400, \"error\": \"Unable to parse list order!\"}, status=400)\n\n # Remove the list from the order (shift all lists after it back one).\n List.objects.filter(order__gt=ls.order).update(order=F(\"order\") - 1)\n\n # Add the list to the order (shift all lists after it forward one).\n List.objects.filter(order__gte=order).update(order=F(\"order\") + 1)\n\n ls.order = order\n\n ls.save()\n return JsonResponse({\"status\": 200})", "title": "" }, { "docid": "48f756b93dd3648e8c5739f3fe34bcb2", "score": "0.6656953", "text": "def edit_site_list(item_list, list_name, all_values, tenant_file_name):\n loop = True\n\n while loop:\n\n action = [\n (\"View list\", 'view'),\n (\"Add to list\", 'add'),\n (\"Remove items from list\", 'remove'),\n (\"Load/Save list\", 'file'),\n (\"Go Back\", 'back')\n ]\n\n banner = \"\\nSelect Action:\"\n line_fmt = \"{0}: {1}\"\n\n # just pull 2nd value\n selected_action = menus.quick_menu(banner, line_fmt, action)[1]\n\n if selected_action == 'view':\n print(\"\\n{0} ({1} entries):\".format(list_name, len(item_list)))\n for item in item_list:\n print(\"\\t{0}\".format(item))\n elif selected_action == 'add':\n item_list = add_to_list(item_list, list_name, all_values)\n elif selected_action == 'remove':\n item_list = remove_from_list(item_list, list_name, all_values)\n elif selected_action == 'file':\n item_list = load_save_list(item_list, list_name, all_values, tenant_file_name)\n elif selected_action == 'back':\n loop = False\n else:\n sys.exit()\n\n # return a shallow copy of site list\n return item_list[:]", "title": "" }, { "docid": "3347eab35f8b161ed6d59d086b7a861d", "score": "0.6606622", "text": "def _edit_listing(test_case_instance, id, input_data, default_user='bigbrother'):\n user = generic_model_access.get_profile(default_user).user\n test_case_instance.client.force_authenticate(user=user)\n url = '/api/listing/{0!s}/'.format(id)\n # GET Listing\n data = test_case_instance.client.get(url, format='json').data\n\n for current_key in input_data:\n if current_key in data:\n 
data[current_key] = input_data[current_key]\n\n # PUT the Modification\n response = test_case_instance.client.put(url, data, format='json')\n test_case_instance.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "bd5452fc7671ea15c9455d72cc2a5289", "score": "0.65433407", "text": "def update_detail(self, object_list, bundle):\n # get the original object (this is needed in case we are locking it)\n # bundle.obj is the modified value\n # the original can be found in object_list\n original = object_list.get(id=bundle.obj.id)\n user = get_real_user_object(bundle.request.user)\n if user.has_perm('collection.change_collection', original):\n # the user has permission to edit - but not all edits are permitted\n # it is a fairly complex setup - locked prevents certain things,\n # but not others etc. this isn't so much an authorisation issue but\n # a model issue however\n return True\n else:\n raise Unauthorized(\n \"This collection is locked and cannot be unlocked or modified.\"\n )", "title": "" }, { "docid": "2c61dff76d905a8b5ea8d6412cb09c66", "score": "0.6530129", "text": "def request_edit(self):\n pass", "title": "" }, { "docid": "f99ec2c258b75ceddaab04223feca2f5", "score": "0.6435584", "text": "def edit(self, asset_list_edit, asset_list_id):\n response = self._client.put('asset-lists/%(list_id)s', asset_list_edit, {'list_id': asset_list_id})\n return AssetList.from_json(response.text)", "title": "" }, { "docid": "e5e64b0fbe1a27597d406b68db379a4a", "score": "0.64300525", "text": "def manage_edit(data, title):", "title": "" }, { "docid": "acc16af1d05ad05682856b87db8d0850", "score": "0.63578504", "text": "def edit_item(list_name, item_name):\n user = session['username']\n\n user_items = item_object.show_items(user, list_name)\n list_items = [item for item in user_items if item['list'] == list_name]\n qty = ''\n prc = ''\n\n for list_item in list_items:\n if list_item['name'] == item_name:\n qty = list_item['quantity']\n prc = list_item['price']\n\n if request.method == 'POST':\n old_name = item_name\n new_name = request.form['name']\n quantity = request.form['quantity']\n price = request.form['price']\n\n status = item_object.update_item(old_name, new_name, list_name, user, quantity, price)\n\n if isinstance(status, list):\n response = \"\" + old_name + \" successfully edited\"\n flash(response)\n\n return redirect(url_for('view_list', name=list_name))\n\n flash(status)\n return redirect(url_for('view_list', name=list_name))\n\n return render_template(\"shopping_list_items_edit.html\", list_name=list_name, item_name=item_name,\n quantity=qty, price=prc)", "title": "" }, { "docid": "675c87856ccea6cc91f86236d329e08d", "score": "0.63349503", "text": "def can_edit_all(self):\n return True", "title": "" }, { "docid": "a2600ba606525f2576db6a9a6bc1e039", "score": "0.62400925", "text": "def edit():\r\n this_page = db.page(request.args(0,cast=int)) or redirect(URL('index'))\r\n form = SQLFORM(db.page,this_page).process(\r\n next = URL('show',args = request.args)\r\n )\r\n return dict(form=form)", "title": "" }, { "docid": "7d098b67986d05c718b865f7ca798c6a", "score": "0.6239211", "text": "def edited_list_items(self, edited_list_name, org_list_name):\n for shopping_item in self.item_list:\n if shopping_item['list'] == org_list_name:\n edit_dict = {\n 'list': edited_list_name\n }\n shopping_item.update(edit_dict)", "title": "" }, { "docid": "8b712b51caacc63050cb593fd4b1e8c9", "score": "0.61827546", "text": "def ModifyListValue(self, *args):\n return 
_IFSelect.IFSelect_EditForm_ModifyListValue(self, *args)", "title": "" }, { "docid": "7d3f252593834d7fc39004a2d77ecc8b", "score": "0.6178037", "text": "async def edit(self, **fields):\n ...", "title": "" }, { "docid": "38614ec4464e718578106e69b51902cb", "score": "0.6163403", "text": "def edit(cl_id, allowed_type, control_list):\n if allowed_type not in [\"lemma\", \"POS\", \"morph\"]:\n raise NotFound(\"Unknown type of resource.\")\n\n # In case of Post\n if request.method == \"POST\":\n allowed_values = request.form.get(\"allowed_values\")\n if allowed_type == \"lemma\":\n allowed_values = [\n x.replace('\\r', '')\n for x in allowed_values.split(\"\\n\")\n if len(x.replace('\\r', '').strip()) > 0\n ]\n elif allowed_type == \"POS\":\n allowed_values = [\n x.replace('\\r', '')\n for x in allowed_values.split(\",\")\n if len(x.replace('\\r', '').strip()) > 0\n ]\n else:\n allowed_values = list(StringDictReader(allowed_values))\n try:\n control_list.update_allowed_values(allowed_type, allowed_values)\n flash(\"Control List Updated\", category=\"success\")\n except PyrrhaError as exception:\n flash(\"A Pyrrha error occurred: {}\".format(exception), category=\"error\")\n except:\n flash(\"An unknown error occurred\", category=\"error\")\n\n values = control_list.get_allowed_values(allowed_type=allowed_type, order_by=\"id\")\n if allowed_type == \"lemma\":\n format_message = \"This should be formatted as a list of lemma separated by new line\"\n values = \"\\n\".join([d.label for d in values])\n elif allowed_type == \"POS\":\n format_message = \"This should be formatted as a list of POS separated by comma and no space\"\n values = \",\".join([d.label for d in values])\n else:\n format_message = \"The TSV should at least have the header : label and could have a readable column for human\"\n values = \"\\n\".join(\n [\"label\\treadable\"] + [\"{}\\t{}\".format(d.label, d.readable) for d in values]\n )\n return render_template_with_nav_info(\n \"control_lists/edit.html\",\n format_message=format_message,\n values=values,\n allowed_type=allowed_type,\n control_list=control_list\n )", "title": "" }, { "docid": "c5097c92e82cdfaa31059a36c2bd857f", "score": "0.6160228", "text": "def UpdateList(self, *args):\n return _IFSelect.IFSelect_Editor_UpdateList(self, *args)", "title": "" }, { "docid": "08e662e3a812078244705f421f0f16fc", "score": "0.61479366", "text": "def cl_editable(control_list_param: str):\n\n def wrapper(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n control_list, is_owner = ControlLists.get_linked_or_404(\n control_list_id=kwargs[control_list_param],\n user=current_user\n )\n can_edit = is_owner or current_user.is_admin()\n if not can_edit:\n flash(\"You are not an owner of the list.\", category=\"error\")\n return redirect(url_for(\".get\", control_list_id=kwargs[control_list_param]))\n return func(*args, control_list=control_list, **kwargs)\n return decorated_view\n return wrapper", "title": "" }, { "docid": "cd1b8de5d4fcbe910f7394abcbf573dc", "score": "0.61430645", "text": "def edit_listing(request, listing_id):\n listing = Listing.objects.get(id=listing_id)\n if listing.owner != request.user:\n raise Http404\n\n if request.method != 'POST':\n # Initial request; pre-fill form with the current entry.\n form = ListingForm(instance=listing)\n else:\n # POST data submittedl process data.\n form = ListingForm(instance=listing, data=request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('swift_realtys:listing', args=[listing.id]))\n \n context = 
{'listing': listing, 'form': form}\n return render(request, 'swift_realtys/edit_listing.html', context)", "title": "" }, { "docid": "7601d61be4486a92f315eb288d898c4f", "score": "0.6123556", "text": "def edit_plantlist(plantlist_id):\n plantlist = PlantList.query.get_or_404(plantlist_id)\n if g.user not in plantlist.users:\n flash(\"Not authorized to view this page.\", \"danger\")\n return redirect(url_for(\"homepage\"))\n\n form = PlantListAddForm(obj=plantlist)\n form.projects.choices = [(project.id, project.name,) for project in g.user.projects]\n form.plots.choices = [(plot.id, plot.name,) for plot in g.user.plots]\n\n if form.validate_on_submit():\n\n try:\n plantlist.edit(\n name=form.name.data,\n description=form.description.data,\n # is_public=form.is_public.data,\n )\n db.session.commit()\n g.user.plantlists.append(plantlist)\n\n # Append selected plots to the project\n for plot in form.plots.data:\n plot = Plot.query.get(plot)\n plantlist.plots.append(plot)\n # Append plot to selected projects\n for project in form.projects.data:\n project = Project.query.get(project)\n plantlist.projects.append(project)\n\n db.session.commit()\n\n except IntegrityError:\n flash(\"Failed to edit plant list.\", \"danger\")\n return render_template(\n \"plantlists/edit.html\", form=form, plantlist=plantlist\n )\n\n flash(\"Successfully edited plant list!\", \"success\")\n\n return redirect(url_for(\"show_plantlist\", plantlist_id=plantlist.id))\n\n return render_template(\"plantlists/edit.html\", form=form, plantlist=plantlist)", "title": "" }, { "docid": "368f0a6d08241d1e672ce0298a7ff910", "score": "0.6114772", "text": "def view_list(request, list_id):\n list_ = List.objects.get(id=list_id)\n form = ExistingListItemForm(for_list=list_)\n \n context = {'list': list_}\n\n if request.method == 'POST':\n form = ExistingListItemForm(for_list=list_, data=request.POST)\n \n if form.is_valid():\n form.save()\n \n return redirect(list_)\n\n context['form'] = form\n \n return render(request, 'lists/list.html', context)", "title": "" }, { "docid": "acc178ffa65c54c7cc2ade3eb13f9bb9", "score": "0.61122936", "text": "def editItemHandler(self, event):\n for i in self.tasklist.selectedItems():\n self.tasklist.editItem(i)", "title": "" }, { "docid": "b2506ebbfad034336191824752fa0b2b", "score": "0.6099714", "text": "def edit(data):", "title": "" }, { "docid": "6ceae47c6ffba06a1af6839a42758fe9", "score": "0.59823", "text": "def view_list(request, list_id=0, list_slug=None, view_completed=0):\n # Make sure the accessing user has permission to view this list.\n # Always authorize the \"mine\" view. Admins can view/edit all lists.\n\n\tif list_slug == \"mine\" or list_slug == \"recent-add\" or list_slug == \"recent-complete\":\n\t\tauth_ok = 1\n\telse:\n\t\tlist = get_object_or_404(List, slug=list_slug)\n\t\tlistid = list.id\n\n\t\t# Check whether current user is a member of the group this list belongs to.\n\t\tif list.team.uuid == request.user.profile.team.uuid or request.user.is_staff or list_slug == \"mine\":\n\t\t\tauth_ok = 1 # User is authorized for this view\n\t\telse: # User does not belong to the group this list is attached to\t\t\t\n\t\t\tmessages.error(request, \"You do not have permission to view/edit this list.\")\n\n # First check for items in the mark_done POST array. 
If present, change\n # their status to complete.\n\tif request.POST.getlist('mark_done'):\n\t\tdone_items = request.POST.getlist('mark_done')\n # Iterate through array of done items and update its representation in the model\n\t\tfor thisitem in done_items:\n\t\t\tp = Item.objects.get(id=thisitem)\n\t\t\tp.completed = 1\n\t\t\tp.completed_date = datetime.datetime.now()\n\t\t\tp.save()\n\t\t\tmessages.success(request, \"Item \\\"%s\\\" marked complete.\" % p.title)\n\n # Undo: Set completed items back to incomplete\n\tif request.POST.getlist('undo_completed_task'):\n\t\tundone_items = request.POST.getlist('undo_completed_task')\n\t\tfor thisitem in undone_items:\n\t\t\tp = Item.objects.get(id=thisitem)\n\t\t\tp.completed = 0\n\t\t\tp.save()\n\t\t\tmessages.success(request, \"Previously completed task \\\"%s\\\" marked incomplete.\" % p.title)\n\n # And delete any requested items\n\tif request.POST.getlist('del_task'):\n\t\tdeleted_items = request.POST.getlist('del_task')\n\t\tfor thisitem in deleted_items:\n\t\t\tp = Item.objects.get(id=thisitem)\n\t\t\tp.delete()\n\t\t\tmessages.success(request, \"Item \\\"%s\\\" deleted.\" % p.title)\n\n # And delete any *already completed* items\n\tif request.POST.getlist('del_completed_task'):\n\t\tdeleted_items = request.POST.getlist('del_completed_task')\n\t\tfor thisitem in deleted_items:\n\t\t\tp = Item.objects.get(id=thisitem)\n\t\t\tp.delete()\n\t\t\tmessages.success(request, \"Deleted previously completed item \\\"%s\\\".\" % p.title)\n\n\tthedate = datetime.datetime.now()\n\tcreated_date = \"%s-%s-%s\" % (thedate.year, thedate.month, thedate.day)\n\n # Get list of items with this list ID, or filter on items assigned to me, or recently added/completed\n\tif list_slug == \"mine\":\n\t\ttask_list = Item.objects.filter(assigned_to=request.user, completed=0)\n\t\tcompleted_list = Item.objects.filter(assigned_to=request.user, completed=1)\n\n\telif list_slug == \"recent-add\":\n # We'll assume this only includes uncompleted items to avoid confusion.\n # Only show items in lists that are in groups that the current user is also in.\n\t\ttask_list = Item.objects.filter(list__group__in=(request.user.groups.all()),\n completed=0).order_by('-created_date')[:50]\n # completed_list = Item.objects.filter(assigned_to=request.user, completed=1)\n\n\telif list_slug == \"recent-complete\":\n # Only show items in lists that are in groups that the current user is also in.\n\t\ttask_list = Item.objects.filter(list__group__in=request.user.groups.all(),\n completed=1).order_by('-completed_date')[:50]\n # completed_list = Item.objects.filter(assigned_to=request.user, completed=1)\n\n\telse:\n\t\ttask_list = Item.objects.filter(list=list.id, completed=0)\n\t\tcompleted_list = Item.objects.filter(list=list.id, completed=1)\n\n\tif request.POST.getlist('add_task'):\n\t\tform = AddItemForm(list, request.POST, initial={\n\t\t 'assigned_to': request.user.id,\n\t\t 'priority': 2,\n\t\t})\n\t\t\n\t\tif form.is_valid():\n # Save task first so we have a db object to play with\n\t\t\tnew_task = form.save()\n\n # Send email alert only if the Notify checkbox is checked AND the assignee is not the same as the submittor\n # Email subect and body format are handled by templates\n\t\t\tif \"notify\" in request.POST:\n\t\t\t\t# if new_task.assigned_to == request.user:\n\n # Send email\n\t\t\t\temail_subject = render_to_string(\"todolist/email/assigned_subject.txt\", {'task': new_task})\n\t\t\t\temail_body = render_to_string(\"todolist/email/assigned_body.txt\",\n\t\t\t\t {'task': 
new_task, 'site': current_site, })\n\t\t\t\ttry:\n\t\t\t\t\tsend_mail(email_subject, email_body, new_task.created_by.email, [new_task.assigned_to.email],fail_silently=False)\n\t\t\t\texcept:\n\t\t\t\t\tmessages.error(request, \"Task saved but mail not sent. Contact your administrator.\")\n\n\t\t\tmessages.success(request, \"New task \\\"%s\\\" has been added.\" % new_task.title)\n\n\t\t\treturn HttpResponseRedirect(request.path)\n\telse:\n # We don't allow adding a task on the \"mine\" view\n\t\tif list_slug != \"mine\" and list_slug != \"recent-add\" and list_slug != \"recent-complete\":\n\t\t\tform = AddItemForm(list, initial={\n\t\t\t 'assigned_to': request.user.id,\n\t\t\t 'priority': 999,\n\t\t\t})\n\n\tif request.user.is_staff:\n\t\tcan_del = 1\n\n\treturn render_to_response('todolist/view_list.html', locals(), context_instance=RequestContext(request))", "title": "" }, { "docid": "8b19ffeeb7e6c937ebc80d508e508e19", "score": "0.5982274", "text": "def playlists_edit(file_id):\n stls = files.find_one({'_id': ObjectId(file_id)})\n return render_template('files_edit.html', stl=stls, title='Edit Files')", "title": "" }, { "docid": "f0779f7384632a9bb72ed835e9ac0435", "score": "0.5948604", "text": "def edit():\n return render_template('edit.html')", "title": "" }, { "docid": "e26a31f898a0c74e70d52c24c027bc1b", "score": "0.59266186", "text": "def editar_persona(self, listado):\n\n self.listar_persona(listado)\n print \"\\n\\n\"\n idpersona = raw_input(self.txt_idpersona)\n print \"\\n\"\n nombre = raw_input(self.txt_nombre)\n apellido = raw_input(self.txt_apellido)\n\tdni = raw_input(self.txt_dni)\n\treturn (idpersona, nombre, apellido, dni)", "title": "" }, { "docid": "a71945cfeb969ad35feb890a0ccc6640", "score": "0.5923755", "text": "def test_edit_admin(self):\n self.client.force_authenticate(user=self.admin_user)\n response = self.client.patch(_get_detail_url(1, 1), submission_data(self.admin_user))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "1e6c32ef8915c1d6ef7c76fd0fdece12", "score": "0.5874132", "text": "def edit(self):\r\n\t\treturn False", "title": "" }, { "docid": "141cb872a7870e5e2ee371cd9e19962c", "score": "0.58735824", "text": "def OnEdit(self, event):\n \n index = self.list.GetSelection()\n \n if self.edit_callback != None and index >= 0:\n data = self.edit_callback(self.list.GetClientData(index))\n if self.style & CLE_STRING and not isinstance(data, (str, unicode)):\n raise TypeError('The add callback must return a string')\n elif self.style & CLE_LIST and not isinstance(data, (list, tuple)):\n raise TypeError('The add callback must return a sequence')\n elif self.style & CLE_DICT and not isinstance(data, dict):\n raise TypeError('The add callback must return a dictionary')\n \n self.modified = True\n if self.style & CLE_LIST or self.style & CLE_DICT:\n self.list.SetString(index, data[self.object_key])\n self.list.SetClientData(index, data)\n else:\n self.list.SetString(index, data)\n self.list.SetClientData(index, data)", "title": "" }, { "docid": "b937acec8b61f77ac21be90cc02ea3af", "score": "0.58521885", "text": "def edit_item(self, item_name, org_item_name, list_name, user):\n # Get users items\n my_items = self.owner_items(user, list_name)\n for item in my_items:\n if item['list'] == list_name:\n if item['name'] != item_name:\n if item['name'] == org_item_name:\n edit_dict = {\n 'name': item_name,\n }\n item.update(edit_dict)\n else:\n return \"Item name already exists\"\n return self.owner_items(user, list_name)", "title": 
"" }, { "docid": "b5696550d969dc181ab0f2a9b6fea24c", "score": "0.58359826", "text": "def if_can_edit(self, request, *args, **kwargs):\n if self.determine_perms(self, request, *args, **kwargs)['can_edit']:\n return\n raise PermissionDenied", "title": "" }, { "docid": "e415e2c0a511ce678e309eb849920891", "score": "0.5694459", "text": "def OriginalList(self, *args):\n return _IFSelect.IFSelect_EditForm_OriginalList(self, *args)", "title": "" }, { "docid": "699f1eddcb8660a0d98b8bcce915280e", "score": "0.5686461", "text": "def on_edit_item(self, event):\n\t\t\n\t\tself.list_partitions.edit_partition()", "title": "" }, { "docid": "de600b8b5f61eae3c5c48e02d6b1d3d6", "score": "0.56743777", "text": "def persp_edit(form, db, param, conn):\n\n nUser = int(form.getfirst('user_id', '0'))\n\n User = dbobj.adultrec(db, nUser)\n if not User.found:\n app_error(form, param, conn, message=\"Invalid User identifier\")\n return\n\n # Can only edit personal details\n if nUser != conn.scout_id:\n #go to top of browse tree\n security_page(form, param, conn)\n return \n\n User.forename \t= form.getfirst('forename', '')\n User.initials \t= form.getfirst('initials', '')\n User.surname \t\t= form.getfirst('surname', '')\n User.date_of_birth \t= form.getfirst('date_of_birth', '')\n User.mobile\t \t= form.getfirst('mobile', '')\n User.email\t \t= form.getfirst('email', '')\n User.add_info \t= form.getfirst('add_info', '')\n User.gender\t \t= form.getfirst('gender', '')\n User.addr1\t \t= form.getfirst('addr1', '')\n User.addr2\t \t= form.getfirst('addr2', '')\n User.addr3\t \t= form.getfirst('addr3', '')\n User.p_code\t \t= form.getfirst('p_code', '')\n User.telephone_h \t= form.getfirst('telephone_h', '')\n User.telephone_w \t= form.getfirst('telephone_w', '')\n\n if User.forename == '' or User.surname == '':\n persf_edit(form, db, param, conn)\n return\n\n if not val_date(User.date_of_birth):\n persf_edit(form, db, param, conn)\n return\n\n User.update()\n\n profile(form, db, param, conn)\n\n return", "title": "" }, { "docid": "8f02a144fd2e7cc124747c13ad9be21e", "score": "0.5670872", "text": "def ListEditor(self, *args):\n return _IFSelect.IFSelect_EditForm_ListEditor(self, *args)", "title": "" }, { "docid": "7a9829fa239e956bfa6a53cc2e8a7eda", "score": "0.5661941", "text": "def edit_post(self, modelName, key):\n modelAdmin = getModelAdmin(modelName)\n item = self._safeGetItem(modelAdmin.model, key)\n #item.author = users.get_current_user()\n form = modelAdmin.AdminForm(urlPrefix = self.urlPrefix, data = self.request.POST, instance = item)\n if form.is_valid():\n # Save the data, and redirect to the edit page\n item = form.save()\n self.redirect(\"%s/%s/edit/%s/\" % (self.urlPrefix, modelAdmin.modelName, item.key()))\n else:\n templateValues = {\n 'models': self.models,\n 'urlPrefix': self.urlPrefix,\n 'item' : item,\n 'moduleTitle': modelAdmin.modelName,\n 'editForm': form,\n 'readonlyProperties': self._readonlyPropsWithValues(item, modelAdmin),\n }\n path = os.path.join(ADMIN_TEMPLATE_DIR, 'model_item_edit.html')\n self.response.out.write(template.render(path, templateValues))", "title": "" }, { "docid": "e02c6b413cb3f6264a818fb0c803fab7", "score": "0.56582236", "text": "def get_allowed_edit():", "title": "" }, { "docid": "add04db9a885b1b5715678db6ce94c28", "score": "0.56488675", "text": "def edit(value):\n user = User(session['username'], _id=session['user_id'])\n return render_template('forms/EditForm/edit_page.html', form=user.get_job_by_row_id(str(value)))", "title": "" }, { "docid": 
"576e1c456fc6cc5c45178b54f2e270c9", "score": "0.5638598", "text": "def UpdateLists(self):\n self.UpdateItemsList()\n self.UpdateTypeList()\n self.UpdateSetList()", "title": "" }, { "docid": "1ecb80c766f13530400428621fafc4bf", "score": "0.5628818", "text": "def edit_bucketlist(self):\n bucketlists_dict = Bucketlist.bucketlists\n for item in bucketlists_dict.values():\n if session['user_id'] == item['user_id']:\n for key, val in bucketlists_dict.items():\n if key == int(request.form['key']):\n print('To be edited =', bucketlists_dict[key])\n existing_owner = val['user_id']\n bucketlists_dict[key] = {'user_id': existing_owner, 'name': self.name, 'description': self.description}\n\n return bucketlists_dict", "title": "" }, { "docid": "e674eb178c03b497bb81e5960cc0f05e", "score": "0.5620375", "text": "def TouchList(self, *args):\n return _IFSelect.IFSelect_EditForm_TouchList(self, *args)", "title": "" }, { "docid": "8758eb31b3a49cc37f5284822d816e04", "score": "0.5615431", "text": "def edit(username):\n\n loggedIn = True if 'username' in session else False\n\n if loggedIn is False:\n return redirect(url_for('forbidden'))\n\n if request.method == 'POST' and request.form['btn'] == 'save':\n db.profile.update_many(\n {'username': session['username']},\n {\"$set\": {\n 'shortDescription': request.form.get('shortDescription'),\n \"imgURL\": request.form.get('imgURL'),\n \"district\": request.form.get('district'),\n \"skills\": request.form.getlist(\"skills\"),\n \"description\": request.form.get('description'),\n \"desiredSkills\": request.form.getlist(\"desiredSkills\"),\n \"communicationStyle\": request.form.getlist(\"communicationStyle\"),\n \"otherDetails\": request.form.getlist(\"other\"),\n \"github\": request.form.get('github'),\n }})\n\n flash(\n \"Edits saved successfully. 
Scroll down to preview and publish.\",\n \"success\")\n\n return redirect(url_for('profile', username=session['username']))\n\n username = db.profile.find_one({\"username\": username})\n\n return render_template(\n 'pages/editprofile.html', loggedIn=loggedIn, username=username,\n active=\"profile\", skills=skills,\n commstyles=commstyles, other=other)", "title": "" }, { "docid": "266a8be00c7686f3c2829ca59b2359ee", "score": "0.56129473", "text": "def allow_bulk_edit_for_view(self, fake_request):\n current_request = self.context['request']\n return fake_request.path.startswith(current_request.path)", "title": "" }, { "docid": "cd8b6fb34c371e000b31c63cdde97d60", "score": "0.5611685", "text": "def edit_playlist(playlist_id):\n playlist = Playlist.query.get(playlist_id)\n session[\"current_playlist_name\"] = playlist.name # Save playlist name prior to editing\n\n links = playlist.links\n\n form = EditPlaylistForm(obj=playlist)\n if request.method == 'GET':\n form.play_on_start = playlist.play_on_start\n if playlist.links:\n # if there are links, then retrieve the movies that they link to\n links = playlist.links\n movies = [Movie.query.get(link.movie_id) for link in links]\n form.movies.data = [movie for movie in movies]\n\n if form.validate_on_submit():\n # Update name and movie list of playlist\n # playlist.play_on_start = form.play_on_start.data\n playlist.update_playlist(form.name.data, form.play_on_start.data, form.movies.data)\n return redirect(url_for('main.playlist_list'))\n\n return render_template('edit_playlist.html', form=form)", "title": "" }, { "docid": "581886c9a7f72652c726d51ff74216f8", "score": "0.5588032", "text": "def update_todo_list(self, list_id, name, description, milestone_id=None,\n private=None, tracked=None):\n path = '/todos/update_list/%u' % list_id\n req = ET.Element('request')\n list_ = ET.SubElement('list')\n ET.SubElement(list_, 'name').text = str(name)\n ET.SubElement(list_, 'description').text = str(description)\n if milestone_id is not None:\n ET.SubElement(list_, 'milestone_id').text = str(int(milestone_id))\n if private is not None:\n ET.SubElement(list_, 'private').text = str(bool(private)).lower()\n if tracked is not None:\n ET.SubElement(list_, 'tracked').text = str(bool(tracked)).lower()\n return self._request(path, req)", "title": "" }, { "docid": "9d7ae8ba5d537b763bef27b5d6b66e3c", "score": "0.5586192", "text": "def edit_list(self, custom_list, works_qu, save_option, featured_identifiers):\n input_editions = [work.presentation_edition for work in works_qu]\n\n if save_option in self.ADD_OPTIONS:\n # We're just adding to what's there. 
No need to get fancy.\n self.log.info(\"Adding %d editions to %r\", len(input_editions), custom_list)\n input_editions = self.editions_with_featured_status(\n input_editions, featured_identifiers\n )\n [custom_list.add_entry(e, featured=f) for e, f in input_editions]\n\n if save_option == 'remove':\n self.log.info(\n \"Removing %d editions from %r\",\n len(input_editions), custom_list\n )\n [custom_list.remove_entry(e) for e in input_editions]\n\n if save_option == 'replace':\n list_editions = set([e.edition for e in custom_list.entries])\n overwritten_editions = list(list_editions.difference(input_editions))\n # Confirm that the editions we believe aren't in the list of\n # input editions, *actually* aren't represented in the list.\n overwritten_editions = self._confirm_removal(\n custom_list, overwritten_editions, input_editions\n )\n\n self.log.info(\n \"Removing %d editions from %r\",\n len(overwritten_editions), custom_list\n )\n [custom_list.remove_entry(e) for e in overwritten_editions]\n\n self.log.info(\n \"Adding %d editions to %r\", len(input_editions), custom_list\n )\n input_editions = self.editions_with_featured_status(\n input_editions, featured_identifiers\n )\n [custom_list.add_entry(e, featured=f) for e, f in input_editions]\n\n if save_option in self.EDIT_OPTIONS:\n custom_list.updated = datetime.utcnow()", "title": "" }, { "docid": "cf83690abe22e1c80bafa16515e6201c", "score": "0.55847496", "text": "def LoadEdited(self, *args):\n return _IFSelect.IFSelect_ListEditor_LoadEdited(self, *args)", "title": "" }, { "docid": "813f5be23dc0632907554932adb607b3", "score": "0.556694", "text": "def set_allowed_edit(group_ids):", "title": "" }, { "docid": "64f48e548d76cbf3cca0897284076271", "score": "0.55137664", "text": "def changelist_view(self, request, extra_context=None):\n model = self.model\n obj = model.singleton.get()\n return redirect(\n reverse(\n 'admin:%s_%s_change' % (\n model._meta.app_label, model._meta.model_name\n ),\n args=(obj.id,)\n )\n )", "title": "" }, { "docid": "a14186fa0dc92b812224e7e86cb4c7e4", "score": "0.5508839", "text": "def delete_detail(self, object_list, bundle):\n original = object_list.get(id=bundle.obj.id)\n user = get_real_user_object(bundle.request.user)\n if user.has_perm('collection.delete_collection', original):\n # the user has permission to edit - but not all edits are permitted\n # it is a fairly complex setup - locked prevents certain things,\n # but not others etc. 
this isn't so much an authorisation issue but\n # a model issue however\n return True\n else:\n raise Unauthorized(\n \"This collection is locked and cannot be unlocked or modified.\"\n )", "title": "" }, { "docid": "566ad27c6ac8e1adc2d33b13ebcc48ed", "score": "0.5480662", "text": "def test_edit_regular(self):\n self.client.force_authenticate(user=self.regular_user)\n response = self.client.patch(_get_detail_url(1, 1), submission_data(self.regular_user))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "68b71ddbbb9afb49bd7481dde20054b4", "score": "0.5471629", "text": "def get(self, request, list_id):\n book_list = get_object_or_404(models.List, id=list_id)\n book_list.raise_not_editable(request.user)\n\n data = {\n \"list\": book_list,\n \"pending\": book_list.listitem_set.filter(approved=False),\n \"list_form\": forms.ListForm(instance=book_list),\n }\n return TemplateResponse(request, \"lists/curate.html\", data)", "title": "" }, { "docid": "5291091bbfca1699ac9cc7269829149b", "score": "0.5461846", "text": "def edit(self):\n super().edit()\n self.setup_playlist()", "title": "" }, { "docid": "bea302e05de2457000da8adc39b6b41c", "score": "0.54350793", "text": "def test_favorite_list_view_put_invalid_update(self):\n path = reverse('api:id-favlists', kwargs={'id': self.favlist.id})\n request = self.factory.put(path)\n request.data = {\n 'employees': ['Employee #1'],\n }\n response = FavListView.put(self, request, id=self.favlist.id)\n assert response.status_code == 304", "title": "" }, { "docid": "3bfbfdb9cb9c6610afcfd7484489980e", "score": "0.5418849", "text": "def update_list(perm_objs, list_cls, filter):\n \n list_obj, created = list_cls.objects.get_or_create(**filter) \n old_perms = copy(list_obj.permission_list)\n\n perm_strs = ['%s.%s' % (perm.content_type.app_label, perm.codename) \\\n for perm in perm_objs]\n perm_ids = [perm.id for perm in perm_objs]\n \n for perm in old_perms:\n try: \n perm_strs.index(perm)\n except ValueError:\n i = list_obj.permission_list.index(perm)\n list_obj.permission_list.pop(i)\n list_obj.permission_fk_list.pop(i)\n\n i = 0 \n for perm in perm_strs:\n try:\n old_perms.index(perm)\n except ValueError:\n list_obj.permission_list.append(perm)\n list_obj.permission_fk_list.append(perm_ids[i])\n i += 1\n\n list_obj.save()", "title": "" }, { "docid": "639afad64555ea6fcb74ad139138968c", "score": "0.5414315", "text": "def test_update_staff(self):\n request = self.factory.put(\"/\")\n permission = self.permission()\n permission.change_permissions = (allow_staff,)\n self.check_permission(permission, request)", "title": "" }, { "docid": "bd1b9bae4b01fc867919b2df294db448", "score": "0.54115283", "text": "def test_Edit(self):\n self.assertIsNotNone(manager.editPlaylist(str(idPlaylist), 'cuarentenaNoLogin', [10]))", "title": "" }, { "docid": "d5f27691db5ecdcc06c208177c70e21d", "score": "0.54004514", "text": "def test_Edit(self):\n self.assertIsNotNone(manager.editPlaylist(str(idPlaylist), 'cuarentena25', [12]))", "title": "" }, { "docid": "8750cf94e72628c2cd1b6baa83d54794", "score": "0.5385478", "text": "def test_Edit(self):\n self.assertIsNotNone(manager.editPlaylist(str(idPlaylist), 'cuarentena35', [8]))", "title": "" }, { "docid": "c7f7be3fe76b1edfae99bdc27d036a58", "score": "0.53678", "text": "def edit(trainer, request):\n\n # XXX This is getting to be a lot of forms; it might actually be easier at\n # this point to just make them all one form and complicate validation a bit\n stuff = {\n 'trainer': trainer,\n 'form': 
TrainerEditForm(prefix='edit', csrf_context=request.session),\n 'password_form': TrainerPasswordResetForm(\n prefix='password', csrf_context=request.session\n ),\n 'ban_form': TrainerBanForm(prefix='ban', csrf_context=request.session)\n }\n\n stuff['form'].set_roles(trainer)\n return stuff", "title": "" }, { "docid": "7f4805f89d54da9e80318d6d70953631", "score": "0.53659207", "text": "def list_view(request, list_id):\n list_items = list_item.objects.all().filter(item_list=list_id)\n context = {\n 'data' : list_items\n }\n return render(request, \"list.html\", context)", "title": "" }, { "docid": "d26bae9fb9b1d32455fe4b7ad4a6a017", "score": "0.53496397", "text": "def edit_entry(self, index, entries):\n\n edit_options = {\"1\": \"Task Name\",\n \"2\": \"Time Spent\", \"3\": \"Notes\", \"4\": \"Date\"}\n\n print()\n\n for k, v in edit_options.items():\n print(k + \". \" + v)\n\n while True:\n user_choice = input(\n \"\\nEnter the number of the value you would like to edit: \")\n\n print()\n\n if user_choice == '1':\n entries[index].set_name()\n self.update_entries(entries)\n print(\"Task name has been updated.\")\n self.main_menu()\n elif user_choice == '2':\n entries[index].set_time_spent()\n self.update_entries(entries)\n print(\"Time spent has been updated.\")\n self.main_menu()\n elif user_choice == '3':\n entries[index].set_notes()\n self.update_entries(entries)\n print(\"Task notes have been updated.\")\n self.main_menu()\n elif user_choice == '4':\n entries[index].set_date()\n self.update_entries(entries)\n print(\"Task date has been updated.\")\n self.main_menu()\n else:\n self.main_menu()", "title": "" }, { "docid": "c371ada0c762dd913242726573b8a310", "score": "0.53426975", "text": "def test_update_object_staff(self):\n request = self.factory.put(\"/\")\n permission = self.permission()\n permission.object_change_permissions = (allow_staff, self.has_access)\n self.check_object_permission(permission, request)", "title": "" }, { "docid": "58caf433a56a17a35e37b7be60d724d1", "score": "0.5341461", "text": "def ClearEdit(self, *args):\n return _IFSelect.IFSelect_ListEditor_ClearEdit(self, *args)", "title": "" }, { "docid": "dd3c488e60f0aa032f81d783ce912208", "score": "0.53406227", "text": "def edit():\n conn = sqlite3.connect('posts')\n cursor = conn.cursor()\n \n post_id = request.args.get('id')\n post_title = request.args.get('title')\n post_description = request.args.get('desc')\n values = (post_title, post_description, post_id)\n \n cursor.execute(\"UPDATE posts SET title = (?),description = (?) 
WHERE id = (?);\", (*values, ))\n conn.commit()\n conn.close()\n return redirect('/')", "title": "" }, { "docid": "57fd4be4062710d0604f441e534e0638", "score": "0.5322819", "text": "def onlinef_edit(form, db, param, conn, id_msg = '', email_msg = '', pw_msg = ''):\n if not conn.superuser:\n app_error(form, param, conn, message = 'Invalid authority')\n\n next_prog = form.getfirst('next_prog', '')\n call_prog = form.getfirst('call_prog', '')\n scout_id = int(form.getfirst('person_id', '0'))\n\n adult = dbobj.adultrec(db, scout_id)\n if not adult.found:\n app_error(form, param, conn, message = 'Invalid identifier')\n return\n\n ol_id = form.getfirst('ol_id', adult.on_line_id)\n ol_email = form.getfirst('ol_email', adult.email)\n\n jw_header(param, conn, menu_item = 801)\n\n print '<hr>'\n print adult_disp_dets(adult, param) + '<BR>'\n\n table = webproc.table(width='100%', cellpadding = param.it_cellpad, cellspacing = param.it_cellspc, border = param.it_brdr)\n\n if id_msg == '':\n edit_row(table, 'On-line ID:', 'ol_id', ol_id) \n else:\n edit_row(table, 'On-line ID:', 'ol_id', ol_id, 0, id_msg, req=1) \n if email_msg == '':\n edit_row(table, 'E-Mail:', 'ol_email', ol_email) \n else:\n edit_row(table, 'E-Mail:', 'ol_email', ol_email, 0, email_msg, req=1) \n #print '<hr>'\n\n item = table.add_row().add_item('Password')\n if pw_msg != '':\n item.data += '<br><SPAN CLASS=\"validation_message\">' + pw_msg + '</SPAN>'\n item.styleclass = 'error'\n\n table.last_row().add_item('<INPUT TYPE=\"PASSWORD\" NAME=\"pass1\">')\n\n table.add_row().add_item('Repeat Password')\n table.last_row().add_item('<INPUT TYPE=\"PASSWORD\" NAME=\"pass2\">')\n\n #item = webproc.table_item('<INPUT TYPE=\"PASSWORD\" NAME=\"password1\">')\n\n item = table.add_row().add_item('<INPUT TYPE=\"SUBMIT\" VALUE=\"SUBMIT\">')\n item.data += '<INPUT TYPE=\"HIDDEN\" NAME=\"jw_action\" VALUE=\"onlinep_edit\"'\n item.data += '<INPUT TYPE=\"HIDDEN\" NAME=\"next_prog\" VALUE=\"' + next_prog + '\">'\n item.data += '<INPUT TYPE=\"HIDDEN\" NAME=\"call_prog\" VALUE=\"' + call_prog + '\">'\n item.data += '<INPUT TYPE=\"HIDDEN\" NAME=\"scout_id\" VALUE=' + str(scout_id) + '>'\n\n print webproc.tag('FORM', table.pr_table(), 'METHOD=\"POST\", NAME=\"ONLINEF_EDIT\" ACTION=\"office.py\"')\n\n\n webproc.form_footer()\n return", "title": "" }, { "docid": "0947946730139243f6b8282cb90c0c50", "score": "0.53208935", "text": "def edit(self) :\n print(\"edit\")\n\n # Show the exams available\n self.mode_run(\"Show\")\n\n # * Ask user fo the id\n id = self.id_input()\n\n # Confirmation question\n confirmation = [\n {\n 'type': 'confirm',\n 'message': 'Are you sure you want to edit exam ?',\n 'name': 'confirm',\n 'default': False,\n }\n ]\n\n confirmationAnswer = prompt(confirmation, style=custom_style_1)\n\n if confirmationAnswer['confirm'] == True :\n # Connect to a database -> if does not exist -> create\n conn = create_connection(self.database)\n\n # * Edit the exam with given id\n editQuestion = [\n {\n 'type': 'list',\n 'name': 'item',\n 'message': 'Which item would you like to edit ?',\n 'choices': [\n 'Class Code',\n 'Type',\n 'Date',\n 'Quit'\n ]\n },\n ] \n\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM exams WHERE id=?\", (id,))\n data = cur.fetchall()\n\n # Saving the data for delivery\n id = data[0][0]\n classCode = data[0][1]\n type = data[0][2]\n date = data[0][3]\n daysLeft = data[0][4]\n studyTime = data[0][5]\n\n # Editing the specific data the user wants to edit\n quitFlag = False\n while quitFlag != True :\n editQuestionAnswer = 
prompt(editQuself.id - idestion, style=custom_style_2)\n \n if editQuestionAnswer['item'] == 'Quit' :\n quitFlag = True\n \n # Saves the changes you made and quit\n conn.commit()\n conn.close()\n return\n\n if editQuestionAnswer['item'] == 'Type' :\n type = self.type_input()\n\n elif editQuestionAnswer['item'] == 'Date' :\n date = self.date_input()\n\n # Find out how many days left and if less than 5 -> make it bright red\n daysLeft = self.days_left(parse(date).date())\n if daysLeft < 10 :\n daysLeft = str(daysLeft)\n daysLeft = colored(daysLeft,'white', 'on_red',attrs=['bold'])\n else :\n daysLeft = str(daysLeft)\n\n else :\n classCode = self.classCode_input()\n\n # Update\n update_exam(conn, id, classCode, type, date, daysLeft, studyTime)\n\n else :\n print(\"Operation Canceled.\")", "title": "" }, { "docid": "8e955cf4056eb604bc2cc973d341cf62", "score": "0.5317684", "text": "def edit_page():\n\n user = User.query.filter_by(email=session['current_user']).one()\n\n return render_template(\"edit_profile.html\", email=user.email, username=user.username,\n fname=user.fname, lname=user.lname, about_me=user.description, user=user)", "title": "" }, { "docid": "4772c47e15b070262d7816bdcf74969b", "score": "0.5317436", "text": "def update_form(self, list_entry):\n for field in model.ENTRY_FIELDS:\n list_entry_field = getattr(list_entry, field)\n self.textboxes[field].set_text(list_entry_field)\n return", "title": "" }, { "docid": "5ff024356aabc86f09d15b8c60c44ca0", "score": "0.5308581", "text": "def updateView(self, data):\n try:\n pass\n except IndexError:pass", "title": "" }, { "docid": "3abb7d0b30080e06439ac158b5603148", "score": "0.52975947", "text": "def test_edit_attended(self):\n self.client.force_authenticate(user=self.attended_user)\n response = self.client.patch(_get_detail_url(1, 1), submission_data(self.attended_user))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "60a89f0d813f756e201830342168f807", "score": "0.5292873", "text": "def editContactItem(contact, **parameters):", "title": "" }, { "docid": "872a0e6b65a2550ef758f86b9567eda0", "score": "0.5291019", "text": "def edit_item(category, item_id):\n item = Item.query.get(int(item_id))\n if item.user_id != current_user.id:\n return redirect(url_for('list_category', category=category))\n else:\n if is_not_authorized(item_id):\n return render_template('negado.html')\n return render_template(\n 'item_form.html', item=item, categories=Category.query.all(),\n target_url=url_for('save_item', item_id=item.id))", "title": "" }, { "docid": "fff91ec7645793fa17697371542a008d", "score": "0.52889204", "text": "def edit():\n site_id = request.args[0]\n response.title = T(\"Edit site\")\n response.view = 'general/only_content.html'\n \n if not auth.has_membership('admin'):\n db.sites.group_id.readable = False\n db.sites.group_id.writable = False\n db.sites.group_id.default = get_group_id()\n \n return_url = URL('index')\n \n crud.messages.submit_button = T(\"Update\")\n crud.messages.record_updated = T(\"Updated site\")\n crud.settings.update_next = return_url\n crud.settings.update_onaccept = [ log_update, set_update_permission ]\n crud.settings.update_deletable = True\n form = crud.update(db.sites, site_id)\n \n back = get_button('back', return_url)\n \n return dict(content=form, back=back)", "title": "" }, { "docid": "64d5925674c24f0e667ff4858ff42137", "score": "0.52809787", "text": "def set_list(self, new_list):\n self.__students = new_list", "title": "" }, { "docid": 
"e1601032282b622ec90bf3e0a4fea263", "score": "0.52795047", "text": "def playlist_edit(self, filter_id: int, name=False,\n object_type=False):\n ampache_url = self.AMPACHE_URL + '/server/' + self.AMPACHE_API + '.server.php'\n data = {'action': 'playlist_edit',\n 'auth': self.AMPACHE_SESSION,\n 'filter': filter_id,\n 'name': name,\n 'type': object_type}\n if not name:\n data.pop('name')\n if not object_type:\n data.pop('type')\n data = urllib.parse.urlencode(data)\n full_url = ampache_url + '?' + data\n ampache_response = self.fetch_url(full_url, self.AMPACHE_API, 'playlist_edit')\n if not ampache_response:\n return False\n return self.return_data(ampache_response)", "title": "" }, { "docid": "dbfbc39a9691c06dc9ddc4d622370000", "score": "0.5278585", "text": "def EditItem(self, item, column):", "title": "" }, { "docid": "3c65e2bd6a460699656ed8999e67b40d", "score": "0.52754456", "text": "def LoadList(self, *args):\n return _IFSelect.IFSelect_EditForm_LoadList(self, *args)", "title": "" }, { "docid": "6e84497d061aac7266c48ee2ca265246", "score": "0.52735996", "text": "def edit_user(user_id):\n\n if session and session['logged_in']:\n cur_user = User.query.filter_by(id = user_id).first()\n user_is_admin = session['user_is_admin']\n editing_self = False\n\n if cur_user.username == session['username']:\n editing_self = True\n check_admin = ''\n\n if cur_user.is_admin == 1:\n check_admin = 'checked'\n\n return render_template(\n 'admin/admin_user_edit.html',\n user_is_admin = user_is_admin,\n site_name = config.site_name,\n cur_user = cur_user,\n check_admin = check_admin,\n editing_self = editing_self,\n powered_by_link = admin.create_powered_by_link())\n else:\n return redirect('/admin/login')", "title": "" }, { "docid": "da3e9a8a915c2377dfcbf1a0667e2d50", "score": "0.5273161", "text": "def SetList(self, *args):\n return _pcbnew.EDA_ITEM_SetList(self, *args)", "title": "" }, { "docid": "c4ad88064da3f018f748cbeb8026ef44", "score": "0.52697194", "text": "def edit_personalinfo(id):\n check_admin()\n\n personalinfo = Employee.query.get_or_404(id)\n form = PersonalInfoForm(obj=personalinfo)\n if form.validate_on_submit():\n personalinfo.first_name = form.first_name.data\n personalinfo.last_name = form.last_name.data\n personalinfo.middle_name = form.middle_name.data\n personalinfo.dob = form.dob.data\n personalinfo.email = form.email.data\n personalinfo.street = form.street.data\n personalinfo.city = form.city.data\n personalinfo.zip = form.zip.data\n personalinfo.state = form.state.data\n personalinfo.home_phone = form.home_phone.data\n personalinfo.cell_phone = form.cell_phone.data\n\n db.session.commit()\n flash('You have successfully edited the employee.')\n\n # redirect to the employee page\n return redirect(url_for('admin.list_personalinfos'))\n\n form.first_name.data = personalinfo.first_name\n form.last_name.data = personalinfo.last_name\n form.middle_name.data = personalinfo.middle_name\n form.dob.data = personalinfo.dob\n form.email.data = personalinfo.email\n form.street.data = personalinfo.street\n form.city.data = personalinfo.city\n form.zip.data = personalinfo.zip\n form.state.data = personalinfo.state\n form.home_phone.data = personalinfo.home_phone\n form.cell_phone.data = personalinfo.cell_phone\n\n return render_template('admin/personalinfos/personalinfo.html', action=\"Edit\",\n form=form,\n personalinfo=personalinfo, title=\"Edit Personal Info\")", "title": "" }, { "docid": "0bab15bfbf5a83f0b84f3b5d5a8eb5a6", "score": "0.5253029", "text": "def edit(todo_id):\n if request.method == 
'POST':\n old_task = Todo.get_by_id(todo_id, DATABASE_PATH)\n name = request.form['name']\n task = Todo(old_task.id, name, old_task.done)\n task.update(os.path.join(app.root_path, 'hello.db'))\n return redirect(\"/\")\n elif request.method == 'GET':\n obj = Todo.get_by_id(todo_id, DATABASE_PATH)\n old_name = obj.name\n return render_template('update.html', old_name=old_name)", "title": "" }, { "docid": "85b87cb18b1675e41e6fe876644572c7", "score": "0.52529025", "text": "def edit_get(self, modelName, key = None):\n modelAdmin = getModelAdmin(modelName)\n item = self._safeGetItem(modelAdmin.model, key)\n templateValues = {\n 'models': self.models,\n 'urlPrefix': self.urlPrefix,\n 'item' : item,\n 'moduleTitle': modelAdmin.modelName,\n 'editForm': modelAdmin.AdminForm(urlPrefix = self.urlPrefix, instance = item),\n 'readonlyProperties': self._readonlyPropsWithValues(item, modelAdmin),\n }\n path = os.path.join(ADMIN_TEMPLATE_DIR, 'model_item_edit.html')\n self.response.out.write(template.render(path, templateValues))", "title": "" }, { "docid": "24fa7efd77fdfc7a367bca00a8a3b373", "score": "0.52504736", "text": "def edit(self) -> Tuple[str, ...]:\n return self.__edit", "title": "" }, { "docid": "b4bbbb939b2fb4f49e2bb596bbb3fa01", "score": "0.5244331", "text": "def EditedValues(self, *args):\n return _IFSelect.IFSelect_ListEditor_EditedValues(self, *args)", "title": "" }, { "docid": "e576c51636a511b27e8ec37ac702d1fd", "score": "0.52434754", "text": "def watchlist():\n\n form = WatchListForm()\n flash('View and update the Watch List')\n return render_template(\"watchlist.html\",\n form=form,\n displastname=False,\n errors=False,\n dispflash=True)", "title": "" }, { "docid": "5e056c045be6cff2f7a92a0459958bef", "score": "0.52360976", "text": "def fetch_editable_list(self, list_name, list_id, save_option):\n custom_list = CustomList.find(self._db, self.source, list_id)\n\n if save_option in self.EDIT_OPTIONS and not custom_list:\n raise NoResultFound(\n 'CustomList \"%s (%s)\" not in database. Please use save '\\\n 'option \"-n\" or \"--new\" to create a new list.' %\n (list_name, list_id)\n )\n\n if save_option == 'new':\n if custom_list:\n raise self.CustomListAlreadyExists(\n '%r already exists. 
Use save option \"--append\" or '\\\n '\"--replace\" to edit the existing list or use a '\\\n 'different CustomList name to create a new list.'\n )\n return None\n\n return custom_list", "title": "" }, { "docid": "2f2e8398abea5679b1c72123d82372fb", "score": "0.52348214", "text": "def test_admin_can_update_all_item(self):\n self.client.force_login(self.admin)\n\n payload_item = {\n \"name\": faker.pystr_format(),\n \"owner_id\": self.user_2.id,\n \"completed\": faker.boolean(chance_of_getting_true=50),\n }\n\n response = self.client.put(\n reverse(\"todo-detail\", args=[self.user_1_item.id]), payload_item\n )\n data = response.data\n\n self.assertEqual(data[\"id\"], str(self.user_1_item.id))\n self.assertEqual(data[\"name\"], str(payload_item[\"name\"]))\n self.assertEqual(data[\"owner\"][\"id\"], str(self.user_2.id))\n self.assertEqual(data[\"completed\"], payload_item[\"completed\"])\n self.assertEqual(200, response.status_code)\n self.client.logout()", "title": "" }, { "docid": "27aa0d994d2c070c9fe21e6a2f8cb498", "score": "0.52304244", "text": "def edit(id):\n cur = db.get_db().cursor()\n cur.execute(\n 'SELECT * from todos WHERE id=%s',\n (id,)\n )\n todo = cur.fetchone()\n if request.method == \"POST\":\n new = request.form['new']\n\n if not new:\n\n return render_template(edit.html)\n # update the task and set it to complete\n else:\n cur.execute(\n 'UPDATE todos SET description = %s'\n ' WHERE id = %s ',\n (new, id)\n )\n g.db.commit()\n cur.close()\n\n return redirect(url_for('todos.index'))\n return render_template(\"edit.html\", todo=todo)", "title": "" }, { "docid": "99e7b3a997e2066be7ba825e8c5d6f7d", "score": "0.52240247", "text": "def test_can_update(self):\n todo_list = self.TodoList(name='Test').save()\n todo_list.name = \"Changed name\"\n todo_list.save()\n\n self.assertEquals(todo_list.tasks, [])\n self.assertEquals(todo_list.name, 'Changed name')\n self.assertEquals(todo_list.version, 1)", "title": "" }, { "docid": "b69a6deed24477238471e50f582b4d5c", "score": "0.52209854", "text": "def updatelist(self):\n lst_ele = input(\"\\n Enter List Elements you want to add: \")\n self.my_list.append(lst_ele)", "title": "" }, { "docid": "d12a345e562e92644382ac494298850d", "score": "0.521484", "text": "def SetList(self, *args):\n return _pcbnew.MODULE_List_SetList(self, *args)", "title": "" }, { "docid": "18eb30cabf76f1df82498a896c1c9e30", "score": "0.52097523", "text": "def edit_activity(self, bucketlist_id, key):\n all_activities = Activity.activities\n for k, val in all_activities.items():\n if k == key and val['bucketlist_id'] == bucketlist_id:\n print('To be edited =', all_activities[k])\n parent_bucketlist = val['bucketlist_id']\n all_activities[k] = {'bucketlist_id': parent_bucketlist, 'title': self.title, 'description': self.description, 'status': self.status}\n\n print('Should have been edited =', all_activities)\n\n return all_activities", "title": "" }, { "docid": "c8ebf3d0cec6a88ad121c13eb6f58ba6", "score": "0.52006227", "text": "def make_editable():", "title": "" }, { "docid": "f8cdf96ba231a85f910e1498ae51bc33", "score": "0.5200595", "text": "def view_lists(request):\n lists = user_lists.objects.all().filter(list_owner=request.user)\n print(lists)\n context = {\n 'data' : lists\n }\n return render(request, \"user_lists.html\", context)", "title": "" }, { "docid": "6742df01dfd30e87b1a6e6175a8e3855", "score": "0.51968366", "text": "def edit_profile(request):\n return _edit_profile(request, False)", "title": "" }, { "docid": "d833de4e7e560c97f3b644d4e7604e27", "score": 
"0.5192286", "text": "def edit_food(id):\n db = get_db()\n food_entry = get_food_entry(id)\n old_food_name = food_entry['food_name']\n old_food_code = food_entry['food_code']\n\n if request.method == 'POST':\n if request.form['action'] == 'Update Entry':\n code = request.form['code']\n code = code.lower()\n error = None\n\n if not code:\n error = 'Please enter a code to edit your previous entry'\n\n elif code == old_food_code:\n error = \"You've entered your previous code\"\n\n else:\n for food in food_list:\n food_code = food.get_code()\n\n if code == food_code:\n food_calories = food.get_calories()\n food_name = food.get_name()\n db.execute(\n 'UPDATE food_entry SET food_code = ?, food_name = ?, calories = ? WHERE id = ?',\n (code, food_name, food_calories, id)\n )\n db.commit()\n message = \"Updated {0} ({1}) into {2} ({3}) for your food journal!\".format(old_food_name, old_food_code, food_name, food_code)\n flash(message, \"success\")\n return redirect(url_for('food.food_journal'))\n else:\n error = 'Invalid code entered'\n\n if error is not None:\n flash(error, \"error\")\n\n elif request.form['action'] == 'Remove Food Entry':\n db.execute('DELETE FROM food_entry WHERE id = ?', (id,))\n db.commit()\n message = \"Deleted {0} ({1}) from your food journal!\".format(old_food_name, old_food_code)\n flash(message, \"success\")\n return redirect(url_for('food.food_journal'))\n\n else:\n return redirect(url_for('food.edit_food', id=id))\n\n return render_template('food/edit_food.html', food_entry=food_entry, datetime=datetime)", "title": "" }, { "docid": "9d578ff8aa78711a2b7639b2251c7d33", "score": "0.5184235", "text": "def edit_course_update(self, index, content=None, course=None, user=None, date=None, deleted=None):\n user = user or self.user\n course = course or self.course\n updates_usage_key = get_course_info_usage_key(course, 'updates')\n course_updates = modulestore().get_item(updates_usage_key)\n for item in course_updates.items:\n if item['id'] == index:\n if date is not None:\n item['date'] = date\n if content is not None:\n item['content'] = content\n if deleted is not None:\n item['status'] = 'deleted' if deleted else 'visible'\n break\n modulestore().update_item(course_updates, user.id)", "title": "" }, { "docid": "ca3462ea45f8419878661ea11cef7021", "score": "0.51831925", "text": "def setAccessList(self, doc, access, save=False, user=None, force=False,\n setPublic=None, publicFlags=None):\n if setPublic is not None:\n self.setPublic(doc, setPublic, save=False)\n\n if publicFlags is not None:\n doc = self.setPublicFlags(doc, publicFlags, user=user, save=False,\n force=force)\n\n doc = AccessControlledModel.setAccessList(self, doc, access,\n user=user, save=save, force=force)\n\n return doc", "title": "" }, { "docid": "6ba1b8f37f4799998e1e65b28d7d51e7", "score": "0.5175601", "text": "def edit(self, id: int, model: EditOrderDto):\n raise NotImplementedError", "title": "" } ]
feca5e961fa911131bf43c91b42fcfd9
Write the nifti file to disk
[ { "docid": "b06bf425afbdbec5b966634675f994ba", "score": "0.75929457", "text": "def write_nifti(self, output_path):\n nib.save(self.niftiImage, output_path)\n print('Image saved at: {}'.format(output_path))", "title": "" } ]
[ { "docid": "d9a1fcb314f9516ffb3c235ebf492d2a", "score": "0.6979252", "text": "def save_nifti(self, path):\n meta = {'te': self.te, 'tr': self.tr, 'sw': self.sw}\n if self.sequence_type == 'STEAM':\n meta['tm'] = self.tm\n\n # store real and imaginary components in last 2 dims\n component_fid = np.stack((np.real(self.fid),np.imag(self.fid)), -2)\n nifti = nib.Nifti2Image(component_fid, self.transform.get_matrix(), extra=meta)\n nib.save(nifti, path)", "title": "" }, { "docid": "2daac18524eb39db388d5643ec71980b", "score": "0.69197434", "text": "def write(self, filename):\n pass", "title": "" }, { "docid": "2daac18524eb39db388d5643ec71980b", "score": "0.69197434", "text": "def write(self, filename):\n pass", "title": "" }, { "docid": "9eebd933794ef8595c49e4c61517e172", "score": "0.6748722", "text": "def write_to_file(self, filename: str) -> None:", "title": "" }, { "docid": "744f57ef7c1e11d28170a6eb8c35a4c9", "score": "0.66238767", "text": "def write(self, fname):\n pass", "title": "" }, { "docid": "72802193e54e11da7f740df3f606cee9", "score": "0.6591535", "text": "def write_file(self, i, path, fout):\n\n test_file = path + '/' + self.output[i]\n # Write file name\n print(test_file, file=fout, end='\\n\\n')\n\n extension = os.path.splitext(test_file)[1]\n if extension == '.fits' or extension == 'FITS':\n import subprocess\n prog = self.bindir + '/fits2ascii.py -i ' + test_file\n output = subprocess.check_output(prog.split(), shell=False)\n data = output.decode()\n else:\n fin = open(test_file, 'r')\n data = fin.read()\n fin.close()\n #fout.write(data)\n print(data, file=fout)\n print(file=fout, end='\\n')", "title": "" }, { "docid": "b716bd5f1820cd029612ab67cf8b263f", "score": "0.6581329", "text": "def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "title": "" }, { "docid": "7ca7c9997ceb573b611a4ffc4be68aef", "score": "0.65263844", "text": "def to_file(self, file_path, smirnoff_data):\n pass", "title": "" }, { "docid": "0f58336f429d60e03488831d5f0b2f00", "score": "0.6507578", "text": "def write (self, file):\n\t\tfile.write (self.pack ())", "title": "" }, { "docid": "aa7194012f334c33266173f677449f2a", "score": "0.64920294", "text": "def write(self, filename): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "24125a8357e8e679d194ccb714d6b86c", "score": "0.64842874", "text": "def write(filename):\n print(uc.write(filename))", "title": "" }, { "docid": "89075d56ace81a1c8dbdc04ad1c311c4", "score": "0.6415806", "text": "def to_file(self, filename):\n self.header['n'] = self.n\n save_gyre(filename, self.header, self.data)", "title": "" }, { "docid": "f566ae9333334d3fd706b015a920be54", "score": "0.6378824", "text": "def filewrite(self, filename):\n io.write(self, filename)", "title": "" }, { "docid": "a0a20a4bd2640ee2293d76d0db8f027b", "score": "0.63755494", "text": "def write_to_disk(self):\n\n\t\t# print \"--------------------------------------------------------WRITING PIECE %r TO DISK\" %self.index\n\t\ttry:\n\t\t\tos.makedirs(PATH)\n\t\texcept:\n\t\t\tpass\n\t\tself.piece_file_name = os.path.join(PATH, self.torrent.name+'.'+'00'+str(self.index))\n\t\t# print \"Saving piece to file name: \", self.piece_file_name\n\t\tpiece_file = open(self.piece_file_name, 'w')\n\t\tpiece_file.write(self.data)\n\t\tpiece_file.close()", "title": "" }, { "docid": "aa4148857ba2aa1a9949652f7c964908", "score": 
"0.6338302", "text": "def write(self, tofile=None):\n if tofile is None:\n tofile = self._filename[:-4]+\"_tifinity.tiff\"\n\n with open(tofile, 'wb') as out_file:\n self._tiff.tofile(out_file) # numpy.tofile()", "title": "" }, { "docid": "1b3c2b06848adb223cd0b310ff2258a1", "score": "0.63330835", "text": "def write_to_file(self, filename):\n self.octree.write(str.encode(filename))\n print(\"Save octomap to \"+filename)", "title": "" }, { "docid": "e6ca3b6adfe79bfcb4bf3953a0bf9159", "score": "0.63312125", "text": "def save_to_nii(im, filename, outdir=\"\", mode=\"image\", system=\"sitk\"):\n if system == \"sitk\":\n if mode == 'label':\n img = sitk.GetImageFromArray(im.astype(np.uint8))\n else:\n img = sitk.GetImageFromArray(im.astype(np.float32))\n if not os.path.exists(\"./{}\".format(outdir)):\n os.mkdir(\"./{}\".format(outdir))\n sitk.WriteImage(img, \"./{}/{}.nii.gz\".format(outdir, filename))\n else:\n img = np.rot90(im, k=2, axes= (1,2))\n OUTPUT_AFFINE = np.array(\n [[0, 0, 1, 0],\n [0, 1, 0, 0],\n [1, 0, 0, 0],\n [0, 0, 0, 1]])\n if mode == 'label':\n img = nibabel.Nifti1Image(img.astype(np.uint8), OUTPUT_AFFINE)\n else:\n img = nibabel.Nifti1Image(img.astype(np.float32), OUTPUT_AFFINE)\n if not os.path.exists(\"./{}\".format(outdir)):\n os.mkdir(\"./{}\".format(outdir))\n nibabel.save(img, \"./{}/{}.nii.gz\".format(outdir, filename))", "title": "" }, { "docid": "8b2c14384e4e04377d6207b8646f1538", "score": "0.6308714", "text": "def SaveNIFTI(data, file_path):\n if(np.iscomplex(data).any()):\n data = abs(data)\n nii = nib.Nifti1Image(data, np.eye(4)) \n nib.save(nii, file_path)", "title": "" }, { "docid": "9f58d7941505c397987b8395260ea010", "score": "0.62816036", "text": "def save(self, fname, snver=None):\n self._io.save(fname)", "title": "" }, { "docid": "92afdcb0404f635a53352b0050986885", "score": "0.62661976", "text": "def write(self, cull=False):\n if cull:\n cull_prefixes(self).write()\n else:\n ser = self.g.serialize(format='nifttl', encoding='utf-8')\n with open(self.filename, 'wb') as f:\n f.write(ser)\n #print('yes we wrote the first version...', self.name)", "title": "" }, { "docid": "c35a50e66c1e2735f972ee1240502606", "score": "0.6235734", "text": "def write_to_binary_file(self, filename):\n\n self.octree.writeBinary(str.encode(filename))", "title": "" }, { "docid": "37d66a56c9a68e43c7d2b3617837cd32", "score": "0.6224596", "text": "def save_nii(img_path, data, affine, header):\n nimg = nib.Nifti1Image(data, affine=affine, header=header)\n nimg.to_filename(img_path)", "title": "" }, { "docid": "845b81aabf1f8b26b409343ca483054a", "score": "0.6224113", "text": "def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))", "title": "" }, { "docid": "bc7d291fb5a02c4794c4a68ab594eb7d", "score": "0.6205511", "text": "def write_cif_file(self, file_name):\n cif_writer = CifWriter(self.dna_structure)\n cif_writer.write(file_name, self.infile, self.informat )", "title": "" }, { "docid": "54803adc45c5412e90d8ff4b0d66aef2", "score": "0.6198829", "text": "def save_to(self, f: BinaryIO):\n raise NotImplementedError", "title": "" }, { "docid": "a08adb7318cad18083f490b4bb5541ce", "score": "0.6182208", "text": "def write(self, file=None):\n if file is None:\n file = self.name + '.nbk'\n ET.ElementTree(self.root).write(file)", "title": "" }, { "docid": "e6b51d72b444c2d1080119e6481df607", "score": "0.61501175", "text": "def 
save2nifti(self, file_path):\n #Define nifti1 datatype codes\n NIFTI_TYPE_UINT8 = 2 # unsigned char\n NIFTI_TYPE_INT16 = 4 # signed short\n NIFTI_TYPE_INT32 = 8 # signed int.\n NIFTI_TYPE_FLOAT32 = 16 # 32 bit float.\n NIFTI_TYPE_COMPLEX64 = 32 # 64 bit complex = 2 32 bit floats\n NIFTI_TYPE_FLOAT64 = 64 # 64 bit float = double.\n NIFTI_TYPE_RGB24 = 128 # 3 8 bit bytes.\n NIFTI_TYPE_INT8 = 256 # signed char.\n NIFTI_TYPE_UINT16 = 512 # unsigned short.\n NIFTI_TYPE_UINT32 = 768 # unsigned int.\n NIFTI_TYPE_INT64 = 1024 #signed long long.\n NIFTI_TYPE_UINT64 = 1280 # unsigned long long.\n NIFTI_TYPE_FLOAT128 = 1536 # 128 bit float = long double.\n NIFTI_TYPE_COMPLEX128 = 1792 #128 bit complex = 2 64 bit floats.\n NIFTI_TYPE_COMPLEX256 = 2048 # 256 bit complex = 2 128 bit floats\n NIFTI_TYPE_RGBA32 = 2304 # 4 8 bit bytes.\n\n #Detect the data type of the input data.\n data_type = {\n np.uint8: NIFTI_TYPE_UINT8,\n np.uint16: NIFTI_TYPE_UINT16,\n np.uint32: NIFTI_TYPE_UINT32,\n np.float32: NIFTI_TYPE_FLOAT32,\n np.int16: NIFTI_TYPE_INT16,\n np.int32: NIFTI_TYPE_INT32,\n np.int8: NIFTI_TYPE_INT8\n }\n if sys.maxint > 2 ** 32: # The platform is 64 bit\n data_type[np.float128] = NIFTI_TYPE_FLOAT128\n data_type[np.float64] = NIFTI_TYPE_FLOAT64\n data_type[np.int64] = NIFTI_TYPE_INT64\n data_type[np.uint64] = NIFTI_TYPE_UINT64\n data_type[np.complex64] = NIFTI_TYPE_COMPLEX64\n data_type[np.complex128] = NIFTI_TYPE_COMPLEX128\n data_type[np.complex256] = NIFTI_TYPE_COMPLEX256\n\n data = np.rot90(self._data, 3)\n if data_type.has_key(data.dtype.type):\n self._header['datatype'] = data_type[data.dtype.type]\n self._header['cal_max'] = data.max()\n self._header['cal_min'] = 0\n image = nib.nifti1.Nifti1Image(data, None, self._header)\n nib.nifti1.save(image, file_path)", "title": "" }, { "docid": "a6c7142d7b030daf4a4b21144205f77f", "score": "0.6142444", "text": "def write_info_to_file(self):\n\n self.info.write_mission_info()\n\n self.logger.info(\"Mission instance write succeeded.\")", "title": "" }, { "docid": "54741f7668d379ae3649dbc74df9a45c", "score": "0.61301744", "text": "def write_oif(self, oifn):\n print(\"I don't know how to write \" + oifn + \" at this moment.\")", "title": "" }, { "docid": "e6b129a7f7e77550b149bc5c15213594", "score": "0.6125539", "text": "def _toFile(self):\n pass", "title": "" }, { "docid": "08f2f72779940e23e5d24ba7eb3d2d4a", "score": "0.6124996", "text": "def to_file(self, file_io):\n pickle.dump(self.__object, file_io)", "title": "" }, { "docid": "59c63c88ac8a3461efd3179804a692b2", "score": "0.61193436", "text": "def make_nifti(self, output_path=None):\n\n # save nifti\n if output_path is None:\n output = self.nifti_file\n else:\n output = output_path\n ecat2nii.ecat2nii(ecat_main_header=self.ecat_header, ecat_subheaders=self.subheaders, ecat_pixel_data=self.data,\n nifti_file=output, affine=self.affine)\n\n if 'nii.gz' not in output:\n output = helper_functions.compress(output)\n\n return output", "title": "" }, { "docid": "a1c8784392e3a9bb1076c2655e2b58bd", "score": "0.611131", "text": "def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()", "title": "" }, { "docid": "fc4cce892d5ccf055ee94e29c620dd51", "score": "0.6099797", "text": "def writetif(self,outputname,):\n pass", "title": "" }, { "docid": "b5c99d24d7c76b2dfb0508aa660fd275", "score": "0.60953605", "text": "def write_inpfile(self, filename, units=None, version=2.2, 
force_coordinates=False):\n wntr.network.io.write_inpfile(self, filename, units=units, version=version, force_coordinates=force_coordinates)", "title": "" }, { "docid": "fba2819d81bab38a48a6bc4f15521b75", "score": "0.60877645", "text": "def write(self, filename, data):\n raise NotImplementedError", "title": "" }, { "docid": "6966e3f7247eb9390e34417a074f048e", "score": "0.6082527", "text": "def write_itk_image(image, path):\n\n writer = itk.ImageFileWriter()\n writer.SetFileName(path)\n\n if os.path.splitext(path)[1] == '.nii':\n Warning('You are converting nii, ' + \\\n 'be careful with type conversions')\n\n writer.Execute(image)", "title": "" }, { "docid": "929b628d7328af61fb955f18049bd00e", "score": "0.6074798", "text": "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n FileRep.save(self,outPath)", "title": "" }, { "docid": "929b628d7328af61fb955f18049bd00e", "score": "0.6074798", "text": "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n FileRep.save(self,outPath)", "title": "" }, { "docid": "bb379d51a97c4c89658152637417434a", "score": "0.6073623", "text": "def save2nifti(self, file_path):\n # Define nifti1 datatype codes\n NIFTI_TYPE_UINT8 = 2 # unsigned char\n NIFTI_TYPE_INT16 = 4 # signed short\n NIFTI_TYPE_INT32 = 8 # signed int.\n NIFTI_TYPE_FLOAT32 = 16 # 32 bit float.\n NIFTI_TYPE_COMPLEX64 = 32 # 64 bit complex = 2 32 bit floats\n NIFTI_TYPE_FLOAT64 = 64 # 64 bit float = double.\n NIFTI_TYPE_RGB24 = 128 # 3 8 bit bytes.\n NIFTI_TYPE_INT8 = 256 # signed char.\n NIFTI_TYPE_UINT16 = 512 # unsigned short.\n NIFTI_TYPE_UINT32 = 768 # unsigned int.\n NIFTI_TYPE_INT64 = 1024 # signed long long.\n NIFTI_TYPE_UINT64 = 1280 # unsigned long long.\n NIFTI_TYPE_FLOAT128 = 1536 # 128 bit float = long double.\n NIFTI_TYPE_COMPLEX128 = 1792 # 128 bit complex = 2 64 bit floats.\n NIFTI_TYPE_COMPLEX256 = 2048 # 256 bit complex = 2 128 bit floats\n NIFTI_TYPE_RGBA32 = 2304 # 4 8 bit bytes.\n\n # Detect the data type of the input data.\n data_type = {\n np.uint8: NIFTI_TYPE_UINT8,\n np.uint16: NIFTI_TYPE_UINT16,\n np.uint32: NIFTI_TYPE_UINT32,\n np.float32: NIFTI_TYPE_FLOAT32,\n np.int16: NIFTI_TYPE_INT16,\n np.int32: NIFTI_TYPE_INT32,\n np.int8: NIFTI_TYPE_INT8\n }\n if sys.maxint > 2 ** 32: # The platform is 64 bit\n data_type[np.float128] = NIFTI_TYPE_FLOAT128\n data_type[np.float64] = NIFTI_TYPE_FLOAT64\n data_type[np.int64] = NIFTI_TYPE_INT64\n data_type[np.uint64] = NIFTI_TYPE_UINT64\n data_type[np.complex64] = NIFTI_TYPE_COMPLEX64\n data_type[np.complex128] = NIFTI_TYPE_COMPLEX128\n data_type[np.complex256] = NIFTI_TYPE_COMPLEX256\n\n header = nib.Nifti1Header()\n if self.data.shape[1] == 1:\n new_shape = (self.data.shape[0], 1, 1)\n else:\n new_shape = (self.data.shape[0], 1, 1, self.data.shape[1])\n data = self.data.reshape(new_shape)\n\n if data.dtype.type in data_type:\n header['datatype'] = data_type[data.dtype.type]\n header['cal_max'] = data.max()\n header['cal_min'] = data.min()\n image = nib.Nifti1Image(data, None, header)\n nib.nifti1.save(image, file_path)", "title": "" }, { "docid": "3cec6a0378db0cf3866b21c10f4f4a6d", "score": "0.6069007", "text": "def writeto(self, fileout):\n \n dump_pkl(self.data, fileout)", "title": "" }, { "docid": "7133c5c8d006a0a62e3c7f26438861e4", "score": "0.6063141", "text": "def write(self):\n # # Sometimes file is not written properly. 
So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))", "title": "" }, { "docid": "0e3859b1db502569849a960d721077bf", "score": "0.6050982", "text": "def save_elem_file(self, output):\n with open(output, 'wb') as fid:\n self._write_elem_header(fid)\n self._write_nodes(fid)\n self._write_elements(fid)\n self._write_neighbors(fid)", "title": "" }, { "docid": "3a74cba5fa33e12667c514eec459c586", "score": "0.6047022", "text": "def to_file(self, f: str) -> None:\n with open(f, \"w\") as open_file:\n open_file.write(\"\\n\".join(self.itos) + \"\\n\")", "title": "" }, { "docid": "c3dc707fffe3f87674d1dac402387489", "score": "0.6042394", "text": "def write_file(self, f=None):\n # get model information\n nlay = self.parent.nlay\n dis = self.parent.get_package(\"DIS\")\n if dis is None:\n dis = self.parent.get_package(\"DISU\")\n\n # Open file for writing\n if f is None:\n f_obj = open(self.fn_path, \"w\")\n\n # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET, IKVFLAG, IKCFLAG\n f_obj.write(\n f\" {self.ipakcb:9d} {self.hdry:9.3G} {self.iwdflg:9d}\"\n f\" {self.wetfct:9.3G} {self.iwetit:9d} {self.ihdwet:9d}\"\n f\" {self.ikvflag:9d} {self.ikcflag:9d}\\n\"\n )\n\n # LAYCON array\n for layer in range(nlay):\n if self.intercellt[layer] > 0:\n f_obj.write(\n f\"{self.intercellt[layer]:1d} {self.laycon[layer]:1d} \"\n )\n else:\n f_obj.write(f\"0{self.laycon[layer]:1d} \")\n f_obj.write(\"\\n\")\n\n # TRPY, <ANGLEX>\n f_obj.write(self.trpy.get_file_entry())\n transient = not dis.steady.all()\n structured = self.parent.structured\n anis = any(t != 1 for t in self.trpy)\n if (not structured) and anis:\n f_obj.write(self.anglex.get_file_entry())\n\n # <SF1>, <TRAN>, <HY>, <VCONT>, <KV>, <SF2>, <WETDRY>\n for layer in range(nlay):\n if transient:\n f_obj.write(self.sf1[layer].get_file_entry())\n\n if self.ikcflag == 0:\n self._write_hy_tran_vcont_kv(f_obj, layer)\n\n if transient and (self.laycon[layer] in [2, 3, 4]):\n f_obj.write(self.sf2[layer].get_file_entry())\n\n if (self.iwdflg != 0) and (self.laycon[layer] in [1, 3]):\n f_obj.write(self.wetdry[layer].get_file_entry())\n\n # <KSAT> (if ikcflag==1)\n if abs(self.ikcflag == 1):\n f_obj.write(self.ksat.get_file_entry())\n\n f_obj.close()", "title": "" }, { "docid": "a2c6ee1491d5b9667a280171be5457d0", "score": "0.6042112", "text": "def write(self, file):\n #write header\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "title": "" }, { "docid": "d5662f4336b6ec64e8fd8665e4cf9b61", "score": "0.60305744", "text": "def _write(self, out_file):\n out_file.write(' '.encode()) # pad byte\n out_file.write('{:4d}'.format(self.key).encode())\n out_file.write(self.code.encode())\n out_file.write((' '*18).encode()) # pad bytes\n out_file.write('{:12d}'.format(self.numnod).encode())\n out_file.write((' '*37).encode()) # pad bytes\n out_file.write('{:1d}'.format(self.format).encode())\n out_file.write('\\n'.encode())\n\n for node in self.nodes:\n if self.format < 2:\n out_file.write(' '.encode())\n out_file.write('-1'.encode())\n if self.format == 0:\n out_file.write('{:5d}'.format(node.number).encode())\n else:\n 
out_file.write('{:10d}'.format(node.number).encode())\n for i in range(3):\n out_file.write('{:12.5E}'.format(node.pos[i]).encode())\n out_file.write('\\n'.encode())\n else:\n out_file.write(struct.pack('i', node.number))\n if self.format == 2:\n out_file.write(struct.pack('fff', *node.pos))\n else:\n out_file.write(struct.pack('ddd', *node.pos))\n\n if self.format < 2:\n out_file.write(' -3\\n'.encode()) # last record for ascii only", "title": "" }, { "docid": "d5801a629ef2df5d1008b11adcb58021", "score": "0.6025829", "text": "def write(cls, file, data):\n file.write(data)", "title": "" }, { "docid": "627b155753d2bc4d179b838fa2ae1275", "score": "0.6010008", "text": "def save(self, ofilename, oname, noisy_only = True):\n ofile = ROOT.TFile(ofilename, 'recreate')\n\n outhists = [h.Clone(oname % (i + 1)) for i, h in enumerate(self.modules)]\n for h, cells in zip(outhists, self.cells):\n if noisy_only: h.Reset()\n for cell in cells: h.SetBinContent(cell[0], cell[1], noisy_only * 1.)\n # h.Write()\n\n ofile.Write()\n ofile.Close()", "title": "" }, { "docid": "959709db8c62331ac36ace2639e2638f", "score": "0.6003019", "text": "def save_tiff(self, to_file=None):\n self.tif_file.clear() # Empty the array first\n\n # Header\n byteo = 'II'\n if self.byteOrder != 'little':\n byteo = 'MM'\n self.tif_file.insert_bytes(list(byteo.encode())) # byte order\n self.tif_file.insert_int(42, 2) # Magic number\n self.tif_file.insert_int(8, 4) # first IFD always at 0x08\n\n for ifd in self.ifds:\n # self.calculateIFDSpace(ifd) # Readjusts counts because of changes to image data\n endpos = self.save_ifd(ifd)\n self.save_image(ifd, endpos)\n\n self.tif_file.write(to_file) # lastly, write to file", "title": "" }, { "docid": "91f1b5385601c83fa58a725ca8ae0e29", "score": "0.5986506", "text": "def write_mir(self, filename):\n raise NotImplementedError", "title": "" }, { "docid": "a5a2077186a994c22057dbc230f6f669", "score": "0.5962661", "text": "def save(self, file):\n boulders = []\n elephants = []\n rhinos = []\n for i in range(5):\n for j in range(5):\n if self[i][j]!= 0:\n piece = self[i][j]\n L = []\n if not isinstance(self[i][j], Boulder):\n L.append(self[i][j].direction[0])\n L.append(self[i][j].direction[1])\n if piece.species == \"Elephant\":\n elephants.append(\"(\" + str(i) + \",\" + str(j)+ \") : np.array([\"+str(L[0])+ \",\" + str(L[1])+\"])\")\n elif piece.species == \"Rhinoceros\":\n rhinos.append(\"(\"+str(i)+\",\" +str(j)+ \") : np.array([\"+str(L[0]) + \",\" + str(L[1])+\"])\")\n elif isinstance(piece, Boulder):\n boulders.append(\"(\" + str(i) + \",\" + str(j) + \")\")\n file.write(\"# King of Siam GameFile \\n\\nplayer_turn {\\n \" + self.playerTurn + \"\\n}\\n\\n\")\n file.write(\"Boulder {\")\n for k in range(len(boulders)):\n file.write(\"\\n \" + boulders[k] + \";\")\n file.write(\"\\n}\\n\\nElephant {\")\n for elt in elephants:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\\n\\nRhinoceros {\")\n for elt in rhinos:\n file.write(\"\\n \" + elt + \";\")\n file.write(\"\\n}\")\n\n file.close()", "title": "" }, { "docid": "6395a1cf5ad36977b1e0b016e86fd799", "score": "0.5961621", "text": "def write(self, out):", "title": "" }, { "docid": "8761e969f7b8f87e13313da6d136a7a7", "score": "0.5953547", "text": "def write_voldata_to_nifti_file(file_name, vol_data, affine=None, header=None):\n if header is None:\n header = nib.Nifti1Header()\n header.set_data_shape(vol_data.shape)\n nifti_image = nib.Nifti1Image(vol_data, affine, header=header)\n nib.save(nifti_image, file_name)", "title": "" }, { 
"docid": "ecb422e9a9d6d4325ac9406ea4507899", "score": "0.59516346", "text": "def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()", "title": "" }, { "docid": "a44657e8b00dd8109880ac2f3cdb9f22", "score": "0.5946414", "text": "def write(self, filename):\n \n return self.model.write(filename,xml_declaration=True, encoding='utf-8')", "title": "" }, { "docid": "b28b35b1e75aac1cdbc81fc57cc4f2e0", "score": "0.5924653", "text": "def write_file(self, filename):\n with open(filename, \"w\") as fo:\n # vertices\n\n for x, y, z in self.vertices:\n fo.write(\"v {} {} {}\\n\".format(x, y, z))\n logging.info(\"Wrote {} vertices\".format(len(self.vertices)))\n\n # faces\n faces = 0\n width, height = self.size\n for y in range(0, height-1):\n for x in range(0, width-1):\n tl = self.vertex_num(x,y)\n tr = tl + 1\n bl = tl + width\n br = bl + 1\n fo.write(\"f {} {} {}\\n\".format(tl, tr, bl))\n fo.write(\"f {} {} {}\\n\".format(tr, br, bl))\n faces += 2\n logging.info(\"Wrote {} tris\".format(faces))", "title": "" }, { "docid": "c03b4cbd4cdb7c51596fc3d8d00654f3", "score": "0.5922896", "text": "def write_neighnet(pathfolder, nif, neighnet, methodvalues):\n ## Generate namefile\n namefile = generate_namefile(pathfolder, methodvalues)\n\n ## Writting\n db = shelve.open(namefile)\n db['nif'] = nif\n db['neighnet'] = neighnet\n db['methodvalues'] = methodvalues\n db.close()", "title": "" }, { "docid": "ef1dcb41f2399f7cf46c16ac525abe68", "score": "0.5920407", "text": "def _write(self):\n f = FortranFile(self.filename,mode='w')\n # Default omnivor binary header\n f.writeInts ( self.data['MK'] , 'i' ) \n f.writeInts ( self.data['itime'] , 'i' ) \n f.writeString ( self.data['version'] ) \n f.writeInts ( self.data['file_id'] , 'i' ) \n f.writeString ( self.data['sversion'] ) \n # Velocity field\n f.writeString ( self.data['stype'] ) \n f.writeInts ( self.data['is_grid'] , 'i' ) \n f.writeInts ( self.data['nCPs'] , 'i' ) \n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n f.writeInts ( self.data['n1'] , 'i' ) \n f.writeInts ( self.data['n2'] , 'i' ) \n f.writeInts ( self.data['n3'] , 'i' ) \n f.writeInts ( self.data['is_straight'] , 'i' ) \n f.writeReals ( self.data['v1'] , real_char ) \n f.writeReals ( self.data['v2'] , real_char ) \n f.writeReals ( self.data['v3'] , real_char ) \n\n CPs = self.data['CPs'].flatten(order = 'F')\n Utot = self.data['Utot'].flatten(order = 'F')\n f.writeReals(CPs,real_char)\n f.writeReals(Utot,real_char)", "title": "" }, { "docid": "7623a17fd8a67ef43b815c03bef41d04", "score": "0.59195465", "text": "def write(self, filename, data, hdr):\n pass", "title": "" }, { "docid": "52f4ec9598da7577dafc7d6081a0fedb", "score": "0.59129125", "text": "def write_to_disk(self):\n text_file = open(self.file_path, \"w\")\n text_file.write(str(self))\n text_file.close()\n # dump to pickle\n pickle.dump(self.blockchain, open(self.pickle_path, \"wb\"))", "title": "" }, { "docid": "5e8cc7fb8af9c7c6a21df7718460d26e", "score": "0.5906205", "text": "def write(self, filename):\n\n self.__image.save(filename)", "title": "" }, { "docid": "57c245c512c60547b9974e39f9e344b0", "score": 
"0.59002054", "text": "def to_file(self, file_path, smirnoff_data):\n xml_string = self.to_string(smirnoff_data)\n with open(file_path, \"w\") as of:\n of.write(xml_string)", "title": "" }, { "docid": "c82eb207ce185f1de1b24389ee21542a", "score": "0.5897611", "text": "def write(self):", "title": "" }, { "docid": "c82eb207ce185f1de1b24389ee21542a", "score": "0.5897611", "text": "def write(self):", "title": "" }, { "docid": "83e1100313f58006f9824a570e5ed7f3", "score": "0.5885105", "text": "def write_to_file(self, overwrite=True):\n t0 = time.time()\n self.hdus.verify()\n if BACKEND == 'astropy':\n self.hdus.writeto(self.filename, overwrite=overwrite)\n elif BACKEND == 'pyfits':\n self.hdus.writeto(self.filename, clobber=overwrite)\n self.logger.debug(\"Took {:.4f} seconds to write to disk\".format(time.time() - t0))", "title": "" }, { "docid": "e695ed99505b54ec147c6349694b8c41", "score": "0.5881756", "text": "def write_output_file(self, index):\n ctx = self.block_store.make_local_output(self.expected_outputs[index])\n self.open_output_contexts[index] = ctx\n return ctx.get_filename()", "title": "" }, { "docid": "379415503d173841714cd123e031610e", "score": "0.58558863", "text": "def save(self, filename):\n o = open(filename, 'w')\n o.write(self.write())\n o.close()", "title": "" }, { "docid": "55755cd0134bb4c6ee2b7fcd802c8102", "score": "0.5855836", "text": "def _write_to_file(self):\n with open(self.filename + \".ir\", \"w+\") as file:\n file.writelines(\n [\"\\n\" + l if p != 0 else l for p, l in enumerate(self.lines)]\n )", "title": "" }, { "docid": "3ce1e1f717d5110d6b0123d9b9f2e818", "score": "0.58487016", "text": "def write(self, image):\n raise NotImplementedError()", "title": "" }, { "docid": "8e841b6a633411ec917fad44ae629836", "score": "0.5847671", "text": "def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt 
file\n f.close()", "title": "" }, { "docid": "fcd1ad0530885f3ed9dbc18de3a8817e", "score": "0.584708", "text": "def write_to_file(unit, fobj):\n\n _write_all_headers(unit, fobj)\n _write_all_sections(unit, fobj)", "title": "" }, { "docid": "8e37b57c8d94b10f49cb119007c1c5c9", "score": "0.5846687", "text": "def test(nifti_region_to_save, path_where_store_out=\"pet_regions_segmented\"):\n regions_used = \"three\"\n list_regions = session.select_regions_to_evaluate(regions_used)\n dic_regions_segmented = load_pet_regions_segmented(list_regions)\n\n region_container_3d = dic_regions_segmented[\n nifti_region_to_save] # [patients x heigh, width, depth]\n\n for patient in range(0, region_container_3d.shape[0], 1):\n img = nib.Nifti1Image(region_container_3d[patient, :, :, :], np.eye(4))\n img.to_filename(os.path.join(path_where_store_out,\n \"region_{0},patient_{1}.nii\".format(\n regions_used, patient)))", "title": "" }, { "docid": "bd943578d1c610d9d8da44b3efdfd049", "score": "0.5845502", "text": "def save(self,file):\n\n with open(file,\"w\") as f:\n f.write(self.to_string())", "title": "" }, { "docid": "3c189f40a442fddd4758d5a53a594f14", "score": "0.5837892", "text": "def save(self):\n # TODO: save the file", "title": "" }, { "docid": "9337eb4881cf3abfb3b37183988730ee", "score": "0.583108", "text": "def write_nml(self, nml_write_path):\n\n # If the object does not have any trees, construct an empty tree before writing to enable webKnossos import\n if self.num_trees() == 0:\n self.add_tree()\n\n nml = self._skeleton_to_nml()\n with open(nml_write_path, \"wb\") as f:\n wknml.write_nml(f, nml)", "title": "" }, { "docid": "dd2984812a38c77ba36940f8ac281938", "score": "0.5829392", "text": "def to_file(self, outfile):\n\n with open(outfile, \"w\") as outf:\n outf.write(self.to_string())", "title": "" }, { "docid": "9dd87178c13879683022962dd303299f", "score": "0.5825301", "text": "def write_file(self):\n file = open(self.__file_path, 'w+')\n file.truncate(0)\n file.write(self.__content)\n file.close()", "title": "" }, { "docid": "b6019fd21ddaa8851d55e94ac4b8a9e0", "score": "0.5808851", "text": "def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")", "title": "" }, { "docid": "eb6a4eb0e3f2b63bd488943f59547b7b", "score": "0.57909006", "text": "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "title": "" }, { "docid": "e3f0308b0fd50f45cb7e38ad2286908e", "score": "0.57812834", "text": "def to_file(self, fn):\n store.store_dict(fn, 'trainalgorithm', self.to_dict())", "title": "" }, { "docid": "26b1df0343117ef259674fa64d29e0ab", "score": "0.5780285", "text": "def write(self):\n raise NotImplementedError", "title": "" }, { "docid": "f84ebd009d8cb425c1971249907680b6", "score": "0.5770531", "text": "def save_to_disk(self, filename='ens_state.nc'):\n self.to_netcdf(filename)", "title": "" }, { "docid": "9f90ad2c2379a5283aaece0abf0d0f2e", "score": "0.57655793", "text": "def iPutFile(sess, fileName, iPath):\n\n options = {kw.REG_CHKSUM_KW: ''}\n\n with open(fileName, 'r') as f:\n content = f.read()\n\n obj = 
sess.data_objects.create(iPath)\n with obj.open('w', options) as obj_desc:\n obj_desc.write(content)\n\n obj = sess.data_objects.get(iPath)\n\n return obj", "title": "" }, { "docid": "c2f37cd64c0491c79e734deb80b88793", "score": "0.5763002", "text": "def write_to(self, filename):\n with open(filename, 'w') as f:\n for xx, yy, zz, ww in zip(self.x, self.y, self.field, self.weight):\n f.write(\"%s %s %s %s\\n\" % (xx, yy, zz, ww))\n logger.info(\"Written data into file {0}\".format(filename))", "title": "" }, { "docid": "3c090bade94a0a747501ce260cd50097", "score": "0.5762988", "text": "def save(self, inst):\n n = inst.dimensions[\"n\"]\n with open(self.location, \"wt\") as f:\n f.write(f\"measurements: {n}\\n\")\n f.write(f\"time temperature\\n\")\n for time, temp in zip(inst.time, inst.temperature):\n f.write(f\"{time:4} {temp:12}\\n\")", "title": "" }, { "docid": "747713f2239772c216538064f6230919", "score": "0.57571495", "text": "def write_to_file(self, filename):\n\n loader = ImageLoader()\n loader.write(self, filename)", "title": "" }, { "docid": "9f51b6a930b1d91d287a653cc58f8af3", "score": "0.57534796", "text": "def to_file(self, file): \n try:\n import dill as pickle\n except ImportError:\n logger.error(\"Cannot write to file, dill not installed \\n\"\n \"pip install dill\")\n return\n try:\n logger.info('Writing GeologicalModel to: {}'.format(file))\n pickle.dump(self,open(file,'wb'))\n except pickle.PicklingError:\n logger.error('Error saving file')", "title": "" }, { "docid": "cea24ca9c9de0abbb0e948d4731c2fd0", "score": "0.57529384", "text": "def write_to_file(info: List[str]) -> None:\n return", "title": "" }, { "docid": "8d53f18bf1071359a0d9038f9760e0e4", "score": "0.57505745", "text": "def write(self, uri):\n img_to_write = self.msiToWrite.get_image()\n\n # sitk can only write images of dimension 2,3,4. This hack is\n # to fake 1d images as being 2d. 1d images e.g. 
occure after taking\n # the mean of an image.\n if len(img_to_write.shape) == 1:\n img_to_write = np.reshape(img_to_write, (1, 1, img_to_write.shape[0]))\n\n img = sitk.GetImageFromArray(img_to_write, isVector=True)\n sitk.WriteImage(img, uri)\n logging.info(\"written file \" + uri + \" to disk\")\n return None", "title": "" }, { "docid": "3b283a6b291f544e283db0b219be4a52", "score": "0.5748918", "text": "def save_file(self, filename):\r\n \r\n f = open(filename,'w')\r\n f.write(self.body)\r\n f.close", "title": "" }, { "docid": "1d7c7046cf0833c319707a9df2f69470", "score": "0.5743292", "text": "def write(self, instream: typ.BinaryIO, filepath: str,\r\n filename: str = None) -> None:\r\n if filename is not None:\r\n filename = path.basename(filename)\r\n if self.fs_type == 'FAT':\r\n allocator_metadata = self.fs.write(instream, filepath)\r\n self.metadata.add_file(filename, allocator_metadata)\r\n elif self.fs_type == 'NTFS':\r\n allocator_metadata = self.fs.write(instream, filepath)\r\n self.metadata.add_file(filename, allocator_metadata)\r\n else:\r\n raise NotImplementedError()", "title": "" }, { "docid": "ac6ceb9e08e21523e45400a32b53e701", "score": "0.57419723", "text": "def write(self, filename, *args, **kwargs):\n self.to_fits().writeto(filename, *args, **kwargs)", "title": "" }, { "docid": "b429138e9a012dbb6bdb40404edeedc0", "score": "0.5736616", "text": "def _write(self, out_file):\n #\n # I know this function is long, but the FRD block is long as well...\n # Splitting this into multiple functions would not help in my opinion.\n # Therefore -> shut up pylint\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n #\n out_file.write(' '.encode()) # pad byte\n out_file.write('{:4d}'.format(self.key).encode())\n out_file.write(self.code.encode())\n out_file.write(self.setname.ljust(6).encode())\n out_file.write('{:12.5E}'.format(self.value).encode())\n out_file.write('{:12d}'.format(self.numnod).encode())\n out_file.write(self.text.ljust(20).encode())\n out_file.write('{:2d}'.format(self.ictype).encode())\n out_file.write('{:5d}'.format(self.numstep).encode())\n out_file.write(self.analys.ljust(10).encode())\n out_file.write('{:2d}'.format(self.format).encode())\n out_file.write('\\n'.encode())\n\n out_file.write(' '.encode()) # pad byte\n out_file.write('-4'.encode()) # key = -4\n out_file.write((' '*2).encode()) # pad bytes\n out_file.write(self.name.ljust(8).encode())\n if self.entities[0].ictype == 2 and self.ncomps == 3:\n out_file.write('{:5d}'.format(self.ncomps + 1).encode())\n else:\n out_file.write('{:5d}'.format(self.ncomps).encode())\n out_file.write('{:5d}'.format(self.irtype).encode())\n out_file.write('\\n'.encode()) # eol\n\n for entity in self.entities:\n out_file.write(' '.encode()) # pad byte\n out_file.write('-5'.encode())\n out_file.write((' '*2).encode()) # pad bytes\n out_file.write(entity.name.ljust(8).encode())\n out_file.write('{:5d}'.format(entity.menu).encode())\n out_file.write('{:5d}'.format(entity.ictype).encode())\n out_file.write('{:5d}'.format(entity.icind1).encode())\n if entity.ictype == 4:\n out_file.write('{:5d}'.format(entity.icind2).encode())\n elif entity.ictype == 2 and entity is self.entities[-1]:\n out_file.write('{:5d}'.format(entity.icind2).encode())\n out_file.write('{:5d}'.format(entity.iexist).encode())\n out_file.write(entity.icname.encode())\n else:\n out_file.write('{:5d}'.format(entity.iexist).encode())\n out_file.write('\\n'.encode()) # eol\n\n for result in self.results:\n if self.format < 2:\n num_lines = 
int(self.ncomps/(6 + 1)) + 1\n for j in range(num_lines):\n if j == 0:\n out_file.write(' -1'.encode()) # pad byte and key = -1\n if self.format == 0:\n out_file.write(\n '{:5d}'.format(result.node).encode())\n else:\n out_file.write(\n '{:10d}'.format(result.node).encode())\n else:\n out_file.write(' -2'.encode()) # pad byte and key = -2\n out_file.write(' '*(5*(self.format+1)).encode())\n k_start = j*6\n k_end = min(self.ncomps - k_start, (j+1)*6)\n for k in range(k_start, k_end):\n out_file.write(\n '{:12.5E}'.format(result.data[k]).encode())\n out_file.write('\\n'.encode()) # eol\n else:\n out_file.write(struct.pack('i', result.node))\n out_file.write(struct.pack('f'*self.ncomps, *result.data))\n\n if self.format < 2:\n out_file.write(' -3\\n'.encode()) # last record for ascii only", "title": "" }, { "docid": "363a0eac9e9b57532a9ba88b1378c247", "score": "0.57359904", "text": "def write_binary(self, path):\n return", "title": "" }, { "docid": "3adecc82a47048190bc829e70bed541f", "score": "0.57349813", "text": "def store(self, filename):", "title": "" }, { "docid": "2ef692c0aca0460238f4f6cc6e8707de", "score": "0.57297206", "text": "def cam_write(filename, M, N):\n f = open(filename,'wb')\n # write the header\n f.write(TAG_CHAR)\n M.astype('float64').tofile(f)\n N.astype('float64').tofile(f)\n f.close()", "title": "" }, { "docid": "8c688bdacbbede28ba0deeb53f19199f", "score": "0.57166487", "text": "def write(self):\n pass", "title": "" }, { "docid": "8c688bdacbbede28ba0deeb53f19199f", "score": "0.57166487", "text": "def write(self):\n pass", "title": "" }, { "docid": "a09e42e366b5e35d03d50c64b8cce2b8", "score": "0.571542", "text": "def save(self,outPath=None):\n if (not self.canSave or self.skipObjRecords): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.changed = 1\n self.tes3.hedr.changed = 1\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Size Cell Records\n cntRecords = 0\n progress = self.progress\n progress.setMax(len(self.cells))\n progress(0.0,'Saving '+self.fileInfo.name)\n for record in self.cells:\n record.getSize()\n #--Progress\n cntRecords += 1\n progress(cntRecords)\n #--Other Records\n for record in self.records:\n record.getSize() #--Should already be done, but just in case.\n record.dump(out)\n out.close()", "title": "" } ]
fd18bd86a69e92f4b68845489ca18d22
Asks the player if he wants to hit or stand
[ { "docid": "1d581f916b0bce561d52d363362b7407", "score": "0.7071528", "text": "def hit_or_stand(self, _):\n while True:\n move = input(\"Do you want to hit or stand? (H or S): \")\n if move not in ['H', 'S']:\n print(\"Wrong input. Please try again.\")\n continue\n break\n return move", "title": "" } ]
[ { "docid": "e32daeee8ff2c30e0fee55e15db46ec1", "score": "0.70723826", "text": "def get_player_action(self) -> None:\n print(f\"\\nYou have: {self.user.hand.cards} totalling to {self.user.hand.value}\")\n while not self.get_game_ending_hands():\n action = self.validate_input(\"Do you want to 1. hit or 2. stand?\", ('1', '2'))\n if action == '1':\n self.action_hit()\n elif action == '2':\n self.action_stand()\n break", "title": "" }, { "docid": "8a82b0e4ff5d9d44e55f84b76df20c76", "score": "0.7048434", "text": "def hit():\n \n # Update messages, score and the player's \"Hand\" status\n # as global variables.\n global outcome, outcome_plus, outcome_plus_plus, in_play, score, action \n \n # If the \"Hand\" is in play, hit the \"player\". \n if in_play:\n outcome = outcome_plus = outcome_plus_plus = \"\"\n player.add_card(deck_of_cards.deal_card())\n else:\n return None\n \n # If busted, update messages, score and the player's \n # \"Hand\" status.\n if player.get_value() > 21:\n outcome = PLAYER_BUSTED\n outcome_plus = outcome_plus_plus = \"\"\n action = NEW_DEAL \n score -= SCORE_POINTS\n in_play = False\n \n return None", "title": "" }, { "docid": "bae69a9b87559464477bb98f442390e3", "score": "0.68880326", "text": "def get_user_input(self, game, hand, message, allowed_actions):\n if random.random() < 0.5:\n return 'hit'\n else:\n return 'stand'", "title": "" }, { "docid": "cfaee65cc9ab734b5c5a42158cf64a7b", "score": "0.6715368", "text": "def stand(self):\n self.endgame()", "title": "" }, { "docid": "0364bc64fa3d34b4435d193d65c72809", "score": "0.6608959", "text": "def deal_player(self, player): \n answer = self.__ask_hit_or_stand(player)\n if answer in ('hit'):\n player.hit(self.get_card())\n elif answer in('stand'):\n player.stand()", "title": "" }, { "docid": "8afc8113745c14add5075045907c0e87", "score": "0.65327054", "text": "def player_hit(self):\r\n if self.in_progress:\r\n self.player_hand.add(self.deck.deal())\r\n if self.player_hand.total > 21:\r\n self.status_color = 'red'\r\n self.game_status = \"Dealer WINS... Press 'r' to start game\"\r\n self.dealer_wins += 1\r\n self.in_progress = False\r\n self.refresh_canvas()", "title": "" }, { "docid": "26d8600049519efd3d69b62c43777f1d", "score": "0.65276474", "text": "def player_hit(self):\n self.player.hit(self.deck)\n self.print_hands()\n \n if self.player.sum_cards() > 21:\n self.round_winner = True\n self.print_hands()\n print(\"BUST! Dealer wins.\")", "title": "" }, { "docid": "b7843802c0057d665db6f8b7c2587d0e", "score": "0.6515513", "text": "def user_play(play_shoe, player, dealer):\n print(\"\\nDealer shows:\" + dealer.get_viewable_hand())\n hit = True\n while hit == True:\n decision = \" \"\n if len(player.get_hand()) == 2:\n print(\"\\nPlayer \" + player.get_name() + \" your hand is:\" + player.get_viewable_hand())\n else:\n print(\"\\nYour hand is now:\" + str(player.get_viewable_hand()))\n decide_soft_score_print(player)\n if not(check_blackjack(player.get_score(), player.get_hand())):\n if not(player.check_bust()) and player.get_score() < 21:\n while not(decision[0] == \"h\") and not(decision[0] == \"s\"):\n decision = input(\"Would you like to Hit or Stand? 
\").lower()\n if decision[0]==\"h\":\n player.hit_hand(play_shoe)\n else:\n hit = False\n else:\n hit = False\n else:\n hit = False\n check_stand(player)", "title": "" }, { "docid": "2559ea2cb174e50cb71a019aab595242", "score": "0.6512105", "text": "def hit(self):\n global in_play, deck, player_hand, dealer_hand, outcome, lost\n \n if in_play:\n player_hand.add_card(deck.deal_card())\n \n if player_hand.get_value() > 21:\n self.outcome.set(\"You have busted! Dealer wins. New deal?\")\n self.lost += 1\n self.score.set(str(self.won) + \"/\" + str(self.lost))\n in_play = False\n draw(canvas)\n\n print \"\\nPlayer hand: \", player_hand\n print \"Dealer hand: \", dealer_hand", "title": "" }, { "docid": "cfc50e95d0bc349e624a92a188a122c8", "score": "0.6493972", "text": "def hit(self):\n assert not self.damaged\n self.damaged = True\n self.game_piece.hit()", "title": "" }, { "docid": "b870b6e63fef75dceef76098999afb8b", "score": "0.64912117", "text": "def play_game(self):\n player = Player(input(\"What is your name?\"))\n while player.health > 0:\n input(\"Press t to start another turn\")\n n = random.randint(0, 3)\n if n == 0:\n if self.monster_attack(player):\n break\n elif n == 1:\n self.find_gold(player)\n else:\n print(\"Nothing happened!\")", "title": "" }, { "docid": "1ec6acabccd49c3a1d5d162d12ce906c", "score": "0.6479495", "text": "def play(self):\n hand = self.state.hand\n supply = self.state.supply\n money = count_money(hand) - self.state.used_money\n if supply['Province'] > 0 and money >= Province.Cost:\n self.game_client.buy('Province')\n elif supply['Duchy'] > 0 and money >= Duchy.Cost:\n self.game_client.buy('Duchy')\n elif supply['Estate'] > 0 and money >= Estate.Cost:\n self.game_client.buy('Estate')\n\n self.game_client.done()", "title": "" }, { "docid": "85732fc579ad3e84be5afc1d5d8beda3", "score": "0.6478083", "text": "def play(self):\n self.player = Knight()\n self._occupy_huts()\n acquired_hut_counter = 0\n\n self.show_game_mission()\n self.player.show_health(bold=True)\n\n while acquired_hut_counter < 5:\n idx = self._process_user_choice()\n self.player.acquire_hut(self.huts[idx-1])\n\n if self.player.health_meter <= 0:\n print_bold(\"YOU LOSE :( Better luck next time\")\n break\n\n if self.huts[idx-1].is_acquired:\n acquired_hut_counter += 1\n\n if acquired_hut_counter == 5:\n print_bold(\"Congratulations! YOU WIN!!!\")", "title": "" }, { "docid": "ec6fb609019da93ccc3ae6183550a1e6", "score": "0.6403893", "text": "def hit(player):\n deal_random_card(player)", "title": "" }, { "docid": "a984f081712dc88f65f99afe231d4d8f", "score": "0.62761146", "text": "def take_turn(self, opponent):\n\n # --------- BEGIN YOUR CODE ----------\n\n # 1.) Guess a random space that has not been guessed (or be more clever!)\n\n # Steps 2-4 are the same as Human.take_turn\n\n # 2.) Call opponent.guess() to check whether the guess is a hit or miss\n\n # 3.) Update my_hits, my_misses, and sunk_ships accordingly\n\n # 4.) If the sunk_ships array has 5 ships in it set self.complete to True\n\n # --------- END YOUR CODE ----------\n\n # enforce a short delay to make the computer appear to \"think\" about its guess\n time.sleep(0.5)", "title": "" }, { "docid": "b5308a346756f6aec2fd3be09f4ecd2b", "score": "0.6266108", "text": "def play(self):\n\n player1_turn = True\n\n while True:\n if player1_turn:\n self.player_turn(self.player1, self.player2)\n if self.lost(self.player2):\n print(\"Game Over!! 
You sank {}'s ships!\".format(\n self.player2.name))\n break\n player1_turn = False\n else:\n self.player_turn(self.player2, self.player1)\n if self.lost(self.player1):\n print(\"Game Over!! You sank {}'s ships!\".format(\n self.player1.name))\n break\n player1_turn = True", "title": "" }, { "docid": "a41ddc3e5446124e900843abae6feecf", "score": "0.6259154", "text": "def user_action():\n\t### This is the function that takes and executes the users choices\n\twhile battle_on:\n\t\tchoosing = True\n\t\twhile choosing:\n\t\t\tmenu(\"general\")\n\t\t\tanswer()\n\t\t\tif ans == \"attack\":\n\t\t\t\tattack(my_pokemon, enemy)\n\t\t\t\tcalc_hp(enemy, \"attack\")\n\t\t\t\tshow_hp(enemy)\n\t\t\t\tprint \" \"\n\t\t\t\treturn\n\t\t\telif ans == \"flee\":\n\t\t\t\tchance = uniform(0, 100)\n\t\t\t\tif chance > 90:\n\t\t\t\t\twin(\"flee\")\n\t\t\t\telse:\n\t\t\t\t\tprint \"You failed to escape!\"\n\t\t\t\t\treturn\n\t\t\telif ans == \"potion\":\n\t\t\t\tuse_potion(my_pokemon)\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tprint \"i dont know what you mean :)\"\n\t\t\t\tprint \"lets try again!\"\n\t\t\t\tchoosing = True", "title": "" }, { "docid": "59d90161ec4d71b2a47b96b42233e2b2", "score": "0.6238245", "text": "def ask_user():\r\n while True:\r\n if bj.player1.double_down is True and bj.player1.split is True and bj.player1.went_split is False:\r\n p_choice = input(\"Hit, Stand, Double Down or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.split is True and bj.player1.went_split is False: # various input prompts depending on available player choices\r\n p_choice = input(\"Hit, Stand or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.double_down is True:\r\n p_choice = input(\"Hit, Stand or Double Down?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n else:\r\n p_choice = input(\"Hit or Stand?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice", "title": "" }, { "docid": "a77106d160b904a378e82183c0d40a31", "score": "0.6237381", "text": "def decision(self) -> bool:\n\n while True:\n # Get's user input, makes all charactures lowercase, and removes any whitespace\n decision = input('Enter \"hit\" or \"stay\". \\n').lower().strip()\n\n if decision == 'hit' or decision == 'stay':\n return decision == 'hit'\n else:\n # Humans can be dumb. 
Doesn't break the while loop\n print('\\nYou must type \"hit\" or \"stay\".')", "title": "" }, { "docid": "3318776bae976189714c2f759fbdbab3", "score": "0.6213806", "text": "def check_play_button(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets, mouse_x, mouse_y):\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n if button_clicked and not stats.game_active:\n ai_settings.initialize_dynamic_settings()\n #hiding mouse cursor\n start_game(ai_settings, screen, stats, ship, aliens, bullets)\n\n sb.prep_score()\n sb.prep_high_score()\n sb.prep_level()\n sb.prep_ships()", "title": "" }, { "docid": "c1676118f9e94960ef3b44dd2c2b01c0", "score": "0.6210235", "text": "def hit(self, hand_idx=0):\n player = self.players[hand_idx]\n if player['active']:\n player['hand'].append(self._pick_card())\n if self.dealer_hand_value() < self.dealer_min:\n self.dealer_hand.append(self._pick_card())\n if self.is_bust(hand_idx):\n self.stand(hand_idx) # Force Stand and compute game result\n # Turn Off Split and Double Down after the first hit\n if player['allow_dd']: # Don't allow double down after the first hit\n player['allow_dd'] = False\n if self.allow_split: # Don't allow split after the first hit\n self.allow_split = False", "title": "" }, { "docid": "8a7e327cbb671ba11571c756ba1189c8", "score": "0.62012774", "text": "def check_trying_using(self):\r\n if self.opportunity or 'key' in inventory:\r\n if self.rect.colliderect(player):\r\n music_acceptor.usingPortalSound()\r\n player.rect.x = random.randrange(75, WIDTH - 125)\r\n player.rect.y = random.randrange(25, HEIGHT - 100)", "title": "" }, { "docid": "9111730c43a24f158c135d59fdf061f6", "score": "0.6194562", "text": "def attack_opponent(self):\n coordHit = self.attack_table.item(\n self.attack_table.currentRow(), self.attack_table.currentColumn())\n if coordHit in self.clicked:\n self.attack_table.clearSelection()\n error_sound = vlc.MediaPlayer(\"resources/error.mp3\")\n error_sound.play()\n else:\n self.attack_table.item(self.attack_table.currentRow(\n ), self.attack_table.currentColumn()).setBackground(Qt.gray)\n self.clicked.append(coordHit)\n shoot_sound = vlc.MediaPlayer(\"resources/shoot.mp3\")\n shoot_sound.play()\n for ship in self.enemyShips:\n if ship.check_position(coordHit) == True:\n ship.hit(coordHit)\n self.attack_table.item(self.attack_table.currentRow(\n ), self.attack_table.currentColumn()).setBackground(Qt.darkRed)\n if self.check_enemy_fleet() == False:\n self.menu = Menu(self.lang, self.username)\n self.menu.show()\n self.win_window = Win(self.lang)\n self.win_window.show()\n self.close()\n self.hit_coordinate()\n self.attack_table.clearSelection()", "title": "" }, { "docid": "edcd2e4d3d282555c3950b7eb2235e26", "score": "0.6188507", "text": "def on_hit(self, event, data):\n world, player = data\n # Ensure the top of the flag block is being hit\n # and ensure player hadn't been healed before in this level\n if get_collision_direction(player, self) == \"A\" and not player.get_bonus_health():\n player.change_health(1)\n player.set_bonus_health(True) # player won't get heal twice in a single level", "title": "" }, { "docid": "df8ce28c43d6737afd010e41e3584476", "score": "0.6187365", "text": "def deal_self(self):\n self.cards.hit(self.get_card())\n if self.cards.hand < 17 and self.cards.hand>=0:\n self.state = 'active'\n elif self.cards.hand >= 17 and self.cards.hand <= 21:\n self.state = 'stand'\n elif self.cards.hand==-1:\n self.state = 'burst'", "title": "" }, { "docid": "c963c08dfc2b5f09af40b6fcbd6f9184", 
"score": "0.6183678", "text": "def roomAction(gc, room_mob):\n gc['statuscontent']['MobAction'].setText(\"\")\n gamestate = 2\n # print(\"room_mob['category']:\", room_mob['category'])\n # if mob is a monster when you press enter the player attacks and get attacked back if the mob is still alive\n if room_mob.category == 'monster':\n gc['statuscontent']['MobAction'].setText(\"You attacked the {}\".format(room_mob.name))\n play('jab.ogg')\n damage = random.randint(1, gc['player'].getAttack())\n left = room_mob.hp - damage\n\n # if mob has any hp left set new hp to that mob and let the mob hit back at the player\n # else mob is dead (set mob hp to 0) and tell that room i doesn't have any mob any more (hasmob = False)\n if left > 0:\n room_mob.setHP(left)\n gc['statuscontent']['MobHP'].setText(\"HP: \" + str(room_mob.hp))\n dice = random.randint(1, 100)\n \n if dice <= room_mob.attacktrigger*100:\n time.sleep(0.5)\n play('chopp.ogg')\n hurtPlayer(gc['player'], room_mob.damage)\n else:\n time.sleep(0.5)\n play('missed_chopp.ogg')\n \n else:\n time.sleep(0.5)\n play('zombie_pain.ogg')\n room_mob.setHP(0)\n gc['board'].setNoMob(gc['player'].relcoords())\n gc['statuscontent']['MobHP'].setText(\"DEAD!\")\n gc['statuscontent']['MobAction'].setText(\"Monster dropped {}\".format(room_mob.getLootDescription()))\n gc['player'].addInventory(room_mob.getLoot())\n gamestate = 1\n elif room_mob.category == 'treasure':\n print(\"OPEN TREASURE\")\n time.sleep(0.5)\n play('open_chest.ogg')\n gc['board'].setNoMob(gc['player'].relcoords())\n gc['statuscontent']['MobAction'].setText(\"You got {}\".format(room_mob.getLootDescription()))\n gc['player'].addInventory(room_mob.getLoot())\n # elif room_mob['category'] == 'trap':\n # dice = random.randint(1, 100)\n # print(\"Nu kommer jag till en fälla\")\n # if dice <= room_mob.attacktrigger*100:\n # hurtPlayer(gc['statusbar'], gc['statuscontent'], gc['player'], room_mob.damage)\n # print(\"Jag blev visst skadad\")\n gc['statuscontent']['Attack'].setText(\"Atk {}\".format(gc['player'].getAttack()))\n gc['statuscontent']['Gold'].setText(\"Gold: {}\".format(gc['player'].getTreasure()))\n return gc, room_mob, gamestate", "title": "" }, { "docid": "64417c4f86c17a50e631a1353b167537", "score": "0.61788714", "text": "def player_stand(self):\r\n if self.in_progress:\r\n while self.dealer_hand.total < 17:\r\n self.dealer_hand.add(self.deck.deal())\r\n if self.dealer_hand.total > 21 or self.dealer_hand.total < self.player_hand.total:\r\n self.status_color = 'red'\r\n self.game_status = \"Player WINS... Press 'r' to start game\"\r\n self.player_wins += 1\r\n elif self.player_hand.total == self.dealer_hand.total:\r\n self.status_color = 'red'\r\n self.game_status = \"TIE Game... Press 'r' to start game\"\r\n else:\r\n self.status_color = 'red'\r\n self.game_status = \"Dealer WINS... Press 'r' to start game\"\r\n self.dealer_wins += 1\r\n self.in_progress = False\r\n self.refresh_canvas()", "title": "" }, { "docid": "c7e479ab5c731f519d6d29ff008ae79b", "score": "0.6177234", "text": "def request_action(self):\n\n # Return the player's input\n return input(\"Enter 'r' to roll the die, or 'h' to hold. What you you like to do? 
\")", "title": "" }, { "docid": "47c8fce9493b987ab34d9985597dbc4f", "score": "0.61745393", "text": "def _on_stand_next(self) -> None:\n self._should_stand = True", "title": "" }, { "docid": "d3033404877962412038907fe227db91", "score": "0.6141425", "text": "def stand():\n \n # Update message, score and the player's \"Hand\" status\n # as global variables.\n global outcome, outcome_plus, outcome_plus_plus, in_play, score, action \n \n # If the \"Player\" has busted, remind the \"Player\" that \n # they have busted.\n if player.get_value() > 21:\n outcome = PLAYER_BUSTED\n outcome_plus = outcome_plus_plus = \"\"\n action = NEW_DEAL\n elif in_play:\n # If the \"Hand\" is in play, repeatedly hit \"Dealer\" \n # until his \"Hand\" has value 17 or more. \n while dealer.get_value() < 17:\n dealer.add_card(deck_of_cards.deal_card())\n\n # If busted, update messages, score and the \n # player's \"Hand\" status. \n if dealer.get_value() > 21:\n outcome = PLAYER_WINS\n outcome_plus = DEALER_BUSTED\n outcome_plus_plus = \"\"\n action = NEW_DEAL \n score += SCORE_POINTS \n in_play = False\n # Else compare the value of the \n # player's and dealer's \"Hands\". If the value of \n # the player's \"Hand\" is less than or equal to \n # the dealer's \"Hand\", the \"dealer\" wins. \n # Otherwise the \"player\" has won. Again,\n # update messages, score and the player's \"Hand\" \n # status. \n else: \n in_play = False\n action = NEW_DEAL\n outcome_plus = outcome_plus_plus = \"\"\n if player.get_value() > dealer.get_value():\n outcome = PLAYER_WINS \n score += SCORE_POINTS \n else:\n outcome = PLAYER_LOSES \n score -= SCORE_POINTS\n \n return None", "title": "" }, { "docid": "b3f1a7d67c941f9c3fefd6220604e13f", "score": "0.61371064", "text": "def beer():\r\n global cheated\r\n\r\n if enter_four == config.confus(config.config4):\r\n player.grab(helpful.Item('SixPack',10,0,0,6))\r\n cheated = True\r\n print '<achievement unlocked>\\n'\r\n\r\n if player.get_money() >= 17:\r\n\r\n player.set_health(100)\r\n player.lose_money(17)\r\n\r\n raw_input('You take out your money.\\n')\r\n raw_input(bartender_name + ' chuckles.\\n')\r\n raw_input('\"I guess we have this stuff, if you really need a drink.\"\\n')\r\n\r\n raw_input(\"The 'beer' healed you!\\n\")\r\n raw_input('It also cost $17.\\n')\r\n \r\n else:\r\n print bartender_name + ' chuckles and looks pointedly at his empty tip jar.\\n'\r\n raw_input('\"' +\"We're out of beer.\" + '\"\\n')\r\n raw_input('\"Nice try.\"\\n')", "title": "" }, { "docid": "34cdbe501d57c3d78d66955cfd575c52", "score": "0.61012924", "text": "def ninja_turn():\r\n\tglobal men\r\n\tl = [chop, fly, firebreath]\r\n\tx = randint(0,3)\r\n\tif men >= 85 and x == 3:\r\n\t\tx = randint(0,2)\r\n\tif x != 3 and men - l[x][5] >= 0:\r\n\t\treturn ninja.hit(*l[x])\r\n\telse:\r\n\t\tmen += ninja.sleep(*nsleep)\r\n\t\treturn 0", "title": "" }, { "docid": "394e30f4934e81765f4640e779769cbc", "score": "0.6086581", "text": "def get_user_input(self, game, hand, message, allowed_actions):\n return 'hit'", "title": "" }, { "docid": "6a32cb98c519baefffb385ece4b489e1", "score": "0.60825604", "text": "def event_player_bust(self) -> None:\n print(f\"Your hand contains {min(self.user.hand.value)}, you're bust\")\n self.event_house_wins()", "title": "" }, { "docid": "54e643484040a670d3ca2883bd9f8d0f", "score": "0.60809463", "text": "def weapon_check():\n if get_locations()['player'] == get_locations()['weapon']:\n STATUS['weapon'] = 'armed'\n STATUS['locations']['weapon'] = None\n print(\"You found the weapon! 
Now go and kill the monster!\")", "title": "" }, { "docid": "9df1e067c12599ca0bdf4615cd880937", "score": "0.60632664", "text": "def win_point(world: World):\r\n if world[\"user choice\"] == \"santa\" and world[\"cpu choice\"] == \"snowman\":\r\n world[\"score\"] += 1\r\n world[\"Won?\"] = \"Win\"\r\n elif world[\"user choice\"] == \"snowman\" and world[\"cpu choice\"] == \"reindeer\":\r\n world[\"score\"] += 1\r\n world[\"Won?\"] = \"Win\"\r\n elif world[\"user choice\"] == \"reindeer\" and world[\"cpu choice\"] == \"santa\":\r\n world[\"score\"] += 1\r\n world[\"Won?\"] = \"Win\"\r\n else:\r\n world[\"Won?\"] = \"Did not win\"", "title": "" }, { "docid": "7c0e1c060ab6cb8fd6361930cc4c669f", "score": "0.6061345", "text": "def _check_button(self, mouse_pos):\r\n if self.display.easy_button.rect.collidepoint(mouse_pos):\r\n self.settings.set_difficulty(self.settings.easy)\r\n self.ai_game.start_game()\r\n elif self.display.normal_button.rect.collidepoint(mouse_pos):\r\n self.settings.set_difficulty(self.settings.normal)\r\n self.ai_game.start_game()\r\n elif self.display.hard_button.rect.collidepoint(mouse_pos):\r\n self.settings.set_difficulty(self.settings.hard)\r\n self.ai_game.start_game()\r\n elif self.display.quit_button.rect.collidepoint(mouse_pos):\r\n self.ai_game.quit()", "title": "" }, { "docid": "1d6476794b5c013223857b9d2bc04a21", "score": "0.60532385", "text": "def check_hand(self, player):\n\n total = player.score()\n if total > 21:\n status = 'bust'\n elif total == 21:\n status = 'win'\n else:\n status = 'okay'\n\n if self.verbose:\n print(total, 'points')\n \n return status", "title": "" }, { "docid": "808e366c0fde77b124fb1f2444e58dcb", "score": "0.60456306", "text": "def check_if_game_over():\n check_for_winner()\n check_for_tie()", "title": "" }, { "docid": "5290de161e1e02765b7f7829cc97026b", "score": "0.6039582", "text": "def step(self, action):\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {\n \"play\": {\n \"landlord\": self._get_reward(\"landlord\"),\n \"landlord_up\": self._get_reward(\"landlord_up\"),\n \"landlord_down\": self._get_reward(\"landlord_down\")\n },\n \"bid\": {\n \"landlord\": self._get_reward_bidding(\"landlord\")*2,\n \"landlord_up\": self._get_reward_bidding(\"landlord_up\"),\n \"landlord_down\": self._get_reward_bidding(\"landlord_down\")\n }\n }\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}", "title": "" }, { "docid": "934b2c4f2135888882f8dd207cd794e9", "score": "0.6037751", "text": "def on_hit(self, game):\n raise NotImplementedError", "title": "" }, { "docid": "322e11f41f06f5d21d3593c4d2a4119c", "score": "0.60331637", "text": "def player_turn():\r\n\tglobal pen\r\n\tl = [punch, kick, headbut]\r\n\tprint \"1. punch\"\r\n\tprint \"2. Kick\"\r\n\tprint \"3. Head but\"\r\n\tprint \"4. 
Sleep\"\r\n\twhile True:\r\n\t\tx = choice(4)\r\n\t\tif x == 3 and pen < 85:\r\n\t\t\tpen += player.sleep(*psleep)\r\n\t\t\treturn 0\r\n\t\telif x == 3:\r\n\t\t\tprint \"You are too full of energy!\"\r\n\t\telif pen - l[x][5] >= 0 and x == 2:\r\n\t\t\treturn headbut_miss(player.hit(*l[2]))\r\n\t\telif pen - l[x][5] >= 0:\r\n\t\t\treturn player.hit(*l[x])\r\n\t\telse:\r\n\t\t\tprint \"You don't have enough energy.\"", "title": "" }, { "docid": "7ee0e3731e95ffb391da6489247c80d0", "score": "0.6026735", "text": "def plyer_op(self):\r\n time.sleep(1)\r\n while True:\r\n choice = input(\"how do you wint play with??\\n\"\r\n \"1. rockyman\\n\"\r\n \"2. RandomBOY\\n\"\r\n \"3. mirrorBOY\\n\"\r\n \"4. circularBoy\\n\"\r\n \"5. leave\\n \")\r\n if choice == '1':\r\n self.opposite = rockyman()\r\n break\r\n elif choice == '2':\r\n self.opposite = RandomBOY()\r\n break\r\n elif choice == '3':\r\n self.opposite = mirrorBOY()\r\n break\r\n elif choice == '4':\r\n self.opposite = circularBoy()\r\n break\r\n elif choice == '5':\r\n sys.exit(\"Bye\")\r\n break\r\n else:\r\n print('what??\\n')\r\n Game.plyer_op", "title": "" }, { "docid": "927beea612267b8689ee36e8c06cc27e", "score": "0.6021901", "text": "def hit(self, hand):\n if hand == \"player\":\n self.player_hand.append(self.cards_list[self.top_card_int])\n self.calculate_value(\"player\")\n elif hand == \"dealer\":\n self.dealer_hand.append(self.cards_list[self.top_card_int])\n self.calculate_value(\"dealer\")\n self.top_card_int += 1\n self.update_card_positions()", "title": "" }, { "docid": "b87bfa6f11e42faed34e8c577f6818a2", "score": "0.6020695", "text": "def cat_turn():\r\n\tglobal men\r\n\tl = [bat, pounce, legkick]\r\n\tx = randint(0, 3)\r\n\tif men >= 85 and x == 3:\r\n\t\tx = randint(0,2)\r\n\tif x != 3 and men - l[x][5] >= 0:\r\n\t\treturn cat.hit(*l[x])\r\n\telse:\r\n\t\tmen += cat.sleep(*csleep)\r\n\t\treturn 0", "title": "" }, { "docid": "3ffc8b8217b1982a49b5db138ae5bcc5", "score": "0.60170066", "text": "def action_normal(self):\n obs = self.observation\n shoot = False\n eb = self.__class__.enemy_base\n \n ammopacks = filter(lambda x: x[2] == \"Ammo\", obs.objects)\n if ammopacks:\n self.updateAllAmmoSpots(ammopacks)\n # Walk to ammo\n if obs.ammo < SUFFICIENT_AMMO:\n self.goal = self.getClosestLocation(ammopacks)\n self.motivation = MOTIVATION_AMMO\n self.debugMsg(\"*> Recharge (%d,%d)\" % (self.goal[0],self.goal[1]))\n \n '''if (obs.ammo > 0 and obs.foes):\n self.goal = self.getClosestLocation(obs.foes)\n self.debugMsg(\"*> Go to enemy (%d,%d)\" % self.goal)\n # If the enemy is within range, shoot.\n if(point_dist(self.goal, obs.loc) < self.settings.max_range\n and not line_intersects_grid(obs.loc, self.goal, self.grid, self.settings.tilesize)):\n self.debugMsg(\"*> Shoot (%d,%d)\" % self.goal)\n #if self.goal not in obs.friends:\n self.motivation = MOTIVATION_SHOOT_TARGET\n shoot = True'''\n \n # Attack strategy 1\n #########################\n # 1) Shoot live enemies #\n #########################\n # Aim at the closest enemy outside the enemy base\n if obs.ammo > 0 and obs.foes:\n living = filter(lambda x: point_dist(x[0:2], eb) > ENEMY_BASE_RANGE, obs.foes)\n self.debugMsg(\"Living: %s\" % (living,))\n if living:\n self.debugMsg(1)\n self.goal = min(living, key=lambda x: point_dist(obs.loc, x[0:2]))[0:2]\n self.motivation = MOTIVATION_SHOOT_TARGET\n self.debugMsg(2)\n # Check if enemy in fire range\n if (\n point_dist(self.goal, obs.loc) < self.settings.max_range and\n not line_intersects_grid(\n obs.loc, \n self.goal, \n self.grid, \n 
self.settings.tilesize\n )\n ):\n self.debugMsg(3)\n self.debugMsg(\"*> Shoot (%d,%d)\" % self.goal)\n #return self.getActionTriple(True,None,0) ###?? SHOULD WE STOP MOVING WHEN WE SHOOT?\n return self.getActionTriple(True)\n else:\n self.debugMsg(4)\n return self.getActionTriple()\n self.debugMsg(5)\n \n # Walk to an enemy CP\n if self.goal is None and len(self.friendlyCPs) < 2:\n self.goal = self.getClosestLocation(self.getQuietEnemyCPs())\n if self.goal:\n self.debugMsg(\"Crowded location: %d\" % self.getCrowdedValue(self.goal))\n self.motivation = MOTIVATION_CAPTURE_CP\n self.debugMsg(\"*> Capture (%d,%d)\" % (self.goal[0],self.goal[1]))\n \n '''# If you can't think of anything to do\n # at least walk to a friendly control point\n if self.goal is None:\n self.goal = self.getClosestLocation(self.getQuietRestlessFriendlyCPs())\n if self.goal:\n self.motivation = MOTIVATION_GUARD_CP\n self.debugMsg(\"*> Guard (%d,%d)\" % (self.goal[0],self.goal[1]))'''\n \n if self.goal is None:\n self.goal = max(\n self.__class__.ammoSpots,\n key=lambda x: point_dist(x, obs.loc),\n )\n self.debugMsg(\"Going to ammospot far away (%d, %d)\" % (self.goal[0],self.goal[1]))\n self.motivation = MOTIVATION_STAY_PUT\n \n\n if self.goal:\n return self.getActionTriple(shoot)\n else:\n return self.getActionTriple(shoot)", "title": "" }, { "docid": "0708558eb4d4ca422fb506b1255073c0", "score": "0.60139674", "text": "def process(self, player):\n if player.has_sword():\n return CONTINUE\n return GAIN_SWORD", "title": "" }, { "docid": "053799aae84239cfdbfc2b7acf0ea58a", "score": "0.60128367", "text": "def attack1(self, command):\n\n if random.randint(1,3) == 1 or random.randint(1,3) == 3:\n p.health -= self.weapon[0].damage\n print(\"You've been hit! \\nHealth at \" + str(p.health))\n else:\n print('Enemy tried to attack, missed!')", "title": "" }, { "docid": "4753980cd1fc244e30b03d9ea410186c", "score": "0.5966602", "text": "def take_turn(self):\n \n self.card_1 = self.get_card()\n self.display_card_1()\n guess = self.player.higher_lower()\n self.card_2 = self.get_card()\n self.display_card_2()\n self.compare_cards(guess)\n self.player.print_score()\n if self.player.score > 0:\n self.can_deal = self.player.keep_playing()\n print(\"\\n\")\n else:\n self.can_deal = False\n print(\"Game overThanks for playing!\")", "title": "" }, { "docid": "bf8bf219bf0428284a220b31499dca54", "score": "0.5964878", "text": "def check_if_won(self):\n if self.player_points > self.enemy_points:\n self.bHasWon = True\n else:\n self.bHasWon = False", "title": "" }, { "docid": "ab9a57b43aaafd041185ab21b7f6c46e", "score": "0.5955461", "text": "def got_hit(self, game_over_event):\n if self.invul_timer == 0:\n self.health -= 1\n self.play_impact_sound()\n if self.health == 0:\n pygame.event.post(pygame.event.Event(game_over_event))\n self.invul_timer = CST.PLAYER_INVULNERABILITY_DURATION * CST.FPS\n self.repair_timer = self.REPAIR_TIME # Resetting repair state upon hit", "title": "" }, { "docid": "52ad85122c8187252ca57c612f9bdb00", "score": "0.59527355", "text": "def game_play(self):", "title": "" }, { "docid": "34bcacdf8491b38da11f12a016bd7a31", "score": "0.5950853", "text": "def hitMe(hand, deck):\n if deck.cardsLeft == 0:\n return False\n hand.getCard(deck.drawCard())\n return True", "title": "" }, { "docid": "b6ed422af2b4113563aae5856b5b7b70", "score": "0.5946137", "text": "def chooseAttack(opponents_board):\r\n while True:\r\n guess = int(raw_input(\"choose a number between 0 through 8 to attack::\"))\r\n if guess < 0 or guess >8:\r\n 
continue\r\n result = checkIfHitOrMiss(guess, opponents_board)\r\n\r\n\r\n if result == \"hit\" or result == \"miss\":\r\n break\r\n\r\n if checkIfSunk(opponents_board):\r\n return \"sunk\"\r\n\r\n return result", "title": "" }, { "docid": "4bdf2b84439d63aa01bca3ece1894016", "score": "0.5941747", "text": "async def round(self):\n def turn_check(m):\n return ((m.content.lower() == 'stand') or (m.content.lower() == 'hit')) and m.guild == self.ctx.guild\n # Players\n for i, player in enumerate(self.players):\n if not player.out:\n HoS = ''\n while HoS != \"stand\":\n embed_players = discord.Embed(\n title='Players', color=0x0000fd)\n try:\n await self.ctx.send(f\"{self.users[i].name}, Would you like to hit or stand? \")\n HoS = await self.client.wait_for('message', timeout=20.0, check=turn_check)\n HoS = HoS.content.lower()\n\n if HoS == \"stand\":\n break\n\n elif HoS == \"hit\":\n # give the player a new card\n self.deck.move_cards(player, 1)\n # reload the embed with player hands\n for j, player2 in enumerate(self.players):\n if not player2.out:\n embed_players.add_field(\n name=f\"{self.users[j].name}\", value=player2, inline=True)\n await self.players_msg.edit(embed=embed_players)\n\n if player.get_value() > 21:\n await self.ctx.send(f\"{self.users[i].name} is bust\")\n break\n elif player.get_value() == 21:\n await self.ctx.send(f\"{self.users[i].name} has BlackJack!\")\n player.has_bj = True\n break\n\n except Exception as e:\n print(e)\n continue\n\n # Dealer\n while self.dealer.get_value() < 17:\n self.deck.move_cards(self.dealer, 1)\n\n embed_dealer = discord.Embed(title='Dealer', color=0x00ff00)\n embed_dealer.add_field(name=\"Hand\", value=self.dealer, inline=False)\n await self.dealer_msg.edit(embed=embed_dealer)\n\n # Checks\n # if dealer is bust and not all players are out\n if self.dealer.get_value() > 21 and self.total_players_out < len(self.players):\n for player in self.players:\n if player.get_value() <= 21 and not player.out: # if player is not bust and is not out\n player.credit(2 * player.bet)\n await self.ctx.send(\"Since Dealer is bust, all players win\")\n\n elif self.dealer.get_value() == 21 and self.total_players_out < len(self.players): # Dealer has blackjack\n await self.ctx.send(\"Dealer has BlackJack!\")\n for player in self.players:\n if player.has_bj and not player.out:\n player.credit(2 * player.bet)\n else:\n # Used to check if any of the if statements are activated.\n if_flag = False\n for i, player in enumerate(self.players):\n # if player has blacjack or beat the dealer and not out\n if player.has_bj or (player.get_value() < 21 and player.get_value() > self.dealer.get_value()) and not player.out:\n if_flag = True\n await self.ctx.send(f\"{self.users[i].name}, Conrats on winning!\")\n player.credit(2 * player.bet)\n # if player not bust and tied with dealer\n elif player.get_value() < 21 and player.get_value() == self.dealer.get_value() and not player.out:\n if_flag = True\n await self.ctx.send(f\"{self.users[i].name}, tied with the dealer!\")\n player.credit(player.bet)\n if not if_flag and self.total_players_out < len(self.players):\n await self.ctx.send(\"House wins\")\n\n # end of round cleanup\n for i, player in enumerate(self.players):\n if not player.out:\n player.has_bj = False\n if player.coins < 1:\n await self.ctx.send(f\"{self.users[i].name}, Min bet is €1, get your cheap ass out of here\")\n player.out = True\n self.total_players_out += 1\n elif player.coins > 10000:\n await self.ctx.send(f\"{self.users[i].name}! 
You\\'re too good, we have to stop you\")\n player.out = True\n self.total_players_out += 1", "title": "" }, { "docid": "63b45c138abf742a87adec7f7d089ade", "score": "0.5929707", "text": "def ai_event(self): \n self.choice = (-1, -1)\n any_human_agents = isinstance(self.agents[Player.WHITE], HumanAgent) or \\\n isinstance(self.agents[Player.BLACK], HumanAgent)\n\n if self.timestep_watch.time() >= self.TIMESTEP_DELAY:\n self.timestep_watch.reset()\n if not any_human_agents:\n self.event = Event.next(self.event)\n if Event.is_valid_placement_stage(self.event):\n self.choice = self.agents[self.env.turn].act()", "title": "" }, { "docid": "2c1002d9916fea1646a60924869648cb", "score": "0.59273726", "text": "def use(self):\n while True:\n print(\"Type 'back' to go back.\")\n item_choice = player_choice(\"\")\n if item_choice == 'back':\n break\n elif item_choice in inventory:\n if item_choice == \"bronze key\":\n print(\"You open the door and step outside.\")\n jt = Outside('outside')\n jt.just_there()\n else:\n print(\"That is the wrong item!\")\n else:\n print(\"You have not found the item yet.\")", "title": "" }, { "docid": "688c01f2928d56a0555e22b3dcbf8d9c", "score": "0.59255815", "text": "async def play_axe(game_state) -> None:\n if len(game_state.active_player.zombies) > 0:\n play_weapon(game_state, Supply.AXE)\n else:\n game_state.active_player.print(f'You cannot play {Supply.AXE.value} for nothing!')", "title": "" }, { "docid": "3f2d5027e11c1e97fd1db61a7518232e", "score": "0.5920565", "text": "def _control_play(self, entities: List[str]):\n if entities:\n self.player.play(entities)\n else:\n self.player.respond(\"I'm sorry, I couldn't find that for you.\")", "title": "" }, { "docid": "6cb1b62c58cab4ffb40820e5536a887f", "score": "0.5918885", "text": "def game(a,b, ):\n attacker, defender = a, b\n combatround = 0\n while a.hitpoints > 0 and b.hitpoints > 0:\n combatround += 1 # increase combatround by 1\n if a.stunned > 0:\n a.stunned -= 1\n if b.stunned > 0:\n b.stunned -= 1\n print()\n print(\"=================================\")\n print(\"combat round nr:\", combatround)\n print(\"attacker:\", attacker)\n print(\"defender:\", defender)\n print(\"=================================\")\n result = strike(attacker,defender)\n if result == None:\n break\n for line in result:\n print(line)\n if attacker == a and defender ==b:\n attacker, defender = b, a\n else:\n attacker, defender = a, b\n\n # game over \n print(\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\")\n if a.hitpoints > b.hitpoints:\n victor = a.name\n elif b.hitpoints > a.hitpoints :\n victor = b.name\n else:\n print(\"it is a draw\")\n victor = None\n print(\"victor:\", victor)", "title": "" }, { "docid": "a5cac588a541b8d81fa188f0f7dea322", "score": "0.59161335", "text": "def player_hit(self, at: engine.math.Vector2, hit_result: int):\n # Store the effect of the shot.\n self.__current_fire_location = at\n self.__current_fire_effect = hit_result\n\n # Move on to the hit response.\n self.__current_phase = self.PHASE_SHOW_HIT", "title": "" }, { "docid": "344bec9bab3f9485f3c48fc711d7e2e3", "score": "0.59126884", "text": "def hits(self, player1):\n newcard1 = self.deck.draw()\n player.hand.append(cards)\n print(\" Drew the {}.\".format(str(newcard)))\n\n while True:\n points = sum_hand(player.hand)\n\n if points < 17:\n print(\" Hit.\")\n self.hit(player)\n elif points == 21:\n print(\" {} wins!\".format(player.name))\n sys.exit(0) # End if someone wins\n elif points > 21:\n print(\" Bust!\")\n break\n else: # Stand if between 17 and 20 
(inclusive)\n print(\" Standing at {} points.\".format(str(points)))\n self.scores[player.name] = points\n break", "title": "" }, { "docid": "877782d2a24fa134bcab033529a8d498", "score": "0.5912468", "text": "def check_angry(self):\n if self.die_a.value == 3 and self.die_b.value == 3:\n print(\"WOW, you're ANGRY!\\nTime to go back to Stage 1!\")\n self.current_stage = 1", "title": "" }, { "docid": "736e12524ace0be0177c38f341739e4a", "score": "0.5904266", "text": "def attack(self):\n if not self.attack_mode and not self.damage_mode and not self.death_mode:\n self.attack_group = choice(self.attack_groups)\n self.attack_mode = True\n self.cut_frame_update = 0", "title": "" }, { "docid": "08a48afb2b62465d3b9f54cebb524d1d", "score": "0.59027267", "text": "def action_handler(self):\n if self.state == data.DEAD:\n return\n\n x = 0\n for check in self.state_chart[self.state]:\n if not check:\n x += 1\n continue\n elif check():\n self.state = x\n\n # Some messages when state changes\n if self.state == data.CHASE:\n self.handler.message_box.add_msg(\"{} sees you!\".format(self.name), \n data.COLOURS['mob_behaviour_text'])\n elif self.state == data.RUN:\n self.handler.message_box.add_msg(\"{} runs away!\".format(self.name), \n data.COLOURS['mob_behaviour_text'])\n\n x += 1\n\n if self.state == data.HOLD:\n return\n elif self.state == data.CHASE:\n self.chase(self.handler.player)\n elif self.state == data.RUN:\n self.run(self.handler.player)", "title": "" }, { "docid": "e0aa2e64ebfe92468537687dbf8317fc", "score": "0.58848697", "text": "def hit(self, card):\n self.cards.hit(card)\n if self.cards.hand ==-1:\n self.state ='burst'", "title": "" }, { "docid": "063bee1c72ffb2dc70bedc0e962c0ef6", "score": "0.5884498", "text": "def turn(self, attacker, taker):\n # checks if the attacker hits a critical\n damage = attacker.attack()\n if damage != attacker.strength:\n print(f'BIG HIT! critical {damage} damage!')\n print(f'{attacker.name} hits '\n f' {damage} hp!')\n # checks if the taker dodges the attack\n if taker.take_damage(damage):\n print(f'{taker.name} takes a hit! 
ouch.'\n f' {taker.name} has '\n f'{taker.health} health remaining')\n else:\n print(f'{taker.name} dodges the attack!!!')", "title": "" }, { "docid": "d7d49e8e67198ff5df2be1fbe4e12312", "score": "0.588029", "text": "def play(self):\n if self.stats['round'] == 0:\n if self.data['personalities'] and self.data['events']:\n self.choose_opponent()\n self.resolve_conflict()\n else:\n self.stats['round'] += 1\n elif self.stats['round'] == 1:\n if self.data['locations']:\n self.choose_location()\n self.resolve_conflict()\n else:\n self.stats['round'] += 1\n else:\n print(\"You've won\")\n self.game_over = True\n return self.stats", "title": "" }, { "docid": "688d743285f51df102138d97b80c030c", "score": "0.58789146", "text": "def play_game(self):\n # need everyone to pass to move to next phase?\n self.deal_cards()\n self.plant_food()", "title": "" }, { "docid": "7f7bb029218420f56de947dd4933cfd7", "score": "0.58545", "text": "def play_round(self):\r\n move1 = self.p1.move()\r\n move2 = self.p2.move()\r\n # Checks if User Wants to Quit Game:\r\n if move1 == \"quit\" or move2 == \"quit\":\r\n self.game_over(True)\r\n print(f\"Player One: {move1.upper()}\\nPlayer Two: {move2.upper()}\")\r\n self.keep_score(move1, move2)\r\n self.p1.learn(move1, move2)\r\n self.p2.learn(move2, move1)", "title": "" }, { "docid": "59785ccaa2df637843bbb2703ab0886a", "score": "0.58539194", "text": "def hit(self, player):\n\n hit_card = self.deck.draw()\n hit_card.flip()\n player.take_card(hit_card)\n\n if self.verbose:\n print(player, 'receives', hit_card)", "title": "" }, { "docid": "fb10a46803dcc4db865c73c41a29939c", "score": "0.5849056", "text": "def game(self):\n sender = self.sender()\n if(sender.text() == \" \"):\n sender.setText(\"x\" if self.firstPlayer else \"0\")\n self.firstPlayer = not(self.firstPlayer)\n res = self.checkForResult()\n if(res[0] == True):\n self.endGame(res[1])", "title": "" }, { "docid": "120a0fb14770ea0f26d8c63782d3dc22", "score": "0.58401054", "text": "def __attack(self, target):\n attack_difference = (Warrior.attack(self, target))\n if attack_difference > 5:\n print(\"Second attack with ANGRY!\")\n Warrior.attack(self, target)\n return None", "title": "" }, { "docid": "289ecc5bb17c1f735a1089c3de4406a1", "score": "0.58390135", "text": "def play_game():\n pass", "title": "" }, { "docid": "c6c0ec6482d272363bb66b9842d1e647", "score": "0.5825503", "text": "def player_win(self):\n global chips\n global placed_bet\n\n chips = (self.final_bet*2 + chips)\n self.victory = True\n placed_bet = False", "title": "" }, { "docid": "c19a7f74d8121815696b66b900afe0a7", "score": "0.5824732", "text": "def __advance(self):\n # If the game is being prepared.\n if self.__current_phase == self.PHASE_PREPARE:\n # If both players are ready.\n if self.__get_current_player().pre_game_prepare() and self.__get_other_player().pre_game_prepare():\n # Start the turn.\n self.__current_phase = self.PHASE_START_TURN\n\n # Begin the game for each player.\n self.__get_current_player().start_game()\n self.__get_other_player().start_game()\n\n # If the game is being set up.\n elif self.__current_phase == self.PHASE_START_TURN:\n # Advance onto the request fire phase.\n self.__current_phase = self.PHASE_REQUEST_SHOT\n\n # Call the start turn method for both players.\n self.__get_current_player().start_turn()\n self.__get_other_player().start_turn()\n\n # If the game requires the user to shoot.\n elif self.__current_phase == self.PHASE_REQUEST_SHOT:\n # Advance onto the fire phase.\n self.__current_phase = self.PHASE_FIRE\n\n # Call 
the shoot method of the user.\n self.__get_current_player().request_shot()\n\n # If the game requires the other user to be hit.\n elif self.__current_phase == self.PHASE_REQUEST_HIT:\n # Advance onto the hit phase.\n self.__current_phase = self.PHASE_HIT\n\n # Call the other player's request hit method.\n self.__get_other_player().request_hit(self.__current_fire_location)\n\n # If the game shows the hit result.\n elif self.__current_phase == self.PHASE_SHOW_HIT:\n # Advance onto the await phase.\n self.__current_phase = self.PHASE_AWAIT_OPPONENT_SHOT\n\n # Call the player's show hit method.\n self.__get_current_player().show_hit(self.__current_fire_location, self.__current_fire_effect)\n\n # If the game awaits the next shot.\n elif self.__current_phase == self.PHASE_AWAIT_OPPONENT_SHOT:\n # If the opponent has lost.\n if self.__current_fire_effect == Player.SHOT_HIT_TYPE_GAME_OVER:\n # Store the winner's index.\n engine.Engine.game_manager.winner = self.current_player_index\n # Move to the game over phase.\n engine.Engine.load_level(\"GameOver\")\n else:\n # Call the player's await hit method.\n self.__get_current_player().await_opponent_shot()\n\n # If the turn is over.\n if self.current_player_index == 1:\n # Advance to the next turn.\n self.__current_phase = self.PHASE_END_TURN\n else:\n # Advance onto the next fire phase.\n self.__current_phase = self.PHASE_REQUEST_SHOT\n # Increment the user counter.\n self.current_player_index = 1\n\n elif self.__current_phase == self.PHASE_END_TURN:\n # Start a new turn.\n self.__current_phase = self.PHASE_START_TURN\n # Decrement the user counter.\n self.current_player_index = 0\n\n # Call the end turn methods.\n self.__get_current_player().end_turn()\n self.__get_other_player().end_turn()", "title": "" }, { "docid": "8f50928d2f1b9a85f1ddba66ccf53e03", "score": "0.582291", "text": "def on_hit(self, event, data):\n world, player = data\n # Ensure the top of the bounce block is being hit\n if get_collision_direction(player, self) == \"A\":\n self._active = True\n player.set_velocity((0, -3*player.get_max_velocity())) # bounce the player\n player.set_jumping(False) # player can't jump while bounced\n player.set_bounced(True)", "title": "" }, { "docid": "9d8b34bac7f0ea1a057ab0a06c6cd94a", "score": "0.58194697", "text": "def check_play_button(ai_settings,screen,stats,play_button,ship,bullets,mouse_x,mouse_y):\n\te_pressed = False\n\tbutton_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n\tif (button_clicked or e_pressed) and not stats.game_active: \n\t\t#play the music\n\t\tpygame.mixer.music.load('sounds/lvl1.mp3')\n\t\tpygame.mixer.music.set_volume(1.5)\n\t\tpygame.mixer.music.play()\n\t\t# Hide the mouse cursor\n\t\tpygame.mouse.set_visible(False)\n\t\t# Reset the game statistics\n\t\tstats.reset_stats()\n\t\tstats.game_active = True\n\t\tship.second_stage = False\n\t\tai_settings.boss_health = ai_settings.boss_health_default\n\t\t#empty the list of bullets\n\t\tbullets.empty()\n\t\t#center the ship\n\t\tship.center_ship()", "title": "" }, { "docid": "3a3032af7d1ca09c9b4062ce9c2109e7", "score": "0.58082086", "text": "def monster_check():\n player = get_locations()['player']\n monster = get_locations()['monster']\n if player == monster:\n if STATUS['weapon'] == 'armed':\n print(\"You killed the monster with the sword!\")\n play_again()\n else:\n if STATUS['hp'] > 0:\n STATUS['hp'] -= 5\n return \"The monster caught you! You barely manage to escape...\"\n elif STATUS['hp'] <= 0:\n print(\"The monster catachs you in its claws. 
Its not pretty.\")\n play_again()\n else:\n return \"Nothing in this room. Its around here somehwere though. \"", "title": "" }, { "docid": "4365cacadce63e5852fbc15df8ef83bd", "score": "0.58024955", "text": "def play_step(self, action):\n self.players[0].moving_left = False\n self.players[0].moving_right = False\n if action == MOVE_LEFT:\n self.players[0].moving_left = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_left = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == MOVE_RIGHT:\n self.players[0].moving_right = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_right = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == SHOOT:\n if self.dead_player or not self.players[0].is_alive:\n self.update(is_a_star=True)\n return\n if not self.players[0].weapon.is_active:\n self.players[0].shoot()\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n if self.dead_player or not self.players[0].is_alive:\n return", "title": "" }, { "docid": "7049cbb1a6497b9ac29d00f0f9ee0fcb", "score": "0.57980084", "text": "def duck_shooting1():\r\n score = 0\r\n duck = input(\"Do you want to shoot duck 1 2 3 or 4 \\n\")\r\n if duck == '1':\r\n if chance_hit() == 1:\r\n print(\"good job you got 500 points\")\r\n score += 500\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '2':\r\n if chance_hit() == 1:\r\n print(\"good job you got 1000 points\")\r\n score += 1000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '3':\r\n if chance_hit() == 1:\r\n print(\"good job you got 5000 points\")\r\n score += 5000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '4':\r\n if chance_hit() == 1:\r\n print(\"good job you got 3000 points\")\r\n score += 3000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\r\n \"That is not a duck you silly Goose. 
Now you have to start over!\")\r\n start()\r\n duck = input(\"Do you want to shoot duck 1 2 3 or 4 \\n\")\r\n if duck == '1':\r\n if chance_hit() == 1:\r\n print(\"good job you got 500 points\")\r\n score += 500\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '2':\r\n if chance_hit() == 1:\r\n print(\"good job you got 1000 points\")\r\n score += 1000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '3':\r\n if chance_hit() == 1:\r\n print(\"good job you got 5000 points\")\r\n score += 5000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '4':\r\n if chance_hit() == 1:\r\n print(\"good job you got 3000 points\")\r\n score += 3000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\r\n \"That is not a duck you silly Goose. Now you have to start over!\")\r\n start()\r\n duck = input(\"Do you want to shoot duck 1 2 3 or 4 \\n\")\r\n if duck == '1':\r\n if chance_hit() == 1:\r\n print(\"good job you got 500 points\")\r\n score += 500\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '2':\r\n if chance_hit() == 1:\r\n print(\"good job you got 1000 points\")\r\n score += 1000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '3':\r\n if chance_hit() == 1:\r\n print(\"good job you got 5000 points\")\r\n score += 5000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n elif duck == '4':\r\n if chance_hit() == 1:\r\n print(\"good job you got 3000 points\")\r\n score += 3000\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\"Dang, you missed\")\r\n print(\"Score:\" + (\"{:08d}\".format(score)))\r\n else:\r\n print(\r\n \"That is not a duck you silly Goose. 
Now you have to start over!\")\r\n start()\r\n return score", "title": "" }, { "docid": "8ad3e37bfaca0a92c86f7f6165dd1718", "score": "0.57948875", "text": "def battle_screen_hand_click_action(click_type,ai_settings, screen,buttons, screen_status, button_status, card_database_filter, user,player2, position = ''):\n if click_type == 'hand':\n if screen_status.battle_screen_action_indicator == 'stage-1-level-up':\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n\n else:\n pass\n\n elif 'stage-2-character-action-' in screen_status.battle_screen_action_indicator and 'detail-spawn' in screen_status.battle_screen_action_indicator:\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n if (located_card.card_type == 'monster'\n and (int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a monster lv','').replace(' or less and click yes to play.','')))\n and int(user.monster_in_play_length) < 6):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n else:\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n else:\n pass\n\n elif 'stage-2-character-action-' in screen_status.battle_screen_action_indicator and 'detail-think-fast' in screen_status.battle_screen_action_indicator:\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n if (located_card.card_type == 'tactic'\n and (int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a tactic lv','').replace(' or less and click yes to play.','')))):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n else:\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n else:\n pass\n\n elif 'stage-2-character-action-' in screen_status.battle_screen_action_indicator and 'detail-equip' in screen_status.battle_screen_action_indicator:\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 
1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n if (located_card.card_type == 'item'\n and (int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a item lv','').replace(' or less and click yes to play.','')))\n and int(user.item_in_play_length) < 6):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n else:\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n else:\n pass\n\n elif 'stage-2-character-action-' in screen_status.battle_screen_action_indicator and 'detail-sneak' in screen_status.battle_screen_action_indicator:\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n if (located_card.card_type == 'monster'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a card lv','').replace(' or less and click yes to play.',''))\n and int(user.monster_in_play_length) < 6):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n elif (located_card.card_type == 'item'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a card lv','').replace(' or less and click yes to play.',''))\n and int(user.item_in_play_length) < 6):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n elif (located_card.card_type == 'tactic'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a card lv','').replace(' or less and click yes to play.',''))\n ):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n else:\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n else:\n pass\n\n elif screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-think-fast':\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n if (located_card.card_type == 'monster'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a monster/tactic lv','').replace(' or less and click yes to play.',''))\n and int(user.monster_in_play_length) < 6):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n elif (located_card.card_type == 'tactic'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a monster/tactic lv','').replace(' or less and click yes to 
play.',''))\n ):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n else:\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n else:\n pass\n elif screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn-and-equip':\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n if (located_card.card_type == 'monster'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a monster/item lv','').replace(' or less and click yes to play.',''))\n and int(user.monster_in_play_length) < 6):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n elif (located_card.card_type == 'item'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a monster/item lv','').replace(' or less and click yes to play.',''))\n and int(user.item_in_play_length) < 6):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n else:\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n else:\n pass\n elif screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast-and-equip':\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n if (located_card.card_type == 'item'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a tactic/item lv','').replace(' or less and click yes to play.',''))\n and int(user.item_in_play_length) < 6):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n elif (located_card.card_type == 'tactic'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a tactic/item lv','').replace(' or less and click yes to play.',''))\n ):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n else:\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n else:\n pass\n elif screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-spawn':\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n located_card 
= user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n if (located_card.card_type == 'monster'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a monster lv','').replace(' or less and click yes to play.',''))\n and int(user.monster_in_play_length) < 6):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n else:\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n else:\n pass\n elif screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-think-fast':\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n if (located_card.card_type == 'tactic'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a tactic lv','').replace(' or less and click yes to play.',''))\n ):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n else:\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n else:\n pass\n elif screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-equip':\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n if (located_card.card_type == 'item'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a item lv','').replace(' or less and click yes to play.',''))\n and int(user.item_in_play_length) < 6):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n else:\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n else:\n pass\n\n elif screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-sneak':\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n if (located_card.card_type == 'monster'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a card lv','').replace(' or less and click yes to play.',''))\n and int(user.monster_in_play_length) < 6):\n 
button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n elif (located_card.card_type == 'item'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a card lv','').replace(' or less and click yes to play.',''))\n and int(user.item_in_play_length) < 6):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n elif (located_card.card_type == 'tactic'\n and int(located_card.level) <= int(button_status.battle_screen_instruction_bar_text.replace('Pick a card lv','').replace(' or less and click yes to play.',''))\n ):\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n else:\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n else:\n pass\n\n elif screen_status.battle_screen_action_indicator == 'stage-2-other-action-detail-tactic-1':\n\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) >= int(position):\n button_status.battle_screen_my_hand_indicator_position = position\n button_status.battle_screen_my_hand_indicator_display = True\n\n\n elif click_type == 'level up':\n # Make sure if using auto level up by clicking yes, the global position variable is set to one.\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) < int(button_status.battle_screen_my_hand_indicator_position):\n button_status.battle_screen_my_hand_indicator_position = '1'\n #\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n for i in range(1,16):\n if user.character_under_card_by_level[str(i*10)] == '':\n user.character_under_card_by_level[str(i*10)] = located_card\n break\n user.hand_list.remove(located_card)\n user.character_card.level = str(int(user.character_card.level) + 10)\n user.character_card.health = str(int(user.character_card.health) + 20)\n button_status.battle_screen_my_hand_indicator_display = False\n add_text_to_action_history('You have leveled up with: '+located_card.name+', Lv: '+str(int(user.character_card.level)-10)+' --> '+user.character_card.level+', HP: '+str(int(user.character_card.health)-20)+' --> '+user.character_card.health, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('play card',ai_settings)\n\n elif click_type == 'spawn':\n # Make sure if using auto level up by clicking yes, the global position variable is set to one.\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) < int(button_status.battle_screen_my_hand_indicator_position):\n button_status.battle_screen_my_hand_indicator_position = '1'\n #\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n # Check if this card satisfied spawn requirement\n for i in range(1,7):\n if user.monster_in_play_dict[str(i)] == '':\n user.monster_in_play_dict[str(i)] = located_card\n break\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False # hand buttons on card eg:****\n 
button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have spawned the monster: '+ located_card.name, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('play card',ai_settings)\n\n elif click_type == 'think fast':\n # Make sure if using auto level up by clicking yes, the global position variable is set to one.\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) < int(button_status.battle_screen_my_hand_indicator_position):\n button_status.battle_screen_my_hand_indicator_position = '1'\n #\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n x = located_card.special_effect\n if 'Quest/Quest' in x:\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have played the tactic: '+located_card.name + ', drawn 2 cards', screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('draw heal',ai_settings)\n\n elif 'Heal 20/Quest' in x:\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.character_card.health = str(int(user.character_card.health) + 20)\n\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have played the tactic: '+located_card.name+ ' ,heal yourself for 20 HP, HP: '+str(int(user.character_card.health)-20)+ ' --> '+user.character_card.health+ ', and drawn a card', screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('draw heal',ai_settings)\n\n elif 'Dmg' in x:\n if 'other' in screen_status.battle_screen_action_indicator:\n screen_status.battle_screen_action_indicator = 'stage-2-other-action-detail-tactic-1'\n elif 'character' in screen_status.battle_screen_action_indicator:\n y = screen_status.battle_screen_action_indicator.replace('stage-2-character-action-','')[0]\n screen_status.battle_screen_action_indicator = 'stage-2-character-action-' + y + '-detail-tactic-1'\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n button_status.battle_screen_instruction_bar_text = \"Pick a target to do \" + x[-3:] + ' Damage'\n\n\n elif click_type == 'equip':\n # Make sure if using auto level up by clicking yes, the global position variable is set to one.\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) < int(button_status.battle_screen_my_hand_indicator_position):\n button_status.battle_screen_my_hand_indicator_position = '1'\n #\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n # Check if this card 
satisfied item requirement\n\n for i in range(1,7):\n if user.item_in_play_dict[str(i)] == '':\n user.item_in_play_dict[str(i)] = located_card\n break\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False # hand buttons on card eg:****\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have equiped the item: '+located_card.name, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('play card',ai_settings)\n\n elif click_type == 'spawn/think fast':\n # Make sure if using auto level up by clicking yes, the global position variable is set to one.\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) < int(button_status.battle_screen_my_hand_indicator_position):\n button_status.battle_screen_my_hand_indicator_position = '1'\n #\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n # Check if this card satisfied item requirement\n if located_card.card_type == 'monster':\n for i in range(1,7):\n if user.monster_in_play_dict[str(i)] == '':\n user.monster_in_play_dict[str(i)] = located_card\n break\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False # hand buttons on card eg:****\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have spawned the monster: '+ located_card.name, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('play card',ai_settings)\n\n else:\n\n x = located_card.special_effect\n if 'Quest/Quest' in x:\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have played the tactic: '+located_card.name + ', drawn 2 cards', screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('draw heal',ai_settings)\n\n\n elif 'Heal 20/Quest' in x:\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.character_card.health = str(int(user.character_card.health) + 20)\n\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have played the tactic: '+located_card.name+ ' ,heal yourself for 20 HP, HP: '+str(int(user.character_card.health)-20)+ ' --> '+user.character_card.health+ ', and drawn a card', screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('draw heal',ai_settings)\n\n\n elif 'Dmg' in x:\n screen_status.battle_screen_action_indicator = 'stage-2-other-action-detail-tactic-1'\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = 
False\n button_status.battle_screen_instruction_bar_text = \"Pick a target to do \" + x[-3:] + ' Damage'\n\n\n elif click_type == 'spawn/equip':\n # Make sure if using auto level up by clicking yes, the global position variable is set to one.\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) < int(button_status.battle_screen_my_hand_indicator_position):\n button_status.battle_screen_my_hand_indicator_position = '1'\n #\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n # Check if this card satisfied item requirement\n if located_card.card_type == 'monster':\n for i in range(1,7):\n if user.monster_in_play_dict[str(i)] == '':\n user.monster_in_play_dict[str(i)] = located_card\n break\n add_text_to_action_history('You have spawned the monster: '+ located_card.name, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('play card',ai_settings)\n\n else:\n for i in range(1,7):\n if user.item_in_play_dict[str(i)] == '':\n user.item_in_play_dict[str(i)] = located_card\n break\n add_text_to_action_history('You have equiped the item: '+located_card.name, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('play card',ai_settings)\n\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False # hand buttons on card eg:****\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n\n elif click_type == 'think fast/equip':\n # Make sure if using auto level up by clicking yes, the global position variable is set to one.\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) < int(button_status.battle_screen_my_hand_indicator_position):\n button_status.battle_screen_my_hand_indicator_position = '1'\n #\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n # Check if this card satisfied item requirement\n if located_card.card_type == 'tactic':\n\n x = located_card.special_effect\n if 'Quest/Quest' in x:\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have played the tactic: '+located_card.name + ', drawn 2 cards', screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('draw heal',ai_settings)\n\n\n elif 'Heal 20/Quest' in x:\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.character_card.health = str(int(user.character_card.health) + 20)\n\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have played the tactic: '+located_card.name+ ' ,heal yourself for 20 HP, HP: 
'+str(int(user.character_card.health)-20)+ ' --> '+user.character_card.health+ ', and drawn a card', screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('draw heal',ai_settings)\n\n\n elif 'Dmg' in x:\n screen_status.battle_screen_action_indicator = 'stage-2-other-action-detail-tactic-1'\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n button_status.battle_screen_instruction_bar_text = \"Pick a target to do \" + x[-3:] + ' Damage'\n\n else:\n for i in range(1,7):\n if user.item_in_play_dict[str(i)] == '':\n user.item_in_play_dict[str(i)] = located_card\n break\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False # hand buttons on card eg:****\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have equiped the item: '+located_card.name, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('play card',ai_settings)\n\n\n elif click_type == 'sneak':\n # Make sure if using auto level up by clicking yes, the global position variable is set to one.\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) < int(button_status.battle_screen_my_hand_indicator_position):\n button_status.battle_screen_my_hand_indicator_position = '1'\n #\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n # Check if this card satisfied item requirement\n if located_card.card_type == 'tactic':\n\n x = located_card.special_effect\n if 'Quest/Quest' in x:\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have played the tactic: '+located_card.name + ', drawn 2 cards', screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('draw heal',ai_settings)\n\n\n elif 'Heal 20/Quest' in x:\n user.hand_list.append(user.remain_deck_list[0])\n del user.remain_deck_list[0]\n\n user.character_card.health = str(int(user.character_card.health) + 20)\n\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have played the tactic: '+located_card.name+ ' ,heal yourself for 20 HP, HP: '+str(int(user.character_card.health)-20)+ ' --> '+user.character_card.health+ ', and drawn a card', screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('draw heal',ai_settings)\n\n\n elif 'Dmg' in x:\n if 'other' in screen_status.battle_screen_action_indicator:\n screen_status.battle_screen_action_indicator = 'stage-2-other-action-detail-tactic-1'\n elif 'character' in screen_status.battle_screen_action_indicator:\n y = 
screen_status.battle_screen_action_indicator.replace('stage-2-character-action-','')[0]\n screen_status.battle_screen_action_indicator = 'stage-2-character-action-' + y + '-detail-tactic-1'\n button_status.battle_screen_instruction_bar_yes_display = False\n button_status.battle_screen_instruction_bar_yes_backend = False\n button_status.battle_screen_instruction_bar_text = \"Pick a target to do \" + x[-3:] + ' Damage'\n\n elif located_card.card_type == 'item':\n for i in range(1,7):\n if user.item_in_play_dict[str(i)] == '':\n user.item_in_play_dict[str(i)] = located_card\n break\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False # hand buttons on card eg:****\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have equiped the item: '+located_card.name, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('play card',ai_settings)\n\n elif located_card.card_type == 'monster':\n for i in range(1,7):\n if user.monster_in_play_dict[str(i)] == '':\n user.monster_in_play_dict[str(i)] = located_card\n break\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False # hand buttons on card eg:****\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have spawned the monster: '+ located_card.name, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('play card',ai_settings)\n\n\n elif click_type == 'use tactic':\n # Make sure if using auto level up by clicking yes, the global position variable is set to one.\n if len(user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1):7 * screen_status.battle_screen_my_hand_page_id]) < int(button_status.battle_screen_my_hand_indicator_position):\n button_status.battle_screen_my_hand_indicator_position = '1'\n #\n located_card = user.hand_list[7*(screen_status.battle_screen_my_hand_page_id - 1)+(int(button_status.battle_screen_my_hand_indicator_position)-1)]\n dmg = located_card.special_effect[-3:]\n\n if \"opponent's character\" in button_status.battle_screen_instruction_bar_text:\n player2.character_card.health = str(int(player2.character_card.health)-int(dmg))\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False # hand buttons on card eg:****\n button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have played the tactic: '+located_card.name+', dealt '+str(int(dmg))+ \" damage to opponent's character, HP: \"+str(int(player2.character_card.health)+int(dmg))+' --> '+player2.character_card.health, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('attack face',ai_settings)\n\n elif \"opponent's monster\" in button_status.battle_screen_instruction_bar_text:\n x = button_status.battle_screen_instruction_bar_text[-2:-1]\n player2.monster_in_play_dict[x].health = str(int(player2.monster_in_play_dict[x].health) - int(dmg))\n user.hand_list.remove(located_card)\n button_status.battle_screen_my_hand_indicator_display = False # hand buttons on card eg:****\n button_status.battle_screen_player2_battleground_indicator_display = False\n 
button_status.battle_screen_instruction_bar_yes_display = True\n button_status.battle_screen_instruction_bar_yes_backend = True\n add_text_to_action_history('You have played the tactic: '+located_card.name+', dealt '+str(int(dmg))+ \" damage to opponent's monster: \"+player2.monster_in_play_dict[x].name+ \", HP: \"+str(int(player2.monster_in_play_dict[x].health) + int(dmg))+' --> '+player2.monster_in_play_dict[x].health, screen, buttons,screen_status, button_status, card_database_filter, user, player2)\n play_sound_effect('attack face',ai_settings)", "title": "" }, { "docid": "e65320bd6f106200c0ef0f5ad7aa6daf", "score": "0.5790037", "text": "def stand(hand=bj.player1.hand):\r\n phv = bj.player1.hand_value_check(hand) # check player hand value\r\n phv = [x for x in phv if x <= 21]\r\n if hand == bj.player1.hand:\r\n if len(phv) > 0:\r\n bj.player1.final_hand_val = max(phv)\r\n else:\r\n bj.player1.final_hand_val = \"bust\"\r\n else:\r\n if len(phv) > 0:\r\n bj.player1.final_hand2_val = max(phv)\r\n else:\r\n bj.player1.final_hand2_val = \"bust\"", "title": "" }, { "docid": "7a822fe117bb3c0c7442e26361cb70ca", "score": "0.57890815", "text": "def playerCanPlay(game, situation, player):\r\n return True", "title": "" }, { "docid": "537e8f97ec9368ae8e14a004dfbf5ce3", "score": "0.57825285", "text": "def _ship_hit(self):\n if self.stats.ships_left > 0:\n self.stats.ships_left -= 1\n self.sb.prep_ships()\n self.stars.empty()\n self.bullets.empty()\n self._create_galaxy()\n self.ship.center_ship()\n sleep(0.5)\n else:\n self.stats.game_active = False\n pygame.mouse.set_visible(True)", "title": "" }, { "docid": "7bf317e7df7095660cdf07d90c105873", "score": "0.5782345", "text": "def main(self):\n _age = info.getInfo(self)\n _flag = game.check_age(self, _age)\n if _flag == False:\n exit()\n game.wants_to_play(0)", "title": "" }, { "docid": "d6a92745745ee9ac7f73e3d592580588", "score": "0.5780594", "text": "def doHotSeatCheck(args):\n\tiGameTurn, ePlayer = args\n\tGAME = CyGame()\n\tif GAME.isHotSeat() and ePlayer == GAME.getActivePlayer():\n\t\tCvEventInterface.getEventManager().fireEvent(\"SwitchHotSeatPlayer\", ePlayer)", "title": "" }, { "docid": "48d0a77686056c752abd5bd2441ed977", "score": "0.57775366", "text": "def _playfield_switch_hit(self, **kwargs):\n if self.balls <= 0 or (kwargs.get('balls') and self.balls - kwargs['balls'] < 0):\n self._mark_playfield_active()\n\n if not self.num_balls_requested:\n self.debug_log(\"Playfield was activated with no balls expected.\")\n self.machine.events.post('unexpected_ball_on_' + self.name)\n '''event: unexpected_ball_on_(name)\n desc: The playfield named (name) just had a switch hit,\n meaning a ball is on it, but that ball was not expected.\n '''\n\n self.ball_search.reset_timer()", "title": "" }, { "docid": "34d4c75f392cf115dee7bacd04267045", "score": "0.57754314", "text": "def startBattle(self):\n defender = self.map.getUnitAt(self.pos)\n attacker = self.selectedUnit\n defender.takeDamage(int(attacker.firepower * attacker.hp))\n attacker.takeDamage(int(defender.firepower * defender.hp))\n self.endBattle()", "title": "" }, { "docid": "91c52233e62c802bb21059239db9ea81", "score": "0.57746506", "text": "def action_stand(self) -> None:\n self.action_house_reveal()", "title": "" }, { "docid": "60a05323852db93b4966cb77cd76eb75", "score": "0.57741123", "text": "def attack(self, text, state):\n text = text.replace('attack ', '', 1)\n for key, value in state.currentRoom.objects.iteritems():\n if text.startswith(key.lower()):\n if value.hasHP():\n print \"You 
attack the {0} with your {1}.\".format(key, state.player.currentWeapon)\n else:\n print \"You cannot attack the {0}\".format(key)", "title": "" }, { "docid": "77d4074f66aa55271fbce4951626c27e", "score": "0.5772505", "text": "def on_hit(self, event, data):\n world, player = data\n\n # Ensure the top of the switch block is being hit\n if get_collision_direction(player, self) == \"A\" and not self._pressed:\n self._time = time.time() # save the hit time\n self._pressed = True # set the pressed status to True\n if not self._block_around: # ensure the block storage is empty\n x, y = self.get_position() # get the switch position\n self._block_around = world.get_things_in_range(x, y, 20) # put block around into storage\n for block in self._block_around: # remove block in the storage\n if not isinstance(block, Switch) and isinstance(block, Block):\n world.remove_block(block)", "title": "" }, { "docid": "766e25fe679ef3c1d28b42fce3c40924", "score": "0.57696164", "text": "def start_game(self):\n while self.can_deal:\n self.take_turn()", "title": "" }, { "docid": "99a558db198b5bf9de54391e2e195efb", "score": "0.576664", "text": "def check_play_button(ai_settings,screen,stats,play_button,ship,aliens,bullets,\n\tmouse_x,mouse_y,sb):\n\n\tbutton_clicked = play_button.rect.collidepoint(mouse_x,mouse_y)\n\n\tif button_clicked and not stats.game_active:\n\t\t# Reinicia as configurações no jogo\n\t\tai_settings.initialize_dynamic_settings()\n\n\n\t\t# Oculta cursor do mouse quando o mouse estiver sobre a janela\n\t\tpygame.mouse.set_visible(False)\n\t\t\n\n\t\t# Reinicia o jogo\n\t\tstats.reset_stats()\n\t\tstats.game_active = True\n\n\t\t# Reinicia as imagems do painel de pontuação\n\t\tsb.prep_score()\n\t\tsb.prep_high_score()\n\t\tsb.prep_level()\n\t\tsb.prep_ship()\n\n\t\t# Esvazia a lista de alienígenas e de projéteis\n\t\taliens.empty()\n\t\tbullets.empty()\n\n\t\t# Cria uma ova frota e centraliza a espaçonave\n\t\tcreate_fleet(ai_settings,screen,ship,aliens)\n\t\tship.center_ship()", "title": "" }, { "docid": "40c0eb0dfd90373d852f61993a3def84", "score": "0.57652634", "text": "def hit(self, other=None, push=False):\n laser = pygame.mixer.Sound('resources/Laser.wav')\n laser.set_volume(0.5)\n if not other:\n front_pos = \\\n [p + d for p, d in zip(self.pos, DIRECTIONS[self.rotation])]\n other = self.map.get(front_pos)\n assert other is not None, \"No robot in front!\"\n if push:\n other.pos = [p + d for p, d in\n zip(other.pos, DIRECTIONS[self.rotation])]\n # get the hit direction\n look_other = DIRECTIONS[other.rotation]\n look_self = DIRECTIONS[self.rotation]\n if look_other == look_self: # von hinten getroffen\n damage = DAMAGE[FROM_BEHIND]\n elif all(abs(x) != abs(y)\n for x, y in zip(look_other, look_self)): # seitlich\n damage = DAMAGE[FROM_SIDE]\n else: # frontal\n damage = DAMAGE[FROM_FRONT]\n\n other.health -= damage if not push else damage * 0.25\n\n if hasattr(other, 'take_damage_anim'):\n other.animator.play_animation(other.take_damage_anim)\n if self.speakers:\n self.speakers.play(laser)\n new_turn = \"{0}!{1};{2}\".format(self.pos, other.pos, damage)\n self._call_gamelog_callbacks(new_turn)", "title": "" }, { "docid": "80bb118d6ecc80628b386e6d4bc6d00c", "score": "0.57602376", "text": "def get_user_input(self, game, hand, message, allowed_actions):\n if self.first_turn:\n hand = self.hands[0]\n if hand.cards == 2:\n card1, card2 = hand.cards\n if card1.get_value() == card2.get_value():\n return 'split'\n return 'double'\n else:\n return 'stand'", "title": "" }, { "docid": 
"ddca87925d6f6978cbf8fa45d3d4fd1e", "score": "0.5759892", "text": "def won(self):\n if self.current_room.name == \"Victory\":\n return True\n else:\n return False", "title": "" }, { "docid": "5d555062957ec9724284e978583af682", "score": "0.5757461", "text": "def wishMe():\r\n hour = int(datetime.datetime.now().hour)\r\n\r\n if hour >= 0 and hour < 12:\r\n speak(\"Good morning\" + MASTER)\r\n\r\n elif hour >= 12 and hour < 18:\r\n speak(\"Good afternoon\" + MASTER)\r\n else:\r\n speak(\"Good Evening\" + MASTER)\r\n # speak(\"I am VA. How may I help you?\")\r", "title": "" }, { "docid": "671c7d7d88e6005e432fa5df6f737dd6", "score": "0.57557136", "text": "def allowedToEnter(self):\n if base.cr.isPaid():\n return True\n place = base.cr.playGame.getPlace()\n myHoodId = ZoneUtil.getCanonicalHoodId(place.zoneId)\n if myHoodId in \\\n (ToontownGlobals.ToontownCentral,\n ToontownGlobals.MyEstate,\n ToontownGlobals.GoofySpeedway,\n ):\n # trialer going to TTC/Estate/Goofy Speedway, let them through\n return True\n return False", "title": "" }, { "docid": "0c3be2c893205b7898e06d7372d4c070", "score": "0.575387", "text": "def fighter(mob):\r\n\tglobal player\r\n\trestore()\r\n\top = op_set(mob)\r\n\tt = None\r\n\tplayer = engine('you', fo)\r\n#\tprint fo\r\n\twhile mhp > 0 and php > 0:\r\n\t\tt = turn(t, op)\r\n\t\tprompt()\r\n\tif mhp <= 0 and php > 0:\r\n\t\treturn 'winner'\r\n\telif php <= 0 and mhp > 0:\r\n\t\treturn 'game_death'\r\n\telse:\r\n\t\tprint \"You both seemed to have died...\"\r\n\t\treturn 'game_death'", "title": "" } ]
05445aab07c768eec636fe901ab66bd9
Turn on the entity.
[ { "docid": "0413af31cc498d121ee7207c9afb775a", "score": "0.0", "text": "def turn_on(self, speed: str = None, **kwargs) -> None:\n if speed is None:\n speed = SPEED_MEDIUM\n self.set_speed(speed)", "title": "" } ]
[ { "docid": "5636f7ded24b3e11bfd3c30745e713ea", "score": "0.8618475", "text": "def turn_on(self, **kwargs):\n setattr(self._device, self.entity_description.key, True)", "title": "" }, { "docid": "0787e3138df52f64d7bccfce51a74eff", "score": "0.81042206", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n await self._device.async_set_setting(self.entity_description.key, True)", "title": "" }, { "docid": "25c7e0138652b2678091c04502aef3a7", "score": "0.7723294", "text": "def turn_on(self, **kwargs):\n self._set_state(True)", "title": "" }, { "docid": "07431c32aa27377c8475ceb69ad45401", "score": "0.76973546", "text": "def turn_on(self):\n raise NotImplementedError()", "title": "" }, { "docid": "135955392715db5a917f191ea945ba3a", "score": "0.7666584", "text": "def turn_on(self, **kwargs: Any) -> None:\n sensor_type = self.entity_description.key\n if sensor_type == \"clean\":\n self.set_graceful_lock(True)\n self._tc_object.start_cleaning()\n elif sensor_type == \"dock\":\n self._tc_object.dock()\n elif sensor_type == \"find\":\n self._tc_object.find_me()", "title": "" }, { "docid": "c46ad138f612f6f7a56d7d987fb167fa", "score": "0.76025903", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n self.entity_description.set_value_fn(self.coordinator.device, True)\n await self.coordinator.push_state_update()\n self.async_write_ha_state()", "title": "" }, { "docid": "18aa02c0ad12614ef648ce61fd7414b6", "score": "0.7555699", "text": "def turn_on(self, **kwargs):\n self._device.switch_on()", "title": "" }, { "docid": "0721e724818ea2b1d4c45e86068a20c7", "score": "0.75431734", "text": "def turn_on(self, **kwargs):\n self._state = self._hass.data[SONOFF_DOMAIN].switch(True, self._deviceid, self._outlet)\n self.schedule_update_ha_state()", "title": "" }, { "docid": "e9c32145185534928338851df99f358c", "score": "0.75335157", "text": "def turnOn(self):\n\n self.device.updateStateOnServer(key='onOffState', value=True)\n self.updateStateImage()", "title": "" }, { "docid": "91bcbdc579f2bec8d99916605954491c", "score": "0.7500128", "text": "def turn_on(self, **kwargs):\n self.hass.services.call(\"light\", \"turn_on\", self._get_kwargs_payload(self._zeptrion_entity))\n self.hass.services.call(\"light\", \"turn_on\", self._get_kwargs_payload(self._hue_entity, kwargs))\n self.update()", "title": "" }, { "docid": "1e755247d08e3363b6604f6fc56ed224", "score": "0.7490265", "text": "async def turn_on(self) -> None:\n await self._request(\"switch\", {\"switch\": STATE_ON})", "title": "" }, { "docid": "b65a5e784b866628cd17ab5de329ef0a", "score": "0.74796206", "text": "def on(self):\n\t\tself.status = 1\n\t\tprint(self, 'turned on')", "title": "" }, { "docid": "60e15f278b10c9ae9145775bd66b0e1b", "score": "0.74481165", "text": "def turn_on(self, **kwargs):\n\n if hasattr(self, '_event') and self._event:\n self._event.device.send_on(rfxtrx.RFXOBJECT.transport)\n\n self._state = True\n self.update_ha_state()", "title": "" }, { "docid": "a9662a69563e6f7a14cc0453abd7a515", "score": "0.74109656", "text": "def turn_on(self) -> bool:", "title": "" }, { "docid": "ed5a7bf337c1264f3e8f7889d89b1860", "score": "0.74069566", "text": "def turn_on(self) -> None:\n self._device.on()", "title": "" }, { "docid": "ddefe43b887739d30ab16faf83400b28", "score": "0.7402433", "text": "def turn_on(self, on: bool):\n body = {\"on\": on}\n self._send_state(body)", "title": "" }, { "docid": "4706c9de2eb120ecc43eb93a99dfcc45", "score": "0.7380818", "text": "def turn_on(self, **kwargs):\n if self._write_to_hub(self._sid, 
**{self._data_key: \"on\"}):\n self._state = True\n self.schedule_update_ha_state()", "title": "" }, { "docid": "2d0b1cadbf94886d01110659d7a4d24d", "score": "0.73678505", "text": "def turn_on(self, **kwargs):\n with self._lock:\n self._open()\n self._update_state(self._write_read_format(CMD_DICT[STATE_ON]))\n self._close()\n\n self.schedule_update_ha_state(True)", "title": "" }, { "docid": "24af37b0c02cccae8c0aa40b4bcfefc5", "score": "0.73384523", "text": "async def async_turn_on(self, **kwargs):\n await self._device.set_on()", "title": "" }, { "docid": "2d1fa42ad3586d1b009073a0ce57fdeb", "score": "0.73381674", "text": "def turn_on(self, **kwargs) -> None:\n if not self._node.on():\n _LOGGER.debug('Unable to turn on switch.')", "title": "" }, { "docid": "d3c8971d60615a8007889ae0e11444de", "score": "0.7330959", "text": "def turn_on(self, **kwargs):\n self.ihc_controller.set_runtime_value_bool(self.ihc_id, True)", "title": "" }, { "docid": "de3178cf7a1fdb2be04a6b6729b4cfcc", "score": "0.7330552", "text": "def turn_on(self, **kwargs):\n self._device.on(**kwargs)", "title": "" }, { "docid": "1effd1d0e9066403aab02142772ebfcf", "score": "0.73142654", "text": "def turn_on(self) -> None:\n self._device.power_on()", "title": "" }, { "docid": "9aff9338928cc97b5c98b248659a75e5", "score": "0.7290888", "text": "def on(self):\n self.set(is_on=True)", "title": "" }, { "docid": "58e3446574751923ff486ff30c8376c3", "score": "0.7289517", "text": "def turn_on(self, **kwargs: Any) -> None:\n self._send_command([{\"code\": self.dp_code, \"value\": True}])", "title": "" }, { "docid": "dcc113101c9f4752773f5954d668b725", "score": "0.72798187", "text": "def turn_on(hass, entity_id=None):\n data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}\n hass.services.call(DOMAIN, SERVICE_TURN_ON, data)", "title": "" }, { "docid": "b728430f9bec7e22defa5f434e9b7c92", "score": "0.72693366", "text": "def turn_on(self):\n self.action = action.Action(\"Actuator \" + str(self.obj),\n self.apply)\n self.action.run()", "title": "" }, { "docid": "c2ae0d340dfa0cc36f2ad2410971c85e", "score": "0.72672313", "text": "async def async_turn_on(self, **kwargs):\n self._state = True\n self._last_change = time.time()\n self.async_schedule_update_ha_state()\n await self.coordinator.api.pause_device(self._system_id, self._item_id, False)", "title": "" }, { "docid": "d72487d1b7c67bedfcbd1d6f52970c02", "score": "0.724471", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n await self._async_publish_command(VacuumEntityFeature.TURN_ON)", "title": "" }, { "docid": "e273383932eddd9c04ef20b9744fbc96", "score": "0.7226841", "text": "def turn_on(self, **kwargs):\n self._device.set_switch_state_on()", "title": "" }, { "docid": "bffdf2cc254376ba156caa06092fdecc", "score": "0.7205939", "text": "def turn_on(self, **kwargs):\n self._set_light(ON_STATE)", "title": "" }, { "docid": "c43b202c3268c538e35dbe5b304b8015", "score": "0.7204318", "text": "def turn_on(self, **kwargs):\n self._is_on = True if self.client.turn_onff(self.device, 'on') else False", "title": "" }, { "docid": "3fd7e90f6fe7606769481a1cbea0d29b", "score": "0.71997255", "text": "def turn_on(self, **kwargs):\n self._client.put_device(self._pin_num,\n int(self._activation == STATE_HIGH))\n self._set_state(True)", "title": "" }, { "docid": "a685f9509fa2625fd3e687a50fd70044", "score": "0.7185758", "text": "def turn_on(self) -> None:\n self._set_configuration(fpwr=\"ON\")", "title": "" }, { "docid": "834cf45e5748edcc6b14bde803b10767", "score": "0.7180817", "text": "def turn_on(self, 
**kwargs):\n _LOGGER.debug(\"Turn on device %s\", self.name)\n if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]:\n self._device.resume()\n else:\n self._device.start()", "title": "" }, { "docid": "b29149b08ae3220a26e35f035b6eb4ee", "score": "0.7165376", "text": "def on(self):\r\n\t\tself._on = True", "title": "" }, { "docid": "1cb40a582b174d4edcbb3f9a26c64ed1", "score": "0.71587574", "text": "def turn_on(self, **kwargs):\n _LOGGER.debug(\"%s: switching on %s\", self._vehicle.name,\n self._name)\n if self._attribute == 'climate':\n self._vehicle.remote_services.trigger_remote_air_conditioning()\n elif self._attribute == 'light':\n self._vehicle.remote_services.trigger_remote_light_flash()\n elif self._attribute == 'horn':\n self._vehicle.remote_services.trigger_remote_horn()", "title": "" }, { "docid": "19ce65ab5689ce1db2e478cd1200019c", "score": "0.715587", "text": "async def turn_on(self) -> None:\n if self.is_offline:\n raise ClientConnectionError()\n self.state = True\n self.mode = self.default_mode", "title": "" }, { "docid": "e43524e927ffc6029a24c788e13bc1dd", "score": "0.71480584", "text": "def turn_on(self, **kwargs) -> None:\n if not self._actions.runThen():\n _LOGGER.error('Unable to turn on switch')", "title": "" }, { "docid": "98dae39b9f71b81edd9698a694f6f977", "score": "0.7145532", "text": "def turn_on(self):\n self.display_on()\n self.set_backlight(self._backlight)\n self._state = STATE_ON\n self.schedule_update_ha_state()", "title": "" }, { "docid": "3c8cad5080bfc6b2b28351ceaba74a27", "score": "0.7140774", "text": "def set_on(self):\n self.set_state(STATE_ON)\n self.set_dehumidification_target()\n self.set_max_on_timer()", "title": "" }, { "docid": "cf8c4a2fba93ccb1984a8b063f355dfe", "score": "0.71405125", "text": "async def async_turn_on(self, **kwargs):\n self.coordinator.device.light = True\n await self.coordinator.push_state_update()\n self.async_write_ha_state()", "title": "" }, { "docid": "8b9546e4223817c5e6b971028c2c68a7", "score": "0.7139541", "text": "async def async_turn_on(self, **kwargs):\n await self.async_put_characteristics({CharacteristicsTypes.ACTIVE: True})", "title": "" }, { "docid": "8b9546e4223817c5e6b971028c2c68a7", "score": "0.7139541", "text": "async def async_turn_on(self, **kwargs):\n await self.async_put_characteristics({CharacteristicsTypes.ACTIVE: True})", "title": "" }, { "docid": "fce49f2a69b2ccdb221e18fd6bdfeb37", "score": "0.71284884", "text": "async def async_turn_on(self, **kwargs) -> None:\n self._client.nefit.put_value(self.get_endpoint(), \"on\")\n\n _LOGGER.debug(\n \"Switch Nefit %s ON, endpoint=%s.\", self._key, self.get_endpoint()\n )", "title": "" }, { "docid": "0baa33ea8abe541e1fe66b71f0ed9061", "score": "0.71190584", "text": "async def async_turn_on(self, **kwargs) -> None:\n self._client.nefit.put_value(self.get_endpoint(), \"true\")\n\n _LOGGER.debug(\n \"Switch Nefit %s ON, endpoint=%s.\", self._key, self.get_endpoint()\n )", "title": "" }, { "docid": "a73493a08ebf71b2e98127638d2013af", "score": "0.71119183", "text": "def turn_on(self):\n hub.my_pages.smartplug.set(self._id, 'on')\n hub.my_pages.smartplug.wait_while_updating(self._id, 'on')\n self.update()", "title": "" }, { "docid": "ca2b5e03669336c3979bc8c1c394d689", "score": "0.71082246", "text": "async def _async_heater_turn_on(self):\n data = {ATTR_ENTITY_ID: self.heater_entity_id}\n await self.hass.services.async_call(\n HA_DOMAIN, SERVICE_TURN_ON, data, context=self._context\n )", "title": "" }, { "docid": "af9bc76e6cb4970539b781d5a7a6fe48", "score": 
"0.7082913", "text": "def on(self):\n\t\tself._on = 1", "title": "" }, { "docid": "af9bc76e6cb4970539b781d5a7a6fe48", "score": "0.7082913", "text": "def on(self):\n\t\tself._on = 1", "title": "" }, { "docid": "164e2607de30a0f7a9c911d465e3f192", "score": "0.70804274", "text": "def turn_on(self, **kwargs):\n self._automation.set_active(True)", "title": "" }, { "docid": "84cd88ac9ae29968fe5c9c8e2f4aa38d", "score": "0.7064681", "text": "def turn_on(self, **kwargs):\n self._parent_device.turn_on(outlet=self.outletnumber)", "title": "" }, { "docid": "69b0b80a8ac1ffb7766d0b00278c2338", "score": "0.70582986", "text": "def turn_on(self):\n self._speaker.turnOn()", "title": "" }, { "docid": "51e5cc8d70244bd2eb2f35a7a8704e56", "score": "0.7049895", "text": "def on(self):\n self.setOn(True)", "title": "" }, { "docid": "7f42475738be4d40f1ddd28bb12b9726", "score": "0.7042133", "text": "async def async_turn_on(self, **kwargs):\n await self.__async_set_lamp_state(True, **kwargs)", "title": "" }, { "docid": "03edf68c6559cd267008e774ed4667ef", "score": "0.702976", "text": "def turn_on(self, **kwargs: Any) -> None:\n self._hmdevice.on(self._channel)", "title": "" }, { "docid": "9e552ac4b298b405496524bdce57ffd2", "score": "0.70278305", "text": "def turn_on(self, **kwargs):\n self.__switch(BrickletRemoteSwitchV2.SWITCH_TO_ON)", "title": "" }, { "docid": "81bbbee3b43905848f61cdfef0c26bf6", "score": "0.7026929", "text": "def force_on(self):\n self.action(1)", "title": "" }, { "docid": "6862b1aaa2f779f0076e444d296a19ab", "score": "0.70263183", "text": "def turn_on(self, switch=1):\r\n self.set_status(True, switch)", "title": "" }, { "docid": "1328216d83b51ae178989c2fc623ef22", "score": "0.7025578", "text": "def async_turn_on(self):\n if self._state is STATE_OFF:\n yield from self.device.turn_on()\n self._state = STATE_PLAYING\n self.hass.async_add_job(self.async_update_ha_state())", "title": "" }, { "docid": "6b2eca4b67a8962afe93e27462ba7ed4", "score": "0.7003284", "text": "def _activate(self):\n self._toggle_until = time.monotonic() + self._toggle_for.total_seconds()\n track_point_in_time(\n self.hass,\n self.async_update_ha_state,\n dt_util.utcnow() + self._toggle_for,\n )\n momentary_remote_rpi_gpio.write_output(self._switch, 1)\n self._state = True\n _LOGGER.debug(\"turned on\")\n self.schedule_update_ha_state()", "title": "" }, { "docid": "5fdb00bd2012e387431f5831c77397dd", "score": "0.7000363", "text": "def switch_on(self) -> None:\n self.set_switch_state(1)", "title": "" }, { "docid": "73e77117a343f640f31e86854be72aa5", "score": "0.6995114", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n _LOGGER.debug(f\"Turning on {self.name} with {kwargs}\")\n await self.send_command(CMD_ON)", "title": "" }, { "docid": "c547bce3aa57f571618da8d628102d8a", "score": "0.69713736", "text": "def turn_on(self, **kwargs):\n self._aerogarden.light_toggle(self._macaddr)\n self._state = 1", "title": "" }, { "docid": "a062d8fcc018d0699bfc72d5b52e7b1e", "score": "0.69588906", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n await self.call_state_change(True)", "title": "" }, { "docid": "582a69f0368719d22fd2a28cfed80370", "score": "0.6951012", "text": "def on(self):\n self.light = self.switch_on", "title": "" }, { "docid": "c4904c59e3e7632e142fd442f03f723c", "score": "0.69504595", "text": "def turn_on(self, **kwargs) -> None:\n self.log('R4SkyKettleSwitch.turn_on')\n self._connect.onModeBoil()", "title": "" }, { "docid": "8ed6b5dcfc7500ccf69eefe5a0510f44", "score": "0.69393265", "text": "async def 
async_turn_on(self, **kwargs):\n self._write(True)", "title": "" }, { "docid": "9968a4c4c8b0594e15c64824300260d8", "score": "0.6921624", "text": "async def async_turn_on(self, **kwargs):\n self.turn_on()", "title": "" }, { "docid": "b646c6a4606a29189510c335916221ac", "score": "0.69058913", "text": "def turn_on(self) -> None:\n if self.state == MediaPlayerState.OFF:\n self._device.send_key(\"POWER\")\n\n self._attr_state = MediaPlayerState.IDLE", "title": "" }, { "docid": "27fd68c6d8184c3f3b307f1c3a983770", "score": "0.6903544", "text": "def setOn(self, command):\n self.setDriver('ST', 1)", "title": "" }, { "docid": "b4c42608b24fb66cd5ff82962e035e7e", "score": "0.690084", "text": "async def async_turn_on(self, **kwargs) -> None:\n sucess = await self.async_handle_switch_on_off(turn_on=True)\n if sucess:\n self._is_on = True", "title": "" }, { "docid": "c65e4823fb9dd7d64a5713fc1445748f", "score": "0.6887902", "text": "def call_turn_on(self):\n self.action(\"turnOn\")", "title": "" }, { "docid": "926f4d0343d694c704f4666de015100c", "score": "0.688402", "text": "def cmd_set_on(self, command):\n self.setDriver('ST', 1)", "title": "" }, { "docid": "b3cb1bd0ede95cacf78b6760e69ba584", "score": "0.68790954", "text": "def turn_on(self, **kwargs):\n req_url = (\n self._scheme\n + \"://\"\n + self._host\n + \":\"\n + str(self._port)\n + \"/toggle/\"\n + self._id\n + \"/switch_1/true\"\n )\n try:\n r = requests.get(req_url)\n if r.status_code == 200:\n # _LOGGER.warning(r.status_code)\n # _LOGGER.warning(r.json())\n # _LOGGER.warning(\"Turrned on\")\n self._switch = True\n else:\n _LOGGER.error(\"Error turning on device: %s\", self._id)\n except requests.RequestException as e:\n _LOGGER.error(\"Error turning on device: %s\", self._id)\n _LOGGER.error(\"Error: %s \", e)", "title": "" }, { "docid": "d91715c24a8ce788466cc07653b298dd", "score": "0.68738455", "text": "def turnOn(self):\n return self._processOnOffResponse(self._sendCommand(commands['on']))", "title": "" }, { "docid": "955d72a886ea4e567f4e3b8276498699", "score": "0.68729055", "text": "async def async_turn_on(self, **kwargs):\n fn = getattr(self._device, \"set_\" + self._key)\n result = await fn(True)\n await self.async_force_update()\n return True", "title": "" }, { "docid": "84fcdf9cf4cc829cb63660142d311d11", "score": "0.68708825", "text": "async def async_turn_on(self, **kwargs):\n if kwargs.get(ATTR_EFFECT) == \"AUTO\":\n _LOGGER.debug(\"Enabling AUTO for outlet %s\", self.name)\n self.outlet.enable_auto()\n await self.apex.update_outlet(self.outlet)\n self.async_schedule_update_ha_state(True)\n _LOGGER.debug(\"Just turned on AUTO. is_on = %s\", self.outlet.is_on())\n elif kwargs.get(ATTR_EFFECT) == \"ON\":\n await self.async_turn_on()\n elif kwargs.get(ATTR_EFFECT) == \"OFF\":\n await self.async_turn_off()\n else:\n _LOGGER.debug(\"Turning outlet ON for %s\", self.name)\n self.outlet.force_on()\n await self.apex.update_outlet(self.outlet)\n self.async_schedule_update_ha_state()\n _LOGGER.debug(\"Just turned on outlet. is_on = %s\", self.outlet.is_on())", "title": "" }, { "docid": "45bad4f9d018f269b78c0671e6e6a566", "score": "0.6856041", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n # == Turn device on ==\n # Turn on both ALL (Hyperion itself) and LEDDEVICE. It would be\n # preferable to enable LEDDEVICE after the settings (e.g. 
brightness,\n # color, effect), but this is not possible due to:\n # https://github.com/hyperion-project/hyperion.ng/issues/967\n if not bool(self._client.is_on()):\n for component in [\n const.KEY_COMPONENTID_ALL,\n const.KEY_COMPONENTID_LEDDEVICE,\n ]:\n if not await self._client.async_send_set_component(\n **{\n const.KEY_COMPONENTSTATE: {\n const.KEY_COMPONENT: component,\n const.KEY_STATE: True,\n }\n }\n ):\n return\n\n # Turn on the relevant Hyperion priority as usual.\n await super().async_turn_on(**kwargs)", "title": "" }, { "docid": "d9a202b0c1f67dbe37f80f9b115b565b", "score": "0.68555176", "text": "async def async_turn_on(self, **kwargs):\n await self._device.turn_on()\n await self.async_force_update(2)\n return True", "title": "" }, { "docid": "443bc46418de714b5b13ba0e95636595", "score": "0.68535167", "text": "def turn_on(self, **kwargs):\n if ATTR_BRIGHTNESS in kwargs:\n self._brightness = kwargs[ATTR_BRIGHTNESS]\n\n if ATTR_TRANSITION in kwargs:\n transition_time = kwargs[ATTR_TRANSITION]\n self._led.transition(\n transition_time,\n is_on=True,\n brightness=_from_hass_brightness(self._brightness),\n )\n else:\n self._led.set(\n is_on=True, brightness=_from_hass_brightness(self._brightness)\n )\n\n self._is_on = True\n self.schedule_update_ha_state()", "title": "" }, { "docid": "161a8bfa04c5ae2c6c2627cf3d9f24b7", "score": "0.68462247", "text": "async def async_turn_on(self, **kwargs) -> None:\r\n self._update_node_red(True)", "title": "" }, { "docid": "592b13af5d7d544f3508467f73e9c4e9", "score": "0.68379587", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n await self.async_update_state(True)", "title": "" }, { "docid": "7d842ca3dd149f2aac570c01d0c3122a", "score": "0.6820683", "text": "async def async_turn_on(self, **kwargs):\n result = await self._try_command(\"Turning the plug on failed.\", self._plug.turn_switch, self._index, 1)\n\n if result:\n self._state = True\n self._skip_update = True", "title": "" }, { "docid": "1a042944da717c2248bb734dc5ce7279", "score": "0.6818715", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n await self._light_set_state(LIGHT_ON)", "title": "" }, { "docid": "1362b7d6869b6196bd9228efc3e7b2e7", "score": "0.68162966", "text": "def turn_on(self):\n # turn on the actual power supply here\n self.set_state(DevState.ON)\n self.push_change_event('temperature', self.__temperature)\n self.push_change_event('current', self.__current)", "title": "" }, { "docid": "c835966ec790bf6e2366d3e69d9a2724", "score": "0.68114877", "text": "def _on(self):\n string = self.command + \" Enable\"\n subprocess.call(string)\n self.state = True", "title": "" }, { "docid": "6cae5c9bdecdd683a7fcb05133c3a92d", "score": "0.67885673", "text": "def turn_on(self, **kwargs) -> None:\n self._state = True\n self._skip_update = True\n\n if ATTR_BRIGHTNESS in kwargs:\n self._brightness = int(float(kwargs[ATTR_BRIGHTNESS]) / 255 * 100)\n self.apply_action(\"setIntensity\", self._brightness)\n elif ATTR_EFFECT in kwargs:\n self._effect = kwargs[ATTR_EFFECT]\n self.apply_action(\"wink\", 100)\n else:\n self.apply_action(\"on\")\n\n self.async_write_ha_state()", "title": "" }, { "docid": "7948674353324c2ca563e40fbd906502", "score": "0.67874134", "text": "def turn_on(self, **kwargs):\n if ATTR_HS_COLOR in kwargs:\n self._color = kwargs[ATTR_HS_COLOR]\n if ATTR_BRIGHTNESS in kwargs:\n self._brightness = kwargs[ATTR_BRIGHTNESS]\n\n if ATTR_TRANSITION in kwargs:\n transition_time = kwargs[ATTR_TRANSITION]\n self._led.transition(\n transition_time,\n is_on=True,\n 
brightness=_from_hass_brightness(self._brightness),\n color=_from_hass_color(self._color),\n )\n else:\n self._led.set(\n is_on=True,\n brightness=_from_hass_brightness(self._brightness),\n color=_from_hass_color(self._color),\n )\n\n self._is_on = True\n self.schedule_update_ha_state()", "title": "" }, { "docid": "bf9474fd2c9d2852d24fe5598b2d5607", "score": "0.6783945", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n await self._plug.turn_on()", "title": "" }, { "docid": "5ee6ab4fdee1d2c0e98bd43b199989ce", "score": "0.67812794", "text": "async def turn_on(self, **kwargs):\n _LOGGER.info(\"Sonoff LAN Mode switch %s switching on\" % self._name)\n await self._sonoff_device.turn_on()", "title": "" }, { "docid": "15ffe4cc6ba82c1fd13b0d6d677fd9b3", "score": "0.6758282", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n if not await self._node.enable():\n raise HomeAssistantError(f\"Unable to enable device {self._node.address}\")", "title": "" }, { "docid": "750129e208b7328cc269bf211903befd", "score": "0.67508435", "text": "def turn_on(self, **kwargs):\n if 'ok' in self.miio.raw_command('set_arming', [\"on\"]):\n self._state = True\n _LOGGER.debug(f\"{self._name} Alarm ON\")", "title": "" }, { "docid": "5b6a9f57391026ee9a7e4653ead5c07e", "score": "0.6747384", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n\n to_state: bool = True\n await self._mesh.async_set_parental_control_state(state=to_state)\n self._state_value = to_state\n self.async_schedule_update_ha_state()", "title": "" }, { "docid": "fd096bf64042e5aa4d177c42235464ac", "score": "0.67450625", "text": "async def async_turn_on(self):\n powerStatus = await self.hass.async_add_executor_job(self._remote.powerStatus)\n if powerStatus == SKY_STATE_STANDBY:\n await self.hass.async_add_executor_job(\n self._remote.press, [\"home\", \"dismiss\"]\n )\n await self.async_update()", "title": "" }, { "docid": "b83afcc1ad8cf1a55eda1373c7b0c2dd", "score": "0.6742945", "text": "async def async_turn_on(self, **kwargs: Any) -> None:\n await self.call_state_change(\"open\")", "title": "" }, { "docid": "c7cad39f9af20bd531b2937fc71d60b2", "score": "0.6727367", "text": "def on(self):\n\t\tcommand._led(self.connector._getGlobalId(), ON)", "title": "" }, { "docid": "67ad43db6bca3191f261944120ad70ba", "score": "0.6726339", "text": "async def async_turn_on(self, **kwargs):\n fn = getattr(self._room, \"set_\" + self._key)\n result = await fn(True)\n await self.async_force_update()\n return True", "title": "" }, { "docid": "9037369f4cdf9c625186f19aa00c3fb9", "score": "0.6724332", "text": "def on(self):\n self.event[\"action\"] = \"on\"\n self.light.write(True, event=self.event)\n return True", "title": "" }, { "docid": "eb37415c6b8bc2e48ec5da8981f77a93", "score": "0.672372", "text": "def turn_on(self, **kwargs: Any) -> None:\n self._gc100.write_switch(self._port_addr, 1, self.set_state)", "title": "" }, { "docid": "bd44a880d9b20a02c98df047d8e1e32d", "score": "0.67196447", "text": "def test_turn_on_when_on(self):\n common.set_operation_mode(self.hass, STATE_HEAT, self.HEAT_ENTITY)\n common.set_operation_mode(self.hass, STATE_COOL, self.COOL_ENTITY)\n self.hass.block_till_done()\n self.hass.services.call('climate', SERVICE_TURN_ON)\n self.hass.block_till_done()\n state_heat = self.hass.states.get(self.HEAT_ENTITY)\n state_cool = self.hass.states.get(self.COOL_ENTITY)\n assert STATE_HEAT == \\\n state_heat.attributes.get('operation_mode')\n assert STATE_COOL == \\\n state_cool.attributes.get('operation_mode')", "title": 
"" } ]
f8c00adb9f1397b1a0557dda391b3234
Parse arguments to the detect module
[ { "docid": "f27ae0cbc8200bbb4fec7d1bc9817c08", "score": "0.5837996", "text": "def arg_parse():\n\n parser = argparse.ArgumentParser(description='YOLO v3 Video Detection')\n parser.add_argument(\"--dataset\", dest=\"dataset\",\n help=\"Dataset on which the network has been trained\", default=\"pascal\")\n parser.add_argument(\"--confidence\", dest=\"confidence\",\n help=\"Object Confidence to filter predictions\", default=0.5, type=float)\n parser.add_argument(\"--nms_thresh\", dest=\"nms_thresh\",\n help=\"NMS Threshhold\", default=0.4, type=float)\n parser.add_argument(\"--cfg\", dest=\"cfgfile\",\n help=\"Config file\", default=\"cfg/yolov3.cfg\", type=str)\n parser.add_argument(\"--weights\", dest=\"weightsfile\", \n help=\"weightsfile\", default=\"yolov3.weights\", type=str)\n parser.add_argument(\"--reso\", dest=\"reso\",\n help=\"Input resolution of the network. Increase to increase accuracy. Decrease to increase learning speed\",\n default=416, type=int)\n return parser.parse_args()", "title": "" } ]
[ { "docid": "34fe4fe55514d90b708cb95166c13d37", "score": "0.7203778", "text": "def parse_args(self):", "title": "" }, { "docid": "97d2ccf7f996b3021796b0ea822e65de", "score": "0.7183488", "text": "def parse(args):", "title": "" }, { "docid": "d2d066d8732dafb3e689637147415fda", "score": "0.7098273", "text": "def parse_arguments(self, arguments):", "title": "" }, { "docid": "1dccf4c3d2985588ab18019d903fe76b", "score": "0.66024846", "text": "def arg_parse():\n\n parser = argparse.ArgumentParser(description='YOLO v3 Detection Module')\n\n parser.add_argument(\"--images\", dest='images', help=\"Image / Directory containing images to perform detection upon\",\n default=\"data/test_folder/\", type=str)\n parser.add_argument(\"--det\", dest='det', help=\"Image / Directory to store detections to\",default=\"det\", type=str)\n parser.add_argument(\"--bs\", dest=\"bs\", help=\"Batch size\", default=1)\n parser.add_argument(\"--confidence\", dest=\"confidence\", help=\"Object Confidence to filter predictions\", default=0.5)\n parser.add_argument(\"--nms_thresh\", dest=\"nms_thresh\", help=\"NMS Threshhold\", default=0.4)\n parser.add_argument(\"--cfg\", dest='cfgfile', help=\"Config file\",default=\"config/yolov3.cfg\", type=str)\n parser.add_argument(\"--weights\", dest='weightsfile', help=\"weightsfile\",default=\"weights/yolov3.weights\", type=str)\n parser.add_argument(\"--reso\", dest='reso', help=\"Input resolution of the network. Increase to increase accuracy. \"\n \"Decrease to increase speed\",default=\"416\", type=str) # 输入图像的分辨率\n\n return parser.parse_args()", "title": "" }, { "docid": "3bdb3e16d72a312b69be9840ae8b223c", "score": "0.6591714", "text": "def test_detect_arguments_parser(self):\n\n args = [\n 'This is an english text',\n '-m', 'pylade/data/model.json'\n ]\n\n expected = {\n 'output_file': None,\n 'model': 'pylade/data/model.json',\n 'predict_args': None,\n 'text': 'This is an english text',\n 'implementation': 'CavnarTrenkleImpl',\n 'loglevel': 30\n }\n\n assert detect_script_args_parser.parse_arguments(args) == expected", "title": "" }, { "docid": "f86ec341246e9bf90ea3ac3a47d9eaa4", "score": "0.6584287", "text": "def parse_known_args(self, args=None,namespace=None):\n\t\tpass", "title": "" }, { "docid": "3a401270c4a820588f20017d19c7b030", "score": "0.65567154", "text": "def parseArguments(self):\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--image\", help = \"Path to the image\")\n ap.add_argument(\"-id\", \"--imagedir\", help = \"Path to the imagedir\")\n ap.add_argument(\"-v\", action='store_true', help = \"video mode\")\n ap.add_argument(\"-d\", action='store_true', help = \"debug mode\")\n return vars(ap.parse_args())", "title": "" }, { "docid": "0c17f057ef6c806e1a10d7e265ea3c55", "score": "0.64887965", "text": "def arg_parse():\r\n\tparser = argparse.ArgumentParser(description='YOLO v3 Detection Module')\r\n\tparser.add_argument(\"--bs\", dest = \"bs\", help = \"Batch size\", default = 1)\r\n\tparser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.5)\r\n\tparser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\r\n\tparser.add_argument(\"--cfg\", dest = 'cfgfile', help = \"Config file\", default = \"cfg/yolov3.cfg\", type = str)\r\n\tparser.add_argument(\"--weights\", dest = 'weightsfile', help = \"weightsfile\", default = \"yolov3.weights\", type = str)\r\n\tparser.add_argument(\"--reso\", dest = 'reso', help = \"Input resolution of the 
network. Increase to increase accuracy. Decrease to increase speed\", default = \"416\", type = str)\r\n\tparser.add_argument(\"--video\", dest = \"videofile\", help = \"Video file to run detection on\", default = \"video.avi\", type = str)\r\n\treturn parser.parse_args()", "title": "" }, { "docid": "46d348570f2d9bc5874cbaa21bfe279b", "score": "0.6442018", "text": "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Predict whether an image is blurry or not\")\n\n parser.add_argument(\"-f\", \"--input\", required=True,\n help=\"The path of the image floder to classify\")\n parser.add_argument(\"-r\", \"--range\", required=True,\n help=\"The range\")\n parser.add_argument(\"-m\", \"--model\", required=True,\n help=\"The path of the SVM model file\")\n\n return parser.parse_args()", "title": "" }, { "docid": "408a90053883f2be61e02aba5d273ffc", "score": "0.6399796", "text": "def arg_parse():\r\n \r\n \r\n parser = argparse.ArgumentParser(description='YOLO v3 Video Detection Module')\r\n \r\n parser.add_argument(\"--video\", dest = 'video', help = \r\n \"Video to run detection upon\",\r\n default = \"video5.mp4\", type = str)\r\n parser.add_argument(\"--dataset\", dest = \"dataset\", help = \"Dataset on which the network has been trained\", default = \"pascal\")\r\n parser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.5)\r\n parser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\r\n parser.add_argument(\"--cfg\", dest = 'cfgfile', help = \r\n \"Config file\",\r\n default = \"cfg/yolov3.cfg\", type = str)\r\n parser.add_argument(\"--weights\", dest = 'weightsfile', help = \r\n \"weightsfile\",\r\n default = \"yolov3.weights\", type = str)\r\n parser.add_argument(\"--reso\", dest = 'reso', help = \r\n \"Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed\",\r\n default = \"416\", type = str)\r\n return parser.parse_args()", "title": "" }, { "docid": "fa5d982c18e0c3fa36da50baad1d4a85", "score": "0.6341816", "text": "def parse(arg_list):", "title": "" }, { "docid": "7d41077a809c6dff80468ee93e89f819", "score": "0.634023", "text": "def parseArguments():\n\n parser = argparse.ArgumentParser(description='Contaminant Detection')\n\n parser.add_argument('-v', '--video', required=True,\n help=\"Path to video.\")\n parser.add_argument('-o', '--directory', required=False, default=DEFAULT_OUTPUT_DIRECTORY,\n help=\"Path to directory where output images created should be stored.\")\n parser.add_argument('-b', '--bMask', required=False, default=None,\n help=\"Path to background mask (image).\")\n parser.add_argument('-w', '--white', required=False, default=DEFAULT_WHITE_PERCENTAGE, type=float,\n help=\"Percent value of white to be used when evaluating potential for contaminant.\")\n parser.add_argument('-p', '--pixels', required=False, default=DEFAULT_PIXEL_SIZE, type=int,\n help=\"\"\"Minimum pixel size of a region that should be identified as contaminant, \n and highlighted inside rectangular box.\"\"\")\n parser.add_argument('-l', '--long', required=False, default=False, action='store_true', \n help=\"Long mode shows masking of every frame as it goes.\")\n \n args = vars(parser.parse_args())\n\n videoP = args['video']\n outputDir = args['directory']\n backgroundMask = args['bMask']\n longFlag = args['long']\n whiteFlag = args['white']\n minPixls = args['pixels']\n return videoP, outputDir, backgroundMask, longFlag, whiteFlag, minPixls", "title": "" }, { "docid": "3c4c07b55d0410e229e2cc0c69a7e65e", "score": "0.62396866", "text": "def parse_args(self):\n assert os.path.isfile(os.path.join(self.params.matlab_source_folder,\n self.params.matlab_function+'.m')), \\\n \"Matlab function file '%s' not found.\" % self.params.matlab_function", "title": "" }, { "docid": "cf9e8262e355d6077c10a780e43e48c5", "score": "0.6228705", "text": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.register('type', 'bool', lambda v: v.lower() == 'true')\n parser.add_argument(\n '--third_party_headers',\n type=str,\n default='',\n help='Space-separated list of headers to resolve.')\n flags, unparsed = parser.parse_known_args()\n\n main(unparsed, flags)", "title": "" }, { "docid": "b2b203fcbea8e005b08a0f7315a06119", "score": "0.62052715", "text": "def parse_args() -> Any:\n return get_parser().parse_args()", "title": "" }, { "docid": "677a1a112c52479f1cba0cbec371e7bb", "score": "0.6203535", "text": "def arg_parse():\r\n \r\n \r\n parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo')\r\n parser.add_argument(\"--confidence\", dest = \"confidence\", help = \"Object Confidence to filter predictions\", default = 0.25)\r\n parser.add_argument(\"--nms_thresh\", dest = \"nms_thresh\", help = \"NMS Threshhold\", default = 0.4)\r\n parser.add_argument(\"--reso\", dest = 'reso', help = \r\n \"Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed\",\r\n default = \"160\", type = str)\r\n return parser.parse_args()", "title": "" }, { "docid": "6304e100248fc1c6918a83dfeb801518", "score": "0.6191033", "text": "def parse_args(self, args=None,namespace=None):\n\t\tpass", "title": "" }, { "docid": "ab34bfe8864ee190ba5c2fce0ba199b7", "score": "0.6172615", "text": "def processArgs():\n parser = argparse.ArgumentParser(description='Generates detectnet simulated data.')\n parser.add_argument('--canvas_image_dir', type=str, required=True,\n help='Data dir containing canvas images.')\n parser.add_argument('--paste_image_dir', type=str, required=True,\n help='Data dir containing paste images.')\n parser.add_argument('--paste_label_dir', type=str, required=True,\n help='Data dir that contains paste labels.')\n parser.add_argument('--save_dir', type=str, required=True,\n help='Data dir where sim images will be generated.')\n parser.add_argument('--max_paste_rotation', type=int, default=60,\n help='maximum rotation that can be randomly added to pasted image.')\n parser.add_argument('--max_canvas_rotation', type=int, default=5,\n help='maximum rotation that can be randomly added to canvas image.')\n parser.add_argument('--final_img_width', type=int, default=608,\n help='height of the final produced image.')\n parser.add_argument('--final_img_height', type=int, default=608,\n help='width of the final produced image.')\n parser.add_argument('--max_canvas_images', type=int, default=-1,\n help='If set to non-negative value it will only get that number of canvas images.')\n\n args, unknown = parser.parse_known_args()\n return args", "title": "" }, { "docid": "e84557961fa4559c0f3f91b4631d3800", "score": "0.61702156", "text": "def get_arguments():\n parser = argparse.ArgumentParser(description=\"label-components\")\n parser.add_argument(\"--rgb-img\", type=str, default=RGB_IMG, help=\"RGB image\")\n parser.add_argument(\"--rgb\", action='store_true', help=\"run for single rgb file.\")\n parser.add_argument(\"--dir\", type=str, default=RGB_DIR, help=\"Directory for RGB images\")\n return parser.parse_args()", "title": "" }, { "docid": "c6a2e4e3fa1568f3d24b99d3cb4ba754", "score": "0.6168153", "text": "def set_detection_args(self):\n parser_det = self.method_args.add_parser(\n \"detection\",\n help=\"Arguments for detection tasks.\")\n self.set_wsi_arg(parser_det)\n\n parser_det.add_argument(\n \"-vo\", \"--voc_style\", action=\"store_true\",\n help=\"Output as VOC style.\")\n parser_det.add_argument(\n \"-co\", \"--coco_style\", action=\"store_true\",\n help=\"Output as COCO style.\")\n parser_det.add_argument(\n \"-yo\", \"--yolo_style\", action=\"store_true\",\n help=\"Output as YOLO style.\")\n parser_det.add_argument(\n \"-ra\", \"--ratio\", default=\"8:1:1\",\n help=\"Ratio of the dataset size of train/validation/test phase.\")\n parser_det.add_argument(\n \"-cb\", \"--crop_bbox\", default=False, action=\"store_true\",\n help=\"Crop bounding boxes after patch extraction.\"\n )\n self.add_annotation_args(parser_det, slide_is_sparse=True)\n self.set_common_args(parser_det)", "title": "" }, { "docid": "ea8fec226520ac734efd271aa10bad8b", "score": "0.61349094", "text": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input_video', required=True)\n parser.add_argument('-p', '--picture', required=True)\n return parser.parse_args()", "title": "" }, { "docid": "c320620566b045f452cfe8184eb345fd", "score": "0.61147726", "text": "def parse_args():\n # fmt: off\n parser = argparse.ArgumentParser()\n 
parser.add_argument(\"--model\",\n help=\"Model name to use.\")\n parser.add_argument(\"--input\", type=Path,\n help=\"Path of the directory containing the image to infer.\")\n parser.add_argument(\"--output\", type=Path, default=\"output\",\n help=\"Path of the directory to output the results.\")\n parser.add_argument('--gpu_id', type=int, default=-1,\n help=\"GPU ID to use.\")\n args = parser.parse_args()\n # fmt: on\n\n return args", "title": "" }, { "docid": "94a04c4317dfbd5b67bf6b4429883ffb", "score": "0.6092546", "text": "def parse_args(self):\n \n if not os.path.exists(self.params.url_file):\n raise gc3libs.exceptions.InvalidUsage(\n \"gparseurl command file '%s' does not exist;\"\n % self.params.url_file)\n gc3libs.utils.test_file(self.params.url_file, os.R_OK,\n gc3libs.exceptions.InvalidUsage)\n\n if self.params.master_script and \\\n not os.path.exists(self.params.master_script):\n raise gc3libs.exceptions.InvalidUsage(\n \"Input folder '%s' does not exists\"\n % self.params.master_script)", "title": "" }, { "docid": "d6ee16d92e1b16e41f794934410eed5c", "score": "0.60799843", "text": "def args_parser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", required=False,\n help=\"Path to an .xml file with a pre-trained\"\n \"face detection model\")\n parser.add_argument(\"-pm\", \"--posemodel\", required=False,\n help=\"Path to an .xml file with a pre-trained model\"\n \"head pose model\")\n parser.add_argument(\"-i\", \"--input\", required=True, type=str,\n help=\"Path to video file or image.\"\n \"'cam' for capturing video stream from camera\")\n parser.add_argument(\"-l\", \"--cpu_extension\", type=str, default=None,\n help=\"MKLDNN (CPU)-targeted custom layers. Absolute \"\n \"path to a shared library with the kernels impl.\")\n parser.add_argument(\"-d\", \"--device\", default=\"CPU\", type=str,\n help=\"Specify the target device to infer on; \"\n \"CPU, GPU, FPGA, HDDL or MYRIAD is acceptable. 
Application \"\n \"will look for a suitable plugin for device specified\"\n \"(CPU by default)\")\n parser.add_argument(\"-c\", \"--confidence\", default=0.5, type=float,\n help=\"Probability threshold for detections filtering\")\n\n return parser", "title": "" }, { "docid": "281de24e0053994661037c84149f762d", "score": "0.60713536", "text": "def setup_parser():\n PARSER = argparse.ArgumentParser(\n description='plot verifications from MET')\n \n PARSER.add_argument('--model1_mpr_path', type=str, \n required=True, help='model1_mpr_path')\n PARSER.add_argument('--model2_mpr_path', type=str, \n required=True, help='model2_mpr_path')\n PARSER.add_argument('--model1_name', type=str, \n required=True, help='model1 name')\n PARSER.add_argument('--model2_name', type=str, \n required=True, help='model2 name')\n PARSER.add_argument('--fields_list', nargs='+', \n required=True, \n help=\"field list such as T2\")\n \n return PARSER.parse_args()", "title": "" }, { "docid": "6281406bda1760342970c8c1bb8224cd", "score": "0.6065967", "text": "def read_interface_parameters():\n\n arg_parser = argparse.ArgumentParser()\n\n read_common_parameters(arg_parser)\n arg_parser.add_argument('--image',\n type=str,\n help='Path to image for prediction.')\n arg_parser.add_argument('--url',\n type=str,\n help='URL to image for prediction.')\n arg_parser.add_argument('--labels',\n type=str,\n help='Path to labels with indices JSON file.')\n # Host and port for http service\n arg_parser.add_argument('--host',\n type=str,\n default='0.0.0.0',\n help='Host name for HTTP service.')\n arg_parser.add_argument('--port',\n type=int,\n default=50050,\n help='Port number for HTTP service.')\n (flags, _) = arg_parser.parse_known_args()\n\n return (flags, arg_parser)", "title": "" }, { "docid": "7fdd5466fbee950aac7e5b47f20ea46c", "score": "0.60658526", "text": "def parse_args():\n pretrained_model_choices = ['bert-base-uncased', 'bert-base-cased', \"bert-large-uncased-whole-word-masking\",\n 'bert-large-uncased', 'bert-large-cased', 'gpt2', 'gpt2-medium', 'gpt2-large', 'roberta-base',\n 'roberta-large', 'xlnet-base-cased', 'xlnet-large-cased', 'allenai/scibert_scivocab_uncased', 'dmis-lab/biobert-v1.1']\n tokenizer_choices = [\"RobertaTokenizer\", \"BertTokenizer\", \"XLNetTokenizer\"]\n parser = ArgumentParser()\n parser.add_argument(\n \"--pretrained-class\", default=\"bert-base-cased\", choices=pretrained_model_choices,\n help=\"Choose the pretrained model to load from.\")\n parser.add_argument(\"--no-cuda\", default=False, action=\"store_true\")\n parser.add_argument(\n \"--input-file\", default=\"../data/dev.json\", type=str,\n help=\"Choose the dataset to evaluate on.\")\n\n parser.add_argument(\"--output-dir\", default=\"predictions/\", type=str,\n help=\"Choose the output directory for predictions.\")\n parser.add_argument(\"--output-file\", default=None, type=str,\n help=\"Choose the name of the predictions file\")\n\n parser.add_argument(\"--skip-intrasentence\", help=\"Skip intrasentence evaluation.\",\n default=False, action=\"store_true\")\n parser.add_argument(\"--intrasentence-model\", type=str, default='BertLM', choices=[\n 'BertLM', 'BertNextSentence', 'RoBERTaLM', 'XLNetLM', 'XLMLM', 'GPT2LM', 'ModelNSP'],\n help=\"Choose a model architecture for the intrasentence task.\")\n parser.add_argument(\"--intrasentence-load-path\", default=None,\n help=\"Load a pretrained model for the intrasentence task.\")\n parser.add_argument(\"--skip-intersentence\",\n default=False, action=\"store_true\", help=\"Skip intersentence 
evaluation.\")\n parser.add_argument(\"--intersentence-model\", type=str, default='BertNextSentence', choices=[\n 'BertLM', 'BertNextSentence', 'RoBERTaLM', 'XLNetLM', 'XLMLM', 'GPT2LM', 'ModelNSP'],\n help=\"Choose the model for the intersentence task.\")\n parser.add_argument(\"--intersentence-load-path\", default=None,\n help=\"Path to the pretrained model for the intersentence task.\")\n parser.add_argument(\"--tokenizer\", type=str,\n default='BertTokenizer', choices=tokenizer_choices,\n help=\"Choose a string tokenizer.\")\n parser.add_argument(\"--batch-size\", type=int, default=1)\n parser.add_argument(\"--max-seq-length\", type=int, default=128)\n return parser.parse_args()", "title": "" }, { "docid": "227a5c17a32fa9b558bb2dedcdfd8002", "score": "0.6062507", "text": "def parse_args():\n \n import sys\n import global_variables\n \n argvnum = len(sys.argv) # number of total argements,the real arguements number is \n argvlist = sys.argv[1:] # total real arguments(shell name excludded)\n global_variables.set_value('argvnum',argvnum) # store length of arguments into dict \n global_variables.set_value('argvlist',argvlist) # store arguments into dict", "title": "" }, { "docid": "0f8d16ea0932d4bf801e6f03ae7c2339", "score": "0.6059355", "text": "def read_arguments():\n global model_path\n global test_path\n global HEIGHT\n global WIDTH\n \n parser = argparse.ArgumentParser(description = \"Testing TSR model\")\n parser.add_argument(\"model_path\", type=str, help=\"Path to dataset\")\n parser.add_argument(\"test_path\", type=str, help=\"Path to test images\")\n parser.add_argument(\"--height\", type=int, help=\"Image height\")\n parser.add_argument(\"--width\", type=int, help=\"Image width\")\n args = parser.parse_args()\n \n model_path = args.model_path\n test_path = args.test_path\n if args.height is not None:\n HEIGHT = args.height\n if args.height is not None:\n WIDTH = args.width", "title": "" }, { "docid": "e68d9cc4b7a7456e8e97678da3f6a021", "score": "0.60350376", "text": "def parse_arguments():\n factory = ArgumentParserFactory(__doc__)\n factory.add_input_dir_argument(\"Path to the predicted heatmaps\")\n factory.add_output_dir_argument(\n \"Path to the RGB images where the generated labels belong to and should be stored\",\n Path(__file__).parent,\n )\n factory.add_suffix_argument()\n factory.add_resolution_argument()\n parser = factory.parser\n parser.add_argument(\n \"-bt\",\n \"--bin_threshold\",\n default=96,\n type=int,\n help=\"Values over this threshold will be binarized to 1\",\n )\n parser.add_argument(\n \"-md\",\n \"--min_diameter\",\n default=0.05,\n type=float,\n help=\"Minimum diameter for an ROI in percent to the image width\",\n )\n return parser.parse_args()", "title": "" }, { "docid": "33362bbbd232fd3663be090627fc3a74", "score": "0.6026564", "text": "def parser(self, args):\n pass", "title": "" }, { "docid": "c187cb9757752dc6c41fa01be4a9b450", "score": "0.6025145", "text": "def parse_cmdline(argv):\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser()\n\n # print([DEF_IMAGE_NAME_A, DEF_IMAGE_NAME_B])\n\n parser.add_argument(\"-m\", \"--image_file\", help=\"The location of the image files\",\n default=[DEF_IMAGE_NAME_A, DEF_IMAGE_NAME_B], nargs=2)\n\n parser.add_argument(\"-d\", \"--division_pixel\", type=int,help=\"Thickness (number of pixels) of horizontal stripes\",\n default=5)\n\n # parser.add_argument(\"-n\", \"--no_attribution\", help=\"Whether to include attribution\",\n # action='store_false')\n 
args = None\n args = parser.parse_args(argv)\n image1_none = not os.path.isfile(args.image_file[0])\n image2_none = not os.path.isfile(args.image_file[1])\n if image1_none or image2_none:\n warning(\"Either {} or {} does not exist\".format(args.image_file[0], args.image_file[1]))\n parser.print_help()\n return args, IO_ERROR\n return args, SUCCESS", "title": "" }, { "docid": "2c526909a615ec91de41cce5cdea9e59", "score": "0.6016686", "text": "def arg_parse():\n\n parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo')\n parser.add_argument(\"--confidence\", dest=\"confidence\",\n help=\"Object Confidence to filter predictions\", default=0.25)\n parser.add_argument(\"--nms_thresh\", dest=\"nms_thresh\",\n help=\"NMS Threshhold\", default=0.4)\n parser.add_argument(\"--reso\", dest='reso', help=\"Input resolution of the network. Increase to increase accuracy. Decrease to increase speed\",\n default=\"160\", type=str)\n return parser.parse_args()", "title": "" }, { "docid": "2e8b019872296d16338362b735cca05f", "score": "0.60094047", "text": "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Train a model for image classification/segmentation (TensorFlow 2.0)\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n \"--dataset\",\n type=str,\n default=\"ImageNet1K\",\n help=\"dataset name. options are ImageNet1K, CUB200_2011, CIFAR10, CIFAR100, SVHN\")\n parser.add_argument(\n \"--work-dir\",\n type=str,\n default=os.path.join(\"..\", \"imgclsmob_data\"),\n help=\"path to working directory only for dataset root path preset\")\n\n args, _ = parser.parse_known_args()\n dataset_metainfo = get_dataset_metainfo(dataset_name=args.dataset)\n dataset_metainfo.add_dataset_parser_arguments(\n parser=parser,\n work_dir_path=args.work_dir)\n\n add_train_cls_parser_arguments(parser)\n\n args = parser.parse_args()\n return args", "title": "" }, { "docid": "051f551d879aa2e815549161e0ee4df1", "score": "0.600855", "text": "def parse_arguments():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-dp', '--data-path', type=str, help='Specify the path to the images to be split')\n parser.add_argument('-ip', '--image-path', type=str, help='Specify the path to save the split files')\n parser.add_argument('-sp', '--set-path', type=str, help='Specify the path to save the split details txt files')\n args = parser.parse_args()\n\n return args", "title": "" }, { "docid": "fbabfe20c6ffd1d1a13eb7e584163685", "score": "0.600713", "text": "def arg_parse():\n\n parser = argparse.ArgumentParser(description='YOLO v3 Cam Demo')\n parser.add_argument(\"--confidence\", dest=\"confidence\", help=\"Object Confidence to filter predictions\", default=0.25)\n parser.add_argument(\"--nms_thresh\", dest=\"nms_thresh\", help=\"NMS Threshhold\", default=0.4)\n parser.add_argument(\"--reso\", dest='reso', help=\n \"Input resolution of the network. Increase to increase accuracy. 
Decrease to increase speed\",\n default=\"160\", type=str)\n return parser.parse_args()", "title": "" }, { "docid": "51b186c0526c27c769535d24ccda53ef", "score": "0.6002494", "text": "def process_parsed_args(self, filter_args: argparse.Namespace) -> None:", "title": "" }, { "docid": "b7751a5333a70083490f3aa808ba75bf", "score": "0.60010445", "text": "def parse_args():\n parser = common.default_args(net_name=NET_NAME, num_classes=81, image_size=IMAGE_SIZE)\n parser.add_argument('--model', required=False, default=CORNER_NET, \n choices=[CORNER_NET, CORNER_NET_SACCADE, CORNER_NET_SQUEEZE])\n return parser.parse_args()", "title": "" }, { "docid": "4dcb5bb29f06b8d92792b12297f762f9", "score": "0.59915173", "text": "def _ParseArgs():\n usage = 'usage: %prog [options]'\n parser = optparse.OptionParser(usage=usage)\n\n parser.add_option('--label',\n type='string',\n default='MY_TEST',\n help=('Label of the test, used to identify different '\n 'tests. Default: %default'))\n parser.add_option('--ref_video',\n type='string',\n help='Reference video to compare with (YUV).')\n parser.add_option('--test_video',\n type='string',\n help=('Test video to be compared with the reference '\n 'video (YUV).'))\n parser.add_option('--frame_analyzer',\n type='string',\n help='Path to the frame analyzer executable.')\n parser.add_option('--aligned_output_file',\n type='string',\n help='Path for output aligned YUV or Y4M file.')\n parser.add_option('--vmaf', type='string', help='Path to VMAF executable.')\n parser.add_option('--vmaf_model',\n type='string',\n help='Path to VMAF model.')\n parser.add_option('--vmaf_phone_model',\n action='store_true',\n help='Whether to use phone model in VMAF.')\n parser.add_option(\n '--yuv_frame_width',\n type='int',\n default=640,\n help='Width of the YUV file\\'s frames. Default: %default')\n parser.add_option(\n '--yuv_frame_height',\n type='int',\n default=480,\n help='Height of the YUV file\\'s frames. Default: %default')\n parser.add_option('--chartjson_result_file',\n type='str',\n default=None,\n help='Where to store perf results in chartjson format.')\n options, _ = parser.parse_args()\n\n if not options.ref_video:\n parser.error('You must provide a path to the reference video!')\n if not os.path.exists(options.ref_video):\n parser.error('Cannot find the reference video at %s' %\n options.ref_video)\n\n if not options.test_video:\n parser.error('You must provide a path to the test video!')\n if not os.path.exists(options.test_video):\n parser.error('Cannot find the test video at %s' % options.test_video)\n\n if not options.frame_analyzer:\n parser.error(\n 'You must provide the path to the frame analyzer executable!')\n if not os.path.exists(options.frame_analyzer):\n parser.error('Cannot find frame analyzer executable at %s!' %\n options.frame_analyzer)\n\n if options.vmaf and not options.vmaf_model:\n parser.error('You must provide a path to a VMAF model to use VMAF.')\n\n return options", "title": "" }, { "docid": "7d74e08988f61bdeff3bbec55a1e9522", "score": "0.5989032", "text": "def parseArgs(self):\n\t\tself.errors = { \"errType\":\"Need to provide type. 
Valid types: frontend or compute\",\n\t\t\t\t\t\t\"errNoConfig\":\"Need to provide xml configuration file.\",\n\t\t\t\t\t\t\"errConfig\":\"Configuration file %s is not found\"}\n\t\tusage = \"Usage: %prog [-h] [-d] --type=[frontend|compute] configFile\"\n\t\tself.parser = OptionParser(usage, version=self.version)\n\t\tself.parser.add_option(\"-d\", \"--debug\", \n\t\t\t\t\t\t\t\tdest=\"debug\", action=\"store_true\",\n\t\t\t\t\t\t\t\tdefault=False, help=\"Prints values parsed from input file and exits\")\n\t\tself.parser.add_option(\"--type\", dest=\"type\", default=False, help=\"VM type is frontend or compute\")\n\n\t\t(options, args) = self.parser.parse_args(self.args)\n\t\tif options.type not in [\"frontend\", \"compute\"]: \n\t\t\tself.parser.error(self.errors[\"errType\"])\n\t\telse:\n\t\t\tself.type = options.type\n\n\t\tif args:\n\t\t\tself.config = args[0]\n\t\telse:\n\t\t\tself.parser.error(self.errors[\"errNoConfig\"])\n\n\t\tself.debug = options.debug", "title": "" }, { "docid": "828344e13dd907c5fdec4b9419dca3ed", "score": "0.5986436", "text": "def parse_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(add_help=False)\n args = parser.add_argument_group('Options')\n # fmt: off\n args.add_argument('-h', '--help', action='help', help='Show this help message and exit.')\n args.add_argument('-m', '--model', required=True, type=str,\n help='Required. Path to an .xml or .onnx file with a trained model.')\n args.add_argument('-i', '--input', required=True, type=str, nargs='+', help='Required. Path to an image file(s).')\n args.add_argument('-l', '--extension', type=str, default=None,\n help='Optional. Required by the CPU Plugin for executing the custom operation on a CPU. '\n 'Absolute path to a shared library with the kernels implementations.')\n args.add_argument('-c', '--config', type=str, default=None,\n help='Optional. Required by GPU or VPU Plugins for the custom operation kernel. '\n 'Absolute path to operation description file (.xml).')\n args.add_argument('-d', '--device', default='CPU', type=str,\n help='Optional. Specify the target device to infer on; CPU, GPU, MYRIAD, HDDL or HETERO: '\n 'is acceptable. The sample will look for a suitable plugin for device specified. '\n 'Default value is CPU.')\n args.add_argument('--labels', default=None, type=str, help='Optional. Path to a labels mapping file.')\n args.add_argument('-nt', '--number_top', default=10, type=int, help='Optional. Number of top results.')\n # fmt: on\n return parser.parse_args()", "title": "" }, { "docid": "b642a97023c7bc9971149844e7eb759c", "score": "0.5980089", "text": "def parse_args():\n desc = \"\"\"Build Gaussian process models on wafer data. 
Expects ./train/ and\n ./test/ subfolders of current directory to have training and test data for \n each wafer to predict.\n \"\"\"\n epilog = \"\"\"Open-sourced under the MIT license.\n Copyright (C) 2011-2012 Nathan Kupp, Yale University.\"\"\"\n \n parser = argparse.ArgumentParser(description=desc, epilog=epilog)\n parser.add_argument('max_x', type=int,\n help='the wafer width dimension, in number of chips')\n \n parser.add_argument('max_y', type=int,\n help='the wafer height dimension, in number of chips')\n \n parser.add_argument('--noise_param', type=float,\n default=0.1, required=False,\n help='noise parameter, default = 0.1')\n \n return parser.parse_args()", "title": "" }, { "docid": "9aacf79d5aba82feb85087c4c51dcddf", "score": "0.59770584", "text": "def parseArguments():\n desc = \"Program that computes the first binding event for a series of adaptive sampling runs\"\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument(\"column\", type=int, help=\"Column with binding event related metric\")\n parser.add_argument(\"threshold\", type=float, help=\"Threshold for a binding event to be considered\")\n parser.add_argument(\"stepsPerEpoch\", type=int, help=\"StepsPerEpoch\")\n parser.add_argument(\"-seq\", action='store_true', help=\"Use a sequential run, instead of adaptive\")\n parser.add_argument(\"-u\", action='store_true', help=\"Look for unbinding event, instead of binding\")\n parser.add_argument(\"folders\", nargs='+', default=\".\", help=\"Folders with adaptive sampling runs\")\n args = parser.parse_args()\n\n return args.folders, args.column, args.threshold, args.stepsPerEpoch, args.seq, args.u", "title": "" }, { "docid": "6533dbbbaedfb1443d01a2d12bb80c98", "score": "0.59742355", "text": "def parse_arguments():\n\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('file_path', help='Path of the file that will be search for strings \\\n in language')\n parser.add_argument(\"-L\",'--lang', help='language to filter, if a language different from \\\n english is picked, it only prints strings in that language, because search or synonym \\\n techniques are only supported in english')\n parser.add_argument(\"-O\",'--out', help='Output file for strings obtained in specific a \\\n if this is not chosen, the default file name is \"out_lang_strings.txt\"')\n parser.add_argument(\"-Q\",'--query', help='search for word or similar phrase')\n parser.add_argument(\"-M\",'--max', help='max results returned')\n parser.add_argument('-s', action='store_true',help=\"Search using exact match for synonyms \\\n of a word in --query.\")\n parser.add_argument('--lsy', action='store_true',help=\"list synonyms of each word in --query\")\n parser.add_argument(\"-V\", \"--version\", help=\"show program version\", action=\"store_true\")\n parser.add_argument(\"-P\", \"--predict\", help=\"make predictions according previously trained dataset\", action=\"store_true\")\n parser.add_argument(\"-T\", \"--train\", help=\"train dataset passed in this argument\")\n return parser.parse_args()", "title": "" }, { "docid": "2752f15938ad72a967d78109589743ba", "score": "0.59725624", "text": "def parse_arguments():\n\n\ttry:\n\t\tparser = argparse.ArgumentParser(description=\"Python Image Converter\")\n\t\tparser.add_argument(\"-m\",\n\t\t\t\t\t\t\t\"--mode\",\n\t\t\t\t\t\t\thelp=\"1: jpg, 2: png, 3: grayscale, 4: black_white, 5: 
resize\",\n\t\t\t\t\t\t\ttype=int,\n\t\t\t\t\t\t\tdefault=1)\n\t\tparser.add_argument(\"-w\",\n\t\t\t\t\t\t\t\"--overwrite\",\n\t\t\t\t\t\t\thelp=\"Overwrite the image given.\",\n\t\t\t\t\t\t\taction=\"store_true\",\n\t\t\t\t\t\t\tdefault=False)\n\t\tparser.add_argument(\"imgfile\",\n\t\t\t\t\t\t\thelp=\"Path to the image to be converted.\",\n\t\t\t\t\t\t\ttype=str)\n\t\tparser.add_argument(\"-o\",\n\t\t\t\t\t\t\t\"--output\",\n\t\t\t\t\t\t\thelp=\"Output file name.\",\n\t\t\t\t\t\t\ttype=str,\n\t\t\t\t\t\t\tdefault=\"imgconv\")\n\t\tparser.add_argument(\"-r\",\n\t\t\t\t\t\t\t\"--resize\",\n\t\t\t\t\t\t\thelp=\"Resize to a new X Y size (-r X Y)\",\n\t\t\t\t\t\t\ttype=int,\n\t\t\t\t\t\t\tnargs=\"+\")\n\t\tparser.add_argument(\"-s\",\n\t\t\t\t\t\t\t\"--show\",\n\t\t\t\t\t\t\thelp=\"Show the image after processing it.\",\n\t\t\t\t\t\t\taction=\"store_true\",\n\t\t\t\t\t\t\tdefault=False)\n\t\targs = parser.parse_args()\n\n\t\treturn (args.mode, args.overwrite, args.imgfile,\n\t\t\t\targs.output, args.resize, args.show)\n\n\texcept argparse.ArgumentError:\n\t\tprint(\"An error occured while parsing your arguments.\", file=sys.stderr)\n\t\tsys.exit(ARGPARSE_ERR)", "title": "" }, { "docid": "15431e648a956ac2d1d1536d380e00a7", "score": "0.59648156", "text": "def parse_args(args):\n\n parser = argparse.ArgumentParser(\n description='Extract images from SDTP Homeless count pdfs')\n\n parser.add_argument(\n '-v',\n '--verbose',\n dest=\"loglevel\",\n help=\"set loglevel to INFO\",\n action='store_const',\n const=logging.INFO)\n parser.add_argument(\n '-vv',\n '--very-verbose',\n dest=\"loglevel\",\n help=\"set loglevel to DEBUG\",\n action='store_const',\n const=logging.DEBUG)\n\n parser.add_argument('source_image', help='Source image')\n #parser.add_argument('source_dir', help='Source directory')\n #parser.add_argument('dest_dir', help='Destination directoryr')\n\n return parser.parse_args()", "title": "" }, { "docid": "5d5e4bff67c4a3960e47b23723fd5ead", "score": "0.59603465", "text": "def parse_args():\n parser = argparse.ArgumentParser(\"Classification on VIDAA dataset\")\n parser.add_argument(\n \"--prepare\",\n action=\"store_true\",\n help=\"create the directories, prepare the vocabulary and embeddings\",\n )\n parser.add_argument(\"--train\", action=\"store_true\", help=\"train the model\")\n parser.add_argument(\n \"--evaluate\", action=\"store_true\", help=\"evaluate the model on dev set\"\n )\n parser.add_argument(\n '--predict', action='store_true', help='predict the model on test set'\n )\n parser.add_argument(\"--char\", action=\"store_true\", help=\"use char embedding\")\n\n parser.add_argument(\"--gpu\", type=str, default=\"0\", help=\"specify gpu device\")\n\n train_settings = parser.add_argument_group(\"train settings\")\n train_settings.add_argument(\"--dev\", type=float, default=0.2, help=\"验证集比例\")\n train_settings.add_argument(\n \"--filter_sizes\", type=list, default=[5], help=\"一维卷积核大小\"\n )\n train_settings.add_argument(\"--num_filters\", type=int, default=32, help=\"卷积核数量\")\n train_settings.add_argument(\"--optim\", default=\"adam\", help=\"optimizer type\")\n train_settings.add_argument(\n \"--learning_rate\", type=float, default=0.001, help=\"learning rate\"\n )\n train_settings.add_argument(\n \"--weight_decay\", type=float, default=0, help=\"weight decay\"\n )\n train_settings.add_argument(\"--dropout\", type=float, default=0, help=\"dropout rate\")\n train_settings.add_argument(\n \"--batch_norm\", action=\"store_true\", help=\"whether use batch norm or not\"\n )\n 
train_settings.add_argument(\n \"--batch_size\", type=int, default=64, help=\"train batch size\"\n )\n train_settings.add_argument(\"--epochs\", type=int, default=10, help=\"train epochs\")\n train_settings.add_argument(\n \"--hidden_size\", type=int, default=128, help=\"number of rnn hidden unit\"\n )\n train_settings.add_argument(\n \"--max_document_len\", type=int, default=10, help=\"max length of document\"\n )\n train_settings.add_argument(\n \"--max_word_len\", type=int, default=5, help=\"max length of word\"\n )\n model_settings = parser.add_argument_group(\"model settings\")\n\n model_settings.add_argument(\n \"--embedding_size\", type=int, default=300, help=\"size of the embeddings\"\n )\n model_settings.add_argument(\n \"--character_embedding_size\",\n type=int,\n default=100,\n help=\"size of the character embeddings\",\n )\n model_settings.add_argument(\"--class_model\", type=str, default=\"rnn\")\n model_settings.add_argument(\n \"--pretrained_embedding\", action=\"store_true\", help=\"use pretrained embeddings\"\n )\n\n path_settings = parser.add_argument_group(\"path settings\")\n path_settings.add_argument(\n \"--pretrained_file\",\n default=\"data/pretrained_embedding.utf8\",\n help=\"the file to save pretrained word embeddings\",\n )\n path_settings.add_argument(\n \"--data_files\",\n nargs=\"+\",\n default=[\"data/atec_nlp_sim_train.csv\", \"data/atec_nlp_sim_train_add.csv\"],\n help=\"list of files that contain the preprocessed train data\",\n )\n path_settings.add_argument(\n \"--preposs_file\",\n default=\"data/train.data\",\n help=\"the file with ltp token segment\",\n )\n path_settings.add_argument(\"--dev_fils\", default=\"dev.data\")\n path_settings.add_argument(\n \"--model_dir\", default=\"data/models/\", help=\"the dir to store vocab\"\n )\n path_settings.add_argument(\n \"--vocab_dir\", default=\"data/vocab/\", help=\"the dir to store models\"\n )\n path_settings.add_argument(\n \"--result_dir\", default=\"data/results/\", help=\"the dir to output the results\"\n )\n path_settings.add_argument(\n \"--summary_dir\",\n default=\"data/summary/\",\n help=\"the dir to write tensorboard summary\",\n )\n path_settings.add_argument(\n \"--dict_file\", default=\"data/dict\", help=\"user dict of jieba\"\n )\n path_settings.add_argument(\n \"--log_path\",\n help=\"path of the log file. 
If not set, logs are printed to console\",\n )\n return parser.parse_args()", "title": "" }, { "docid": "3603e75bd1d7745615c023f6bb50a8c8", "score": "0.59470725", "text": "def parse_args():\n parser = argparse.ArgumentParser(description=\"Mindspore HRNet Training Configurations.\")\n parser.add_argument(\"--train_url\", type=str, default='./checkpoints/', help=\"Storage path of training results.\")\n parser.add_argument(\"--run_distribute\", type=ast.literal_eval, default=False,\n help=\"Use one card or multiple cards training.\")\n parser.add_argument(\"--device_id\", type=int, default=0)\n\n return parser.parse_args()", "title": "" }, { "docid": "9e075f52ad59f1f37dd24c956e392283", "score": "0.5946902", "text": "def _parse_arguments_for_extraction(argument: str) -> str:", "title": "" }, { "docid": "6d1e2fa3b6c1890e845fffc7e5b293d0", "score": "0.5945396", "text": "def parse_args(self):\n self.args = self.parser.parse_args()", "title": "" }, { "docid": "c36855eebff0a154563b81a323ebbb69", "score": "0.5933432", "text": "def parse_args():\n \n parser = argparse.ArgumentParser(description='Evaluate predictions')\n parser.add_argument('--dir', required = True, help = \"TFRECORD FOLDER\")\n parser.add_argument('--pred', required = True, help = 'Path prediction file')\n parser.add_argument('--hemkit', required = True, help = 'Path to HEMkit bin file.')\n parser.add_argument('--max-dist', default = \"5\", help = 'HEMkit : above this threshold all nodes will be considered to have a common ancestor.')\n parser.add_argument('--pb-max-err', default = \"4\", help = 'HEMkit : maximum error with which pair-based measures penalize nodes that were matched with default.')\n \n args = parser.parse_args()\n \n return args", "title": "" }, { "docid": "d95fafc01faaca981bb63e37058423c0", "score": "0.591745", "text": "def processArgs(printHelp=False):\n parser = OptionParser()\n\n parser.add_option('-f', '--files', dest='files',\n help=\"Input files to compile e.g. 
'CCD*science.fits'\", metavar='string')\n parser.add_option('-o', '--output', dest='output',\n help=\"Name of the output file, default=VISFPA.fits\", metavar='string')\n parser.add_option('-e', '--extension', type='int', dest='ext',\n help='FITS extension from which to look for data, default=0', metavar='int')\n parser.add_option('-d', '--debug', dest='debug', action='store_true',\n help='Debugging mode on')\n if printHelp:\n parser.print_help()\n else:\n return parser.parse_args()", "title": "" }, { "docid": "46645ed76ca5d50943acab989e39b0d9", "score": "0.59162074", "text": "def parse_args():\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-d', dest='driver', required=True)\n parser.add_argument('-r', dest='range', required=True)\n parser.add_argument('-f', dest='hipfile', required=True)\n\n args, unknown = parser.parse_known_args()\n\n if unknown:\n usage('Unknown argument(s): %s' % (' '.join(unknown)))\n\n err = validate_args(args)\n if err:\n usage(err)\n\n return args", "title": "" }, { "docid": "5f27565b1af5e6aa5be9c62bb80abaad", "score": "0.5912516", "text": "def arg_parse():\n parser = argparse.ArgumentParser(description='YOLO-V3 object module')\n parser.add_argument(\n \"--imagefolder\", help='Directory containing imgs, can only contain imgs', default='imgs/test', type=str)\n parser.add_argument('--det', help='where to save imgs',\n default='imgs/output', type=str)\n parser.add_argument('--bs', help='batch_size', default=1)\n parser.add_argument(\n '--confidence', help='whether to preserve the box', default=0.5)\n parser.add_argument('--nms_thre', help='nms threshold', default=0.4)\n parser.add_argument('--cfg', help='path to cfg file',\n default='data/yolov3.cfg')\n parser.add_argument('--weights', help='path to weights',\n default='data/yolov3.weights')\n parser.add_argument(\n '--reso', help='Input resolution of the network. 
Bigger to increase accuracy but decrease speed')\n return parser.parse_args()", "title": "" }, { "docid": "cfe2bc5b6d87940a6c453126619cc564", "score": "0.58955336", "text": "def parse_args():\n parser = argparse.ArgumentParser(usage='Use \"python %(prog)s --help\" for more information', formatter_class=argparse.HelpFormatter)\n parser.register('type', 'bool', lambda v: v.lower() == 'true')\n parser.add_argument(\n '--input_template',\n type=str,\n default='',\n help='Path to template file pdsc.')\n parser.add_argument(\n '--hdrs',\n type=str,\n default='./hdrs.lst',\n help='Headers for cvariant Reference')\n parser.add_argument(\n '--srcs',\n type=str,\n default='./srcs.lst',\n help='Sources for cvariant Reference')\n parser.add_argument(\n '--util_hdrs',\n type=str,\n default='./util_hdrs.lst',\n help='Headers for Kernel Utils')\n parser.add_argument(\n '--util_srcs',\n type=str,\n default='./util_srcs.lst',\n help='Sources for Kernel Utils') \n parser.add_argument(\n '--hdrs-cmsis-nn',\n type=str,\n default='./hdrs.cmsis-nn.lst',\n help='Headers for cvariant CMSIS-NN')\n parser.add_argument(\n '--srcs-cmsis-nn',\n type=str,\n default='./srcs.cmsis-nn.lst',\n help='Sources for cvariant CMSIS-NN')\n parser.add_argument(\n '--hdrs-ethos',\n type=str,\n default='./hdrs.ethos.lst',\n help='Headers for cvariant Ethos-U')\n parser.add_argument(\n '--srcs-ethos',\n type=str,\n default='./srcs.ethos.lst',\n help='Sources for cvariant Ethos-U')\n parser.add_argument(\n '--testhdrs',\n type=str,\n default='./hdrs.test.lst',\n help='Headers for component Testing')\n parser.add_argument(\n '--testsrcs',\n type=str,\n default='./srcs.test.lst',\n help='Sources for component Testing')\n parser.add_argument(\n '--tensorflow_path',\n type=str,\n required=True,\n help='Path to root of tensorflow git')\n flags, unparsed_args = parser.parse_known_args()\n\n main(unparsed_args, flags)", "title": "" }, { "docid": "2cd0e1c1b349ca1fba8080d7c96acae1", "score": "0.5890777", "text": "def process_arguments():\n parser = argparse.ArgumentParser(description='Power Output Formatter')\n parser.add_argument('-if', '--infiles', required=True, nargs='+',\n type=argparse.FileType('r'),\n help='Output files to consider')\n return parser.parse_args()", "title": "" }, { "docid": "86a0baeb8dc37cd471c8cf7a5aad9ccd", "score": "0.58862704", "text": "def parse_argument():\n parser = argparse.ArgumentParser(description='Parsing a file.')\n parser.add_argument('--train', nargs=1, required=True)\n parser.add_argument('--test', nargs=1, required=True)\n parser.add_argument('--numTrees', nargs=1, required=True)\n args = vars(parser.parse_args())\n return args", "title": "" }, { "docid": "bf8463e1c33d370bc6276a9022395c48", "score": "0.5885991", "text": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--fetch', action='store_true',\n help='download cudd from its website')\n parser.add_argument(\n '--linetrace', action='store_true',\n help='use line tracing for Cython extensions')\n for opt in download.EXTENSIONS:\n parser.add_argument(\n '--{s}'.format(s=opt), default=None,\n const='', type=str, nargs='?',\n help='build Cython extension {s}'.format(s=opt))\n args, unknown = parser.parse_known_args()\n args.sdist = 'sdist' in unknown\n args.bdist_wheel = 'bdist_wheel' in unknown\n # avoid confusing `setuptools`\n sys.argv = [sys.argv[0]] + unknown\n return args", "title": "" }, { "docid": "d5a0fd3d1b43ce27fdc59cad0ef4a392", "score": "0.5884999", "text": "def cfenet_arguments():\n parser = 
argparse.ArgumentParser(add_help=False)\n\n # Logging\n parser.add_argument(\n \"--logdir\",\n default=\"../logs\",\n type=str,\n help=\"Directory to store logs, summaries, checkpoints.\",\n )\n parser.add_argument(\n \"--dev\",\n action=\"store_true\",\n help=\"If true, will ignore logdir and log to ../logdev instead\",\n )\n parser.add_argument(\"--name\", type=str, help=\"Prefix to add to logging directory\")\n parser.add_argument(\n \"--debug\",\n action=\"store_true\",\n help=\"If set, will enable autograd anomaly detection\",\n )\n # settings for input data_loader\n parser.add_argument(\n \"-i\",\n \"--dataset_path\",\n default=\"../datasets/modelnet40_ply_hdf5_2048\",\n type=str,\n metavar=\"PATH\",\n help=\"path to the processed dataset. Default: ../datasets/modelnet40_ply_hdf5_2048\",\n )\n parser.add_argument(\n \"--dataset_type\",\n default=\"modelnet_hdf\",\n choices=[\n \"modelnet_hdf\",\n \"kitti_odometry\",\n \"bunny\",\n \"armadillo\",\n \"buddha\",\n \"dragon\",\n ],\n metavar=\"DATASET\",\n help=\"dataset type (default: modelnet_hdf)\",\n )\n parser.add_argument(\n \"--num_points\",\n default=1024,\n type=int,\n metavar=\"N\",\n help=\"points in point-cloud (default: 1024)\",\n )\n parser.add_argument(\n \"--noise_type\",\n default=\"crop_scale\",\n choices=[\"clean\", \"jitter\", \"crop\", \"crop_scale\", \"crop_scale_dense\", \"kitti\"],\n help=\"Types of perturbation to consider\",\n )\n parser.add_argument(\n \"--rot_mag\",\n default=45.0,\n type=float,\n metavar=\"T\",\n help=\"Maximum magnitude of rotation perturbation (in degrees)\",\n )\n parser.add_argument(\n \"--trans_mag\",\n default=0.5,\n type=float,\n metavar=\"T\",\n help=\"Maximum magnitude of translation perturbation\",\n )\n parser.add_argument(\n \"--partial\",\n default=[0.7, 0.7],\n nargs=\"+\",\n type=float,\n help=\"Approximate proportion of points to keep for partial overlap (Set to 1.0 to disable)\",\n )\n # Model\n parser.add_argument(\n \"--method\",\n type=str,\n default=\"cfenet\",\n choices=[\"cfenet\", \"fgr\", \"ransac\", \"icp\", \"dcp_v2\", \"rpmnet\", \"dgr\"],\n help=\"Model to use. Note: Only cfenet is supported for training.\"\n \"'eye' denotes identity (no registration), 'gt' denotes groundtruth transforms\",\n )\n # PointNet settings\n parser.add_argument(\n \"--radius\",\n type=float,\n default=0.3,\n help=\"Neighborhood radius for computing pointnet features\",\n )\n parser.add_argument(\n \"--num_neighbors\",\n type=int,\n default=64,\n metavar=\"N\",\n help=\"Max num of neighbors to use\",\n )\n # CFENet settings\n parser.add_argument(\n \"--features\",\n type=str,\n choices=[\"ppf\", \"dxyz\", \"xyz\"],\n default=[\"ppf\", \"dxyz\", \"xyz\"],\n nargs=\"+\",\n help=\"Which features to use. Default: all\",\n )\n parser.add_argument(\n \"--feat_dim\",\n type=int,\n default=96,\n help=\"Feature dimension (to compute distances on). 
Other numbers will be scaled accordingly\",\n )\n parser.add_argument(\"--feat_ball_dim\", type=int, default=72)\n parser.add_argument(\n \"--no_slack\", action=\"store_true\", help=\"If set, will not have a slack column.\"\n )\n parser.add_argument(\n \"--num_sk_iter\",\n type=int,\n default=5,\n help=\"Number of inner iterations used in sinkhorn normalization\",\n )\n parser.add_argument(\n \"--num_reg_iter\",\n type=int,\n default=5,\n help=\"Number of outer iterations used for registration (only during inference)\",\n )\n parser.add_argument(\n \"--loss_type\",\n type=str,\n choices=[\"mse\", \"mae\", \"l1\"],\n default=\"mae\",\n help=\"Loss to be optimized\",\n )\n parser.add_argument(\n \"--wt_inliers\", type=float, default=1e-2, help=\"Weight to encourage inliers\"\n )\n # Training parameters\n parser.add_argument(\n \"--train_batch_size\",\n default=16,\n type=int,\n metavar=\"N\",\n help=\"training mini-batch size (default 8)\",\n )\n parser.add_argument(\n \"-b\",\n \"--val_batch_size\",\n default=1,\n type=int,\n metavar=\"N\",\n help=\"mini-batch size during validation or testing (default: 16)\",\n )\n parser.add_argument(\n \"--resume\",\n default=None,\n type=str,\n metavar=\"PATH\",\n help=\"Pretrained network to load from. Optional for train, required for inference.\",\n )\n parser.add_argument(\n \"--gpu\",\n default=\"0\",\n type=str,\n metavar=\"DEVICE\",\n help=\"GPU to use, ignored if no GPU is present. Set to negative to use cpu\",\n )\n\n # METHOD OPTIONS, defaults all should be proposed\n parser.add_argument(\n \"--sk_type\",\n type=str,\n default=\"sum_approx\",\n choices=[\"sum\", \"log\", \"sum_approx\", \"log_approx\"],\n )\n parser.add_argument(\"--via_cpu\", action=\"store_true\")\n parser.add_argument(\n \"--ppf_type\", type=str, default=\"inner_prod\", choices=[\"inner_prod\", \"angle\"]\n )\n parser.add_argument(\n \"--knn\",\n type=str,\n default=\"k_select\",\n choices=[\n \"k_select\",\n \"topk\",\n \"topk_select\",\n \"all_sort\",\n \"in_radius\",\n \"faiss_k_select\",\n \"faiss_topk\",\n ],\n )\n parser.add_argument(\"--scale_min\", type=float, default=0)\n parser.add_argument(\"--scale_max\", type=float, default=0)\n parser.add_argument(\"--scale_test\", type=float, default=0)\n\n return parser", "title": "" }, { "docid": "07e67b6eae607bebb8d45d9195ef6fa6", "score": "0.5868634", "text": "def parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--set_dir', default='/data/Volume1', type=str, help='parent directory of test dataset')\n parser.add_argument('--set_names', default=['train'], type=list, help='name of test dataset')\n parser.add_argument('--model_dir', default='/home/ubuntu/PycharmProjects/MyDenoiser/keras_implementation/residual_std_models/old_models/DnCNN_sigma25/', type=str,\n help='directory of the low-noise-denoising model')\n parser.add_argument('--model_name', default='model_004.hdf5', type=str,\n help='name of the high-noise model')\n parser.add_argument('--result_dir', default='data/results_dncnn', type=str, help='directory of results')\n parser.add_argument('--save_result', default=1, type=int, help='save the denoised image, 1 for yes or 0 for no')\n return parser.parse_args()", "title": "" }, { "docid": "c5649d12d6beb4307e57b180d4f143f5", "score": "0.58654195", "text": "def parse_args():\n\n import argparse\n\n # Parse command line arguments\n ap = argparse.ArgumentParser(\n description=\"Detectron2 Image Processing Pipeline\")\n ap.add_argument(\"-i\", \"--input\", required=True,\n help=\"path to input image 
file or directory\")\n ap.add_argument(\"-o\", \"--output\", default=\"output\",\n help=\"path to output directory (default: output)\")\n ap.add_argument(\"-p\", \"--progress\", action=\"store_true\",\n help=\"display progress\")\n ap.add_argument(\"-sb\", \"--seperate-background\", action=\"store_true\",\n help=\"seperate background\")\n\n # Detectron Settings\n ap.add_argument(\"--config-file\",\n default=\"configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\",\n help=\"path to config file (default: configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\")\n ap.add_argument(\"--config-opts\", default=[], nargs=argparse.REMAINDER,\n help=\"modify model config options using the command-line\")\n ap.add_argument(\"--weights-file\", default=None,\n help=\"path to model weights file\")\n ap.add_argument(\"--confidence-threshold\", type=float, default=0.5,\n help=\"minimum score for instance predictions to be shown (default: 0.5)\")\n\n # Mutliprocessing settings\n ap.add_argument(\"--gpus\", type=int, default=1,\n help=\"number of GPUs (default: 1)\")\n ap.add_argument(\"--cpus\", type=int, default=0,\n help=\"number of CPUs (default: 1)\")\n ap.add_argument(\"--queue-size\", type=int, default=3,\n help=\"queue size per process (default: 3)\")\n ap.add_argument(\"--single-process\", action=\"store_true\",\n help=\"force the pipeline to run in a single process\")\n\n return ap.parse_args()", "title": "" }, { "docid": "e670e28a7638834fd008543faf1f21e3", "score": "0.58651114", "text": "def processProgramArguments():\n\t\n\t# --- Check argument count ---\n\tif len(sys.argv) != 2:\n\t\tError.exit(Error.argument, \"Invalid argument count\")\n\t\n\t# --- Print argument \"--help\" ---\n\tif sys.argv[1] == \"--help\":\n\t\tprint(\"This program interprets code in language IPPcode18 parsed to XML\")\n\t\tprint(\"Author: Jiri Furda (xfurda00)\")\n\t\tprint(\"Usage:\")\n\t\tprint(\"python3.6 interpret.py --source=<path to .src>\")\n\t\tsys.exit(0)\n\t\t\n\t# --- Load arguemnt \"--source\" ---\n\telif sys.argv[1][:9] == \"--source=\":\n\t\treturn sys.argv[1][9:]\t\n\t\t\n\t# --- Check illegal argument ---\n\telse:\n\t\tError.exit(Error.argument, \"Invalid argument\")", "title": "" }, { "docid": "f3dc77eaf7c8181742961c0d9243db8c", "score": "0.5864169", "text": "def parse_arguments():\n\n text = sys.modules[__name__].__doc__\n parser = argparse.ArgumentParser(description=text, prog=OWN_NAME)\n\n text = \"Port on the devise to send controls to.\"\n parser.add_argument(\"port\", type=int, metavar=\"PORT\", help=text)\n\n text = \"Print version and exit.\"\n parser.add_argument(\"--version\", action=\"version\", help=text, version=VERSION)\n\n return parser.parse_args()", "title": "" }, { "docid": "4c1bc68b7711ce49970b37db0c906770", "score": "0.58618814", "text": "def parse_args():\n # Instantiate the parser\n parser = argparse.ArgumentParser(description='ConvESN_MSMC')\n\n # location of the padded skeleton data\n parser.add_argument('input', default='./data/padded', help='the skeleton data folder/ test file name')\n # choose the split number from the padded files\n parser.add_argument('-split_number', nargs='?', default='1', help='split number to use')\n # trains a model if set to true\n parser.add_argument('--train', action='store_true')\n # name of checkpoint file (save to/ load from)\n parser.add_argument('-checkpoint', default='check_points/weights-improvement_test.hdf5', nargs='?', help=\"name of checkpoint file to load/save\")\n # save reservoir along with model\n 
parser.add_argument('-reservoir', default='reservoir/reservoir_test.pkl', nargs='?', help=\"name of checkpoint file to load/save\")\n parser.add_argument('-test_sample', action='store_true')\n\n return parser.parse_args()", "title": "" }, { "docid": "e5cfc39e98b94f892796b5079f44c50a", "score": "0.5860713", "text": "def process_argument():\n parser = argparse.ArgumentParser()\n # Default argument\n parser.add_argument(\"data_dir\", type=str, help=\"Directory of data\")\n # Optional arguments\n parser.add_argument(\"--save_dir\", default='', type=str, help=\"Directory to save checkpoints\")\n parser.add_argument(\"--arch\", default='densenet121', type=str, help=\"Pick a network architecture. Options: alexnet, vgg13, vgg13_bn, resnet34, densenet161\", choices=[\"alexnet\", \"vgg13\", \"vgg13_bn\", \"resnet34\", \"densenet161\"])\n parser.add_argument('--learning_rate', default=0.001, type=float, help='Set learning rate' )\n parser.add_argument('--hidden_units', default=256, type=int, help='Set number of nodes in each hidden layer')\n parser.add_argument('--epochs', default=3, type=int, help='Set number of training epochs')\n parser.add_argument('--gpu', default=False, action='store_true', help='Use GPU processing')\n args = parser.parse_args()\n \n # For checking purposes\n '''\n print(args.data_dir)\n print(args.arch)\n print(args.learning_rate)\n print(args.hidden_units)\n print(args.epochs)\n if args.gpu:\n print(\"Using GPU\")\n '''\n return args", "title": "" }, { "docid": "5c6dc1c9140d62bdde35ef1ea19fb4be", "score": "0.5860565", "text": "def parse_argument():\n parser = argparse.ArgumentParser(description='Parsing a file.')\n parser.add_argument('--train', nargs=1, required=True)\n parser.add_argument('--test', nargs=1, required=True)\n args = vars(parser.parse_args())\n return args", "title": "" }, { "docid": "bb5e095797d72a40ef7a8e4ad96b750a", "score": "0.5857974", "text": "def _clparsing():\n parser = argparse.ArgumentParser(description=_DESCRIPTION)\n\n msg = 'Path of the input file'\n parser.add_argument('-id', '--indir', default='.', help=msg)\n\n msg = 'Path of the output file'\n parser.add_argument('-od', '--outdir', default='.', help=msg)\n\n args = parser.parse_args()\n\n return args", "title": "" }, { "docid": "354e5466995111e1c1d649ac18ac9480", "score": "0.5853421", "text": "def _parse_arguments(text):\n parser = argparse.ArgumentParser(description=\"Check a Rez package for issues.\",)\n\n parser.add_argument(\n \"-d\",\n \"--disable\",\n default=\"\",\n help=\"A comma-separated list of codes to not check.\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--folder\",\n default=\".\",\n help=\"The starting point that will be used to search for Rez package(s). \"\n \"This defaults to the current directory.\",\n )\n\n parser.add_argument(\n \"-r\",\n \"--recursive\",\n action=\"store_true\",\n help=\"Enable this flag to search for all Rez packages under the given --folder.\",\n )\n\n parser.add_argument(\n \"-c\",\n \"--concise\",\n action=\"store_true\",\n help=\"Add this option to make lint results more compact.\",\n )\n\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n default=0,\n action=\"count\",\n help=\"Add this option to print logger messages. Repeat for more verbosity.\",\n )\n\n parser.add_argument(\n \"-g\",\n \"--vimgrep\",\n action=\"store_true\",\n help=\"Output check results with path, line, and column information. 
\"\n \"When enabled, this option will disable --verbose.\",\n )\n\n return parser.parse_args(text)", "title": "" }, { "docid": "1a70b10ba3bb55642bfbbc3a4a913c2a", "score": "0.5850238", "text": "def parse_arguments():\r\n parser = ArgumentParser()\r\n parser.add_argument('-v', '--version', action='version', version=VERSION)\r\n parser.add_argument('dir', help = 'input directory, compare .reference and .result files')\r\n\r\n return parser.parse_args()", "title": "" }, { "docid": "6c5d094d34d5d1937ae795b9a080d95d", "score": "0.58474004", "text": "def parse_arguments(args_to_parse):\n description = \"Extract information from phone marked lattices\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\n '-b', '--base-lat-dir', type=str,\n help=\"Path to the base lattice directory.\"\n )\n parser.add_argument(\n '-e', '--extension-dir', type=str,\n help=\"Extension directory post-dataset directory\"\n )\n parser.add_argument(\n '-o', '--output-dir', type=str, default='info/abs-dataset-paths',\n help=\"Output directory for the processed absolute path files (.txt)\"\n )\n parser.add_argument(\n '-i', '--input-dir', type=str, default='info/reference-lists',\n help=\"The directory with the train, cv, and test files which indicate the dataset split (.lst)\"\n )\n parser.add_argument(\n '--confusion-net', default=False, action='store_true',\n help='Operate over confusion networks rather than lattices (these end in *.scf.gz rather that *.lat.gz)'\n )\n\n args = parser.parse_args(args_to_parse)\n return args", "title": "" }, { "docid": "5c385bcb432f4c02145a87a1b592bff0", "score": "0.5843178", "text": "def process_commands():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"image\", help=IMAGE_HELP)\n parser.add_argument(\"--encode\", help=ENCODE_HELP, type=str)\n return parser.parse_args()", "title": "" }, { "docid": "cdf366a76c535a65be5a13a1dc833fb7", "score": "0.58399224", "text": "def read_cmd_arguments(no_of_layers, no_of_kernels):\n\n\tconfig = sys.argv[1]\n\tparam = genfromtxt(config, delimiter=',')\n\tprint(param)\n\n\tk_type = genfromtxt('kernels.csv', delimiter=',')\n\n\treturn param, k_type", "title": "" }, { "docid": "311a19713a8fcd0eb58532df217e6cab", "score": "0.58309513", "text": "def main_parse_args():\n parser = ArgumentParser()\n parser.add_argument('chunkfile', help='path to a single chunk file produced \\\n in fetch, e.g. dip.PPI.raw_line.1.txt')\n parser.add_argument('metadata_json', help='json file produced from check, \\\n e.g. 
file_metadata.json')\n parser = cf.add_config_args(parser)\n args = parser.parse_args()\n return args", "title": "" }, { "docid": "2d62d3abf4dc1eeff9eb118ff0ba05c0", "score": "0.5824896", "text": "def parse_args() -> argparse.Namespace:\r\n parser = argparse.ArgumentParser(description=\"Demux HEIF file.\")\r\n parser.add_argument(\r\n \"file_or_dir_path\",\r\n type=str,\r\n help=\"Path to your HEIF file or directory containing HEIF files.\",\r\n )\r\n return parser.parse_args()", "title": "" }, { "docid": "41b3fe8d6c57fe784cb613337ff426ef", "score": "0.58138317", "text": "def parse_args():\n parser = argparse.ArgumentParser(\n description='Perform convolutions and transposed convolutions.'\n )\n parser.add_argument(\"--image\", dest=\"image\", type=str, default=\"images/python.png\",\n help=\"the image to be convoluted\")\n parser.add_argument(\"--num-convolutions\", dest=\"num_convolutions\", type=int, default=3,\n help=\"the number of convolutions (and transposed convolutions) to perform\")\n\n return parser.parse_args()", "title": "" }, { "docid": "7bb75d96d74e6b34137f6e658277a19c", "score": "0.58126795", "text": "def _setup_arguments(self, parser):\n pass", "title": "" }, { "docid": "e02607f592c54044ad53df54f40e8e8c", "score": "0.5812146", "text": "def ParseArguments():\r\n parser = argparse.ArgumentParser(description=\"Perform Part 2 NHS Digital Technical Tests\")\r\n parser.add_argument(\"--input\", \r\n help=\"Input postcode data\", \r\n default=\"import_data.csv\")\r\n parser.add_argument(\"--unmatched\", \r\n help=\"Output unmatched/invalid data\", \r\n default=\"failed_validation.csv\")\r\n return parser.parse_args()", "title": "" }, { "docid": "71623a7f89adfa9f46de0f16db7f6437", "score": "0.58117455", "text": "def parse_arguments():\n parser = GooeyParser(\n prog=\"EP2018_demo1\", usage=None,\n description=\"Demonstation CLI Program\",\n epilog=None,\n add_help=True)\n parser.add_argument(\"infile\", help=\"One or More Input Files\", nargs='+',\n action='append', type=argparse.FileType('r'), widget='FileChooser')\n parser.add_argument(\"--reverse\", '-r', help=\"Do things backwards\",\n action=\"store_true\")\n parser.add_argument('--detail', '-l', help='Select the level of detail',\n type=int, default=3)\n opts = parser.parse_args()\n return opts", "title": "" }, { "docid": "5bee57cb8fe2706fc03204d31851ab77", "score": "0.5806407", "text": "def parse_args():\n parser = ArgumentParser()\n parser.add_argument(\n '--test-perf',\n action='store_true',\n help=\"\"\"Instead of unit test, run a performance test\"\"\",\n )\n parser.add_argument(\n '-v',\n action='count',\n default=Levels.WARN,\n help=\"\"\"Set verbosity level. Repeat for increased verbosity.\"\"\",\n )\n return vars(parser.parse_args())", "title": "" }, { "docid": "35aeb9c37ff6123dd146b1e580d20c83", "score": "0.58053076", "text": "def parse_args():\n parser = ArgumentParser(description=\"Path planning module Parameters\")\n parser.add_argument(\"-d\", \"--dim\", default=64, help=\"Dimensions of the (square) map in fields. Default is 64\", type=int)\n parser.add_argument(\"-f\", \"--fov\", default=1, help=\"1/2 of the FOV of the UAV. Is used forward AND backward. Default is 1\", type=int)\n parser.add_argument(\"--overlaph\", default=0.5, help=\"Horizontal desired overlap. Default is 0.5\", type=float)\n parser.add_argument(\"--overlapv\", default=0.5, help=\"Vertical desired overlap. Default is 0.5\", type=float)\n parser.add_argument(\"-a\", \"--accuracy\", default=0.8, help=\"Detection accuracy. 
Default is 0.8\", type=float)\n parser.add_argument(\"-t\", \"--transposed\", default=False, help=\"Whether the map should be transposed. Default is false\", action=\"store_true\")\n parser.add_argument(\"-s\", \"--simcase\", default=1, help=\"Which simulation case to run. Default is 1\", type=int)\n parser.add_argument(\"-r\", \"--random\", default=False, action=\"store_true\", help=\"Whether object locations should be randomly generated or not\")\n args = parser.parse_args()\n return args", "title": "" }, { "docid": "c71762b6ec33c875cd9c9511feb82e9e", "score": "0.5802769", "text": "def parse_args():\n parser = argparse.ArgumentParser('Reading Comprehension on BaiduRC dataset')\n parser.add_argument('--prepare' , default=False , action='store_true' ,\n help='create the directories, prepare the vocabulary and embeddings')\n parser.add_argument('--train' , default=False , action='store_true' ,\n help='train the model')\n parser.add_argument('--evaluate' , default=False , action='store_true' ,\n help='evaluate the model on dev set')\n parser.add_argument('--predict' , default=True , action='store_true' ,\n help='predict the answers for test set with trained model')\n parser.add_argument('--gpu' , type=str , default='0',\n help='specify gpu device')\n\n train_settings = parser.add_argument_group('train settings')\n train_settings.add_argument('--optim' , default='adam' ,\n help='optimizer type')\n train_settings.add_argument('--norm' , default=False ,\n help='batch normalization')\n train_settings.add_argument('--learning_rate' , type=float , default=0.001 ,\n help='learning rate')\n train_settings.add_argument('--weight_decay' , type=float , default=0.001 ,\n help='weight decay')\n train_settings.add_argument('--dropout_keep_prob' , type=float , default=1 ,\n help='dropout keep rate')\n train_settings.add_argument('--batch_size' , type=int , default=32,\n help='train batch size')\n train_settings.add_argument('--epochs' , type=int , default=3,\n help='train epochs')\n\n model_settings = parser.add_argument_group('model settings')\n model_settings.add_argument('--algo' , choices=['BIDAF' , 'MLSTM'] , default='BIDAF' ,\n help='choose the algorithm to use')\n model_settings.add_argument('--embed_size' , type=int , default=128 ,\n help='size of the embeddings')\n model_settings.add_argument('--hidden_size' , type=int , default=64 ,\n help='size of LSTM hidden units')\n model_settings.add_argument('--max_p_num' , type=int , default= 5,\n help='max passage num in one sample')\n model_settings.add_argument('--max_p_len' , type=int , default=400,\n help='max length of passage')\n model_settings.add_argument('--max_q_len' , type=int , default=60,\n help='max length of question')\n model_settings.add_argument('--max_a_len' , type=int , default=200,\n help='max length of answer')\n\n path_settings = parser.add_argument_group('path settings')\n path_settings.add_argument('--train_files' , nargs='+',\n default=[s_train, z_train] ,\n help='list of files that contain the preprocessed train data')\n path_settings.add_argument('--dev_files' , nargs='+' ,\n default=[s_dev, z_dev] ,\n help='list of files that contain the preprocessed dev data')\n path_settings.add_argument('--test_files' , nargs='+' ,\n default=[s_test, z_test] ,\n help='list of files that contain the preprocessed test data')\n path_settings.add_argument('--brc_dir' , default='../data/baidu' ,\n help='the dir with preprocessed baidu reading comprehension data')\n path_settings.add_argument('--vocab_dir' , default='../data/vocab/vocab19' ,\n 
help='the dir to save vocabulary')\n path_settings.add_argument('--model_dir' , default='../data/models/model_7.16/' ,\n help='the dir to store models')\n path_settings.add_argument('--result_dir' , default='../data/results/result_7.16' ,\n help='the dir to output the results')\n path_settings.add_argument('--summary_dir' , default='../data/summary/' ,\n help='the dir to write tensorboard summary')\n path_settings.add_argument('--log_path' , default='/home/congyao/DuReader-master/data/logs/log4txt' ,\n help='path of the log file. If not set, logs are printed to console')\n return parser.parse_args()", "title": "" }, { "docid": "31e8c907be058e7f07ad88a4b2f211c3", "score": "0.5800657", "text": "def process_args():\n \n parser = argparse.ArgumentParser(description=\"Statistiky nehodovosti Policie CR\", allow_abbrev=False)\n parser.add_argument(\"--show_figure\", help=\"if set show figures\", action=\"store_true\")\n parser.add_argument(\"--fig_location\", help=\"folder for figures\")\n return parser.parse_args()", "title": "" }, { "docid": "ce9405518119670aeba7a87ada11110c", "score": "0.58005553", "text": "def parse_args():\n\n parser = argparse.ArgumentParser(\n description=\"Let's train some neural nets!\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '--architecture',\n default = 'ASL',\n choices=['ASL', 'VGG', 'AlexNet', 'LeNet'],\n help='''Which architecture to run'''\n )\n parser.add_argument(\n '--data',\n default = 'asl_dataset' + os.sep + 'asl_dataset' + os.sep,\n help='Location where the dataset is stored.')\n parser.add_argument(\n '--load-checkpoint',\n default=None,\n help='''Path to model checkpoint file (should end with the\n extension .h5). Checkpoints are automatically saved when you\n train your model. If you want to continue training from where\n you left off, this is how you would load your weights.''')\n parser.add_argument(\n '--confusion',\n action='store_true',\n help='''Log a confusion matrix at the end of each\n epoch (viewable in Tensorboard). This is turned off\n by default as it takes a little bit of time to complete.''')\n parser.add_argument(\n '--evaluate',\n action='store_true',\n help='''Skips training and evaluates on the test set once.\n You can use this to test an already trained model by loading\n its checkpoint.''')\n parser.add_argument(\n '--video',\n action='store_true',\n help='''Skips training and runs the live video predictor.\n You can use this to predict with an already trained model by loading\n its checkpoint.''')\n\n\n return parser.parse_args()", "title": "" }, { "docid": "473eed41e76bbc97939af5b576c8255f", "score": "0.5796528", "text": "def parse_command_line_parameters(parser):\n msg = {\n '--input-dir': 'Path to the input folder.',\n '--output-dir': 'Path to the output folder.',\n '--im-ext': \"\"\"Extension of the image files inside the input \n folder. Typically '.jpg'\"\"\",\n '--seg-suffix': \"\"\"Suffix of the segmentation files. For example, if \n an input image is called image.jpg, and the \n corresponding segmentation is image_seg.png,\n then the suffix is '_seg'.\"\"\",\n '--seg-ext': \"\"\"Extension of the segmentation mask files. 
\n Typically '.png'\"\"\",\n '--max-inst': 'Maximum number of instruments present in the image.',\n '--max-tips': 'Maximum number of instruments present in the image.',\n }\n parser.add_argument('--input-dir', required=True, help=msg['--input-dir'])\n parser.add_argument('--output-dir', required=True, help=msg['--output-dir'])\n parser.add_argument('--im-ext', required=False, default='.jpg', help=msg['--im-ext'])\n parser.add_argument('--seg-suffix', required=False, default='_seg', \n help=msg['--seg-suffix'])\n parser.add_argument('--seg-ext', required=False, default='.png', help=msg['--seg-ext'])\n parser.add_argument('--max-inst', required=True, help=msg['--max-inst'])\n parser.add_argument('--max-tips', required=True, help=msg['--max-tips'])\n \n args = parser.parse_args()\n args.max_inst = int(args.max_inst)\n args.max_tips = int(args.max_tips)\n return args", "title": "" }, { "docid": "be6002677e09ddde180f9529e8201c9b", "score": "0.57919157", "text": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', help='path to data file',\n nargs='?', default='static/irises.jpeg')\n parser.add_argument(\n '-k', help='number of image cluster from [4,7]', nargs='?', type=int, default=4)\n parser.add_argument('-o', help='output image file',\n nargs='?', default='output.jpg')\n return parser.parse_args()", "title": "" }, { "docid": "22ff8b003b960881359fb942e3593a5d", "score": "0.57894313", "text": "def parse_arguments():\n arg = argparse.ArgumentParser()\n arg.add_argument(\n \"--model_name\",\n type=str,\n default=ls.available_models()[0],\n choices=ls.available_models(),\n help=\"Model architecture\",\n )\n arg.add_argument(\n \"--version\",\n type=str,\n help=\"Model version\",\n )\n arg.add_argument(\n \"--target\",\n \"-t\",\n type=str,\n help=\"Target path to save the model\",\n )\n arg.add_argument(\n \"--quantize\",\n \"-q\",\n action=\"store_true\",\n help=\"Quantize the model\",\n )\n arg.add_argument(\n \"--opset-version\",\n type=int,\n default=11,\n help=\"Onnx opset version\",\n )\n return arg.parse_args()", "title": "" }, { "docid": "183d18ef3a4ed94be1a2aa3fe8f36d8a", "score": "0.5787306", "text": "def parse_args(self):\n if not os.path.exists(self.params.executable):\n raise gc3libs.exceptions.InvalidUsage(\n \"Path '%s' to the 'forwardPremium' executable does not exist;\"\n \" use the '-x' option to specify a valid one.\"\n % self.params.executable)\n if os.path.isdir(self.params.executable):\n self.params.executable = os.path.join(self.params.executable,\n 'forwardPremium')\n gc3libs.utils.test_file(self.params.executable, os.R_OK|os.X_OK,\n gc3libs.exceptions.InvalidUsage)", "title": "" }, { "docid": "3a3d826808fc58114e8d99a163344f2c", "score": "0.5771084", "text": "def handleArgs(self):\n # Iterate the arguments and handle them\n i = 0\n while i < len(self.args):\n arg = self.args[i]\n # Handle the mod toggle argument\n if arg == '--disableMods':\n self.results['loadCustomMods'] = False\n\n # Handle the runtime type argument, defaulting to server if invalid\n elif arg == '--mode' and i != len(self.args)-1:\n self.results['runtimeType'] = {'SERVER' : SERVER,\n 'CLIENT' : CLIENT,\n 'COMBINED' : COMBINED\n }.get(self.args[i+1], SERVER)\n del self.args[i+1]\n\n # Handle the address and port arguments\n elif arg == '--address' and i != len(self.args)-1:\n self.results['address'] = self.args[i+1]\n del self.args[i+1]\n\n elif arg == \"--seed\" and i != len(self.args)-1:\n try:\n x = float(self.args[i+1])\n except ValueError:\n x = 0\n 
self.results['seed'] = (4*x)/(x**2+1)\n del self.args[i+1]\n\n # Handle the AI argument\n elif arg == '--enableSpecialAI':\n self.results['specialAI'] = True\n\n # Print a warning message if an unknown argument is given\n else:\n print('[WARNING] Unknown argument: {}'.format(arg))\n del self.args[i]", "title": "" }, { "docid": "8f85d9a49454f9cedb7845e8c5a2ccd1", "score": "0.57694006", "text": "def checkArgs(self):\n missing = 0;\n if self.input == None:\n self.help()\n \n self.base, self.ext = os.path.splitext(self.input)\n if self.output == None:\n self.output = self.base\n self.trajFile = self.output + \"-traj\" + self.ext\n self.fftrajFile = self.output + \"-fftraj\" + self.ext\n self.selFile = self.output + \"-selections.ndx\"", "title": "" }, { "docid": "fe10d3c49fc7590006afe777e4836609", "score": "0.5766129", "text": "def parse_args(args):\n parser = argparse.ArgumentParser(\n description=\"Color information script.\")\n parser.add_argument(\n \"--version\",\n action=\"version\",\n version=\"twoisprime {ver}\".format(ver=__version__))\n parser.add_argument(\n \"-i\",\n \"--image\",\n dest=\"image\",\n help=\"path to image file\",\n type=str,\n metavar=\"STR\")\n parser.add_argument(\n \"-r\",\n \"--resolution\",\n dest=\"resolution\",\n help=\"resize resolution\",\n type=int,\n metavar=\"INT\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n dest=\"loglevel\",\n help=\"set loglevel to INFO\",\n action=\"store_const\",\n const=logging.INFO)\n parser.add_argument(\n \"-vv\",\n \"--very-verbose\",\n dest=\"loglevel\",\n help=\"set loglevel to DEBUG\",\n action=\"store_const\",\n const=logging.DEBUG)\n return parser.parse_args(args)", "title": "" }, { "docid": "ad6f89be89d54be756adde0c2bf660db", "score": "0.5763624", "text": "def parseArgs(self):\n self.addArguments()\n self.args = self.parser.parse_args()", "title": "" }, { "docid": "58e79b754d277b9e27a305f92ba460c6", "score": "0.5762653", "text": "def __fn_set_args(self):\n self.argparser = argparse.ArgumentParser(\n description = 'A tool for performing pattern matching against '\n + 'applications.',\n epilog = 'Note that this tool has only been '\n + 'tested with Python 3.7+. '\n + 'Will NOT work with versions less than 3.4.\\n'\n )\n self.argparser.add_argument(\n '-f',\n '--folder',\n type = str,\n action = 'store',\n help = 'folder containing apps to be analysed '\n + '(or to which apps should be pulled, '\n + 'when used with the -p flag). '\n + 'Provide absolute path to folder as argument.'\n )\n self.argparser.add_argument(\n '-p',\n '--platform',\n choices = ['android'],\n action = 'store',\n nargs = '?',\n const = self.pull_source,\n help = 'the type of files/platform to be analysed, '\n + 'e.g., android. Only android is currently '\n + 'supported. Support for other files to be added.'\n )\n self.argparser.add_argument(\n '-e',\n '--extract',\n choices = ['device', 'ext4', 'img'],\n action = 'store',\n nargs = '?',\n const = self.pull_source,\n help = 'extract Android apps from connected device '\n + 'or system image. '\n + 'Only relevant when platform=android. '\n + 'Use \"-e device\" to pull APKs from device (default). '\n + '(Make sure that only one Android device '\n + 'is connected and that it is unlocked.) '\n + 'Use \"-e ext4\" to extract applications '\n + 'from an ext4 system image. '\n + 'Use \"-e img\" to pull applications from '\n + 'a .img system image. '\n + 'Apps get pulled to <root>/apps/ directory '\n + 'or to folder specified with the -f option. 
'\n + 'If pulling from system image, the image '\n + 'must be in this folder as well.'\n )\n self.argparser.add_argument(\n '-g',\n '--graph',\n choices = ['neo4j', 'visjs', 'both'],\n action = 'store',\n nargs = '?',\n const = self.graph_type,\n help = 'show results on graph. '\n + 'Use \"-g neo4j\" to output to a Neo4j database. '\n + 'Requires that a Neo4j database be up and '\n + 'running on http://localhost:7474 '\n + 'with username:neo4j and password:n3o4j '\n + '(or user-specified values from config). '\n + 'Or use \"-g visjs\" to create a vis.js network in html '\n + 'that can be viewed from the output folder. '\n + 'Or use \"-g both\" to generate both.'\n )\n self.argparser.add_argument(\n '--dump',\n default = False,\n action=\"store_true\",\n help = 'only dump all apps from phone '\n )", "title": "" }, { "docid": "95486109a365397cb4509da3835d8819", "score": "0.5758391", "text": "def parse_args():\n parser = argparse.ArgumentParser('train.py')\n add_arg = parser.add_argument\n add_arg('--device', default='cpu')\n add_arg('--config', default='configs/train_config_big.yaml')\n add_arg('--out_dir_colab', default='')\n return parser.parse_args()", "title": "" }, { "docid": "f0bdd62ceb85705e71c9cc348786a830", "score": "0.57562786", "text": "def _parse_args():\n parser = ArgumentParser(\n description='''start paddle training using multi-process mode.\nNOTE: your train program ***must*** run as distributed nccl2 mode,\nsee: http://www.paddlepaddle.org/documentation/docs/zh/1.6/user_guides/howto/training/cluster_howto.html#permalink-8--nccl2-\nAnd your train program must read environment variables below in order to let different\nprocess init properly:\nFLAGS_selected_gpus\nPADDLE_TRAINER_ID\nPADDLE_CURRENT_ENDPOINT\nPADDLE_TRAINERS_NUM\nPADDLE_TRAINER_ENDPOINTS\nPOD_IP (current node ip address, not needed for local training)\n''')\n\n #Optional arguments for the launch helper\n parser.add_argument(\n \"--cluster_node_ips\",\n type=str,\n default=\"127.0.0.1\",\n help=\"Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..\")\n parser.add_argument(\n \"--node_ip\",\n type=str,\n default=\"127.0.0.1\",\n help=\"The current node ip. \")\n parser.add_argument(\n \"--use_paddlecloud\",\n action='store_true',\n help=\"wheter to use paddlecloud platform to run your multi-process job. If false, no need to set this argument.\"\n )\n parser.add_argument(\n \"--started_port\",\n type=int,\n default=None,\n help=\"The trainer's started port on a single node\")\n\n parser.add_argument(\n \"--print_config\",\n type=bool,\n default=True,\n help=\"Print the config or not\")\n\n parser.add_argument(\n \"--selected_gpus\",\n type=str,\n default=None,\n help=\"It's for gpu training and the training process will run on the selected_gpus,\"\n \"each process is bound to a single GPU. 
And if it's not set, this module will use all the gpu cards for training.\"\n )\n\n parser.add_argument(\n \"--log_level\",\n type=int,\n default=20, # logging.INFO, details are here:https://docs.python.org/3/library/logging.html#levels\n help=\"Logging level, default is logging.INFO\")\n\n parser.add_argument(\n \"--log_dir\",\n type=str,\n help=\"The path for each process's log.If it's not set, the log will printed to default pipe.\"\n )\n\n #positional\n parser.add_argument(\n \"training_script\",\n type=str,\n help=\"The full path to the single GPU training \"\n \"program/script to be launched in parallel, \"\n \"followed by all the arguments for the \"\n \"training script\")\n\n #rest from the training program\n parser.add_argument('training_script_args', nargs=REMAINDER)\n return parser.parse_args()", "title": "" }, { "docid": "321805b7a8b933955d75b86a45f97d9d", "score": "0.5755121", "text": "def parse_args():\n\tparser = argparse.ArgumentParser(description='Inputs from command line.')\n\tparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"Increase output verbosity\")\n\tparser.add_argument(\"--alg\",type=str,choices=[\"kNN\"],default=\"kNN\",required=False,\n help=\"Prediction algorithm\")\n\tparser.add_argument(\"--train\", \n\t\thelp='Read in training set and train algorithm',action=\"store_true\")\n\tglobal args\n\targs = parser.parse_args()", "title": "" }, { "docid": "1b58c6ab16949dcbbf00cfbe0d421562", "score": "0.5751931", "text": "def parse_args():\n parser = argparse.ArgumentParser(\n description='Convert CVAT XML annotations to PASCAL VOC format'\n )\n parser.add_argument(\n '--annotations-dir', metavar='DIR', required=False, default=ANNOTATIONS_DIR_,\n help='Directory where annotations XML are stored'\n )\n parser.add_argument(\n '--imgs-root-dir', metavar='DIR', required=False, default=IMGROOTDIR_,\n help='Directory where original images are stored'\n )\n parser.add_argument(\n '--no-hash', dest='dohash', action='store_false', required=False,\n help='Do not create a hash from xml_or_img_basename.'\n )\n parser.add_argument(\n '--hash', dest='dohash', action='store_true', required=False,\n help='Create a hash from xml_or_img_basename to shorten length of path'\n )\n parser.add_argument(\n 'xml_or_img_basename', metavar='IMG_n_XML_BASE',\n help='XML name without .xml (which is also used as base directory path for original images)'\n )\n parser.set_defaults(dohash=CREATE_HASH_)\n return parser.parse_args()", "title": "" }, { "docid": "76f308b4e1a153f48c2cfd5acff0c180", "score": "0.5749224", "text": "def parse_args():\n parser = argparse.ArgumentParser(description='Run the longitudinal MS lesion segmentation docker.')\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n '-f', '--old',\n dest='old_path', default='/data/longitudinal/',\n help='Option to use the old pipeline in the production docker. The second parameter should be the folder where'\n 'the patients are stored.'\n )\n group.add_argument(\n '-d', '--devel',\n dest='new_path', default=None,\n help='Option to use the old pipeline in the development docker. The second parameter should be the folder where'\n 'the patients are stored.'\n )\n group.add_argument(\n '-t', '--train',\n dest='train_path', default=None,\n help='Option to train the logistic regression model. The second parameter should be the folder where'\n 'the patients are stored.'\n )\n group.add_argument(\n '-T', '--test',\n dest='test_path', default=None,\n help='Option to test a logistic regression model. 
The second parameter should be the folder where'\n 'the patients are stored.'\n )\n group.add_argument(\n '-l', '--leave-one-out',\n dest='loo_path', default=None,\n help='Option to use the logistic regression model with leave-one-out cross-validation. The second parameter'\n ' should be the folder where the patients are stored.'\n )\n group.add_argument(\n '-g', '--gui',\n dest='gui_path', default=None,\n help='Option to use a gui to setup parameters for segmentation and select the method. The second parameter'\n ' should be the folder where the patients are stored.'\n )\n parser.add_argument(\n '-B', '--time1',\n dest='time1', default='time1',\n help='Name of the baseline folder'\n )\n parser.add_argument(\n '-F', '--time2',\n dest='time2', default='time2',\n help='Name of the followup folder'\n )\n parser.add_argument(\n '-u', '--user',\n dest='user', default=None,\n help='Name of the followup folder'\n )\n return vars(parser.parse_args())", "title": "" }, { "docid": "6feeeb0ca882e33a69859fc640cc9063", "score": "0.5747352", "text": "def process_args(args):\n global input_file, parameters_file, output_file, cascade_file, gui\n if args.input:\n input_file = Path(args.input)\n if args.parameters:\n parameters_file = Path(args.parameters)\n else:\n parameters_file = input_file.parent / (input_file.stem + \"_analysis.prm\")\n\n if args.output:\n output_file = Path(args.output)\n else:\n output_file = input_file.parent / (input_file.stem + \"_analysis.csv\")\n\n if args.cascade:\n cascade_file = Path(args.cascade)\n\n if not input_file.exists():\n sys.exit(\"Video file not found!\")\n\n if not cascade_file.exists():\n sys.exit(\"Haar Cascade file missing!\")\n\n print(\"\\nInput file '{}'\".format(input_file))\n print(\"Parameters file '{}'\".format(parameters_file))\n print(\"Output file '{}'\".format(output_file))\n print(\"Cascade file '{}'\".format(cascade_file))\n\n else:\n # Run GUI if no arguments given\n print(\"Running Blinky GUI\")\n gui = True", "title": "" }, { "docid": "f33b937b3f67f81999a424078007e940", "score": "0.57469356", "text": "def parse_args(args):\n parser = argparse.ArgumentParser(description=\"Lung Image Segmentation Using UNet Architecture\")\n parser.add_argument(\n \"-i\",\n \"--input_dir\",\n default=os.getcwd(),\n help=\"directory where input files will be read from\"\n )\n\n parser.add_argument(\n \"-o\",\n \"--output_dir\",\n default=os.getcwd(),\n help=\"directory where output files will be written to\"\n )\n \n parser.add_argument('-epochs', metavar='num_epochs', type=int, default = 5, help = \"Number of training epochs\")\n parser.add_argument('--batch_size', metavar='batch_size', type=int, default = 16, help = \"Batch Size\")\n\n return parser.parse_args(args)", "title": "" }, { "docid": "152a56a7ccea012f3d99047ae253df26", "score": "0.5745745", "text": "def parse_args(args, file_name):\n\t\tpref = file_name.split(\".\")[0]", "title": "" } ]
674908d1db925dbcc1adc35da09ca96b
get target network actions from all the agents in the MADDPG object
[ { "docid": "28a5892506b36c1f6f35cd172af47eef", "score": "0.72622454", "text": "def target_act(self, obs_all_agents, noise=0.0):\n target_actions = [agent.target_act(obs) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions", "title": "" } ]
[ { "docid": "3a6f213e3e825c2203028c4d9e180f76", "score": "0.731483", "text": "def target_act(self, obs_all_agents, noise=0.0):\n target_actions = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return target_actions", "title": "" }, { "docid": "a354e9f36f1b3bece21468aa56d7e77d", "score": "0.69447255", "text": "def get_target_actors(self):\n target_actors = [agent.actor_target for agent in self.maddpg_agent]\n return target_actors", "title": "" }, { "docid": "6b91bd0ca3d79607a8d163f92118f0c5", "score": "0.68338233", "text": "def get_target_actors(self):\n target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]\n return target_actors", "title": "" }, { "docid": "f566743c7a1fec7c7fbcd5de408cbd2c", "score": "0.66721165", "text": "def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions", "title": "" }, { "docid": "f566743c7a1fec7c7fbcd5de408cbd2c", "score": "0.66721165", "text": "def act(self, obs_all_agents, noise=0.0):\n actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]\n return actions", "title": "" }, { "docid": "b5d6973e53e068f91ef050a2ea00d0b9", "score": "0.6554561", "text": "def act(self, all_states):\n actions = [agent.act(np.expand_dims(states, axis=0)) for agent, states in zip(self.ddpg_agents, all_states)]\n return actions", "title": "" }, { "docid": "140759c5781217a09a87d1030ee43d31", "score": "0.63089156", "text": "def get_targets(self):\r\n return self._get_list('target')", "title": "" }, { "docid": "025ecac8f94d045d027daa2b7497e493", "score": "0.6172595", "text": "def act(self, states, add_noise=True):\n actions = []\n for i, agent in enumerate(self.agents):\n action = agent.act(states[i], add_noise)\n actions.append(action)\n return actions", "title": "" }, { "docid": "afc626d419fe5aba2bfe69bb5db43179", "score": "0.6151647", "text": "def act(self, states, add_noise=True):\n actions = []\n for state, agent in zip(states, self.agents):\n action = agent.act(state, add_noise)\n actions.append(action)\n return actions", "title": "" }, { "docid": "3fd8e35928b04214c35a004ac3fef1cd", "score": "0.6144805", "text": "def get_agent_list(self):\n return self.agents", "title": "" }, { "docid": "c8056ecee4617011bd64171af1afdd5a", "score": "0.6105672", "text": "def get_targets(self, env, executor):\r\n return self.targets", "title": "" }, { "docid": "6cabb143b548cd4f1577cc370679a602", "score": "0.6097117", "text": "def get_targets(self):\n return self.targets", "title": "" }, { "docid": "149e4c689c06b97b52ca5e1fd85eba7d", "score": "0.6094627", "text": "def get_actors(self):\n actors = [agent.actor for agent in self.maddpg_agent]\n return actors", "title": "" }, { "docid": "a4a7f874e2a99a5e4c3b5b37f7aa0d7c", "score": "0.607446", "text": "def targets(self):\n return []", "title": "" }, { "docid": "a4a7f874e2a99a5e4c3b5b37f7aa0d7c", "score": "0.607446", "text": "def targets(self):\n return []", "title": "" }, { "docid": "f743104ed2f24787c9f27ef6a01c97b3", "score": "0.60497135", "text": "def get(self):\n wazuhlogin()\n agents = []\n for r in callWazuhApi(\"/agents\")[\"data\"][\"affected_items\"]:\n # Manage PC는 제외\n if r[\"id\"] == '000' or r[\"status\"] == \"never_connected\":\n continue\n agents.append(\n {\"name\": r[\"name\"], \"ip\": r[\"ip\"], \"status\": r[\"status\"]})\n return agents", "title": "" }, { "docid": "1c7c23e4a35696b07df0a429230e73a8", "score": "0.60398275", "text": 
"def get_actors(self):\n actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]\n return actors", "title": "" }, { "docid": "d9a8b61a09d6dd32a06d485027faf96a", "score": "0.59938645", "text": "def _get_actions():\n action = ActionOutput(port=1)\n return [action]", "title": "" }, { "docid": "d7ac1172e34188acf9c6d6fa3eaafa21", "score": "0.598213", "text": "def update_targets(self):\n for i in range(self.nb_agent):\n self.session.run(self.agents[i].update_policy_target + self.agents[i].update_critic_target)", "title": "" }, { "docid": "4931a6ed77744831866f7b1f14fdbdbf", "score": "0.5974635", "text": "def get_all_targets(self):\r\n result = []\r\n for batch in self.batches:\r\n result.extend(batch.targets)\r\n return result", "title": "" }, { "docid": "41d5b383302ee4835562b969f34b774a", "score": "0.5947765", "text": "def get_actions(node, actions):\n if node.delegate.__name__ == \"maya_reference\":\n actions[\"attach_geocache\"] = Action(\"Attach Geocache...\",\n func=attach_geocache, args=[node],\n params=[Param(\"geocache\", Param.Revision)])\n elif node.delegate.__name__ == \"maya_scene\":\n actions[\"load_all_geocaches\"] = Action(\"Load all geocaches (and texture assets)\",\n func=load_all_geocaches, args=[node])\n actions[\"load_one_geocache\"] = Action(\"Load one geocache (and texture asset)\",\n func=load_one_geocache, args=[node],\n params=[Param(\"geocache\", Param.Revision)])\n return actions", "title": "" }, { "docid": "6d85a0fb1d099b5fd6412919548f43ec", "score": "0.5933231", "text": "def getTargets(self):\n return self.targets", "title": "" }, { "docid": "f74fca8b61ab8618eef2e65bae405482", "score": "0.59286726", "text": "def act(self, states, add_noise=False):\n actions = []\n for agent, state in zip(self.agents, states):\n action = agent.act(state, add_noise=True)\n actions.append(action)\n return np.array(actions)", "title": "" }, { "docid": "11c68259a9c9c3a943b17edf9f0dd1c9", "score": "0.58726245", "text": "def calculate_agent_relevant_vars(self, agent):\n res = set()\n for action in self.actions:\n if agent in self.utils.extract_agents_from_action(action):\n res = res.union(set(self.utils.extract_objectives_from_action(action, self.metadata)))\n return res", "title": "" }, { "docid": "9f383e173345c6af33b020fb17e9ce0e", "score": "0.58670264", "text": "def select_actor_action(self, env_output, agent_output):", "title": "" }, { "docid": "1ca6756d3576658f900ca76056bfc449", "score": "0.58454883", "text": "def get_all_actions(self):\n return self.actions", "title": "" }, { "docid": "2f1549f5e7912f7622c37e11e1382860", "score": "0.5829516", "text": "def actions(self)->list:\n return self._actions", "title": "" }, { "docid": "0bb4df116a8781a064b868384c776840", "score": "0.5806846", "text": "def agent_action(self, agent):\n pass", "title": "" }, { "docid": "0e91886688e0e66172985f05b878f1cc", "score": "0.5782202", "text": "def get_actions(self):\n\t\treturn self.get_instructions().get(\"actions\",{})", "title": "" }, { "docid": "c409bdde0e33621a6412bcd87cf6bd73", "score": "0.5755443", "text": "def __find_accelerator_targets(self):\n idlist = []\n for el in self.doc.getElementsByTagName(\"accelerator\"):\n textelm = self.doc.get_first_child_with_tagname(el, \"text\")\n idlist.append(textelm.getAttribute(\"id\"))\n idtargets = self.doc.get_targets(idlist)\n for xmlid, targetid in zip(idlist, idtargets):\n request = \"UPDATE %s SET target=\\\"%s\\\" WHERE xmlid = \\\"%s\\\"\" % (self.table, targetid, xmlid)\n self.session.execute_query(request)", "title": "" }, { "docid": 
"659d1e48e5749fdd96009c18b809330f", "score": "0.5743816", "text": "def act(self, states, add_noise=True):\n# assert (states.shape[0] == self.__debug_num_agents), 'Mismatch dim of states.shape[0]'\n \n actions = None\n \n for s, agent in zip(states, self.agent_list):\n \n s = np.expand_dims(s, axis=0)\n# pdb.set_trace()\n \n action = agent.act(s)\n \n # expand dim from (2,) to (1, 2)\n# action = np.expand_dims(action, axis=0)\n\n if actions is None:\n actions = action\n else:\n actions = np.append(actions, action, axis=0)\n \n\n# pdb.set_trace()\n \n# assert (actions.shape[0] == self.__debug_num_agents), 'Mismatch dim of actions.shape[0]'\n# assert (actions.shape[0] == self.__debug_action_size), 'Mismatch dim of actions.shape[0]'\n \n return actions", "title": "" }, { "docid": "e934b7c49964136945f011f6051d0230", "score": "0.57281744", "text": "def act(self, observations, add_noise=False):\n \n # Action list.\n actions = []\n \n for agent, observation in zip(self.agents, observations):\n action = agent.act(observation, add_noise=add_noise)\n actions.append(action)\n \n # Return the actions.\n return np.array(actions)", "title": "" }, { "docid": "ceb8b6ad0bfeafce5e974a945c0725d8", "score": "0.57242846", "text": "def get_list_of_actions(self):\n pass", "title": "" }, { "docid": "62d36f015301b4676e751357b0b21691", "score": "0.572404", "text": "def get_agent(self):", "title": "" }, { "docid": "383f22de3180c7117f4a5d49406c42a6", "score": "0.57232916", "text": "def agents(self):\n agents = self.list()\n return [name for name in agents if name != 'Pyro.NameServer']", "title": "" }, { "docid": "8a9ee85a5e699685684a7e2ecb01a635", "score": "0.57180834", "text": "def get_all_targets(gmp):\n\n all_targets = {}\n targets_list = []\n temp_target = {}\n targets = gmp.get_targets()\n tree = ET.fromstring(targets)\n\n pretty_print(all_targets)\n\n for name in tree.findall('target'):\n target_name = name.find('name').text\n # print(target_name)\n target_id = name.attrib['id']\n\n temp_target['name'] = target_name\n temp_target['id'] = target_id\n\n # print(temp_target)\n\n targets_list.append(temp_target)\n temp_target = {}\n # print(targets_list)\n all_targets['targets'] = targets_list\n\n return all_targets", "title": "" }, { "docid": "f08e2b8dccd1bfb5efbc229cb730a6c9", "score": "0.57048726", "text": "def update_all_targets(self):\n for a in self.agents:\n soft_update(a.target_critic, a.critic, self.tau)\n soft_update(a.target_actor, a.actor, self.tau)", "title": "" }, { "docid": "0275700b70c415573955a47861606ee8", "score": "0.5679149", "text": "def take_actions(self, agent_actions, agent_type=\"hide\"):\n if agent_type == \"hide\":\n list_agents = self.list_hiders\n agent_type = 2\n else:\n list_agents = self.list_seekers\n agent_type = 3\n for agent_idx in range(len(list_agents)):\n action = agent_actions[agent_idx]\n start_coord = list_agents[agent_idx]\n dest_coord = list(start_coord)\n if action == 0:\n pass\n elif action == 1: # up is x-1\n dest_coord[0] -= 1\n elif action == 2: # right is y+1\n dest_coord[1] += 1\n elif action == 3: # down is x+1\n dest_coord[0] += 1\n elif action == 4: # left is y-1\n dest_coord[1] -= 1\n elif action == 5: # upper right is x-1 y+1\n dest_coord[0] -= 1\n dest_coord[1] += 1\n elif action == 6: # lower right is x+1 y+1\n dest_coord[0] += 1\n dest_coord[1] += 1\n elif action == 7: # lower left is x+1 y-1\n dest_coord[0] += 1\n dest_coord[1] -+ 1\n elif action == 8: # upper left is x-1 y-1\n dest_coord[0] -= 1\n dest_coord[1] -= 1\n if self.can_move(dest_coord):\n 
self.world[tuple(start_coord)] = 0\n self.world[tuple(dest_coord)] = agent_type\n list_agents[agent_idx] = list(dest_coord)", "title": "" }, { "docid": "0e01ee70bb634bc966091560e1cb76ea", "score": "0.5667807", "text": "def getActions(self):\n return []", "title": "" }, { "docid": "3b31ddd136dfa462d6464c1e4b21b314", "score": "0.56556284", "text": "def get_action(self,agent,env):\n data=self.set_input(agent,env)\n names=self.agents[agent][\"name\"]\n grid_now=0\n memory=self.agents[agent][\"memory\"]\n for index, name in np.ndenumerate(names):\n use_data=[]\n now=index[0]\n policy=names[now]+\"Policy\"\n demand_add=data[1]\n grid_total=data[3,1]\n if index[0]==0:\n grid_now=grid_total\n else:\n t_grid_sell=self.grid_sell_call(names[now-1],env.hour)/1000\n grid_now=grid_now - t_grid_sell\n use_data.append(sum(demand_add[now:]))\n use_data.append(grid_now)\n use_idata=list(data[:,now])\n used_data=use_data+use_idata\n used_data=np.reshape(used_data,[1,7])\n state=copy.copy(used_data)\n action=self.agents[agent][policy].choose_action(state)\n self.implement_action(names[now],env,action)\n next_data=self.set_next_put(agent,env)\n use_indata=list(next_data[:,now])\n n_state=use_data+use_indata\n n_state=np.reshape(n_state,[1,7])\n reward=self.cal_ireward(agent,names[now],env)\n g_reward=self.cal_greward(env,names)\n self.agents[agent][policy].learn_act(used_data,reward,n_state,env.done,g_reward,memory)", "title": "" }, { "docid": "9d57951fbefd6a10bd7f809169c71088", "score": "0.56456274", "text": "def get_n_actions(self, agent_cls_name):", "title": "" }, { "docid": "58cb9c46aef8d12d0396129384206446", "score": "0.56407756", "text": "def getActions(self):\n return self.actions", "title": "" }, { "docid": "61395539bc8dbaef774602ad7ffdcc26", "score": "0.5635525", "text": "def agents(self):\n proxy = NSProxy(self.addr)\n agents = proxy.list()\n proxy.release()\n return [name for name in agents if name != 'Pyro.NameServer']", "title": "" }, { "docid": "abe6baef479e6e2488a3709506d9bcb4", "score": "0.56270427", "text": "def get(self):\n current_app.edm.ensure_initialed()\n targets = list(current_app.edm.targets)\n return targets", "title": "" }, { "docid": "ddb173803bba3d389581272cd5003222", "score": "0.56253606", "text": "def get_actions(self):\n\n return [action for action in dir(self.house)\n if callable(getattr(self.house, action))\n and re.match(\"action.*\", action)]", "title": "" }, { "docid": "48b311ea64b8e1ea235e4ef5abf1a8a8", "score": "0.5606772", "text": "def get_actions(self, state):\n return [self.v[n] for n in self.e[state[0]]]", "title": "" }, { "docid": "c5537370aa0efbc6fa21df2dbdbb7b3f", "score": "0.55921876", "text": "def targets(self, session, entity_id):\n url = utils.urljoin('entities', entity_id, 'agent',\n 'check_types', self.id, 'targets')\n resp = session.get(url, endpoint_filter=self.service).json()\n return resp['values']", "title": "" }, { "docid": "29e09df46893202398c6f3bcd41c0b48", "score": "0.55905575", "text": "def getActions(self):\n return self.actions[:]", "title": "" }, { "docid": "29e09df46893202398c6f3bcd41c0b48", "score": "0.55905575", "text": "def getActions(self):\n return self.actions[:]", "title": "" }, { "docid": "2cd5b5c39eb665364ecf371428a7a3f3", "score": "0.55883086", "text": "def actions(self) -> Optional[Sequence['outputs.NetworkTapRuleActionResponse']]:\n return pulumi.get(self, \"actions\")", "title": "" }, { "docid": "a3b7497eb8c30f7f96078e9a1235a3ee", "score": "0.5568111", "text": "def get_all_mlb_target(self):\n response = self.send_get(\n 
url=self.__base_lb_url +\n \"/target\",\n entity_name=\"mlb target\"\n )\n\n formatted_response = self.convert_json(\n response, self.camel_to_underscore)\n\n retVal = formatted_response[\"response\"][\"items\"]\n\n return retVal", "title": "" }, { "docid": "9be390021d6ccb3367802cf20831d1f2", "score": "0.55623126", "text": "def getActions(self):\n\t\traise Exception(\"Not implemented\")", "title": "" }, { "docid": "e59c03bba1b378e55194cbabb22398ef", "score": "0.5552745", "text": "def __call__(self, state, action):\n return state.get_fragile_agent(self.agent_id)", "title": "" }, { "docid": "8554d08fd12086006a4c5cf18c73638c", "score": "0.55444986", "text": "def take_actions(self, agent_actions, agent_type=\"hide\"):\n if agent_type == \"hide\":\n list_agents = self.list_hiders\n agent_type = 2\n else:\n list_agents = self.list_seekers\n agent_type = 3\n for agent_idx in range(len(list_agents)):\n action = agent_actions[agent_idx]\n start_coord = list_agents[agent_idx]\n dest_coord = list(start_coord)\n if action == 0:\n pass\n elif action == 1:\n dest_coord[0] -= 1\n elif action == 2:\n dest_coord[1] += 1\n elif action == 3:\n dest_coord[0] += 1\n elif action == 4:\n dest_coord[1] -= 1\n\n if self.can_move(dest_coord):\n self.world[tuple(start_coord)] = 0\n self.world[tuple(dest_coord)] = agent_type\n list_agents[agent_idx] = list(dest_coord)", "title": "" }, { "docid": "8554d08fd12086006a4c5cf18c73638c", "score": "0.55444986", "text": "def take_actions(self, agent_actions, agent_type=\"hide\"):\n if agent_type == \"hide\":\n list_agents = self.list_hiders\n agent_type = 2\n else:\n list_agents = self.list_seekers\n agent_type = 3\n for agent_idx in range(len(list_agents)):\n action = agent_actions[agent_idx]\n start_coord = list_agents[agent_idx]\n dest_coord = list(start_coord)\n if action == 0:\n pass\n elif action == 1:\n dest_coord[0] -= 1\n elif action == 2:\n dest_coord[1] += 1\n elif action == 3:\n dest_coord[0] += 1\n elif action == 4:\n dest_coord[1] -= 1\n\n if self.can_move(dest_coord):\n self.world[tuple(start_coord)] = 0\n self.world[tuple(dest_coord)] = agent_type\n list_agents[agent_idx] = list(dest_coord)", "title": "" }, { "docid": "19561486fa468bfc4a4a5c2f48d2892d", "score": "0.55441236", "text": "def actions(self):\n return self._actions", "title": "" }, { "docid": "19561486fa468bfc4a4a5c2f48d2892d", "score": "0.55441236", "text": "def actions(self):\n return self._actions", "title": "" }, { "docid": "63c795a24ccc4800d5d818111e83add0", "score": "0.55200624", "text": "def actions(self,state):\n return self.actionset", "title": "" }, { "docid": "f01b7f179af335f11ee02cfb36f036e9", "score": "0.5519792", "text": "def _add_actions(self, actions: List[str]):\n for agent_id, action in enumerate(actions):\n self._actions[agent_id].add(action)", "title": "" }, { "docid": "7ac733e6028c49503738f99faad7b9df", "score": "0.5519576", "text": "def actors(self):\n from .actor import MobileAttckActor\n return_list = []\n item_dict = {}\n for item in self.mobile_attck_obj['objects']:\n if 'type' in item:\n if item['type'] == 'intrusion-set':\n item_dict[item['id']] = item\n try:\n for item in self._RELATIONSHIPS[self.stix]:\n if item in item_dict:\n return_list.append(MobileAttckActor(**item_dict[item]))\n except:\n pass\n return return_list", "title": "" }, { "docid": "71d727c11a08f87e10d484435b4108ca", "score": "0.55190223", "text": "def select(self):\n return self.agents, self.connections", "title": "" }, { "docid": "aa83f10faf921dfcec1dc6eeeeaba8ce", "score": "0.5517152", "text": "def 
agents(self):\n ret = []\n for player in self.players:\n if player.isComputer: continue\n try:\n if player.observer: continue\n except: pass\n ret.append(player)\n return ret", "title": "" }, { "docid": "aa83f10faf921dfcec1dc6eeeeaba8ce", "score": "0.5517152", "text": "def agents(self):\n ret = []\n for player in self.players:\n if player.isComputer: continue\n try:\n if player.observer: continue\n except: pass\n ret.append(player)\n return ret", "title": "" }, { "docid": "183afe3300e5ec670fcce10063d29132", "score": "0.550783", "text": "def step(self, action):\n self.step_count += 1\n\n reward = [None] * self.agents.n_agents\n done = False\n\n if len(action) != self.agents.n_agents:\n print('len of actions and # of agents is not same')\n #TODO: check o/p with return\n return\n\n #initializing lists for multi-agent\n left_pos = [None] * len(action)\n left_cell = [None] * len(action)\n right_pos = [None] * len(action)\n right_cell = [None] * len(action)\n up_pos = [None] * len(action)\n up_cell = [None] * len(action)\n down_pos = [None] * len(action)\n down_cell = [None] * len(action)\n\n #cell direction contents\n #(m.a- TODO: make self.left_pos and all list instead of single value)\n left_pos = self.left_pos\n right_pos = self.right_pos\n up_pos = self.up_pos\n down_pos = self.down_pos\n\n for i in range(len(action)):\n left_cell[i] = self.grid.get(left_pos[i][0],left_pos[i][1])\n right_cell[i] = self.grid.get(right_pos[i][0],right_pos[i][1])\n up_cell[i] = self.grid.get(up_pos[i][0],up_pos[i][1])\n down_cell[i] = self.grid.get(down_pos[i][0],down_pos[i][1])\n\n if action[i] == self.actions.left:\n if (left_cell[i] == None or left_cell[i].can_overlap()) and tuple(left_pos[i]) not in self.agents.agent_pos.values():\n self.agents.agent_pos[i] = tuple(left_pos[i])\n elif action[i] == self.actions.right:\n if (right_cell[i] == None or right_cell[i].can_overlap()) and tuple(right_pos[i]) not in self.agents.agent_pos.values():\n self.agents.agent_pos[i] = tuple(right_pos[i])\n elif action[i] == self.actions.up:\n if (up_cell[i] == None or up_cell[i].can_overlap()) and tuple(up_pos[i]) not in self.agents.agent_pos.values():\n self.agents.agent_pos[i] = tuple(up_pos[i])\n elif action[i] == self.actions.down:\n if (down_cell[i] == None or down_cell[i].can_overlap()) and tuple(down_pos[i]) not in self.agents.agent_pos.values():\n self.agents.agent_pos[i] = tuple(down_pos[i])\n else:\n assert False, \"unknown action\"\n\n #determine reward\n reward = self._reward(self.reward_type)\n\n #update coverage map and trajectory\n self.coverage_map = self.update_coverage_map()\n self.trajectory = self.update_trajectory()\n\n #set cell as covered\n for i in range(self.agents.n_agents):\n self.grid.set(self.agents.agent_pos[i][0],self.agents.agent_pos[i][1],None)\n\n #check if all cells covered\n grid_str = self.__str__()\n if 'U' not in grid_str:\n done = True\n\n #generate new obs\n obs = self.gen_obs()\n\n return obs, reward, done, self.agents, {}", "title": "" }, { "docid": "71cb65672cd8cad25802ac6e783f4a9f", "score": "0.5481487", "text": "def get_actions(self, obs_n, avail_actions_n, greedy=False):\n with torch.no_grad():\n dists_n = self.forward(obs_n, avail_actions_n)\n actions_n = dists_n.sample().numpy()\n if not greedy:\n actions_n = dists_n.sample().numpy()\n else:\n actions_n = np.argmax(dists_n.probs.numpy(), axis=-1)\n agent_infos_n = {}\n agent_infos_n['action_probs'] = [dists_n.probs[i].numpy() \n for i in range(len(actions_n))]\n return actions_n, agent_infos_n", "title": "" }, { "docid": 
"2a3c9fffade807d1e8979c7ade194723", "score": "0.54788566", "text": "def get_next_actions(self) -> list:\n pass", "title": "" }, { "docid": "6cbb2aa6ad6feddfffe89c70c5edd0c4", "score": "0.54705447", "text": "def targets(self):\n return self.properties.get(\"Targets\", None)", "title": "" }, { "docid": "32368faf1ce8e26bccf2d6734b6ece11", "score": "0.54650867", "text": "def get_targets_object(self):\n return self.move.targets", "title": "" }, { "docid": "f7811d471ab5fbbbb40a0b48e4291cef", "score": "0.5453524", "text": "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n self.agentCount = gameState.getNumAgents()\n depth = 0\n agent = self.index #0 = pacman, >0 é fantasma\n actionDic = {}\n actions = gameState.getLegalActions(agent)\n actions.remove(Directions.STOP)\n for a in actions:\n pontuacao = self.minimax(gameState.generateSuccessor(agent,a),agent+1,depth+1)\n actionDic[pontuacao] = a\n\n return actionDic[max(actionDic)]", "title": "" }, { "docid": "bc35fbd592e8171a9d9004249b93178b", "score": "0.545094", "text": "def Targets(self):\r\n return str(self.targets)", "title": "" }, { "docid": "40f2951b680263823c21e229f3fc9a89", "score": "0.54363203", "text": "def get_actions(self):\n def load_actions():\n \"\"\"Create all concrete Load actions and return a list\n\n :return: list of Action objects\n \"\"\"\n loads = []\n for p in self.planes:\n for a in self.airports:\n for c in self.cargos:\n precond_pos = [expr(\"At({}, {})\".format(c, a)),\n expr(\"At({}, {})\".format(p, a))]\n # remove this for tests where plane can load more than cargo\n precond_neg = [expr(\"In({}, {})\".format(c1, p)) for c1 in self.cargos]\n effect_add = [expr(\"In({}, {})\".format(c, p))]\n effect_rem = [expr(\"At({}, {})\".format(c, a))]\n act = Action(expr(\"Load({}, {}, {})\".format(c, p, a)),\n [precond_pos, precond_neg],\n [effect_add, effect_rem])\n loads.append(act)\n\n return loads\n\n def unload_actions():\n \"\"\"Create all concrete Unload actions and return a list\n\n :return: list of Action objects\n \"\"\"\n unloads = []\n # create all Unload ground actions from the domain Unload action\n for p in self.planes:\n for a in self.airports:\n for c in self.cargos:\n precond_pos = [expr(\"In({}, {})\".format(c, p)),\n expr(\"At({}, {})\".format(p, a))]\n\n effect_add = [expr(\"At({}, {})\".format(c, a))]\n effect_rem = [expr(\"In({}, {})\".format(c, p))]\n act = Action(expr(\"Unload({}, {}, {})\".format(c, p, a)),\n [precond_pos, []],\n [effect_add, effect_rem])\n unloads.append(act)\n\n return unloads\n\n def fly_actions():\n \"\"\"Create all concrete Fly actions and return a list\n\n :return: list of Action objects\n \"\"\"\n flys = []\n\n for fr in self.airports:\n for to in self.airports:\n if fr != to:\n for p in self.planes:\n precond_pos = [expr(\"At({}, {})\".format(p, fr))]\n # precond_neg = []\n effect_add = [expr(\"At({}, {})\".format(p, to))]\n effect_rem = [expr(\"At({}, {})\".format(p, fr))]\n fly = Action(expr(\"Fly({}, {}, {})\".format(p, fr, to)),\n [precond_pos, []],\n [effect_add, effect_rem])\n flys.append(fly)\n return flys\n data = load_actions() + unload_actions() + fly_actions()\n shuffle(data)\n return data", "title": "" }, { "docid": "075d55cdb92368ea96791db6a8d4fc21", "score": "0.5421246", "text": "def get_actions(self, state):\n return self.game.child_actions(state)", "title": "" }, { "docid": "3a5ed22879581ca9ee5bdde8aae1045a", "score": "0.5417561", "text": "def test_step_MultiplePlayer_SelectSource_MultipleActionType_MultipleAction_Agent_DONE(test_name):\n env = 
build_test_env(\n test_name,\n \"tests/gdy/test_step_MultiPlayer_SelectSource_MultipleActionType_MultipleAction.yaml\"\n )\n\n assert len(env.observation_space) == 2\n assert len(env.action_space) == 2\n\n assert env.global_observation_space.shape == (2, 5, 6)\n\n for p in range(env.player_count):\n assert env.observation_space[p].shape == (2, 5, 6)\n assert env.action_space[p].shape == (4,)\n assert np.all(env.action_space[p].nvec == [5, 6, 2, 5])\n\n obs, reward, done, info = env.step([\n [\n [1, 3, 0, 1],\n [3, 4, 1, 3],\n ],\n None,\n\n ])\n\n assert obs[0].shape == (2, 5, 6)\n assert reward[0] == 1\n assert obs[1].shape == (2, 5, 6)\n assert reward[1] == 0\n assert not done\n assert info == {}\n\n player1_avatar1_state = get_object_state(env, 'avatar1', player=1)\n player1_avatar2_state = get_object_state(env, 'avatar2', player=1)\n\n assert player1_avatar1_state['Location'] == [0, 3]\n assert player1_avatar2_state['Location'] == [4, 4]\n\n object_names = env.game.get_object_names()\n avartar1_id = object_names.index('avatar1')\n avartar2_id = object_names.index('avatar2')\n\n assert obs[0][avartar1_id, 0, 3] == 1\n assert obs[0][avartar2_id, 4, 4] == 1\n\n player2_avatar1_state = get_object_state(env, 'avatar1', player=2)\n player2_avatar2_state = get_object_state(env, 'avatar2', player=2)\n\n assert player2_avatar1_state['Location'] == [3, 3]\n assert player2_avatar2_state['Location'] == [1, 4]\n\n avartar1_id = object_names.index('avatar1')\n avartar2_id = object_names.index('avatar2')\n\n assert obs[1][avartar1_id, 3, 3] == 1\n assert obs[1][avartar2_id, 1, 4] == 1\n\n sample = env.action_space.sample()\n assert len(sample) == 2\n assert sample[0].shape == (4,)\n assert sample[1].shape == (4,)", "title": "" }, { "docid": "a7621574c77d370596d90d364043ef98", "score": "0.54089314", "text": "def target_groups(self) -> pulumi.Input[Sequence[pulumi.Input['ListenerDefaultActionForwardTargetGroupArgs']]]:\n return pulumi.get(self, \"target_groups\")", "title": "" }, { "docid": "db56fc79e33e9dc739156393a68d27de", "score": "0.5404294", "text": "def actions(self, A: str):\n return list(self.graph.get(A).keys())", "title": "" }, { "docid": "0933ae41d13832dc1e53b3050a61fbcd", "score": "0.540145", "text": "def test_step_MultiplePlayer_SelectSource_MultipleActionType_Agent_DONE(test_name):\n env = build_test_env(\n test_name,\n \"tests/gdy/test_step_MultiPlayer_SelectSource_MultipleActionType.yaml\"\n )\n\n assert len(env.observation_space) == 2\n assert len(env.action_space) == 2\n\n assert env.global_observation_space.shape == (1, 5, 6)\n assert env.game.get_object_names() == ['avatar']\n\n for p in range(env.player_count):\n assert env.observation_space[p].shape == (1, 5, 6)\n assert env.action_space[p].shape == (4,)\n assert np.all(env.action_space[p].nvec == [5, 6, 2, 5])\n\n obs, reward, done, info = env.step([\n [1, 3, 0, 1],\n None,\n ])\n\n assert obs[0].shape == (1, 5, 6)\n assert reward[0] == 0\n assert obs[1].shape == (1, 5, 6)\n assert reward[1] == 0\n assert not done\n assert info == {}\n\n player1_avatar_state = get_object_state(env, 'avatar', player=1)\n player2_avatar_state = get_object_state(env, 'avatar', player=2)\n\n assert player1_avatar_state['Location'] == [0, 3]\n assert player2_avatar_state['Location'] == [3, 3]\n\n sample = env.action_space.sample()\n assert len(sample) == 2\n assert sample[0].shape == (4,)\n assert sample[1].shape == (4,)", "title": "" }, { "docid": "fdfd90bb70f4de618ce9ac6e27ca4a7d", "score": "0.54008913", "text": "def getAgentList(self):\n return 
self.optionsDialog.agentsList", "title": "" }, { "docid": "91ed2adb20f742c6520d26818e47d53c", "score": "0.53992814", "text": "def get_targets(self, conf):\r\n targets = []\r\n exchanges = [\r\n conf.nova_control_exchange,\r\n conf.cinder_control_exchange,\r\n conf.glance_control_exchange,\r\n conf.neutron_control_exchange,\r\n conf.heat_control_exchange,\r\n conf.keystone_control_exchange,\r\n conf.sahara_control_exchange,\r\n conf.trove_control_exchange,\r\n ]\r\n\r\n for exchange in exchanges:\r\n targets.extend(oslo.messaging.Target(topic=topic,\r\n exchange=exchange)\r\n for topic in conf.notification_topics)\r\n return targets", "title": "" }, { "docid": "eb9d971cde33e7ded645c0cfe405972a", "score": "0.5398509", "text": "def _advance_by_action(game, agents, action):\n getLogger(__name__).debug(\"Agent {} action {}\".format(game.current_agent_id, action))\n agent_id_for_action = game.current_agent_id\n\n game.take_action(action)\n for agent in agents:\n agent.take_action(action, agent.agent_id == agent_id_for_action)", "title": "" }, { "docid": "8f1d26eefac59f0ea4795982ee7fbb25", "score": "0.53978044", "text": "def do_targets(self, arg):\n print(\"Targets:\")\n for target in self.call.get_targets():\n print(\" {}\".format(target.name))", "title": "" }, { "docid": "229499a43cced575dd8c4ef38a83c147", "score": "0.5395092", "text": "def targets(self) -> List[DsTarget]:\n return self.__targets", "title": "" }, { "docid": "f2f9401052a1975e32d7547e347adce4", "score": "0.5389769", "text": "def target_groups(self) -> pulumi.Input[Sequence[pulumi.Input['ListenerRuleActionForwardTargetGroupArgs']]]:\n return pulumi.get(self, \"target_groups\")", "title": "" }, { "docid": "645e5c83098e04c676c2ea1e0f9572b9", "score": "0.53832823", "text": "def legal_actions(self):\r\n actions = self.env.getLegalMoves()\r\n return actions", "title": "" }, { "docid": "fef2eca0bd7627970e08c9eb0e2725ae", "score": "0.53789955", "text": "def targets(self) -> Optional[pulumi.Input['TargetPropertiesArgs']]:\n return pulumi.get(self, \"targets\")", "title": "" }, { "docid": "ebc7df4098df64767c36af29808b279e", "score": "0.53756773", "text": "def step(self, actions): \n for remote, action in zip(self.remotes, actions):\n remote.send(('step', action))\n results = [remote.recv() for remote in self.remotes]\n obs, rews, dones, infos = zip(*results) \n return np.stack(obs), np.stack(rews), np.stack(dones), infos", "title": "" }, { "docid": "190e42ed17ffcf7e70e18b1bb07ce68d", "score": "0.5375208", "text": "def step(\n self, actions: ActionDict\n ) -> tuple[\n ObsDict,\n dict[AgentID, float],\n dict[AgentID, bool],\n dict[AgentID, bool],\n dict[AgentID, Any],\n ]:\n # assert that the actions _must_ have actions for all agents\n assert len(actions) == len(\n self.agents\n ), f\"Must have actions for all {len(self.agents)} agents, currently only found {len(actions)}.\"\n\n timestep = self._env.step(actions.values())\n\n obs, rewards, terminations, truncations, infos = _unravel_ma_timestep(\n timestep, self.agents\n )\n\n if self.render_mode == \"human\":\n self.viewer.render(self.render_mode)\n\n if any(terminations.values()) or any(truncations.values()):\n self.agents = []\n\n return obs, rewards, terminations, truncations, infos", "title": "" }, { "docid": "4569bb1efbe0f568cb376ff46cb84cdb", "score": "0.5357963", "text": "def eval(self, action_list, env_output_list, agent_output=None):", "title": "" }, { "docid": "a173877adc75926f80d640e125a5fb9b", "score": "0.5354866", "text": "def target_names(self) -> List[Text]:\n return 
[node_name for node_name, node in self.nodes.items() if node.is_target]", "title": "" }, { "docid": "a9f210ff9030bd5d376c456848a8c843", "score": "0.5348946", "text": "def getTargets(self):\n return [b for b in grok.getSite()['clients'].values()]", "title": "" }, { "docid": "e95a9f8e071619acc3e9da4e27de6e5c", "score": "0.5346781", "text": "def actions(self, state):\n if state in self.terminals:\n return [None]\n else:\n\t\t\treturn self.actlist", "title": "" }, { "docid": "1d2f4a3abf0ac763a804a035d3f1fb49", "score": "0.53437185", "text": "def update_all_targets(self):\n for a in self.agents:\n soft_update(a.target_critic, a.critic, self.tau)\n soft_update(a.target_policy, a.policy, self.tau)\n self.niter += 1", "title": "" }, { "docid": "7eb3f82a5789c8153a5442130554d251", "score": "0.5343542", "text": "def solution(self):\n return [node.action for node in self.path()[1:]]", "title": "" }, { "docid": "7eb3f82a5789c8153a5442130554d251", "score": "0.5343542", "text": "def solution(self):\n return [node.action for node in self.path()[1:]]", "title": "" }, { "docid": "7eb3f82a5789c8153a5442130554d251", "score": "0.5343542", "text": "def solution(self):\n return [node.action for node in self.path()[1:]]", "title": "" }, { "docid": "7eb3f82a5789c8153a5442130554d251", "score": "0.5343542", "text": "def solution(self):\n return [node.action for node in self.path()[1:]]", "title": "" }, { "docid": "96a3614c7237c86df91275aae3aa7626", "score": "0.53383875", "text": "def act(self, state):\n actions = []\n for i in range(self.n_tanks):\n if self.action_delay_cnt[i] >= self.action_delay[i]:\n self.action_delay_cnt[i] = 0\n\n if np.random.rand() <= float(self.epsilon[i]): # Exploration\n random_action = np.random.uniform(0, 1)\n actions.append(random_action)\n else:\n action = self.act_greedy(state, i) # Exploitation\n actions.append(action)\n else:\n actions = self.actions\n self.action_delay_cnt[i] += 1\n self.actions = actions\n return self.actions", "title": "" }, { "docid": "d7e05ca70c2476b4033588ab48f45b09", "score": "0.5333902", "text": "def solution(self):\r\n return [node.action for node in self.path()[1:]]", "title": "" }, { "docid": "fd2129e0e27d83cdaec8b421948b2425", "score": "0.53297764", "text": "def process_actions(self, n_steps, actions):\n # Each row of actions is one time step,\n # row contains action indices for all agents\n # Convert to [time, agents, l_action]\n # so each agent gets its own 1-hot row vector\n actions_1hot = np.zeros([n_steps, self.n_agents, self.l_action], dtype=int)\n grid = np.indices((n_steps, self.n_agents))\n actions_1hot[grid[0], grid[1], actions] = 1\n # Convert to format [time*agents, agents-1, l_action]\n # so that the set of <n_agent> actions at each time step\n # is duplicated <n_agent> times, and each duplicate\n # now contains all <n_agent>-1 actions representing\n # the OTHER agents actions\n list_to_interleave = []\n for n in range(self.n_agents):\n # extract all actions except agent n's action\n list_to_interleave.append( actions_1hot[:, np.arange(self.n_agents)!=n, :] )\n # interleave\n actions_others_1hot = np.zeros([self.n_agents*n_steps, self.n_agents-1, self.l_action])\n for n in range(self.n_agents):\n actions_others_1hot[n::self.n_agents, :, :] = list_to_interleave[n]\n # In-place reshape of actions to [time*n_agents, l_action]\n actions_1hot.shape = (n_steps*self.n_agents, self.l_action)\n\n return actions_1hot, actions_others_1hot", "title": "" }, { "docid": "a5dba12bcf4080361bef280a3403bc00", "score": "0.5323123", "text": "def 
_get_target_values(self, next_states, goal_states, rewards, dones):\n\n with torch.no_grad():\n # get next actions via target actor and noise\n next_target_actions = self.nets[\"actor_target\"](next_states, goal_states)\n noise = (\n torch.randn_like(next_target_actions) * self.algo_config.actor.noise_std\n ).clamp(-self.algo_config.actor.noise_clip, self.algo_config.actor.noise_clip)\n next_actions = (next_target_actions + noise).clamp(-1.0, 1.0)\n\n # TD3 trick to combine max and min over all Q-ensemble estimates into single target estimates\n all_value_targets = self.nets[\"critic_target\"][0](next_states, next_actions, goal_states).reshape(-1, 1)\n max_value_targets = all_value_targets\n min_value_targets = all_value_targets\n for critic_target in self.nets[\"critic_target\"][1:]:\n all_value_targets = critic_target(next_states, next_actions, goal_states).reshape(-1, 1)\n max_value_targets = torch.max(max_value_targets, all_value_targets)\n min_value_targets = torch.min(min_value_targets, all_value_targets)\n value_targets = self.algo_config.critic.ensemble.weight * min_value_targets + \\\n (1. - self.algo_config.critic.ensemble.weight) * max_value_targets\n q_targets = rewards + dones * self.discount * value_targets\n\n return q_targets", "title": "" }, { "docid": "e4fdccb67c62f9df9764b7ea75ff782e", "score": "0.5318843", "text": "def select_targets(immune, infection):\n\n immune.sort(key=lambda x: x[\"initiative\"], reverse=True)\n immune.sort(key=lambda x: x[\"power\"], reverse=True)\n infection.sort(key=lambda x: x[\"initiative\"], reverse=True)\n infection.sort(key=lambda x: x[\"power\"], reverse=True)\n\n result = []\n aa = select_group_targets(immune, infection)\n bb = select_group_targets(infection, immune)\n result.extend(aa)\n result.extend(bb)\n return result", "title": "" } ]
1f7fdb3fa2fb40d9710441a1a0f17b84
Gathers recursively all inertial object children from the specified object. The inertial objects need to be in the specified objectlist to be included. Also, links which are not in the objectlist will be considered, too. This will gather inertial objects which are children of a link not in the list.
[ { "docid": "369087941b79d35fd4c096e7677171b3", "score": "0.8509039", "text": "def gatherInertialChilds(obj, objectlist):\n\n # only gather the links that are not in the list\n childlinks = [\n link for link in obj.children if link.phobostype == 'link' and link not in objectlist\n ]\n # only gathe the inertials that are in the list\n inertialobjs = [\n inert for inert in obj.children if inert.phobostype == 'inertial' and inert in objectlist\n ]\n\n inertials = []\n for inertial in inertialobjs:\n # log inertials with errors\n errors, *_ = validateInertiaData(inertial, adjust=True)\n if errors:\n for error in errors:\n error.log()\n\n # add inertial to list\n inertials.append(inertial)\n\n # collect optional inertials in sublinks\n for link in childlinks:\n if link.children:\n inertials.extend(gatherInertialChilds(link, objectlist=objectlist))\n\n return inertials", "title": "" } ]
[ { "docid": "43c145cbaf3e9fe77a99d33e6097f801", "score": "0.598192", "text": "def _object_list_helper(self, **params):\n\n if not self.is_folder:\n raise SolveError(\n \"Only folders contain child objects. This is a {}\"\n .format(self.object_type))\n\n params['vault_id'] = self.vault_id\n if 'recursive' in params:\n params['ancestor_id'] = self.id\n params['limit'] = 1000\n else:\n params['parent_object_id'] = self.id\n\n items = self.all(client=self._client, **params)\n return items", "title": "" }, { "docid": "ef90aae6be940ad73bb88c92086ba437", "score": "0.5755927", "text": "def __find_object_children(self, obj) -> dict:\r\n if hasattr(obj, 'items') and \\\r\n isinstance(obj.items, types.BuiltinFunctionType):\r\n return self.__construct_object(obj)\r\n elif isinstance(obj, (list, tuple, set)):\r\n return self.__construct_list(obj)\r\n else:\r\n exclude_list = []\r\n if hasattr(obj, '_sa_instance_state'):\r\n # load only deferred objects\r\n if len(orm.attributes.instance_state(obj).unloaded) > 0:\r\n mapper = inspect(obj)\r\n for column in mapper.attrs:\r\n column.key\r\n column.value\r\n if hasattr(obj, 'json_exclude_list'):\r\n # do not serialize any values in this list\r\n exclude_list = obj.json_exclude_list\r\n return self.__construct_object(vars(obj), exclude_list)\r\n return None", "title": "" }, { "docid": "926e0b0cbc5cb578d3f97bda974b5341", "score": "0.55532104", "text": "def resolve_object_links(self, obj):\n out = [obj]\n for d in self.obj_deps[obj]:\n out.extend(self.resolve_module_objects(d))\n return out", "title": "" }, { "docid": "1ea1e6517ea0d4b92816d3658e810f3c", "score": "0.5515404", "text": "def get_object_children(cmis_id):", "title": "" }, { "docid": "6f327d86d24964c954a378a69e0c8c21", "score": "0.52572906", "text": "def list_children(self, obj):\n if obj not in self._children_cache:\n children = self._children_cache[obj] = {}\n\n for name, child in super(_AugmentedGraphView, self).list_children(\n obj,\n save_type=base.SaveType.SAVEDMODEL,\n cache=self._serialization_cache):\n if isinstance(child, defun.ConcreteFunction):\n child = self._maybe_uncache_variable_captures(child)\n children[name] = child\n\n # Keep track of untraced functions for later reporting to the user.\n if isinstance(obj, def_function.Function) and not children:\n self.untraced_functions.append(obj.name)\n\n for name, child in self._children_cache[obj].items():\n yield base.TrackableReference(name, child)", "title": "" }, { "docid": "bb0ac4f82c222c55d6f78d89ce17d4b8", "score": "0.5213967", "text": "def get_children ( self, obj ):\n return [ ( obj, node ) for node in self.nodes ]", "title": "" }, { "docid": "ed24dea5a04a9322d7bde0afbb34023b", "score": "0.5153937", "text": "def get_referents(object: Any, level: int = 1) -> List[Any]:\n res = gc.get_referents(object)\n level -= 1\n if level > 0:\n for o in res:\n res.extend(get_referents(o, level))\n res = _remove_duplicates(res)\n return res", "title": "" }, { "docid": "00c7d63312048104d24973d02c84fb6e", "score": "0.51479757", "text": "def allows_children ( self, obj ):\n return True", "title": "" }, { "docid": "2796f69b8654926ef3f3c09fa304aa19", "score": "0.5088696", "text": "def visit_object(self, obj, visited_objects):\n\n if not isinstance(visited_objects, list):\n raise TypeError\n\n user_list = []\n if obj not in visited_objects:\n query = \"(objectClass=*)\"\n result = self.search(query,\n base=obj,\n scope=ldap.SCOPE_BASE,\n attributes=[])[0]\n\n if 'person' in result.objectClass:\n user_list.extend(result.sAMAccountName)\n elif 
hasattr(result, 'member'):\n for child_obj in result.member:\n user_list.extend(self.visit_object(child_obj, visited_objects))\n\n visited_objects.append(result.dn)\n return user_list", "title": "" }, { "docid": "29b98373692e419c7ed53002efa11864", "score": "0.5085008", "text": "def has_children ( self, obj ):\n return True", "title": "" }, { "docid": "485b3efd2e0b2a25f2ce4c92c7adcf2c", "score": "0.50683117", "text": "def transform_object_list(object_type: str, object_list=None):\n if not isinstance(object_list, list):\n return None\n\n transformed_list = []\n for object in object_list:\n transformed_list.append(transform_object(object_type, object))\n return transformed_list", "title": "" }, { "docid": "961d3ff17f16cf1647975df04e8be463", "score": "0.49726304", "text": "def walkobj(obj, # pylint: disable=R0912,R0913\n gen_leaf = False,\n gen_nonleaf = False,\n gen_path = False,\n gen_obj = False,\n path = (),\n memo = None):\n # If the object is elemental, it cannot be\n # decomposed, so we must bottom out the\n # recursion and yield the object and its'\n # path before returning control back up\n # the stack.\n #\n is_itable = isinstance(obj, collections.Iterable)\n is_leaf = (not is_itable) or is_string(obj)\n\n if is_leaf:\n if gen_leaf:\n if gen_path and gen_obj:\n yield (path, obj)\n elif gen_path:\n yield path\n elif gen_obj:\n yield obj\n return\n\n # Since this is a recursive function, we need\n # to be on our guard against any references\n # to objects back up the call stack (closer\n # to the root of the tree). Any such\n # references would be circular, leading to\n # an infinite tree, and causing us to blow\n # our stack in a fit of unbounded recursion.\n #\n # If we detect that we've already visited\n # this object (using identity not equality),\n # then the safe thing to do is to halt the\n # recursive descent and return control back\n # up the stack.\n #\n _id = id(obj)\n if memo is None:\n memo = set()\n if _id in memo:\n return\n memo.add(_id)\n\n # If the object is not elemental (i.e. it is\n # an Iterable), then it may be decomposed, so\n # we should recurse down into each component,\n # yielding the results as we go. Of course,\n # we need different iteration functions for\n # mappings vs. 
other iterables.\n #\n def mapiter(mapping):\n \"\"\"\n Return an iterator over the specified mapping or other iterable.\n\n This function selects the appropriate\n iteration function to use.\n\n \"\"\"\n return getattr(mapping, 'iteritems', mapping.items)()\n itfcn = mapiter if isinstance(obj, collections.Mapping) else enumerate\n\n for pathpart, component in itfcn(obj):\n\n childpath = path + (pathpart,)\n if gen_nonleaf:\n if gen_path and gen_obj:\n yield (childpath, component)\n elif gen_path:\n yield childpath\n elif gen_obj:\n yield component\n\n for result in walkobj(obj = component,\n gen_leaf = gen_leaf,\n gen_nonleaf = gen_nonleaf,\n gen_path = gen_path,\n gen_obj = gen_obj,\n path = childpath,\n memo = memo):\n yield result\n\n # We only need to guard against infinite\n # recursion within a branch of the call-tree.\n # There is no danger in visiting the same item\n # instance in sibling branches, so we can\n # forget about objects once we are done\n # with them and about to pop the stack.\n #\n memo.remove(_id)\n return", "title": "" }, { "docid": "bd234f800ada529bb492dc96f3a4f8b0", "score": "0.4888756", "text": "def checkForChildren(self, obj):\n iface = self.interface(obj)\n return (len(iface) > 3 and iface[3])", "title": "" }, { "docid": "5e7cfb8575c54f380ecc7ae107699a2a", "score": "0.48699003", "text": "def DNARemoveChildren(dnaObject):\n children = []\n for i in range(dnaObject.getNumChildren()):\n children.append(dnaObject.at(i))\n for child in children:\n dnaObject.remove(child)\n DNASTORE.removeDNAGroup(child)", "title": "" }, { "docid": "302456a8f2087077351ac9a1d3dbd746", "score": "0.4848284", "text": "def __getitem__(self, obj):\n\n # check for previously unknown obj\n assert obj in self.parents, f\"Object {obj} not in parents\"\n\n # find path of objects leading to the root\n path = [obj]\n root = self.parents[obj]\n while root != path[-1]:\n path.append(root)\n root = self.parents[root]\n\n # compress the path and return\n for ancestor in path:\n self.parents[ancestor] = root\n return root", "title": "" }, { "docid": "24d6ef109ce63e4932db4b35a4d14540", "score": "0.48310173", "text": "def list_all(obj: Union[list, object]) -> list:\n if isinstance(obj, list):\n return gather_lists([list_all(x) for x in obj])\n # return sum([list_all(x) for x in obj], [])\n return [obj]", "title": "" }, { "docid": "0481ead8f08487c73b01fb3f8be179cd", "score": "0.48044986", "text": "def cascade_iterator(self, type, object, recursive=None, halt_on=None):\n\n return iter([])", "title": "" }, { "docid": "abd9b4874a0c9eb304280c487fb817e7", "score": "0.47941947", "text": "def __dfsSort(self, object, sortedList):\n\n if not self.__isMarked(object):\n self.__mark(object)\n\n for influencer in self.getInfluencers(object):\n self.__dfsSort(influencer, sortedList)\n\n sortedList.append(object)", "title": "" }, { "docid": "ea1272fc40279cc90b3d1ace53280ff5", "score": "0.4784849", "text": "def _clear_obj_inlinks(self, objid):\n if objid in self._inlinks:\n inlinks = self._inlinks[objid]\n for i in inlinks:\n if objid in self._outlinks[i]:\n self._outlinks[i].remove(objid)\n self._inlinks[objid] = set()", "title": "" }, { "docid": "a16f8c31074bbca210699c558a9f5bf3", "score": "0.47740746", "text": "def collect_objects(obj, identifiers=('id', '_id')):\n if isinstance(obj, (list, tuple, set)):\n generator = (collect_objects(item, identifiers) for item in obj)\n return [item for item in generator if item != None and item != []]\n elif isinstance(obj, dict):\n if is_object(obj):\n return obj\n else:\n for 
key, value in obj.items():\n result = collect_objects(value, identifiers)\n if result != None and result != []:\n return result\n return []", "title": "" }, { "docid": "19ae8c4eeffd8f0b26710ce30cb3db13", "score": "0.47663996", "text": "def remove_obj_edges(self, objid):\n if objid in self._inlinks:\n inlinks = self._inlinks[objid]\n for a in inlinks:\n del self.edges[a, objid]\n\n if objid in self._outlinks:\n outlinks = self._outlinks[objid]\n for a in outlinks:\n del self.edges[objid, a]\n\n self._remove_obj_from_edges_index(objid)", "title": "" }, { "docid": "0e24b4f0ab23d5e41f7809eca1c3adc5", "score": "0.4756287", "text": "def getIncomingLinks(obj=None, intid=None):\n catalog = getUtility(ICatalog)\n if intid is not None:\n return catalog.findRelations({\n 'to_id': intid,\n 'from_attribute': referencedRelationship})\n else:\n intids = getUtility(IIntIds)\n return catalog.findRelations({\n 'to_id': intids.getId(obj),\n 'from_attribute': referencedRelationship})", "title": "" }, { "docid": "1052f1acab120fee991868f7b83c666e", "score": "0.4753535", "text": "def get_children(self, obj):\n filtered_courses_id = self.context.get('filtered_courses_id')\n if filtered_courses_id:\n children = obj.get_children().filter(pk__in=filtered_courses_id)\n else:\n children = obj.get_children()\n return ChildCategoryListSerializer(children, many=True).data", "title": "" }, { "docid": "6b76de218f25c575f7e37b57db1c7448", "score": "0.47486457", "text": "def add(self, *objects):\n for obj in objects:\n if obj not in self.context.children:\n self.context.children.append(obj)\n self.context._dirty = True\n self._add(*obj.references())", "title": "" }, { "docid": "4a060c32471dc607d8524345d2618a6e", "score": "0.473293", "text": "def set_candidate_object_list(self, candidate_objects):\n return None", "title": "" }, { "docid": "15d72b406bb789c17313e64034c6a94c", "score": "0.47054482", "text": "def __get_all_objects(self, obj):\r\n\t\tresult = \"<Object Name=\\\"%s\\\" ID=\\\"%s\\\" Type=\\\"%s\\\">\\n\" % (obj.original_name, obj.id, obj.type.id)\r\n\t\t# attributes\r\n\t\tresult += \"<Attributes>\\n\"\r\n\t\tfor a in obj.get_attributes().values():\r\n\t\t\tresult += u\"<Attribute Name=\\\"%s\\\">%s</Attribute>\\n\" % (a.name, self.__attrvalue(a.original_value))\r\n\t\tresult += \"</Attributes>\\n\"\r\n#\t\tresult += \"<Script>%s</Script>\\n\" % obj.script\r\n\t\t# put all child objects\r\n\t\tresult += \"<Objects>\\n\"\r\n\t\tfor o in obj.get_objects_list():\r\n\t\t\tresult += self.__get_all_objects(o)\r\n\t\tresult += \"</Objects>\\n\"\r\n\t\tresult += self.__get_code_interface(obj)\r\n\t\tresult += \"</Object>\\n\"\r\n\t\treturn result", "title": "" }, { "docid": "c6beb6018b9ab088b029b357299f3630", "score": "0.46974126", "text": "def visitList(self, obj, *args, **kwargs):\n for value in obj:\n self.visit(value, *args, **kwargs)", "title": "" }, { "docid": "3c71632de83150b1c8b105378ad4a215", "score": "0.4631788", "text": "def add_all( obj, obj_list ):\n if isinstance( obj, Signal ):\n obj_list.append( obj )\n elif isinstance( obj, list ): # SORRY\n for i, o in enumerate( obj ):\n add_all( o, obj_list )", "title": "" }, { "docid": "3a48e52c8354bc260f731c59b1d5e4d7", "score": "0.46281424", "text": "def calculateInertia(obj, mass, geometry, errors=None, adjust=False, logging=False):\n if errors and not adjust:\n if logging:\n log(\"Can not calculate inertia from object.\", 'ERROR')\n return None\n\n inertia = None\n\n # Get the rotation of the object\n object_rotation = np.array(obj.rotation_euler.to_matrix())\n\n if 
isinstance(geometry, representation.Box):\n inertia = calculateBoxInertia(mass, geometry.size)\n elif isinstance(geometry, representation.Cylinder):\n inertia = calculateCylinderInertia(mass, geometry.radius, geometry.length)\n elif isinstance(geometry, representation.Sphere):\n inertia = calculateSphereInertia(mass, geometry.radius)\n elif isinstance(geometry, representation.Mesh):\n sUtils.selectObjects((obj,), clear=True, active=0)\n inertia = calculateMeshInertia(mass, obj.data, scale=obj.scale)\n\n # Correct the inertia orientation to account for Cylinder / mesh orientation issues\n inertia = object_rotation.dot(inertiaListToMatrix(inertia)).dot(object_rotation.transpose())\n\n return inertiaMatrixToList(inertia)", "title": "" }, { "docid": "bf0726fc1b33ac15128e0c86985f5692", "score": "0.46047118", "text": "def recursive_get_related(obj, user, parent_objs=None, acc=None): # NOQA C901\n if acc is None:\n acc = defaultdict(set)\n\n if parent_objs is None:\n parent_objs = []\n\n model = obj.__class__\n\n # Go through every relation (except the ones marked as skip) and collect\n # all of the referenced items.\n skip_relations = getattr(model, \"recursive_get_related_skip_relations\", [])\n\n # relations = (\n # f for f in model._meta.get_fields(include_hidden=True)\n # if f.is_relation and f.name not in skip_relations\n # )\n #\n for relation in model._meta.get_fields(include_hidden=True):\n if (\n not relation.is_relation\n or not relation.name\n or relation.name in skip_relations\n ):\n continue\n\n accessor_name = relation.name\n if hasattr(relation, \"get_accessor_name\"):\n accessor_name = relation.get_accessor_name()\n\n # Skip relations that don't have backwards reference\n if accessor_name.endswith(\"+\"):\n continue\n\n # Skip relations to a parent model\n if relation.related_model in (po.__class__ for po in parent_objs):\n continue\n\n if relation.concrete or isinstance(relation, OneToOneRel):\n # Get value as-is if relation is a foreign key or a one-to-one relation\n if not hasattr(obj, accessor_name):\n continue\n concrete_item = getattr(obj, accessor_name)\n if not concrete_item:\n continue\n all_items = [concrete_item]\n else:\n # Otherwise get all instances from the related manager\n related_manager = getattr(obj, accessor_name)\n\n if not hasattr(related_manager, \"all\"):\n continue\n\n # Include soft deleted objects\n if hasattr(related_manager, \"all_with_deleted\"):\n all_items = related_manager.all_with_deleted()\n else:\n all_items = related_manager.all()\n\n # Model permission check\n permission_name = \"{}.view_{}\".format(\n relation.model._meta.app_label, relation.model._meta.model_name\n )\n has_permission = user.has_perm(permission_name)\n\n for item in all_items:\n # Include item only if user has permission, but recurse into sub items regardless\n if has_permission:\n acc[ContentType.objects.get_for_model(item)].add(item)\n\n parent_objs.append(obj)\n recursive_get_related(item, user=user, parent_objs=parent_objs, acc=acc)\n parent_objs.pop()\n\n return acc", "title": "" }, { "docid": "92366b9c681cf09e7a1945d1f56ebf36", "score": "0.46026844", "text": "def get_objects(self, item):\n\n objects = []\n\n if self._children != []:\n # we have children lets try and see if item fits in any of them\n index = self._get_node(item)\n\n if index != None:\n objects += self._children[index].get_objects(item)\n else:\n for child in self._children:\n objects += child.get_objects(item)\n\n objects += self._contents\n\n return objects", "title": "" }, { "docid": 
"0374c0051b92ec25b2f2a8e0429f4911", "score": "0.46020555", "text": "def updateRelations(obj, event):\n catalog = component.queryUtility(ICatalog)\n intids = component.queryUtility(IIntIds)\n\n if catalog is None or intids is None:\n return\n\n # check that the object has an intid, otherwise there's nothing to be done\n try:\n obj_id = intids.getId(obj)\n except KeyError:\n # The object has not been added to the ZODB yet\n return\n\n # remove previous relations coming from id (now have been overwritten) have\n # to activate query here with list() before unindexing them so we don't get\n # errors involving buckets changing size\n rels = list(catalog.findRelations({'from_id': obj_id}))\n for rel in rels:\n if hasattr(obj, rel.from_attribute):\n catalog.unindex(rel)\n\n # add new relations\n addRelations(obj, event)", "title": "" }, { "docid": "21bb932529a147afe50bc445cb183a1e", "score": "0.46006808", "text": "def _envelope_list(self, objects, pagination_metadata=None):\n return objects", "title": "" }, { "docid": "ee86b2119474620acdd961fc23a52a2b", "score": "0.46004298", "text": "def get_children ( self, obj ):\n return getattr( obj, self.children )", "title": "" }, { "docid": "4d681419e1242147b2bcafd7d6d7bab9", "score": "0.45998597", "text": "def query_children(self, in_mo=None, in_dn=None, class_id=None,\n hierarchy=False, timeout=None):\n\n from .imcmethodfactory import config_resolve_children\n\n if not in_mo and not in_dn:\n raise ValueError('[Error]: GetChild: Provide in_mo or in_dn.')\n\n if in_mo:\n parent_dn = in_mo.dn\n elif in_dn:\n parent_dn = in_dn\n\n meta_class_id = None\n # Setting the default class-id to None\n # When hierarchy and class-id are passed together to Cisco IMC,\n # an empty response is received.\n # Hence, passing the class-id only when hierarchy is not set\n # When both hierarchy and class-id are set, do local filtering for class-id\n if class_id and not hierarchy:\n meta_class_id = imccoreutils.find_class_id_in_mo_meta_ignore_case(\n class_id)\n if not meta_class_id:\n meta_class_id = class_id\n\n elem = config_resolve_children(cookie=self.cookie,\n class_id=meta_class_id,\n in_dn=parent_dn,\n in_hierarchical=hierarchy)\n\n response = self.post_elem(elem, timeout=timeout)\n if response.error_code != 0:\n raise ImcException(response.error_code, response.error_descr)\n\n out_mo_list = imccoreutils.extract_molist_from_method_response(response,\n hierarchy\n )\n if class_id and hierarchy:\n out_mo_list = imccoreutils.filter_molist_on_class_id(\n out_mo_list,\n class_id=class_id)\n return out_mo_list", "title": "" }, { "docid": "de4c1bee2c6a45c775490d31833e9e97", "score": "0.4599437", "text": "def addMember(self, object):\n if not self.hasMember(object):\n if not isinstance(object, CBD):\n node = DepNode(object)\n self.__dependents[object] = []\n self.__influencers[object] = []\n self.__semanticMapping[object] = node\n else:\n for block in object.getBlocks():\n self.addMember(block)\n else:\n raise ValueError(\"Specified object is already member of this graph\")", "title": "" }, { "docid": "d35857fdadba60b96d50e664d2856ae8", "score": "0.45899504", "text": "def ignore_done_list(obj):\n if not isinstance(obj, list): return obj\n\n new_list = []\n for val in obj:\n #print('list item: %s' % val)\n val = ignore_done_dict(val)\n val = ignore_done_list(val)\n\n if val: new_list.append(val)\n\n return new_list", "title": "" }, { "docid": "2a75c4caebf3edd6e5d8df78a423af90", "score": "0.4586856", "text": "def _get_subobjects(self) -> Iterable[SymbolicObject]:\n\n pass", 
"title": "" }, { "docid": "0f3879d75f1052d34e7a83504c25f829", "score": "0.45774323", "text": "def subtree_deeper(cls, object_id, limit_depth=1000000, flat=True,\n db_session=None):\n raw_q = \"\"\"\n WITH RECURSIVE subtree AS (\n SELECT res.*, 1 as depth, array[ordering] as sorting FROM\n resources res WHERE res.resource_id = :id\n UNION ALL\n SELECT res_u.*, depth+1 as depth,\n (st.sorting || ARRAY[res_u.ordering] ) as sort\n FROM resources res_u, subtree st\n WHERE res_u.parent_id = st.resource_id\n )\n SELECT * FROM subtree WHERE depth<=:depth ORDER BY sorting;\n \"\"\"\n db_session = get_db_session(db_session)\n q = db_session.query(cls).from_statement(raw_q).params(id=object_id,\n depth=limit_depth)\n return q", "title": "" }, { "docid": "a7261cb187e088c2a0de57c08fbcad91", "score": "0.45757172", "text": "def list_dependencies(self, obj):\n if obj not in self._children_cache:\n # Slot variables do not appear in the children_cache.\n children = {}\n else:\n children = self._children_cache[obj]\n for name, dep in obj._deserialization_dependencies(children).items(): # pylint: disable=protected-access\n if not isinstance(dep, base.Trackable):\n raise TypeError(\n f\"The dependency of type {type(dep)} is not an instance `Trackable`\"\n \", and can't be saved to SavedModel. Please check the \"\n \"implementation of `_deserialization_dependencies` in the parent \"\n f\"object {obj}.\")\n yield name, dep", "title": "" }, { "docid": "5532fca0c91a13082978904504791258", "score": "0.45625404", "text": "def add_children(obj, name, master, cnf={}):\n children = cnf.pop('CHILDREN', [])\n for name, options in children:\n child = getattr(obj, name)\n master.add(child, **options)", "title": "" }, { "docid": "0a9a1acfe76fb1d3d5e82c080cb771fc", "score": "0.45585668", "text": "def list_levels(obj: List[Any]) -> List:\n\n return [list_level(obj, d) for d in range(1, depth(obj) + 1)]", "title": "" }, { "docid": "4e048e13e56571fac355bfaa99ee9f9d", "score": "0.45409748", "text": "def _get_entries(obj):\n dirs = []\n entries = []\n if isinstance(obj, objects.ConfigObject):\n for name in obj.GetAllSlots():\n child = getattr(obj, name, None)\n if isinstance(child, (list, dict, tuple, objects.ConfigObject)):\n dirs.append(name)\n else:\n entries.append(name)\n elif isinstance(obj, (list, tuple)):\n for idx, child in enumerate(obj):\n if isinstance(child, (list, dict, tuple, objects.ConfigObject)):\n dirs.append(str(idx))\n else:\n entries.append(str(idx))\n elif isinstance(obj, dict):\n dirs = obj.keys()\n\n return dirs, entries", "title": "" }, { "docid": "4fcc963b96e1c2dc521cebe841372f62", "score": "0.45399207", "text": "def flatten_tree(node_list, node):\n for c in node.children:\n flatten_tree(node_list, c)\n\n # Don't be destructive.\n no_kids = copy.deepcopy(node)\n no_kids.children = []\n node_list.append(no_kids)", "title": "" }, { "docid": "9c5e86a1b19041727c2ab8856baf201d", "score": "0.45390347", "text": "def contains(self, obj):\n if obj.level < self.level + 1:\n return False\n while obj.level > self.level + 1:\n obj = obj.parent\n if obj is None:\n return False\n return obj in self", "title": "" }, { "docid": "2367db321c3b8764c500bef6328bc4f4", "score": "0.45328552", "text": "def define_children(list_of_nodes):\r\n\r\n # Check each node in the network\r\n # If the node has parents, find it and add the child node to its list of children\r\n for child_node in list_of_nodes:\r\n if child_node.parents:\r\n for parent in child_node.parents:\r\n parent_id, parent_connections = parent[0], parent[1] # Define 
for readability\r\n parent_node = get_node_by_id(parent_id)\r\n parent_node.add_child(child_node.id, parent_connections)", "title": "" }, { "docid": "f23478bd458ea664a3b92b73b607552f", "score": "0.45241043", "text": "def getAllConfmonObjects(base):\n from Products.ZenModel.ZenModelRM import ZenModelRM\n from Products.ZenModel.ZenModelBase import ZenModelBase\n from Products.ZenRelations.ToManyContRelationship \\\n import ToManyContRelationship\n from Products.ZenRelations.ToManyRelationship \\\n import ToManyRelationship\n from Products.ZenRelations.ToOneRelationship \\\n import ToOneRelationship\n\n def descend(obj):\n \"\"\"\n Function to determine whether or not to continue searching\n @param obj: object\n @type obj: object\n @return: True if we want to keep searching\n @rtype: boolean\n \"\"\"\n return (\n isinstance(obj, ZenModelBase) or\n isinstance(obj, ToManyContRelationship) or\n isinstance(obj, ToManyRelationship) or\n isinstance(obj, ToOneRelationship))\n\n def filter(obj):\n \"\"\"\n Filter function to decide whether it's an object we\n want to know about or not.\n\n @param obj: object\n @type obj: object\n @return: True if we want to keep it\n @rtype: boolean\n \"\"\"\n return isinstance(obj, ZenModelRM) and obj.id != \"dmd\"\n\n return getSubObjectsMemo(base, filter=filter, descend=descend)", "title": "" }, { "docid": "a56118c8d3771bdf3861fe479e27477f", "score": "0.45238638", "text": "def walkref(obj,lookfor=\"\", level = 0, dipest = 5, ignoreHidden=True):\n for i in dir(obj):\n try:\n if i == lookfor:\n return [i]\n if ignoreHidden and i.startswith(\"__\"):\n continue\n if level<dipest:\n data = walkref(getattr(obj,i),lookfor, level+1, dipest)\n if data:\n data.append(i)\n return data\n except:\n pass", "title": "" }, { "docid": "62a823faf0f219fe14b0ce54d37facdd", "score": "0.45237574", "text": "def get_children(self, refobj): #pragma: no cover\n raise NotImplementedError", "title": "" }, { "docid": "348203ce8f91110e142453bf645c219b", "score": "0.45089656", "text": "def recurse_crawl(url, object):\n if (len(object.dq) != 0):\n next_url = object.dq.pop()\n Actions.process_urls(next_url)", "title": "" }, { "docid": "2faf7cdea620ea1c54c792ed7ea0891f", "score": "0.4499787", "text": "def addObjects(self):\n\n self.normal = self.guide.blades[\"blade\"].z\n self.binormal = self.guide.blades[\"blade\"].x\n\n # Chain of deformers -------------------------------\n self.locList = []\n self.ctlList = []\n self.npoList = []\n parent = self.root\n\n self.jointList = []\n self.previusTag = self.parentCtlTag\n for i, t in enumerate(transform.getChainTransform2(self.guide.apos,\n self.normal,\n self.negate)):\n\n lvl = primitive.addTransform(parent, self.getName(\"%s_lvl\" % i), t)\n npo = primitive.addTransform(lvl, self.getName(\"%s_npo\" % i), t)\n loc = primitive.addTransform(npo, self.getName(\"%s_loc\" % i), t)\n jnt_parent = loc\n\n if self.settings[\"metaCtl\"]:\n\n if i:\n guide_loc_ref = \"{}_loc\".format(str(i - 1))\n else:\n guide_loc_ref = \"root\"\n meta_ctl = self.addCtl(loc,\n \"meta%s_ctl\" % i,\n t,\n self.color_ik,\n \"cube\",\n w=self.size * .2,\n h=self.size * .2,\n d=self.size * .2,\n tp=self.previusTag,\n guide_loc_ref=guide_loc_ref)\n\n self.ctlList.append(meta_ctl)\n self.previusTag = meta_ctl\n jnt_parent = meta_ctl\n\n if self.settings[\"jointChainCnx\"]:\n self.jnt_pos.append([jnt_parent, i])\n else:\n self.jnt_pos.append([jnt_parent, i, \"parent_relative_jnt\"])\n\n self.locList.append(loc)\n self.npoList.append(npo)\n if i == len(self.guide.apos) - 1:\n ctl_npo 
= primitive.addTransform(self.root,\n self.getName(\"ctl_npo\"),\n t)\n\n self.meta_ctl = self.addCtl(ctl_npo,\n \"ctl\",\n t,\n self.color_fk,\n \"cube\",\n w=self.size * .5,\n h=self.size * .5,\n d=self.size * .5,\n tp=self.parentCtlTag)", "title": "" }, { "docid": "f0b5069c8306b0f52b789184f2332a19", "score": "0.4499294", "text": "def __dfsCollect(self, object, component, curIt):\n if not self.__isMarked(object):\n self.__mark(object)\n\n for dependent in self.getDependents(object):\n self.__dfsCollect(dependent, component, curIt)\n\n component.append(object)", "title": "" }, { "docid": "ac2a25680ecec68f9c62686da0ed7011", "score": "0.4487215", "text": "def get_chapters(self, obj, found=None):\n if not found:\n found = []\n if obj['tag'] == 'chapter':\n found.append(obj)\n else:\n for child in obj['children']:\n found = self.get_chapters(child, found)\n return found", "title": "" }, { "docid": "bbe4818630122f60b6bde56132df1e4e", "score": "0.44866896", "text": "def fetch_new_children(self, ):\n root = self.get_root()\n refobjinter = self.get_refobjinter()\n unwrapped = self.get_unwrapped(root, refobjinter)\n self.wrap(self.get_root(), self.get_refobjinter(), unwrapped)\n\n suggestions = self.get_suggestions()\n for typ, element in suggestions:\n for c in self._children:\n if typ == c.get_typ() and element == c.get_element():\n break\n else:\n Reftrack(root=root, refobjinter=refobjinter, typ=typ, element=element, parent=self)", "title": "" }, { "docid": "f00b3695bd05fc515c7b52b904bda944", "score": "0.4459496", "text": "def badobjects(obj, depth=0, exact=False, safe=False):\n from dill import pickles\n if not depth:\n if pickles(obj,exact,safe): return None\n return obj\n return dict(((attr, badobjects(getattr(obj,attr),depth-1,exact,safe)) \\\n for attr in dir(obj) if not pickles(getattr(obj,attr),exact,safe)))", "title": "" }, { "docid": "d8420f50ceacaff6d17cb7e437a5f38f", "score": "0.44532835", "text": "def walk(*walkables):\n if not walkables:\n return\n walkable = walkables[0]\n try:\n entities = walkable.__walk__()\n except AttributeError:\n raise TypeError(\"Object %r is not walkable\" % walkable)\n for entity in entities:\n yield entity\n end_node = walkable.end_node()\n for walkable in walkables[1:]:\n try:\n if end_node == walkable.start_node():\n entities = walkable.__walk__()\n end_node = walkable.end_node()\n elif end_node == walkable.end_node():\n entities = reversed(list(walkable.__walk__()))\n end_node = walkable.start_node()\n else:\n raise ValueError(\"Cannot append walkable %r \"\n \"to node %r\" % (walkable, end_node))\n except AttributeError:\n raise TypeError(\"Object %r is not walkable\" % walkable)\n for i, entity in enumerate(entities):\n if i > 0:\n yield entity", "title": "" }, { "docid": "3dddf953928ae5a1c131c6390c90c4df", "score": "0.4450274", "text": "def flatten(object_: Object,\r\n *,\r\n path_separator: str = '.') -> Array[Object]:\r\n keys = set(object_)\r\n result = [dict(object_)]\r\n while keys:\r\n key = keys.pop()\r\n new_result = []\r\n for index, record in enumerate(result):\r\n try:\r\n value = record[key]\r\n except KeyError:\r\n new_result.append(record)\r\n else:\r\n if isinstance(value, dict):\r\n del record[key]\r\n new_value = flatten_nested_objects(\r\n value,\r\n prefix=key + path_separator,\r\n path_separator=path_separator)\r\n keys.update(new_value.keys())\r\n new_result.append({**new_value, **record})\r\n elif isinstance(value, list):\r\n del record[key]\r\n new_records = [\r\n flatten_nested_objects(sub_value,\r\n prefix=key + 
path_separator,\r\n path_separator=path_separator)\r\n for sub_value in value]\r\n keys.update(chain.from_iterable(map(dict.keys,\r\n new_records)))\r\n new_result.extend({**new_record, **record}\r\n for new_record in new_records)\r\n else:\r\n new_result.append(record)\r\n result = new_result\r\n return result", "title": "" }, { "docid": "045d27eb49ecd1932151e6363d6fefb4", "score": "0.44481143", "text": "def gather_hyper_prams(obj, hp_list):\n non_collection_types = (str, bytes, bytearray, np.ndarray)\n try:\n if isinstance(obj, HyperParameter):\n new_hp_list = [] # If this is a hyper parameter, we create a nested hp_list. This makes the below cases add\n # nested hyper parameters to the inner list\n ptr = HyperParameterPointer(obj)\n hp_list.append((ptr, new_hp_list))\n # for now, the only hyper parameter which can have children is the discrete one, so we should check\n # children of that\n if isinstance(obj, DiscreteHyperParameter):\n obj.values = [gather_hyper_prams(v, new_hp_list) for v in obj.values]\n return ptr\n\n elif isinstance(obj, Mapping):\n return type(obj)({k: gather_hyper_prams(v, hp_list) for k,v in obj.items()})\n elif isinstance(obj, Collection) and not isinstance(obj, non_collection_types):\n return type(obj)([gather_hyper_prams(v, hp_list) for v in obj])\n elif hasattr(obj, '__dict__'):\n try:\n obj_copy = copy.copy(obj)\n obj_copy.__dict__ = gather_hyper_prams(obj.__dict__, hp_list)\n return obj_copy\n except TypeError:\n return obj\n else:\n return obj\n except TypeError as e:\n raise MaterializeError(obj, \"Failed to materialize\") from e", "title": "" }, { "docid": "24d052ef3ba5586d5423606a97895a44", "score": "0.44448438", "text": "def walk(fn, obj, *args, **kwargs):\n objType = type(obj)\n if objType in [list, tuple, vim.List]:\n return list(walk(fn, o, *args) for o in obj)\n elif objType in [dict, vim.Dictionary]:\n return dict((walk(fn, k, *args), walk(fn, v, *args))\n for k, v in obj.items())\n return fn(obj, *args, **kwargs)", "title": "" }, { "docid": "7c739ef694270461f3a277e2a2b87307", "score": "0.44250375", "text": "def compound_inertia_analysis_3x3(objects):\n total_mass, common_com = combine_com_3x3(objects)\n\n shifted_inertias = list()\n for obj in objects:\n if 'rot' in obj and not obj['rot'] == mathutils.Matrix.Identity(\n 3\n ): # if object is rotated in local space\n objinertia = spin_inertia_3x3(\n inertiaListToMatrix(obj['inertia']), obj['rot']\n ) # transform inertia to link space\n else:\n objinertia = inertiaListToMatrix(obj['inertia']) # keep inertia\n inert_i_at_common_com = shift_com_inertia_3x3(\n obj['mass'], obj['com'], objinertia, common_com\n )\n shifted_inertias.append(inert_i_at_common_com)\n\n total_inertia_at_common_com = mathutils.Matrix.Identity(3)\n total_inertia_at_common_com.zero()\n for inertia in shifted_inertias:\n total_inertia_at_common_com = total_inertia_at_common_com + inertia\n\n return total_mass, common_com, total_inertia_at_common_com", "title": "" }, { "docid": "b0f37b484afbd852abeab51a0ef7be96", "score": "0.44106233", "text": "def serialize_object_list(self, obj_list):\n return super().serialize_object_list(self.dump_list(obj_list))", "title": "" }, { "docid": "35ed7e5564bd01136d0d40e17296e485", "score": "0.44098994", "text": "def clean_up(self, obj):\n\n def clean_it(obj):\n # Rename comment list to 'comments' and force it to be a list.\n try:\n if obj.comment:\n obj.comments = obj.comment\n del obj.comment\n if not type(obj.comments) == list:\n obj.comments = [obj.comments,]\n except AttributeError:\n pass\n \n 
# Force media to be a list.\n try:\n if not type(obj.media) == list:\n obj.media = [obj.media,]\n except AttributeError:\n pass\n return obj\n \n \n if type(obj) == list:\n obj = [clean_it(x) for x in obj]\n else:\n obj = clean_it(obj)\n \n return obj", "title": "" }, { "docid": "be49008883dcc1b1f9cc076b65f0de0c", "score": "0.4405966", "text": "def get_lineage(self, name_object):\n ancestors = [] # Initialise the list of all nodes from root to leaf.\n tax_id = self.tax_id # Define leaf\n\n while 1:\n if name_object.has_key(tax_id):\n ancestors.append(tax_id)\n tax_id = name_object[tax_id].parent\n else:\n break\n if tax_id == \"1\":\n # If it is the root, we reached the end.\n # Add it to the list and break the loop\n ancestors.append(tax_id)\n break\n return ancestors # Return the list", "title": "" }, { "docid": "82f8e6eface2b21c9ac666122318328d", "score": "0.44025633", "text": "def objwalk(obj, path=(), memo=None):\n if len( path ) > MAX_DEPTH + 1:\n yield path, obj # Truncate it!\n if memo is None:\n memo = set()\n iterator = None\n if isinstance(obj, Mapping):\n iterator = iteritems\n elif isinstance(obj, (Sequence, Set)) and not isinstance(obj, string_types):\n iterator = enumerate\n elif hasattr( obj, '__class__' ) and hasattr( obj, '__dict__' ) and type(obj) not in primitives: # If type(obj) == <instance>\n iterator = class_iterator\n elif hasattr(obj, '__iter__') or isinstance(obj, types.GeneratorType):\n obj = [o for o in obj]\n else:\n pass\n if iterator:\n if id(obj) not in memo:\n memo.add(id(obj))\n for path_component, value in iterator(obj):\n for result in objwalk(value, path + (path_component,), memo):\n yield result\n memo.remove(id(obj))\n else:\n yield path, obj", "title": "" }, { "docid": "13a0c79f6682fec292bfe83a9244a446", "score": "0.4400031", "text": "def recursive_search_for_context(self, objects, max_x, min_x, max_y, min_y):\n \n objects = self.find_roots(objects)\n objects = self.find_fractions(objects, max_x, min_x, max_y, min_y)\n objects = self.find_equalsigns(objects)\n objects = self.find_multiplicationsigns(objects)\n objects = self.sort_objects_by_x_value(objects)\n objects = self.find_exponents(objects, max_x, min_x, max_y, min_y)\n objects = self.sort_objects_by_x_value(objects)\n\n return objects", "title": "" }, { "docid": "229f18e6cbff0dc3f8ec3d784fa13899", "score": "0.4395767", "text": "def has_children ( self, obj ):\n return (len( self.get_children( obj ) ) > 0)", "title": "" }, { "docid": "1aa689b885acccae6eb569a6d69c2bd4", "score": "0.43832174", "text": "def filter_neighbors(self, objects):\n neighbors = []\n for obj in objects:\n if abs(obj.pos[0] - self.pos[0]) <= 1 and abs(obj.pos[1] - self.pos[1]) <= 1:\n neighbors.append(obj)\n return neighbors", "title": "" }, { "docid": "d8db06a546f4cb2b1243bba7ce9823d1", "score": "0.43792042", "text": "def on_object(objects):\n # type: (List[Object]) -> None\n # Update Context with Perceived Objects\n self.context.add_objects(objects)\n\n # Add Perceived People to People Info\n self._people_info = [obj for obj in objects if obj.name == \"person\"]", "title": "" }, { "docid": "9f6fce15d0ddc258752180ccb5f9cf68", "score": "0.43768722", "text": "def relatedAll(obj, prop, maxIterations=10000, w=config.cidsServer) :\t\n\t\n\t# get the list of property chains for prop\n\tpropertyChains = prop.get_property_chain()\n\t\n\tcurrentObjects = {obj}\n\trelatedObjects = set()\n\tpropIndirect = \"INDIRECT_\" + prop.name\t\t# use power of owlready2 to get transitive, symmetric, inverse, etc.\n\titeration = 0\n\t\n\twhile 
(len(currentObjects) > 0) and (iteration < maxIterations) : \n\t\titeration += 1\n\t\tcObject = currentObjects.pop()\n\t\trelatedObjects = relatedObjects | {cObject}\n\t\tnewObjects = set(getattr(cObject, propIndirect))\n\t\tcurrentObjects = currentObjects | (newObjects - relatedObjects)\n\t\n\t\t# process property chains\n\t\tfor propertyChain in propertyChains :\n\t\t\tstageSet = {cObject}\n\t\t\tfor chainProp in propertyChain :\n\t\t\t\tnewObjects = set()\n\t\t\t\tchainPropIndirect = \"INDIRECT_\" + chainProp.name\n\t\t\t\twhile (len(stageSet) > 0) and (iteration < maxIterations) :\n\t\t\t\t\titeration += 1\n\t\t\t\t\tcobj = stageSet.pop()\n\t\t\t\t\tnobjects = set(getattr(cobj, chainPropIndirect))\n\t\t\t\t\tnewObjects = newObjects | nobjects\n\t\t\t\tstageSet = newObjects\n\t\t\tcurrentObjects = currentObjects | (stageSet - relatedObjects)\n\treturn(relatedObjects - {obj})", "title": "" }, { "docid": "70df8742e84480db4c482d8699f3ece5", "score": "0.43686804", "text": "def ZopeFindAndApply( self, obj, obj_ids=None, search_sub=0, REQUEST=None, pre='',\n apply_func=None, apply_path='', trace=None,\n recursive=None, check=None, **kw ):\n obj_metatypes = get_param('obj_metatypes', REQUEST, kw)\n obj_not_cataloged_only = get_param('obj_not_cataloged_only', REQUEST, kw)\n obj_searchterm = get_param('obj_searchterm', REQUEST, kw)\n obj_expr = get_param('obj_expr', REQUEST, kw)\n obj_mtime = get_param('obj_mtime', REQUEST, kw)\n obj_mspec = get_param('obj_mspec', REQUEST, kw)\n obj_permission = get_param('obj_permission', REQUEST, kw)\n obj_roles = get_param('obj_roles', REQUEST, kw)\n\n if recursive is None:\n if obj_metatypes and 'all' in obj_metatypes:\n obj_metatypes = None\n\n if obj_mtime and type(obj_mtime)==type('s'):\n obj_mtime = DateTime(obj_mtime).timeTime()\n\n if obj_permission:\n obj_permission = p_name(obj_permission)\n\n if obj_roles and type(obj_roles) is type('s'):\n obj_roles = [ obj_roles ]\n\n if obj_expr:\n # Setup expr machinations\n md = td()\n obj_expr = (Eval(obj_expr), md, md._push, md._pop )\n result = []\n\n base = obj\n if hasattr(obj, 'aq_base'):\n base = obj.aq_base\n\n if not hasattr(base, 'objectItems'):\n return result\n\n try: items = obj.objectItems()\n except: return result\n\n try: add_result = result.append\n except: raise AttributeError, `result`\n\n if apply_path and not apply_path.startswith('/'):\n apply_path = '/' + apply_path\n\n for id, ob in items:\n if pre: p = \"%s/%s\" % ( pre, id )\n else: p = id\n\n dflag = 0\n if hasattr(ob, '_p_changed') and ob._p_changed == None:\n dflag = 1\n\n if hasattr(ob, 'aq_base'): base = ob.aq_base\n else: base = ob\n\n meta_type = getattr(base, 'meta_type', None)\n\n if ( \\\n ( not obj_ids or absattr(base.id) in obj_ids )\n and\n ( not obj_metatypes or meta_type in obj_metatypes )\n and\n ( not obj_searchterm or ( hasattr(ob, 'PrincipiaSearchSource') and ob.PrincipiaSearchSource().find(obj_searchterm) >= 0 ))\n and\n ( not obj_expr or expr_match(ob, obj_expr) )\n and\n ( not obj_mtime or mtime_match(ob, obj_mtime, obj_mspec) )\n and\n ( not obj_permission or not obj_roles or role_match(ob, obj_permission, obj_roles) ) ):\n\n if apply_func is not None:\n IsRun = 1\n uid = '%s/%s' % ( apply_path, p )\n try:\n ob.parent_path()\n except AttributeError:\n ob = self.resolve_path(uid)\n #uid = '/'.join(aq_base(ob).getPhysicalPath())\n #LOG('xxx', DEBUG, 'uid:%s-%s' % ( uid, apply_path ))\n\n if obj_not_cataloged_only:\n if self._catalog.getRid( uid ) is not None:\n IsRun = 0\n try:\n if IsRun:\n if not check:\n kw = { 
'force': 1 }\n apply( apply_func, ( ob, uid, ), kw )\n #apply_func(ob, uid, force=1)\n add_result( ( p, ob ) )\n except Exception, message:\n exc = sys.exc_info()\n logger.error('ZSQLCatalog.ZopeFindAndApply: %s: meta_type %s, uid [%s]' % ( \\\n exc[1], meta_type, uid ))\n raise #continue\n if trace:\n logger.info('ZSQLCatalog.ZopeFindAndApply: uid [%s]' % uid)\n else:\n add_result( ( p, ob ) )\n dflag = 0\n\n if search_sub and hasattr( base, 'objectItems' ):\n result += \\\n self.ZopeFindAndApply( ob, obj_ids, search_sub, REQUEST, p, apply_func, apply_path,\n obj_metatypes=obj_metatypes,\n obj_not_cataloged_only=obj_not_cataloged_only,\n obj_searchterm=obj_searchterm, obj_expr=obj_expr,\n obj_mtime=obj_mtime, obj_mspec=obj_mspec,\n obj_permission=obj_permission, obj_roles=obj_roles,\n trace=trace, recursive=1, check=check\n )\n if dflag: ob._p_deactivate()\n\n return result", "title": "" }, { "docid": "2de3dc948b8c09c06c6b4ea1ce7f34d1", "score": "0.43685237", "text": "def getSubObjectsMemo(base, filter=None, descend=None, memo={}):\n from Products.ZenRelations.RelationshipManager \\\n import RelationshipManager\n if base.meta_type == \"To One Relationship\":\n objs = [base.obj]\n else:\n objs = base.objectValues()\n for obj in objs:\n if (isinstance(obj, RelationshipManager) and\n not obj.getPrimaryDmdId().startswith(base.getPrimaryDmdId())):\n continue\n if not filter or filter(obj):\n yield obj\n if not descend or descend(obj):\n for x in getSubObjectsMemo(obj, filter, descend, memo):\n yield x", "title": "" }, { "docid": "19bd35db7a34de7878b11ca573c7dce3", "score": "0.43489665", "text": "def get_children(self, obj):\n filtered_courses_id = self.context.get('filtered_courses_id')\n filtered_category_id = self.context.get('filtered_category_id')\n subject = self.context.get('subject')\n if filtered_category_id:\n if subject in obj.name.lower():\n children = obj.course.all()\n else:\n children = obj.course.all().filter(name__icontains=subject)\n elif filtered_courses_id:\n children = obj.course.all().filter(pk__in=filtered_courses_id)\n else:\n children = obj.course.all()\n return CourseSerializer(children, many=True).data", "title": "" }, { "docid": "39677824ffcb29358cbac0ca2cd2f972", "score": "0.43481776", "text": "def addSubObjects(self, objects, REQUEST=None):\n if objects:\n storage = getattr(aq_base(self), '_v_at_subobjects', None)\n if storage is None:\n setattr(self, '_v_at_subobjects', {})\n storage = getattr(aq_base(self), '_v_at_subobjects')\n for name, obj in objects.items():\n storage[name] = aq_base(obj)", "title": "" }, { "docid": "879f2fa37395dc6c718bf9103537b2b5", "score": "0.43418694", "text": "def delete_object(self, object_to_delete):\n # store the type of object in a variable\n # check that variable against all the possible\n # Object types and locate the dict\n # then call del(approriate_dict[object_to_delete.key])\n object_type = type(object_to_delete)\n object_dict = {}\n object_keys_list = []\n object_key_map = {}\n object_mapper = ''\n cascaded_objects = []\n if object_type == orderCategory:\n object_dict = self.order_categories\n object_keys_list = self.order_category_keys\n object_key_map = self.order_category_name_key_map\n object_mapper = object_to_delete.name\n cascaded_objects = object_to_delete.get_all_orders(self)\n\n elif object_type == order:\n object_dict = self.orders\n object_keys_list = self.order_keys\n object_key_map = self.order_name_key_map\n object_mapper = object_to_delete.name\n cascaded_objects = object_to_delete.get_all_steps(self)\n\n elif 
object_type == orderStep:\n object_dict = self.order_steps\n object_keys_list = self.order_step_keys \n\n else:\n raise TypeError('%s type does not exist in database' % str(object_type)) \n\n try:\n del(object_dict[object_to_delete.key])\n object_keys_list.remove(object_to_delete.key)\n if object_mapper:\n del(object_key_map[object_mapper])\n # delete child componet objects\n for cascaded_object in cascaded_objects:\n self.delete_object(cascaded_object)\n except KeyError:\n raise KeyError('%s does not exist' % str(object_type))", "title": "" }, { "docid": "76461fc3f50df997c0a5cb078feac49c", "score": "0.43343", "text": "def flatten_ragged(raggedlist, memo = None):\n # If the object is elemental, it cannot be\n # decomposed, so we must bottom out the\n # recursion and yield the object and its'\n # path before returning control back up the\n # stack.\n #\n is_itable = isinstance(raggedlist, collections.Iterable)\n\n is_leaf = (not is_itable) or is_string(raggedlist)\n\n if is_leaf:\n yield raggedlist\n return\n\n # Since this is a recursive function, we need\n # to be on our guard against any references\n # to objects back up the call stack (closer\n # to the root of the tree). Any such\n # references would be circular, leading to\n # an infinite tree, and causing us to blow\n # our stack in a fit of unbounded recursion.\n #\n # If we detect that we've already visited this\n # object (using identity not equality), then\n # the safe thing to do is to halt the\n # recursive descent and return control back\n # up the stack.\n #\n _id = id(raggedlist)\n if memo is None:\n memo = set()\n if _id in memo:\n return\n memo.add(_id)\n\n for element in flatten_ragged(raggedlist, memo):\n yield element\n\n # We only need to guard against infinite\n # recursion within a branch of the call-tree.\n # There is no danger in visiting the same\n # item instance in sibling branches, so we\n # can forget about objects once we are done\n # with them and about to pop the stack.\n #\n memo.remove(_id)\n return", "title": "" }, { "docid": "665d5447f53f579aa2f276a25987cfca", "score": "0.43326625", "text": "def find_children(self):\n for i in range(len(self.vertices)):\n self.vertices[i].children = []\n for i in range(len(self.vertices)):\n for parent in self.vertices[i].parents:\n if i not in self.vertices[parent].children:\n self.vertices[parent].children.append(i)", "title": "" }, { "docid": "5ef138b509bcda47f08d6a329076c9ef", "score": "0.43321827", "text": "def remove_object(self, obj: GameObject):\n for group in self.objects.values():\n group.remove_object(obj)\n if group.remove_object(obj):\n return", "title": "" }, { "docid": "ef03914fcc1412d59a5edfa924ac72c9", "score": "0.4318288", "text": "def __contains__(self, obj):\n if isinstance(obj, Node):\n return self.child.__contains__(obj)\n else:\n return self._order.__contains__(obj)", "title": "" }, { "docid": "edc15d89d9dbec16e8d58a028cb26f3b", "score": "0.4316085", "text": "def get_outer_object(cls, object):\n return None", "title": "" }, { "docid": "ff89c09646590e04d226b55bd768d56d", "score": "0.4305269", "text": "def _initialize_object(cls, obj: Reactable) -> None:\n _rich_traceback_omit = True\n for name, reactive in obj._reactives.items():\n reactive._initialize_reactive(obj, name)", "title": "" }, { "docid": "04c9a9695867f5664eaa59bc5984edcb", "score": "0.43038896", "text": "def default_nested_facets(class_object):\n setup = []\n parents = class_object.objects.filter(parent=None)\n default_attrs = {\n \"selected\": False,\n \"child_selected\": False,\n 
\"children\": None,\n \"parent\": None,\n }\n for parent in parents:\n parent_setup = copy.copy(default_attrs)\n parent_setup.update({\"id\": str(parent.id), \"title\": parent.title})\n child_setups = []\n for child in parent.children.exclude(activitypage=None):\n _child_setup = copy.copy(default_attrs)\n _child_setup.update({\n \"id\": str(child.id),\n \"title\": child.title,\n \"parent\": str(child.parent_id)})\n child_setups.append(_child_setup)\n parent_setup[\"children\"] = child_setups\n setup.append(parent_setup)\n return setup", "title": "" }, { "docid": "6df3ec952116627fef95726d1e59a518", "score": "0.43003002", "text": "def flatten(obj: list) -> list:\n for el in obj:\n if isinstance(el, collections.abc.Iterable) and not isinstance(el, (str, bytes)):\n yield from ParseJson.flatten(el)\n else:\n yield el", "title": "" }, { "docid": "a90a4393d90bd3962d1e1d155356fbcd", "score": "0.4291356", "text": "def _potential_relations(obj):\n for iface in providedBy(obj).flattened():\n for name, field in getFields(iface).items():\n if IRelation.providedBy(field):\n try:\n relation = getattr(obj, name)\n except AttributeError:\n # can't find this relation on the object\n continue\n yield name, None, relation\n if IRelationList.providedBy(field):\n try:\n l_ = getattr(obj, name)\n except AttributeError:\n # can't find the relation list on this object\n continue\n if l_ is not None:\n for i, relation in enumerate(l_):\n yield name, i, relation", "title": "" }, { "docid": "89fc49a80aaf44fc1d1d21104200a1de", "score": "0.42871517", "text": "def add_ancestor(self, obj):\n assert isinstance(obj, RomeoKeyValue)\n self._ancestors.add(obj)\n for child in self.CHILDREN:\n child.add_ancestor(obj)", "title": "" }, { "docid": "38a851d476792f2bee5a8fe20b723058", "score": "0.42799807", "text": "def removeMember(self, object):\n if self.hasMember(object):\n for dependent in self.getDependents(object):\n self.__influencers[dependent].remove(object)\n for influencer in self.getInfluencers(object):\n self.__dependents[influencer].remove(object)\n\n del self.__dependents[object]\n del self.__influencers[object]\n del self.__semanticMapping[object]\n else:\n raise ValueError(\"Specified object is not member of this graph\")", "title": "" }, { "docid": "d73e6a579dab0e0b66c056b61f93f345", "score": "0.42765447", "text": "def getSubObjects(base, filter=None, descend=None, retobjs=None):\n if not retobjs: retobjs = []\n for obj in base.objectValues():\n if not filter or filter(obj):\n retobjs.append(obj)\n if not descend or descend(obj):\n retobjs = getSubObjects(obj, filter, descend, retobjs)\n return retobjs", "title": "" }, { "docid": "c13fc86d949871d0c802f49060b9d7af", "score": "0.42737523", "text": "def _filter_nonvisible_objects(self, objects):\n def add_visible(obj, walls):\n line_cells = get_line(obj.pos, self.pos)\n for cell in line_cells[1:-1]:\n if cell in walls:\n return False\n\n return True\n\n # Get the walls and non-walls\n walls = set()\n\n for obj in objects:\n if type(obj) == Wall:\n walls.add(obj.pos)\n\n # If no walls everything is visible\n if len(walls) <= 0:\n return objects\n\n # Proceed to check which neighbors are visible\n visible_objects = []\n for obj in objects:\n visible = add_visible(obj, walls)\n if visible:\n visible_objects.append(obj)\n\n return visible_objects", "title": "" }, { "docid": "bb08fcb31c69a2c7b30c901505ebdb16", "score": "0.4267972", "text": "def touch_object(self, objects: Set[Object]) -> Set[Object]:\n objects_per_box = self._separate_objects_by_boxes(objects)\n 
return_set = set()\n for box, box_objects in objects_per_box.items():\n candidate_objects = box.objects\n for object_ in box_objects:\n for candidate_object in candidate_objects:\n if self._objects_touch_each_other(object_, candidate_object):\n return_set.add(candidate_object)\n return return_set", "title": "" }, { "docid": "c6bdd247705bb109abb966bb6fb387aa", "score": "0.42611468", "text": "def add_children(self, child_list):\n for child in child_list:\n if isinstance(child, list):\n self.add_children(child)\n else:\n self.add_child(child)", "title": "" }, { "docid": "d43c621c2c4ce98cb2ed770bd1648405", "score": "0.42568436", "text": "def interpret_reference_object(\n interpreter,\n speaker,\n d,\n extra_tags=[],\n limit=1,\n loose_speakerlook=False,\n allow_clarification=True,\n) -> List[ReferenceObjectNode]:\n F = d.get(\"filters\")\n special = d.get(\"special_reference\")\n # F can be empty...\n assert (F is not None) or special, \"no filters or special_reference sub-dicts {}\".format(d)\n if special:\n mem = get_special_reference_object(interpreter, speaker, special)\n return [mem]\n\n if F.get(\"contains_coreference\", \"NULL\") != \"NULL\":\n mem = F[\"contains_coreference\"]\n if isinstance(mem, ReferenceObjectNode):\n return [mem]\n elif mem == \"resolved\":\n pass\n else:\n logging.error(\"bad coref_resolve -> {}\".format(mem))\n\n if len(interpreter.progeny_data) == 0:\n if any(extra_tags):\n if not F.get(\"triples\"):\n F[\"triples\"] = []\n for tag in extra_tags:\n F[\"triples\"].append({\"pred_text\": \"has_tag\", \"obj_text\": tag})\n # TODO Add ignore_player maybe?\n candidate_mems = apply_memory_filters(interpreter, speaker, F)\n if len(candidate_mems) > 0:\n # FIXME?\n candidates = [(c.get_pos(), c) for c in candidate_mems]\n r = filter_by_sublocation(\n interpreter, speaker, candidates, d, limit=limit, loose=loose_speakerlook\n )\n return [mem for _, mem in r]\n elif allow_clarification:\n # no candidates found; ask Clarification\n # TODO: move ttad call to dialogue manager and remove this logic\n interpreter.action_dict_frozen = True\n confirm_candidates = apply_memory_filters(interpreter, speaker, F)\n objects = object_looked_at(interpreter.agent, confirm_candidates, speaker=speaker)\n if len(objects) == 0:\n raise ErrorWithResponse(\"I don't know what you're referring to\")\n _, mem = objects[0]\n interpreter.provisional[\"object_mem\"] = mem\n interpreter.provisional[\"F\"] = F\n interpreter.dialogue_stack.append_new(ConfirmReferenceObject, mem)\n raise NextDialogueStep()\n else:\n raise ErrorWithResponse(\"I don't know what you're referring to\")\n\n else:\n # clarification answered\n r = interpreter.progeny_data[-1].get(\"response\")\n if r == \"yes\":\n # TODO: learn from the tag! 
put it in memory!\n return [interpreter.provisional.get(\"object_mem\")] * limit\n else:\n raise ErrorWithResponse(\"I don't know what you're referring to\")", "title": "" }, { "docid": "fe2a99a3827e27087e5dce719c3ad904", "score": "0.42520168", "text": "def find_refs_and_clone(self, obj_dict, parent_tenant=None):\n if not parent_tenant:\n parent_tenant = conv_utils.get_name(obj_dict['tenant_ref'])\n for k, v in obj_dict.items():\n if k == 'tenant_ref':\n continue\n if isinstance(v, dict):\n self.find_refs_and_clone(v, parent_tenant)\n elif isinstance(v, list) and len(v) > 0 and isinstance(v[0], dict):\n for child_obj in v:\n self.find_refs_and_clone(child_obj, parent_tenant)\n elif k.endswith('_refs'):\n ref_list = list(set(copy.deepcopy(v)))\n for ref in v:\n obj_tenant = conv_utils.get_tenant_from_ref(ref)\n if obj_tenant == parent_tenant:\n continue\n o_type = conv_utils.get_obj_type_from_ref(ref)\n if (obj_tenant == 'admin' and o_type in\n admin_obj_visible_to_all):\n continue\n c_obj, c_ref = self.get_clone_ref_obj(parent_tenant, ref)\n if c_ref:\n ref_list.remove(ref)\n ref_list.append(c_ref)\n if c_obj:\n self.add_cloned_obj(o_type, c_obj)\n if len(ref_list) > 0:\n obj_dict[k] = ref_list\n elif k.endswith('_ref'):\n if v:\n o_tenant = conv_utils.get_tenant_from_ref(v)\n o_type = conv_utils.get_obj_type_from_ref(v)\n\n if o_type == 'vsvip':\n continue\n if o_tenant == parent_tenant:\n continue\n if (o_tenant == 'admin' and o_type in\n admin_obj_visible_to_all):\n continue\n c_obj, c_ref = self.get_clone_ref_obj(parent_tenant, v)\n if c_ref:\n obj_dict = self.rewrite_avi_obj_ref(obj_dict, c_ref, k)\n if c_obj:\n self.add_cloned_obj(o_type, c_obj)\n return", "title": "" }, { "docid": "40ecae67ff33ec571ce5bf36befe0979", "score": "0.42447424", "text": "def breakRelations(event):\n obj = event.object\n if not IHasIncomingRelations.providedBy(obj):\n return\n catalog = component.queryUtility(ICatalog)\n intids = component.queryUtility(IIntIds)\n if catalog is None or intids is None:\n return\n\n # find all relations that point to us\n try:\n obj_id = intids.getId(obj)\n except KeyError:\n # our intid was unregistered already\n return\n rels = list(catalog.findRelations({'to_id': obj_id}))\n for rel in rels:\n rel.broken(rel.to_path)\n # we also need to update the relations for these objects\n notify(RelationBrokenEvent(rel.from_object))", "title": "" }, { "docid": "1ef51433e1f81b12d587e75d7cb5fe6b", "score": "0.4241741", "text": "def _clear_obj_outlinks(self, objid):\n if objid in self._outlinks:\n outlinks = self._outlinks[objid]\n for o in outlinks:\n if objid in self._inlinks[o]:\n self._inlinks[o].remove(objid)\n self._outlinks[objid] = set()", "title": "" }, { "docid": "ed0b09da65f00f7405faea2dde5e7256", "score": "0.42322695", "text": "def get_object_list(self, request):\n\n return super(ExploitResource, self).get_object_list(request, Exploit)", "title": "" }, { "docid": "69e58d2ae5e1776a9c97844593cb33f4", "score": "0.42320836", "text": "def test_ui_object2_oid_get_children_get(self):\n pass", "title": "" }, { "docid": "a6f7d3f665ec2132a45bc1ab731ede88", "score": "0.423187", "text": "def flatten(obj):\n\tif obj is None: return None\n\t\n\tif isinstance(obj, dict):\n\t\tres = {}\n\t\tfor k,v in obj.items():\n\t\t\tres[k] = flatten(v)\n\t\treturn res\n\tprops = dir(obj)\n\t# segun esto, esto contienen list, tuple, set, frozetset\n\tif '__iter__' in props and '__getitem__' in props:\n\t\tres = []\n\t\tfor i in obj:\n\t\t\tres.append(flatten(i))\n\t\treturn res\n\t# objetos normalitos, 
clases\n\tif hasattr(obj, '__dict__'):\n\t\tvals = vars(obj)\n\t\tres = {}\n\t\tfor k,v in vals.items():\n\t\t\tres[k] = flatten(v)\n\t\treturn res\n\treturn obj", "title": "" }, { "docid": "d5161c095136d6458991fdf7c0938c7d", "score": "0.4231114", "text": "def isChild(self, obj):\n return bool(obj in self._children)", "title": "" }, { "docid": "4ad84b7f014f55e4491025203deef16f", "score": "0.42290092", "text": "def isRelated(self, obj):\n #easiest, case direct relationship\n if bool(obj in self._children | self._ancestors):\n return True\n #since this isn't a binary tree structure we have to look\n #for similarities and related top objects, avoid ROOTNODE\n objList = [obj]\n seen = set() #deduplicate and prevent infinite recursion\n while objList: #expensive relationship search\n x = objList.pop()\n for foo in x.RELATED:\n if foo in seen: continue\n else: seen.add(foo)\n if foo.isChild(self) and foo.isChild(obj):\n if foo.ROOTNODE: #too ambiguous\n continue #look for a better relationship\n return True #found a common ancestor\n #further inspect object relationships\n if foo.isAncestor(self):\n objList.append(foo)\n return False", "title": "" }, { "docid": "cc36b8fa3d033e11eb3caf05eab6736d", "score": "0.42276347", "text": "def flatten(obj):\n if isinstance(obj, list) or isinstance(obj, dict):\n l = []\n to_flatten = obj if isinstance(obj, list) else obj.values()\n for sublist in map(flatten, to_flatten):\n if isinstance(sublist, list):\n l += flatten(sublist)\n else:\n l.append(sublist)\n return l\n return obj", "title": "" }, { "docid": "03b7ee72c431b8ae365bae67857ac8c6", "score": "0.42254156", "text": "def recursive_gzip(obj):\n # Stop case: a non iterative structure is reached\n if isinstance(obj, basestring) and os.path.isfile(obj):\n return gzip_file(obj, prefix=\"\", outdir=None,\n remove_original_file=True)\n\n # Go deeper\n elif isinstance(obj, (tuple, list)):\n gzip_obj = []\n for item in obj:\n gzip_obj.append(recursive_gzip(obj))\n if isinstance(obj, tuple):\n gzip_obj = tuple(gzip_obj)\n return gzip_obj\n\n # Default return object\n else:\n return obj", "title": "" } ]
509a286e60fce150300aabb366f83175
django.test.client.Client gets confused with templates when using the cache; that's why we need to clear it.
[ { "docid": "e86263ace9d9f65d37bbbb5eae3575f8", "score": "0.0", "text": "def setUp(self):\n cache.clear()\n self.author = User.objects.create_user(username='joe', password='qwerty')\n self.teaser = \"this is a teaser\"\n self.body = \"and this is the body\"", "title": "" } ]
[ { "docid": "4fbfbe6153af5f24a44f085add23e075", "score": "0.732025", "text": "def clear_template_cache(request):\n bottle.TEMPLATES.clear()", "title": "" }, { "docid": "aa32e576cf2aba393dcface5a14020e7", "score": "0.72820395", "text": "def test_cached_views(self):\n response = self.client.get('/template/cached/bar/')\n self.assertEqual(response.status_code, 200)\n\n time.sleep(1.0)\n\n response2 = self.client.get('/template/cached/bar/')\n self.assertEqual(response2.status_code, 200)\n\n self.assertEqual(response.content, response2.content)\n\n time.sleep(2.0)\n\n # Let the cache expire and test again\n response2 = self.client.get('/template/cached/bar/')\n self.assertEqual(response2.status_code, 200)\n\n self.assertNotEqual(response.content, response2.content)", "title": "" }, { "docid": "18d4558516bad4668a4b0f5baa4064ae", "score": "0.6800866", "text": "def test_invalidate_cache(self):\n pass", "title": "" }, { "docid": "84a1faaed7ed1adb5a3ad882f86f822c", "score": "0.6736604", "text": "def clear_cache():\n # Code run before each test\n yield\n # Code run afer each test\n cache.clear()", "title": "" }, { "docid": "d62bd8fc4096772015786c78a3601753", "score": "0.6733266", "text": "def reset(self):\n self.template_cache.clear()", "title": "" }, { "docid": "0a9eb128dabad0840450ed46e610f459", "score": "0.6536857", "text": "def testCacheReset(self):\n tpl = template.Template('{% load flatblock_tags %}{% flatblock \"block\" 60 %}')\n tpl.render(template.Context())\n name = '%sblock' % settings.CACHE_PREFIX\n self.assertNotEquals(None, cache.get(name))\n block = FlatBlock.objects.get(slug='block')\n block.header = 'UPDATED'\n block.save()\n self.assertEquals(None, cache.get(name))", "title": "" }, { "docid": "b4248fa56a29ef446961bffdfc39f221", "score": "0.6531932", "text": "def test_double_clear_cache():\n clear_user_agent_cache()\n clear_user_agent_cache()", "title": "" }, { "docid": "def0bcd9f3624fac33fec708f563523f", "score": "0.6394786", "text": "def test_landing_caching(self):\n self.client.get(self.landing_url)\n cache_response = cache.get(self.landing_url)\n self.assertIsNone(cache_response)", "title": "" }, { "docid": "3b6b5be6779e59213a88d06d3f759343", "score": "0.63892835", "text": "def test_clear(self):\n self.assertEqual(len(self.cache), 2)\n self.cache.clear()\n self.assertEqual(len(self.cache), 0)", "title": "" }, { "docid": "6e1227772a6989f5ca5b8dc432739828", "score": "0.6389076", "text": "def clear_cache():\n ...", "title": "" }, { "docid": "62ee807bd04bc1400168627a299834d9", "score": "0.6334245", "text": "def cache_clear(self):\n pass", "title": "" }, { "docid": "f4cd6f9f3c3fa1a431f797ec365b4f52", "score": "0.63298", "text": "def test_cache(self):\n with self.assertNumQueries(3):\n response = self.authorized_client.get(reverse(\"index\"))\n self.assertEqual(response.status_code, 200)\n response = self.authorized_client.get(reverse(\"index\"))\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "3be7ec0a9331853730e79b707eddcb37", "score": "0.6310376", "text": "def ClearCache(self):\n pass", "title": "" }, { "docid": "a55b8694b022090385393faa5b13c97b", "score": "0.63025206", "text": "def tearDown(self):\n super(Base, self).tearDown()\n cache.clear()", "title": "" }, { "docid": "0351f9ac2df775af8f8679633b3fa519", "score": "0.62981504", "text": "def test_delete_cache_no_op(self):\n from django.conf import settings\n from django.core.cache import cache as django_cache\n from garage.cache import delete_cache\n key = 'abcd1234'\n # test if data is in cache\n 
cached_data = django_cache.get(key)\n # delete cache and check result\n result = delete_cache(key)\n self._msg('test', 'delete_cache', first=True)\n self._msg('CACHES', repr(settings.CACHES))\n self._msg('key', key)\n self._msg('result', result)\n self._msg('cached_data', cached_data)\n self.assertFalse(cached_data)\n self.assertFalse(result)", "title": "" }, { "docid": "c7d4a82e965a4ef364bb1eea8667e1f4", "score": "0.6296959", "text": "def tearDown(self):\n\n self.testing_client_context.pop()", "title": "" }, { "docid": "c32f3a71d6fc162ffc0cf181b8140d7b", "score": "0.62139", "text": "def clear_cache(self):\n self._cache.clear()", "title": "" }, { "docid": "53fa703198b26949104799544febef87", "score": "0.6209545", "text": "def test_cart_clear_cache(self):\n pass", "title": "" }, { "docid": "f82203ea65f2d9b376932b19b8597976", "score": "0.6197493", "text": "def test_cache_invalidation_with_no_cache(self): \n url = reverse('dynamic-image', args=('simplified_cell_states_graph',))\n request = self.factory.get(url)\n \n cache_key = get_cache_key(request)\n self.assertEquals(cache_key, None)", "title": "" }, { "docid": "a46e3cdfea3e1851b1361aafbbcb7936", "score": "0.61956894", "text": "def clear_cache():\n _cache.clear()", "title": "" }, { "docid": "408a6516930ebc638bd62d0730418ff9", "score": "0.6147505", "text": "def fix_test_cache() -> Path:\n clear_test_cache()\n return TEST_TMP_CACHE", "title": "" }, { "docid": "df1077678b9b69ccec7a9c185ab6eaf9", "score": "0.61311483", "text": "def clear():\n get_cache().clear()", "title": "" }, { "docid": "5bca5c3fc52149e621e82230ae9e9302", "score": "0.61267656", "text": "def test_clear(self):\n metas = [\n {'id': 1,\n 'name': 'Image1',\n 'size': len(FIXTURE_DATA)},\n {'id': 2,\n 'name': 'Image2',\n 'size': len(FIXTURE_DATA)}]\n\n for image_id in (1, 2):\n self.assertFalse(self.cache.hit(image_id))\n\n for meta in metas:\n with self.cache.open(meta, 'wb') as cache_file:\n cache_file.write(FIXTURE_DATA)\n\n for image_id in (1, 2):\n self.assertTrue(self.cache.hit(image_id))\n\n self.cache.clear()\n\n for image_id in (1, 2):\n self.assertFalse(self.cache.hit(image_id))", "title": "" }, { "docid": "47f5e2f30620e11edfa39d3fceb3567c", "score": "0.61127317", "text": "def test_cache(self):\n old_post = Post(date=timezone.now(), text=\"Old text\")\n new_post = Post(date=timezone.now(), text=\"New text\")\n key = make_template_fragment_key('posttemplate', [old_post]) \n self.assertEqual(cache.get(key), None)", "title": "" }, { "docid": "b99af7c82dbe2b73a217d4fa21864a1f", "score": "0.610341", "text": "def clear_caches():\r\n from jinja2.environment import _spontaneous_environments\r\n from jinja2.lexer import _lexer_cache\r\n _spontaneous_environments.clear()\r\n _lexer_cache.clear()", "title": "" }, { "docid": "84449417dc6760fc1e1cc33f866561e7", "score": "0.60990995", "text": "def clear_cache(self):\r\n global SITE_CACHE\r\n SITE_CACHE = {}", "title": "" }, { "docid": "351af7591f2d5c8499dac974688a0294", "score": "0.60692674", "text": "def clear_cache():\n cache.clear()", "title": "" }, { "docid": "48245cafdb4122a569052f410397d944", "score": "0.60501647", "text": "def _purge():\n _cache.clear()", "title": "" }, { "docid": "84a1f1884074f37d85d9f24b00dd0603", "score": "0.60130674", "text": "def clear_cache(self):\n global SITE_CACHE\n SITE_CACHE = {}", "title": "" }, { "docid": "7e5947548e7baf856bfdf82081104d10", "score": "0.6004758", "text": "def test_models_cached(self):\n request = self.get_request()\n get_title = \"{{ settings.tests.testgenericsetting.title }}\"\n\n # 
force site query beforehand\n Site.find_for_request(request)\n\n with self.assertNumQueries(1):\n for i in range(1, 4):\n with self.subTest(attempt=i):\n self.assertEqual(\n self.render(request, get_title * i),\n self.default_settings.title * i,\n )", "title": "" }, { "docid": "b31b3dfd1446e6565a7260c26339ca5a", "score": "0.60032976", "text": "def clear_cache(request):\n _ = request\n cache_mgr.clear()\n return HttpResponseRedirect(\"/admin\")", "title": "" }, { "docid": "20e434a1c1168a8b4f6a643200d91959", "score": "0.5992307", "text": "def plaintext_cache(request):\n orig_cache = pydov.cache\n\n if len(request.param) == 0:\n max_age = datetime.timedelta(seconds=1)\n else:\n max_age = request.param[0]\n\n plaintext_cache = PlainTextFileCache(\n cachedir=os.path.join(tempfile.gettempdir(), 'pydov_tests'),\n max_age=max_age)\n pydov.cache = plaintext_cache\n\n yield plaintext_cache\n\n plaintext_cache.remove()\n pydov.cache = orig_cache", "title": "" }, { "docid": "45dede79523f354b3b1a61db00d8c1ed", "score": "0.59629774", "text": "def test_models_cached(self):\n get_title = '{{ settings(\"tests.testgenericsetting\").title }}'\n\n request = self.get_request()\n # run extra query before hand\n Site.find_for_request(request)\n\n for i in range(1, 4):\n with self.assertNumQueries(1):\n context = {\"request\": request}\n template = self.engine.from_string(get_title * i)\n self.assertEqual(\n template.render(context), self.default_settings.title * i\n )", "title": "" }, { "docid": "ed397633ae761dbd3c31157ac113ad3e", "score": "0.5947184", "text": "def clear_cache() -> None:\n cache.clear()", "title": "" }, { "docid": "4f5f67729eacf983a2052e5369bb6c91", "score": "0.5944619", "text": "def clearcache():\n cache.clear()", "title": "" }, { "docid": "0da7776a3671f5328e801dccf0e2f899", "score": "0.5910703", "text": "def client():\n skip_if_no_django()\n\n from django.test.client import Client\n\n return Client()", "title": "" }, { "docid": "f75ba1c3ddfde144f2fd1224d2ddc178", "score": "0.59068644", "text": "def test_update_template(self):\n pass", "title": "" }, { "docid": "169347f045d2780e361c38a2286c94aa", "score": "0.5895001", "text": "def clear_caches():\n global format_templates_cache, format_elements_cache, format_outputs_cache\n format_templates_cache = {}\n format_elements_cache = {}\n format_outputs_cache = {}", "title": "" }, { "docid": "82ee28128caa34e350a852a6c58969f4", "score": "0.5894757", "text": "def clear_cache(self):\n self.cache = {}\n self.timeouts = {}", "title": "" }, { "docid": "8e47e45a9090848886c459ea0d75b777", "score": "0.58846486", "text": "def tearDown(self):\n self.app_context.pop()\n self.client = None\n PARTIES.clear()\n OFFICES.clear()", "title": "" }, { "docid": "27d46585d30d86b2b98999cb2324b063", "score": "0.58711547", "text": "def test_fetch_template(self):\n pass", "title": "" }, { "docid": "cf57b99676fa6de75540b8d263d09a83", "score": "0.5865316", "text": "def clear_cache(self):\r\n self.__class__._cache.clear()", "title": "" }, { "docid": "08a6a6e1097d8fca56a545ca2a5f80bc", "score": "0.58531225", "text": "def clear_cache(self):\n raise NotImplementedError", "title": "" }, { "docid": "703b470eb3a407c8b34539a12d6a08e5", "score": "0.58505666", "text": "def test_delete_cache(self):\n from django.conf import settings\n from django.core.cache import cache as django_cache\n from garage.cache import cache_key, cache_data, delete_cache\n\n key = 'abcd1234'\n\n @cache_data(key)\n def dummy_func(*args):\n return '_'.join([a for a in args])\n\n some_data = ['abcd1234', 
'l’écriture', '寫作']\n result = dummy_func(*some_data)\n\n # test if data is in cache\n cached_data = django_cache.get(key)\n\n self._msg('test', 'delete_cache', first=True)\n self._msg('CACHES', repr(settings.CACHES))\n self._msg('key', key)\n self._msg('some_data', repr(some_data))\n self._msg('result', result)\n self._msg('cached_data', cached_data)\n self.assertEqual(result, cached_data)\n delete_cache(key)\n cached_data = django_cache.get(key)\n self.assertFalse(cached_data)\n self._msg('data after delete', cached_data)", "title": "" }, { "docid": "6fbc69372ff67c91e354feac0b4bb822", "score": "0.5847322", "text": "def teardown_test_environment():\n saved_data = _TestState.saved_data\n\n settings.ALLOWED_HOSTS = saved_data.allowed_hosts\n settings.DEBUG = saved_data.debug\n settings.EMAIL_BACKEND = saved_data.email_backend\n Template._render = saved_data.template_render\n\n del _TestState.saved_data\n del mail.outbox", "title": "" }, { "docid": "616ebd6ae1dfdc9053b3e584bc0d2cf4", "score": "0.583078", "text": "def test_register_uri_template_clears_uri_template_cache(self):\n resource = self.root_res\n request = RequestFactory().get('/api/')\n resource.get_uri_templates(request)\n self.assertEqual(len(resource._cached_uri_templates._cache), 1)\n\n resource.register_uri_template('extension_name', 'some/relative/path/')\n self.assertEqual(len(resource._cached_uri_templates._cache), 0)", "title": "" }, { "docid": "d77d15c768bf1245530e644cf185cfe3", "score": "0.5822072", "text": "def clear_cache(self):\n self.cache.clear()", "title": "" }, { "docid": "2ed25306f0656f1b3e347abb97e0b177", "score": "0.58100986", "text": "def uncache(self):", "title": "" }, { "docid": "1112ea58977740d31f3e88ae94b1d47d", "score": "0.58062994", "text": "def clear_cache(self):\n template = 'https://ci.appveyor.com/api/projects/{0.username}/{0.project}/buildcache'\n url = template.format(self)\n headers = {'Authorization': 'Bearer {0.api_token}'.format(self)}\n response = requests.delete(url, headers=headers)\n print('Status code: %s'%response.status_code)\n if response.status_code == requests.codes.ok:\n print('Cache deleted.')\n else:\n response.raise_for_status()", "title": "" }, { "docid": "61d2ccdb8c6a2df4d82977447729f185", "score": "0.5795701", "text": "def gziptext_cache(request):\n orig_cache = pydov.cache\n\n if len(request.param) == 0:\n max_age = datetime.timedelta(seconds=1)\n else:\n max_age = request.param[0]\n\n gziptext_cache = GzipTextFileCache(\n cachedir=os.path.join(tempfile.gettempdir(), 'pydov_tests'),\n max_age=max_age)\n pydov.cache = gziptext_cache\n\n yield gziptext_cache\n\n gziptext_cache.remove()\n pydov.cache = orig_cache", "title": "" }, { "docid": "40376287f32d4f303623319cb5dc1a08", "score": "0.5775219", "text": "def test_logout_doesnt_cache(self):\n response = self.client.get('/logout/')\n self.assertIn('no-store', response['Cache-Control'])", "title": "" }, { "docid": "4456835f00caee04a55243342d999699", "score": "0.5771677", "text": "def tearDown(self):\n self.app = None\n self.cache = None", "title": "" }, { "docid": "d5e7f0ec565b9117ed4346b794e629cb", "score": "0.5765388", "text": "def clear_cache(self):\n # By default, no cache to clear.\n pass", "title": "" }, { "docid": "38dfdd6de77ca80f44011eb8759f4cfc", "score": "0.5760433", "text": "def clear_cache(self):\n self.cache = None", "title": "" }, { "docid": "50d1b6f1297f84c282d2b6f0bc6a7adf", "score": "0.5731596", "text": "def test_delete(self):\n self.assertEqual(len(self.cache), 2)\n del self.cache[\"foo\"]\n 
self.assertEqual(\"foo\" in self.cache, False)\n self.assertEqual(len(self.cache), 1)", "title": "" }, { "docid": "a514e79370a78a325f65e52cbb10f774", "score": "0.5729341", "text": "def testPageCache(self):\n email1 = '[email protected]'\n name1 = 'User 1'\n email2 = '[email protected]'\n name2 = 'User 2'\n\n # login as one user and view 'unit' and other pages, which are not cached\n login(email1)\n register(self, name1)\n Permissions.assert_enrolled(self)\n response = view_unit(self)\n AssertContains(email1, response.body)\n logout()\n\n # login as another user and check 'unit' and other pages show correct new email\n login(email2)\n register(self, name2)\n Permissions.assert_enrolled(self)\n response = view_unit(self)\n AssertContains(email2, response.body)\n logout()", "title": "" }, { "docid": "dae720c6732fa148a471c4ae65f4bf41", "score": "0.5712795", "text": "def no_cache_subscriber(event):\r\n event.response.cache_expires = 1", "title": "" }, { "docid": "34b586bd2bd91953f99ced6fc427c2a4", "score": "0.57097507", "text": "def test_template_loader_postmortem(self):\n template_name = \"notfound.html\"\n with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:\n tempdir = os.path.dirname(tmpfile.name)\n template_path = os.path.join(tempdir, template_name)\n with override_settings(TEMPLATES=[{\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [tempdir],\n }]), self.assertLogs('django.request', 'ERROR'):\n response = self.client.get(reverse('raises_template_does_not_exist', kwargs={\"path\": template_name}))\n self.assertContains(response, \"%s (Source does not exist)\" % template_path, status_code=500, count=2)\n # Assert as HTML.\n self.assertContains(\n response,\n '<li><code>django.template.loaders.filesystem.Loader</code>: '\n '%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),\n status_code=500,\n html=True,\n )", "title": "" }, { "docid": "4646a6b95afbe3348e88e4b1e752c43d", "score": "0.5702138", "text": "def test_template_name_leading_dash_caching(self):\n self.assertEqual(self.engine.template_loaders[0].cache_key('-template.html', []), '-template.html')", "title": "" }, { "docid": "a879d30dc9172825cbe848fedd2fbcde", "score": "0.5691656", "text": "def clear_in_memory_cache_fixture(monkeypatch):\n def fixture():\n monkeypatch.setattr(latest_user_agents, '_cached_user_agents', None)\n return fixture", "title": "" }, { "docid": "4c36b62e8faadcb0751935fd6dfe8c1c", "score": "0.56758386", "text": "def test_delete_implant_template(self):\n pass", "title": "" }, { "docid": "0e58d0c8ecaa3531410f8381380e0ceb", "score": "0.56756055", "text": "def test_unregister_uri_template_clears_uri_template_cache(self):\n resource = self.root_res\n request = RequestFactory().get('/api/')\n resource.get_uri_templates(request)\n self.assertEqual(len(resource._cached_uri_templates._cache), 1)\n\n resource.unregister_uri_template('extensions')\n self.assertEqual(len(resource._cached_uri_templates._cache), 0)", "title": "" }, { "docid": "3f37bd70778277d863cd6b9a290e292b", "score": "0.5669926", "text": "def test_report_view_cache(client, log_output, bennett_org):\n report = ReportFactory(\n org=bennett_org,\n title=\"test\",\n repo=\"output-explorer-test-repo\",\n branch=\"master\",\n report_html_file_path=\"test-outputs/output.html\",\n )\n\n # fetch report\n response = client.get(report.get_absolute_url())\n assert response.status_code == 200\n\n # force update\n response = client.get(report.get_absolute_url() + \"?force-update=\")\n\n assert 
response.status_code == 302\n assert response.url == report.get_absolute_url()\n\n old_token = report.cache_token\n report.refresh_from_db()\n assert old_token != report.cache_token\n\n expected_log_items = {\n \"report_id\": report.id,\n \"slug\": report.slug,\n \"event\": \"Cache token refreshed and requests cache cleared; redirecting...\",\n }\n cache_logs = [\n log for log in reversed(log_output.entries) if \"cache\" in log[\"event\"].lower()\n ]\n last_cache_log = next(iter(cache_logs), None)\n for key, value in expected_log_items.items():\n assert last_cache_log[key] == value\n log_output.entries.clear()", "title": "" }, { "docid": "2f18e3f22a3c8e9c4b95ba0dd36b6138", "score": "0.5666756", "text": "def empty_cache():\n cache_dir = appdirs.user_cache_dir('argflow', 'argflow')\n if os.path.exists(cache_dir):\n shutil.rmtree(cache_dir)", "title": "" }, { "docid": "275b6ceaa9e2135f0404a36aaa174929", "score": "0.5651066", "text": "def clearcache():\n\n global cache\n cache = {}", "title": "" }, { "docid": "d1a29c1e71780efb0a0e34dcac0305fd", "score": "0.564711", "text": "def test_add_when_full(self):\n cache = _URITemplatesCache(max_size=2)\n\n cache.add('/api1/', {\n 'template1': 'http://localhost:8080/api1/resource1/',\n 'template2': 'http://localhost:8080/api1/resource2/',\n })\n\n cache.add('/api2/', {\n 'template3': 'http://localhost:8080/api2/resource3/',\n })\n\n cache.add('/api3/', {\n 'template4': 'http://localhost:8080/api3/resource4/',\n })\n\n self.assertEqual(\n list(cache._cache.items()),\n [\n ('/api2/', {\n 'template3': 'http://localhost:8080/api2/resource3/',\n }),\n ('/api3/', {\n 'template4': 'http://localhost:8080/api3/resource4/',\n }),\n ])", "title": "" }, { "docid": "e1a414436a6663293e58b8879066131e", "score": "0.5627658", "text": "def tearDown(self):\n self.client.close()", "title": "" }, { "docid": "12fb0595debec182972cc402dfb620c2", "score": "0.5614681", "text": "def test_bundle_cache_clear(self):\n cache = BundleCache(self.bundle.uuid)\n key1 = (\"some\", \"key\", \"1\")\n value1 = \"value1\"\n cache.set(key1, value1)\n assert cache.get(key1) == value1\n\n # Now publish a new version of the bundle:\n api.write_draft_file(self.draft.uuid, \"test.txt\", \"we need a changed file in order to publish a new version\")\n api.commit_draft(self.draft.uuid)\n\n # Now the cache will not be immediately invalidated; it takes up to MAX_BLOCKSTORE_CACHE_DELAY seconds.\n # Since this is a new bundle and we _just_ accessed the cache for the first time, we can be confident\n # it won't yet be automatically invalidated.\n assert cache.get(key1) == value1\n # Now \"clear\" the cache, forcing the check of the new version:\n cache.clear()\n assert cache.get(key1) is None", "title": "" }, { "docid": "0b6b306ff55a00428d202c91e3d74341", "score": "0.5606968", "text": "def no_cache(response):\n response.cache_control.no_cache = True\n response.cache_control.no_store = True\n response.cache_control.must_revalidate = True\n response.cache_control.max_age = 0\n response.headers['Pragma'] = 'no-cache'\n response.expires = 0\n return response", "title": "" }, { "docid": "774e8933e0d1b9736453da65400757ac", "score": "0.56019706", "text": "def test_cache_resetting(self):\n g = get_language_from_request\n r = self.rf.get('/')\n r.COOKIES = {}\n r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}\n self.assertEqual('pt-br', g(r))\n with self.settings(LANGUAGES=[('en', 'English')]):\n self.assertNotEqual('pt-br', g(r))", "title": "" }, { "docid": "8b25a4dd172cf0b93601e392bbfceb7e", "score": "0.55976444", 
"text": "def clear_cache(self):\n\t\tself.namespace = None", "title": "" }, { "docid": "6c810c2e50d59807e51050a14d589c74", "score": "0.5585087", "text": "def test_if_view_uses_correct_template(self):\n response = self.client.get(reverse('website:index'))\n # Check that we got a response \"success\"\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'website/index.html')", "title": "" }, { "docid": "11af914c2407f20b96a5a9d5f3b2eadb", "score": "0.55641615", "text": "def test_purge(self):\n meta = {'id': 1,\n 'name': 'Image1',\n 'size': len(FIXTURE_DATA)}\n\n self.assertFalse(self.cache.hit(1))\n\n with self.cache.open(meta, 'wb') as cache_file:\n cache_file.write(FIXTURE_DATA)\n\n self.assertTrue(self.cache.hit(1))\n\n self.cache.purge(1)\n\n self.assertFalse(self.cache.hit(1))", "title": "" }, { "docid": "7e62f5e0f797f6b3e6fe4ea8681db9b4", "score": "0.55580646", "text": "def test_cached_service_invalidated_acl(self):\n svc1_name = self.test_service_name + \"_invalidate_1\"\n svc2_name = self.test_service_name + \"_invalidate_2\"\n svc1_path = \"/ows/proxy/{}\".format(svc1_name)\n svc2_path = \"/ows/proxy/{}\".format(svc2_name)\n info = utils.TestSetup.create_TestService(self, override_service_name=svc1_name)\n utils.TestSetup.create_TestUserResourcePermission(self, resource_info=info, override_permission=\"read\")\n info = utils.TestSetup.create_TestService(self, override_service_name=svc2_name)\n utils.TestSetup.create_TestUserResourcePermission(self, resource_info=info, override_permission=\"read\")\n\n admin_cookies = self.cookies.copy()\n self.login_test_user()\n user_cookies = self.test_cookies.copy()\n user_headers = self.test_headers.copy()\n utils.check_or_try_logout_user(self)\n self.test_headers = None\n self.test_cookies = None\n\n tmp_req = self.mock_request(svc1_path, method=\"GET\", headers=self.cache_reset_headers.copy())\n svc1_ref = self.ows.get_service(tmp_req)\n tmp_req = self.mock_request(svc2_path, method=\"GET\", headers=self.cache_reset_headers.copy())\n svc2_ref = self.ows.get_service(tmp_req)\n invalidate_service(svc1_name)\n invalidate_service(svc2_name)\n\n def run_svc_req(_svc_path):\n _msg = \"User is expected to have access to service\"\n _req = self.mock_request(_svc_path, method=\"GET\", headers=user_headers, cookies=user_cookies)\n utils.check_no_raise(lambda: self.ows.check_request(_req), msg=_msg)\n utils.check_no_raise(lambda: self.ows.check_request(_req), msg=_msg)\n\n # run first set of requests to trigger caching of both Service and ACL, for both services\n for svc_path, svc_ref in [(svc1_path, svc1_ref), (svc2_path, svc2_ref)]:\n mocks = self.run_with_caching_mocks(svc_ref, lambda: run_svc_req(svc_path))\n mock_svc_cached, mock_svc_real, mock_acl_cached, mock_acl_real = mocks\n utils.check_val_equal(mock_svc_cached.call_count, 2,\n msg=\"Cached service call expected for each request (preparation)\")\n utils.check_val_equal(mock_acl_cached.call_count, 2,\n msg=\"Cached ACL resolution expected for each request (preparation)\")\n utils.check_val_equal(mock_svc_real.call_count, 1,\n msg=\"Real service call expected only for first request before caching (preparation)\")\n utils.check_val_equal(mock_acl_real.call_count, 1,\n msg=\"Real ACL call expected only for first request before caching (preparation)\")\n\n # trigger service cache invalidation\n # NOTE:\n # It is important that the operation done by the method only explicitly interacts with the Service cache and\n # not the ACL cache. 
Only service cache invalidation should also cascade invalidation of corresponding ACL\n # caches for that service.\n\n # path not important, only need a 'request' object with 'no-cache' set\n req_no_cache = self.mock_request(\"\", method=\"GET\", headers=self.cache_reset_headers.copy())\n req_no_cache.registry[\"dbsession_factory\"] = lambda *_, **__: self.session\n with mock.patch(\"magpie.adapter.magpieservice.get_admin_cookies\", lambda *_, **__: admin_cookies):\n store = self.adapter.servicestore_factory(req_no_cache)\n store.fetch_by_name(svc1_name) # this triggers invalidate service cache because of request no-cache header\n\n # re-run service/ACL requests, now cache should have been invalidated for service 1, but not for service 2\n # because service 1 caches and its corresponding ACL caches were reset, same counts as last run\n # for service 2 though, real call count should be 0 because they are still in valid caches\n mocks1 = self.run_with_caching_mocks(svc1_ref, lambda: run_svc_req(svc1_path))\n mocks2 = self.run_with_caching_mocks(svc2_ref, lambda: run_svc_req(svc2_path))\n mock_svc1_cached, mock_svc1_real, mock_acl1_cached, mock_acl1_real = mocks1\n mock_svc2_cached, mock_svc2_real, mock_acl2_cached, mock_acl2_real = mocks2\n utils.check_val_equal(mock_svc1_cached.call_count, 2,\n msg=\"Cached service call expected for each request\")\n utils.check_val_equal(mock_acl1_cached.call_count, 2,\n msg=\"Cached ACL resolution expected for each request\")\n utils.check_val_equal(mock_svc1_real.call_count, 1,\n msg=\"Real service call expected only for first request before caching (after reset)\")\n utils.check_val_equal(mock_acl1_real.call_count, 1,\n msg=\"Real ACL call expected only for first request before caching (after reset)\")\n utils.check_val_equal(mock_svc2_cached.call_count, 2,\n msg=\"Cached service call expected for each request\")\n utils.check_val_equal(mock_acl2_cached.call_count, 2,\n msg=\"Cached ACL resolution expected for each request\")\n utils.check_val_equal(mock_svc2_real.call_count, 0,\n msg=\"Real service call not expected since caches should remain valid (after reset)\")\n utils.check_val_equal(mock_acl2_real.call_count, 0,\n msg=\"Real ACL call not expected since caches should remain valid (after reset)\")", "title": "" }, { "docid": "05d6502349d502a59c03aca73d84c1e6", "score": "0.555459", "text": "def no_cache(*args, **kwargs):\n response = make_response(view(*args, **kwargs))\n response.headers['Last-Modified'] = datetime.now()\n response.headers[\n 'Cache-Control'] = 'no-store, no-cache, must-revalidate,\\\n post-check=0, pre-check=0, max-age=0'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '-1'\n return response", "title": "" }, { "docid": "9bfb518b1801c36c3f87b2a78b75eaa1", "score": "0.55520505", "text": "def test_homepage(self):\n response = self.client.get(reverse(\"homepage\"))\n self.assertTemplateUsed(response, \"homepage.html\")", "title": "" }, { "docid": "d51bfe51333cf50a83f70fa83ba7c9f4", "score": "0.5550596", "text": "def reset(cls):\n cls._ALL_TEMPLATES = {}", "title": "" }, { "docid": "dfb96c9d10d3121d7a886d9057ed9873", "score": "0.5548804", "text": "def test_clear_cache(self, mock_labbook):\n _, _, lb = mock_labbook\n username = 'test'\n r = LabbookCacheController()\n r.clear_entry((username, lb.owner, lb.name))\n\n # Retrieve the values and put them in the cache\n r.cached_description((username, lb.owner, lb.name))\n r.cached_created_time((username, lb.owner, lb.name))\n r.cached_modified_on((username, 
lb.owner, lb.name))\n\n assert r.db.exists(r._make_key((username, lb.owner, lb.name))) == 1\n\n r.clear_entry((username, lb.owner, lb.name))\n assert not r.db.hgetall(r._make_key((username, lb.owner, lb.name)))\n assert r.db.exists(r._make_key((username, lb.owner, lb.name))) == 0", "title": "" }, { "docid": "7111b3c8dfbdcf8e9d8a36eb4c7a5470", "score": "0.55482715", "text": "def reset_cache(dov_proxy_no_xdov):\n gziptext_cache = GzipTextFileCache(\n cachedir=os.path.join(tempfile.gettempdir(), 'pydov_tests_error'),\n max_age=datetime.timedelta(seconds=0.1))\n gziptext_cache.remove()\n\n orig_cache = pydov.cache\n pydov.cache = gziptext_cache\n\n yield\n\n gziptext_cache.remove()\n pydov.cache = orig_cache", "title": "" }, { "docid": "954b1f5ecec08e68483b9bc58bf85b1a", "score": "0.5543952", "text": "def reset_cache_backend_state(celery_app):\n yield\n backend = celery_app.__dict__.get('backend')\n if backend is not None:\n if isinstance(backend, CacheBackend):\n if isinstance(backend.client, DummyClient):\n backend.client.cache.clear()\n backend._cache.clear()", "title": "" }, { "docid": "9e5d5e179fc1fb4873fafd05bb8ca404", "score": "0.5539814", "text": "def quick_test():\n from httplib2 import FileCache\n\n logging.basicConfig(level=logging.DEBUG)\n store = FileCache(\".cache\")\n opener = request.build_opener(CacheHandler(store))\n request.install_opener(opener)\n response = request.urlopen(\"http://www.google.com/\")\n print(response.headers)\n print(\"Response:\", response.read()[:100], '...\\n')\n\n response.reload(store)\n print(response.headers)\n print(\"After reload:\", response.read()[:100], '...\\n')", "title": "" }, { "docid": "d76aa198c88dd1b3c7fe70af63829b66", "score": "0.553765", "text": "def test_project_delete_page_with_project(self):\n\n setUp(self)\n project = Project.objects.get(slug='post-1')\n c = Client()\n # Log the user in\n c.login(username=USER, password=PASSWORD)\n response = c.get(\"/projects/project-one/delete/\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('projects/project_delete.html', response.template_name)\n self.assertTemplateUsed(response, 'projects/project_delete.html')", "title": "" }, { "docid": "891d505b8f3719b03d21716aa822ac87", "score": "0.5523537", "text": "def test_lti_consumer_api_get_context_method_is_cached(self, mock_params):\n\n def create_request(data):\n request = RequestFactory().get(\"/\", data)\n request.user = AnonymousUser()\n request.session = {}\n request.toolbar = CMSToolbar(request)\n return request\n\n placeholder = Placeholder.objects.create(slot=\"test\")\n user_id = str(uuid.uuid4())\n\n request = create_request({\"user_id\": user_id})\n view_set = LTIConsumerViewsSet()\n lti_consumer = LTIConsumerFactory()\n model_instance = add_plugin(\n placeholder,\n LTIConsumerPlugin,\n \"en\",\n url=lti_consumer.url,\n lti_provider_id=lti_consumer.lti_provider_id,\n )\n\n with self.assertNumQueries(1):\n view_set.get_context(request, \"v1.0\", model_instance.pk)\n\n mock_params.assert_called_once_with(user_infos={\"user_id\": user_id}, edit=False)\n mock_params.reset_mock()\n\n with self.assertNumQueries(0):\n view_set.get_context(request, \"v1.0\", model_instance.pk)\n\n mock_params.assert_not_called()\n\n # Check that cache is set separately for each language\n translation.activate(\"fr\")\n with self.assertNumQueries(1):\n view_set.get_context(request, \"v1.0\", model_instance.pk)\n\n mock_params.assert_called_once_with(user_infos={\"user_id\": user_id}, edit=False)\n mock_params.reset_mock()\n\n with 
self.assertNumQueries(0):\n view_set.get_context(request, \"v1.0\", model_instance.pk)\n\n mock_params.assert_not_called()\n translation.deactivate()\n\n # Check that cache is set separately for each user\n [user_1, user_2] = UserFactory.create_batch(2)\n request = create_request({\"user_id\": user_1.username})\n with self.assertNumQueries(1):\n view_set.get_context(request, \"v1.0\", model_instance.pk)\n\n mock_params.assert_called_once_with(\n user_infos={\"user_id\": user_1.username}, edit=False\n )\n mock_params.reset_mock()\n\n request = create_request({\"user_id\": user_2.username})\n with self.assertNumQueries(1):\n view_set.get_context(request, \"v1.0\", model_instance.pk)\n\n mock_params.assert_called_once_with(\n user_infos={\"user_id\": user_2.username}, edit=False\n )\n mock_params.reset_mock()\n\n request = create_request({\"user_id\": user_1.username})\n with self.assertNumQueries(0):\n view_set.get_context(request, \"v1.0\", model_instance.pk)\n\n mock_params.assert_not_called()", "title": "" }, { "docid": "24cbac2b9039c5290dba4fa6fbc91fca", "score": "0.55161595", "text": "def SiteManager_clear_cache(self):\n models = sys.modules.get(self.__class__.__module__)\n models.SITE_CACHE.clear()", "title": "" }, { "docid": "831cc2dafe194076a98c69e10fa104d6", "score": "0.55142975", "text": "def test_get_template_data(self, mock_gsiup):\n mock_gsiup.return_value = models.UserPref(\n email='[email protected]')\n with test_app.test_request_context(self.request_path):\n template_data = self.handler.get_template_data()\n\n # Everything is done in JS, so there is no template_data\n self.assertEqual({}, template_data)", "title": "" }, { "docid": "d4b58e5453afe58ffbbac0618cc7fe7d", "score": "0.5512373", "text": "def clear_cache(self):\n self._cache.clear()", "title": "" }, { "docid": "b8e50b6d3710fe6327b5d86387698ab0", "score": "0.55099666", "text": "def test_get_and_not_found(self):\n cache = _URITemplatesCache()\n\n with self.assertRaises(KeyError):\n self.assertIsNone(cache.get('/api/'))\n\n self.assertEqual(cache._cache, {})", "title": "" }, { "docid": "d0b7a36a3e94486c9bd7c93cc9bd15eb", "score": "0.5505748", "text": "def clear_cache() -> Generator[None, Any, Any]:\n feature_loader.instance.clear_cache()\n yield\n feature_loader.instance.clear_cache()", "title": "" }, { "docid": "3b1fa1dccc802b59ce87db7cc09223ff", "score": "0.54955214", "text": "def test_cache(self):\n self.assertIsNone(self.cache.get(EVENT_ID))\n expected_json = b'{\"id\":\"b5ee7e41f16a4a62429655c619a5b5be_14770730026240\",\"title\":\"Drink a cup of coffee with C42 Team\",\"names\":[\"API\",\"Michel\",\"Jasper\",\"Bob\",\"Dennis\",\"Edmon\",\"Aslesha\",\"Lars\"]}'\n response = self.app.get('/events-with-subscriptions/{}'.format(EVENT_ID))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, expected_json)\n self.assertIsNotNone(self.cache.get(EVENT_ID))\n response = self.app.get('/events-with-subscriptions/{}'.format(EVENT_ID))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, expected_json)\n self.assertIsNotNone(self.cache.get(EVENT_ID))\n time.sleep(1)\n self.assertIsNotNone(self.cache.get(EVENT_ID))\n time.sleep(4.2*60)\n self.assertIsNone(self.cache.get(EVENT_ID))", "title": "" }, { "docid": "b1727cb71b2ccc5a768466053bfc70e7", "score": "0.5492392", "text": "def test_get_template_missing_debug_off(self):\n self.engine.debug = False\n with self.assertRaises(TemplateDoesNotExist):\n self.engine.get_template('prod-template-missing.html')\n e = 
self.engine.template_loaders[0].get_template_cache['prod-template-missing.html']\n self.assertEqual(e, TemplateDoesNotExist)", "title": "" }, { "docid": "ff2982fc7484de16829b317fab27724e", "score": "0.5491667", "text": "def test_index_view_basic(self):\n request = self.factory.get('/')\n request.session = {}\n response = index(request)\n\n with self.assertTemplateUsed('products/index.html'):\n # I was a little puzzled on how this works, so I decided to look \n # it up. So I went to the source code and found what actually was\n # going on.\n # How this works: assertTemplateUsed returns a context manager,\n # inside that it subscribes to all \"render template\" signals, \n # so every time a template is rendered, it gets notified. It stores \n # all of the template names in a list. When the context manager's \n # '__exit__()' is called (at the end of \"with:\"), first it \n # unsubscribes from the signals and then it checks \n # if 'products/index.html' is actually within that list of template \n # names that have been rendered.\n response = index(request)\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "b677c52efbf8355f18f829aad2ddd43e", "score": "0.5490889", "text": "def test_client_view(client, auth):\n with client:\n\n # logs in user with valid credentials\n auth.login()\n\n # test that viewing the page renders without template errors\n assert client.get('/clients').status_code == 200", "title": "" }, { "docid": "cc201d8ab855fb2cce842bf1c0c34590", "score": "0.54848135", "text": "def _invalidate_fallback_cache(self):\n self._fallback_cache.delete(CMS_PAGE_CACHE_VERSION_KEY)", "title": "" }, { "docid": "2e65f57d6ce1208cb57bdbe32c98fbb8", "score": "0.54761535", "text": "def purge_cache():\n shutil.rmtree(cache_init())", "title": "" }, { "docid": "133cd99b813a57e78670ceed9d88fe3d", "score": "0.5473779", "text": "def test_correctness_via_cache(self, mock_labbook):\n _, _, lb = mock_labbook\n username = 'test'\n r = LabbookCacheController()\n r.clear_entry((username, lb.owner, lb.name))\n assert lb.description == r.cached_description((username, lb.owner, lb.name))\n assert lb.creation_date.utctimetuple() == r.cached_created_time((username, lb.owner, lb.name)).utctimetuple()\n assert lb.modified_on.utctimetuple() == r.cached_modified_on((username, lb.owner, lb.name)).utctimetuple()", "title": "" }, { "docid": "3606cf49316441e4a9db9f459f1b3034", "score": "0.5467416", "text": "def test_cache_deletion(app):\n current_identities_cache.set(\"foo\", \"bar\")\n assert current_identities_cache.get(\"foo\") == \"bar\"\n current_identities_cache.delete(\"foo\")\n assert current_identities_cache.get(\"foo\") is None", "title": "" }, { "docid": "17afa7f466b7291478c756e95cfc7b20", "score": "0.5467228", "text": "def clean_cache(self):\n # type: () -> None\n\n self._apidoc = None\n for filename in self._cache_dir_contents():\n os.unlink(filename)", "title": "" }, { "docid": "1e1a0a2a4fdc824c876dca1b972288d7", "score": "0.5462664", "text": "def test_no_template_source_loaders(self):\n with self.assertLogs('django.request', 'ERROR'):\n with self.assertRaises(TemplateDoesNotExist):\n self.client.get('/render_no_template/')", "title": "" }, { "docid": "2ea4882c92fedaa882b6a1e48c90f990", "score": "0.54570866", "text": "def ensure_cache_clear(qtbot, signal, cache):\n cache.clear()\n\n # Ensure no callbacks are still in flight\n try:\n with qtbot.wait_signal(signal, timeout=1000):\n ...\n except pytestqt.exceptions.TimeoutError:\n ...\n\n cache.clear()", "title": "" }, { "docid": 
"fe6b3031f22440aeb41b7793b715d299", "score": "0.5454046", "text": "def test_no_context_processor(self):\n template = Template(\"{{ settings.tests.TestGenericSetting.title }}\")\n context = Context()\n self.assertEqual(template.render(context), \"\")", "title": "" }, { "docid": "a06ab9e338718ca8ab026d104142e11c", "score": "0.545311", "text": "def clear_cache(self):\n if hasattr(self, \"_session\"):\n del self.session", "title": "" } ]
0263a2c1fabaf9b3dbe87411b3d2a161
Checks if the server process is healthy
[ { "docid": "93ddbcc47b26f4c9bc06d42eda0099da", "score": "0.77359974", "text": "def server_is_healthy(self):\r\n\r\n if get_settings(active_view(), 'jsonserver_debug', False) is True:\r\n return True\r\n\r\n if self.process.poll() is None:\r\n try:\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.settimeout(0.5)\r\n s.connect((self.hostname, self.available_port))\r\n s.sendall(bytes('{\"method\": \"check\"}', 'utf8'))\r\n data = sublime.value_decode(s.recv(1024))\r\n s.close()\r\n except:\r\n return False\r\n\r\n return data == b'Ok'\r\n else:\r\n logger.error(\r\n 'Something is using the port {} in your system'.format(\r\n self.available_port\r\n )\r\n )\r\n return False", "title": "" } ]
[ { "docid": "c750bab4d329fd8a29dd3926ae56d7e8", "score": "0.7100369", "text": "def CheckIfSyncServerRunning(port):\n sync_server_healthz_url = ('%s:%s/healthz' % (HTTP_SERVER_URL, port))\n req = urllib2.Request(sync_server_healthz_url)\n try:\n response = urllib2.urlopen(req)\n except urllib2.HTTPError, e:\n logging.error(\n 'It seems like Sync Server is not running, healthz check failed.'\n 'Request URL: %s , Error Code: %s'% (sync_server_healthz_url, e.code))\n return False\n except urllib2.URLError, e:\n logging.error(\n 'Failed to reach Sync server, healthz check failed.'\n 'Request URL: %s , Error: %s'% (sync_server_healthz_url, e.reason))\n return False\n else:\n logging.info(\n 'Sync Server healthz check Passed.Request URL: %s , Response: %s'\n % (sync_server_healthz_url, response.readlines()))\n response.fp._sock.recv = None\n response.close()\n return True", "title": "" }, { "docid": "1a924434a3d7f0d32b84fd3a7d27f467", "score": "0.706477", "text": "def check_health(self):\n return defer.succeed(True)", "title": "" }, { "docid": "13a5ee82d44fe53748cb1ba877d37290", "score": "0.70445967", "text": "def _instance_healthy(self, port):\n url = \"http://\" + self._private_ip + \":\" + str(port) + FETCH_PATH\n try:\n opener = urllib2.build_opener(NoRedirection)\n response = opener.open(url, timeout=HEALTH_CHECK_TIMEOUT)\n if response.code == httplib.SERVICE_UNAVAILABLE:\n return False\n except IOError:\n return False\n\n return True", "title": "" }, { "docid": "dfc5d0da2f9465d4e5b2efc6dc26a390", "score": "0.7022821", "text": "def check_alive(self):\n returncode = self._process.poll()\n if returncode is not None:\n raise RuntimeError(\"%s unexpectedly quit\" % self._name)", "title": "" }, { "docid": "ad1cc92aee156dbf84cce31a821f7ea0", "score": "0.7016544", "text": "def _is_alive(self, process):\n timeout_timer = threading.Timer(self.ping_timeout, raise_ping_timeout)\n try:\n alive = process.is_alive()\n finally:\n timeout_timer.cancel()\n return alive", "title": "" }, { "docid": "18571eab97192a444228a58d1a1314b2", "score": "0.698765", "text": "def serviceAlive(self):\n\n\t\tif (not RenderThread.serviceAlive(self)):\n\t\t\treturn False\n\n\t\tif (not self.isLocal()):\n\t\t\ttry:\n\t\t\t\tresponse = self.rpcserver.ping()\n\n\t\t\t\tif (response != 'I am alive'):\n\t\t\t\t\t# Trash is received\n\t\t\t\t\treturn False\n\t\t\texcept (xmlrpclib.Error, socket.error), e:\n\t\t\t\tprint 'remote exception caught while pinging server', e\n\t\t\t\treturn False\n\n\t\treturn True", "title": "" }, { "docid": "d74312954f849032ff960b1963220bbe", "score": "0.6979579", "text": "def isAlive(self):\n return self.module_process.is_alive()", "title": "" }, { "docid": "289e09eb24f3ec901bf3f9f84955f300", "score": "0.6957336", "text": "def _check(self):\r\n\r\n if os.name == 'posix':\r\n try:\r\n os.kill(int(PID), 0)\r\n except OSError:\r\n self.server.logger.info(\r\n 'process {0} does not exists stopping server...'.format(\r\n PID\r\n )\r\n )\r\n self.die = True\r\n elif os.name == 'nt':\r\n # win32com is not present in every Python installation on Windows\r\n # we need something that always work so we are forced here to use\r\n # the Windows tasklist command and check its output\r\n startupinfo = subprocess.STARTUPINFO()\r\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\r\n output = subprocess.check_output(\r\n ['tasklist', '/FI', 'PID eq {0}'.format(PID)],\r\n startupinfo=startupinfo\r\n )\r\n pid = PID if not PY3 else bytes(PID, 'utf8')\r\n if not pid in output:\r\n self.server.logger.info(\r\n 
'process {0} does not exists stopping server...'.format(\r\n PID\r\n )\r\n )\r\n self.die = True", "title": "" }, { "docid": "7de7c01f7070ca925216a219018bc9af", "score": "0.6910324", "text": "async def health_check(self):\n return self.running and all([await connection.health_check() for connection in self._connections])", "title": "" }, { "docid": "53d5cd71c5a2159ce678e3dfd7ed6ef7", "score": "0.68618447", "text": "def healthcheck():\n return 'OK'", "title": "" }, { "docid": "1aa2f0cfc687ba5a18210e241e6f34f7", "score": "0.685126", "text": "def is_server_up():\n try:\n _get(\"{0}/info\".format(_create_server_api_url()))\n is_server_up = True\n except:\n is_server_up = False\n return is_server_up", "title": "" }, { "docid": "2bbe2a523c0ebf0642ca2e684f948748", "score": "0.68304724", "text": "def is_server_running():\n return SERVER_PROCESS_NAME in [p.name() for p in psutil.process_iter()]", "title": "" }, { "docid": "910e96e9e4752a275618ef0f1dc71e91", "score": "0.6815806", "text": "def is_alive(self):\n\n if self.__depot_handle == None:\n return False\n\n status = self.__depot_handle.poll()\n if status != None:\n return False\n return self.__network_ping()", "title": "" }, { "docid": "340aefde0f0bdd0457fd8d77690caea2", "score": "0.6811103", "text": "def cli_verify_health(self):\n try:\n self.mcg_obj.exec_mcg_cmd(f\"namespacestore status {self.name}\")\n return True\n except CommandFailed as e:\n if \"Not Found\" in str(e) or \"NotFound\" in str(e):\n return False", "title": "" }, { "docid": "26593f8e8ce4b2974fe7912577edca0e", "score": "0.67862535", "text": "def server_in_use(serverinstance):\n command = 'netstat -an | grep :%s' % serverinstance.port\n p = subprocess.Popen(command, shell=True, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n close_fds=True)\n # count how many established connections there are\n established = sum(1 for line in p.stdout if 'ESTABLISHED' in line)\n if established > 1:\n return True\n return False", "title": "" }, { "docid": "a9144b2fb62c0bad51fef17dd281b0a9", "score": "0.676453", "text": "def node_wait_healthy(self, node, timeout=300):\n time_start = time.time()\n while True:\n try:\n c = self.control_shell(f\"{self.docker_compose} ps {node} | grep {node}\", timeout=timeout)\n if c.exitcode == 0 and 'Up (healthy)' in c.output:\n return\n except IOError:\n raise\n except ExpectTimeoutError:\n self.close_control_shell()\n timeout = timeout - (time.time() - time_start)\n if timeout <= 0:\n raise RuntimeError(f\"failed to get docker container healthy status for the {node} service\")", "title": "" }, { "docid": "3905a5d6b5f3b18309886efc139a5a00", "score": "0.6741289", "text": "def alive(self) -> bool:\n return self.client.is_alive()", "title": "" }, { "docid": "427751bf12934272534ce00ea788d61a", "score": "0.6737351", "text": "def is_healthy():\n try:\n connection = get_db_connection()\n connection.execute('SELECT id FROM posts;').fetchone()\n connection.close()\n return True\n except Exception as e:\n app.logger.info(e)\n return False", "title": "" }, { "docid": "dcb555747df88ae5548fd138995a117a", "score": "0.6667211", "text": "def is_server_running():\n try:\n procs = [p for p in psutil.get_process_list() if 'tmsi_server' in p.name]\n return procs\n except psutil.NoSuchProcess as e:\n return False", "title": "" }, { "docid": "3138db7e3785e7252e7045c0be536ab0", "score": "0.66480696", "text": "def _is_started(self):\n cmd = \"sudo service watchdog status\"\n run_process(cmd) \n try: \n import subprocess\n output = subprocess.getoutput('ps 
-A')\n if 'watchdog' in output:\n self.status['service_state'] = True\n else:\n self.status['service_state'] = False \n\n except Exception:\n self.started.set()\n log.error(NAME, 'System watchodg plug-in:\\n' + traceback.format_exc())", "title": "" }, { "docid": "f956818987f1321a7ac6f071b1a5c976", "score": "0.66398555", "text": "async def health_check(self):\n if self._websocket_to_connection_task and self._connection_to_websocket_task:\n return not self._websocket_to_connection_task.done() and not self._connection_to_websocket_task.done() and \\\n not self._websocket.closed and not self._connection_reader.at_eof()\n return False", "title": "" }, { "docid": "d22a8fafb9ea33dfba4e0bc0e1ff0b9d", "score": "0.663666", "text": "def is_cluster_healthy(self):\n return self.ceph_not_health_error() and pod.wait_for_pods_to_be_running()", "title": "" }, { "docid": "e0ce67c120247ca8511bd4c4aa3a4dbb", "score": "0.6630787", "text": "def is_running(self):\n response = None\n url = baseUrl + \"/status\"\n try:\n response = urllib2.urlopen(url, timeout=5)\n if response.code == 200:\n return True\n else:\n return False\n except URLError:\n return False\n finally:\n if response:\n response.close()", "title": "" }, { "docid": "e564ab1451347b995163443167221204", "score": "0.6607326", "text": "def is_alive():\n pass", "title": "" }, { "docid": "c8df63e2af7d288dcc6ee38f6ac330ea", "score": "0.6593553", "text": "def health_check() -> bool:\n print(\"running health check through load balancer\")\n # TODO this information should come from the ansible inventory file\n LB1_PUBLIC_IP = \"173.255.245.83\"\n LB2_PUBLIC_IP = \"212.71.246.209\"\n return dns_health_check(LB1_PUBLIC_IP) and dns_health_check(LB2_PUBLIC_IP)", "title": "" }, { "docid": "f60d71e655e56a26e9f44d51a97f3d5e", "score": "0.6586771", "text": "def is_alive(host, path=\"/\", port={{ kube_apiserver_insecure_port }}, timeout=2):\n try:\n conn = httplib.HTTPConnection(host, port, timeout)\n conn.request(\"GET\", path)\n conn.sock.settimeout(2)\n return conn.getresponse().status == 200\n except (StandardError, socket.error) as err:\n return False\n finally:\n conn.close()", "title": "" }, { "docid": "1de663a89181fae8541c6f8616105d7c", "score": "0.6574906", "text": "def isUp(self):\n try:\n data = self.send('/isUp')\n return True if data['response'] == 'ok' else False\n # except Exception as e:\n # if e.errno == errno.ECONNREFUSED:\n # return False\n except:\n return False", "title": "" }, { "docid": "bd516797158677a9a7a3f2e01c7aaf22", "score": "0.65665317", "text": "def check_health(self):\n if self.health_check_interval and time.time() > self.next_health_check:\n self.ping(reconnect=True)\n self.next_health_check = time.time() + self.health_check_interval", "title": "" }, { "docid": "103208e107395aa7c6dfc69ba32928e4", "score": "0.6557455", "text": "def is_healthy(self):\n assert self.raw_ptr is not None\n\n return lib.srl__consul__get_status(self.raw_ptr) == lib.STATUS_OK", "title": "" }, { "docid": "3e98fda96884f84af0e10633865103f5", "score": "0.65455925", "text": "def _verify_running(self):\n try:\n self.session.get(\"http://{}:{}/\".format(self.host, self.port))\n except ConnectionError:\n # Expecting connection error if LiteServ is not running on the port\n return False\n\n return True", "title": "" }, { "docid": "4386b2ea87fd0729b1ed7e4277bc7841", "score": "0.65367895", "text": "def health_check():\n return jsonify(dict(message='Healthy App Server')), 200", "title": "" }, { "docid": "f103e76e7da7a51918246e3ace2efc5c", "score": "0.65251034", "text": 
"def server_running():\n # NOTE: A bug in server_is_running means we cannot specify the port.\n is_running = server_is_running()\n if is_running is None or is_running is False:\n run_status = typer.style('NOT RUNNING', fg=typer.colors.RED, bold=True)\n typer.secho(f'Server status: {run_status}')\n\n return is_running", "title": "" }, { "docid": "ec45593872ca6fc03243cf754c5c459e", "score": "0.65210384", "text": "def health_check():\n return 'Ok'", "title": "" }, { "docid": "aacef74bda4aa8928b9abf6b15841482", "score": "0.650866", "text": "def check_is_alive():\n for cmd_lst in [[BZIP2_COMMAND, '-h'],\n ['sudo', '-n', DOCKER_COMMAND, 'version']]:\n DockerHandler._run_shell_command(cmd_lst)\n logger.debug(\"%s passed.\", \" \".join(cmd_lst))", "title": "" }, { "docid": "93801735c01b01cf5fc7f01c0e6e0988", "score": "0.64817846", "text": "def is_healthy(self):\n return self._is_healthy", "title": "" }, { "docid": "827e9c23e66e58c885dc65c50e24737c", "score": "0.64801097", "text": "def check_monit_running(duthost):\n monit_services_status = duthost.get_monit_services_status()\n if not monit_services_status:\n return False\n\n return True", "title": "" }, { "docid": "bed5fddab6cf96105ffc04dfdba9405f", "score": "0.6473962", "text": "def isalive(self):\n if self.pid is None:\n return False\n while True:\n try:\n pid, status = os.waitpid(self.pid, os.WNOHANG)\n except OSError as e:\n if e.errno == errno.EINTR:\n continue\n elif e.errno == errno.ECHILD:\n pid, status = (self.pid, None)\n break\n else:\n raise\n break\n if pid == 0:\n return True\n assert pid == self.pid\n self.pid = None\n if status is not None:\n self.termsig = os.WTERMSIG(status) if os.WIFSIGNALED(status) else None\n self.exitstatus = os.WEXITSTATUS(status) if os.WIFEXITED(status) else None\n self.close()\n return False", "title": "" }, { "docid": "8cc7ccc25d03cb5183ef00f3e0840d5b", "score": "0.64586353", "text": "def is_pid_alive(self, process_id):\n return self.test_query(\n 'ps -p {pid}'.format(pid=process_id), re.DOTALL\n )", "title": "" }, { "docid": "97a03a7614ecbc828a0e04399f5dc538", "score": "0.643342", "text": "def health_check(self):\n return self.backend.health_check()", "title": "" }, { "docid": "77ff2fd29cf6f5457e08496505c3cd8d", "score": "0.64282256", "text": "def healthcheck():\n\n return Response('it works', status=200)", "title": "" }, { "docid": "9609845fd860f20bd8149d1a52c3bc05", "score": "0.6425296", "text": "def isRunning(daemon):\n return call([daemon, \"status\"], stdout=PIPE, stderr=STDOUT) == 0", "title": "" }, { "docid": "93013743ab6d2cd05d6d3f083daa387f", "score": "0.6424151", "text": "def check_running():\n try:\n os.kill(pid, 0)\n except OSError:\n return False\n else:\n return True", "title": "" }, { "docid": "b3075306f8d0ca407e973bdf3d7caa72", "score": "0.6421361", "text": "def has_started(self):\n return self._server is not None", "title": "" }, { "docid": "25e2953dbfb8aed084b1c0d51d111c2d", "score": "0.6395491", "text": "def isInstanceHealthy(self):\n if (self.type == DbClusterStatus.INSTANCE_TYPE_COORDINATOR):\n if (self.status != DbClusterStatus.INSTANCE_STATUS_NORMAL):\n return False\n\n if (self.type == DbClusterStatus.INSTANCE_TYPE_GTM):\n if (self.status == DbClusterStatus.INSTANCE_STATUS_PRIMARY):\n return True\n elif (self.status == DbClusterStatus.INSTANCE_STATUS_STANDBY):\n if (self.connStatus != DbClusterStatus.CONN_STATUS_NORMAL):\n return False\n else:\n return False\n \n if (self.type == DbClusterStatus.INSTANCE_TYPE_DATANODE):\n if (self.status == 
DbClusterStatus.INSTANCE_STATUS_PRIMARY):\n return True\n elif (self.status == DbClusterStatus.INSTANCE_STATUS_DUMMY):\n return True\n elif (self.status == DbClusterStatus.INSTANCE_STATUS_STANDBY):\n if (self.haStatus != DbClusterStatus.HA_STATUS_NORMAL):\n return False\n else:\n return False\n\n return True", "title": "" }, { "docid": "b6cb30d2ad45db26e40d2bec433a62f8", "score": "0.63948524", "text": "def report_health(self):\n return True", "title": "" }, { "docid": "f97ef5b7950d4110a9a6b3614907d699", "score": "0.6377732", "text": "def test_server_is_up_and_running(self):\n response = request.urlopen(self.get_server_url())\n self.assertEqual(response.code, 200)", "title": "" }, { "docid": "3ecd273b649c6a1f44c002502e278b97", "score": "0.6373472", "text": "def healthy():\n return \"Healthy as it should.\"", "title": "" }, { "docid": "1ee9d989e399148484508be4d609cafd", "score": "0.63715696", "text": "def is_ready(\n ctrl_address: str, timeout: float = 1.0, logger=None, **kwargs\n ) -> bool:\n try:\n from grpc_health.v1 import health_pb2, health_pb2_grpc\n\n response = send_health_check_sync(ctrl_address, timeout=timeout)\n return (\n response.status == health_pb2.HealthCheckResponse.ServingStatus.SERVING\n )\n except RpcError as exc:\n if logger:\n logger.debug(f'Exception: {exc}')\n return False", "title": "" }, { "docid": "b54d2c40658c48b002cfa06804163603", "score": "0.63712436", "text": "def healthcheck():\n return Health()", "title": "" }, { "docid": "600eef654d130f43ab429abcff5f5808", "score": "0.636846", "text": "def isNodeHealthy(self):\n instances = self.coordinators + self.gtms + self.datanodes\n \n for inst in instances:\n if (not inst.isInstanceHealthy()):\n return False\n \n return True", "title": "" }, { "docid": "3bdfd6c8fe10a17ca585c4f7010b18fe", "score": "0.6357697", "text": "def is_alive(self):\n # if self.current_health <= 0:\n # False\n # else:\n # True", "title": "" }, { "docid": "587d197fbb3896383ea8201cf1d5bcbf", "score": "0.6353707", "text": "def get_healthz():\n url = 'https://localhost:8443/healthz'\n ret = 'FAIL'\n with settings(warn_only=True):\n ret = run('curl -k %s' % url)\n fprint('Healthz status: %s' % ret)\n return ret == 'OK'", "title": "" }, { "docid": "9a8d28d3cd02ac39550763a081196c59", "score": "0.63533527", "text": "def jenkins_healthcheck(host, port):\n result = False\n r = requests.get('http://{host}:{port}/'.format(host=host, port=port))\n\n if 'X-Jenkins' in r.headers: # The header exists\n if r.headers['X-Jenkins']: # The header contains data\n result = True\n\n return result", "title": "" }, { "docid": "63403eccd7f5a34634198458ff84fda2", "score": "0.6347442", "text": "def check_server():\n return jsonify({\"status\": True, \"message\": \"Running!!!\"}), 200", "title": "" }, { "docid": "e332389501ebcbcc4d9da8d9be8dbb15", "score": "0.63458306", "text": "def is_running(process):\n return process.poll() is None", "title": "" }, { "docid": "0fae498ea5374a290c2432ebd3a36ae9", "score": "0.6336625", "text": "async def healthcheck(cls) -> bool:\n if cls._consumer is None:\n return False\n\n try:\n node_id = cls._consumer._client.get_random_node()\n return cast(bool, await cls._consumer._client.ready(node_id))\n except KafkaError:\n return False", "title": "" }, { "docid": "fb5fab6bb23c01ea12befaf0682ba0d7", "score": "0.6332984", "text": "def is_alive(self) -> bool:\n return True if self.total_health > 0 and self.health > 0 else False", "title": "" }, { "docid": "83bf8fa1db988f244835aedea7e2d0af", "score": "0.6317607", "text": "def 
server_is_active(self):\r\n\r\n try:\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.settimeout(0.05)\r\n s.connect((self.hostaddr, self.available_port))\r\n s.close()\r\n except socket.timeout:\r\n return False\r\n except socket.error as error:\r\n if error.errno == errno.ECONNREFUSED:\r\n return False\r\n else:\r\n logger.error(\r\n 'Unexpected error in `server_is_active`: {}'.format(error)\r\n )\r\n return False\r\n else:\r\n return True", "title": "" }, { "docid": "dc76e8d587c5795f10371890cd9e94b7", "score": "0.63151604", "text": "def is_alive(self):\n return self.hostapd_thread.is_alive()", "title": "" }, { "docid": "cda2d33f9296434e41ca16f6f634fc47", "score": "0.6310389", "text": "def is_running(self):\n proc = Popen(['service', self.name, 'status'], stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n \n # Return the status\n return True if ('running' in out.rstrip()) else False", "title": "" }, { "docid": "68cf9ddff354f5dca8057e17dc457c72", "score": "0.6297017", "text": "def is_alive(self):\n for worker in self._workers:\n if worker.is_alive():\n return True\n return False", "title": "" }, { "docid": "f59e2442e7d7145cd5d02998bf1605a4", "score": "0.628832", "text": "def alive():\n cmd = \"echo alive\"\n run(cmd)", "title": "" }, { "docid": "2db58d508ed33921578b778cfd442b85", "score": "0.6281342", "text": "def status(self):\n\n pid = self.get_pid()\n if pid is None:\n message = 'Hiveary Agent is not running\\n'\n sys.stdout.write(message)\n sys.exit(1)\n\n # Check for the existence of a process with the pid\n pid_exists = psutil.pid_exists(pid)\n if not pid_exists:\n message = 'Pidfile contains pid %s, but no running process could be found\\n' % pid\n sys.stderr.write(message)\n sys.exit(1)\n\n message = 'Hiveary Agent is running with pid %s\\n' % pid\n sys.stdout.write(message)\n sys.exit()", "title": "" }, { "docid": "7668ac37998c433cc89cb579e6219f27", "score": "0.6277526", "text": "def isalive(self):\n if not self._pipe is None:\n return self._pipe.poll() is None\n\n if not self._ready:\n return False\n\n ### For this part I owe a great deal of acknowledgement to \n ### the pexpect project, since the following is basically \n ### copy and paste\n pid = None\n status = None\n i = 0\n\n while pid == 0 and i<2: # TODO: Fix this part and test it properly\n try:\n pid, status = os.waitpid(self.pid, 0) # TODO: either os.WNOHANG or 0\n except OSError, e: # No child processes\n if e[0] == errno.ECHILD:\n raise ExceptionPexpect ('isalive() encountered condition where \"terminated\" is 0, but there was no child process. 
Did someone else call waitpid() on our process?')\n else:\n raise\n i+=1\n\n if pid == 0:\n return True\n\n\n if not status is None and ( os.WIFEXITED (status) or os.WIFSIGNALED (status) ): \n self._ready = False\n return False\n return True\n # Copy/past end", "title": "" }, { "docid": "c01e0c72c989127553c54bf38073d479", "score": "0.6276047", "text": "def is_running(self):\n return bool(self._process and self._process.poll() is None)", "title": "" }, { "docid": "3d9a59717b02d596f6030d0100e1d1a2", "score": "0.62755704", "text": "def is_running(self):\n with settings(warn_only=True):\n status = sudo('/etc/init.d/nginx status')\n if status is not None and status.find(\"nginx is running\") >= 0:\n return True\n else:\n return False", "title": "" }, { "docid": "de8ec9d9492a78b604e05ceeadd16a7b", "score": "0.62752646", "text": "def health():\n logger.debug(\"Processed health-check request.\")\n\n return \"API health ok.\"", "title": "" }, { "docid": "78cf00071b267eb826d41836847eed05", "score": "0.6275174", "text": "def is_flume_process_live(pid_file):\n live = False\n\n try:\n check_process_status(pid_file)\n live = True\n except ComponentIsNotRunning:\n pass\n\n return live", "title": "" }, { "docid": "ac17212c5722d124ec7a61b20bcf212f", "score": "0.6266667", "text": "def check(self):\n\n try:\n if self.url == '':\n raise KeyError\n\n except KeyError:\n self.error(\"Please specify a FXserver URL inside the fxserver.conf!\", \"Disabling plugin...\")\n\n return False\n\n # Check once if FXserver is running when host is localhost.\n if self.host in ['localhost', '127.0.0.1']:\n FXserver_running = False\n\n pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]\n\n for pid in pids:\n try:\n if b'FXServer' in open(os.path.join('/proc', pid, 'cmdline').encode(), 'rb').read():\n FXserver_running = True\n break\n\n except IOError as e:\n self.error(e)\n\n if FXserver_running is False:\n self.error(\"No local FXserver running. Disabling plugin...\")\n\n return False\n\n else:\n self.debug(\"FXserver process found. 
Connecting...\")\n\n UrlService.check(self)\n return True", "title": "" }, { "docid": "48a9d171d795f4fa6e19e53cc0368819", "score": "0.6264278", "text": "def healthy(self):\n return self._healthy", "title": "" }, { "docid": "48a9d171d795f4fa6e19e53cc0368819", "score": "0.6264278", "text": "def healthy(self):\n return self._healthy", "title": "" }, { "docid": "fa4f7072ac2b76b80a9d2a080947cd82", "score": "0.6259043", "text": "def celery_is_ok(self) -> bool:\n try:\n i = celery_app.control.inspect()\n availability = i.ping()\n if not availability:\n return False\n except Exception as e:\n logging.error('Celery error: %s', e)\n return False\n return True", "title": "" }, { "docid": "4ee1b4ff2f44b62bccb1c8d03d4a898b", "score": "0.6255324", "text": "def check_running():\n\n all_procs = subprocess.check_output(['ps', 'aux'])\n if all_procs.count(\" python rtmp_stream_delayer.py\") > 1:\n return True", "title": "" }, { "docid": "5f95e7a266e0502a6a9d27ed1d4aadc9", "score": "0.625078", "text": "def is_healthy(self):\n if self.reason is not None or not self.is_running():\n return False\n if any(self._crashreports()):\n log.debug(\"crash report found\")\n return False\n for check in self._checks:\n if check.check():\n log.debug(\"%r check abort conditions met\", check.name)\n return False\n return True", "title": "" }, { "docid": "e3eda328357cbfeb20e3a515913b5ab1", "score": "0.624266", "text": "def checkAlive(self):\n try:\n r = requests.get(\"%s/rest/settings.json\" % self._url, \\\n auth=(self._user, self._passwd), \\\n headers={\"Accept\": \"text/json\"}).json()\n\n return 200 if \"global\" in r.keys() else 404\n except:\n return 404", "title": "" }, { "docid": "09c012a6887f06a97aa4f7bbdf9fdf82", "score": "0.62332094", "text": "def is_alive(self):\n return self.running", "title": "" }, { "docid": "b8ce33af272f77faa9ef0cf548e6008e", "score": "0.62248707", "text": "def checkServer(self):\n try:\n self.client.server_info()\n self.db = self.client.logRecipe\n self.__startTables()\n\n return True\n except:\n return False", "title": "" }, { "docid": "c5e065521c41c372dd517c2a3b98c475", "score": "0.6218173", "text": "def check_server(server, port: int, timeout: int) -> bool:\n return Server(server).check_service(port, timeout)", "title": "" }, { "docid": "2841eed84f33d4d8dae9aad5dbed824a", "score": "0.62159085", "text": "def test_healthcheck(self):\n resp = self.app.get('/healthcheck')\n self.assertEqual(resp.status_code, HTTP_200_OK)", "title": "" }, { "docid": "5e7927edd5759f58f7d3d27aefb74bb4", "score": "0.6189333", "text": "def _check_DB_ready(self):\n logger.info('TestMode:installcheck, check the PG server is running......')\n\n child = subprocess.run('ps -ef |grep postgres', shell=True, stdout=subprocess.PIPE)\n if child.returncode != 0:\n logger.info('Postgremaster process is NOT running, please confirm your DB is started!')\n exit()\n else:\n bin_path = get_installation_bin_path()\n psql = os.path.join(bin_path, 'psql')\n lib_path = get_installation_lib_path()\n logger.debug('DBServer::is_ready() bin: %s, lib: %s'\n % (psql, lib_path))\n #env = {'LD_LIBRARY_PATH': lib_path,'HG_BASE':_INSTALLDIR}\n env = {'LD_LIBRARY_PATH': lib_path,'PGPASSWORD': config.password,'HG_BASE':_INSTALLDIR}\n cmd = \" \".join([\n psql, '-U', str(config.user), '-p', str(_PORT), str(_DBNAME), '<', '/dev/null'\n ])\n child = subprocess.run(\n cmd, shell=True, env=env, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n\n if child.returncode != 0:\n logger.debug(\"psql connect fail for reason %s\"\n % child.stderr)\n 
logger.info('DB server cannot be connected, please confirm your DB is ready!')\n exit()\n logger.info('PG server is ready,start to run test......')", "title": "" }, { "docid": "1f8255630923fa649753456bc583c298", "score": "0.61873025", "text": "def show_running_status(pid_file):\n if os.path.exists(pid_file):\n cit.warn(\"uwsgi is running @ \" + pid_file)\n return True\n else:\n cit.info(\"No uwsgi running\")\n return False", "title": "" }, { "docid": "c26700c31d92cd94377cf2fe44c34d22", "score": "0.6174648", "text": "def is_glusterd_running(servers):\n if isinstance(servers, str):\n servers = [servers]\n\n cmd1 = \"service glusterd status\"\n cmd2 = \"pidof glusterd\"\n cmd1_results = g.run_parallel(servers, cmd1)\n cmd2_results = g.run_parallel(servers, cmd2)\n\n _rc = 0\n for server, ret_values in cmd1_results.iteritems():\n retcode, _, _ = ret_values\n if retcode != 0:\n g.log.error(\"glusterd is not running on the server %s\", server)\n _rc = 1\n if cmd2_results[server][0] == 0:\n g.log.error(\"PID of glusterd is alive and status is not \"\n \"running\")\n _rc = -1\n return _rc", "title": "" }, { "docid": "95048a07e7deefc7b8dbfbd21923dabc", "score": "0.61703557", "text": "def healthcheck():\n return jsonify(status='ok')", "title": "" }, { "docid": "e86d39a50ba04c9cf32405e51c9f418f", "score": "0.6155021", "text": "def health_check(self, context):\n\n return self._state_operations.health_check()", "title": "" }, { "docid": "8603b59d518adfee57051fb7562426ff", "score": "0.61378294", "text": "def test_health_check(client):\n response = client.get(\"/live\")\n\n assert response.status_code == 200", "title": "" }, { "docid": "024629f83d3f6d66cd0080c05d98fb84", "score": "0.61218977", "text": "def is_monitoring(self):\n for key in self.r.scan_iter(\"status:*hera_snap_redis_monitor.py\"):\n state = self.r.get(key)\n return state == \"alive\"\n # no status key => monitor isn't running\n return False", "title": "" }, { "docid": "17f8b5ed2f3e771a36facfa4b6763fca", "score": "0.61124814", "text": "def check(self):\n # reread conf file every time so we can see changes\n success = True\n for cluster_name, monitors in list(self.cluster_monitors.items()):\n if self.is_active(cluster_name):\n unreachable = 0\n for monitor in monitors:\n try:\n success = monitor.check()\n except IOError:\n try:\n # check the network connection\n urlopen('http://google.com')\n except IOError:\n logger.log('no network connection')\n else:\n unreachable += 1\n logger.error('Failed to get %s' % monitor.url)\n\n # we've tried at least one monitor by this point\n # if it was reachable (or the netowrk is down) so\n # we don't want to check any more monitors\n if unreachable == 0:\n break\n\n if unreachable >= (len(monitors) + 1) // 2:\n sent = self.send_email('More than half of HAProxy instances are'\n 'unreachable on %s' % cluster_name + \". 
Please refer to doc https://docs.google.com/a/room77.com/document/d/1Ii1cxpIucAU3Qb63Zv3Cc-Ymf9WX6a945guZ_Cg01NI/edit#heading=h.7pw52dk9gnzc\", logger)\n success = False\n return success", "title": "" }, { "docid": "2f1ee667036d7ffb5b391170e329583f", "score": "0.60992473", "text": "def check_running(self):\n all_running = True\n\n to_delete = []\n for process in self.__processes:\n code = process.poll()\n if code is not None:\n # Process exited.\n logger.error(\"Process %d exited unexpectedly with return code %d\" % \\\n (process.pid, code))\n\n all_running = False\n to_delete.append(process)\n\n for process in to_delete:\n self.__processes.remove(process)\n\n return all_running", "title": "" }, { "docid": "d74d94c80d8f0dae09928f1751e2cb4c", "score": "0.6098628", "text": "def health():\n return HealthResponse(available=True)", "title": "" }, { "docid": "e3f60a33176d0500473e1efdc229e228", "score": "0.60946894", "text": "def _is_docker_running(self):\n try:\n self.client.ping()\n return True\n except (ConnectionError, AttributeError):\n if not self.silent:\n self.logger.error(\"Failed to connect to Docker Server. Is it running?\")\n self.logger.error(\"Not able to list or use local tools.\")\n return False", "title": "" }, { "docid": "d94a5e7ed329aaa23d8fa76af283c765", "score": "0.60946524", "text": "def check_timeout(self):\n while self.runtime.get() <= self.opts.process_timeout:\n if not any(p.is_alive() for p in self.procs): # Check if any process has exited\n break\n time.sleep(1)\n else:\n self.logger.info(\"Timed out.\")\n self.terminate_early()", "title": "" }, { "docid": "2fde2c57c9f9d8d2b1910c2259be02b6", "score": "0.6081799", "text": "def check_availability(self):\n r = requests.get(self._host + \"/watch\")\n if r.text is not None and r.text == \"OK\":\n return True\n return False", "title": "" }, { "docid": "ec9a275cd0a7876d830283ec51e3243e", "score": "0.6079923", "text": "def process_is_dead(process):\n\n # Only do this if os.kill exists for this platform (e.g. 
Windows doesn't\n # support it).\n if callable(getattr(os, \"kill\", None)) and reap_process(process.pid):\n return True\n\n # Then try to ping the process using its pipe.\n try:\n proc_is_alive = process.is_alive()\n except OSError:\n return True\n else:\n return not proc_is_alive", "title": "" }, { "docid": "f7be994fc5536aa2bd3331828f5a426d", "score": "0.6078752", "text": "def server_is_shutdown(self) -> bool:\n assert self.curr_architect is not None, \"No architect to check\"\n return not self.curr_architect.server_is_running()", "title": "" }, { "docid": "8f5073230fa9d4a7b8bcda325e27a45a", "score": "0.6076144", "text": "def is_alive(self) -> bool:\n if self.socket_connection is None:\n return False\n return self.socket_connection.alive", "title": "" }, { "docid": "1d936cb3950d01d58f39f2d846bf2094", "score": "0.60738504", "text": "def isalive(self):\n return self._pipe.isalive()", "title": "" }, { "docid": "6e21b7f72a0e35a951e50a07ef84c4de", "score": "0.60737747", "text": "async def async_is_ready(\n ctrl_address: str, timeout: float = 1.0, logger=None, **kwargs\n ) -> bool:\n try:\n from grpc_health.v1 import health_pb2, health_pb2_grpc\n\n response = await send_health_check_async(ctrl_address, timeout=timeout)\n return (\n response.status == health_pb2.HealthCheckResponse.ServingStatus.SERVING\n )\n except RpcError as exc:\n if logger:\n logger.debug(f'Exception: {exc}')\n return False", "title": "" }, { "docid": "e08158f189e5d5934bbce64809b7e784", "score": "0.60735595", "text": "def active(self):\n resp = self.server.head(\"get\", \"/processes/%s\" % self.pid)\n if resp.code == 200:\n return True\n return False", "title": "" }, { "docid": "8e4bbe7f4340649e41e7fe21af772fd1", "score": "0.6073455", "text": "def status_check(self):\n try:\n # The is_master command is cheap and require auth.\n self.connection.admin.command('ismaster')\n except Exception as error:\n logging.error(error)\n return False\n else:\n return True", "title": "" }, { "docid": "e9bbd9fa6bd4b08ad96fb495d779b3ff", "score": "0.6070528", "text": "def is_running(self):\n if self._process is None:\n ret = self.internal_run(\"systemctl\", \"show\", \"-p\", \"MainPID\", self.get_service_name())\n _, mainpid = ret.stdout.split(\"=\")\n if mainpid == \"0\":\n return False\n self._process = psutil.Process(int(mainpid))\n return self._process.is_running()", "title": "" } ]
101c5fb6c67e4feb997767ddaf69b7dc
today returns today's date
[ { "docid": "666a6159a5dc480c0343a4ebadb44753", "score": "0.8839325", "text": "def today() -> date:\n return datetime.datetime.now().date()", "title": "" } ]
[ { "docid": "c27bb3e457eaa5764e0d5be961713deb", "score": "0.9091201", "text": "def get_today_date():\n return datetime.date.today()", "title": "" }, { "docid": "dfdd837ef405d39242b663e7550ab03e", "score": "0.87969136", "text": "def get_today_date():\r\n now = datetime.datetime.now()\r\n return str(now.year) + \"-\" + str(now.month) + \"-\" + str(now.day)", "title": "" }, { "docid": "67b868381bb76e6e53c5d4b37a9497b4", "score": "0.8676119", "text": "def get_today_date(self):\r\n today = str(datetime.date.today().year) + \"-\" + f\"{datetime.date.today():%m}\" + \"-\" + f\"{datetime.date.today():%d}\" # format date to yyyy-mm-dd\r\n return today", "title": "" }, { "docid": "7752b7bf2e03e33afdb1f1b9e7a905fb", "score": "0.8409512", "text": "def today():\n today = datetime.datetime.today()\n day = str(today.day)\n if len(day) == 1: day = '0' + day\n month = calendar.month_abbr[today.month]\n year = str(today.year)\n return day + '-' + month + '-' + year", "title": "" }, { "docid": "925757c3322c84524c31d0d7a2d2dfed", "score": "0.8375385", "text": "def today_datetime():\n return datetime.now()", "title": "" }, { "docid": "09c10f3d02a4413b7524e8a611dca2f0", "score": "0.8297048", "text": "def get_today():\n date_today = datetime.date.today().timetuple()[:3]\n return date_today", "title": "" }, { "docid": "06c954cd15b32ed6036bf8013de554f5", "score": "0.8252228", "text": "def current_date(self):\n return self.today.strftime('%Y%m%d')", "title": "" }, { "docid": "a50c238446a7790a2e516931df7eed9a", "score": "0.8159731", "text": "def today(self):\n\n return self._filter_datetime_input(pds.datetime.today())", "title": "" }, { "docid": "267adbd6b863a2cdf8448ca51d37ef44", "score": "0.80904657", "text": "def date(self):\n return datetime.date.today()", "title": "" }, { "docid": "695a373c37626744695126860f9d492c", "score": "0.7977346", "text": "def get_Date():\n Date=dt.date.today()\n\n return Date", "title": "" }, { "docid": "d29e769f836de06ce2eafda24672fc03", "score": "0.7858096", "text": "def get_date():\n return datetime.today().strftime(\"%m-%d-%Y\")", "title": "" }, { "docid": "68290e9c9794c8404e3032486088ac1d", "score": "0.77970606", "text": "def getdate():\n import datetime\n return datetime.datetime.now()", "title": "" }, { "docid": "3f06bfdeb5b1f6b5bbbbd6abae8d1392", "score": "0.7758534", "text": "def today_date():\n\n tlist = tcnv.currentTime()\n year = str(tlist[0])\n mon = tlist[1]\n day = tlist[2]\n\n syear = year[2] + year[3]\n smon = str(mon)\n if mon < 10:\n smon = '0' + smon\n sday = str(day)\n if day < 10:\n sday = '0' + sday\n\n ldate = smon + '/' + sday + '/' + syear\n\n return ldate", "title": "" }, { "docid": "5fe0057bbd19917211b3bcae34f5ab22", "score": "0.77537894", "text": "def get_date():\n return date.today().strftime(\"%d.%m.%Y\")", "title": "" }, { "docid": "6451b433b44893eab619e9ee627411bf", "score": "0.77247894", "text": "def get_date():\n return datetime.datetime.now().date()", "title": "" }, { "docid": "f3fa7b7f9522a70f59cb1e522bf02d54", "score": "0.7619278", "text": "def current_date():\n return str(datetime.datetime.now()).split()[0]", "title": "" }, { "docid": "d5d7151380e6a9ff369e2efc2d866bbc", "score": "0.75049776", "text": "def getCurrentDate():\n\n\t\treturn time.strftime(\"%d-%b-%y\")", "title": "" }, { "docid": "fe6959c40e7946fcb89660e12ab5e6a9", "score": "0.7467948", "text": "def today(self):\n\t\tnow = datetime.utcnow()\n\t\tdiff = now - self.gday0\n\t\tself.reset()\n\t\tself.add_days(diff.days) \n\t\treturn self", "title": "" }, { "docid": 
"893389ac4eb9edffebd0e56847e985c1", "score": "0.7442568", "text": "def get_today_ymd():\n return time.strftime('%Y%m%d')", "title": "" }, { "docid": "b3110054258605e1edf8d0eb81114a62", "score": "0.7397814", "text": "def getDate():\n import datetime\n return datetime.datetime.now()", "title": "" }, { "docid": "051b083e2461a3ef4281a863e6bf2e28", "score": "0.7374603", "text": "def date_now():\n now = datetime.datetime.now()\n return now.strftime(\"2018-%m-%d\")", "title": "" }, { "docid": "7614111bc4e5c0ca298f7f2419319a3d", "score": "0.73410696", "text": "def daily(date=datetime.date.today()):\n return date", "title": "" }, { "docid": "c571f5cfa005356774dfbbe92ffdc8af", "score": "0.73200834", "text": "def time():\n return datetime.datetime.today()", "title": "" }, { "docid": "ddfcba9249dc780cc7d682960fc61260", "score": "0.73064524", "text": "def getDate(self):\n block = '.'\n argv_len = len(sys.argv)\n if argv_len > 1:\n year,mon,day= time.strftime('%Y'),time.strftime('%m'),time.strftime('%d')\n year = year[-1]\n mon = str(int(mon))\n day = str(int(day))\n ymd = year + block + mon + block + day\n else:\n today = datetime.date.today()\n #today = datetime.date(2013,4,1)\n one_day = datetime.timedelta(1)\n yesterday = today - one_day\n ymd_t = str(yesterday).split('-')\n year = ymd_t[0][-1]\n mon = str(int(ymd_t[1]))\n day = str(int(ymd_t[2]))\n ymd = year + block + mon + block + day\n self.date = ymd\n self.today_url = self.main_url + ymd + '/'\n print('#date==%s#'% self.date)", "title": "" }, { "docid": "525e462d39387d720237a6b69579239d", "score": "0.7275122", "text": "def getDate():\n\treturn str(datetime.datetime.now().date())", "title": "" }, { "docid": "0f639c5ac066fc60657bb410aeece045", "score": "0.7257863", "text": "def get_date():\n today = datetime.date.today()\n return today.day, today.month, today.year", "title": "" }, { "docid": "a67f67e2b021805d07e5aff29ef527a9", "score": "0.7256676", "text": "def get_current_date():\n date = datetime.datetime.now().astimezone()\n return get_date(date)", "title": "" }, { "docid": "d919fcd087debd837eb241562e2b01b0", "score": "0.7243084", "text": "def today(year=None):\n return datetime.date(int(year), _date.month, _date.day) if year else _date", "title": "" }, { "docid": "c2f4a52c29a3fed4ec829076bbb40f24", "score": "0.71997195", "text": "def get_todays_version():\r\n\r\n return datetime.date.today().strftime('%Y%m%d')", "title": "" }, { "docid": "86e9759c58f5f49f9aaec78a64f164ee", "score": "0.7178838", "text": "def getDate():\n date=str(datetime.date.today())\n\n if __name__ == \"__main__\":\n print(date)\n\n return date", "title": "" }, { "docid": "fef833990edc8c922bf6b0d3a8e0c571", "score": "0.71618223", "text": "def date_():\n year = datetime.datetime.now().year\n month = datetime.datetime.now().month\n date = datetime.datetime.now().day\n speak(\"The current date is\")\n speak(date)\n speak(month)\n speak(year)", "title": "" }, { "docid": "ef77588dfd24828ccbc43742399f8273", "score": "0.711331", "text": "def current_date(self):\n return self.game.sim.date", "title": "" }, { "docid": "846f69c61d9d7e58298fa1f47cfdef47", "score": "0.7091212", "text": "def get_current_date(self) -> ADate:\n return ADate.from_date(self._right_now.as_date())", "title": "" }, { "docid": "f86023e64afb9bf854eb59ef727209f7", "score": "0.706168", "text": "def date_for_retrieval():\n today = now()\n return today - datetime.timedelta(days=today.weekday())", "title": "" }, { "docid": "163d480b9c01513533cb381a8152f910", "score": "0.70495677", "text": "def get_current_date():\n 
utc = pytz.utc\n date = datetime.now(tz=utc)\n return date", "title": "" }, { "docid": "95d561b3f576589da192128d788a3a11", "score": "0.7039363", "text": "def get_today_ymdh():\n return time.strftime('%Y%m%d%H')", "title": "" }, { "docid": "19c7efb9f3b85e150eb435ae04cbb89a", "score": "0.7038526", "text": "def get_date(self):\n return self.date if self.date else datetime.datetime.now()", "title": "" }, { "docid": "92305ac91a0fbef7169a7d7e573a4ae7", "score": "0.7014282", "text": "def get_current_datetime():\n return datetime.datetime.now()", "title": "" }, { "docid": "0ba125df306406d9c4e5fdfc4c412f11", "score": "0.70028996", "text": "def getDate():\n current_time = datetime.datetime.now()\n day = current_time.day\n month = current_time.month\n year = current_time.year\n date = \"{dd}-{mm}-{yyyy}\".format(dd=day,mm=month,yyyy=year)\n return date", "title": "" }, { "docid": "d9d300fa0d0bf2c591440982b003547e", "score": "0.6981523", "text": "def get_date():\r\n current_time = datetime.now()\r\n result = (format_day(current_time.day) + \" \" + MONTHS[current_time.month-1]\r\n + \" \" + str(current_time.year))\r\n return result", "title": "" }, { "docid": "e4171291b7a54528910753142dc76f79", "score": "0.6959468", "text": "def today(tz: timezone.zoneinfo.ZoneInfo | None = None) -> datetime_.date:\n return date(timezone.now(), tz)", "title": "" }, { "docid": "429c0eae3c0913ebb31320580049ccad", "score": "0.6947072", "text": "def date(self):\n year = int(datetime.datetime.now().year)\n month = int(datetime.datetime.now().month)\n date = int(datetime.datetime.now().day)\n month_list = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\n self.speak(\"the current date is\")\n self.speak(date)\n self.speak(month_list[month - 1])\n self.speak(year)", "title": "" }, { "docid": "f117d92477e883d5b9b05d5130cb77d1", "score": "0.69459957", "text": "def get_date_time():\n return datetime.datetime.today().strftime(\"%m-%d-%Y %H:%M:%S\")", "title": "" }, { "docid": "8fae2a62431cee9516752a956ba9852e", "score": "0.69240767", "text": "def __getNow(self):\n today = datetime.datetime.today()\n return str(today.strftime(self.__timeFormat))", "title": "" }, { "docid": "5c965c4fa147e9cc29a3fce99026df05", "score": "0.6884292", "text": "def getCurrentDate(dateformat=DATE_FORMAT_DEFAULT):\n return datetime.now().strftime(dateformat)", "title": "" }, { "docid": "a8021d5fc3f154f3636e40252ac69b1a", "score": "0.6873087", "text": "def get_current_datetime():\n return timezone.now()", "title": "" }, { "docid": "47d96dcce4d601fb5d37738d0a008748", "score": "0.6870466", "text": "def get_datetime_now():\n return datetime.datetime.now()", "title": "" }, { "docid": "47569b152f5b772e3b226227956b03e4", "score": "0.6836275", "text": "def _current_date(cls):\n localtime = time.localtime()\n day = localtime.tm_mday\n month = localtime.tm_mon\n year = localtime.tm_year\n return {\n 'day': day,\n 'month': month,\n 'year': year\n }", "title": "" }, { "docid": "178b344ce7aaef1918c9fdc7c4329bf6", "score": "0.68354124", "text": "def is_today(self):\n return self.to_date_string() == self.now(self.timezone).to_date_string()", "title": "" }, { "docid": "8761de3df856ae309ea68519e524bee8", "score": "0.6829355", "text": "def printdate():\n printdate = datetime.datetime.now().strftime('%Y-%m-%d')\n return printdate", "title": "" }, { "docid": "acc888f7eba369b0c05d84691827fc98", "score": "0.6812435", "text": "def get_now() -> datetime:\n return datetime.now()", 
"title": "" }, { "docid": "f2bc91622b300d93ec01c6ce534b0d07", "score": "0.6806368", "text": "def get_current_date_string(self):\n now = datetime.datetime.now()\n return now.strftime(\"%Y/%m/%d %H:%M:%S.%f\")", "title": "" }, { "docid": "4c0e607f1a4e1b3313abc72785a67f09", "score": "0.680048", "text": "def today(self, pair=\"btcusd\"):\n return self._send_request('today', pair)", "title": "" }, { "docid": "5772ef354b90c3122acbfb1d3132586d", "score": "0.6787842", "text": "def today_for_request_tz(request):\n return convert_from_utc_to_request_tz(datetime.datetime.utcnow(), request).date()", "title": "" }, { "docid": "c7302b7a91fd6163d0d7e9d8c5caa8a8", "score": "0.67774993", "text": "def today_url():\n dt = DateTime()\n today = dt.ISO().split()\n return today[0]", "title": "" }, { "docid": "b5862e5e3f12c1459685fa8387c8a102", "score": "0.6776627", "text": "def today(cls, tz=None):\n return cls.now(tz).start_of('day')", "title": "" }, { "docid": "599790b8f0e8c9fd8d66ba541e453354", "score": "0.6757535", "text": "def now(self):\n\t\treturn datetime.datetime.now()", "title": "" }, { "docid": "f5a848e79cb715493eb0bfb88d71e8ed", "score": "0.6749901", "text": "def format_date_now() -> str:\n return datetime.datetime.now().strftime(\"%Y-%m-%d\")", "title": "" }, { "docid": "d7f45fe48a03175963b084b07a5126ab", "score": "0.67206675", "text": "def getDateString():\n\treturn datetime.now().strftime('%Y%m%d')", "title": "" }, { "docid": "ed15988e501d5e1cdbdc85fa227d0bd1", "score": "0.67085904", "text": "def get_formatted_date():\n today = datetime.datetime.today().strftime(\"%d-%b-%Y\")\n \"\"\"\n # for testing\n # today = '06-May-2020'\n \"\"\"\n return today", "title": "" }, { "docid": "8a5f382dd43b04d6afab19eb61968ef9", "score": "0.66998893", "text": "def get_default_date():\n return get_default_datetime().date()", "title": "" }, { "docid": "ba03f7497e654a484f5037a84b22dc85", "score": "0.6658084", "text": "def is_today(self, event: calendar_entry.EventEntry) -> date:\n return datetime.today().date() == event._date.date()", "title": "" }, { "docid": "bf4cf74038e591f1735a017f057002c2", "score": "0.665715", "text": "def current_date (fmt=\"%a %d-%m-%Y @ %H:%M:%S\"): \n\n return datetime.strftime(datetime.now(), fmt)", "title": "" }, { "docid": "c12007014c8ffe275c8708b712f19ca7", "score": "0.66493577", "text": "def __getCurrentLVDate(self):\n\n today = datetime.date.today()\n\n return lvutils.LVDate(today.year*12 + today.month-1)", "title": "" }, { "docid": "37490f7b4d657b2feb774d75dda15491", "score": "0.66346186", "text": "def test_date(self):\n self._roundtrip(datetime.datetime.today())", "title": "" }, { "docid": "28b8cd06c36bfde18ec7e486b244f149", "score": "0.6631205", "text": "def CURRENTDAY(self):\n return \"%s\" % self.utcnow.day", "title": "" }, { "docid": "e0bddf298bd50165560109ffb8f4186e", "score": "0.662278", "text": "def get_current_date_time(self):\n now = datetime.now()\n dt_string = now.strftime(\"%m/%d/%Y %H:%M\")\n return dt_string", "title": "" }, { "docid": "3a4277888dd6f32b2198cb4c0d436c6b", "score": "0.6621696", "text": "def get_date():\n now = datetime.datetime.now()\n\n return now.strftime(\"%m/%d/%Y %H:%M\")", "title": "" }, { "docid": "84fe21af9ded8a6fed17f419ae68eb4f", "score": "0.6614129", "text": "def format_date_now():\n \n year = datetime.now().strftime('%Y')\n month = datetime.now().strftime('%m')\n day = datetime.now().strftime('%d')\n\n phrase_date = year + '-' + month + '-' + day\n return phrase_date", "title": "" }, { "docid": "da09dcd35bf4825b82ffe14bb767f3cb", "score": 
"0.6609533", "text": "def get_today_ym():\n return time.strftime('%Y%m')", "title": "" }, { "docid": "cdc861886b6dddb7ee2ccac177df7a45", "score": "0.66034895", "text": "def now():\n return datetime.datetime.utcnow()", "title": "" }, { "docid": "7f5b9834d0350330a6d5580a1de5dd5b", "score": "0.6593197", "text": "def make_today() -> Day:\n now = dt.now(pytz.timezone(config.TIMEZONE))\n tomorrow = now + td(days=1)\n start = dt_to_milli(to_midnight(now))\n end = dt_to_milli(to_midnight(tomorrow))\n return Day(local_date=now.date(),start=start, end=end)", "title": "" }, { "docid": "3c164240dfe85104458bb7702558ece6", "score": "0.6579424", "text": "def archive_today(request, **kwargs):\r\n today = datetime.date.today()\r\n kwargs.update({\r\n 'year': str(today.year),\r\n 'month': today.strftime('%b').lower(),\r\n 'day': str(today.day),\r\n })\r\n return archive_day(request, **kwargs)", "title": "" }, { "docid": "52f173cb0f5829fdc0c8ec3fbe7da974", "score": "0.65776336", "text": "def get_task_date(self):\n now = dt.datetime.now()\n self.task_date = now.strftime('%d/%m/%Y')", "title": "" }, { "docid": "d1d2c25aa60d2187f35661ed29badd4d", "score": "0.6570923", "text": "def current_time():\n now = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")\n return now", "title": "" }, { "docid": "f8cfdd9a3c3ae0dbea59d0b896db0a90", "score": "0.65704906", "text": "def __init__(self, today_date):\n self.today_date = today_date", "title": "" }, { "docid": "9b606426116b0cc0a85ab48a7599f849", "score": "0.65687156", "text": "def now(self):", "title": "" }, { "docid": "6cc988a25768753f558c3b45c7f9ff71", "score": "0.6556906", "text": "def current_date_str():\n # datetime object containing current date and time\n now = datetime.now(timezone.utc)\n # convert between timezones with pytz\n # return string with right format\n return now.strftime(\"%Y-%m-%d\")", "title": "" }, { "docid": "786219f829a34727a171d684057c5dd2", "score": "0.6556283", "text": "def tell_me_date(self):\n return date_time.date()", "title": "" }, { "docid": "5fc7f7cd6d20d828c88a85907826d9bd", "score": "0.6538459", "text": "def get_date(self):\n return calendar.get_date()", "title": "" }, { "docid": "dbd63698900eb3887e153bfa41ecdd76", "score": "0.6530147", "text": "def get_date(self):\n return self.day", "title": "" }, { "docid": "514ca01cc66009f4800322174ff2eec0", "score": "0.65202266", "text": "def now():\n return datetime.now().isoformat()", "title": "" }, { "docid": "a66936f616a6c71f4eed985ba6d3de8a", "score": "0.6514136", "text": "def getNow():\n now = datetime.datetime.utcnow()\n return now", "title": "" }, { "docid": "a7d6b7de8ec6eba8e54fbf9b8bf6e428", "score": "0.6501469", "text": "def showToday(self):\n super().showToday()\n\n self.setSelectedDate(QDate.currentDate())", "title": "" }, { "docid": "afef08eee910efe4102c300bd616b270", "score": "0.64989114", "text": "def get_cur_date(delta=0, format=\"%Y%m%d\"):\n date = (datetime.date.today() - datetime.timedelta(days=delta)).strftime(format)\n return date", "title": "" }, { "docid": "d7997cfcff48d553e326835dcff43c27", "score": "0.64771116", "text": "def todayItems(request):\r\n lToday = datetime.datetime.now()\r\n return getDayItems(request, lToday.year, lToday.month, lToday.day, 0)", "title": "" }, { "docid": "e1b0b2af8e03f349c000725ad955741a", "score": "0.6464812", "text": "def currentDateString(self):\n return datetime.now().strftime(\"%Y%m%d_%H%M\")", "title": "" }, { "docid": "2451a55fab3c3bfa6feac6bb1f7beedc", "score": "0.64621246", "text": "def date():\n return datetime.datetime(2012, 1, 1, 12, 
0, 0)", "title": "" }, { "docid": "f115faeb79bfcb183a9881f9c1576752", "score": "0.64603573", "text": "def get_current_time():\n return datetime.now()", "title": "" }, { "docid": "11bc41d2bc8ab551d616774ea6b36c2a", "score": "0.6451577", "text": "def dtnow():\n return datetime.datetime.now(datetime.timezone.utc)", "title": "" }, { "docid": "d97e63b737be51bbbb2168489414201a", "score": "0.64264727", "text": "def full_day():\n dt = DateTime()\n if dt.day() < 10 and dt.month() < 10:\n today = \"0\" + str(dt.day()) + \"0\" + str(dt.month()) + str(dt.year())\n elif dt.day() < 10 and dt.month() >= 10:\n today = \"0\" + str(dt.day()) + str(dt.month()) + str(dt.year())\n elif dt.day() >= 10 and dt.month() >= 10:\n today = str(dt.day()) + str(dt.month()) + str(dt.year())\n else:\n today = str(dt.day()) + \"0\" + str(dt.month()) + str(dt.year())\n return today", "title": "" }, { "docid": "0389973ab149777d921502471f461f3d", "score": "0.6425578", "text": "def today_str(output_format = '%Y%m%d'):\n return datetime.now().strftime(output_format)", "title": "" }, { "docid": "52d69f59f5253de242caccef7f669ecc", "score": "0.640119", "text": "def test_just_today(self):\n after, before = codemetrics.internals.handle_default_dates(self.today, None)\n self.assertEqual(self.today, after)\n self.assertIsNone(before)", "title": "" }, { "docid": "a952993612652c7fc2cfe69e9c29d479", "score": "0.6381586", "text": "def date_for_new_snippet():\n today = now()\n if (today.weekday() < 1):\n aligned = today - datetime.timedelta(days=today.weekday())\n else:\n aligned = today + datetime.timedelta(days=(7 - today.weekday()))\n return aligned", "title": "" }, { "docid": "2a4594f350de05c8ac3f2310f0c0f4ea", "score": "0.6371394", "text": "def now() -> datetime:\n\n return datetime.datetime.utcnow()", "title": "" }, { "docid": "2ff552a36928d4a69b51f158f50f9f71", "score": "0.6355535", "text": "def now():\n return datetime.datetime.now(tzlocal())", "title": "" }, { "docid": "a881e31ebed7dcc1ca666b8ce76d3592", "score": "0.63481027", "text": "def getCurrentTime(self):\n curTime = datetime.now()\n cur_date = curTime.strftime('%Y-%m-%d %H:%M')\n return str(cur_date)", "title": "" }, { "docid": "8249b8b9b215bf1107d694921860d46f", "score": "0.63471377", "text": "def get_date():\n return (datetime.now(ZoneInfo('Asia/Kolkata')) +\n timedelta(hours=6, minutes=30)).strftime(\"%Y/%m/%d\")", "title": "" }, { "docid": "21cd433ec668b05b399fbb4142e0ea1f", "score": "0.6330861", "text": "def get_date():\n current_date = datetime.now()\n data_formatted = str(current_date.strftime('%m%d%H%M%S%f'))\n return data_formatted", "title": "" }, { "docid": "8ef1000f0ed4dea2aa8ff51bd93ee01e", "score": "0.63139004", "text": "def all_day_date(self):\n return self.getattr('all_day_date')", "title": "" } ]
9945b198ee2819f25a1b4b7fc922fb45
Imports a CSV file. First row must be a header row!
[ { "docid": "b64b6452c8f571043df0a297554acb4d", "score": "0.6596792", "text": "def import_csv(self, filename, delimiter=',', quotechar='\"'):\n with open(filename) as data_file:\n reader = csv.reader(data_file, delimiter=delimiter, quotechar=quotechar)\n header = [h.strip() for h in reader.next()]\n for row in reader:\n self.data.append({k: self._num(v) for k, v in zip(header, row)})", "title": "" } ]
[ { "docid": "e455c173c1f80b526afba4f9ca27056f", "score": "0.7136156", "text": "def import_csv_command(csvdir):\n import_tables_from_csv_files(csvdir)", "title": "" }, { "docid": "4e9f35441ca60f248a8412d0d1fa470b", "score": "0.70805323", "text": "def load_lar_csv():\n cur, pg_conn = connect()\n for file in lar_files:\n if file[-3:] == \"csv\":\n table_name = \"lar_\"+file[4:8] + \"_ffiec\"\n print(table_name)\n with open(\"../sql/lar_csv.sql\") as sql_file:\n sql = sql_file.read()\n sql = sql.format(table=table_name, data_file=lar_data_path + file)\n cur.execute(sql,)\n else:\n print(\"not a csv file\")", "title": "" }, { "docid": "e47370ea1284c834341abf9e3fd6cb3a", "score": "0.6967193", "text": "def load_ts_csv():\n cur, pg_conn = connect()\n for file in ts_files:\n if file[-3:] == \"txt\":\n table_name = \"ts_\"+file[3:7] + \"_ffiec\"\n print(table_name)\n with open(\"../sql/ts_csv.sql\") as sql_file:\n sql = sql_file.read()\n sql = sql.format(table=table_name, data_file=ts_data_path + file)\n cur.execute(sql,)\n else:\n print(\"not a csv file\")", "title": "" }, { "docid": "2f086418c793a6cedcce66864bd38c0f", "score": "0.6954721", "text": "def importCSV(self):\n filepath = QFileDialog.getOpenFileName(self, 'Load CSV file',\"\",\"Comma-separated values (*.csv)\")\n if filepath !=\"\":\n urllib.urlretrieve('file:/// %s' % filepath,'Schedule.csv')\n self.tableWidget.clear()\n self.loadSchedule()", "title": "" }, { "docid": "37d395e89687ec840658e8606bf81491", "score": "0.6845323", "text": "def import_data():\n\tcsv_data = []\n\twith open('csv_data.csv') as csvfile:\n\t\treadCSV = csv.reader(csvfile, delimiter=',')\n\t\tfor row in readCSV:\n\t\t\tcsv_data.append(row)\n\treturn csv_data", "title": "" }, { "docid": "0d58c2c8511289cae57345a2c23750b9", "score": "0.6759982", "text": "def csv_loader(csv_path):\n with open(csv_path, \"r\") as csv_file:\n # reads the first line of csv that contains headers\n headers = csv_file.readline()\n\n # pulls out table name from csv file path \n table_name_regex = re.compile(r\"([a-z])+(?=\\.csv$)\", re.IGNORECASE)\n table_match = table_name_regex.search(csv_path)\n\n try:\n table_name = table_match.group(0) \n except: \n raise ValueError(\"regex failed in table name extract\")\n\n return headers, table_name", "title": "" }, { "docid": "a9f7cf9a98d1a0de583fde1596254f16", "score": "0.6753202", "text": "def import_csv(self, csvfile, columns=None, delimiter=' ', quotechar='|'):\n if isinstance(csvfile, str):\n csvfile = open(csvfile, 'rb')\n import csv\n R = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar)\n if columns is None:\n columns = R.next()\n d = []\n for x in R:\n z = {}\n for i in range(len(x)):\n y = x[i]\n if y != '':\n if y.isdigit():\n y = eval(y)\n else:\n v = y.split('.')\n if len(v) == 2 and v[0].isdigit() and v[1].isdigit():\n y = eval(y)\n z[columns[i]] = y\n d.append(z)\n self.insert(d)", "title": "" }, { "docid": "1315fb2c3dea930f843b178f8f07d96e", "score": "0.67283", "text": "def loadCSV(self, path):\n dtframe = pd.read_csv(path)\n self.loadDataframe(dtframe)\n \n #####", "title": "" }, { "docid": "e25b91c8ca23fb5bd37e99d691fb45cb", "score": "0.6706128", "text": "def import_csv(self, request):\n if request.method == \"POST\":\n csv_file = request.FILES[\"csv_file\"]\n file_data = csv_file.read().decode(\"utf-8\")\n lines = file_data.split(\"\\n\")\n del lines[0] # to delete header\n obj = StudentAdminManager().import_csv(lines)\n if obj == True:\n url = reverse('admin:index')\n return HttpResponseRedirect(url)\n\n form = 
CsvImportForm()\n payload = {\"form\": form}\n\n return render(request, \"admin/csv_form.html\", payload)", "title": "" }, { "docid": "ecd7ebd32a418d709c2703b32c00b1e7", "score": "0.6606399", "text": "def _data_import(self, filepath):\n data_raw = []\n with open(filepath) as f:\n reader = csv.reader(f)\n for i, row in enumerate(reader):\n if not row:\n continue\n if self.discard_header and i == 0:\n continue\n read_line = self._process_row_raw(row)\n data_raw.append(read_line)\n return data_raw", "title": "" }, { "docid": "4ec17cc5ee8def651a7a5e4cebe7b56e", "score": "0.6591309", "text": "def load_csv(self, filename, header):\n \n csvfile = open(filename)\n reader = csv.DictReader(csvfile)\n doc = []\n for each in reader:\n row={}\n for field in header:\n row[field]=each[field]\n doc.append(row)\n\n print \"document is\", doc \n \n self.db_coll_conn.insert_many(doc)", "title": "" }, { "docid": "1367272be7111b6f30d786c0e43ec9e8", "score": "0.65734667", "text": "def load_csv(path, singlefile=None, columns=COLUMNS):\n\n if singlefile:\n df = pd.read_csv(path + '/' + singlefile, dtype=TYPESDICT,\n usecols=columns)\n\n df.dropna(subset=['DomainCountry'], inplace=True)\n return df\n\n df = pd.DataFrame()\n csvs = listdir(path)\n for csvfile in csvs:\n temp_df = pd.read_csv(path + '/' + csvfile, dtype=TYPESDICT,\n usecols=columns)\n df = pd.concat([df, temp_df], join='inner')\n df.dropna(subset=['DomainCountry'], inplace=True)\n\n return df", "title": "" }, { "docid": "45382de44238b0066ebf5b0d68d245e4", "score": "0.6551089", "text": "def import_movies():\r\n import csv\r\n with open('Movies.csv', encoding='utf-8') as csv_file:\r\n csv_reader = csv.reader(csv_file, delimiter=',')\r\n line_count = 0\r\n \r\n for row in csv_reader:\r\n if line_count == 0: #Header row\r\n line_count += 1\r\n else:\r\n line_count += 1\r\n \r\n Movies(row[0], row[1], row[2], row[3])", "title": "" }, { "docid": "0af2ac86388fad77e6779421a26f7807", "score": "0.6549083", "text": "def ingest_csv():\n # Extract the CSV file from the zip archive\n extract_csv(ZIP_FILENAME, CSV_FILENAME, EXTRACT_PATH)\n # Create a CSV import generator (next yields one db row)\n import_generator = import_csv_gen(EXTRACT_PATH + CSV_FILENAME)\n # Skip over the title row\n next(import_generator)\n # Iterate over all other rows\n while True:\n kwargs = {}\n try:\n data = next(import_generator)\n if len(data) != 8:\n logger.error(f'Data with incorrect item count: {len(data)}')\n continue\n # extract items from list and add record to database\n kwargs['customer_id'] = data[CUST_ID]\n kwargs['name'] = data[CUST_NAME]\n kwargs['lastname'] = data[CUST_LASTNAME]\n kwargs['home_address'] = data[CUST_ADDRESS]\n kwargs['phone_number'] = data[CUST_PHONE]\n kwargs['email_address'] = data[CUST_EMAIL]\n kwargs['status'] = data[CUST_STATUS]\n kwargs['credit_limit'] = float(data[CUST_CREDIT_LIMIT])\n try:\n add_customer(**kwargs)\n except ValueError:\n logger.error(f'Unable to add {data[CUST_ID]} to database')\n except StopIteration:\n break", "title": "" }, { "docid": "4d467ad4992d1021521dee67f9665b5b", "score": "0.65405756", "text": "def load_csv(csv):\n t = so(\"csv\",\"random\",\"fgdb\")\n TableToTable_conversion(\n in_rows=csv, \n out_path=os.path.dirname(t), \n out_name=os.path.basename(t)\n )\n return t", "title": "" }, { "docid": "6670fea3c870c995ac2a9826a1bb35d5", "score": "0.6538842", "text": "def import_csv(filename, csv_map=None, encoding=\"latin1\"):\n if not os.path.exists(filename):\n raise FileNotFoundError\n\n with open(filename, mode=\"r\", 
encoding=encoding, errors=\"ignore\") as csv_file:\n Part._import_csv_content(csv_file, csv_map)", "title": "" }, { "docid": "6ac999c28271f96ac65ded8244a0ef5f", "score": "0.6504684", "text": "def importDataCSV(path, names=None):\n if names != None:\n return pd.read_csv(filepath_or_buffer=path, names=names, header=None)\n else:\n return pd.read_csv(filepath_or_buffer=path)", "title": "" }, { "docid": "d4557f09126b742e45562954c826da2e", "score": "0.6448974", "text": "def importCSV(self, path, indexName):\n path = os.path.abspath(path)\n try:\n if not os.path.isfile(path):\n self.__createBlankCSV(path)\n print(\"New CSV created at {}.\".format(path))\n self.CSVs[indexName] = csvImport(path)\n return True\n except:\n return False", "title": "" }, { "docid": "ba69b047dab0a83c033baa79e68ab676", "score": "0.64468414", "text": "def loadCSV(filename):\r\n testList = []\r\n with open(filename) as csvfile:\r\n reader = csv.reader(csvfile)\r\n header = reader.__next__()\r\n # dim = len(header)\r\n for row in reader:\r\n testList.append(row)\r\n return testList, header", "title": "" }, { "docid": "3d1f4afd6bb052e67707dcae60868dbb", "score": "0.64432377", "text": "async def import_csv(self, ctx: commands.Context, filename):\n try:\n with open(filename, 'r', newline='') as csvfile:\n reader = csv.reader(csvfile)\n commands_added = 0\n for row in reader:\n new_cc = CCCommand(\n category=row[0],\n name=row[1],\n responce=row[2])\n self.sessionmerge(new_cc)\n commands_added += 1\n\n self.sessioncommit()\n await ctx.send(f'Added {commands_added} commands to database')\n\n #except FileNotFoundError:\n # await ctx.send(\"Error, file not found\");\n except Exception as oof:\n await ctx.send(\"Something went wrong with import, check log for details\")\n logging.info(oof)", "title": "" }, { "docid": "ed93b8dde00643a09fc9bf65891af5c5", "score": "0.6431503", "text": "def from_csv(self, filepath, delim = ','):\n\n tdo = textio.read_csv(filepath, True, delim)\n\n self.from_tdo(tdo)\n\n return", "title": "" }, { "docid": "24670fa24f28d3e291a296c67f4ef151", "score": "0.63741183", "text": "def import_csv(directory_name, filename, collection_name):\n LOGGER.debug(\"Reading CSV File: %s\", str(filename))\n contents = csv_to_json(directory_name, str(filename))\n collection = collection_name.split('.')[0]\n LOGGER.debug(\"Inserting Data Into Mongo: %s\", collection)\n results = insert_to_mongo(collection, contents)\n\n return (collection, results)", "title": "" }, { "docid": "c6399dfab1afad5c9dfceaeb5f32ab67", "score": "0.6350517", "text": "def _import_csv(self, write_cursor: DBCursor, filepath: Path, **kwargs: Any) -> None:\n with open(filepath, encoding='utf-8-sig') as csvfile:\n data = csv.DictReader(csvfile)\n for row in data:\n try:\n self._consume_uphold_transaction(write_cursor, row, **kwargs)\n except UnknownAsset as e:\n self.db.msg_aggregator.add_warning(\n f'During uphold CSV import found action with unknown '\n f'asset {e.identifier}. Ignoring entry',\n )\n continue\n except DeserializationError as e:\n self.db.msg_aggregator.add_warning(\n f'Deserialization error during uphold CSV import. '\n f'{e!s}. 
Ignoring entry',\n )\n continue\n except KeyError as e:\n raise InputError(f'Could not find key {e!s} in csv row {row!s}') from e", "title": "" }, { "docid": "c621b8131c6222e9cec6f0f6abbcdcc7", "score": "0.6310747", "text": "def DoImportCSV(self, event):\n\n # pick csv file\n fdlg = wx.FileDialog(None, \"Select CSV File\", \n wildcard=\"CSV Files (*.csv)|*.csv\",\n defaultDir=os.getcwd(), \n style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)\n \n if fdlg.ShowModal() == wx.ID_OK:\n csvFile = fdlg.GetPaths()[0]\n self.data.ImportCSV(csvFile)\n self.CheckSave()\n self.panel.update()\n \n fdlg.Destroy()", "title": "" }, { "docid": "0a003ec38b7ef53873f3d33d6fd4f8b9", "score": "0.62934613", "text": "def load_csv(csv_path, params={}):\n csv_list = []\n with open(csv_path, newline='') as csvfile:\n content = csv.reader(csvfile, params)\n for row in content:\n csv_list.append(row)\n return csv_list", "title": "" }, { "docid": "bedd380ef005314ae060e08174f63422", "score": "0.62628305", "text": "def load_individual(path):\n df = []\n for file in glob.glob(path + \"/*.csv\"):\n dataframe = pd.read_csv(file)\n df.append(dataframe)\n return df", "title": "" }, { "docid": "0c3c4fec90220898fea1dc15bf000664", "score": "0.6256659", "text": "def csv_import(currency, date):\n # tab delimiter used since coming from excel\n df = pd.read_csv('data/'+ currency +'-'+ date +'.txt', header=None, sep='\\s+')\n deltas = np.array(df.loc[0,1:])\n tenors = np.array(df.loc[1:,0])\n vols = np.array(df.loc[1:,1:]).astype(float)\n return pd.DataFrame(vols, index=tenors, columns=deltas)", "title": "" }, { "docid": "0f806ea136a9f799ad5694fa9f3f7e82", "score": "0.62483287", "text": "def import_data(name=None,path=[]):\n fList = [(os.path.basename(f.split('-',1)[0]), datetime.strptime(f.split('-',1)[1],\"%Y%m%d-%H%M%S\"), f)\n for f in glob.glob(os.path.join(os.path.expanduser('~'),*path,'**'),recursive=True)\n if not f.startswith('.') and not os.path.isdir(f)]\n try:\n return [pd.read_csv(f[2], skiprows=1, header=None, delim_whitespace=True) for f in fList if name == None or f[0] == name]\n except:\n print(\"well... 
\", name)\n pass", "title": "" }, { "docid": "b8f9e53aa298b5ff2bc711757a4401d8", "score": "0.62460893", "text": "def load_csv(ds) -> str:\n if os.name == \"nt\":\n fpath = str(ds.path.resolve()).replace('\\\\', '\\\\\\\\')\n else:\n fpath = str(ds.path.resolve())\n return f\"ImportFileCSV(u'{fpath}', linked=True, dsprefix=u'{ds.name}_')\"", "title": "" }, { "docid": "912d6e38e123382ce98e66465c67de3e", "score": "0.6221714", "text": "def open_csv():\n with open(INPUT_FILE, newline='') as _:\n reader = csv.reader(_, delimiter=';')\n next(_)\n for row in reader:\n pull_data_api(URL + row[0])", "title": "" }, { "docid": "5088e70924d8b5e782890644ecc31ba2", "score": "0.62145", "text": "def import_csv(self):\n # @todo check for format - write test\n\n # datefunc = lambda x: datetime.strptime(x.decode(\"utf-8\"), '%Y/%m/%d %H:%M:%S.%f')\n raw_data_array = np.genfromtxt(self.path, dtype=None, delimiter=\",\", names=True)\n\n return raw_data_array", "title": "" }, { "docid": "8b4cd72d0ef9050ffb9dc6daeb760b03", "score": "0.620695", "text": "def loadCSV(filepath, delimiter=\",\", verbose=0):\n with open(filepath, newline=\"\\n\", encoding=\"latin-1\") as f:\n reader = csv.reader(f, delimiter=delimiter)\n try:\n fieldNames = [ x.strip() for x in next(reader) ]\n except:\n exit(\"Cannot read line from: %s\" % filepath)\n lenFieldNames = len(fieldNames)\n rawValues = list()\n for row in reader:\n lenValues = len(row)\n if lenFieldNames != lenValues:\n exit(\"Mismatch in the number of fields, %d vs %d\\nfilepath: %s\" % (lenFieldNames, lenValues, filepath))\n row2 = {}\n allNone = True\n for i in range(0, lenFieldNames):\n col = row[i].strip()\n if col == \"n/e\" or col == \"\":\n row2[fieldNames[i]] = None\n else:\n row2[fieldNames[i]] = col\n allNone = False\n # do not insert into the flatTable if all entries are None\n if not allNone:\n rawValues.append(row2)\n if verbose >= 1:\n print(\"Loaded data from %s, %d fields, %s entries.\" % (filepath, len(fieldNames), len(rawValues)))\n if verbose >= 2:\n print(\"Following are the fields.\")\n for fieldName in fieldNames:\n print(\" - %s\" % fieldName)\n return (fieldNames, rawValues)", "title": "" }, { "docid": "81ef5743abcb84b4706426302626aca5", "score": "0.61999404", "text": "def child_import_csv(self, request):\n if request.method == \"POST\":\n csv_file = request.FILES[\"csv_file\"]\n file_data = csv_file.read().decode(\"utf-8\", errors='ignore')\n lines = file_data.split(\"\\n\")\n del lines[0] # to delete header\n obj = ChildManager().csv_import(lines)\n if obj ==True:\n url = reverse('admin:index')\n return HttpResponseRedirect(url)\n\n\n form = CsvImportForm()\n payload = {\"form\": form}\n\n return render(request, \"admin/csv_form.html\", payload)", "title": "" }, { "docid": "a55d687bd49d091fe7e2b9c03bfc46aa", "score": "0.6192247", "text": "def getCSVdata(delim, qc):\n csvfile = scribus.fileDialog(\"csv2table new:: open file\", \"*.csv\")\n if csvfile != \"\":\n try:\n reader = csv.reader(file(csvfile), delimiter=delim, quotechar=qc)\n datalist=[]\n skipfirst=False\n for row in reader:\n if skipfirst==True:\n rowlist=[]\n for col in row:\n rowlist.append(col)\n datalist.append(rowlist)\n else : skipfirst=True\n return datalist\n except Exception, e:\n scribus.messageBox(\"csv2table new\", \"Could not open file %s\"%e)\n else:\n sys.exit", "title": "" }, { "docid": "046d024e6904ea85ec21d7a54b672189", "score": "0.6175978", "text": "def read_import_csv_file(self):\n if self.choose_file:\n data = StringIO(base64.b64decode(self.choose_file).decode())\n\n 
if self.delimiter == \"tab\":\n reader = csv.DictReader(data, delimiter='\\t')\n elif self.delimiter == \"semicolon\":\n reader = csv.DictReader(data, delimiter=';')\n else:\n reader = csv.DictReader(data, delimiter=',')\n seller_error_line = []\n\n next(reader)\n for line in reader:\n if not line.get('Seller SKU'):\n seller_error_line.append(reader.line_num)\n message = \"\"\n if seller_error_line:\n message += 'File is invalid Seller SKU must be required field.'\n if message:\n raise UserError(_(message))", "title": "" }, { "docid": "e538177de4539f76f0b38a3f7f55fb6e", "score": "0.615973", "text": "def load_csv(csvpath):\n with open(csvpath, \"r\") as csvfile:\n data = []\n csvreader = csv.reader(csvfile, delimiter=\",\")\n\n # Skip the CSV Header\n next(csvreader)\n\n # Read the CSV data\n for row in csvreader:\n data.append(row)\n return data", "title": "" }, { "docid": "5d7584e9e0d96a5f9a30e45a86196451", "score": "0.61541975", "text": "def import_from_csv(\n path_or_file,\n name,\n style=None,\n delimiter=None,\n quotechar=None,\n lineterminator=None,\n encoding=\"utf-8\",\n):\n\n # Load the data\n # XXX Load the entire file in memory\n # Alternative: a file-wrapper returning the sample then the rest\n if isinstance(path_or_file, str):\n with open(path_or_file, \"rb\") as f:\n data = f.read().splitlines(True)\n else:\n # Leave the file we were given open\n data = path_or_file.read().splitlines(True)\n # Sniff the dialect\n sample = \"\".join(data[:100])\n dialect = Sniffer().sniff(sample)\n # We can overload the result\n if delimiter is not None:\n dialect.delimiter = delimiter\n if quotechar is not None:\n dialect.quotechar = quotechar\n if lineterminator is not None:\n dialect.lineterminator = lineterminator\n # Make the rows\n csv = reader(data, dialect)\n table = Table(name, style=style)\n for line in csv:\n row = Row()\n # rstrip line\n while line and not line[-1].strip():\n line.pop()\n for value in line:\n cell = Cell(_get_python_value(value, encoding))\n row.append_cell(cell, clone=False)\n table.append_row(row, clone=False)\n return table", "title": "" }, { "docid": "11c3c54d11380e27e88869ac850775e7", "score": "0.6128514", "text": "def import_csv(\n sqlcon,\n path_csv,\n schema,\n *,\n header=True,\n delimiter=',',\n na_strings={'na', 'n/a', 'null', ''}\n ):\n assert isinstance(schema, types.StructType), '{} is not a pyspark StructType'.format(schema)\n\n def _enrich_field(field_raw_value, field_type):\n \"\"\"Convert a single raw string into the anticipated Python datatype for the field\"\"\"\n if field_raw_value is None:\n return None\n if field_raw_value.lower() in na_strings:\n return None\n if isinstance(field_type, types.StringType):\n return field_raw_value\n if isinstance(field_type, (types.IntegerType, types.LongType, types.ShortType)):\n return int(field_raw_value)\n if isinstance(field_type, (types.FloatType, types.DoubleType)):\n return float(field_raw_value)\n if isinstance(field_type, types.DateType):\n try:\n return datetime.strptime(field_raw_value, '%Y-%m-%d').date()\n except ValueError:\n return datetime.strptime(field_raw_value, '%d/%m/%Y').date()\n if isinstance(field_type, types.TimestampType):\n try:\n return datetime.strptime(field_raw_value, '%d/%m/%Y %H:%M:%S')\n except ValueError:\n return datetime.strptime(field_raw_value, '%Y-%m-%d %H:%M:%S')\n\n _field_types = [field.dataType for field in schema.fields]\n\n def _parse_lines(iterator):\n \"\"\"Parse an iterator of lines (raw strings) into lists of rich data types\"\"\"\n # Utilize a csv.reader 
object to handle messy csv nuances\n for row in csv.reader(iterator, delimiter=delimiter):\n yield [\n _enrich_field(field_raw_value, field_type)\n for field_raw_value, field_type in zip(row, _field_types)\n ]\n\n # Start defining the data pipeline\n lines = sqlcon._sc.textFile(str(path_csv))\n\n if header:\n header_line = lines.first()\n lines = lines.filter(lambda l: l != header_line)\n\n parts_enriched = lines.mapPartitions(_parse_lines)\n\n typed_dataframe = sqlcon.createDataFrame(parts_enriched, schema)\n typed_dataframe.persist(StorageLevel.MEMORY_AND_DISK_SER)\n\n return typed_dataframe", "title": "" }, { "docid": "2c679bcd0fc990de75159296590964a3", "score": "0.6124778", "text": "def importGhtorrentProjectCsv(fname, conn):\n\n rownum = 0\n with open(fname, \"rb\") as csvfile:\n csvreader = csv.reader(csvfile, escapechar='\\\\', strict=True, doublequote=False)\n hdrs = [h.strip('`') for h in csvreader.next()]\n hdrsComma = ','.join(hdrs)\n try:\n conn.execute(\"drop table gitprojects;\")\n except:\n pass\n createProjectsSQL = \"create table gitprojects (\" + hdrsComma + \");\"\n conn.execute(createProjectsSQL)\n insertProjectsSQL = \"insert into gitprojects (\" + hdrsComma + \")\" + \\\n \" values (\" + ','.join([\"?\" for h in hdrs]) + \");\"\n for row in csvreader:\n rownum = rownum + 1\n conn.execute(insertProjectsSQL, [unicode(r, \"UTF-8\") for r in row])\n if (rownum%1000 == 0):\n print \"Wrote row \", rownum\n conn.commit() \n addGitprojectsExtraColumns(conn)\n conn.commit()", "title": "" }, { "docid": "4cc9da9b5e2ef692f702d4ceefb29465", "score": "0.6102723", "text": "def getCSVdata():\n csvfile = scribus.fileDialog(\"csv2table :: open file\", \"*.csv\")\n if csvfile != \"\":\n try:\n reader = csv.reader(file(csvfile))\n datalist=[]\n for row in reader:\n rowlist=[]\n for col in row:\n rowlist.append(col)\n datalist.append(rowlist)\n return datalist\n except Exception, e:\n scribus.messageBox(\"csv2table\", \"Could not open file %s\"%e)\n else:\n sys.exit", "title": "" }, { "docid": "08398fd3b51b8db9a08d8fbd1b502e97", "score": "0.6101736", "text": "def csv_importer(filepath):\n with open(filepath, 'r') as csv_file:\n reader = csv.DictReader(csv_file, delimiter=',')\n dict_list = []\n for line in reader:\n dict_list.append(line)\n\n return dict_list", "title": "" }, { "docid": "bfb1bebf590667c12fa86a0fa9d4194a", "score": "0.6097807", "text": "def load_csv(self):\n filepath = askopenfilename(\n filetypes=[(\"CSV Files\", \"*.csv\")]\n )\n if not filepath:\n return\n \n self.portfolio = Portfolio(filepath, 'degiro')\n\n self.cost.set(str(self.portfolio.value))\n\n i = 0\n for key in self.portfolio.positions.keys():\n lbl_position = tk.Label(self.frm_body_main, text=str(self.portfolio.positions[key]), bg=self.clr_accent_1)\n lbl_position.grid(row=i, column=0, sticky=\"nsew\", padx=2, pady=2)\n self.frm_body_main.rowconfigure(i, weight=1)\n self.positions.append(lbl_position)\n i += 1\n \n #Add close portfolio button\n self.btn_reset.grid()\n\n # Remove 'new/add' buttons\n self.btn_load_csv.grid_remove()\n self.btn_enter_manual.grid_remove()", "title": "" }, { "docid": "1523e75db9e327bc54dc4b021c23b4d8", "score": "0.60853916", "text": "def test_local_csv(self):\n with hxl.data(FILE_CSV, True) as source:\n self.compare_input(source)", "title": "" }, { "docid": "e7aa0d61e8600af571d35b3fcf0c4ec5", "score": "0.60769284", "text": "def __load_input_data(path: str) -> TabularDataset:\n input_data_files = os.listdir(path)\n try:\n input_dfs = [pd.read_csv(f'{path}/{data_file}') for 
data_file in input_data_files]\n return task.Dataset(df=pd.concat(input_dfs))\n except:\n print(f'No csv data in {path}!')\n return None", "title": "" }, { "docid": "35400df6e9b8d0403f7ceda2afa59e5a", "score": "0.6065713", "text": "def import_tasks(self):\n path = show_open_dialog(self, \"Import Shots\", \"CSV (*.csv)\")\n if not path:\n return\n\n with context_reset_model(self.model):\n self._manager.import_tasks(path)", "title": "" }, { "docid": "0596eda95cf47318857da8a77e2289ce", "score": "0.6049218", "text": "def open_experiments_csv(exp_csv_file):", "title": "" }, { "docid": "1f2dd6d7e5eef6cb325f5f2801452ff5", "score": "0.60491997", "text": "def import_csv(filename):\n with open(filename, newline=\"\", encoding='utf-8') as file:\n dict_list = []\n csv_data = csv.reader(file)\n headers = next(csv_data, None)\n if headers[0].startswith(\"\\ufeff\"):\n headers[0] = headers[0][1:]\n\n for row in csv_data:\n row_dict = {}\n for index, column in enumerate(headers):\n row_dict[column] = row[index]\n\n dict_list.append(row_dict)\n\n return dict_list", "title": "" }, { "docid": "2c9dff000bf0431a66111a9561964e37", "score": "0.6049069", "text": "def parse_csv(self) -> List[Dict[str, Any]]:\n csvfile_path = self.filepath()\n\n logger.info(f\"Attempting to parse CSV file: {csvfile_path}\")\n\n with open(csvfile_path, newline=\"\") as csvfile:\n csvreader = DictReader(csvfile)\n try:\n if self.check_for_required_headers(csvreader):\n documents = self.format_and_filter_rows(csvreader)\n\n return documents\n except csv.Error as e:\n self.logging_collection.add_error(\"TYPE 10\", f\"Wrong read from file\")\n\n return []", "title": "" }, { "docid": "647dde441ec00f0579e9c1ff08f72218", "score": "0.6048492", "text": "def import_from_csv(file_path, processor):\n csvfile = open(file_path, 'rU')\n reader = csv.DictReader(csvfile)\n result = [processor(row) for row in reader]\n csvfile.close()\n return result", "title": "" }, { "docid": "3c97d0429de07d30f53419a4c68038f8", "score": "0.6037201", "text": "def import_csvs(data_folder_path):\n\n # Create list of data folder contents\n data_folder_contents = os.listdir(data_folder_path)\n\n # Loop through filenames in data folder, and import data\n for filename in data_folder_contents:\n\n # Handle '/' or no '/' in folder path\n if data_folder_path[-1] == '/':\n file_path = data_folder_path + filename\n else:\n file_path = data_folder_path + '/' + filename\n\n # Open each csv in turn, and import data as dictated by filename\n with open(file_path) as csv_file:\n\n csv_reader = csv.reader(csv_file, delimiter=',')\n next(csv_reader) # Skip headers\n\n if filename == 'orders.csv': # Import orders\n print(\"Importing data from orders.csv\")\n\n for row in csv_reader:\n create_order = Order(\n id=row[0],\n created_at=row[1],\n vendor_id=row[2],\n customer_id=row[3]\n )\n create_order.save()\n\n elif filename == 'order_lines.csv': # Import order lines\n print(\"Importing data from order_lines.csv\")\n\n for row in csv_reader:\n create_order_line = OrderLine(\n order_id=row[0],\n product_id=row[1],\n product_description=row[2],\n product_price=row[3],\n product_vat_rate=row[4],\n discount_rate=row[5],\n quantity=row[6],\n full_price_amount=row[7],\n discounted_amount=row[8],\n vat_amount=row[9],\n total_amount=row[10]\n )\n create_order_line.save()\n\n elif filename == 'products.csv': # Import products\n print(\"Importing data from products.csv\")\n\n for row in csv_reader:\n create_product = Product(\n id=row[0],\n description=row[1]\n )\n create_product.save()\n elif 
filename == 'promotions.csv': # Import promotions\n print(\"Importing data from promotions.csv\")\n\n for row in csv_reader:\n create_promotion = Promotion(\n id=row[0],\n description=row[1]\n )\n create_promotion.save()\n\n elif filename == 'product_promotions.csv': # Import product promotions\n print(\"Importing data from product_promotions.csv\")\n\n for row in csv_reader:\n create_product_promotion = ProductPromotion(\n date=row[0],\n product_id=row[1],\n promotion_id=row[2]\n )\n create_product_promotion.save()\n\n elif filename == 'commissions.csv': # Import vendor commissions\n print(\"Importing data from commissions.csv\")\n\n for row in csv_reader:\n create_commission = VendorCommissions(\n date=row[0],\n vendor_id=row[1],\n rate=row[2]\n )\n create_commission.save()", "title": "" }, { "docid": "4dd66a3682048dcf2cbf4046dc2b98d0", "score": "0.6036312", "text": "def import_csv_gen(csv_filename):\n with open(csv_filename, 'r') as csv_fd:\n line_num = 0\n line = 'foo'\n while line:\n line_num += 1\n try:\n line = csv_fd.readline()\n # generator 'yield' statement for each\n # line of the CSV file below. Python CSV\n # support does not allow per-line parsing\n yield line.rstrip('\\n').split(',')\n except EOFError:\n return", "title": "" }, { "docid": "c26bd6a17266d136f1d56d970aa894c5", "score": "0.60197854", "text": "def import_datum( path, metadata = None ):\r\n # find header data\r\n x_label = None\r\n y_label = None\r\n header_end = None\r\n with open( path ) as f:\r\n for index, line in enumerate( f ):\r\n fields = line.split( ',' )\r\n\r\n key = fields[ 0 ].lower()\r\n if key == '\\n':\r\n # end of header\r\n header_end = index\r\n break\r\n\r\n value = fields[ 1 ].lower()\r\n if key == 'xaxis':\r\n x_label = value\r\n\r\n elif key == 'yaxis':\r\n y_label = value\r\n\r\n # import data\r\n df = pd.read_csv(\r\n path,\r\n usecols = [ 0, 1 ],\r\n skiprows = header_end,\r\n index_col = x_label,\r\n names = ( x_label, y_label )\r\n )\r\n\r\n if metadata is not None:\r\n cols = metadata_dataframe_index( path, metadata )\r\n df.columns = cols \r\n\r\n return df", "title": "" }, { "docid": "c03a129dde097e13dd76360e7f9a7708", "score": "0.6018005", "text": "def import_data(self):\n from import_csv import ImportCSV\n imported_data = ImportCSV(self.target_csv_path)\n self.timestamps = imported_data.timestamps\n self.voltages = imported_data.voltages\n logging.info(self.target_csv_path + ' imported')", "title": "" }, { "docid": "9663065ae9b49c523a25ff9c5cea59b1", "score": "0.60136545", "text": "def _import_metadata(self, filename, headers, rows, meta_container):\n for header, row1, row2 in izip(headers[0:-1], rows[0:-1], rows[1:]):\n nr_rows = row2 - row1 - 2\n offset = row1 + 1\n metadata_table = table.File()\n self._importer.import_csv(\n metadata_table, nr_rows, META_FOOT_ROWS, offset)\n\n if metadata_table.number_of_columns() == 2:\n for name_unit, value in zip(\n metadata_table.get_column_to_array('X0'),\n metadata_table.get_column_to_array('X1')):\n\n try:\n name, unit = tuple(name_unit.split('('))\n unit = unit.strip().rstrip(')')\n except ValueError:\n name = name_unit.split('(')[0]\n unit = '-'\n name = self._replace_bad_signal_header(name.strip())\n\n attributes = {'unit': unit,\n 'category': header}\n\n full_header = (header + '/' + name).replace(' ', '_')\n\n try:\n meta_container.create_column(\n unicode(full_header),\n value.reshape((1,)),\n attributes)\n except KeyError:\n pass", "title": "" }, { "docid": "9dee2a459d3b62f52f82e800bfef054c", "score": "0.6011684", "text": "def 
load_prod_table(csv_fname):\n left_col, right_col = 0, 3\n return load_csv(csv_fname, left_col, right_col)", "title": "" }, { "docid": "4e9ae1a770c113668cf661fe12f17b2f", "score": "0.6010866", "text": "def readCSVFile(path, isTargetFile):\n with open(path) as csvfile:\n header = []\n lines = []\n reader = csv.reader(csvfile)\n\n # if target file, use header\n if isTargetFile:\n header = next(reader)\n else:\n next(reader)\n\n # read line by line and append to list\n for line in reader:\n lines.append(line)\n\n return header, lines", "title": "" }, { "docid": "c3b5f575f16db48095022dc7d107248e", "score": "0.5983853", "text": "def load_from_csv(relative_path):\r\n (path,mimetype) = path_and_mimetype(relative_path)\r\n reader = csv.DictReader(open(path,'r'))\r\n myList = []\r\n for line in reader:\r\n myList.append(line)\r\n del reader\r\n return myList", "title": "" }, { "docid": "b5329d64a6c9761bebf8dad4892b792f", "score": "0.5976266", "text": "def load_from_csv(cls, source, filename=None, org=None):\n if isinstance(source, str):\n source = StringIO(source, newline=\"\")\n if filename is None:\n filename = datetime.utcnow().isoformat(timespec=\"seconds\")\n reader = csv.reader(source)\n header = next(reader)\n\n if len(header) == 1 and \"\\t\" in header[0]:\n source.seek(0)\n reader = csv.reader(source, delimiter=\"\\t\")\n header = next(reader)\n\n if len(header) < 2:\n raise ModelExceptionError(\"Expected CSV or TSV format file.\")\n\n header_rexs = [\n re.compile(ex, re.I)\n for ex in [\n \"title$\",\n r\"sub.*(title)?$\",\n r\"translated\\s+(title)?\",\n r\"translat(ed)?(ion)?\\s+(title)?\\s*lang(uage)?.*(code)?\",\n r\"journal\",\n \"type$\",\n r\"(short\\s*|description\\s*)+$\",\n r\"citat(ion)?.*type\",\n r\"citat(ion)?.*value\",\n r\"(publication)?.*date\",\n r\"(publ(ication?))?.*media.*(type)?\",\n r\"url\",\n r\"lang(uage)?.*(code)?\",\n r\"country\",\n r\"(is)?\\s*active$\",\n r\"orcid\\s*(id)?$\",\n \"email\",\n r\"(external)?\\s*id(entifier)?\\s+type$\",\n r\"((external)?\\s*id(entifier)?\\s+value|work.*id)$\",\n r\"(external)?\\s*id(entifier)?\\s*url\",\n r\"(external)?\\s*id(entifier)?\\s*rel(ationship)?\",\n \"put.*code\",\n r\"(is)?\\s*visib(bility|le)?\",\n r\"first\\s*(name)?\",\n r\"(last|sur)\\s*(name)?\",\n \"local.*|.*identifier\",\n ]\n ]\n\n def index(rex):\n \"\"\"Return first header column index matching the given regex.\"\"\"\n for i, column in enumerate(header):\n if rex.match(column.strip()):\n return i\n else:\n return None\n\n idxs = [index(rex) for rex in header_rexs]\n\n if all(idx is None for idx in idxs):\n raise ModelExceptionError(\n f\"Failed to map fields based on the header of the file: {header}\"\n )\n\n if org is None:\n org = current_user.organisation if current_user else None\n\n def val(row, i, default=None):\n if len(idxs) <= i or idxs[i] is None or idxs[i] >= len(row):\n return default\n else:\n v = row[idxs[i]].strip()\n return default if v == \"\" else v\n\n rows = []\n cached_row = []\n is_enqueue = False\n for row_no, row in enumerate(reader):\n # skip empty lines:\n if len([item for item in row if item and item.strip()]) == 0:\n continue\n if len(row) == 1 and row[0].strip() == \"\":\n continue\n\n orcid, email = val(row, 15), normalize_email(val(row, 16))\n if orcid:\n orcid = validate_orcid_id(orcid)\n if email and not validators.email(email):\n raise ValueError(f\"Invalid email address '{email}' in the row #{row_no+2}: {row}\")\n\n visibility = val(row, 22)\n if visibility:\n visibility = visibility.replace(\"_\", \"-\").lower()\n\n 
invitee = dict(\n identifier=val(row, 25),\n email=email,\n first_name=val(row, 23),\n last_name=val(row, 24),\n orcid=orcid,\n put_code=val(row, 21),\n visibility=visibility,\n )\n\n title = val(row, 0)\n external_id_type = val(row, 17, \"\").lower()\n external_id_value = val(row, 18)\n external_id_relationship = val(row, 20, \"\").replace(\"_\", \"-\").lower()\n\n if external_id_type not in EXTERNAL_ID_TYPES:\n raise ModelExceptionError(\n f\"Invalid External Id Type: '{external_id_type}', Use 'doi', 'issn' \"\n f\"or one of the accepted types found here: https://pub.orcid.org/v3.0/identifiers\"\n )\n\n if not external_id_value:\n raise ModelExceptionError(\n f\"Invalid External Id Value or Work Id: {external_id_value}, #{row_no+2}: {row}.\"\n )\n\n if not title:\n raise ModelExceptionError(\n f\"Title is mandatory, #{row_no+2}: {row}. Header: {header}\"\n )\n\n if external_id_relationship not in RELATIONSHIPS:\n raise ModelExceptionError(\n f\"Invalid External Id Relationship '{external_id_relationship}' as it is not one of the \"\n f\"{RELATIONSHIPS}, #{row_no+2}: {row}.\"\n )\n\n if (\n cached_row\n and title.lower() == val(cached_row, 0).lower()\n and external_id_type.lower() == val(cached_row, 17).lower()\n and external_id_value.lower() == val(cached_row, 18).lower()\n and external_id_relationship.lower() == val(cached_row, 20).lower()\n ):\n row = cached_row\n else:\n cached_row = row\n\n is_active = val(row, 14, \"\").lower() in [\"y\", \"yes\", \"1\", \"true\"]\n if is_active:\n is_enqueue = is_active\n\n work_type = val(row, 5, \"\").replace(\"_\", \"-\").lower()\n if not work_type:\n raise ModelExceptionError(\n f\"Work type is mandatory, #{row_no+2}: {row}. Header: {header}\"\n )\n\n # The uploaded country must be from ISO 3166-1 alpha-2\n country = val(row, 13)\n if country:\n try:\n country = countries.lookup(country).alpha_2\n except Exception:\n raise ModelExceptionError(\n f\" (Country must be 2 character from ISO 3166-1 alpha-2) in the row \"\n f\"#{row_no+2}: {row}. 
Header: {header}\"\n )\n\n publication_date = val(row, 9)\n citation_type = val(row, 7)\n if citation_type:\n citation_type = citation_type.replace(\"_\", \"-\").lower()\n\n if publication_date:\n publication_date = PartialDate.create(publication_date)\n rows.append(\n dict(\n work=dict(\n title=title,\n subtitle=val(row, 1),\n translated_title=val(row, 2),\n translated_title_language_code=val(row, 3),\n journal_title=val(row, 4),\n type=work_type,\n short_description=val(row, 6),\n citation_type=citation_type,\n citation_value=val(row, 8),\n publication_date=publication_date,\n url=val(row, 11),\n language_code=val(row, 12),\n country=country,\n is_active=is_active,\n ),\n invitee=invitee,\n external_id=dict(\n type=external_id_type,\n value=external_id_value,\n url=val(row, 19),\n relationship=external_id_relationship,\n ),\n )\n )\n\n with db.atomic() as transaction:\n try:\n task = Task.create(org=org, filename=filename, task_type=TaskType.WORK)\n for work, records in groupby(rows, key=lambda row: row[\"work\"].items()):\n records = list(records)\n\n wr = cls(task=task, **dict(work))\n validator = ModelValidator(wr)\n if not validator.validate():\n raise ModelExceptionError(f\"Invalid record: {validator.errors}\")\n wr.save()\n\n for external_id in set(\n tuple(r[\"external_id\"].items())\n for r in records\n if r[\"external_id\"][\"type\"] and r[\"external_id\"][\"value\"]\n ):\n ei = WorkExternalId(record=wr, **dict(external_id))\n ei.save()\n\n for invitee in set(\n tuple(r[\"invitee\"].items()) for r in records if r[\"invitee\"][\"email\"]\n ):\n rec = WorkInvitee(record=wr, **dict(invitee))\n validator = ModelValidator(rec)\n if not validator.validate():\n raise ModelExceptionError(\n f\"Invalid invitee record: {validator.errors}\"\n )\n rec.save()\n if is_enqueue:\n from .utils import enqueue_task_records\n\n enqueue_task_records(task)\n return task\n\n except Exception:\n transaction.rollback()\n app.logger.exception(\"Failed to load work file.\")\n raise", "title": "" }, { "docid": "079cb8c76f59fc42d6047381e758d790", "score": "0.597618", "text": "def import_csv_gen(csv_filename):\n with open(csv_filename, 'r') as csv_fd:\n line = 'foo'\n while line:\n try:\n line = csv_fd.readline()\n # generator 'yield' statement for each\n # line of the CSV file below. 
Python CSV\n # support does not allow per-line parsing\n yield line.rstrip('\\n').split(',')\n except EOFError:\n return", "title": "" }, { "docid": "95251fd8e4024a3892bd639248132016", "score": "0.5974432", "text": "def _load(subpath):\n path = os.path.join(RAW_DATA, subpath, '*.csv')\n all_files = glob.glob(path)\n\n li = []\n for filename in all_files:\n df = pd.read_csv(filename, index_col=None)\n li.append(df)\n\n return pd.concat(li, axis=0, ignore_index=True)", "title": "" }, { "docid": "23712c0c4eccc53633e99ce25acdf01b", "score": "0.59710586", "text": "def load_csv(self, filename, sep=\",\"):\n\n return self.session.read.csv(\n filename,\n inferSchema=True,\n sep=sep,\n header=True\n )", "title": "" }, { "docid": "0fd93c00da13796e1ade9a34869e7223", "score": "0.59704846", "text": "def load_from_csv(path):\n segs = []\n with open(path) as csvfile:\n reader = csv.reader(csvfile, delimiter=\"|\")\n for row in reader:\n segs.append(row)\n return segs", "title": "" }, { "docid": "2982aa5c7dd42e4859e89920d47483b1", "score": "0.5969545", "text": "def load_csv(csv_path):\n print('loading file ...')\n df = pd.read_csv(csv_path)\n df = df.drop_duplicates()\n df = df.dropna()\n print('OK')\n return df", "title": "" }, { "docid": "a1f96b9d0e07b5eb7a38b35f768b38b5", "score": "0.5966701", "text": "def _db_import_all(PATH_TO_FILES, run_silently=True):\n\n\t# assign John Wyclif to an author id.\n\tif not run_silently:\n\t\tprint \"Creating database object for Author John Wyclif...\"\n\twy = Author()\n\twy.name = \"John Wyclif\"\n\twy.save()\n\tif not run_silently:\n\t\tprint \"...done.\"\n\n\n\t# import rows from csv files.\n\n\t# Import book titles.\n\tif not run_silently:\n\t\tprint \"Importing book titles...\"\n\t_db_import(\n\t\tcsv_path = PATH_TO_FILES+\"tblTitles.csv\",\n\t\tmodel = Title,\n\t\tfield_conversion = {\n\t\t\t# djangofield <- csv\n\t\t\t\"text\" : \"title\" ,\n \"volume\" : \"volume\" ,\n \"pages\" : \"pages\" ,\n\t\t\t\"old_id\" : \"titleID\" ,\n\t\t},\n\t\tobject_assign = {\n\t\t\t#django field : assign object\n\t\t\t\"author\" : wy , #assign wyclif as author to all.\n\t\t},\n\t\trun_silently=run_silently,\n\t)\n\tif not run_silently:\n\t\tprint \"...Done importing book titles.\"\n\t\n\t\n\t#import chapters.\n\tif not run_silently:\n\t\tprint \"Importing chapter information...\"\n\t_db_import(\n\t\tcsv_path = PATH_TO_FILES+\"tblChapters.csv\",\n\t\tmodel = Chapter,\n\t\tfield_conversion = {\n\t\t\t# djangofield <- csv\n\t\t\t\"old_id\" : \"chapterID\" ,\n \"xml_chapter_id\" : \"xmlChapterID\" ,\n\t\t\t\"heading\" : \"chapterHead\" ,\n\t\t\t\"start_page_no\" : \"startPageNo\" ,\n\t\t},\n\t\tquery_assign = {\n\t\t\t# djangofield <- { use value for csvfield in csv to get an object from model's modelfield }\n\t\t\t# \t\t(effectively links models together)\n\t\t\t\"title\" : {\n\t\t\t\t\"csvfield\" : \"titleID\",\n\t\t\t\t\"get_model\" : Title,\n\t\t\t\t\"get_modelfield\" : \"old_id\",\n\t\t\t},\n\t\t},\n\t\trun_silently=run_silently,\n\t)\n\tif not run_silently:\n\t\tprint \"...Done importing chapter information.\"\t\n\t\n\tdummy_title = Title(author=wy, volume=0, pages=0)\n\tdummy_title.save()\n\t\n\tdummy_page = Page(title=dummy_title, number=0)\n\tdummy_page.save()\n\t\n\tif not run_silently:\n\t\tprint \"Importing Paragraphs...\"\n\t_db_import(\n\t\tcsv_path = PATH_TO_FILES+\"tblParagraphs.csv\",\n\t\tmodel = Paragraph,\n\t\tfield_conversion = {\n\t\t\t# djangofield <- csv\n\t\t\t\"old_id\" : \"paragraphID\" ,\n \"number\" : \"paragraphNo\" ,\n\t\t\t\"old_page_number\" : 
\"pageNo\" ,\n\t\t\t\"split\" : \"split\" ,\n\t\t\t\"text\" : \"paragraphText\" ,\n\t\t},\n\t\tquery_assign = {\n\t\t\t# djangofield <- { use value for csvfield in csv to get an object from model's modelfield }\n\t\t\t# \t\t(effectively links models together)\n\t\t\t\"chapter\" : {\n\t\t\t\t\"csvfield\" : \"chapterID\",\n\t\t\t\t\"get_model\" : Chapter,\n\t\t\t\t\"get_modelfield\" : \"old_id\",\n\t\t\t},\n\t\t},\n\t\tobject_assign = {\n\t\t\t#django field : assign object\n\t\t\t\"page\" : dummy_page , #assign wyclif as author to all.\n\t\t},\n\t\trun_silently=run_silently,\n\t)\n\tif not run_silently:\n\t\tprint \"...Done importing Paragraphs.\"\n\t\t\n\tif not run_silently:\n\t\tprint \"Generating new Page information...\"\n\tfor paragraph in Paragraph.objects.all():\n\t\tmodel = Page\n\t\t\n\t\ttry:\n\t\t\tpage = Page.objects.get(title=paragraph.chapter.title, number=paragraph.old_page_number)\n\t\texcept Page.DoesNotExist:\n\t\t\tnewdata = {\n\t\t\t\t\"title\" : paragraph.chapter.title,\n\t\t\t\t\"number\" : paragraph.old_page_number,\n\t\t\t\t\"scan\" : None,\n\t\t\t}\n\t\t\tpage = model(**newdata)\n\t\t\tpage.save()\t\n\n\t\tif not run_silently:\n\t\t\tprint \"%s -> %s\" % (newdata,model)\n\t\t\t\n\t\tparagraph.page = page\n\t\tparagraph.save()\n\t\tif not run_silently:\n\t\t\tprint \"page %s -> paragraph %s\" % (page.pk,paragraph.pk)\n\t\n\tif not run_silently:\n\t\tprint \"...done generating new Page information.\"\n\t\n\tdummy_title.delete()\n\tdummy_page.delete()\n\n\t#_random_check(dummy_page,dummy_title)\n\t\n\treturn", "title": "" }, { "docid": "6c3a4b9b2d7dae0c85c94903a476572a", "score": "0.5964083", "text": "def import_csv(file_path, override=True, collection_name=None):\n if not file_path:\n print(\"Missing file_path\")\n return\n\n try:\n mongo_client = get_monog_client()\n mongo_db = mongo_client[MONGO_DB_NAME]\n collection_name = collection_name if collection_name else get_collection_name_from_file_path(file_path)\n \n df = pd.read_csv(file_path)\n data_json = json.loads(df.to_json(orient='records'))\n db_cm = mongo_db[collection_name]\n if override:\n db_cm.drop()\n\n db_cm.insert_many(data_json)\n print(f'CSV file import completed,and data is availabe in Collection {collection_name}')\n except Exception as e:\n print(f'Error While importing {file_path}: {e}')", "title": "" }, { "docid": "0f81fedc747532d003b40da78dc40e00", "score": "0.59635174", "text": "def ReadCsv(source):\n original_directory = os.getcwd()\n source = os.path.abspath(source)\n os.chdir(os.path.dirname(source))\n # Try dict first, because dict is a subset of list of dict.\n try:\n if IsCsvADictHeader(source):\n ret = ReadCsvAsDict(source)\n else:\n ret = ReadCsvAsListOfDict(source)\n finally:\n os.chdir(original_directory)\n return ret", "title": "" }, { "docid": "5d030bde2bbc9d187d7b7ed526954ae9", "score": "0.59590536", "text": "def loadcsv(self, fn):\n csvfile = fn\n with open(csvfile) as f:\n reader = csv.reader(f, delimiter=\",\")\n #print reader.next(), \" ommitted\"\n for row in reader:\n row0 = row[0]\n row8 = row[8]\n row3 = row[3]\n row4 = row[4]\n row5 = row[5]\n row6 = row[6]\n row7 = row[7]\n self.name[row8] = row0\n\n self.path[row8] = row3\n if \"Size\" not in row4:\n row4 = int(row4)\n self.size[row8] = row4\n #self.pctsize[row[8]] = float(row[4) / float(self.tsize)\n # size in KB\n self.modified[row8] = row5\n self.accessed[row8] = row6\n self.created[row8] = row7\n\n return", "title": "" }, { "docid": "4c39a62d68e05a7f02afbef58a6d1525", "score": "0.5959031", "text": "def load_csvs(\n 
self,\n csv_files: List[FilePath]\n # , assume_schema_integrity=False\n ) -> None:\n self.csv_files = csv_files", "title": "" }, { "docid": "9638019575685ce6a79d8640bba7c8fd", "score": "0.59495956", "text": "def csv_reader(self, filename):\n data = pd.read_csv(filename, index_col=0, sep=\";\", decimal=\",\")\n return data", "title": "" }, { "docid": "5d953da314409f330b6b63358903580f", "score": "0.5943081", "text": "def test_import_csv(self):\n dbs = ['product', 'customer', 'rental']\n\n LOGGER.info(\"test csv-files insertion\")\n for name in dbs:\n item = db.database[name]\n path = \"csvdata/{}s.csv\".format(name)\n cnt_rec_err = import_csv(item, path)\n expected_cnt_rec_err = (50, 0)\n try:\n self.assertEqual(cnt_rec_err, expected_cnt_rec_err)\n LOGGER.info(\"test of csv insertion sucesfull\")\n except (FileNotFoundError, UnboundLocalError) as err:\n LOGGER(\"test of csv insertion unsucessfull'\")\n LOGGER(err)\n\n LOGGER.info(\"test exception when importing non-existent file\")\n for name in dbs:\n item = db.database[name]\n path_prod = f\"csvdata/searched_{name}_file.csv\"\n try:\n import_csv(item, path_prod)\n except (FileNotFoundError, UnboundLocalError) as err:\n LOGGER(\"file searched_customer_file not found\")\n LOGGER(err)\n\n LOGGER.info(\"validating missing-values exception on a test csv-file\")\n with open(\"csvdata/foo.csv\", \"w\", newline=\"\") as my_empty_rentals_file:\n erf = csv.writer(my_empty_rentals_file)\n erf.writerow([\"rental_id\", \"customer_id\", \"product_id\"])\n erf.writerow([\"rnt00000\", \"\", \"prd0022\"]) # first row - 1 missing\n erf.writerow([\"\", \"user0001\", \"\"]) # second row - 2 missing\n my_empty_rentals_file.close()\n\n itema = db.database[\"foo\"]\n path = \"csvdata/foo.csv\"\n tpl = import_csv(itema, path)\n\n exp_tpl = (2, 3) # expected 2 rows and 3 total missing values\n LOGGER.info(\"test file has %s rows, total missing values %s\", *exp_tpl)\n self.assertEqual(tpl, exp_tpl)\n\n if os.path.exists(\"csvdata/foo.csv\"):\n os.remove(\"csvdata/foo.csv\")\n LOGGER.info(\"test file removed\")", "title": "" }, { "docid": "34cf05dda7eb4842bb97fd29e504230a", "score": "0.59353244", "text": "def load_csv(file_name):\n columns = [ # changed every semester\n \"Timestamp\",\n \"Email Address\",\n \"What is your full name?\",\n \"What is your TAMU email?\",\n \"What is your Slack display name (not the same as full name - can be found under user settings)?\",\n \"Please choose your first pick!\",\n \"Please choose your second choice!\",\n \"Please choose your third choice!\",\n ]\n return pd.read_csv(file_name, usecols=columns).fillna(\"\")", "title": "" }, { "docid": "3f6a51692793dd6262243d4ef0ff39f0", "score": "0.5932975", "text": "def load_data_into_table(csv_path, headers, table_name, cursor):\n with open(csv_path, \"r\") as csv_file:\n # need to skip line with headers \n csv_file.next()\n \n for line in csv_file: \n # ----------------------------------------------------\n # need to wrap any string values in quotes to avoid SQL error \n line_split = line.split(\",\")\n new_line = []\n pattern = r'([a-z])\\w+'\n\n for item in line_split:\n\n if re.match(pattern, item, re.IGNORECASE):\n new_line.append(\"'{}'\".format(item))\n elif item == '':\n new_line.append(\"NULL\")\n else: \n new_line.append(item)\n\n new_line = ','.join(new_line)\n # ----------------------------------------------------\n\n sql = \"INSERT INTO {} ({}) VALUES ({});\".format(table_name, headers, new_line)\n\n cursor.execute(sql)\n\n return", "title": "" }, { "docid": 
"72d1d1e438673b34bb769784a157f481", "score": "0.5922621", "text": "def test_stream_existing_file(filename, header, rows):\n dataset = CSVFile(filename)\n assert dataset.columns == header\n for rowid, row in dataset.iterrows():\n assert len(row) == len(header)\n assert rowid == rows", "title": "" }, { "docid": "93c1713c9204a27f8665765e9931b3a3", "score": "0.59219253", "text": "def import_data(path):\n train = np.loadtxt(\n f\"{path}/data/train.csv\",\n delimiter = \",\",\n skiprows=0,\n dtype=str\n )\n\n test = np.loadtxt(\n f\"{path}/data/test.csv\",\n delimiter = \",\",\n skiprows=0,\n dtype=str\n )\n\n col_names = train[0,:]\n\n # Remove column names\n train = np.delete(train, obj=0, axis=0)\n test = np.delete(test, obj=0, axis=0)\n\n # Map 0 & 1 to label\n label_idx = np.where(col_names == \"Prediction\")[0][0]\n train[:,label_idx] = np.where(train[:,label_idx]==\"s\", 1, 0)\n\n test[:,label_idx] = 0\n\n # Replace -999 with nan\n train = train.astype(np.float32)\n train[train == -999] = np.nan\n\n test = test.astype(np.float32)\n test[test == -999] = np.nan\n return train, test, col_names", "title": "" }, { "docid": "c10242ec45e60e141b7dcc292cbdaa75", "score": "0.59102935", "text": "def parse_files(self):\r\n if self.check_files():\r\n for data_file in self.files:\r\n with open(data_file) as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n if data_file == 'StoreOpenHour.csv':\r\n self.parse_store_open_hours(reader)\r\n if data_file == 'V_OpsStoreAndMail.csv':\r\n self.init_site_contant(reader)\r\n\r\n\tfrom models import Site, SgClientLinkage, SiteContact, SiteWorkingHoursDetail", "title": "" }, { "docid": "11c72bf0bd4ece9827b0550456cb4178", "score": "0.5909863", "text": "def start_import(self):\n\t\tannotations = []\n\t\twith open(self.annotation_file_path, 'rb') as csvfile:\n\t\t\tfilereader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\t\t\tfilereader.next()\n\t\t\tfor row in filereader:\n\t\t\t\tusername = row[0].split(\"_\")[0].strip()\n\t\t\t\timagename = row[1].split(\"\\\\\")[-1].strip()\n\t\t\t\tcapture_at = string_2_datetime(row[2])\n\t\t\t\tannotation_term = annotation_import_get_annotation(row[3])\n\t\t\t\timage_path = imagename_2_path(imagename, uid, capture_at)\n\t\t\t\tannotations.append([username,image_path,annotation_term])\n\t\t#DBHelper.insert_annotationactions(annotations)", "title": "" }, { "docid": "59582d82e29df5023dd641dcaeb763f3", "score": "0.5905735", "text": "def import_cities(self, path):\n labeled_cities = pd.read_csv(path)\n for i in range(100):\n x = labeled_cities.iloc[i,1]\n y = labeled_cities.iloc[i,2]\n self.tab_villes.append([x, y])", "title": "" }, { "docid": "f84c6ef57159adadd27d953744ffe5e8", "score": "0.59008545", "text": "def load_from_csv(self, trigger=None):\n if self.check_reset():\n file_name = self.try_browse(file_type='csv(*.csv);;all (*)')\n if file_name:\n header = self.image_handler.load(file_name)\n if self.image_handler.ind > 0:\n self.display_fit()", "title": "" }, { "docid": "e0cf42f7548e6accb2465bf5734883d4", "score": "0.5899414", "text": "def test_csv_data(self):\n\n filename = \"select_one_external.xlsx\"\n self.get_file_path(filename)\n\n output_csv = os.path.join(DIR, \"test_output\", self.root_filename + \".csv\")\n\n self.assertTrue(\n sheet_to_csv(self.path_to_excel_file, output_csv, \"external_choices\")\n )\n\n # Compare with the expected output\n with codecs.open(output_csv, \"rb\", encoding=\"utf-8\") as expected_file:\n rows = expected_file.read()\n # Evaluate the last line obtained from the csv file\n 
self.assertEqual(\n rows.splitlines()[-1],\n '\"cities\",\"puyallup\",\"Puyallup\",\"washington\",\"pierce\"',\n )\n # Evaluate the first line obtained from the csv file\n self.assertEqual(\n rows.splitlines()[0], '\"list_name\",\"name\",\"label\",\"state\",\"county\"'\n )", "title": "" }, { "docid": "43b188195db3f426dbac5e98abb1d056", "score": "0.5894604", "text": "def load_from_csv(relative_path):\r\n path = path_and_mimetype(relative_path)[0]\r\n reader = csv.DictReader(open(path,'r'))\r\n myList = []\r\n for line in reader:\r\n myList.append(line)\r\n del reader\r\n return myList", "title": "" }, { "docid": "8dfaf71adbeb67b66ae91c9b9cba60e3", "score": "0.5889915", "text": "def import_csv_w_date_flags()-> pd.DataFrame:\n path = os.path.join('..', '..', 'data', 'interim', '1_tbs_with_date_flags.csv')\n return pd.read_csv(path, index_col = 0)", "title": "" }, { "docid": "20243f128697a4901437db784fe89cdd", "score": "0.58895236", "text": "def load_tab_delimited_csv(filepath):\n data = []\n with open(filepath) as file:\n reader = csv.reader(file, delimiter=\"\\t\")\n\n header = []\n for csv_row in reader:\n if len(header) == 0:\n header = [val.strip() for val in csv_row]\n else:\n data_row = OrderedDict()\n\n for i, value in enumerate(csv_row):\n data_row[header[i]] = value.strip()\n\n data.append(data_row)\n\n return data", "title": "" }, { "docid": "988944e6677c9e66fdd2f6e5fb1f4e62", "score": "0.5888307", "text": "def import_file(self, filename):\n df = pd.read_csv(filename, sep=\"\\t\")\n\n # drop 'call' columns\n subset_keys = list(k for k in df.keys() if k[:4] != 'call')\n self.df = df[subset_keys]", "title": "" }, { "docid": "0989ec8d31e7fcfbf1fe29792f6053b2", "score": "0.5886829", "text": "def _iter_read_csv(csv_path, row_types):\n\n with open(csv_path, 'r') as f:\n # Read over the csv\n reader = csv.reader(f)\n reader.__next__()\n for i, row in enumerate(reader):\n # Skip the row if it has unexpected row length\n if len(row) != len(row_types):\n logger.warning('Unexpected row length at row {}'.format(i))\n continue\n\n # skip the row if it has unexpected data types\n good_format = True\n row = [cell.strip() for cell in row] # avoid whitespaces\n for j, cell in enumerate(row):\n #TODO other data types (bool, float, etc) need further support\n if row_types[j] not in [int, str]:\n raise NotImplementedError('cleaning data type {} is not implemented: row {}'.\\\n format(row_types[j].__name__, i))\n\n # if cell needs to be string, go ahead. 
if needs to be int, check if it's number\n good_format &= (row_types[j] == str) or (row_types[j] == int and cell.isdigit())\n\n if not good_format:\n logger.warning('Unexpected row format at row {}'.format(i))\n continue\n\n row = [row_types[j](cell) for j, cell in enumerate(row)]\n yield row", "title": "" }, { "docid": "cb7083336b2c590e2f1ca9ca2e009069", "score": "0.58837426", "text": "def load_data(path):\n input_file = pd.read_csv(path)\n\n return input_file", "title": "" }, { "docid": "0a1a54ef862cc3eb0eeb7acdbdfcc4e0", "score": "0.58741224", "text": "def csv_dialog_import(self, widget):\n CSVDialogImport().run()\n self.display_data(self.datasetdata)\n self.set_plot_action_active()", "title": "" }, { "docid": "28ed8c52ad96b5de47a427a077b0fa38", "score": "0.5872143", "text": "def create_csv_iter(path, skip_header):\n with tf.gfile.Open(path) as csv_file:\n reader = csv.reader(csv_file)\n if skip_header: # Skip the header\n next(reader)\n for row in reader:\n yield row", "title": "" }, { "docid": "27234fd1945a03f9e02e05c1e7c0e1c8", "score": "0.58670974", "text": "def import_data(directory_name, product_file, customer_file,\n rentals_file):\n\n # this section could be parallelized\n # load the csv files and insert them.\n LOGGER.debug(\"Reading CSV Files\")\n csv_files = [product_file, customer_file, rentals_file]\n results = []\n start_time = time.time()\n LOGGER.debug(\"Insertion method: Multiprocessing\")\n pool = mp.Pool(mp.cpu_count())\n results = [pool.apply_async(import_csv,\n args=(directory_name,\n filename,\n filename)) for filename in csv_files]\n pool.close()\n\n end_time = time.time()\n execution_time = end_time - start_time\n\n stats = []\n for result in results:\n LOGGER.debug(\"RESULT: %s\", result.get())\n stats.append(result.get())\n\n LOGGER.debug(\"Execution Time: %s\", execution_time)\n\n return [(stats[0][0], execution_time,) + stats[0][1],\n (stats[1][0], execution_time,) + stats[1][1]]", "title": "" }, { "docid": "36f348951349635670bf0fdfbf388f68", "score": "0.585979", "text": "def read_csv(\n cls,\n paths: Union[str, List[str]],\n delimiter: str = \",\",\n header: bool = False,\n names: Optional[Union[str, List[str]]]=None,\n dtype: Optional[Union[str, List[str], Dict[str, str]]]=None\n ) -> \"FeatureTable\":\n return cls(Table._read_csv(paths, delimiter, header, names, dtype))", "title": "" }, { "docid": "c87f19de8fcf1107c3ff26fab645ba42", "score": "0.58454734", "text": "def load_from_csv(cls, source, filename=None, org=None):\n if isinstance(source, str):\n source = StringIO(source, newline=\"\")\n if filename is None:\n filename = datetime.utcnow().isoformat(timespec=\"seconds\")\n reader = csv.reader(source)\n header = next(reader)\n\n if len(header) == 1 and \"\\t\" in header[0]:\n source.seek(0)\n reader = csv.reader(source, delimiter=\"\\t\")\n header = next(reader)\n\n if len(header) < 2:\n raise ModelExceptionError(\"Expected CSV or TSV format file.\")\n\n header_rexs = [\n re.compile(ex, re.I)\n for ex in [\n \"title$\",\n r\"translated\\s+(title)?\",\n r\"translat(ed)?(ion)?\\s+(title)?\\s*lang(uage)?.*(code)?\",\n \"type$\",\n r\"org(ani[sz]ation)?\\s*(defined)?\\s*type\",\n r\"(short\\s*|description\\s*)+$\",\n \"amount\",\n \"currency\",\n r\"start\\s*(date)?\",\n r\"end\\s*(date)?\",\n r\"(org(gani[zs]ation)?)?\\s*name$\",\n \"city\",\n \"region|state\",\n \"country\",\n r\"disambiguated\\s*(org(ani[zs]ation)?)?\\s*id(entifier)?\",\n r\"disambiguation\\s+source$\",\n r\"(is)?\\s*active$\",\n r\"orcid\\s*(id)?$\",\n \"email\",\n 
r\"(external)?\\s*id(entifier)?\\s+type$\",\n r\"((external)?\\s*id(entifier)?\\s+value|funding.*id)$\",\n r\"(external)?\\s*id(entifier)?\\s*url\",\n r\"(external)?\\s*id(entifier)?\\s*rel(ationship)?\",\n \"put.*code\",\n r\"(is)?\\s*visib(bility|le)?\",\n r\"first\\s*(name)?\",\n r\"(last|sur)\\s*(name)?\",\n \"local.*|.*identifier\",\n r\"url\",\n ]\n ]\n\n def index(rex):\n \"\"\"Return first header column index matching the given regex.\"\"\"\n for i, column in enumerate(header):\n if rex.match(column.strip()):\n return i\n else:\n return None\n\n idxs = [index(rex) for rex in header_rexs]\n\n if all(idx is None for idx in idxs):\n raise ModelExceptionError(\n f\"Failed to map fields based on the header of the file: {header}\"\n )\n\n if org is None:\n org = current_user.organisation if current_user else None\n\n def val(row, i, default=None):\n if len(idxs) <= i or idxs[i] is None or idxs[i] >= len(row):\n return default\n else:\n v = row[idxs[i]].strip()\n return default if v == \"\" else v\n\n rows = []\n cached_row = []\n is_enqueue = False\n for row_no, row in enumerate(reader):\n # skip empty lines:\n if len([item for item in row if item and item.strip()]) == 0:\n continue\n if len(row) == 1 and row[0].strip() == \"\":\n continue\n\n orcid, email = val(row, 17), normalize_email(val(row, 18, \"\"))\n orcid = validate_orcid_id(orcid)\n if email and not validators.email(email):\n raise ValueError(f\"Invalid email address '{email}' in the row #{row_no+2}: {row}\")\n\n visibility = val(row, 24)\n if visibility:\n visibility = visibility.replace(\"_\", \"-\").lower()\n\n invitee = dict(\n identifier=val(row, 27),\n email=email,\n first_name=val(row, 25),\n last_name=val(row, 26),\n orcid=orcid,\n put_code=val(row, 23),\n visibility=visibility,\n )\n\n title = val(row, 0)\n external_id_type = val(row, 19, \"\").lower()\n external_id_value = val(row, 20)\n external_id_relationship = val(row, 22, \"\").replace(\"_\", \"-\").lower()\n\n if external_id_type not in EXTERNAL_ID_TYPES:\n raise ModelExceptionError(\n f\"Invalid External Id Type: '{external_id_type}', Use 'doi', 'issn' \"\n f\"or one of the accepted types found here: https://pub.orcid.org/v3.0/identifiers\"\n )\n\n if not external_id_value:\n raise ModelExceptionError(\n f\"Invalid External Id Value or Funding Id: {external_id_value}, #{row_no+2}: {row}.\"\n )\n\n if not title:\n raise ModelExceptionError(\n f\"Title is mandatory, #{row_no+2}: {row}. Header: {header}\"\n )\n\n if external_id_relationship not in RELATIONSHIPS:\n raise ModelExceptionError(\n f\"Invalid External Id Relationship '{external_id_relationship}' as it is not one of the \"\n f\"{RELATIONSHIPS}, #{row_no+2}: {row}.\"\n )\n\n if (\n cached_row\n and title.lower() == val(cached_row, 0).lower()\n and external_id_type.lower() == val(cached_row, 19).lower()\n and external_id_value.lower() == val(cached_row, 20).lower()\n and external_id_relationship.lower() == val(cached_row, 22).lower()\n ):\n row = cached_row\n else:\n cached_row = row\n\n is_active = val(row, 16, \"\").lower() in [\"y\", \"yes\", \"1\", \"true\"]\n if is_active:\n is_enqueue = is_active\n\n funding_type = val(row, 3)\n if not funding_type:\n raise ModelExceptionError(\n f\"Funding type is mandatory, #{row_no+2}: {row}. 
Header: {header}\"\n )\n else:\n funding_type = funding_type.replace(\"_\", \"-\").lower()\n\n # The uploaded country must be from ISO 3166-1 alpha-2\n country = val(row, 13)\n if country:\n try:\n country = countries.lookup(country).alpha_2\n except Exception:\n raise ModelExceptionError(\n f\" (Country must be 2 character from ISO 3166-1 alpha-2) in the row \"\n f\"#{row_no+2}: {row}. Header: {header}\"\n )\n\n rows.append(\n dict(\n funding=dict(\n title=title,\n translated_title=val(row, 1),\n translated_title_language_code=val(row, 2),\n type=funding_type,\n organization_defined_type=val(row, 4),\n short_description=val(row, 5),\n amount=val(row, 6),\n currency=val(row, 7),\n start_date=PartialDate.create(val(row, 8)),\n end_date=PartialDate.create(val(row, 9)),\n org_name=val(row, 10) or org.name,\n city=val(row, 11) or org.city,\n region=val(row, 12) or org.region,\n country=country or org.country,\n url=val(row, 28),\n is_active=is_active,\n disambiguated_id=val(row, 14) or org.disambiguated_id,\n disambiguation_source=val(row, 15, \"\").upper()\n or org.disambiguation_source,\n ),\n invitee=invitee,\n external_id=dict(\n type=external_id_type,\n value=external_id_value,\n url=val(row, 21),\n relationship=external_id_relationship,\n ),\n )\n )\n\n with db.atomic() as transaction:\n try:\n task = Task.create(org=org, filename=filename, task_type=TaskType.FUNDING)\n for funding, records in groupby(rows, key=lambda row: row[\"funding\"].items()):\n records = list(records)\n\n fr = cls(task=task, **dict(funding))\n validator = ModelValidator(fr)\n if not validator.validate():\n raise ModelExceptionError(f\"Invalid record: {validator.errors}\")\n fr.save()\n\n for external_id in set(\n tuple(r[\"external_id\"].items())\n for r in records\n if r[\"external_id\"][\"type\"] and r[\"external_id\"][\"value\"]\n ):\n ei = ExternalId(record=fr, **dict(external_id))\n ei.save()\n\n for invitee in set(\n tuple(r[\"invitee\"].items()) for r in records if r[\"invitee\"][\"email\"]\n ):\n rec = FundingInvitee(record=fr, **dict(invitee))\n validator = ModelValidator(rec)\n if not validator.validate():\n raise ModelExceptionError(\n f\"Invalid invitee record: {validator.errors}\"\n )\n rec.save()\n if is_enqueue:\n from .utils import enqueue_task_records\n\n enqueue_task_records(task)\n return task\n\n except Exception:\n transaction.rollback()\n app.logger.exception(\"Failed to load funding file.\")\n raise", "title": "" }, { "docid": "b1debe0098b9ad3a90f2fa46af3c1d76", "score": "0.5842897", "text": "def test_performance():\n startcsv = time.time()\n multi_file_csv = h2o.import_file(path=pyunit_utils.locate(\"bigdata/laptop/airlines_all.05p.csv\"),\n na_strings=['\\\\N'])\n endcsv = time.time()\n print(\"************** CSV parse time is {0}\".format(endcsv-startcsv))", "title": "" }, { "docid": "ef7b314ad7417131912ee804549bc3cb", "score": "0.58419734", "text": "def create_csv_iter(path, skip_header):\r\n with tf.gfile.Open(path) as csv_file:\r\n reader = csv.reader(csv_file, delimiter='\\t')\r\n if skip_header: # Skip the header\r\n next(reader)\r\n for row in reader:\r\n yield row", "title": "" }, { "docid": "3c1aa6ce09c141abce60a7b5ed5a9f57", "score": "0.5837966", "text": "def import_csv_to_sql(data):\n\ttry:\n\t\tsession = Session()\n\t\tnew_record = DeliveryNotes(alchemy_id = data[0],\n\t\t\t\t\t\t\t\t\tNextID = data[1],\n\t\t\t\t\t\t\t\t\tParentID = data[2],\n\t\t\t\t\t\t\t\t\tFolder = data[3],\n\t\t\t\t\t\t\t\t\tDocument_ID = data[4],\n\t\t\t\t\t\t\t\t\tFile_Name = 
data[5],\n\t\t\t\t\t\t\t\t\tFile_Directory = data[6],\n\t\t\t\t\t\t\t\t\tFile_Size = data[7],\n\t\t\t\t\t\t\t\t\tFile_Format = data[8],\n\t\t\t\t\t\t\t\t\tFile_Date = data[9],\n\t\t\t\t\t\t\t\t\tFolder_Title = data[10],\n\t\t\t\t\t\t\t\t\tAccount_Code = data[11],\n\t\t\t\t\t\t\t\t\tDelivery_Date = data[12],\n\t\t\t\t\t\t\t\t\tNew_File_Path = data[13])\n\t\tsession.add(new_record)\n\t\tsession.commit()\n\texcept:\n\t\tprint \"There has been an issue with alchemy_id = {}\".format(data[0])", "title": "" }, { "docid": "08cf2abbd97ca76ef69d719d91aa8ba4", "score": "0.5836721", "text": "def load_csv(filename):\n with open(filename, 'rb') as csvfile:\n dialect = csv.Sniffer().sniff(csvfile.read(1024))\n csvfile.seek(0)\n csv_data = csv.reader(csvfile, dialect)\n data = []\n for row in csv_data:\n data.append(row)\n return data", "title": "" }, { "docid": "27dffebeb078eb7ca5ccaf54036d39e7", "score": "0.58356893", "text": "def read_files():\n if not Path(\"./data/Kickstarter_merged.csv\").exists():\n # Read and merge .csv-files\n # Read all .csv-files\n all_files = glob.glob(os.path.join(path, \"Kickstarter*.csv\"))\n df_from_each_file = (pd.read_csv(f, sep=',') for f in all_files)\n # Merge .csv-files\n df_merged = pd.concat(df_from_each_file, ignore_index=True)\n df_merged.to_csv('./data/Kickstarter_merged.csv')\n \"\"\"Otherwise just read in dataframe from merged .csv file\"\"\"\n return pd.read_csv('./data/Kickstarter_merged.csv', index_col=0)", "title": "" }, { "docid": "87367289cab3e674d50765680b6cad70", "score": "0.58340824", "text": "def _open_convert_csv_files(self):\r\n comb_index = None\r\n for s in self.symbol_list:\r\n # Load the CSV file with no header information, indexed on date\r\n self.symbol_data[s] = pd.io.parsers.read_csv(\r\n os.path.join(self.csv_dir, '%s.csv' % s),\r\n header=0,\r\n index_col=0,\r\n names=['datetime', 'open', 'high', 'low', 'close', 'volume'])\r\n\r\n # Combine the index to pad forward values\r\n if comb_index is None:\r\n comb_index = self.symbol_data[s].index\r\n else:\r\n comb_index.union(self.symbol_data[s].index)\r\n\r\n # Set the latest symbol_data to None\r\n self.latest_symbol_data[s] = []\r\n\r\n # Reindex the dataframes\r\n for s in self.symbol_list:\r\n self.symbol_data[s] = self.symbol_data[s].reindex(\r\n index=comb_index,\r\n method='pad').iterrows()", "title": "" }, { "docid": "5ea22700c89d7544315ae7a0c23807d4", "score": "0.5829756", "text": "def read_csv_file(fname):\n lines = read_lines(fname)\n\n # Read the header line.\n n = lines[0].count(SEPARATOR)\n headers = np.empty(n+1, dtype=object)\n headers = lines[0].split(SEPARATOR)\n\n # Drop the three non-text bytes in front of Excel csv.\n if headers[0][1] == 'L':\n headers[0] = headers[0][1:]\n\n local_fields = np.empty((len(lines)-1, n+1), dtype=object)\n for i in range(1, len(lines)):\n s = lines[i].count(SEPARATOR)\n if s != n:\n print(\"Line\", i, \":\", lines[i], \", count\", s)\n sys.exit()\n local_fields[i-1] = lines[i].split(SEPARATOR)\n\n return headers, local_fields", "title": "" }, { "docid": "0b98382b1bd7e79b0bdf524da693c409", "score": "0.58277917", "text": "def test_csv_header_row(path):\n # Without header\n ds_file_without_header = DataSource(path(\"0_0.csv\"), reader_kwargs={\"names\": [\"colA\", \"colB\"]})\n assert ds_file_without_header.get_df().shape == (3, 2)\n meta = ds_file_without_header.get_metadata()\n assert meta[\"total_rows\"] == 3\n assert meta[\"df_rows\"] == 3\n\n # With header\n ds_file_with_header = DataSource(path(\"0_0.csv\"))\n assert 
ds_file_with_header.get_df().shape == (2, 2)\n meta = ds_file_with_header.get_metadata()\n assert meta[\"total_rows\"] == 2\n assert meta[\"df_rows\"] == 2", "title": "" }, { "docid": "359b44598036d994d36432f1c880b081", "score": "0.58209485", "text": "def _open_convert_csv_files(self):\n comb_index = None\n for s in self.symbol_list:\n # Load the CSV file with no header information, indexed on data\n self.symbol_data[s] = pd.io.parsers.read_csv(\n os.path.join(self.csv_dir,\n '{symbol}.csv'.format(s)),\n header=0, index_col=0,\n names=['datetime', 'open', 'low',\n 'high', 'close', 'volume', 'oi']\n )\n\n # Combine the index to pad forward values\n if comb_index is None:\n comb_index = self.symbol_data[s].index\n else:\n comb_index.union(self.symbol_data[s].index)\n\n # Set the latest symbol_data to None\n self.latest_symbol_data[s] = []\n\n # Reindex the dataframes\n for s in self.symbol_list:\n self.symbol_data[s] = self.symbol_data[s].reindex(\n index=comb_index,\n method='pad'\n ).iterrows()", "title": "" }, { "docid": "e766c8edbe1a477dffbd5a1d7490262c", "score": "0.5815101", "text": "def bulk_insert_from_csv(self, filename, index_name, csv_fields=None, thread_count=1, **kwargs):\n with open(filename) as f:\n reader = csv.DictReader(f, fieldnames=csv_fields)\n return self.bulk(index_name, reader, thread_count, **kwargs)", "title": "" } ]
381f74c2b4051b21303d642135e54e72
Fetch graph from one frame; include streams that start in the frame.
[ { "docid": "03d08c27c27d534ab48214ee8dfa10d2", "score": "0.545553", "text": "def fetch_frame(self, start, minutes=0, seconds=0):\n total_secs = minutes * 60 + seconds\n end = start + total_secs\n with self._database:\n g = networkx.MultiDiGraph()\n with self._database.cursor() as cur:\n cur.execute('select * from edges where \\\n (%s<=stime_epoch_secs and stime_epoch_secs<%s);',\n (start, end))\n for rec in cur:\n add_record(g, rec)\n return g", "title": "" } ]
[ { "docid": "ff58a4182114fc95691a872a0ed2b5c9", "score": "0.52892774", "text": "def fetch(self):\n if self.queries is None:\n version_identifier = get_pg_version(self.connection)\n self.queries = queries_for_version(QUERIES, version_identifier)\n print(\"multigraph %s\" % self.graph_name)", "title": "" }, { "docid": "27d2e170ff12ace72993851177f1a57e", "score": "0.52173537", "text": "def _get_full_graph(self):\n from osp.core.utils.simple_search import find_cuds_object\n\n for cuds_object in find_cuds_object(\n lambda x: True,\n self._registry.get(self.root),\n cuba.relationship,\n True,\n ):\n pass\n return self.graph", "title": "" }, { "docid": "eeb4c3479e8faf6d6dd9e3bb09e493ac", "score": "0.51204145", "text": "def get_frame(self):\n\n while True:\n try:\n if self.capture.isOpened() and self.online:\n # Read next frame from stream and insert into deque\n status, frame = self.capture.read()\n if status:\n self.deque.append(frame)\n else:\n self.capture.release()\n self.online = False\n else:\n # Attempt to reconnect\n print(\"attempting to reconnect\", self.camera_stream_link)\n self.load_network_stream()\n self.spin(2)\n self.spin(0.001)\n except AttributeError:\n pass", "title": "" }, { "docid": "9e7a1174fd25583ef99432125314f8b7", "score": "0.510762", "text": "def getSources(self) -> List[ghidra.util.graph.Vertex]:\n ...", "title": "" }, { "docid": "6c52f95659e716158cd69f147c45e414", "score": "0.50767577", "text": "def get_graph_from_snapshots(comp, new_snapshot_graph, param, idx):\n comp.graph_snapshots[idx].append(list(new_snapshot_graph.edges()))\n while len(comp.graph_snapshots[idx]) > param.lookback_cnt:\n comp.graph_snapshots[idx].popleft()\n #print(len(new_snapshot_graph.edges()), len(comp.graph_snapshots[idx]))\n U = nx.DiGraph()\n for e_list in comp.graph_snapshots[idx]:\n #print(len(e_list))\n U.add_edges_from(e_list)\n #print(param.lookback_cnt, len(U.edges()))\n return U", "title": "" }, { "docid": "939af91513d8091e1bc50e2d56588e1b", "score": "0.50317055", "text": "def get_stream_data(self, first_time=True):\n logging.info('getting play url {url} now...'.format(url=self.play_url if first_time else self.next_url))\n response = requests.get(self.play_url if first_time else self.next_url, headers=self.headers)\n parsed = response.json()\n\n try:\n mix_set = parsed['set']\n track = mix_set['track']\n\n result = {\n 'stream_url': track['track_file_stream_url'],\n 'name': track['name'],\n 'performer': track['performer'],\n 'skip_allowed': mix_set['skip_allowed'],\n 'done': mix_set['at_last_track']\n }\n logging.info('result is {result}'.format(result=result))\n\n return result\n except KeyError:\n logging.error('we hit an error while fetching the stream data: {result}'.format(result=parsed))\n raise TrackAccessException()", "title": "" }, { "docid": "a7e26fa39c208b13f249e9f72c304129", "score": "0.49617037", "text": "def iterate_graph_for_feature(self, calmaURL, feature):\n\n featureURL = self.get_feature_url(feature)\n\n # Get top-level analysis information\n url = calmaURL + '/analyses.ttl'\n r = requests.get(url, stream=True)\n g = rdflib.Graph()\n g.parse(r.raw, format=\"n3\")\n\n # Get URL for desired feature\n q = \"\"\"\n SELECT DISTINCT ?s ?p\n WHERE {{\n {{?s ?p <{0}>}} .\n }}\"\"\".format(featureURL)\n qres = g.query(q)\n\n foundURL = False\n for i, e in qres:\n if not foundURL:\n r = requests.get(str(i), stream=True)\n foundURL = True\n\n g = rdflib.Graph()\n g.parse(r.raw, format=\"n3\")\n\n # Get blob URL for specific feature\n q = \"\"\"\n SELECT DISTINCT ?s ?o\n 
WHERE {{\n {{?s <{0}> ?o}} .\n }}\"\"\".format(\"http://calma.linkedmusic.org/vocab/feature_blob\")\n qres = g.query(q)\n\n blobURL = None\n foundBlob = False\n for i, e in qres:\n if not foundBlob:\n blobURL = e\n\n if blobURL:\n # Extract .tar.gz file and open first (only) file\n blobContents = self.extract_zip(blobURL)\n\n # Get events contained in the RDF data\n events = self.get_feature_events(blobContents, feature)\n\n # Save in cache for next time\n self.save_new_calma_cache(calmaURL, feature, events)\n\n # Return the events to the user\n return events", "title": "" }, { "docid": "3836a027839adcea96e41cd55394cda0", "score": "0.49611688", "text": "def test_fetch_incoming_edges(self):\n self.save_edges(self.edges().values())\n\n results = dynamo.IncomingEdge.items.query(fbid_target__eq=200)\n self.assertItemsEqual([(x['fbid_source'], x['fbid_target']) for x in results],\n [(100, 200), (101, 200)])\n for x in results:\n d = dict(x.items())\n edge = self.edges().get((x['fbid_source'], x['fbid_target']))\n _remove_null_values(edge)\n\n # munge the raw dict from dynamo in a compatible way\n assert 'updated' in d\n del d['updated']\n self.assertDictEqual(edge, d)", "title": "" }, { "docid": "0c45a54a27fadb3893d23a8c3f755811", "score": "0.49384093", "text": "def test_fetch_all_incoming_edges(self):\n self.save_edges(self.edges().values())\n\n results = dynamo.IncomingEdge.items.scan()\n self.assertGreater(len(results), 0)\n self.assertItemsEqual([(x['fbid_source'], x['fbid_target']) for x in results],\n self.edges().keys())\n for x in results:\n d = dict(x.items())\n edge = self.edges().get((x['fbid_source'], x['fbid_target']))\n _remove_null_values(edge)\n\n # munge the raw dict from dynamo in a compatible way\n assert 'updated' in d\n del d['updated']\n self.assertDictEqual(edge, d)", "title": "" }, { "docid": "35239f6d4dc839145944b963dbcf94cd", "score": "0.49256596", "text": "def list_recorded_graphs(self):\n # Query database\n labels = []\n query = \"START n=node(*) RETURN distinct labels(n)\"\n data = self.db_connection.data(query)\n # Get all data\n for response_data in data:\n temp = response_data.values()\n # Parse data found\n if len(temp[0]):\n temp = temp[0]\n for a in temp:\n if 'chronograph' in a:\n labels.append(a)\n return labels", "title": "" }, { "docid": "cca894075289c8656d18508671738812", "score": "0.49247292", "text": "def bft_events_graph(start):\n dg = nx.DiGraph()\n stack = [start]\n visited = set()\n\n while stack:\n item = stack.pop()\n current_events = all_events(item)\n\n for ev in current_events:\n dg.add_edge(ev.parent, ev.dependent, label=ev.state)\n\n if ev.depend_node in visited:\n continue\n\n # it is possible to have events leading to same resource but\n # different action\n if ev.depend_node in stack:\n continue\n\n stack.append(ev.depend_node)\n visited.add(ev.parent_node)\n return dg", "title": "" }, { "docid": "d043475b92462b0c4f2bca700d4615ef", "score": "0.49133924", "text": "def graph(self):\n return self._graph", "title": "" }, { "docid": "d043475b92462b0c4f2bca700d4615ef", "score": "0.49133924", "text": "def graph(self):\n return self._graph", "title": "" }, { "docid": "ef8ebfbc33161d79b2367497c441e740", "score": "0.49133384", "text": "def get_frame_to_stream(self):\n frame = None\n self._lock.acquire()\n if self._mode == cst.STREAM_MOTION:\n if self._use_gray:\n frame = concurrent.motion_detection_output_frame_gray\n else:\n frame = concurrent.motion_detection_output_frame\n elif self._mode == cst.STREAM_OBJECT:\n frame = 
concurrent.object_detection_output_frame\n self._lock.release()\n\n return frame", "title": "" }, { "docid": "69bb8cec638f2579461f60cc0ea410d0", "score": "0.48932704", "text": "def stream_liveview(self, url):\n r = requests.request(\"GET\", url, stream = True)\n while True:\n # Read common header, 8 bytes.\n seq, timestamp, ptype= self._decode_common_header(r.raw.read(8))\n jpeg_size, padding_size = \\\n self._decode_payload_header(r.raw.read(128), ptype)\n import time\n bjpg = r.raw.read(jpeg_size)\n r.raw.read(padding_size)\n yield bjpg\n # Read payload header, 128 bytes.\n \n # if ptype == b'0x01':\n # jpeg_size, padding_size = \\\n # self._decode_payload_header(r.raw.read(128), ptype)\n # # Read JPEG frame.\n # jpeg_frame = r.raw.read(jpeg_size)\n # # Throw away the padding.\n # r.raw.read(padding_size)\n # yield jpeg_frame", "title": "" }, { "docid": "323523996a1a12a3a11d162321118e2b", "score": "0.4882496", "text": "def gather(self, graph):\n if self.using_dask and self.client is not None:\n return self.client.gather(graph)\n else:\n return graph", "title": "" }, { "docid": "2236012a6a0b94ca765c29d2774b5e98", "score": "0.4864201", "text": "def graph(self):\n return self.__graph", "title": "" }, { "docid": "6acb51e1f1dc8971ddfada89771fbc66", "score": "0.48578256", "text": "def getReadInfo(self, firstFrame=None):\n ...", "title": "" }, { "docid": "5af6ab375471f93e75a789b83a7e4739", "score": "0.48490757", "text": "def fetch_all(self):\n with self._database:\n g = networkx.MultiDiGraph()\n with self._database.cursor() as cur:\n cur.execute(\"select * from edges;\")\n for rec in cur:\n add_record(g, rec)\n return g", "title": "" }, { "docid": "8fbb15d29f20fcedd825d1688a044752", "score": "0.48294345", "text": "def lj_graph_stream(self):\n try:\n data = self.master_graph_queue.get_nowait()\n # print self.master_graph_queue.qsize()\n except Queue.Empty:\n pass\n else:\n self.lj_graph.update_plot(data)\n if self.master_graph_queue.qsize() == 0 and self.clear_lj_plot:\n self.lj_graph.clear_plot()\n self.lj_graph.create_new_lines()\n self.clear_lj_plot = False\n self.master.after(15, self.lj_graph_stream)", "title": "" }, { "docid": "6083503551e62347b4ebbc0065c22ba6", "score": "0.48293313", "text": "def get_graph(self):\n return self.graph", "title": "" }, { "docid": "7b3016613cf97d3a7b4ef3495539426d", "score": "0.48239678", "text": "def rdf(self):\n\n super(PendingGenome, self).rdf()\n self.graph.add((n[self.name], rdf.type, n.pending_genome))", "title": "" }, { "docid": "601dd498b62ad76faa9ac46af96f7144", "score": "0.48106718", "text": "def query_iterator(self):\n return iter(self.get_graphs_list())", "title": "" }, { "docid": "4e6364aac38207fcf2395a0b0d197cd2", "score": "0.4788036", "text": "def fetch_streams(self):\n\n self._logger.debug(\"Fetching all OOI Streams\")\n\n port = 12575\n end_point = \"/stream\"\n\n request_url = self.build_request(port, end_point)\n\n # Send the request\n self.send_request(request_url)\n\n if self._status_code == HTTP_STATUS_OK:\n return self._response\n else:\n return None", "title": "" }, { "docid": "91bb282d79fcebdc53694b686d281e24", "score": "0.47857875", "text": "def graph(self):\n if not hasattr(self, \"_graph\"):\n self._graph = defaultdict(list)\n\n with open(self.filename) as f:\n for edge in f.readlines():\n tail, head = edge.split()\n self._graph[tail].append(head)\n\n return self._graph", "title": "" }, { "docid": "abcc5808c1a52928e1c9e80ca62fc2e2", "score": "0.47838312", "text": "def get_all(self):\n frames = []\n while True:\n try:\n 
frames.append(self.frames.get(block=False))\n except Queue.Empty:\n break\n return frames", "title": "" }, { "docid": "414aaf5e2d9d93c130c11cbc2534792f", "score": "0.47792667", "text": "def scan(self, frame=None):\n if frame is not None:\n self.frame = frame\n self.convert()\n self.detect()\n return self.get_objects()", "title": "" }, { "docid": "f00154995bd0781c0d1bc4cf176a4339", "score": "0.4775606", "text": "def load_graph():\n #tuples of (id,url)\n raw_nodes = db.Document.get_all_words()\n nodes = {tup[0]:Node(tup[0],tup[1]) for tup in raw_nodes}\n\n #tuples of (from, to, frequency)\n raw_edges = db.Link.get_all_edges()\n\n num_from = set([edge[0] for edge in raw_edges])\n num_to = set([edge[1] for edge in raw_edges])\n for edge in raw_edges:\n node1 = nodes[edge[0]]\n node2 = nodes[edge[1]]\n for i in range(edge[2]):\n node1.add_outgoing_link(node2)\n\n return nodes", "title": "" }, { "docid": "5ad104fbf00dcfc0eaed7137bad650ac", "score": "0.475809", "text": "def get_graph(self):\n return self._graph", "title": "" }, { "docid": "54ee227a173c3e0468f7fede4003e494", "score": "0.47577813", "text": "def flow_graphs(loading_record):\n # Adjacency graph mapping X: [downstream of X]\n down_flow = {}\n # Adjacency graph mapping X: [upstream of X]\n up_flow = {}\n # populate the graphs\n for dp, ds_dps in zip(\n loading_record.device_position,\n loading_record.downstream_device_position\n ):\n if(ds_dps is not None):\n ds_dps = [int(x) for x in ds_dps.split(\",\")]\n if(dp not in down_flow):\n down_flow[dp] = []\n down_flow[dp].extend(ds_dps)\n for ds_dp in ds_dps:\n if(ds_dp not in up_flow):\n up_flow[ds_dp] = []\n up_flow[ds_dp].append(dp)\n return (up_flow, down_flow)", "title": "" }, { "docid": "d71f75e15e975e045483ff83e532a1ea", "score": "0.47460845", "text": "def first_source(graph: Dict[Vertex, List[Vertex]]):\n for item in graph:\n return item", "title": "" }, { "docid": "d71f75e15e975e045483ff83e532a1ea", "score": "0.47460845", "text": "def first_source(graph: Dict[Vertex, List[Vertex]]):\n for item in graph:\n return item", "title": "" }, { "docid": "60490699cbb28cd9f1f8db3f96678d94", "score": "0.47430435", "text": "def get_next_packet(self) -> dict:\n\n try:\n packet_small = pickle.load(self.metadata_file)\n except (EOFError, pickle.UnpicklingError):\n return None\n\n if \"images\" in packet_small:\n # a packet with images, get them from the videos\n\n packet_big = deepcopy(packet_small)\n\n for pos, img_num in packet_small[\"images\"].items():\n\n if not img_num == self.open_videos[pos].get_crt_frame_number():\n print(\"ERROR Frame index differs from video index!!!!\")\n\n img = self.open_videos[pos].read_frame()\n packet_big[\"images\"][pos] = img\n\n return packet_big\n else:\n return packet_small", "title": "" }, { "docid": "f543198deb931995fe788fb21e64865f", "score": "0.47271103", "text": "def get_next_stream(self):\n # do we have a list yet?\n if len(self.stream_list) == 0:\n for file_name in os.listdir(self.bro_http_dir):\n m = REGEX_CONNECTION_ID.match(file_name)\n if m:\n self.stream_list.append(m.group(1))\n\n if len(self.stream_list) == 0:\n return None\n\n return self.stream_list.popleft()", "title": "" }, { "docid": "e5d29ef88287915438161bab376969f0", "score": "0.4709779", "text": "def get_from_gdb (self, kind):\n query = (\n r\"\"\"MATCH path = (a)-[:%s]->(b) \n WHERE ID(a) < ID(b)\n RETURN a,b \"\"\" % kind.upper()\n )\n logging.info(\"query neo4j for %s\" % kind)\n records = self.graph.run(query)\n time.sleep(0.2)\n\n records = eL([\n 
eT((self.Predicatrix.get_predication(pair['a']['id']),\n self.Predicatrix.get_predication(pair['b']['id'])))\n for pair in records\n ])\n assert all (records)\n assert all (all (pair) for pair in records)\n return records", "title": "" }, { "docid": "6d360495a1e76d15ba9d6bd7bb9a5c39", "score": "0.46877718", "text": "def fetch_edges(self):\n edges = []\n for k, v, o in self.G.edges(data=True):\n source_id = self.get_curie(k)\n target_id = self.get_curie(v)\n edge_source = o[\"info\"].get(\"$api\")\n _type = o.get(\"label\")\n _id = self.hash_id(source_id + target_id + edge_source + _type)\n edge = {\n \"source_id\": source_id,\n \"target_id\": target_id,\n \"edge_source\": edge_source,\n \"id\": _id,\n \"type\": _type,\n }\n self.result[source_id + \"|||\" + target_id].append(_id)\n edges.append(edge)\n return edges", "title": "" }, { "docid": "a8bcbd3a2acde0b00682e151c894100a", "score": "0.46863467", "text": "def getGraphs(uid):", "title": "" }, { "docid": "7a97299dec973759836532b27adfac92", "score": "0.4664132", "text": "def get_activity_graph(self, activity_uri, granularity=\"FINE\"):\n result = self.query_modeler.get_activity_graph_request(activity_uri, granularity)\n return result.toxml(\"utf-8\", element_name='ns1:getActivityGraphRequest').decode('utf-8')", "title": "" }, { "docid": "7a0e8afc56e3bc6328ac71f980a11744", "score": "0.46469316", "text": "def get_observed_graph(self):\r\n train_edge_number = int(self.graph.number_of_edges() * self.train_edge_ratio)\r\n added_node = set()\r\n added_edges_number = 0\r\n\r\n _node = list(self.graph.nodes)\r\n start_node = random.choice(_node)\r\n added_node.add(start_node)\r\n logging.debug(\"random choose start node {}\".format(start_node))\r\n\r\n for p, child in nx.bfs_successors(self.graph, start_node):\r\n\r\n for n in child:\r\n neighbor_n = set(self.graph.neighbors(n))\r\n n_new_edges_number = len(neighbor_n & added_node)\r\n added_edges_number += n_new_edges_number\r\n added_node.add(n)\r\n if added_edges_number >= train_edge_number:\r\n h = self.graph.subgraph(added_node)\r\n logging.critical(\"random sample subgraph done. %d edges sampled. 
ratio %f, with %d nodes\" % (\r\n h.number_of_edges(), h.number_of_edges() / self.graph.number_of_edges(), h.number_of_nodes()))\r\n return h\r\n\r\n raise RuntimeError(\"can not get {:d} edges starting from node {:d}\".format(train_edge_number, start_node))", "title": "" }, { "docid": "3b23a63056c574ec1b56770dccdd71ee", "score": "0.46406087", "text": "def traversal_source(self):\n if self._conn is None:\n self._conn = DriverRemoteConnection(self._gremlin_url[0], \"g\")\n return traversal().withRemote(self._conn)", "title": "" }, { "docid": "e14e65082cc2904f7a18bf669b71822d", "score": "0.46333405", "text": "def open_graph(connectDir, scan_id):\n infile = \"%s%s_connectivity.json\" % (connectDir, scan_id)\n G = nx.Graph()\n with open(infile) as f:\n data = json.load(f)\n for i, item in enumerate(data):\n if item[\"included\"]:\n for j, conn in enumerate(item[\"unobstructed\"]):\n if conn and data[j][\"included\"]:\n assert data[j][\"unobstructed\"][i], \"Graph should be undirected\"\n G.add_edge(\n item[\"image_id\"],\n data[j][\"image_id\"],\n weight=distance(item, data[j]),\n )\n return G", "title": "" }, { "docid": "28860452991b9c92dc03a0c2bc52f9d6", "score": "0.46265242", "text": "def get_all_frames(pascal_voc_json):\n return pascal_voc_json['visitedFrames']", "title": "" }, { "docid": "05492db567ef60741548054dea01651b", "score": "0.4621844", "text": "def get_streams(file_name):\n cap = pyshark.FileCapture(file_name)\n streams = {}\n for packet in cap:\n if not hasattr(packet.tcp, 'payload'):\n continue\n\n src = f'{packet.ip.addr}:{packet.tcp.srcport}'\n dst = f'{packet.ip.dst}:{packet.tcp.dstport}'\n payload = decode_payload(packet.tcp.payload)\n\n if packet.tcp.stream not in streams:\n # Stream's first packet determines which peer is the client/server.\n message = RawMessage(\n src,\n dst,\n payload,\n packet.sniff_time,\n packet.sniff_time)\n streams[packet.tcp.stream] = TCPStream(src, dst, [message])\n else:\n stream = streams[packet.tcp.stream]\n last_message = stream.messages[-1]\n if last_message.src == src and last_message.dst == dst:\n last_message.data += payload\n last_message.end_timestamp = packet.sniff_time\n else:\n stream.messages.append(RawMessage(\n src,\n dst,\n payload,\n packet.sniff_time,\n packet.sniff_time))\n\n # TODO: detect when a stream ends, yield and discard it early.\n yield from streams.values()", "title": "" }, { "docid": "a62bf8705ce75fb274904ae388aac28d", "score": "0.46164295", "text": "def GetSources(self) :\r\n\r\n sources = []\r\n for i in self.digraph.nodes() :\r\n if self.digraph.in_degree( i ) == 0 :\r\n sources.append(i)\r\n\r\n return sources", "title": "" }, { "docid": "86e47bb8c3df129d631d717f8a2e5865", "score": "0.46087468", "text": "def load_graph(self, label):\n\n self.label_graph = label\n\n # Gets the nodes\n nodes = Evenement.select(self.db_connection).where(\n \"_: `\" + self.label_graph + \"`\")\n\n # Builds the graph\n if nodes:\n for node in nodes:\n # Add nodes\n self.total_graph.add_node(node)\n\n for node in self.total_graph.nodes():\n # Find equivalences\n inter = [x for x in self.total_graph.nodes()\n if x in node.consequence]\n for i_node in inter:\n # Get edge properties from database\n edge_prop = self.edge_attributes(node.consequence.get(i_node, 'label'),\n node.consequence.get(\n i_node, 'descrption'),\n node.consequence.get(i_node, 'attachment_list'))\n # Grow edge and import properties\n self.total_graph.add_edge(node, i_node, edge_prop)\n # self.total_graph.add_star([node] + inter)", "title": "" }, { "docid": 
"a3cfad943608ff0a40c180fbe3fbe2c0", "score": "0.46076587", "text": "def get_full_stream_list(self):\n return self.__execute_api_call('stream/full')", "title": "" }, { "docid": "b92bd7148aa9b75a8723afbc4bf7e28a", "score": "0.460758", "text": "def resolve(self):\n return Graph.fromdict(self.data)", "title": "" }, { "docid": "6419a7ef399de96227a89f735de29378", "score": "0.46030417", "text": "def get_commongraph(common_graph_txts, train, subset = False):\n now = datetime.now()\n apps = [os.path.join(common_graph_txts, (appname + \"graph.txt\")) for appname in train]\n if subset == True:\n apps = apps[:10]\n lst_of_dfs = []\n \n for app in apps:\n if os.path.exists(app):\n df = pd.read_csv(app, delimiter = \" \", header = None)\n lst_of_dfs.append(df)\n\n concat = pd.concat(lst_of_dfs, ignore_index = True)\n \n concat.columns = [\"source\", \"target\", \"weight\", \"type1\", \"type2\"]\n concat.type1 = concat.type1.apply(fix_node_type)\n concat.type2 = concat.type2.apply(fix_node_type)\n \n no_dup = concat.drop_duplicates(subset = \"source\", keep = \"last\")\n dct = no_dup.groupby(['type1'])['source'].apply(lambda grp: list(grp.value_counts().index)).to_dict()\n \n for key in dct.keys():\n dct[key] = IndexedArray(index = dct[key])\n \n commongraph = StellarGraph(dct, concat[[\"source\", \"target\", \"weight\"]])\n print(\"common graph loaded: \", (datetime.now() - now))\n return commongraph", "title": "" }, { "docid": "bf9b53e944a4cad38e217c1f91c34b8c", "score": "0.45806044", "text": "def _get_news_sources(artist):\n sources = []\n future_calls = []\n\n if 'Myspace' in artist.urls:\n\n if 'myspace_id' not in artist:\n myspace_profile = artist.urls['Myspace'][0]\n myspace_id = _get_myspace_id(myspace_profile)\n artist.myspace_id = myspace_id\n artist.changes_present = True\n\n if 'myspace_id' in artist and artist.myspace_id:\n myspace_blog_feed = \"http://blogs.myspace.com/Modules/BlogV2/Pages/RssFeed.aspx?friendID=%s\" % artist.myspace_id\n sources.append(myspace_blog_feed)\n\n t = mmda_logger('www','request','find feeds',artist.name)\n\n for source_type in LOOK_FOR_FEEDS:\n if source_type in artist.urls:\n future_calls = [Future(_get_feed_link_for,url) for url in artist.urls[source_type]]\n\n sources.extend(list(set([url() for url in future_calls if url()])))\n\n mmda_logger('www','result','found feeds',len(sources),t)\n\n return [(src,None,None) for src in sources]", "title": "" }, { "docid": "7185c70f83a779e1d64594c7a9359c36", "score": "0.4559403", "text": "def load_flows(self, fr):\r\n for i in fr.stream():\r\n self.load_flow(i)", "title": "" }, { "docid": "61878dadf89e8a2d13ad78f0eace7ade", "score": "0.45563957", "text": "def graph_indexes_on_video_parallel(args):\r\n frame = args[0]\r\n raw_frame = frame.copy()\r\n if args[1] in np.unique(args[2][:,1]):\r\n for index in np.argwhere(args[2][:,1] == args[1]):\r\n raw_frame[args[2][index,2], args[2][index,3],:] = np.r_[args[3](args[4]/args[5])[-2::-1]]\r\n frame = cv2.addWeighted(raw_frame, 0.0008, frame, 1 - 0.0008, 0)\r\n frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\r\n return frame", "title": "" }, { "docid": "0ffdc8693e650aa36d561251d6c55f79", "score": "0.4539861", "text": "def get_graphs_list(self):\n from sage.graphs.graph_list import from_graph6\n\n s = self.__query_string__\n re.sub('SELECT.*FROM ', 'SELECT graph6 FROM ', s)\n q = GenericGraphQuery(s, self.__database__, self.__param_tuple__)\n graph6_list = q.query_results()\n return [Graph(str(g[0])) for g in graph6_list]", "title": "" }, { "docid": 
"30558d15cdcfe11bb9c592efa7bc8995", "score": "0.45361125", "text": "def graph(self) -> \"Graph\":\n return self._graph", "title": "" }, { "docid": "585d7433b1b875bcc076cc70e106d6f6", "score": "0.45341045", "text": "def graph():\n from graph import Graph\n graph = Graph()\n return graph", "title": "" }, { "docid": "0721f72f84d1d25e6276ec141c094bc1", "score": "0.4533644", "text": "def list_graphs(n, connected=True):\n list_of_graphs = []\n if connected:\n the_dict = _dict_connected\n else:\n the_dict = _dict_all\n with gzip.open(the_dict[n], 'rt') as graph_file:\n for graph in graph_file:\n graph = graph.strip()\n graph = nx.from_graph6_bytes(bytes(graph, 'utf8'))\n list_of_graphs.append(graph)\n return list_of_graphs", "title": "" }, { "docid": "82530f91a13f648a57ac9efc9f89f4bc", "score": "0.45336124", "text": "def read(self):\n (_, frame) = self._stream.retrieve()\n return frame", "title": "" }, { "docid": "93e03babf439dcf3f36b9ebeaa684803", "score": "0.45332208", "text": "def dump_frames(self, frame):\n current_thread = threading.currentThread()\n frames = []\n frame_browser = frame\n \n # Browse the frame chain as far as we can\n _logger.f_debug(\"dump_frames(), frame analysis:\")\n spacer = \"\"\n while hasattr(frame_browser, 'f_back') and frame_browser.f_back != self.frame_beginning:\n spacer += \"=\"\n _logger.f_debug(\"%s>frame = %s, frame.f_code = %s, frame.f_back = %s, \"\n \"self.frame_beginning = %s\",\n spacer,\n hex(id(frame_browser)),\n frame_browser.f_code,\n hex(id(frame_browser.f_back)),\n hex(id(self.frame_beginning)))\n\n # At root frame, globals == locals so we dump only globals\n if hasattr(frame_browser.f_back, 'f_back')\\\n and frame_browser.f_back.f_back != self.frame_beginning:\n locals_vars_list = self.extract_object_properties(frame_browser.f_locals,\n limit_size=True)\n else:\n locals_vars_list = []\n \n globals_vars_list = self.extract_object_properties(frame_browser.f_globals,\n limit_size=True)\n # normalize path sent to debugging client\n file_path = self.normalize_path_out(frame_browser.f_code.co_filename)\n\n frame_name = \"%s() [%s]\" % (frame_browser.f_code.co_name, current_thread.name,)\n remote_frame = {\n 'id': id(frame_browser),\n 'name': frame_name,\n 'line_number': frame_browser.f_lineno, # Warning 1 based\n 'file_path': file_path,\n 'f_locals': locals_vars_list + globals_vars_list,\n 'thread': current_thread.ident,\n 'thread_name': current_thread.name\n }\n frames.append(remote_frame)\n frame_browser = frame_browser.f_back\n return frames", "title": "" }, { "docid": "5be38f3ea8baa5a1fa80b1f7ec9c97bd", "score": "0.4530746", "text": "def get_data(self) -> Dict[str, Graph]:\n result = {name: graph for name, graph in self.graphs.values()}\n return result", "title": "" }, { "docid": "2984213349a291c7aaa3e3f10f09418e", "score": "0.4521717", "text": "def traverse_start(self, graph):\n raise NotImplementedError", "title": "" }, { "docid": "f56c05c78a409865ee8f0ffc617669d5", "score": "0.4520572", "text": "def to_graph(self) -> GraphGenome:\n gdb = GraphGenome.objects.get_or_create(name=self.source_path)[0]\n for segment in self.gfa.segments:\n Node.objects.get_or_create(seq=segment.sequence, name=(segment.name), graph=gdb)\n\n for path in self.gfa.paths:\n p = Path(accession=path.name, graph=gdb)\n p.save()\n p.append_gfa_nodes(path.segment_names)\n return gdb", "title": "" }, { "docid": "cd1a7f6aa18d6fa42d4b58c6d9c3ed66", "score": "0.45075056", "text": "def _dfs_from(self,\n start: MetaTable,\n visited: Set[MetaTable],\n skip_nullable: bool,\n 
recursion_stack: Set[MetaTable] = None) -> Iterable[MetaTable]:\n if start in visited:\n return []\n visited.add(start)\n if recursion_stack is None:\n recursion_stack = set()\n recursion_stack.add(start)\n for neighbor in self._neighbors(start, skip_nullable):\n if neighbor in visited:\n if neighbor in recursion_stack:\n self._cycle_detected = True\n continue\n yield from self._dfs_from(neighbor, visited, skip_nullable, recursion_stack)\n recursion_stack.remove(start)\n yield start", "title": "" }, { "docid": "4008c0a4f9f6a748b3b2452b645816dd", "score": "0.4506834", "text": "def concretize(self, db: models.DB, location: Location) -> Iterator[str]:\n source_info = self.fetch_node(db, location)\n yield source_info", "title": "" }, { "docid": "363724d44d30b3f86094c9928afb6bd1", "score": "0.44929448", "text": "def fetch(self):\n \n ## initially, assume only single channel signals\n # this base debug breakpoint will apply to all flavours of acquisition\n debug_(pyfusion.DEBUG, level=2, key='base_multi_fetch')\n ordered_channel_names = self.ordered_channel_names()\n data_list = []\n channels = ChannelList()\n timebase = None\n meta_dict={}\n if hasattr(self, 't_min') and hasattr(self, 't_max'):\n t_range = [float(self.t_min), float(self.t_max)]\n else:\n t_range = []\n for chan in ordered_channel_names:\n sgn = 1\n if chan[0]=='-': sgn = -sgn\n bare_chan = (chan.split('-'))[-1]\n fetcher_class = import_setting('Diagnostic', bare_chan, 'data_fetcher')\n tmp_data = fetcher_class(self.acq, self.shot,\n config_name=bare_chan).fetch()\n\n if len(t_range) == 2:\n tmp_data = tmp_data.reduce_time(t_range)\n channels.append(tmp_data.channels)\n # two tricky things here - tmp.data.channels only gets one channel hhere\n #config_name for a channel is attached to the multi part -\n #we need to move it to the particular channel \n # was channels[-1].config_name = chan\n channels[-1].config_name = tmp_data.config_name\n meta_dict.update(tmp_data.meta)\n #print(tmp_data.signal[-1], sgn)\n tmp_data.signal = sgn * tmp_data.signal\n #print(tmp_data.signal[-1], sgn)\n if timebase == None:\n timebase = tmp_data.timebase\n data_list.append(tmp_data.signal)\n else:\n if hasattr(self, 'skip_timebase_check') and self.skip_timebase_check == 'true':\n data_list.append(tmp_data.signal)\n else:\n try:\n assert_array_almost_equal(timebase, tmp_data.timebase)\n data_list.append(tmp_data.signal)\n except:\n raise\n signal=Signal(data_list)\n output_data = TimeseriesData(signal=signal, timebase=timebase,\n channels=channels)\n #output_data.meta.update({'shot':self.shot})\n output_data.meta.update(meta_dict)\n return output_data", "title": "" }, { "docid": "4e300c8875a8dfb18663744e76e7b43d", "score": "0.4491091", "text": "def GetProcessOffsetGraph(self):\n graph_by_process = collections.defaultdict(list)\n for f in self._filenames:\n process_info = self._ReadJSON(f)\n assert ('total_calls_count' in process_info\n and 'call_graph' in process_info), ('Unexpected JSON format for '\n '%s.' 
% f)\n self._SanityCheckAllCallsCapturedByTheInstrumentation(process_info)\n graph_by_process[self._ProcessName(f)].append(process_info['call_graph'])\n return graph_by_process", "title": "" }, { "docid": "b0950e5a69e5faf1f000ec177c534bc1", "score": "0.44910833", "text": "def get_stats(self): \n if self.source:\n core.openflow.connections[self.source].send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))\n core.openflow.connections[self.source].send(of.ofp_stats_request(body=of.ofp_port_stats_request()))\n if self.dest:\n core.openflow.connections[self.dest].send(of.ofp_stats_request(body=of.ofp_flow_stats_request()))\n core.openflow.connections[self.dest].send(of.ofp_stats_request(body=of.ofp_port_stats_request()))", "title": "" }, { "docid": "aaa70c50a7b49247d48c1c009cd94e65", "score": "0.4487541", "text": "def get_stream(datasource, scnl, tstart, tend, fill_value=0, filepattern='*',\n filter=None, samprate=100, verbose=False): \n \n from obspy import UTCDateTime\n import obspy\n from obspy.clients.fdsn import Client\n from obspy.clients.earthworm import Client as EWClient\n from obspy.core.trace import Trace\n from obspy.core.stream import Stream\n from obspy.signal.trigger import coincidence_trigger\n import numpy as np\n from scipy import stats\n from scipy.fftpack import fft\n import glob, os, itertools\n \n #print(datasource)\n #print(scnl)\n #print(tstart)\n #print(tend)\n \n tstart = UTCDateTime(tstart)\n tend = UTCDateTime(tend)\n \n nets = []; stas = []; locs = []; chas = [];\n for s in scnl:\n #print(s)\n nets.append(s.split('.')[2])\n stas.append(s.split('.')[0])\n locs.append(s.split('.')[3])\n chas.append(s.split('.')[1])\n \n st = Stream()\n \n if '/' in datasource:\n # Retrieve data from file structure\n \n flist = list(itertools.chain.from_iterable(glob.iglob(os.path.join(\n root, filepattern)) for root, dirs, files in os.walk(datasource)))\n \n # Determine which subset of files to load based on start and end times and\n # station name; we'll fully deal with stations below\n flist_sub = []\n for f in flist:\n # Load header only\n stmp = obspy.read(f, headonly=True)\n # Check if station is contained in the stas list\n if stmp[0].stats.station in stas:\n # Check if contains either start or end time\n ststart = stmp[0].stats.starttime\n stend = stmp[0].stats.endtime\n if (ststart<=tstart and tstart<=stend) or (ststart<=tend and\n tend<=stend) or (tstart<=stend and ststart<=tend):\n flist_sub.append(f)\n \n # Fully load data from file\n stmp = Stream()\n for f in flist_sub:\n tmp = obspy.read(f, starttime=tstart, endtime=tend)\n if len(tmp) > 0:\n stmp = stmp.extend(tmp)\n \n # merge\n stmp = stmp.taper(max_percentage=0.01)\n for m in range(len(stmp)):\n if stmp[m].stats.sampling_rate != samprate:\n stmp[m] = stmp[m].resample(samprate)\n stmp = stmp.merge(method=1, fill_value=fill_value)\n \n # Only grab stations/channels that we want and in order\n netlist = []\n stalist = []\n chalist = []\n loclist = []\n for s in stmp:\n stalist.append(s.stats.station)\n chalist.append(s.stats.channel)\n netlist.append(s.stats.network)\n loclist.append(s.stats.location)\n \n # Find match of SCNL in header or fill empty\n for n in range(len(stas)):\n for m in range(len(stalist)):\n if (stas[n] in stalist[m] and chas[n] in chalist[m] and nets[n] in\n netlist[m] and locs[n] in loclist[m]):\n st = st.append(stmp[m])\n if len(st) == n:\n print('No data found for {}.{}.{}.{}'.format(stas[n],chas[n],nets[n],locs[n]))\n trtmp = Trace()\n trtmp.stats.sampling_rate = samprate\n 
trtmp.stats.station = stas[n]\n st = st.append(trtmp.copy())\n \n else:\n # retrieve data from server\n \n if '.' not in datasource:\n client = Client(datasource)\n else:\n datasource = datasource.split(':')\n client = EWClient(datasource[0], int(datasource[1]))\n \n for n in range(len(stas)):\n try:\n stmp = client.get_waveforms(nets[n], stas[n], locs[n], chas[n], tstart, tend)\n for m in range(len(stmp)):\n #stmp[m].data = np.ma.masked_where(stmp[m].data == -2**31, stmp[m].data) # masks out all values of -2**31 (Winston NaN Token)\n #stmp[m] = stmp[m].split().merge(method=0, fill_value='interpolate')[0] # splits trace at masked values; then re-merges using linear interpolation\n stmp[m].data = np.where(stmp[m].data==-2**31,0,stmp[m].data)\n if stmp[m].stats.sampling_rate != samprate:\n stmp[m] = stmp[m].resample(samprate)\n stmp = stmp.taper(max_percentage=0.01)\n stmp = stmp.merge(method=1, fill_value=fill_value)\n except (obspy.clients.fdsn.header.FDSNException):\n try: # try again\n stmp = client.get_waveforms(nets[n], stas[n], locs[n], chas[n],\n tstart, tend)\n for m in range(len(stmp)):\n #stmp[m].data = np.ma.masked_where(stmp[m].data == -2**31, stmp[m].data) # masks out all values of -2**31 (Winston NaN Token)\n #stmp[m] = stmp[m].split().merge(method=0, fill_value='interpolate')[0] # splits trace at masked values; then re-merges using linear interpolation\n stmp[m].data = np.where(stmp[m].data==-2**31,0,stmp[m].data)\n if stmp[m].stats.sampling_rate != samprate:\n stmp[m] = stmp[m].resample(samprate)\n stmp = stmp.taper(max_percentage=0.01)\n stmp = stmp.merge(method=1, fill_value=fill_value)\n except (obspy.clients.fdsn.header.FDSNException):\n print('No data found for {0}.{1}'.format(stas[n],nets[n]))\n trtmp = Trace()\n trtmp.stats.sampling_rate = samprate\n trtmp.stats.station = stas[n]\n stmp = Stream().extend([trtmp.copy()])\n \n # Last check for length; catches problem with empty waveserver\n if len(stmp) != 1:\n print('No data found for {}.{}.{}.{}'.format(stas[n],chas[n],nets[n],locs[n]))\n trtmp = Trace()\n trtmp.stats.sampling_rate = samprate\n trtmp.stats.station = stas[n]\n stmp = Stream().extend([trtmp.copy()])\n \n st.extend(stmp.copy()) \n\n\n st = st.trim(starttime=tstart, endtime=tend, pad=True, fill_value=fill_value)\n \n return st", "title": "" }, { "docid": "02e67f3884a8361c2f7494d3ed0b0a58", "score": "0.44826198", "text": "def compute_frames_to_record(\n this_frame: FrameType,\n ) -> List[Tuple[FrameType, int, FrameType]]:\n if threading._active_limbo_lock.locked(): # type: ignore\n # Avoids deadlock where a Scalene signal occurs\n # in the middle of a critical section of the\n # threading library\n return []\n frames: List[Tuple[FrameType, int]] = [\n (\n cast(\n FrameType,\n sys._current_frames().get(cast(int, t.ident), None),\n ),\n cast(int, t.ident),\n )\n for t in threading.enumerate()\n if t != threading.main_thread()\n ]\n # Put the main thread in the front.\n frames.insert(\n 0,\n (\n sys._current_frames().get(\n cast(int, threading.main_thread().ident), None\n ),\n cast(int, threading.main_thread().ident),\n ),\n )\n # Process all the frames to remove ones we aren't going to track.\n new_frames: List[Tuple[FrameType, int, FrameType]] = []\n for (frame, tident) in frames:\n orig_frame = frame\n if not frame:\n continue\n fname = frame.f_code.co_filename\n # Record samples only for files we care about.\n if not fname:\n # 'eval/compile' gives no f_code.co_filename. 
We have\n # to look back into the outer frame in order to check\n # the co_filename.\n back = cast(FrameType, frame.f_back)\n fname = Filename(back.f_code.co_filename)\n while not Scalene.should_trace(fname):\n # Walk the stack backwards until we hit a frame that\n # IS one we should trace (if there is one). i.e., if\n # it's in the code being profiled, and it is just\n # calling stuff deep in libraries.\n if frame:\n frame = cast(FrameType, frame.f_back)\n if frame:\n fname = frame.f_code.co_filename\n continue\n else:\n break\n if frame:\n new_frames.append((frame, tident, orig_frame))\n return new_frames", "title": "" }, { "docid": "dcd082f3a3dbb50946b1e60018284ae8", "score": "0.44823265", "text": "def dfs_loop(graph_dict, nodes, track):\n\n for node in nodes:\n if node not in track.explored:\n track.current_source = node\n dfs(graph_dict, node, track)", "title": "" }, { "docid": "dc2649fc4394a941f31eb546523a826e", "score": "0.4480871", "text": "def get_provenance_graph(self, start=None, filter_long_labels=True):\n graph = start or nx.MultiDiGraph()\n label = \"{}: {}\".format(self.symbol.name, self.pretty_string())\n if filter_long_labels and len(label) > 30:\n label = \"{}\".format(self.symbol.name)\n graph.add_node(\n self, fillcolor=\"#43A1F8\", fontcolor='white', label=label)\n model = getattr(self.provenance, 'model', None)\n source = getattr(self.provenance, 'source', None)\n if model is not None:\n model = \"Model: {}\".format(model)\n graph.add_node(model, label=model, fillcolor='orange',\n fontcolor='white', shape='rectangle')\n graph.add_edge(model, self)\n for model_input in self.provenance.inputs:\n graph = model_input.get_provenance_graph(start=graph)\n graph.add_edge(model_input, model)\n elif source is not None:\n source = \"Source: {}\".format(source['source'])\n graph.add_edge(source, self)\n\n return graph", "title": "" }, { "docid": "f0f498ab58b4dd5fc5b88d42d005ff58", "score": "0.44799507", "text": "def get_all_sources(cnx):\n\n cursor = cnx.cursor()\n result = list()\n try:\n cursor.execute(\"\"\"SELECT media_source.id, media_source.name, os.id, os.name, date_acquired FROM media_source\n INNER JOIN os\n ON media_source.os_id = os.id\n \"\"\")\n result = cursor.fetchall()\n cursor.close()\n except Exception as err:\n print err\n return None\n\n sources = list()\n for r in result:\n sources.append(SourceInfo(r[0],r[1], r[2],r[3],r[4]))\n\n return sources", "title": "" }, { "docid": "04dd1e2252b8dbb20555197909f28394", "score": "0.4475871", "text": "def stream(session: Session, dataset: AnyDataset) -> Iterator[JsonDict]:\n with session.get(str(dataset.url) + \"/records\", stream=True) as r:\n yield from response.ndjson(r)", "title": "" }, { "docid": "c3a91cbef123fc874f150176fb21deec", "score": "0.44609404", "text": "def fetch(self):\n if self.pos >= self.num_wedges:\n return ()\n\n wedge = self.wedges[self.pos]\n self.pending = wedge['weight']\n\n if self.distinct:\n name_suffix = chr(ord('A') + self.pos)\n wedge['name'] = f\"{wedge['original_name']}-{name_suffix}\"\n wedge['first'] = 1 # distinct wedges all start from frame 1 because they will be treated as\n # unique datasets rather than frames from the same set.\n self.pos += 1\n\n # prepare inverse beam\n if wedge['inverse']:\n inv_wedge = copy.deepcopy(wedge)\n inv_wedge['first'] = 1\n inv_wedge['start'] += 180.\n self.factor = 2\n\n # for inverse beam treat as separate datasets with different original names\n wedge['original_name'] = wedge['name'] = f\"{wedge['original_name']}_1\"\n inv_wedge['original_name'] = 
inv_wedge['name'] = f\"{wedge['original_name']}_2\"\n\n self.dispensed[wedge['original_name']].append(wedge)\n self.dispensed[inv_wedge['original_name']].append(inv_wedge)\n return wedge, inv_wedge,\n else:\n self.factor = 1\n self.dispensed[wedge['original_name']].append(wedge)\n return wedge,", "title": "" }, { "docid": "e56201942cf20484558273c52d131ea6", "score": "0.4458813", "text": "def get(self, track_id):\n args = stream_parser.parse_args()\n stream_preview = args.get(\"preview\")\n decoded_id = decode_with_abort(track_id, ns)\n info = get_track_stream_info(decoded_id)\n\n track = info.get(\"track\")\n cid = track.get(\"track_cid\")\n if stream_preview:\n cid = track.get(\"preview_cid\")\n if not track or not cid:\n logger.error(\n f\"tracks.py | stream | Track with id {track_id} may not exist or has no {'preview' if stream_preview else 'track'}_cid. Please investigate.\"\n )\n abort_not_found(track_id, ns)\n\n cid = cid.strip()\n redis = redis_connection.get_redis()\n healthy_nodes = get_all_healthy_content_nodes_cached(redis)\n if not healthy_nodes:\n logger.error(\n f\"tracks.py | stream | No healthy Content Nodes found when streaming track ID {track_id}. Please investigate.\"\n )\n abort_not_found(track_id, ns)\n\n rendezvous = RendezvousHash(\n *[re.sub(\"/$\", \"\", node[\"endpoint\"].lower()) for node in healthy_nodes]\n )\n content_nodes = rendezvous.get_n(5, cid)\n\n request_args = stream_parser.parse_args()\n\n # signature for the track to be included as a query param in the redirect to CN\n signature = get_track_stream_signature(\n {\n \"track\": track,\n \"stream_preview\": stream_preview,\n \"user_data\": request_args.get(\"user_data\"),\n \"user_signature\": request_args.get(\"user_signature\"),\n \"premium_content_signature\": request_args.get(\n \"premium_content_signature\"\n ),\n }\n )\n if not signature:\n abort_not_found(track_id, ns)\n\n params = {\"signature\": json.dumps(signature)}\n skip_play_count = request_args.get(\"skip_play_count\", False)\n if skip_play_count:\n params[\"skip_play_count\"] = skip_play_count\n filename = request_args.get(\"filename\")\n if filename:\n params[\"filename\"] = filename\n\n base_path = f\"tracks/cidstream/{cid}\"\n query_string = urllib.parse.urlencode(params, quote_via=urllib.parse.quote)\n path = f\"{base_path}?{query_string}\"\n\n for content_node in content_nodes:\n stream_url = urljoin(content_node, path)\n headers = {\"Range\": \"bytes=0-1\"}\n try:\n response = requests.get(\n stream_url + \"&skip_play_count=True\", headers=headers, timeout=0.5\n )\n if response.status_code == 206:\n return stream_url\n except:\n pass\n abort_not_found(track_id, ns)", "title": "" }, { "docid": "dbd656f448037811f6b36977b86146f3", "score": "0.44586176", "text": "def get_graph(x, device=None):\n # if device is None:\n # device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n device = torch.device(\"cpu\")\n\n x, batch = get_entities(x, device)\n ei = get_ei(batch, device)\n\n # edges features are concatenatenations of node features for source\n # and destination nodes\n src, dest = ei\n e = torch.cat([x[src.type(torch.IntTensor)], x[dest.type(torch.IntTensor)]], 1)\n\n return x, batch, ei, e", "title": "" }, { "docid": "b3477420521b4e66c1f467f0c83ada14", "score": "0.44569695", "text": "def set_graphs_from_db(self):\n database = Database(self.app, self.session)\n if 'user' in self.session:\n query = '''\n SELECT graph_name, user_id, public\n FROM datasets\n WHERE (user_id = ? OR public = ? 
)\n '''\n rows = database.execute_sql_query(query, (self.session['user']['id'], True))\n else:\n query = '''\n SELECT graph_name, user_id, public\n FROM datasets\n WHERE public = ?\n '''\n rows = database.execute_sql_query(query, (True, ))\n\n for row in rows:\n if row[2]:\n self.public_graphs.append(row[0])\n else:\n self.private_graphs.append(row[0])", "title": "" }, { "docid": "1a8fd897daf6f68dbec120319715a977", "score": "0.44556472", "text": "def graph(self):\n # type: () -> Dict[str, Any]\n return self.service.graph(self)", "title": "" }, { "docid": "6b3913c1f6e21000fd146be9565ac734", "score": "0.44552997", "text": "def get_all_stream_info(self):\n session = self.tautulli.get_activity(session_id=self.session_id)\n if session:\n self._set_stream_attributes(session)\n self.session_exists = True\n else:\n self.session_exists = False", "title": "" }, { "docid": "7c2f142c45b6cfcb1e661897249db02c", "score": "0.44522798", "text": "def get_graph(self):\n node_list, edge_list = [], []\n for cmd in self.seq:\n if cmd[0] == \"N\":\n node_list.append(cmd[1])\n elif cmd[0] == \"E\":\n edge_list.append(cmd[1])\n return node_list, edge_list", "title": "" }, { "docid": "3303b27b1ec8d5ccfb4eaae6a6ca0991", "score": "0.4442142", "text": "def test_fetch_outgoing_edges(self):\n self.save_edges(self.edges().values())\n\n results = dynamo.OutgoingEdge.incoming_edges.query(fbid_source__eq=100)\n self.assertItemsEqual([(x['fbid_source'], x['fbid_target']) for x in results],\n [(100, 200), (100, 202)])\n for x in results:\n d = dict(x.items())\n edge = self.edges().get((x['fbid_source'], x['fbid_target']))\n _remove_null_values(edge)\n\n # munge the raw dict from dynamo in a compatible way\n assert 'updated' in d\n del d['updated']\n self.assertDictEqual(edge, d)", "title": "" }, { "docid": "342992f5c6a4fc3fd1d9c2730c913b1b", "score": "0.44405144", "text": "def display_graph_from_commit(head=None):\n try:\n if not head:\n head = request.args['head']\n head_obj = g.repo[head]\n return display_graph(head_obj)\n except KeyError:\n abort(404)", "title": "" }, { "docid": "a3c5b27e0043c1fa2b7d8fa71679a6b7", "score": "0.4439112", "text": "def do_req_covid_all(self, _):\n save_frame, do_plot = man_data()\n self.loop.create_task(self.get_covid_data_all(save_frame, do_plot))", "title": "" }, { "docid": "cc5bd8b448382b026a61a6dd30c06560", "score": "0.44385692", "text": "async def _fetch(self):\n\n data = await get('game/{}/feed/live', self.game_id)\n all_plays = data.liveData.plays.allPlays\n self.log.debug('received %s plays', len(all_plays))\n new_plays = all_plays[self.marker:]\n self.log.info('received %s new plays', len(new_plays))\n self.marker = len(all_plays)\n\n self.final = data.gameData.status.abstractGameState == 'Final'\n\n for play in new_plays:\n yield play\n\n if self.final:\n yield self._final_play(data)", "title": "" }, { "docid": "41be7e0332876ff02e35cd76b131db80", "score": "0.44291648", "text": "def index_impl(self):\n return [run_name\n for (run_name, run_data) in self._multiplexer.Runs().items()\n if run_data.get(event_accumulator.GRAPH)]", "title": "" }, { "docid": "962166a13026dca28e31ef6fb28d2b92", "score": "0.44263875", "text": "def poll_frames(self):\n frames = {}\n while len(frames) < len(self._enabled_devices.items()) :\n for (serial, device) in self._enabled_devices.items():\n streams = device.pipeline_profile.get_streams()\n frameset = device.pipeline.poll_for_frames() #frameset will be a pyrealsense2.composite_frame object\n if frameset.size() == len(streams):\n frames[serial] = {}\n for 
stream in streams:\n if (rs.stream.infrared == stream.stream_type()):\n frame = frameset.get_infrared_frame(stream.stream_index())\n key_ = (stream.stream_type(), stream.stream_index())\n else:\n frame = frameset.first_or_default(stream.stream_type())\n key_ = stream.stream_type()\n frames[serial][key_] = frame\n\n return frames", "title": "" }, { "docid": "73823fdcd4002eccd93f77195b0ac874", "score": "0.442633", "text": "def getSinks(self) -> List[ghidra.util.graph.Vertex]:\n ...", "title": "" }, { "docid": "949f4020625912cd50f78b0461f437e4", "score": "0.44213614", "text": "def return_internal_graph(self):\n return self.g", "title": "" }, { "docid": "607275ab6438a9108dba329a6a829feb", "score": "0.4420686", "text": "def show_graph_by_date(data):\n server.ubigraph.clear()\n\n periods = data['axis']\n elements = data['data']\n\n for p in periods:\n if not p in elements:\n continue\n nodes = elements[p]\n for n in nodes:\n add_node(n)\n sleep(1)", "title": "" }, { "docid": "4124bbd0c4ed98871985e06836c63b29", "score": "0.4417521", "text": "def _construct_graph_df(self, seq_det_df, start_frame, end_frame = None, ensure_end_is_in = False):\r\n if end_frame is not None:\r\n # Just load all frames between start_frame and end_frame at the desired step size\r\n valid_frames = np.arange(start_frame, end_frame + 1, self.step_size)\r\n\r\n if ensure_end_is_in and (end_frame not in valid_frames):\r\n valid_frames = valid_frames.tolist() + [end_frame]\r\n if self.dataset_params['max_detects'] is not None:\r\n # print(self.dataset_params['max_detects'])\r\n scene_df_ = seq_det_df[seq_det_df.frame.isin(valid_frames)].copy()\r\n frames_cumsum = scene_df_.groupby('frame')['bb_left'].count().cumsum()\r\n valid_frames = frames_cumsum[frames_cumsum <= self.dataset_params['max_detects']].index\r\n\r\n\r\n else:\r\n # Consider all posible future frames (at distance step_size)\r\n valid_frames = np.arange(start_frame, seq_det_df.frame.max(), self.step_size)\r\n # print(\"valid\")\r\n # print(valid_frames)\r\n # We cannot have more than dataset_params['frames_per_graph'] frames\r\n if self.dataset_params['frames_per_graph'] != 'max':\r\n valid_frames = valid_frames[:self.dataset_params['frames_per_graph']]\r\n\r\n # We cannot have more than dataset_params['max_detects'] detections\r\n if self.dataset_params['max_detects'] is not None:\r\n print(self.dataset_params['max_detects'])\r\n scene_df_ = seq_det_df[seq_det_df.frame.isin(valid_frames)].copy()\r\n frames_cumsum = scene_df_.groupby('frame')['bb_left'].count().cumsum()\r\n valid_frames = frames_cumsum[frames_cumsum <= self.dataset_params['max_detects']].index\r\n\r\n graph_df = seq_det_df[seq_det_df.frame.isin(valid_frames)].copy()\r\n # print(graph_df)\r\n # graph_df = graph_df.sort_values(by=['id','frame']).reset_index(drop=True)\r\n # print(graph_df)\r\n graph_df = graph_df.sort_values(by=['frame', 'detection_id']).reset_index(drop=True)\r\n # print(graph_df.sort_values(by=['id', 'frame']).reset_index(drop=True))\r\n return graph_df, sorted(graph_df.frame.unique()),sorted(graph_df.id.values), sorted(graph_df.frame.values)", "title": "" }, { "docid": "4f124241277d120d899d4d53afff4bb4", "score": "0.4414191", "text": "def load_graph(fd):\n # Initiate graph dictionary\n graph = {}\n relate = {}\n # Iterate through the file line by line\n for line in fd:\n # And split each line into two URLs\n node, target = line.split()\n # Put nodes into the 'from' list\n graph.setdefault('from', [])\n # Put targets into the 'to' list\n graph.setdefault('to', [])\n 
graph['from'].append(node)\n graph['to'].append(target)\n\n # Create directional graph\n data_frame = pd.DataFrame(graph)\n G = nx.from_pandas_edgelist(data_frame, 'from', 'to', create_using=nx.DiGraph())\n\n nx.draw(G, arrows=True)\n\n # Display directional graph\n plt.show()\n return graph", "title": "" }, { "docid": "ae34b3472e9b88f29966cd70b828d156", "score": "0.4413922", "text": "def get_graph_starting_in_node(self, start_node_id):\n # Get nodes and edges by traversing relationships recursively starting in the node with case_id.\n visited_nodes = {start_node_id}\n edges = set()\n not_traversed = [start_node_id]\n while not_traversed:\n q = \"\"\"MATCH (node1:$label) -[r]-> (node2:$label)\n WHERE id(node1) = {node1_id}\n RETURN id(node2) as node2_id, id(r) as r_id\"\"\"\n params = { \"node1_id\": not_traversed[0] }\n query_result = list(self.query(q, params))\n reached_nodes = { int(result[\"node2_id\"]) for result in query_result }\n visited_nodes = visited_nodes | reached_nodes\n edges = edges | { (not_traversed[0], result[\"r_id\"], result[\"node2_id\"]) for result in query_result }\n not_traversed = not_traversed[1:] + list(reached_nodes - visited_nodes)\n\n # Get the properties and labels of all nodes\n properties = dict()\n labels = dict()\n for node_id in visited_nodes:\n q = \"\"\"MATCH (node:$label)\n WHERE id(node) = {node_id}\n RETURN properties(node) as properties, labels(node) as label\"\"\"\n params = { \"node_id\": node_id }\n query_result = self.query(q, params).single()\n properties[node_id] = query_result[\"properties\"]\n labels[node_id] = [label for label in query_result[\"label\"] if label != self.label][0]\n \n # Get the properties and types of all relationships\n for (_, r_id, _) in edges:\n q = \"\"\"MATCH (node1:$label) -[r]-> (node2:$label)\n WHERE id(r) = {r_id}\n RETURN properties(r) as properties, type(r) as label\"\"\"\n params = { \"r_id\": r_id }\n query_result = self.query(q, params).single()\n properties[r_id] = query_result[\"properties\"]\n labels[r_id] = query_result[\"label\"]\n \n return (visited_nodes, edges, properties, labels)", "title": "" }, { "docid": "80f766e9272a66241afa0c3f9183f575", "score": "0.44109306", "text": "def gen_frame():\n while True:\n frame = camera_stream()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n') # concate frame one by one and show result", "title": "" }, { "docid": "3b387a1a28aa21ed53572bc6a018c3ae", "score": "0.44091365", "text": "def build_graph(self):\n raise NotImplementedError", "title": "" }, { "docid": "4b58be93e2fc63a8091b2ecd4bee768d", "score": "0.44090587", "text": "def load_network_stream(self):\n\n def load_network_stream_thread():\n if self.verify_network_stream(self.camera_stream_link):\n self.capture = cv2.VideoCapture(self.camera_stream_link)\n self.online = True\n\n self.load_stream_thread = Thread(target=load_network_stream_thread, args=())\n self.load_stream_thread.daemon = True\n self.load_stream_thread.start()", "title": "" }, { "docid": "bfa1352aebf31cb89ad993d4ba7fe60b", "score": "0.43981442", "text": "def load_call_graph(self, granularity=Granularity.FUNC):\n call_graph = nx.DiGraph()\n parent = Stack()\n\n raw_call_graph = None\n if os.path.isfile(self.source):\n raw_call_graph = open(self.source)\n elif os.path.isdir(self.source):\n raw_call_graph = self._exec_cflow()\n\n try:\n previous = Call.from_cflow(raw_call_graph.readline(), granularity)\n for line in raw_call_graph:\n current = Call.from_cflow(line, granularity)\n\n if current.level > 
previous.level:\n parent.push(previous)\n elif current.level < previous.level:\n for t in range(previous.level - current.level):\n parent.pop()\n\n if parent.top:\n caller = callee = None\n entry = exit = dangerous = defense = False\n if self.is_reverse:\n caller = current\n callee = parent.top\n else:\n caller = parent.top\n callee = current\n\n (caller_attrs, callee_attrs) = utilities.get_node_attrs(\n 'cflow', caller, callee, self.defenses,\n self.vulnerabilities\n )\n\n call_graph.add_node(caller, caller_attrs)\n\n if callee_attrs is not None:\n call_graph.add_node(callee, callee_attrs)\n\n # Adding the edge caller -- callee\n attrs = {'cflow': None, 'call': None}\n call_graph.add_edge(caller, callee, attrs)\n\n # Adding the edge callee -- caller with the assumption\n # that every call must return\n attrs = {'cflow': None, 'return': None}\n call_graph.add_edge(callee, caller, attrs)\n\n previous = current\n finally:\n if raw_call_graph:\n raw_call_graph.close()\n\n return call_graph", "title": "" }, { "docid": "f6cb7e09eb0e1b79c6ef1b160a3fa423", "score": "0.4396797", "text": "def get_connection(start: str, destination: str, time: str, date='today') -> dict:\n req = None\n try:\n payload = {'from': start, 'to': destination, 'time': time, 'date': date, 'num': 1}\n req = requests.get(config.search_ch['search_ch_api'], params=payload)\n connections = search_ch_parser.parse_connections(req.text)\n first_connection = connections['connections'][0]\n return first_connection\n except Exception as exception:\n _parse_exception(exception, req)", "title": "" }, { "docid": "bd235a3b6bf7581c995314e252ecb41a", "score": "0.43962887", "text": "def graph(self) -> DiGraph:\n return self._graph", "title": "" }, { "docid": "800cccebe69e2e93b0fa71e7d3cd831e", "score": "0.43902242", "text": "def parse_all_edges(inputfile, outputfile, node_map, noencode):\n global cnt_edges_wo_nodes_fn\n total_edges = 0\n\n smallest_timestamp = None\n # scan through the entire file to find the smallest timestamp from all the edges.\n # this step is only needed if we need to add some statistical information.\n \n # we will go through the CamFlow data (again) and output edgelist to a file\n output = open(outputfile, \"w+\")\n description = '\\x1b[6;30;42m[STATUS]\\x1b[0m Parsing edges in CamFlow data from {}'.format(inputfile)\n pb = tqdm.tqdm(desc=description, mininterval=1.0, unit=\" recs\")\n edge_possible_value = [\"used\", \"wasGeneratedBy\", \"wasInformedBy\", \"wasDerivedFrom\", \"wasAssociatedWith\"]\n with open(inputfile, 'r') as f:\n for line in f:\n pb.update()\n# json_object = json.loads(line.decode(\"utf-8\",\"ignore\"))\n json_object = json.loads(line) \n # var takes the value of \"used\", \"wasGeneratedBy\", \"wasInformedBy\", \"wasDerivedFrom\", \"wasAssociatedWith\"\n for var in edge_possible_value:\n # Edge information can be stored in different variable as per the entity_type: used, wasGeneratedBy,wasDerviedFrom\n # For example; If entity_type is used, src_node and dst_node is stored in `prov_entity` and `prov_activity`\n # If entity_type is wasDerivedFrom, it is stored in `prov:usedEntity` and `prov:generatedEntity`\n src_id, dst_id = get_src_dst_id(var)\n if var in json_object:\n var_json_object = json_object[var]\n for uid in var_json_object:\n if \"prov:type\" not in var_json_object[uid]:\n # an edge must have a type; if not,\n # we will have to skip the edge. 
Log\n # this issue if verbose is set.\n if CONSOLE_ARGUMENTS.verbose:\n logging.debug(\"edge \" + var + \" record without type: {}\".format(uid))\n continue\n else:\n edgetype = edgegen(var_json_object[uid], var, node_map)\n # cf:id is used as logical timestamp to order edges\n if \"cf:id\" not in var_json_object[uid]:\n # an edge must have a logical timestamp;\n # if not we will have to skip the edge.\n # Log this issue if verbose is set.\n if CONSOLE_ARGUMENTS.verbose:\n logging.debug(\"edge \" + var + \" record without logical timestamp: {}\".format(uid))\n continue\n else:\n timestamp = var_json_object[uid][\"cf:id\"]\n if src_id not in var_json_object[uid]:\n # an edge's source node must exist;\n # if not, we will have to skip the\n # edge. Log this issue if verbose is set.\n if CONSOLE_ARGUMENTS.verbose:\n logging.debug(\"edge (\" + var + \"/{}) record without source UUID: {}\".format(var[uid][\"prov:type\"], uid))\n continue\n if dst_id not in var_json_object[uid]:\n # an edge's destination node must exist;\n # if not, we will have to skip the edge.\n # Log this issue if verbose is set.\n if CONSOLE_ARGUMENTS.verbose:\n logging.debug(\"edge (\" + var + \"/{}) record without destination UUID: {}\".format(var[uid][\"prov:type\"], uid))\n continue\n srcUUID = var_json_object[uid][src_id]\n dstUUID = var_json_object[uid][dst_id]\n # both source and destination node must\n # exist in @node_map; if not, we will\n # have to skip the edge. Log this issue\n # if verbose is set.\n if srcUUID not in node_map:\n cnt_edges_wo_nodes_fn = cnt_edges_wo_nodes_fn + 1\n if CONSOLE_ARGUMENTS.verbose:\n logging.debug(\"edge (\" + var + \"/{}) record with an unseen srcUUID: {}\".format(var[uid][\"prov:type\"], uid))\n continue\n else:\n srcVal = node_map[srcUUID]\n if dstUUID not in node_map:\n if CONSOLE_ARGUMENTS.verbose:\n logging.debug(\"edge (\" + var + \"/{}) record with an unseen dstUUID: {}\".format(var[uid][\"prov:type\"], uid))\n continue\n else:\n dstVal = node_map[dstUUID]\n if \"cf:date\" not in var_json_object[uid]:\n # an edge must have a timestamp; if\n # not, we will have to skip the edge.\n # Log this issue if verbose is set.\n if CONSOLE_ARGUMENTS.verbose:\n logging.debug(\"edge (\" + var + \") record without timestamp: {}\".format(uid))\n continue\n else:\n # we only record @adjusted_ts if we need\n # to record stats of CamFlow dataset.\n if CONSOLE_ARGUMENTS.stats:\n ts_str = var_json_object[uid][\"cf:date\"]\n ts = time.mktime(datetime.datetime.strptime(ts_str, \"%Y:%m:%dT%H:%M:%S\").timetuple())\n adjusted_ts = ts - smallest_timestamp\n total_edges += 1\n if noencode:\n if CONSOLE_ARGUMENTS.stats:\n output.write(\"{}\\t{}\\t{}:{}:{}:{}:{}\\n\".format(srcUUID, dstUUID, srcVal, dstVal, edgetype, timestamp, adjusted_ts))\n else:\n output.write(\"{}\\t{}\\t{}:{}:{}:{}\\n\".format(srcUUID, dstUUID, srcVal, dstVal, edgetype, timestamp))\n else:\n if CONSOLE_ARGUMENTS.stats:\n output.write(\"{}\\t{}\\t{}:{}:{}:{}:{}\\n\".format(hashgen([srcUUID]), hashgen([dstUUID]), srcVal, dstVal, edgetype, timestamp, adjusted_ts))\n else:\n output.write(\"{}\\t{}\\t{}:{}:{}:{}\\n\".format(hashgen([srcUUID]), hashgen([dstUUID]), srcVal, dstVal, edgetype, timestamp))\n\n \n f.close()\n output.close()\n pb.close()\n return total_edges", "title": "" }, { "docid": "512be12308d152375e8a061eaa7e6ff5", "score": "0.43812722", "text": "def __init__(self):\n self.graph = []", "title": "" }, { "docid": "588facd61cd7f94d6143d33fb0e049eb", "score": "0.43809524", "text": "def fetch_streams(self, streams):\n 
self._get_toc()\n\n for mstream in streams:\n # find the stream in the TOC and make a NetCDF request\n sources = self.stream_map.get(mstream)\n if sources is not None:\n sources.sort()\n si = sources[-1]\n self._fetch_netcdf(si)", "title": "" }, { "docid": "70912398a1170a1570c3b6930b93176a", "score": "0.43730813", "text": "def breadthFirst( self, start=None ):\n currentNodes = set([start]) if start != None else self.startNodes()\n while len(currentNodes) != 0:\n nextEdges = set()\n nextNodes = set()\n for edge in self.edges:\n if edge.id1 in currentNodes:\n nextEdges.add( edge )\n nextNodes.add( edge.id2 )\n yield nextEdges\n currentNodes = nextNodes", "title": "" } ]
018880b1d5d6c22529a88b74d8db0eb8
Returns the manifest associated with the given tag.
[ { "docid": "118dccbdaeffbf0f698c7ad2b47e4a0e", "score": "0.6995916", "text": "def get_manifest_for_tag(self, tag, backfill_if_necessary=False, include_legacy_image=False):\n try:\n tag_manifest = database.TagManifest.get(tag_id=tag._db_id)\n except database.TagManifest.DoesNotExist:\n if backfill_if_necessary:\n return self.backfill_manifest_for_tag(tag)\n\n return None\n\n return Manifest.for_tag_manifest(tag_manifest)", "title": "" } ]
[ { "docid": "a1a8b9bf202f253570d1492e7d682672", "score": "0.72691697", "text": "def backfill_manifest_for_tag(self, tag):\n # Ensure that there isn't already a manifest for the tag.\n tag_manifest = model.tag.get_tag_manifest(tag._db_id)\n if tag_manifest is not None:\n return Manifest.for_tag_manifest(tag_manifest)\n\n # Create the manifest.\n try:\n tag_obj = database.RepositoryTag.get(id=tag._db_id)\n except database.RepositoryTag.DoesNotExist:\n return None\n\n assert not tag_obj.hidden\n\n repo = tag_obj.repository\n\n # Write the manifest to the DB.\n manifest = self._build_manifest_for_legacy_image(tag_obj.name, tag_obj.image)\n if manifest is None:\n return None\n\n blob_query = self._lookup_repo_storages_by_content_checksum(repo, manifest.checksums)\n storage_map = {blob.content_checksum: blob.id for blob in blob_query}\n try:\n tag_manifest = model.tag.associate_generated_tag_manifest_with_tag(\n tag_obj, manifest, storage_map\n )\n assert tag_manifest\n except IntegrityError:\n tag_manifest = model.tag.get_tag_manifest(tag_obj)\n\n return Manifest.for_tag_manifest(tag_manifest)", "title": "" }, { "docid": "5236e5f4c2863a7b783859dff87b00f6", "score": "0.5924991", "text": "async def _get_tag_digest(registry: str, image: str, tag: str) -> str:\n async with aiohttp.ClientSession() as session:\n async with session.head(\n f\"{registry}/v2/{image}/manifests/{tag}\",\n headers={\"Accept\": \"application/vnd.docker.distribution.manifest.v2+json\"},\n ) as response:\n if response.status != 200:\n raise RuntimeError(\n f\"Unable to fetch digest for {image}:{tag}: {response.status}\"\n )\n return response.headers[\"Docker-Content-Digest\"]", "title": "" }, { "docid": "70b59ca5958508165afc6ab6dec61633", "score": "0.57702184", "text": "def get_manifest(self):\n return self._manifest", "title": "" }, { "docid": "2e72d9dc5623f983d331de4c7e6433ae", "score": "0.57473236", "text": "def get_tag_feed(cls, tag):\n feed = memcache.get(\"tag\", namespace=TAG_FEED_NAMESPACE)\n\n if feed is not None:\n return feed\n\n return cls.update_tag_feed(tag)", "title": "" }, { "docid": "2a48d7105a996c4a9cf2ed34b82f263a", "score": "0.5679564", "text": "def findByTag(self, tag):\r\n pass", "title": "" }, { "docid": "99a96d391923f10666d2e3e3331714cd", "score": "0.56675214", "text": "def get_tag(tag):\n db = get_db()\n finds = db.tags.find({'_id': tag})\n for it in finds:\n return it\n return dict()", "title": "" }, { "docid": "a7ecf74539770be4ec8557cf82317dd5", "score": "0.563651", "text": "def get_entry(self, tag: str) -> Optional[JournalEntry]:\r\n if tag in self._entries:\r\n return self._entries[tag]\r\n return None", "title": "" }, { "docid": "99af8b6f21fe14999470ad29880d0a60", "score": "0.5624295", "text": "def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage):\n try:\n return manifest.get_parsed_manifest()\n except ManifestException:\n return None", "title": "" }, { "docid": "214d381f1293d641fe6272d2526720df", "score": "0.5602584", "text": "def get_manifest(self):\n namelist = self.get_namelist()\n for name in namelist:\n if name.endswith('.opf'):\n manifest_name = name\n break\n manifest = self.contents.open(manifest_name)\n return BeautifulStoneSoup(manifest.read())", "title": "" }, { "docid": "27483715ef743ad923259334b77ff995", "score": "0.5595895", "text": "def get_version(tag):\n # This is most likely to be the leaving 'v'.\n if tag.startswith(\"v\"):\n tag = tag[1:]\n\n try:\n version = semantic_version.Version(tag)\n except ValueError:\n version = tag\n return 
version", "title": "" }, { "docid": "75de3ccc3332dd8ca9a767cdc55496e2", "score": "0.55932", "text": "def get(tag):\n if not DockerFile.exists(tag):\n return DockerFile.LOGGER.fatal(\"Can't find a Docker object with tag '{}'\".format(tag))\n return next(filter(lambda e : e.TAG == tag, DockerFile))", "title": "" }, { "docid": "c8cad65488bd39c9f663455e24c7c787", "score": "0.5587289", "text": "def get_manifest(self, metadata):\n manifest_path = \"{0}/{1}\".format(self.url, metadata['location'])\n req = requests.get(manifest_path, stream=True)\n if req.status_code is 200:\n gz_manifest = req.raw.read()\n\n self.verify_checksum(gz_manifest, metadata['checksum'],\n metadata['location'])\n manifest = self.unzip_manifest(gz_manifest)\n self.verify_checksum(manifest, metadata['open_checksum'],\n metadata['location'].rstrip('.gz'))\n\n return self.parse_manifest(manifest)", "title": "" }, { "docid": "2cb8eb4352196d48353a281b9783fc2a", "score": "0.555654", "text": "def manifest(self):\n return _Manifest.from_json(self.get(_MANIFEST_FILENAME))", "title": "" }, { "docid": "0eb9f5f87251445d802b25effb32e315", "score": "0.5544615", "text": "def get_manifest(self):\n return self._api.manifest(self.project, self.revision)", "title": "" }, { "docid": "837c942ad0e0b211af5e6e0f8f46d44f", "score": "0.5510178", "text": "def manifest(self):\n return self._manifest", "title": "" }, { "docid": "a40ec43a5c42c935e75fe16aededc2b5", "score": "0.5506382", "text": "def info(self, tag):\n _params = {'tag': tag}\n return self.master.call('tags/info', _params)", "title": "" }, { "docid": "61604a4ec23933fff1eb456467f24d7b", "score": "0.54727566", "text": "def get_manifest(self):\n raw_data = self.client._perform_json(\"GET\", \"/apps/%s/\" % self.app_id)\n project_key = self.app_id[8:] if self.app_id.startswith('PROJECT_') else None\n return DSSAppManifest(self.client, raw_data, project_key)", "title": "" }, { "docid": "6131d3be3ec420784f85a815c60d1861", "score": "0.547176", "text": "def get_tag(self, tag):\n return self.get_element_type(*extract_namespace(tag))", "title": "" }, { "docid": "b016b11e3871965ce44974fd8b664738", "score": "0.54219735", "text": "def get_tag(field):\n found = [\n {key: value} for key, value in DicomDictionary.items() if value[4] == field\n ]\n manifest = None\n\n if len(found) > 0:\n # (VR, VM, Name, Retired, Keyword\n found = found[0] # shouldn't ever have length > 1\n tag = Tag(list(found)[0])\n VR, VM, longName, _, keyword = found[tag]\n\n manifest = {\n \"tag\": tag,\n \"VR\": VR,\n \"VM\": VM,\n \"keyword\": keyword,\n \"name\": longName,\n }\n\n return manifest", "title": "" }, { "docid": "ab0c4e5b0ff7d1b5ca53dca7e6520fd9", "score": "0.54014915", "text": "def find(self, tag, crc):\r\n\t\ttry:\r\n\t\t\tmodule, element = tag.split(\":\")\r\n\t\t\tself._loadModule(module)\r\n\t\t\treturn self._cache[module][element][crc]\r\n\t\texcept:\r\n\t\t\treturn None", "title": "" }, { "docid": "24e27835179018cbc0d97dddae9bf3c4", "score": "0.54013544", "text": "def get_manifest(self):\n raw_data = self.client._perform_json(\"GET\", \"/projects/%s/app-manifest\" % self.project_key)\n return DSSAppManifest(self.client, raw_data)", "title": "" }, { "docid": "f267b6991761b2fb642c0619d466b946", "score": "0.53901607", "text": "def get_manifest(self) -> OrderedDict:\n if not self._manifest:\n self._manifest = self._load_manifest_data()\n\n return self._manifest", "title": "" }, { "docid": "82c38942e9ef3995af69d830c09ca1da", "score": "0.5383108", "text": "def manifest(self):\n if self._manifest is None:\n for 
element in self.html.xpath('//*[@data-melange-manifest]'):\n # take first\n raw = element.attrib['data-melange-manifest']\n self._manifest = json.loads(raw)\n break\n if self._manifest is None:\n raise FragmentError('No manifest found.')\n if 'name' not in self._manifest:\n raise FragmentError('Manifest must contain a \"name\".\\n' + raw)\n return self._manifest", "title": "" }, { "docid": "15157540f7373c9b0db4e02c2d90c154", "score": "0.5379211", "text": "def manifest(self):\n if not hasattr(self, '_manifest'):\n try:\n self._manifest = yaml.safe_load(\n self.contents.open('manifest.yaml'))\n except Exception as e:\n LOG.error(\"Error {0} occurred, while extracting \"\n \"manifest from package\".format(e))\n raise\n return self._manifest", "title": "" }, { "docid": "fc80ca25e19e3fa4730eea5fb36870a0", "score": "0.53426176", "text": "def parse_tag_version(tag):\n return parse_version(tag[tag.rfind(\"-\") + 1 :])", "title": "" }, { "docid": "f5a4ded49064f4964d3015fa09fd9fd4", "score": "0.528654", "text": "def get_by_tag(self, tag):\n return [part[tag] for part in self.content]", "title": "" }, { "docid": "c10b9dfeb62c409177932a1989443893", "score": "0.5270693", "text": "def get_tag(self, tag):\n try:\n return Tag.objects.get(name=tag)\n except Tag.DoesNotExist:\n tag = Tag(name=tag)\n tag.save()\n return tag", "title": "" }, { "docid": "60201b9fd95243845a99d7853cffa41d", "score": "0.52629375", "text": "def get_tag():\n return git(\"describe\", \"--tags\", \"--exact-match\", _show=False, _ignore=True)[0]", "title": "" }, { "docid": "582b06b7a8d9796236254a5c3a096fc7", "score": "0.5250025", "text": "def lookup_manifest_by_digest(\n self,\n repository_ref,\n manifest_digest,\n allow_dead=False,\n include_legacy_image=False,\n require_available=False,\n ):\n repo = model.repository.lookup_repository(repository_ref._db_id)\n if repo is None:\n return None\n\n try:\n tag_manifest = model.tag.load_manifest_by_digest(\n repo.namespace_user.username, repo.name, manifest_digest, allow_dead=allow_dead\n )\n except model.tag.InvalidManifestException:\n return None\n\n legacy_image = None\n if include_legacy_image:\n legacy_image = self.get_legacy_image(\n repository_ref, tag_manifest.tag.image.docker_image_id, include_parents=True\n )\n\n return Manifest.for_tag_manifest(tag_manifest, legacy_image)", "title": "" }, { "docid": "9e902dad1ecdaea377fed483c2c9b74c", "score": "0.5211176", "text": "def manifest(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"manifest\")", "title": "" }, { "docid": "7f4b5b5100fa22be7fa51454cb330c6c", "score": "0.5201093", "text": "def getRouteByTag(self, tag):\n for route in self.routes:\n if route.tag == tag:\n return route", "title": "" }, { "docid": "78bd7c60d39469c57d0fbb96f544d687", "score": "0.5200356", "text": "def get_release_by_tag(cls, owner: str, repo: str, tag: str):\n response, _ = cls._call('/repos/{owner}/{repo}/releases/tags/{tag}'\n .format(owner=owner, tag=tag, repo=repo), method='GET')\n return Release.from_response(response)", "title": "" }, { "docid": "666d883c36230c6a50e5221cc040f022", "score": "0.51468724", "text": "def find_available_tag(tag):\n\n latest_version = find_latest_tag_version(tag)\n\n new_tag = make_versioned_tag(tag, latest_version+1)\n\n return new_tag", "title": "" }, { "docid": "ac19d9bc6905f45f116a5c2c00a45434", "score": "0.5113394", "text": "def get_manifest(self, url_root):\n d = self.manifest_params\n return create_manifest(url_root=url_root, assets=d['assets'],\n manifest_label=self.manifest_label,\n 
metadata=d['metadata'], info=d['info'],\n thumbnail_url=d.get('thumbnail_url', ''),\n return_as='text')", "title": "" }, { "docid": "476969eb0cbe9d3cc4e8168c91be748f", "score": "0.5108586", "text": "def find_by_tag(self, tag, params={}, **options):\n path = \"/tags/%s/tasks\" % (tag)\n return self.client.get_collection(path, params, **options)", "title": "" }, { "docid": "7115755c6272b71da21a0bcd922e3a7f", "score": "0.5067699", "text": "def load_manifest(self, bucket):\n # allows us to compare the etag(hash) of uploaded files against local files and only upload the cahnged ones during sync\n paginator = self.s3.meta.client.get_paginator(\"list_objects_v2\")\n for page in paginator.paginate(Bucket=bucket.name):\n for obj in page.get(\"Contents\", []):\n self.manifest[obj[\"Key\"]] = obj[\"ETag\"]", "title": "" }, { "docid": "35f78fc7e29ed8baec9e91158600ca28", "score": "0.5066888", "text": "def manifest(self):\n query_url = self.url + '/iteminfo/manifest/manifest.json'\n return BaseResource(self.request(query_url))", "title": "" }, { "docid": "c63497c40ed0b8c313f9d34378b6bf06", "score": "0.5052433", "text": "def get_manifest(self, repo, reference):\n url = '{0}/v2/{1}/manifests/{2}'.format(self.host, repo, reference)\n return self._send_request_get(url)", "title": "" }, { "docid": "afbe0d58f13d0252490f3bdd0699805d", "score": "0.50432026", "text": "def get_tag(self):\n return self.run([\"git\", \"describe\", \"--tags\", \"--abbrev=0\"])", "title": "" }, { "docid": "afbe0d58f13d0252490f3bdd0699805d", "score": "0.50432026", "text": "def get_tag(self):\n return self.run([\"git\", \"describe\", \"--tags\", \"--abbrev=0\"])", "title": "" }, { "docid": "2ae53efa66be4fd6156f499b31ac4132", "score": "0.5035069", "text": "def get(tag: str) -> str:\n if not _tags:\n raise RuntimeError('reset() should be called before')\n\n return _tags[threading.get_id()].get(tag, '')", "title": "" }, { "docid": "d3b9e135b5a16d8fe2462b7b443fa987", "score": "0.5026822", "text": "def manifest_version(self) -> str:\n return self.manifest[\"manifest\"]", "title": "" }, { "docid": "536707438465534fa43d6dc3314b188e", "score": "0.50207543", "text": "def lookup_tag(self, tag, name):\n return self.unqualified_lookup(\"{}-{}\".format(tag, name))", "title": "" }, { "docid": "6ef624a8e4a64afcae09792bc46a851f", "score": "0.49796408", "text": "def delete_tags_for_manifest(self, manifest):\n try:\n tagmanifest = database.TagManifest.get(id=manifest._db_id)\n except database.TagManifest.DoesNotExist:\n return None\n\n namespace_name = tagmanifest.tag.repository.namespace_user.username\n repo_name = tagmanifest.tag.repository.name\n tags = model.tag.delete_manifest_by_digest(namespace_name, repo_name, manifest.digest)\n return [Tag.for_repository_tag(tag) for tag in tags]", "title": "" }, { "docid": "1354d607f10dc2d35c218e5e86db055c", "score": "0.4965089", "text": "def manifest(self):\n return self._client[\"manifests\"]", "title": "" }, { "docid": "5bc3959ff798d824f31e8b6a0bc265bc", "score": "0.49587187", "text": "def get_form(self, form_tag):\n\n for form in self.forms:\n if form.tag == form_tag:\n return form\n\n return None", "title": "" }, { "docid": "ec46f100e685b0a6e40152db19b89f4e", "score": "0.49445552", "text": "def manifest_name(self) -> Optional[str]:\n return pulumi.get(self, \"manifest_name\")", "title": "" }, { "docid": "7970e6ffb5f5ecc23b5f072226e0f0b3", "score": "0.49398628", "text": "def wait_for_fragment_by_tag(self, tag):\n return call(self, \" waitForFragmentByTag\", tag)", "title": "" }, { "docid": 
"96bba2c409c71586abe2a79c6eb4a9c0", "score": "0.4936148", "text": "def load_package_manifest(self, package_reference):\n filename = self.digestfile_package(package_reference, short_paths=None)\n return FileTreeManifest.loads(load(filename))", "title": "" }, { "docid": "6ca80f8452389dea32edcf5167200302", "score": "0.49354765", "text": "def read_manifest(manifest_path):\n with open(manifest_path, \"r\") as pfile:\n return read_manifest_file(pfile)", "title": "" }, { "docid": "86568e7f7b889905791b56a5a62e5885", "score": "0.49347723", "text": "def get_AndroidManifest(self):\n return self.xml[\"AndroidManifest.xml\"]", "title": "" }, { "docid": "aca9dac74241f3a09ee02a7590fb153f", "score": "0.4930192", "text": "def delete(self, tag):\n _params = {'tag': tag}\n return self.master.call('tags/delete', _params)", "title": "" }, { "docid": "e9a438cafd02b757b8df652cc8fc12e8", "score": "0.49167866", "text": "def get_tag(file,tag):\n return dicom.read_file(file, force=True).data_element(tag).value", "title": "" }, { "docid": "fb63372ebc9e3f7d94a03e2714a97e1d", "score": "0.48881298", "text": "def get_tag_phase(self, tag):\n if not isinstance(tag, str):\n raise TypeError(\"tag must be str.\")\n\n phase = qcirc_get_tag_phase(self, tag)\n return phase", "title": "" }, { "docid": "215454779fcc237dc10827e7c20d3e96", "score": "0.48855317", "text": "def open_manifest(self) -> Optional[Manifest]:\n if not self.items:\n return None\n cls = resolve_manifest_set_class(self.items[0])\n return cls.from_items(self.items)", "title": "" }, { "docid": "c8b38a69d9da8459b3783fcb0d5586b7", "score": "0.48839027", "text": "def from_tag(cls, tag: str):\n project = TAGS['projects'][tag]\n return cls(project=project)", "title": "" }, { "docid": "7a4de8d1ed14cc6b2fa0996a198cf16d", "score": "0.48824337", "text": "def get_sha512_manifest(zfile):\n names = zfile.namelist()\n manifest = None\n for name in names:\n if name.endswith(\"MANIFEST.MF\"):\n manifest = name\n break\n if manifest is None:\n raise SystemExit\n return manifest", "title": "" }, { "docid": "053b6d1e0947a961e8c76be322e878cc", "score": "0.4879669", "text": "def get_tag_info(tag):", "title": "" }, { "docid": "053b6d1e0947a961e8c76be322e878cc", "score": "0.4879669", "text": "def get_tag_info(tag):", "title": "" }, { "docid": "d53e79dcfa9a6531aec863baebf99194", "score": "0.48774275", "text": "def find(self, tag):\n if self._root is None:\n return\n\n return self._root.find(tag)", "title": "" }, { "docid": "993a585f7c261e60fa29b51a150b2922", "score": "0.4844574", "text": "def get_metadata_tag(self, tag):\n return UnrealEditorEngine.metadata_tag_prefix + tag", "title": "" }, { "docid": "d8996cd61fe00d687b61a53e2cfeea03", "score": "0.48378995", "text": "def find_tagged(tag):\r\n for key, item in _tags.iteritems():\r\n try:\r\n if item._tag == tag:\r\n return item\r\n except AttributeError:\r\n pass\r\n raise ValueError('tag \"%s\" not found' % tag)", "title": "" }, { "docid": "4adb2f58539a43d8cd890b9f797b9641", "score": "0.48377332", "text": "def manifest_file(self) -> Optional[str]:\n return pulumi.get(self, \"manifest_file\")", "title": "" }, { "docid": "bed7037e3f02f6eb1a05c791b03c26d2", "score": "0.48260674", "text": "def detail_tag(self, asset_tag):\n return self.prepare(HardwareAsset.objects.get(asset_tag__istartswith=asset_tag))", "title": "" }, { "docid": "1adc858962f95a714e75064f4e69266b", "score": "0.48255005", "text": "def get_manifest(path_to_project):\n manifest_path = os.path.join(path_to_project, MANIFEST_FILE)\n try:\n with open(manifest_path) as f:\n 
manifest = json.load(f)\n return manifest\n except IOError:\n raise Exception(\n \"Could not find {} file. {}\".format(MANIFEST_FILE, COMPILATION_MESSAGE)\n )", "title": "" }, { "docid": "ffc5455cb3a41abfbbf704073e32aa0e", "score": "0.48243564", "text": "def find_output_with_tag(self, tag):\n # Enforce upper case\n tag = tag.upper()\n return FileList([i for i in self if tag in i.tags])", "title": "" }, { "docid": "a1d7d5c2068296f68632daa1578b7bb3", "score": "0.4813623", "text": "def get_uuid(self):\n uuid_id = self.manifest.find('package')['unique-identifier']\n uuid_tag = self.manifest.find(id=uuid_id).contents[0]\n try:\n return uuid_tag.split(\":\")[:1:-1][0]\n except:\n return uuid_tag", "title": "" }, { "docid": "c129315d6368ee11f8d70e888b9521ed", "score": "0.48095506", "text": "def find_latest_tag_version(tag):\n\n r = Repo()\n\n version = 1\n while make_versioned_tag(tag, version+1) in r.tags:\n version += 1\n\n status_update(\n 'Latest version for tag %s is %d: %s' % \\\n (tag, version, make_versioned_tag(tag, version))\n )\n\n return version", "title": "" }, { "docid": "b14393cf4b14a9713453d5b17cba90b2", "score": "0.47992757", "text": "def _ReadCacheManifest(self):\n manifest_filename = os.path.join(self.cache_dir, MANIFEST_BASENAME)\n manifest = manifest_util.SDKManifest()\n with open(manifest_filename) as stream:\n manifest.LoadDataFromString(stream.read())\n return manifest", "title": "" }, { "docid": "8340fcd6245c05cffd2ba54c5df295a5", "score": "0.47967178", "text": "def _get_tag_from_raw_application(raw_application):\n return raw_application[\"tag\"]", "title": "" }, { "docid": "9583e31fffcec57fb8d03df11d6d40d0", "score": "0.47826546", "text": "def manifest_hash(self) -> Optional[str]:\n return pulumi.get(self, \"manifest_hash\")", "title": "" }, { "docid": "859934ec5805e68ea5d2537f4c7bf20c", "score": "0.47366256", "text": "def read_manifest():\n manifest_file = Path(\"cdk.out/manifest.json\")\n if manifest_file.is_file():\n with open(manifest_file) as f:\n manifest = f.read()\n else:\n print(\"manifest.json not found in cdk.out directory.\")\n sys.exit(1)\n\n try:\n manifest = json.loads(manifest)\n except ValueError as e:\n print(f\"Invalid format of {manifest_file}, error: {e}\")\n sys.exit(1)\n # Return the stack name, account, and region\n for i in manifest[\"artifacts\"]:\n if manifest[\"artifacts\"][i][\"type\"] == \"aws:cloudformation:stack\":\n return {\n \"stackname\": i,\n \"account\": manifest[\"artifacts\"][i][\"environment\"].split(\"/\")[2],\n \"region\": manifest[\"artifacts\"][i][\"environment\"].split(\"/\")[-1],\n }", "title": "" }, { "docid": "1e407aaf9879360b9ebcb4974742e626", "score": "0.47285992", "text": "def manifest(self):\n return csv.DictReader(self.open('manifest.csv'))", "title": "" }, { "docid": "81799d358aeecec4b5df574c586a3352", "score": "0.47262886", "text": "def get_android_manifest_xml(self):\n try:\n return self.xml[\"AndroidManifest.xml\"]\n except KeyError:\n return None", "title": "" }, { "docid": "c881b97b6ece0257d76bb17dd279fa30", "score": "0.47217277", "text": "def from_tag(cls, tag: str):\n mapping = TAGS['authors'][tag]\n\n name = ' '.join(reversed(mapping['name'].split(', ')))\n institute = mapping.get('institute', 'No affiliation')\n orcid = mapping['orcid']\n\n return cls(name=name, institute=institute, orcid=orcid)", "title": "" }, { "docid": "5b1e5ed3e5813d3d6dab0831e600e44a", "score": "0.4719816", "text": "def inspect(bag: str) -> set:\n if os.path.isdir(bag):\n with open(os.path.join(bag, 'manifest-md5.txt')) as 
bag_manifest:\n return {tuple(line.strip().split(maxsplit=1)) for line in bag_manifest}\n\n elif tarfile.is_tarfile(bag):\n directory_name = bag.split('/')[-1].split('.')[0]\n tar = tarfile.open(bag, mode='r:*')\n\n with tar.extractfile(f'{directory_name}/manifest-md5.txt') as bag_manifest:\n return {tuple(line.strip().split(maxsplit=1)) for line in bag_manifest}\n\n else:\n raise RuntimeError(f'{bag} is neither a directory or a tar file')", "title": "" }, { "docid": "c884177672a609455e95bfb7836a96f4", "score": "0.4717208", "text": "def _get_manifest(self):\n try:\n response = requests.get(MANIFEST_URL, timeout=10)\n body = response.text\n # if the commit is not exist then MANIFEST_URL will be invalid, fall back to use manifest in latest commit\n if response.status_code == Status.NOT_FOUND:\n LOG.warning(\n \"Request to MANIFEST_URL: %s failed, the commit hash in this url maybe invalid, \"\n \"Using manifest.json in the latest commit instead.\",\n MANIFEST_URL,\n )\n raise ManifestNotFoundException()\n\n except (requests.Timeout, requests.ConnectionError, ManifestNotFoundException):\n LOG.debug(\"Request to get Manifest failed, attempting to clone the repository\")\n self.clone_templates_repo()\n manifest_path = self.get_manifest_path()\n with open(str(manifest_path)) as fp:\n body = fp.read()\n manifest_body = json.loads(body)\n return manifest_body", "title": "" }, { "docid": "888556bb0b800f7fb28ab716aff4e458", "score": "0.47168857", "text": "def release_from_tag(self, tag_name):\n \n repo = self.gh_repo\n url = repo._build_url('releases', 'tags', tag_name,\n base_url=repo._api)\n json = repo._json(repo._get(url), 200)\n return Release(json, repo) if json else None", "title": "" }, { "docid": "d5655bca807240b54196ac21b657ca08", "score": "0.4714784", "text": "def manifest_name(self) -> str:\n return pulumi.get(self, \"manifest_name\")", "title": "" }, { "docid": "05d0be9ccaa0b9d913291c4fd938bb87", "score": "0.47147095", "text": "def fetch_manifest(self) -> Tuple[str, str, dict]:\n\n bucket = self._gs_bucket\n key = f'{self._semantic_name}/{MANIFEST_NAME}'\n manifest_blob: storage.bucket.Blob = bucket.get_blob(key)\n\n if not manifest_blob:\n LOGGER.warning(f'{MANIFEST_NAME} not exists by gs://{bucket.name}/{key}, create empty')\n empty_manifest: str = json.dumps({\"@spec\": 1, \"@ns\": {}})\n\n ok, err = self.cas_blob(empty_manifest.encode('utf-8'),\n generation=0, bucket_name=bucket.name, blob_name=key)\n\n if ok or err is None:\n # manifest has just created\n manifest_blob = bucket.get_blob(key)\n else:\n LOGGER.error(\"Could not create manifest %s\", err.content)\n raise Exception(\"creating %s failed\" % key)\n\n str_ = manifest_blob.download_as_string()\n json_ = json.loads(str_)\n\n LOGGER.info('Fetching manifest gcs blob %s', manifest_blob)\n return manifest_blob.name, manifest_blob.generation, json_", "title": "" }, { "docid": "1645f38a57b4d2a580ca5e2acb1438a1", "score": "0.4711013", "text": "def _deserialize(self, tag):\n name = self._filename(tag)\n if os.path.exists(name):\n with open(name, 'rb') as to_read:\n try:\n obj = pickle.load(to_read)\n except Exception: # something bad happened during unpickling\n obj = None\n\n if isinstance(obj, struct.Node):\n struct.walk(obj, _deserialize_xml_fields)\n return obj", "title": "" }, { "docid": "947a629797b5cfc8b1b214e3062c477b", "score": "0.47082275", "text": "def _get_json_by_tag(tagname):\n repo = _get_repo_path()\n sha1 = _get_data_tag(tagname)\n json_data = ''\n try:\n f_r = open(os.path.join(repo, \"desc\", sha1), 'r+')\n\n 
json_data = json.loads(f_r.read())\n except:\n json_data = {\"repo_date\": str(date.today()), \"tags\": []}\n\n return json_data", "title": "" }, { "docid": "ff3be32d75ee15f86caa43448e909827", "score": "0.47054154", "text": "def get_directive(self, tag):\n # Check if a directive exists for this tag\n if tag.lower() not in self.directives:\n if self.shared_delimiter:\n return None\n else:\n raise PrologueError(f\"No directive known for tag '{tag}'\")\n # Return the directive\n return self.directives[tag.lower()]", "title": "" }, { "docid": "b623843a03cb3c5618b9122c9366e12b", "score": "0.4704216", "text": "def get_instance(tag=\"mqlibtest1\"):\n ec2 = boto.ec2.connect_to_region(AWS_REGION)\n reservations = ec2.get_all_instances()\n for res in reservations:\n for inst in res.instances:\n if \"mqlibname\" in inst.tags.keys():\n if inst.tags[\"mqlibname\"] == tag and inst.state == \"running\":\n #print(\"Found %s\"%tag)\n return inst\n print(\"Couldn't find instance\")\n return None", "title": "" }, { "docid": "6ae4dd88c48fa26c7206db2da1a37e85", "score": "0.46858624", "text": "def jsonrpc_acquire_specific_tag(self, tag, owner=None, reason=None):\n d = self.tagpool.acquire_specific_tag(tag, owner, reason)\n return d", "title": "" }, { "docid": "f87954f1e92330606adedc906f53a158", "score": "0.46807677", "text": "def from_tag(cls, tag: str) -> 'Project':\n project = TAGS['projects'][tag]\n return cls(project=project)", "title": "" }, { "docid": "c73a749f329b3096d37b1a8f918aef0f", "score": "0.46797976", "text": "def make(tag):\n match = SemanticVersion.SEMVER_MATCHER.match(tag)\n if match is None:\n raise_and_log_error(UnexpectedError('Malformed tag \"%s\"' % tag))\n\n # Keep first group as a string, but use integers for the component parts\n return SemanticVersion(\n match.group(1), *[int(num) for num in match.groups()[1:]]\n )", "title": "" }, { "docid": "63c871d7c843fce76ea0dd7c7ec97e3c", "score": "0.46770105", "text": "def convert_tag(tag):\n \n tag_dict = {'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}\n try:\n return tag_dict[tag[0]]\n except KeyError:\n return None", "title": "" }, { "docid": "679bb2f36a9b83677bea5ca2ef05ac6f", "score": "0.46740457", "text": "def get_tag(cls, tag_id):\r\n return cls.all_tags[tag_id]", "title": "" }, { "docid": "2ad61166c7c1835d34c1f7c0fcf9cba1", "score": "0.46734896", "text": "def child_by_tag(self, tag):\r\n children_tags = [t.tag for t in self._children]\r\n if tag not in children_tags:\r\n raise ValueError(error(E_TAG, tag, self._tag))\r\n return self._children[children_tags.index(tag)]", "title": "" }, { "docid": "9b78d3ccc84c4d1797e02abfd0e538d3", "score": "0.4665449", "text": "def get_manifest_json(self):\n return json.loads(self.manifest)", "title": "" }, { "docid": "52925a7f6f32ba17a2a852fcb6bee3b4", "score": "0.4657004", "text": "def update_tag_feed(cls, tag):\n feed = Twisk.gql(\"WHERE tags = :1 ORDER BY when DESC LIMIT 50\", tag)\n\n memcache.set(tag, list(feed), namespace=TAG_FEED_NAMESPACE)\n\n return feed", "title": "" }, { "docid": "c413cda50ad2748005e371c0f0448b78", "score": "0.4650456", "text": "def unzip_manifest(self, raw_manifest):\n buf = BytesIO(raw_manifest)\n f = gzip.GzipFile(fileobj=buf)\n manifest = f.read()\n\n return manifest", "title": "" }, { "docid": "2939010e7955b43cf1ee96742c00da6e", "score": "0.4648604", "text": "def manifest(self, line, cell):\n\n r = self.impl.manifest(line, cell)\n\n self.shell.user_ns['manifest'] = self.impl._manifest\n\n return r", "title": "" }, { "docid": "a29ef968ff9fa61ecec9312d64695b0f", "score": 
"0.4640329", "text": "def load_manifest(self, key: D.Key) -> Manifest: # pylint: disable=unused-argument; this is a stub.", "title": "" }, { "docid": "db3eeeb9ca50369a6630329098215dad", "score": "0.46287", "text": "def getTag (self,tag):\n\n if self.matchTag(tag):\n return\n else:\n print \"getTag(\", tag, \") failed:\"\n raise BadLeoFile(\"expecting\" + tag)", "title": "" }, { "docid": "4dfde3ca8e7e5f989fa47fe6169cf44f", "score": "0.46234286", "text": "def find(self, tag):\n for child in self._children:\n if child.tag == tag:\n return child\n\n return None", "title": "" }, { "docid": "d28faed52361fc49feca38419d7bee77", "score": "0.46190724", "text": "def fetch_manifest(bucket, manifest, pem):\n client = boto3.client('s3')\n\n local_manifest_path = '/tmp/{}{}'.format(uuid.uuid4(), manifest)\n local_verify_cert_path = '/tmp/{}{}'.format(uuid.uuid4(), pem)\n\n client.download_file(bucket, manifest, local_manifest_path)\n client.download_file(bucket, pem, local_verify_cert_path)\n\n return local_verify_cert_path, local_manifest_path", "title": "" }, { "docid": "7ea4e820ad9820e4131a1d84fe40853c", "score": "0.4602156", "text": "def manifest_id(self):\n return self._manifest_id", "title": "" }, { "docid": "20a40e5808a71edd36a8587d4c3c0440", "score": "0.45990187", "text": "def find_game_object_by_tag(self, tag):\n return_list = []\n for game_object in self.game_objects:\n if game_object.tag == tag:\n return_list.append(game_object)\n return return_list", "title": "" } ]
4dba8ed3cddff94b4b7072f76b0d80c4
Test if ID is unique
[ { "docid": "05ab68083158ee7bba35180202897740", "score": "0.0", "text": "def test_RepetitiveID(self):\r\n self.assertRaises(RepetitiveID, lambda: self.x5.analyze())", "title": "" } ]
[ { "docid": "0ac5efe74d321f18377f5ccb07de5d2c", "score": "0.7788738", "text": "def isUnique(self) -> bool:\n ...", "title": "" }, { "docid": "147ce7c02042b875c8daf79145db0348", "score": "0.7677853", "text": "def check_id(self, id):\n\n if id in self.unique_ids:\n return True\n \n return False", "title": "" }, { "docid": "94ec1cbc0712af5c61a5a7a63e69b29d", "score": "0.7650848", "text": "def is_unique(query, id):\n cursor = g.conn.execute(query, (id))\n if cursor.rowcount > 0:\n cursor.close()\n return False\n\n return True", "title": "" }, { "docid": "59676831cf34fe5512b7f5066393f164", "score": "0.75196165", "text": "def is_unique(self): \n\t\treturn self.unique", "title": "" }, { "docid": "0ee02cd117010c3f6c2b7e348a375858", "score": "0.72342044", "text": "def check_for_unique_id(self, ID):\n students = self._repo.get_list()\n for student in students:\n if student[0] == ID:\n return True\n return False", "title": "" }, { "docid": "02e24d238ee90598af872212a1f80cb4", "score": "0.7218275", "text": "def is_id_unique(unique_id):\n try:\n User.objects.get(unique_id=unique_id)\n # checks whether the user is unique or not\n except User.DoesNotExist:\n return True\n return False", "title": "" }, { "docid": "1c2a143a1f0f962d69a2fc5f1b764bd4", "score": "0.7126033", "text": "def testUniqueness(self):\n generatedIDs = []\n for i in range(100):\n newID = self.node._generateID()\n # ugly uniqueness test\n self.failIf(newID in generatedIDs, 'Generated ID #%d not unique!' % (i+1))\n generatedIDs.append(newID)", "title": "" }, { "docid": "911e2a72bc927a069a2622da98d515e1", "score": "0.7112215", "text": "def is_unique(self):\n return self.unique", "title": "" }, { "docid": "8dcc9b797f8fa6ed6c4c9c027f9135f2", "score": "0.7090037", "text": "def unique(self, table, ID):\n\n# DBtable, DBname = self.__DBchoice(table)\n DBname = self.__DBchoice(table)[1]\n DBid = table.rstrip('s') + 'ID'\n\n req = \"SELECT COUNT(*) FROM %s WHERE %s = (SELECT %s \" %(table, DBname, DBname)\n req = req + \" FROM %s WHERE %s = '%d');\" % (table, DBid, ID)\n\n resQuery = self.db._query(req)\n if not resQuery['OK']:\n raise RSSDBException, where(self, self.unique) + resQuery['Message']\n else:\n n = int(resQuery['Value'][0][0])\n if n == 1 :\n return True\n else:\n return False", "title": "" }, { "docid": "8fb3f340508434468a82654776ff96bc", "score": "0.69322777", "text": "def check_id_unique(self, item_id, items_list):\n\n is_unique = False\n ids_list = []\n\n for item_record in items_list:\n ids_list.append(item_record['id'])\n \n if item_id in ids_list:\n is_unique = False\n else:\n is_unique = True\n \n return is_unique", "title": "" }, { "docid": "6e0cffc0737a6cd5adaa9b54b72fe1c8", "score": "0.69206476", "text": "def is_unique(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"is_unique\")", "title": "" }, { "docid": "f8953ff03017f5ac5b8ed8526dd8ffdd", "score": "0.6865815", "text": "def test_unique_success(self):\n pass", "title": "" }, { "docid": "f29c2f1891c1b8918cf2d068d6dfbdbd", "score": "0.6861418", "text": "def test_uniq_id(self):\n self.assertNotEqual(self.Base1.id, self.Base2.id)", "title": "" }, { "docid": "dae105db20e4bef8e7c952d245b2c11a", "score": "0.67988837", "text": "def test_unique_id(self):\n test2 = Review()\n self.assertNotEqual(self.test.id, test2.id)", "title": "" }, { "docid": "6a7c75549e2453c3e80b8ee4397cd8e6", "score": "0.67880297", "text": "def test_enforce_unique_id(self):\n ir = IntracellularRecordingsTable()\n ir.add_recording(electrode=self.electrode,\n stimulus=self.stimulus,\n 
response=self.response,\n id=np.int64(10))\n sw = SimultaneousRecordingsTable(intracellular_recordings_table=ir)\n sw.add_simultaneous_recording(recordings=[0])\n sws = SequentialRecordingsTable(sw)\n sws.add_sequential_recording(simultaneous_recordings=[0, ], stimulus_type='MyStimStype')\n repetitions = RepetitionsTable(sequential_recordings_table=sws)\n repetitions.add_repetition(sequential_recordings=[0, ], id=np.int64(10))\n with self.assertRaises(ValueError):\n repetitions.add_repetition(sequential_recordings=[0, ], id=np.int64(10))", "title": "" }, { "docid": "c0b6522636ebbddd68d73e426259957c", "score": "0.6767591", "text": "def missing_ID_check(self):\n pass", "title": "" }, { "docid": "1a5c0c816d8dfbdafe9dff29c6e7c37b", "score": "0.67630917", "text": "def test_unique_ids(self, cls):\n ids = [name[0] for name in cls.ALL]\n self.assertEqual(len(ids), len(set(ids)))", "title": "" }, { "docid": "25e0c07ffb0d868a2c099cb259986220", "score": "0.67462003", "text": "def check_id(row, id_set):\n try:\n row_id = int(row[0])\n except ValueError:\n return False\n if row_id in id_set:\n return False\n else:\n id_set.add(row_id)\n return True", "title": "" }, { "docid": "88642cd2a9f4a7952b3e58437e34a1f9", "score": "0.6728641", "text": "def test_enforce_unique_id(self):\n ir = IntracellularRecordingsTable()\n ir.add_recording(electrode=self.electrode,\n stimulus=self.stimulus,\n response=self.response,\n id=np.int64(10))\n sw = SimultaneousRecordingsTable(intracellular_recordings_table=ir)\n sw.add_simultaneous_recording(recordings=[0])\n sws = SequentialRecordingsTable(sw)\n sws.add_sequential_recording(simultaneous_recordings=[0, ], stimulus_type='MyStimStype')\n repetitions = RepetitionsTable(sequential_recordings_table=sws)\n repetitions.add_repetition(sequential_recordings=[0, ])\n cond = ExperimentalConditionsTable(repetitions_table=repetitions)\n cond.add_experimental_condition(repetitions=[0, ], id=np.int64(10))\n with self.assertRaises(ValueError):\n cond.add_experimental_condition(repetitions=[0, ], id=np.int64(10))", "title": "" }, { "docid": "6fb2f59f530d3519c36f12c4e77ba943", "score": "0.667989", "text": "def test_unique_fail(self):\n pass", "title": "" }, { "docid": "65ac80b1092a6adb14368fd49a7d1320", "score": "0.6673322", "text": "def test_uuid_duplicates_dupe(self):\n self.t(\"add simple\")\n self.t(\"1 duplicate\")\n\n uuids = list()\n for id in range(1,3):\n code, out, err = self.t(\"_get %d.uuid\" % id)\n uuids.append(out.strip())\n\n self.assertEqual(len(uuids), len(set(uuids)))\n\n code, out, err = self.t(\"diag\")\n self.assertIn(\"No duplicates found\", out)", "title": "" }, { "docid": "005414081901c927828c902f2ee799c2", "score": "0.6652855", "text": "def is_unique_test():\n assert is_unique(\"aa\") is False\n assert is_unique(\"abcs123\") is True\n assert is_unique(\"abcABDs123\") is True", "title": "" }, { "docid": "d17667cac34364947e17f186d1622b67", "score": "0.66092336", "text": "def test_new_meetingID(self):\n ids = self.generate_unique_values(self.server.new_meetingID)\n # test that generated ids are all distinct \n self.assertEqual(len(ids), len(set(ids)))", "title": "" }, { "docid": "edde7724fb527414af3f7466ab729c10", "score": "0.65541863", "text": "def test_enforce_unique_id(self):\n ir = IntracellularRecordingsTable()\n ir.add_recording(electrode=self.electrode,\n stimulus=self.stimulus,\n response=self.response,\n id=np.int64(10))\n sw = SimultaneousRecordingsTable(intracellular_recordings_table=ir)\n sw.add_simultaneous_recording(recordings=[0])\n sws = 
SequentialRecordingsTable(sw)\n sws.add_sequential_recording(simultaneous_recordings=[0, ], id=np.int64(10), stimulus_type='MyStimStype')\n with self.assertRaises(ValueError):\n sws.add_sequential_recording(simultaneous_recordings=[0, ], id=np.int64(10), stimulus_type='MyStimStype')", "title": "" }, { "docid": "112a96013e754a5bc866ae82093805a9", "score": "0.65099406", "text": "def test_enforce_unique_id(self):\n ir = IntracellularRecordingsTable()\n ir.add_recording(electrode=self.electrode,\n stimulus=self.stimulus,\n response=self.response,\n id=np.int64(10))\n with self.assertRaises(ValueError):\n ir.add_recording(electrode=self.electrode,\n stimulus=self.stimulus,\n response=self.response,\n id=np.int64(10))", "title": "" }, { "docid": "0bd928fbd647125760dfda661e8fa9ce", "score": "0.647793", "text": "def test_game_pkey(game):\n assert dh.is_unique(game, ['game_id'])", "title": "" }, { "docid": "b37427bfd5b792e3a4763f7ec991b732", "score": "0.6465477", "text": "def test_enforce_unique_id(self):\n ir = IntracellularRecordingsTable()\n ir.add_recording(electrode=self.electrode,\n stimulus=self.stimulus,\n response=self.response,\n id=np.int64(10))\n sw = SimultaneousRecordingsTable(intracellular_recordings_table=ir)\n sw.add_simultaneous_recording(recordings=[0], id=np.int64(10))\n with self.assertRaises(ValueError):\n sw.add_simultaneous_recording(recordings=[0], id=np.int64(10))", "title": "" }, { "docid": "cfc17de0eaf8111d9a727bdc90842e9d", "score": "0.6463552", "text": "def unique_id(self):\n raise NotImplementedError()", "title": "" }, { "docid": "cd7a37cf6eb2ef651ae0c302f2debbb3", "score": "0.64241207", "text": "def testAutoCreatedID(self):\n self.failUnlessEqual(type(self.node.node_id), str, 'Node does not have a valid ID')\n self.failUnlessEqual(len(self.node.node_id), 48, 'Node ID length is incorrect! '\n 'Expected 384 bits, got %d bits.' 
%\n (len(self.node.node_id) * 8))", "title": "" }, { "docid": "db96dc588b2a9a04f99b4a7861751bda", "score": "0.64081246", "text": "def is_identifier_pid(self, pid):\n self.prefetch_unique_ids()\n return self.r.sismember(self.unique_ids_key, pid)", "title": "" }, { "docid": "6049960d92dfd164bc1fbd035abb36a9", "score": "0.6388217", "text": "def is_valid_id(self, request_id):\n pass", "title": "" }, { "docid": "da6ba0a308ffe984c1a5ff7c26c6f27e", "score": "0.6379894", "text": "def _isColumnUnique(col):\n return isinstance(col, _StoreIDComparer)", "title": "" }, { "docid": "66765786aa48ef4b9de0aea51d2c4bac", "score": "0.637761", "text": "def getUnique(self) -> int:\n ...", "title": "" }, { "docid": "ed0d62b65977399d2184b6479cc3e0dc", "score": "0.634467", "text": "def ensure_unique_vnf_id (self):\n try:\n return self.__config[ADAPT]['DOV']['ENSURE-UNIQUE-VNF-ID']\n except KeyError:\n return False", "title": "" }, { "docid": "24b0ec6468f1e9d76f46fe5e7db21aa2", "score": "0.6342985", "text": "def id_based(self):\n return self.version is None", "title": "" }, { "docid": "3573ea3b8c11be4b62374e90e611e7b8", "score": "0.634025", "text": "def test_unique_id_must_be_equals(self):\r\n from pyga.requests import Visitor\r\n\r\n visitor = Visitor()\r\n serialized_visitor = dumps(visitor)\r\n deserialized_visitor = loads(serialized_visitor)\r\n self.assertEqual(visitor.unique_id, deserialized_visitor.unique_id)", "title": "" }, { "docid": "e1e0b99eea08ebfba507c4d180aafb63", "score": "0.62905854", "text": "def check_id(line):\n line = line.split()\n if len(line) < 2:\n print(\"** instance id missing **\")\n return False\n else:\n my_key = line[0]+\".\"+line[1]\n objects = storage.all()\n if my_key not in objects.keys():\n print(\"** no instance found **\")\n return False\n return True", "title": "" }, { "docid": "05d2273014c46aa1e36017858f7e22c6", "score": "0.6276442", "text": "def exist(userid):\n try:\n idnaam(userid)\n return True\n except:\n return False", "title": "" }, { "docid": "6aeaf89d70d85039c17c184a285f5a16", "score": "0.62701064", "text": "def _checkId(self, id, allow_dup=0):\n superCheckId = SkinnableObjectManager.inheritedAttribute('_checkId')\n if not allow_dup:\n # Temporarily disable skindata.\n # Note that this depends heavily on Zope's current thread\n # behavior.\n tid = get_ident()\n sd = SKINDATA.get(tid)\n if sd is not None:\n del SKINDATA[tid]\n try:\n base = getattr(self, 'aq_base', self)\n if not hasattr(base, id):\n # Cause _checkId to not check for duplication.\n return superCheckId(self, id, allow_dup=1)\n finally:\n if sd is not None:\n SKINDATA[tid] = sd\n return superCheckId(self, id, allow_dup)", "title": "" }, { "docid": "280ea6c41df18dc8185071e05b67447c", "score": "0.6259629", "text": "def isUnique(self, attr, value):\n search_str = filter_format('(%s=%s)', (attr, str(value)))\n res = self._delegate.search( base=self.users_base\n , scope=self.users_scope\n , filter=search_str\n )\n\n if res['exception']:\n return res['exception']\n\n return res['size'] < 1", "title": "" }, { "docid": "302217f37ceb1229b9f21b15e5fe4075", "score": "0.6253841", "text": "def test_valid_id(self):\n valid_id = student_class.is_valid_id(\"002\")\n self.assertTrue(valid_id)\n\n invalid_id = student_class.is_valid_id(\"fdmndfdng\")\n self.assertFalse(invalid_id)", "title": "" }, { "docid": "991399156f592196ca79327b6ccd6811", "score": "0.62532437", "text": "def testId(self):\n self.assertNotEqual(self.velour.id, self.velour1.id)\n self.assertNotEqual(self.velour.id, self.velour2.id)\n 
self.assertNotEqual(self.velour2.id, self.velour3.id)\n self.assertNotEqual(self.velour3.id, self.velour4.id)\n self.assertNotEqual(self.velour4.id, self.velour5.id)\n self.assertNotEqual(self.velour1.id, self.velour2.id)\n\n self.assertIsInstance(self.velour.id, str)", "title": "" }, { "docid": "d5a704eddf05542e2f15ea9693fd2b91", "score": "0.6248747", "text": "def test_duplicateid(self):\n Rectangle.reset_objects()\n r1 = Rectangle(10, 10, 10, 10, 5)\n r2 = Rectangle(10, 10, 10, 10, 5)\n self.assertEqual(r1.id, 5)\n self.assertEqual(r2.id, 5)", "title": "" }, { "docid": "6a6190ce1691be1811ddd5b873552a50", "score": "0.6218149", "text": "def ensure_unique_bisbis_id (self):\n try:\n return self.__config[ADAPT]['DOV']['ENSURE-UNIQUE-BiSBiS-ID']\n except KeyError:\n return False", "title": "" }, { "docid": "d8c8457530c89538ccaebb193c0cf9ad", "score": "0.62170565", "text": "def exact_uniqueness(self):\n return self._exact_uniqueness", "title": "" }, { "docid": "7d5a91616fbc477112d25b81381f213f", "score": "0.61898947", "text": "def isUnique(self, word):\n key = word if len(word) <= 2 else (word[0] + str(len(word) - 2) + word[-1])\n return True if key not in self.d else ((word in self.d[key]) and (len(self.d[key]) == 1))", "title": "" }, { "docid": "1b0a1f254d9415e01fb9b3437aa07680", "score": "0.61893594", "text": "def getUIDValidity():", "title": "" }, { "docid": "8bbfb962b589cb31450e245e32f7cdea", "score": "0.6176629", "text": "def uidAlreadyAssigned(self, element):\n if 'uid' in element[list(element)[0]].keys():\n return 1\n return 0", "title": "" }, { "docid": "c726bad8ca0eb79248f45b5b2f48c0ac", "score": "0.6155971", "text": "def is_valid(id):\n if int(id) <= 0 or int(id) >= sys.maxint / 100:\n return False\n\n return True", "title": "" }, { "docid": "87ed80bd0e3688d3ba1504859272dbf8", "score": "0.6142852", "text": "def isDup(gid, name):\n return re.match(r\"D\"+gid,name)", "title": "" }, { "docid": "f45d23fb648ea534d4e605b04c2e5922", "score": "0.6137488", "text": "def id_exists(self, n):\n\n self.c.execute(\"select * from ipmap where id=?\", (n,))\n if self.c.fetchone():\n return True\n return False", "title": "" }, { "docid": "652863dd2f88d6b43e747ff06b084560", "score": "0.6132995", "text": "def is_valid_id(val):\n regex = \"^%s$\" % ID_PATTERN\n if re.match(regex, val) is None:\n return False\n else:\n return True", "title": "" }, { "docid": "77d403ae9085a48116f82c933cd2b893", "score": "0.613197", "text": "def _get_unique_id(self):\n return None", "title": "" }, { "docid": "a34a5c7407191a8f1f0347254e9817c0", "score": "0.61226654", "text": "def test_duplication_showing_uuid(self):\n code, out, err = self.t(\"1 duplicate rc.verbose:new-uuid\")\n self.assertRegex(out, \"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\")", "title": "" }, { "docid": "291a979152ba8df2a1e2de69b5fafbc9", "score": "0.61125183", "text": "def heeft_uuid(self) -> bool:\n return not self._uuid == \"\"", "title": "" }, { "docid": "510bc028f676c02d28016c041dfc373a", "score": "0.6097747", "text": "def test_team_game_pkey(team_game):\n assert dh.is_unique(team_game, ['team_id', 'game_id'])", "title": "" }, { "docid": "8810a97bc7366b92d6d6367c45e0f04f", "score": "0.6081523", "text": "def _check_iso_unique(iso_code, coin_id=False):\n\n db, c = get_db()\n msg = None\n if not coin_id:\n c.execute(\n \"SELECT id from coin where iso_code = %s\",\n (iso_code,),\n )\n else:\n c.execute(\n \"SELECT id from coin where iso_code = %s AND id != %s\",\n (iso_code, coin_id),\n )\n\n if c.fetchone() is not 
None:\n msg = \"La moneda {} se encuentra registrado.\".format(iso_code)\n close_db()\n return msg", "title": "" }, { "docid": "fa7d1c942ff75d5cab3d31764c128f9e", "score": "0.60786396", "text": "def add_unique(self, *args) -> \"bool\":\n return _ida_idd.call_stack_t_add_unique(self, *args)", "title": "" }, { "docid": "30565e783807f5c135e901c06c0b58eb", "score": "0.6074432", "text": "def test_unique_id(self, _1):\n file_mgr = MagicMock(spec=UploadedFileManager)\n rs1 = ReportSession(None, \"\", \"\", file_mgr)\n rs2 = ReportSession(None, \"\", \"\", file_mgr)\n self.assertNotEqual(rs1.id, rs2.id)", "title": "" }, { "docid": "382af20f22db35f007a8b8586c9cec4d", "score": "0.6073526", "text": "def test_uuid(self):\n print(\"testing uuid...\")\n self.assertTrue(hasattr(self.a1, \"id\"))\n self.assertNotEqual(self.a1.id, self.a2.id)\n self.assertIsInstance(self.a1.id, str)", "title": "" }, { "docid": "3eeb0c2680ce592bafd9746a7be7b308", "score": "0.60733944", "text": "def _is_duplicate_id(self, node_id, filename):\n if node_id in self.nodes:\n return self.nodes[node_id].filename\n return False", "title": "" }, { "docid": "7d70ecc841aab1b9723d2636304e8bdb", "score": "0.6070563", "text": "def test_verify_ID(self): #DONE\n s_val = '123'\n f_val = '234'\n success = models.verify_id(s_val)\n failure = models.verify_id(f_val)\n assert success #verify_id did not return true when input and item_id were equal\"\n assert not failure #verify_id did not return false when input and item_id were different\"", "title": "" }, { "docid": "bce8d145e22bd308b44f293e9e5b835d", "score": "0.60512036", "text": "def has_id_or_name(self):\n return self.id_or_name() is not None", "title": "" }, { "docid": "e8fde095ee427af1b4f68c48544c929e", "score": "0.6047332", "text": "def uniqueId(self, prefix=\"\"):\n _IdCounter.count += 1\n id = _IdCounter.count\n if prefix:\n return self._wrap(prefix + str(id))\n else:\n return self._wrap(id)", "title": "" }, { "docid": "b41cef4bc68e486977f321296cb9b49b", "score": "0.6038919", "text": "def allowsDuplicates(self) -> bool:\n ...", "title": "" }, { "docid": "da3c46fe19760b91d57fbfdc521dd859", "score": "0.6029379", "text": "def add_unique(self, *args) -> \"bool\":\n return _ida_idd.meminfo_vec_t_add_unique(self, *args)", "title": "" }, { "docid": "1f8cde7763a761589211407233feaa18", "score": "0.60198045", "text": "def test_event_pkey(event):\n assert dh.is_unique(event, ['game_id', 'event_id'])", "title": "" }, { "docid": "84912b427023752830bafd08e5085553", "score": "0.60106295", "text": "def is_ID(s):\n\tif len(s) == 0 or len(s) > 250:\n\t\treturn False\n\tresult = bool(re.match(\"^[A-Za-z]+\\d*$\", s))\n\treturn result", "title": "" }, { "docid": "38f177db65dc153abd7731dd3d920f86", "score": "0.5996673", "text": "def ad_group_ids_are_unique(df):\n return len(df[['ad_group_id']].drop_duplicates()) == len(df)", "title": "" }, { "docid": "03509d80cd45d8d7e804a4cfae423ae8", "score": "0.5987766", "text": "def unique_id_base(self, id_base, note):\n if not hasattr(self, 'id_bases'):\n self.id_bases = {}\n is_unique = id_base not in self.id_bases.keys()\n if not is_unique:\n self.warn('ID Base collision {0} in {1} and {2}.'.format(\n id_base, note, self.id_bases[id_base]))\n self.id_bases[id_base] = note\n return is_unique", "title": "" }, { "docid": "d03938fe19aeb566f984a6bf92ac0a21", "score": "0.59839565", "text": "def _check_duplicate(self, article_id):\n return self.es.exists(\n index='spiegel-international-news',\n doc_type='_doc',\n id=article_id)", "title": "" }, { "docid": 
"94ce4b4b6aca61ebadb9e6a78e9e7372", "score": "0.5964941", "text": "def check_id(id_: str):\n if id_ == '' or all_base62.search(id_) is None:\n raise ConversionError(f'Invalid id: \"{id_}\"!')", "title": "" }, { "docid": "2efaa3bc94111155eb773c77d3e9df95", "score": "0.5960485", "text": "def testId(self):\n sixteenSRawSeqSet = self.session.create_16s_raw_seq_set()\n\n self.assertTrue(sixteenSRawSeqSet.id is None,\n \"New template SixteenSRawSeqSet has no ID.\")\n\n with self.assertRaises(AttributeError):\n sixteenSRawSeqSet.id = \"test\"", "title": "" }, { "docid": "7de892f540719047896cc659132967e9", "score": "0.59591466", "text": "async def test_unique_id(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_unique_id(hass, mqtt_mock_entry, sensor.DOMAIN)", "title": "" }, { "docid": "ae2843927b74abb5b346e719a8569009", "score": "0.59554", "text": "def test_comparison_on_different_unique_identifiers(self):\n a = payloads.ModifyAttributeResponsePayload(unique_identifier=\"1\")\n b = payloads.ModifyAttributeResponsePayload(unique_identifier=\"2\")\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "title": "" }, { "docid": "630ef154bc9ba5cea20d2ed8c3d115d8", "score": "0.5950699", "text": "def unique_id(self):\n return _COWN_swig.test_sptr_unique_id(self)", "title": "" }, { "docid": "5d836a34c20af0c80fdf56f856da9963", "score": "0.59498674", "text": "def test_uniqueIdentifier(self):\n class Dummy(BaseContactType):\n pass\n identifier = Dummy().uniqueIdentifier()\n self.assertTrue(isinstance(identifier, unicode))\n self.assertEqual(identifier, __name__ + '.' + Dummy.__name__)", "title": "" }, { "docid": "9d06ba7ad83c858c9edb64c3e1b9fb2a", "score": "0.5944057", "text": "def testatomidvalid(self):\n mol = Molecule(SMILES='CCCC')\n for index, atom in enumerate(mol.atoms):\n atom.id =index\n self.assertTrue(mol.atomIDValid())", "title": "" }, { "docid": "86ea37928938c49d1a243128f421bd98", "score": "0.5936986", "text": "def test_id(self):\r\n\r\n # Default initialized id value is \"\"\r\n self.assertEqual(self.tr.getID(), '')\r\n\r\n # Test id name setter and getter.\r\n for id_ in TEST_NAMES:\r\n self.tr.setID(id_)\r\n self.assertEqual(id_, self.tr.getID())", "title": "" }, { "docid": "4587a1493c0d2be92fe94aef561ee520", "score": "0.59270734", "text": "def _unique(self):\n\n return", "title": "" }, { "docid": "bbf3b714b0a65bfd05def30638c3a3d0", "score": "0.59194064", "text": "def check_for_test_id_entry(self, primary_id):\n\n if primary_id in self.test_id_set:\n return True\n return False", "title": "" }, { "docid": "d061a70aadc4ca5211555b3fad7f5334", "score": "0.59134346", "text": "def unique_id(self):\n return self._uniqe_id", "title": "" }, { "docid": "ea9cb2c981e18ab6c6f998c6b2477e06", "score": "0.589867", "text": "def test_not_unique_single_col(self):\n df = datasets.employee_dups\n g = dfx.GrainDf(df, uniq_threshold=.6)\n self.assertEqual(False, g.unique)\n self.assertEqual(g.columns, ('employee_id',))", "title": "" }, { "docid": "f12769f66a51a7e933af1ad528ca69af", "score": "0.5898389", "text": "def testId(self):\n self.new_id_1 = Place()\n self.new_id_2 = Place()\n self.assertNotEqual(self.new_id_1.id, self.new_id_2.id)", "title": "" }, { "docid": "3bcb13e2590a5a8bfb427d823439eac9", "score": "0.5897343", "text": "def test_fielding_pkey(fielding):\n assert dh.is_unique(fielding, ['player_id', 'game_id', 'pos'])", "title": "" }, { "docid": "500829e900f5d23196f8b943c0727b2b", "score": "0.58868414", 
"text": "def isUnique(self, word):\n key = word if len(word) <= 2 else word[0] + str(len(word[1:-1])) + word[-1]\n return key not in self.map or self.map[key] == word", "title": "" }, { "docid": "01f058a41d462ea47d30a8409cd0bf49", "score": "0.5886061", "text": "def is_primary_identifier(self):\n try:\n return self.uniform_depth() == 1\n except ValueError: # the depth of the path is not uniform\n return False", "title": "" }, { "docid": "fd1a8b673d815696f51683a6f644f534", "score": "0.58818084", "text": "def get_unique_id(url, username):\n iteration = 0\n hdigest = \"%s-%s\" % (url, random.random())\n while True:\n hdigest = hashlib.sha512(hdigest).hexdigest()\n if iteration < 10:\n short_url = hdigest[0:4]\n else:\n length = 4 + int(math.sqrt(iteration - 10))\n if iteration == 100:\n raise Exception(\"Can't find unique shorturl\")\n try:\n obj = Url.objects.create(short_url=short_url, short_name=short_url, username=username, destination_url=url)\n obj.save()\n return obj\n except IntegrityError:\n iteration += 1", "title": "" }, { "docid": "352e64b9b87a98d277230efbc03fbc99", "score": "0.58784366", "text": "def check_unique(ser : pd.Series) -> any:\n l = ser.unique()\n if len(l) != 1:\n raise Exception(\"Duplication found\", l)\n return(l[0])", "title": "" }, { "docid": "95463846bfd1211042327183b8b71b79", "score": "0.5875114", "text": "def __hash__(self):\n return hash(self.id_)", "title": "" }, { "docid": "95463846bfd1211042327183b8b71b79", "score": "0.5875114", "text": "def __hash__(self):\n return hash(self.id_)", "title": "" }, { "docid": "85d3df4eb6f8b7de639df8641f6edb69", "score": "0.58725786", "text": "def IsSaneUuid(self, uuid):\n return re.search(r'^[0-9A-Z\\-]+$', uuid) is not None", "title": "" }, { "docid": "e3165c07c96ffaeaf9ae511d9b477c12", "score": "0.5872402", "text": "def do_unique(self,args):\n if not self.ctlr.unique_cmd(args):\n self._invalid(\"unique\")", "title": "" }, { "docid": "4cbbd0ec2f4ac45546efd19e28c44fab", "score": "0.5868689", "text": "def isSetId(self):\n return _libsedml.SedBase_isSetId(self)", "title": "" }, { "docid": "b8e431ad148dc318b9ec9b1cd5548fe8", "score": "0.5864297", "text": "def check_uuid_authenticity(uuid: str):\n if (len(uuid) != pure_uuid_length):\n return False\n return True", "title": "" }, { "docid": "7fb239a39f0d81e5725385d0d9bf81b8", "score": "0.58615774", "text": "def test_uuid(self):\n bm1 = BaseModel()\n bm2 = BaseModel()\n self.assertTrue(hasattr(bm1, \"id\"))\n self.assertNotEqual(bm1.id, bm2.id)", "title": "" }, { "docid": "67baf551f1e652cede4a5cfda9bf20df", "score": "0.5857937", "text": "def is_service_name_unique(self, service_id, name):\n endpoint = \"/service/name/unique\"\n params = {\"service_id\": service_id, \"name\": name}\n return self.get(url=endpoint, params=params)[\"result\"]", "title": "" }, { "docid": "2fd10011026cdca9e02ebc46198b23c3", "score": "0.5856384", "text": "def test_lahman_people_pkey(lahman_people):\n assert dh.is_unique(lahman_people, ['player_id']) # lahman player id\n assert dh.is_unique(lahman_people, ['retro_id'], ignore_null=True) # retrosheet player id", "title": "" }, { "docid": "3729710a4522a52fac92f0f6b390210d", "score": "0.58553094", "text": "def validate_uniqueness(self, tola_user):\n data = self.cleaned_data\n if PinnedReport.objects.filter(name=data['name'], program=data['program'], tola_user=tola_user).exists():\n return False\n return True", "title": "" }, { "docid": "76d5e77f54f20be5ae2f530e5829dfa4", "score": "0.5850658", "text": "def is_unique_result(self):\n return not 
self.is_constant()", "title": "" }, { "docid": "b539ac657a339455a6e8e43472673092", "score": "0.5845991", "text": "def test_unique(self):\n form = KeyAdminForm({'key': self.key.key})\n self.assertFalse(form.is_valid())\n self.assertTrue(\"exists\" in form.errors['key'][0])", "title": "" } ]
d2627244a03bd1c854720b1d51c79609
kth force dependent contribution to the model covariance function.
[ { "docid": "5c0866cc3035e94fa5bc483edd5b21b9", "score": "0.0", "text": "def Lambdag_k(k, vecg, beta, logphi_k, gamma_k, mlfm,\n eval_gradient=True):\n # structure matrices\n if beta is None:\n A = np.asarray(\n [np.zeros((mlfm.dim.K, mlfm.dim.K)),\n *mlfm.basis_mats])\n else:\n A = np.asarray([sum(brd*Lrd for brd, Lrd in zip(br, mlfm.basis_mats))\n for br in beta])\n\n covs, grads = _ls_covar_k_wgrad(logphi_k, gamma_k,\n mlfm.ttc, mlfm.latentstates[k],\n return_Cxx_inv_grad=True)\n # unpack cov and grads\n Lxx, Mdx, Schol = covs\n Cxx_inv_grad, Mdx_grad, S_grad = grads\n\n # linear repr. of the flow as a function of g\n uk = uk_flow_rep(k, vecg.reshape(mlfm.dim.R, mlfm.dim.N), A)\n\n diagUk = [np.diag(uki) for uki in uk]\n diagUk[k] -= Mdx\n Skinv_Uk = np.column_stack([cho_solve((Schol, True), Dkj) for Dkj in diagUk])\n\n lamk = np.row_stack([Dki.T for Dki in diagUk]).dot(Skinv_Uk)\n\n # add the contribution from the prior, (Cxx inverse)\n lamk[k*mlfm.dim.N:(k+1)*mlfm.dim.N,\n k*mlfm.dim.N:(k+1)*mlfm.dim.N] += cho_solve((Lxx, True),\n np.eye(Lxx.shape[0]))\n\n if eval_gradient:\n\n # gradient wrt to g\n lamk_g_gradient = []\n en = np.zeros(mlfm.dim.N)\n for r in range(mlfm.dim.R):\n for n in range(mlfm.dim.N):\n en[n] = 1. # std. basis vector\n # gradient of diag(uk) wrt g_{rn}\n dUk_grn = [np.diag(A[r+1, k, j]*en) for j in range(mlfm.dim.K)]\n expr = np.row_stack(dUk_grn).dot(Skinv_Uk)\n lamk_g_gradient.append((expr + expr.T)[..., np.newaxis])\n # reset en\n en[n] = 0.\n lamk_g_gradient = np.dstack(lamk_g_gradient)\n\n # gradient wrt to logphi\n\n if isinstance(logphi_k, float):\n P = 1\n else:\n P = len(logphi_k)\n\n # gradient of Uk wrt logphi_k\n Uk_grad = np.zeros((mlfm.dim.N, mlfm.dim.N*mlfm.dim.K, P))\n for p in range(P):\n Uk_grad[:, k*mlfm.dim.N:(k+1)*mlfm.dim.N, p] -= Mdx_grad[..., p]\n expr1 = -np.stack([Skinv_Uk.T.dot(S_grad[..., p].dot(Skinv_Uk))\n for p in range(P)], axis=2)\n expr2 = np.stack([Uk_grad[..., p].T.dot(Skinv_Uk)\n for p in range(P)], axis=2)\n expr2t = np.stack([expr2[..., p].T\n for p in range(P)], axis=2)\n lamk_logphik_gradient = expr1 + expr2 + expr2t\n # add the gradient wrt to prior\n for p in range(Cxx_inv_grad.shape[-1]):\n lamk_logphik_gradient[k*mlfm.dim.N:(k+1)*mlfm.dim.N,\n k*mlfm.dim.N:(k+1)*mlfm.dim.N,\n p] += Cxx_inv_grad[..., p]\n\n # gradient wrt to gamma_k\n lamk_gammak_gradient = -Skinv_Uk.T.dot(Skinv_Uk)[..., np.newaxis]\n\n # gradient wrt to beta\n if beta is not None:\n lamk_beta_gradient = []\n L = mlfm.basis_mats\n for r in range(mlfm.dim.R+1):\n if r == 0:\n gr = np.ones(mlfm.dim.N)\n else:\n gr = vecg[(r-1)*mlfm.dim.N:r*mlfm.dim.N]\n for d in range(mlfm.dim.D):\n dUk_brd = [np.diag(L[d][k, j]*gr)\n for j in range(mlfm.dim.K)]\n expr = np.row_stack(dUk_brd).dot(Skinv_Uk)\n lamk_beta_gradient.append(expr + expr.T)\n lamk_beta_gradient = np.dstack(lamk_beta_gradient)\n else:\n lamk_beta_gradient = 0. # return something summable\n\n return lamk, lamk_g_gradient, \\\n lamk_beta_gradient, \\\n lamk_logphik_gradient, lamk_gammak_gradient\n\n else:\n return lamk", "title": "" } ]
[ { "docid": "c7c5de377203ac744f5111948e9c9ed8", "score": "0.66864216", "text": "def autocovariance(H, k):\n return 0.5 * (abs(k - 1) ** (2 * H) - 2 * abs(k) ** (2 * H) +\n abs(k + 1) ** (2 * H))", "title": "" }, { "docid": "88a1ca40a00dd305bc1120fc6500d2fb", "score": "0.62383014", "text": "def cdf(self, k):\n if isinstance(k, (numbers.Integral, framework.Variable)):\n return 1.0 - paddle.pow((1.0 - self.probs), k)\n else:\n raise TypeError(\n f\"Expected type of k is number.Real|framework.Variable, but got {type(k)}\"\n )", "title": "" }, { "docid": "1b67d737ef9e478c201afb132cbff6aa", "score": "0.61956054", "text": "def covariance_exact(Cl, Kk, Gk):\n # Rlk = et.mtxabat(Gk, Cl)\n # return Cl - et.mtxab(Kk, 2*et.mtxab(Gk, Cl) - et.mtxabt(Rlk, Kk)) \n return et.mtxabat(np.eye(Cl.shape[-1]) - et.mtxab(Kk, Gk), Cl)", "title": "" }, { "docid": "36b52f6ab6148880a8dcd9c83df52491", "score": "0.60510504", "text": "def calc_covariance( self, x1, x2 = None ):\n import types\n symmetric = None == x2 # is it symmetric?\n if symmetric: x2 = x1\n def f(i1,i2): return self.k( x1[i1], x2[i2], symmetric and i1 == i2 )\n return \\\n infpy.matrix_from_function(\n f,\n ( len( x1 ), len( x2 ) ),\n numpy.float64,\n symmetric )", "title": "" }, { "docid": "c72800705351a9a7864c4dd1b92dc694", "score": "0.5976111", "text": "def _compute_kyy(self):\n num_input = self.Y.size(0)\n\n return self.kernel.K(self.X) + \\\n (self.likelihood.variance.transform()).expand(\n num_input, num_input).diag().diag()", "title": "" }, { "docid": "38ac8d292058836e63517a9e885af31f", "score": "0.59097165", "text": "def covariance_fast(Cl, Kk, Hk):\n return et.mtxab(np.eye(Cl.shape[-1]) - et.mtxab(Kk, Hk), Cl)", "title": "" }, { "docid": "f12a77a1f880f8be1f03fef7d5c4e694", "score": "0.589635", "text": "def K_mm(self, eig_correction=1e-2):\n return self.cov.kron_cov(self.inputs_dists, eig_correction)", "title": "" }, { "docid": "2b5e5274b2dd177bdc69da9ea40e365b", "score": "0.5889225", "text": "def Cal_k(mu): # THERMC\r\n return(mu * Cp / Pr)", "title": "" }, { "docid": "c510f9f29c175002846924cdba7c956e", "score": "0.58370847", "text": "def covariances_determinants(self) :\n S = self.params[\"gamma\"]\n if S.shape[1] == 2*2: dets = S[:,0]*S[:,3] - S[:,1]*S[:,2]\n else : raise NotImplementedError\n return dets.view(-1,1)", "title": "" }, { "docid": "533a350f664d59a1786e8d6fe6e5f9d1", "score": "0.5828521", "text": "def cdf(self, k):\n k = int(k)\n\n if k < 0:\n return 0\n\n summation = 0\n for i in range(k + 1):\n summation += (self.lambtha ** i) / (self.factorial(i))\n\n return (self.EULER_NUMBER ** -self.lambtha) * summation", "title": "" }, { "docid": "c549c35b4d4f7604fefe3228f8d9ea18", "score": "0.5819809", "text": "def update_covariances(self) :\n (M,D,_) = self.A.shape\n self.params[\"gamma\"] = (self.A @ self.A.transpose(1,2)).view(M, D*D)", "title": "" }, { "docid": "31b9e019e83ca69643237e94290512b3", "score": "0.5815422", "text": "def k_corr():\n\n return None", "title": "" }, { "docid": "84b02f2630e009ba083c5749bf409a7b", "score": "0.5775948", "text": "def shrink_cov(data, k=None):\n\n shape = data.shape\n assert len(shape) == 2, 'input must be a 2d array'\n n, p = shape\n if k is None:\n data = data - np.mean(data, axis=0)\n k = 1\n\n n = n - k # effective sample size\n assert n >= 12, \"sample size n must be >= 12\"\n sample_cov = np.dot(data.T, data) / n\n\n # % extract sample eigenvalues sorted in ascending order and eigenvectors\n lam, u = np.linalg.eigh(sample_cov)\n # compute analytical nonlinear shrinkage kernel formula\n 
lam = lam[np.maximum(0, p - n):]\n if any(lam / sum(lam) < EPS):\n raise ValueError(\"Matrix is singular\")\n L = ml.repmat(lam.T, np.minimum(p, n), 1).T\n h = np.power(n, -1 / 3.)\n # % Equation(4.9)\n H = h * L.T\n x = (L - L.T) / H\n ftilde = (3 / 4. / np.sqrt(5)) * np.mean(np.maximum(\n 1 - x ** 2. / 5., 0) / H, 1)\n # % Equation(4.7)\n Hftemp = (-3 / 10 / np.pi) * x + (3 / 4. / np.sqrt(5)\n / np.pi) * (1 - x ** 2. / 5.) * np.log(\n np.abs((np.sqrt(5) - x) / (np.sqrt(5) + x)))\n # % Equation(4.8)\n Hftemp[np.abs(x) == np.sqrt(5)] = \\\n (-3 / 10 / np.pi) * x[np.abs(x) == np.sqrt(5)]\n Hftilde = np.mean(Hftemp / H, 1)\n if p <= n:\n dtilde = lam / ((np.pi * (p / n) * lam * ftilde) ** 2\n + (1 - (p / n) - np.pi * (p / n) * lam * Hftilde) ** 2)\n # % Equation(4.3)\n else:\n Hftilde0 = (1 / np.pi) * (3 / 10. / h ** 2 + 3 / 4. / np.sqrt(5) / h * (1 - 1 / 5. / h ** 2)\n * np.log((1 + np.sqrt(5) * h) / (1 - np.sqrt(5) * h))) * np.mean(1 / lam)\n # % Equation(C.8)\n dtilde0 = 1 / (np.pi * (p - n) / n * Hftilde0)\n # % Equation(C.5)\n dtilde1 = lam / (np.pi ** 2 * lam ** 2. * (ftilde ** 2 + Hftilde ** 2))\n # % Eq. (C.4)\n dtilde = np.concatenate([dtilde0 * np.ones((p - n)), dtilde1])\n\n sigmatilde = np.dot(np.dot(u, np.diag(dtilde)), u.T)\n\n return sigmatilde", "title": "" }, { "docid": "8905671078dc14933821fa09e498e75e", "score": "0.57634825", "text": "def covariance_full(Cl, Kk, Hk, Vk):\n return et.mtxabat(np.eye(Cl.shape[-1]) - et.mtxab(Kk, Hk), Cl) + et.mtxabat(Kk, Vk)", "title": "" }, { "docid": "2d40973d01073a2f9f06c94706ad2b28", "score": "0.57369643", "text": "def covariance_linear_stage(self):\n covariance = \\\n self.sigmaP**2 * np.identity(self.N) + \\\n + self.sigmaC**2 * np.outer(self.w, self.w)\n return covariance", "title": "" }, { "docid": "4a3a44343efad2203be199745dc7f158", "score": "0.5701624", "text": "def weighted_covariances(x, Z, nk, xk):\n K = xk.shape[0]\n x = np.expand_dims(x, axis=1) #[n, 1, dim]\n x = np.repeat(x, K, axis=1) #[n, K, dim]\n diff = x - xk #[n, K, dim]\n Z = np.expand_dims(Z, axis=-1) #[n, K, 1]\n w_diff = np.multiply(diff, Z) #[n, K, dim]\n diff = np.transpose(diff, (1, 0, 2))\n w_diff = np.transpose(w_diff, (1, 0, 2))\n\n s = np.einsum('kni,knj->kij', w_diff, diff) #[K, dim, dim]\n\n #divide (safely) by number of points in the cluster\n for k in range(K):\n if nk[k] > 1.0e-10:\n s[k, :, :] = s[k, :, :] / nk[k]\n return s", "title": "" }, { "docid": "41966b6b8ad9f97052b0bf60958f2ff2", "score": "0.56968343", "text": "def covariance_nonlinear_stage(self, s):\n V = self.v**2\n W = self.w**2\n X = self.v * self.w\n covariance = \\\n 2 * self.sigmaP**4 * np.identity(self.N) \\\n + 4 * self.sigmaP**2 * s**2 * np.diag(V) \\\n + 4 * self.sigmaP**2 * self.sigmaC**2 * np.diag(W) \\\n + 4 * s**2 * self.sigmaC**2 * np.outer(X, X) \\\n + 2 * self.sigmaC**4 * np.outer(W, W)\n return covariance", "title": "" }, { "docid": "b5492d9b14021430b7445cdd381b583a", "score": "0.56899744", "text": "def _update_force_params(self):\n self._cov_r_prod = self._covariance.dot(self.r) # old cov, new r\n self._covariance -= np.outer(self._cov_r_prod, self._cov_r_prod / (1.0 + self.r.dot(self._cov_r_prod)))", "title": "" }, { "docid": "c23ef9d9cba0cca0fd5e2a141c350113", "score": "0.56636935", "text": "def model_covariance(self, tau=0.0):\n\n # Calculate zero lag-covariance Q0 by solving Lyapunov equation\n Q0 = spl.solve_continuous_lyapunov(self.J.T, -self.Sigma)\n # Calculate the effect of the lag (still valid for tau = 0.0)\n if tau >= 0.0:\n return np.dot(Q0, spl.expm(tau * 
self.J))\n else:\n return np.dot(spl.expm(-tau * self.J.T), Q0)", "title": "" }, { "docid": "01874847138ce4c3f0d929c0c0b42f3c", "score": "0.5658942", "text": "def k_evolve(dt, k, psi_x):\n\n for n in range(2):\n\n psi_k = fft(psi_x[:,n])\n #psi_k = fftshift(psi_k)\n\n psi_k *= np.exp(-0.5 * 1j / m * (k * k) * dt)\n\n psi_x[:,n] = ifft(psi_k)\n\n\n #psi_x = property(_get_psi_x, _set_psi_x)\n #psi_k = property(_get_psi_k, _set_psi_k)\n #dt = property(_get_dt, _set_dt)", "title": "" }, { "docid": "5f0a984f8043cafc9c2c7b953a7cd93e", "score": "0.56581724", "text": "def get_covariances(self):\n return self.Sigma", "title": "" }, { "docid": "54bc646429edb513302123c19796bd80", "score": "0.5657121", "text": "def _indepFit(self,x,conc=1.0,K=1.0): \n \n return conc*(K**x)", "title": "" }, { "docid": "7e17b4aaf82124fdc14e5d3bff579837", "score": "0.5645009", "text": "def k_v(self) -> float:\n return self._k_v", "title": "" }, { "docid": "642acd2e7a1601438f564f1b8efd0f56", "score": "0.56301993", "text": "def calc_covariance_derivative( self, i, x1, x2 = None ):\n import types\n symmetric = None == x2 # is it symmetric?\n if symmetric: x2 = x1\n deriv = self.k.derivative_wrt_param( i )\n def f(i1,i2): return deriv( x1[i1], x2[i2], symmetric and i1 == i2 )\n return \\\n infpy.matrix_from_function(\n f,\n ( len( x1 ), len( x2 ) ),\n numpy.float64,\n symmetric )", "title": "" }, { "docid": "fce1de218b4a4723df1d662b5a3771ec", "score": "0.56166106", "text": "def K(self, X, X2=None): \n # model : -a d^2y/dx^2 + b dy/dt + c * y = U\n # kernel Kyy rbf spatiol temporal\n # vyt Y temporal variance vyx Y spatiol variance lyt Y temporal lengthscale lyx Y spatiol lengthscale\n # kernel Kuu doper( doper(Kyy))\n # a b c lyt lyx vyx*vyt \n X,slices = X[:,:-1],index_to_slices(X[:,-1])\n if X2 is None:\n X2,slices2 = X,slices\n K = np.zeros((X.shape[0], X.shape[0]))\n else:\n X2,slices2 = X2[:,:-1],index_to_slices(X2[:,-1])\n K = np.zeros((X.shape[0], X2.shape[0]))\n\n\n tdist = (X[:,0][:,None] - X2[:,0][None,:])**2\n xdist = (X[:,1][:,None] - X2[:,1][None,:])**2\n\n ttdist = (X[:,0][:,None] - X2[:,0][None,:])\n #rdist = [tdist,xdist]\n #dist = np.abs(X - X2.T)\n vyt = self.variance_Yt\n vyx = self.variance_Yx\n \n lyt=1/(2*self.lengthscale_Yt)\n lyx=1/(2*self.lengthscale_Yx)\n\n a = self.a ## -a is used in the model, negtive diffusion\n b = self.b\n c = self.c\n\n kyy = lambda tdist,xdist: np.exp(-lyt*(tdist) -lyx*(xdist))\n\n k1 = lambda tdist: (2*lyt - 4*lyt**2 * (tdist) )\n\n k2 = lambda xdist: ( 4*lyx**2 * (xdist) - 2*lyx )\n\n k3 = lambda xdist: ( 3*4*lyx**2 - 6*8*xdist*lyx**3 + 16*xdist**2*lyx**4 )\n\n k4 = lambda ttdist: 2*lyt*(ttdist)\n\n for i, s1 in enumerate(slices):\n for j, s2 in enumerate(slices2):\n for ss1 in s1:\n for ss2 in s2:\n if i==0 and j==0:\n K[ss1,ss2] = vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])\n elif i==0 and j==1:\n K[ss1,ss2] = (-a*k2(xdist[ss1,ss2]) + b*k4(ttdist[ss1,ss2]) + c)*vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])\n #K[ss1,ss2]= np.where( rdist[ss1,ss2]>0 , kuyp(np.abs(rdist[ss1,ss2])), kuyn(np.abs(rdist[ss1,ss2]) ) )\n #K[ss1,ss2]= np.where( rdist[ss1,ss2]>0 , kuyp(rdist[ss1,ss2]), kuyn(rdist[ss1,ss2] ) )\n elif i==1 and j==1:\n K[ss1,ss2] = ( b**2*k1(tdist[ss1,ss2]) - 2*a*c*k2(xdist[ss1,ss2]) + a**2*k3(xdist[ss1,ss2]) + c**2 )* vyt*vyx* kyy(tdist[ss1,ss2],xdist[ss1,ss2])\n else:\n K[ss1,ss2] = (-a*k2(xdist[ss1,ss2]) - b*k4(ttdist[ss1,ss2]) + c)*vyt*vyx*kyy(tdist[ss1,ss2],xdist[ss1,ss2])\n #K[ss1,ss2]= np.where( rdist[ss1,ss2]>0 , kyup(np.abs(rdist[ss1,ss2])), 
kyun(np.abs(rdist[ss1,ss2]) ) )\n #K[ss1,ss2] = np.where( rdist[ss1,ss2]>0 , kyup(rdist[ss1,ss2]), kyun(rdist[ss1,ss2] ) )\n \n #stop\n return K", "title": "" }, { "docid": "c24175815c3101800afb8e213b73d74a", "score": "0.56101805", "text": "def update_covariance(self, j, Sj):\n raise NotImplementedError", "title": "" }, { "docid": "da312a9519eda3b11d8dacae94ee0327", "score": "0.5601365", "text": "def update_K(K):\n \n # Get the mixture composition for the current K-factor\n xi, beta = gas_liq_eq(m, M, K)\n \n # Get tha gas and liquid fugacities for the current composition\n f_gas = dbm_f.fugacity(T, P, xi[0,:]*M, M, Pc, Tc, omega, delta, \n Aij, Bij, delta_groups, calc_delta)[0,:]\n f_liq = dbm_f.fugacity(T, P, xi[1,:]*M, M, Pc, Tc, omega, delta, \n Aij, Bij, delta_groups, calc_delta)[1,:]\n \n # Update K using K = (phi_liq / phi_gas)\n K_new = (f_liq / (xi[1,:] * P)) / (f_gas / (xi[0,:] * P))\n \n # If the mass of any component in the mixture is zero, make sure the\n # K-factor is also zero.\n K_new[np.isnan(K_new)] = 0.\n \n # Follow what is said by Michelsen & Mollerup, at page 259, just \n # above equation 27:\n if steps==0.:\n moles = m / M\n zi = moles / np.sum(moles)\n if np.sum(zi*K_new) - 1. <= 0.: # Condition 4 page 252\n xi[0,:] = K_new * zi / np.sum(K_new*zi)\n xi[1,:] = zi\n \n # Recompute fugacities of gas and liquid:\n # Get tha gas and liquid fugacities for the current \n # composition\n f_gas = dbm_f.fugacity(T, P, xi[0,:]*M, M, Pc, Tc, omega, \n delta, Aij, Bij, delta_groups, calc_delta)[0,:]\n f_liq = dbm_f.fugacity(T, P, xi[1,:]*M, M, Pc, Tc, omega, \n delta, Aij, Bij, delta_groups, calc_delta)[1,:]\n \n # Update K using K = (phi_liq / phi_gas)\n K_new = (f_liq / (xi[1,:] * P)) / (f_gas / (xi[0,:] * P))\n K_new[np.isnan(K_new)] = 0.\n \n elif (1.-np.sum(zi/K_new))>=0.: # % Condition 5 page 252\n xi[0,:] = zi\n xi[1,:] = (zi/K_new)/np.sum(zi/K_new)\n \n # Recompute fugacities of gas and liquid:\n # Get tha gas and liquid fugacities for the current \n # composition\n f_gas = dbm_f.fugacity(T, P, xi[0,:]*M, M, Pc, Tc, omega, \n delta, Aij, Bij, delta_groups, calc_delta)[0,:]\n f_liq = dbm_f.fugacity(T, P, xi[1,:]*M, M, Pc, Tc, omega, \n delta, Aij, Bij, delta_groups, calc_delta)[1,:]\n \n # Update K using K = (phi_liq / phi_gas)\n K_new = (f_liq / (xi[1,:] * P)) / (f_gas / (xi[0,:] * P))\n K_new[np.isnan(K_new)] = 0.\n \n # Return an updated value for the K factors\n return (K_new, beta)", "title": "" }, { "docid": "df71a3e35019bc64dfcf751ed0a05911", "score": "0.5596735", "text": "def k(self):\n if self._k is None:\n self._k = self.distributions.constant(0)\n return self._k", "title": "" }, { "docid": "9968bf45f56c4f76bb3b906dba06b389", "score": "0.55535585", "text": "def dCov_0(self, t):\n # !!! 
I changed this in line_search new, Covd_0 <-> dCov_0\n \n assert isinstance(t, (float, np.float32, np.float64))\n assert self.ready\n \n T = np.array(self.ts)\n dk0t = self.kd(t, 0.)\n dk0T = self.kd(T, 0.)\n dkd0T = self.dkd(0., T)\n ktT = self.k(t, T)\n kdtT = self.kd(t, T)\n kvec_a = np.concatenate([dk0T, dkd0T])\n kvec_b = np.concatenate([ktT, kdtT])\n \n return dk0t - np.dot(kvec_a, self.solve_G(kvec_b))", "title": "" }, { "docid": "f7ae4a8eeef9349f883e8b6c637ff056", "score": "0.5529687", "text": "def cdf(self, k):\n if not isinstance(k, int):\n k = int(k)\n if k < 0 or k > self.n:\n return 0\n retval = 0\n for i in range(0, k + 1):\n retval += self.pmf(i)\n return retval", "title": "" }, { "docid": "70cd0f97c141935c9f0ee5adf34dda92", "score": "0.55272883", "text": "def covariance_normal(Cl, Kk, Hk, Rlk):\n return Cl - et.mtxab(Kk, 2*et.mtxab(Hk, Cl) - et.mtxabt(Rlk, Kk))", "title": "" }, { "docid": "afdaa866537b8a4ba3c3cc03b4380854", "score": "0.5513447", "text": "def covariance(x, y):\n n = len(x)\n return dot(de_mean(x), de_mean(y)) / (n -1)", "title": "" }, { "docid": "626410788e63f555e844eccbb2a077cf", "score": "0.55026394", "text": "def __omega_c_formula__(self, dim_to_use: int):\n pass", "title": "" }, { "docid": "7fad2014a929687a4581434154f36372", "score": "0.549566", "text": "def DynamicsCo(x, t, T0, cost_co, cost_ch, K_co, K_ch, n, r):\n y=np.zeros([np.size(x)])\n D=np.zeros([2]) \n #define fitnss\n D[0]=dmax*x[0]**n/(K_co**n+x[0]**n) #cooperator\n D[1]=dmax*x[0]**n/(K_ch**n+x[0]**n) #cheater \n #degradation\n deg=fmax*x[1]/(x[1]+Kd) \n #ODE of eco-evo dynamics\n y[0]=alpha*T0-deg*x[0]-alpha*x[0] #dt/dt\n y[1]=x[1]*(r*(1-cost_co)*(1-x[1]-x[2])-D[0]-alpha)#d Co/dt\n y[2]=x[2]*(r*(1-cost_ch)*(1-x[1]-x[2])-D[1]-alpha) #d Ch/dt\n \n return y", "title": "" }, { "docid": "2c7227e10743a53f6cd41e2d2d1d3770", "score": "0.549226", "text": "def covariance(x, y):\n n = len(x)\n return dot(de_mean(x), de_mean(y)) / (n - 1)", "title": "" }, { "docid": "2c868b631407a9cee76197de356eb54e", "score": "0.54839355", "text": "def covariantie_MM():\n xi = samples\n xi_2 = samples**2\n N = len(samples)\n\n gem_x = np.average(xi)\n gem_x2 = np.average(xi_2)\n\n dk_dx, dk_dx2, dt_dx, dt_dx2 = afgeleides_MM_schatters(gem_x, gem_x2)\n cov_x_x, cov_x_x2, cov_x2_x2 = covariantie_momenten_MM(xi, xi_2, gem_x, gem_x2, N)\n\n var_k = (dk_dx ** 2)*cov_x_x + (dk_dx2 ** 2)*cov_x2_x2 + (2 * dk_dx * dk_dx2)*cov_x_x2\n var_theta = (dt_dx ** 2)*cov_x_x + (dt_dx2 ** 2)*cov_x2_x2 + (2 * dt_dx * dt_dx2)*cov_x_x2\n cov_k_theta = (dk_dx * dt_dx)*cov_x_x + (dk_dx2 * dt_dx2)*cov_x2_x2 + (dk_dx * dt_dx2 + dk_dx2 * dt_dx) * cov_x_x2\n cor_k_theta = cov_k_theta / np.sqrt(var_k * var_theta)\n return var_k, var_theta, cor_k_theta", "title": "" }, { "docid": "1562ef8f61da3ce7cf8245819e37a7ed", "score": "0.5460084", "text": "def covariance(data):\n logger = logging.getLogger(__name__)\n logger.info(\"------ covariance(data) ------\")\n n = data.shape[0]\n return np.mean((data-np.mean(data, axis=0))**2) * n / (n-1)", "title": "" }, { "docid": "79735873134e7c84c7b9b7033b691558", "score": "0.545801", "text": "def covariance(data):\n return np.dot(data.T, data) / len(data)\n # return np.cov(data.T)", "title": "" }, { "docid": "cac1c7aa6da9292a936340cedb79041f", "score": "0.5455189", "text": "def covariance(x, y):\n n = len(x)\n return algebra.dot(de_mean(x), de_mean(y)) / (n - 1)", "title": "" }, { "docid": "c9ccf6963c943e9a1d4a4322a9dd0935", "score": "0.54470706", "text": "def get_x_end_tight_coupling(self,k):\n\n # Return x when 
tight coupling ends\n # XXX TODO XXX\n return 1.0", "title": "" }, { "docid": "22ce5284e1fc094d84aecf36015334b7", "score": "0.539402", "text": "def rho_k(xk, pk):\n return (rosenbrock_fun(xk) - rosenbrock_fun(xk + pk)) / (mk_fun(xk, [0, 0]) - mk_fun(xk, pk))", "title": "" }, { "docid": "6802d1eb2196066e3635bff433adb87b", "score": "0.53935045", "text": "def cov(self):\n if self.v is not None and self.r is not None:\n assert self.v.dim() == 1\n dim = self.v.dim()\n v = self.v.unsqueeze(1) # D * 1 vector\n rt = self.r.unsqueeze(0) # 1 * D vector\n A = torch.eye(dim) + v.mm(rt)\n return A.mm(torch.diag(self.sigma.pow(2)).mm(A.t()))\n else:\n return torch.diag(self.sigma.pow(2))", "title": "" }, { "docid": "f65e87f0f82341ff15a432957d10c72b", "score": "0.5390979", "text": "def _TFmdm_k_mpc(self, k):\n (omhh, f_hdm, f_cb, growth_k0, p_cb,\n alpha_gamma, sound_horizon_fit, \n beta_c) = (self.omhh, self.f_hdm, self.f_cb, self.growth_k0, \n self.p_cb,\n self.alpha_gamma, self.sound_horizon_fit,\n self.beta_c)\n num_degen_hdm = self.nnu\n# (Eqn 5)\n qq = k/omhh*(self.theta_cmb*self.theta_cmb)\n# Compute the scale-dependent growth function\n# (Eqn 14)\n y_freestream = (17.2*f_hdm*(1+0.488*pow(f_hdm,-7.0/6.0))*\n (num_degen_hdm*qq/f_hdm)**2)\n temp1 = pow(growth_k0, 1.0-p_cb)\n temp2 = np.power(growth_k0/(1+y_freestream),0.7)\n# (Eqn 12)\n growth_cb = np.power(1.0+temp2, p_cb/0.7)*temp1\n# (Eqn 13)\n growth_cbnu = np.power(pow(f_cb,0.7/p_cb)+temp2, p_cb/0.7)*temp1\n\n# Compute the master function\n# (Eqn 16)\n gamma_eff =(omhh*(alpha_gamma+(1-alpha_gamma)/\n (1+(k*sound_horizon_fit*0.43)**4)))\n# (Eqn 17)\n qq_eff = qq*omhh/gamma_eff\n# (Eqn 19)\n tf_sup_L = np.log(2.71828+1.84*beta_c*alpha_gamma*qq_eff)\n# (Eqn 20)\n tf_sup_C = 14.4+325/(1+60.5*pow(qq_eff,1.11))\n# (Eqn 18)\n tf_sup = tf_sup_L/(tf_sup_L+tf_sup_C*qq_eff**2)\n# (Eqn 23)\n qq_nu = 3.92*qq*np.sqrt(num_degen_hdm/f_hdm)\n# (Eqn 22)\n max_fs_correction = (1+1.2*pow(f_hdm,0.64)*\n pow(num_degen_hdm,0.3+0.6*f_hdm)/\n (np.power(qq_nu,-1.6)+np.power(qq_nu,0.8)))\n tf_master = tf_sup*max_fs_correction\n\n# Now compute the CDM+baryon transfer function\n tf_cb = tf_master*growth_cb/growth_k0\n# and the CDM+HDM+baryon transfer function\n tf_cbnu = tf_master*growth_cbnu/growth_k0\n return(tf_cb, tf_cbnu)", "title": "" }, { "docid": "11a658fef5d339f132584eefefcf9b19", "score": "0.5390455", "text": "def gen_cavity(size=25,mu=.0,sigma=1.,sigma_k=.1,gamma=-.5):\n\n\n q,v,h,phi=cavity_solve(size,mu,sigma,sigma_k,gamma)\n x=gen_x_cavity(size,q,v,h,phi,mu,sigma,sigma_k,gamma)\n #hist(x)\n avgN,varN,muA,muB=muAB(q,h,mu,sigma,gamma)\n print np.mean(x),avgN/phi\n sigma_lambda=sigma_k/sigma/avgN\n r=np.abs(np.random.normal(1,sigma_k,size))\n\n cov=np.zeros((size**2+size,size**2+size)) #covariance of a_ij, a_kl and r_i\n\n def ind4(i,j):\n return i//size, i%size, j//size,j%size\n\n #First, set variances\n it=np.nditer(cov,flags=['multi_index'])\n for v in it:\n i,j=it.multi_index\n if j>i:\n continue\n if i==j:\n if i<size**2:\n #Variance of a_ij\n ii,jj,k,l=ind4(i,j)\n #print ii,jj,k,l\n cov[i,i]=1./size\n else:\n #variance of r_i\n cov[i,i]=sigma_lambda**2\n\n\n it=np.nditer(cov,flags=['multi_index'])\n for v in it:\n i,j=it.multi_index\n if i>=j:\n continue\n if j<size**2:\n ii,jj,k,l=ind4(i,j)\n #Covariance of a_ij and a_kl\n varprod=np.sqrt( cov[i,i]*cov[j,j] ) / avgN**2\n #print varprod, avgN, cov[i,i],cov[j,j]\n #Bunin gives correlations, I need covariances, hence multiply by varprod\n if ii==k:\n cov[i,j]=cov[j,i]=-x[jj]*x[l]/size**2/(q+sigma_lambda**2) 
*varprod\n elif ii==l:\n cov[i,j]=cov[j,i]=-gamma*x[jj]*x[k]/size**2/(q+sigma_lambda**2) *varprod\n elif jj==l:\n cov[i,j]=cov[j,i]=-gamma**2*x[ii]*x[k]/size**2/(q+sigma_lambda**2) *varprod\n\n elif i<size**2:\n #Covariance of a_ij and r_i\n ii=i/size\n jj=j-size**2\n cov[i,jj]=x[ii]/size/(1+q/sigma_lambda**2)/ avgN\n cov[jj,i]=gamma*x[jj]/size/(1+q/sigma_lambda**2)/ avgN\n\n xx=np.vstack([x for i in range(x.size)]).T\n\n mu_eff=(mu-muA*xx*xx.T + muB *(gamma * xx + xx.T ))\n mu_eff=mu_eff.ravel()\n #hist(mu_eff)\n\n gen=np.random.multivariate_normal(np.concatenate((mu_eff,np.ones(size))),cov)\n\n aij=gen[:size**2].reshape((size,size))\n aij+=.5*gamma*aij.T\n\n r=gen[size**2:]\n\n\n A=-mu/size + sigma*aij\n #A[np.random.random(A.shape)>connectance ]=0\n #scatter(x,(np.dot(np.linalg.inv(A),r) ))\n x=np.dot(np.linalg.inv(A),r)\n return A,r,x", "title": "" }, { "docid": "d61cc3cbde2ba274c84dd28730a0f366", "score": "0.5390371", "text": "def G0_krho_phi_zz(k_rho,k):\n\n kz=np.sqrt(k**2-k_rho**2)\n Gzz_p=1j/(8*np.pi**2)*(k_rho**2/(kz*k**2))\n return Gzz_p", "title": "" }, { "docid": "6c05efd2a2929b52648d55581599efca", "score": "0.5388456", "text": "def plug_in_variance_estimator(x):\n if x.ndim < 2:\n x = x.reshape(1, -1)\n # the following two lines are a properly vectorized version of\n # middle_term = 0\n # for i in range(n):\n # middle_term += np.multiply.outer((x @ X[i] - (x @ X[i]) ** 2),\n # np.outer(X[i], X[i]))\n # where the matrix part does not involve x and has been computed above\n middle_term_scalar = x @ X.T - (x @ X.T) ** 2\n # print(middle_term_scalar)\n middle_term = np.tensordot(middle_term_scalar, middle_term_matrix, axes=1)\n covariances = delta_inverse @ (middle_term / n) @ delta_inverse\n return covariances", "title": "" }, { "docid": "fb7575dd6fae2b81b7aac3c066f6a00f", "score": "0.53637093", "text": "def set_covariance_disturb(self, cov_dict):\n qtarget = getattr(self.lsmhe, \"Q_mhe\")\n if self.diag_Q_R:\n for i in range(0, self.nfe_tmhe - 1):\n for x in self.x_noisy:\n for jth in self.x_vars[x]: #: the jth variable\n v_i = self.xkN_key[(x, jth)]\n qtarget[i, v_i] = 1 / cov_dict[(x, jth), (x, jth), i]\n else:\n sys.exit(1)\n\n # for key in cov_dict:\n # vni = key[0]\n # vnj = key[1]\n # _t = key[2]\n # v_i = self.xkN_key[vni]\n # v_j = self.xkN_key[vnj]\n # if self.diag_Q_R:\n # qtarget[_t, v_i] = 1 / cov_dict[vni, vnj, _t]\n # else:\n # qtarget[_t, v_i, v_j] = cov_dict[vni, vnj, _t]", "title": "" }, { "docid": "4ce10917bb0728011190d3b55cd16e6e", "score": "0.5338448", "text": "def autocov(self):\n\n Tmax = (self.M - 1)/self.fs\n Cx = np.empty((self.NX, self.M))\n\n Sx, fs = self.periodogram()\n\n for kX in range(self.NX):\n \n Sxk = np.hstack((Sx[kX,:], Sx[kX,-2:0:-1]))\n Cxk = np.fft.ifft(Sxk)*fs/2\n Cx[kX,:] = np.real(Cxk[:self.M])\n\n return Cx, Tmax", "title": "" }, { "docid": "1738030b4b1611b7942d51dfd5549c2f", "score": "0.53250736", "text": "def dCovd_0(self, t):\n \n assert isinstance(t, (float, np.float32, np.float64))\n assert self.ready\n \n T = np.array(self.ts)\n dkd0t = self.dkd(0., t)\n dk0T = self.kd(T, 0.)\n dkd0T = self.dkd(0., T)\n dktT = self.kd(T, t)\n dkdtT = self.dkd(t, T)\n kvec_a = np.concatenate([dk0T, dkd0T])\n kvec_b = np.concatenate([dktT, dkdtT])\n \n return dkd0t - np.dot(kvec_a, self.solve_G(kvec_b))", "title": "" }, { "docid": "07b74fcc58f44db4bad99a0fe5ef8a0d", "score": "0.5306611", "text": "def forcing(self, i, df):\n fco2 = self.params.forcing_co2_doubling\n ma = df.mass_atmosphere[i]\n mpi = self.params.mass_preindustrial\n fg = 
self.forcing_ghg[i]\n return ne.evaluate('fco2 * (log(ma / mpi) / log(2)) + fg')", "title": "" }, { "docid": "167b35747205920a9bb32825c21418e1", "score": "0.5305943", "text": "def plug_in_variance_estimator(x):\n if x.ndim < 2:\n x = x.reshape(1, -1)\n # the following two lines are a properly vectorized version of\n # middle_term = 0\n # for i in range(n):\n # middle_term += np.multiply.outer((x @ X[i] - (x @ X[i]) ** 2),\n # np.outer(X[i], X[i]))\n # where the matrix part does not involve x and has been computed above\n middle_term_scalar = x @ X.T - (x @ X.T) ** 2\n middle_term = np.tensordot(middle_term_scalar, middle_term_matrix, axes=1)\n covariances = delta_inverse @ (middle_term / n) @ delta_inverse\n return covariances", "title": "" }, { "docid": "736224b8eb8cda947f68c52aacaff959", "score": "0.53006256", "text": "def k_co2_splitting(temp):\n\n dg_zero = CO2Splitting().dg_zero_co2_splitting(temp)\n k_eq = pd.np.exp(dg_zero / (-R * temp))\n\n return k_eq", "title": "" }, { "docid": "716ec71556d7daf68ee39d2e1c23b192", "score": "0.52973485", "text": "def hurst_autocovariance(K, H):\n\n return np.array([0.5 * (np.abs(k - 1)**(2*H) - 2 * np.abs(k)**(2*H) + np.abs(k + 1)**(2*H)) for k in K])", "title": "" }, { "docid": "1c3c34c727e31dc48a544764646e8796", "score": "0.5293721", "text": "def tmatrix_cov(C, k=None):\n if issparse(C):\n warnings.warn(\"Covariance matrix will be dense for sparse input\")\n C = C.toarray()\n return dense.covariance.tmatrix_cov(C, row=k)", "title": "" }, { "docid": "ac636bb65f19bcf353774288aac6ac49", "score": "0.52891815", "text": "def compute_covariance(self, other, frame):\n return compute_covariance(self, other, frame)", "title": "" }, { "docid": "e550529c2dc3947737e7a1d86adb91a6", "score": "0.5284383", "text": "def Covd_0(self, t):\n # !!! 
I changed this in line_search new, Covd_0 <-> dCov_0\n \n assert isinstance(t, (float, np.float32, np.float64))\n assert self.ready\n \n T = np.array(self.ts)\n kd0t = self.kd(0., t)\n k0T = self.k(0., T)\n kd0T = self.kd(0., T)\n dktT = self.kd(T, t)\n dkdtT = self.dkd(t, T)\n kvec_a = np.concatenate([k0T, kd0T])\n kvec_b = np.concatenate([dktT, dkdtT])\n \n return kd0t - np.dot(kvec_a, self.solve_G(kvec_b))", "title": "" }, { "docid": "453ddce699391e295b554ca1bd4fa59b", "score": "0.527696", "text": "def get_knot_vector(self):\n return self.knots", "title": "" }, { "docid": "965a41ff73e0375ab5023c77b0dad367", "score": "0.52736807", "text": "def calcpotKF(r,c1,c2,phi, param):\n if( r > param.lmbda ):\n return 0\n elif( c1 >= param.c0 and -c2 >= param.c0):\n return -1.0\n else:\n return 0", "title": "" }, { "docid": "29548754dabeb9ee58f74252cb1f1608", "score": "0.5270337", "text": "def E_qGqB_Lambdag_k(k, Eg, Covg, Eb, Covb,\n Skinv, Mk, mlfm):\n # Useful names\n L = mlfm.basis_mats\n N, K, R, D = mlfm.dim\n\n # pad Eg\n _Eg = np.concatenate((np.ones(N), Eg))\n\n _Covg = block_diag(np.zeros((N, N)), Covg)\n \n EggT = _Covg + np.outer(_Eg, _Eg)\n EbbT = Covb + np.outer(Eb, Eb)\n \n # reshape E[beta]\n Eb = Eb.reshape((R+1, D))\n # reshape E[g]\n Eg = Eg.reshape((R, N)) \n # get the expectation of the vectors Uki\n EA = np.array([sum(Ebrd*Ld for Ebrd, Ld in zip(Ebr, L))\n for Ebr in Eb])\n\n Euk = uk_flow_rep(k, Eg, EA)\n\n res = np.zeros((N*K, N*K)) # to hold the result\n SkinvMk = Skinv.dot(Mk)\n \n for i in range(K):\n\n Eduki_SkinvkMk = np.diag(Euk[i]).dot(SkinvMk)\n \n for j in range(i+1):\n # calculate E[uki ukjT]\n E_uik_ukj_T = EukiukjT(k, i, j, EggT, EbbT, L)\n\n res[i*N:(i+1)*N, j*N:(j+1)*N] += E_uik_ukj_T * Skinv\n\n if i == k:\n res[i*N:(i+1)*N, j*N:(j+1)*N] -= \\\n Mk.T.dot(Skinv.dot(np.diag(Euk[j])))\n\n if j == k:\n res[i*N:(i+1)*N, j*N:(j+1)*N] -= \\\n np.diag(Euk[i]).dot(Skinv.dot(Mk))\n \n if i == k and j == k:\n res[i*N:(i+1)*N, j*N:(j+1)*N] += Mk.T.dot(SkinvMk)\n\n # Symmetric matrix\n res[j*N:(j+1)*N, i*N:(i+1)*N] = res[i*N:(i+1)*N, j*N:(j+1)*N].T\n\n return res", "title": "" }, { "docid": "66759586f7a534412ae361f5bb69c8ff", "score": "0.52664065", "text": "def G0_krho_zz(k_rho,k):\n kz=np.sqrt(k**2-k_rho**2)\n Gzz_p=1j/(8*np.pi*k**2)*(k_rho/kz)*(2*k_rho**2) \n return Gzz_p", "title": "" }, { "docid": "accb09c00dc5f64ba0f5b8b704ed0552", "score": "0.52609265", "text": "def k(self, value):\n self._k = self.distributions.constant(value)", "title": "" }, { "docid": "cb26d108fe4a94826d4d72bf3e9f2bf6", "score": "0.5259553", "text": "def calc_kde(model_name):\n df = get_data(model_name)\n coefs = df[['MB', 'As']+['c'+str(i) for i in range(1, int(model_name[5:]))]].values\n v, l, vinv = np.linalg.svd(np.cov(coefs.T))\n rescaled_coefs = (np.diag(1./np.sqrt(l)) @ vinv @ coefs.T).T\n \n params = {'bandwidth': np.logspace(-1, 1, 100)}\n grid = GridSearchCV(KernelDensity(), params, cv=3)\n grid.fit(rescaled_coefs)\n kde = grid.best_estimator_\n return kde, v, l, grid", "title": "" }, { "docid": "a08d85abb38060177f58f842dffe5448", "score": "0.5258223", "text": "def mk_cosk_dist(k, th_max):\n # Not applicable for k=-1\n assert k != -1\n\n # Normalization constant for the theta distribution\n th_norm = 1 - (cos(th_max))**(k+1)\n\n # The distribution function (inverse CDF) itself\n def cosk_dist(u_th, u_ph):\n theta = arccos((1 - th_norm*u_th)**(1 / (k+1)))\n phi = u_ph * (2*pi)\n return (theta, phi)\n return cosk_dist", "title": "" }, { "docid": "fd806f0d483b3311bd3b7fb863742226", 
"score": "0.52455056", "text": "def uv_coek(params, xdata, *args, **kwargs):\n\n ke = params[0]\n rho = params[1]\n\n h0 = xdata[0]\n\n # Calculate free monomer concentration [H] or alpha: \n # eq 146 from Thordarson book chapter\n\n a = np.ones(h0.shape[0])*(((ke*h0)**2) - (rho*((ke*h0)**2)))\n b = 2*rho*ke*h0 - 2*ke*h0 - ((ke*h0)**2)\n c = 2*ke*h0 + 1\n d = -1. * np.ones(h0.shape[0])\n\n # Rows: data points, cols: poly coefficients\n poly = np.column_stack((a, b, c, d))\n\n # Solve cubic in [H] for each observation\n h = np.zeros(h0.shape[0])\n for i, p in enumerate(poly):\n roots = np.roots(p)\n\n # Smallest real +ve root is [H]\n select = np.all([np.imag(roots) == 0, np.real(roots) >= 0], axis=0)\n if select.any():\n soln = roots[select].min()\n soln = float(np.real(soln))\n else:\n # No positive real roots, set solution to 0\n soln = 0.0\n \n h[i] = soln\n \n # n.b. these fractions are multiplied by h0 \n\n # Calculate \"in stack\" concentration [Hs] or epislon: eq 149 from Thordarson book chapter\n hs = (h0*rho*h*((h*ke*h0)**2))/((1 - h*ke*h0)**2)\n\n # Calculate \"at end\" concentration [He] or gamma: eq 150 from Thordarson book chapter\n he = (h0*(2*rho*h*h*ke*h0))/(1 - h*ke*h0)\n \n # Convert to free concentration\n hc = h0*h\n\n mf_fit = np.vstack((hc, hs, he)) # Free concentration for fitting\n mf = np.vstack((hc/h0, hs/h0, he/h0)) # Real molefraction\n return mf_fit, mf", "title": "" }, { "docid": "dc685e69c5e49fb34faeaaa09c32e3e7", "score": "0.5235205", "text": "def _KF_gain(self, covar, H, R):\n S = tf.matmul(H, tf.matmul(covar,\n tf.linalg.matrix_transpose(H))) + R\n S = self._make_valid(S)\n\n s_inv = tf.linalg.inv(S)\n K = tf.matmul(covar, tf.matmul(tf.linalg.matrix_transpose(H), s_inv))\n\n return K", "title": "" }, { "docid": "9ad9a2a3d184594c79ab4a7c88d6f701", "score": "0.52343756", "text": "def pca(X, k):\r\n n = X.shape[1]\r\n mean_f = get_mean_face(X)\r\n diff = X - mean_f\r\n exp_1 = np.expand_dims(diff, axis=2)\r\n exp_2 = np.expand_dims(diff, axis=1)\r\n\r\n temp = np.matmul(exp_1, exp_2)\r\n cov = np.sum(temp, axis=0)\r\n\r\n eigen_values, eigen_vectors = np.linalg.eigh(cov)\r\n k_eigen_values = eigen_values[n-k:n]\r\n k_eigen_vectors = eigen_vectors[:, n-k:n]\r\n\r\n flipped_values = np.flip(k_eigen_values, axis=0)\r\n flipped_vectors = np.flip(k_eigen_vectors, axis=1)\r\n\r\n return flipped_vectors, flipped_values", "title": "" }, { "docid": "1e3d7bfcdb6d586dbb4b911d29d3b9a1", "score": "0.52313155", "text": "def Cov_0(self, t):\n \n assert isinstance(t, (float, np.float32, np.float64))\n assert self.ready\n \n T = np.array(self.ts)\n k0t = self.k(0., t)\n k0T = self.k(0., T)\n kd0T = self.kd(0., T)\n ktT = self.k(t, T)\n kdtT = self.kd(t, T)\n kvec_a = np.concatenate([k0T, kd0T])\n kvec_b = np.concatenate([ktT, kdtT])\n \n return k0t - np.dot(kvec_a, self.solve_G(kvec_b))", "title": "" }, { "docid": "a09fd94ef79aff20e24e1939af596676", "score": "0.5226093", "text": "def constraint(self, solution):\n x = solution.get_x()\n return self._k-sum(x)", "title": "" }, { "docid": "bd3bd7179113cec96c64fde0e87b16da", "score": "0.52233976", "text": "def _update( self ):\n from numpy.linalg import cholesky, solve, LinAlgError\n from numpy import transpose, eye, matrix\n import types\n self._K = self.calc_covariance( self.X )\n if not self._K.shape[0]: # we didn't have any data\n self._L = matrix(zeros((0,0), numpy.float64))\n self._alpha = matrix(zeros((0,1), numpy.float64))\n self.LL = 0.\n else:\n try:\n self._L = matrix(cholesky(self._K))\n except LinAlgError, detail:\n 
raise RuntimeError( \"\"\"Cholesky decomposition of covariance \"\"\"\n \"\"\"matrix failed. Your kernel may not be positive \"\"\"\n \"\"\"definite. Scipy complained: %s\"\"\" % detail )\n self._alpha = solve(self._L.T, solve(self._L,self.y))\n self.LL = (\n - self.n * math.log( 2.0 * math.pi )\n - ( self.y.T * self._alpha )[0,0]\n ) / 2.0\n # print self.LL\n #import IPython; IPython.Debugger.Pdb().set_trace()\n self.LL -= log(diagonal(self._L)).sum()\n # print self.LL\n # print 'Done updating'", "title": "" }, { "docid": "d2d57ccb4649566fd281cd82b26648ef", "score": "0.5213783", "text": "def k_voigt(self):\n return self[:3, :3].mean()", "title": "" }, { "docid": "c1a0142f05a43a4ab02b060a19713f23", "score": "0.5203244", "text": "def covariance(variable1, variable2):\n n = len(variable1)\n return Vector.dot_product(Stats.de_mean(variable1), Stats.de_mean(variable2) / (n - 1))", "title": "" }, { "docid": "adcbd7745f0d0a76481a94feff9bfbdb", "score": "0.519874", "text": "def plug_in_variance_estimator(x: np.ndarray) -> np.ndarray:\n if x.ndim < 2:\n x = x.reshape(1, -1)\n # the following two lines are a properly vectorized version of\n # middle_term = 0\n # for i in range(n):\n # middle_term += np.multiply.outer((x @ X[i] - (x @ X[i]) ** 2),\n # np.outer(X[i], X[i]))\n # where the matrix part does not involve x and has been computed above\n middle_term_scalar = x @ X.T - (x @ X.T) ** 2\n middle_term = np.tensordot(middle_term_scalar, middle_term_matrix, axes=1)\n covariances = delta_inverse @ (middle_term / n) @ delta_inverse\n return covariances", "title": "" }, { "docid": "7667d51f7dc8043aa4d81c9a7b4597fb", "score": "0.51966", "text": "def k_va(self) -> float:\n return self._k_va", "title": "" }, { "docid": "44d1449f09bbfeb45d91e3a97bc1e7a2", "score": "0.51869136", "text": "def c_bound_third_form(gibbs_risk, disagreement):\n return 1.0 - (1.0 - 2*gibbs_risk)**2 / (1.0 - 2*disagreement)", "title": "" }, { "docid": "e8f3252940bc9b80e72301ad8149a7da", "score": "0.5180961", "text": "def _ls_covar_k_wgrad(logpsi, gamma, tt, gradientgp,\n return_Cxx_inv_grad=False):\n kernel = gradientgp.kernel.clone_with_theta(logpsi)\n Cxx, Cxx_grad = kernel(tt[:, None], eval_gradient=True)\n Cxx[np.diag_indices_from(Cxx)] += gradientgp.alpha\n\n Cxdx, Cxdx_grad = kernel(tt[:, None], comp='xdx', eval_gradient=True)\n Cdxdx, Cdxdx_grad = kernel(tt[:, None], comp='dxdx', eval_gradient=True)\n Lxx = np.linalg.cholesky(Cxx)\n Mdx = Cxdx[..., 0].T.dot(cho_solve((Lxx, True), np.eye(tt.size)))\n S = Cdxdx[..., 0, 0] - \\\n Cxdx[..., 0].T.dot(cho_solve((Lxx, True), Cxdx[..., 0]))\n S[np.diag_indices_from(S)] += gamma\n\n # Calculate the gradients\n P = Cxx_grad.shape[-1] # size of parameter vector.\n Cxx_inv_grad = -np.dstack([cho_solve((Lxx, True),\n cho_solve((Lxx, True), Cxx_grad[:, :, p]).T)\n for p in range(P)])\n M_grad = np.dstack([cho_solve((Lxx, True), Cxdx_grad[..., 0, p]).T\n for p in range(P)])\n\n M_grad -= np.dstack([Cxdx[:, :, 0].dot(Cxx_inv_grad[:, :, p])\n for p in range(P)])\n \n Cdx_x_grad = Cdxdx_grad[:, :, 0, 0, :].copy()\n expr = np.dstack([Cxdx_grad[:, :, 0, p].T.dot(\n cho_solve((Lxx, True), Cxdx[..., 0]))\n for p in range(P)]) \n\n Cdx_x_grad -= expr\n Cdx_x_grad -= np.dstack([expr[:, :, p].T for p in range(P)])\n Cdx_x_grad -= np.dstack([Cxdx[:, :, 0].T.dot( \\\n Cxx_inv_grad[:, :, p].dot( \\\n Cxdx[:, :, 0])) for p in range(P)])\n\n if return_Cxx_inv_grad:\n return (Lxx, Mdx, np.linalg.cholesky(S)), \\\n (Cxx_inv_grad, M_grad, Cdx_x_grad)\n else:\n return (Lxx, Mdx, np.linalg.cholesky(S)), 
\\\n (Cxx_grad, M_grad, Cdx_x_grad)", "title": "" }, { "docid": "fe34af1bcf969d2c57e762945d61abf5", "score": "0.51751256", "text": "def _compute_constant(self):\n p, X_shape = self.x.shape[0], self.x.shape[-1]\n fft_shape = X_shape + self.D.shape[-1] - 1\n\n # Frequential domain representation\n self.fft_shape = fft_shape = next_fast_len(int(fft_shape))\n self.X_fft = X_fft = fft(self.x, n=fft_shape)\n self.D_fft = D_fft = fft(self.D, n=fft_shape)\n\n # Reshape so that all the variables have the same dimensions\n # [K, p, T]\n self.X_fft = X_fft = self.X_fft[None]\n\n # Precompute constants to accelerate frequency domain computations\n self.DtD_fft = (D_fft[:, None].conj() * D_fft[None]\n ).mean(axis=2, keepdims=False)\n self.DtX_fft = (D_fft.conj() * X_fft).mean(axis=1, keepdims=False)\n\n # Store extra dimensions\n self.T = p * np.prod(X_shape)\n\n # Compute DD\n self.DD = np.mean([[[fftconvolve(dk, dk1)\n for dk, dk1 in zip(d, d1)]\n for d1 in self.D]\n for d in self.D[:, :, ::-1]], axis=2)\n\n # Lipchitz constant\n # b_hat = np.random.rand(*self.pt.shape)\n # mu_hat = np.nan\n # for _ in range(100):\n # norm = np.linalg.norm(b_hat)\n # b_hat /= norm\n # fb_hat = self.grad(b_hat)\n # mu_old = mu_hat\n # mu_hat = np.sum(b_hat * fb_hat)\n # b_hat = fb_hat\n # if abs(mu_hat - mu_old) / mu_old < 1e-15:\n # break\n # self.L = mu_hat\n self.L = np.linalg.norm(self.DD, axis=(0, 1), ord=2).sum()\n # print(mu_hat, self.L)\n # np.linalg.norm(self.DtD_fft, axis=(0, 1), ord=2).sum()", "title": "" }, { "docid": "49d0dd634f4301a4cf3ceeb2a677a1b1", "score": "0.5174531", "text": "def C(P, h, bw):\n c0 = np.var(P[:, 2]) # sill variance, which is a priori variance\n if h == 0:\n return c0\n return c0 - SVh(P, h, bw)", "title": "" }, { "docid": "7673da528a6ba87c1ee44763af519052", "score": "0.5172947", "text": "def kerneldef3(h, k):\n return h(1e-4, 1e4, 10)*k('gaussian', \n [h(1e-4, 1e4, 0.1), h(1e-4, 1e4, 0.1), h(1e-4, 1e4, 0.1)])", "title": "" }, { "docid": "5472c51b2d2e92c229c8fce02efeac9c", "score": "0.5170199", "text": "def calc_C(phi):", "title": "" }, { "docid": "4d2613d47e0f1a6fa98cbe78484526e9", "score": "0.5165144", "text": "def error(self, pst):\n # Evaluate vector K_* and point K_**\n Kst = np.array([self.cov(pst, p1, self.hyperparams) for p1 in self.params])\n Kstst = self.cov(pst, pst, self.hyperparams)\n\n # Evaluate variance at y_*\n yst_var = Kstst - np.dot(Kst, np.dot(self.Kinv, Kst.T) )\n return np.sqrt(yst_var)", "title": "" }, { "docid": "96ea07a985a411851e0e104196df7988", "score": "0.51650155", "text": "def _compute_covariance(self):\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n # Compute the mean and residuals\n _mean = np.sum(self.weights * self.dataset, axis=1)\n _residual = (self.dataset - _mean[:, None])\n # Compute the biased covariance\n self._data_covariance = np.atleast_2d(np.dot(_residual * self.weights, _residual.T))\n # Correct for bias (http://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_covariance)\n self._data_covariance /= (1 - np.sum(self.weights ** 2))\n self._data_inv_cov = np.linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = np.sqrt(np.linalg.det(2*np.pi*self.covariance)) #* self.n", "title": "" }, { "docid": "2221fd7c1318c6ec7d5fea82b91d7c83", "score": "0.51634854", "text": "def SetContactKineticFrictionCoef(self, *args):\n return 
_pynewton.Material_SetContactKineticFrictionCoef(self, *args)", "title": "" }, { "docid": "373c1c324e0cab5f5faabf4ecfdf0965", "score": "0.51621443", "text": "def test_covariance_observer(self):\n if not available:\n self.skipTest(\"ASE version does not have CE!\")\n\n msg = \"\"\n no_throw = True\n from cemc.mcmc import FixEdgeLayers\n from cemc.mcmc import CovarianceMatrixObserver\n bc, args = get_ternary_BC(ret_args=True)\n ecis = get_example_ecis(bc=bc)\n atoms = get_atoms_with_ce_calc(bc, args, eci=ecis, size=[8, 8, 8], db_name=\"covariance_obs.db\")\n\n T = 200\n nn_names = [name for name in bc.cluster_family_names\n if int(name[1]) == 2]\n\n mc = FixedNucleusMC(\n atoms, T, network_name=nn_names,\n network_element=[\"Mg\", \"Si\"])\n\n fixed_layers = FixEdgeLayers(atoms=mc.atoms, thickness=3.0)\n mc.add_constraint(fixed_layers)\n elements = {\"Mg\": 6, \"Si\": 6}\n mc.insert_symbol_random_places(\"Mg\", num=1, swap_symbs=[\"Al\"])\n mc.grow_cluster(elements)\n\n cov_obs = CovarianceMatrixObserver(atoms=mc.atoms, cluster_elements=[\"Mg\", \"Si\"])\n mc.attach(cov_obs)\n for _ in range(10):\n mc.runMC(steps=100, elements=elements, init_cluster=False)\n\n obs_I = cov_obs.cov_matrix\n indices = []\n for atom in mc.atoms:\n if atom.symbol in [\"Mg\", \"Si\"]:\n indices.append(atom.index)\n cluster = mc.atoms[indices]\n pos = cluster.get_positions()\n com = np.mean(pos, axis=0)\n pos -= com\n cov_matrix = np.zeros((3, 3))\n for i in range(pos.shape[0]):\n x = pos[i, :]\n cov_matrix += np.outer(x, x)\n self.assertTrue(np.allclose(obs_I, cov_matrix))\n os.remove(\"covariance_obs.db\")", "title": "" }, { "docid": "6146538a11dbd0553bd48c7f986e9629", "score": "0.5161716", "text": "def covariance(x, mean_x, y, mean_y):\n\t\n\tcovar = 0.0\n\tfor i in range(x.shape[1]):\n\t\tcovar += (x[0][i]-mean_x) * (y[0][i]-mean_y)\n\treturn covar", "title": "" }, { "docid": "6f5f2f32e2912dc32ef0e0a19b2bf05e", "score": "0.5157495", "text": "def generate_covariance_matrix(self, diff_regime: int):\n\n def _model_variance(dt: np.ndarray, a: float) -> np.ndarray:\n \"\"\"\n Determine the model variance, based on a quadratic relationship with the number of\n independent samples as a divisor.\n\n :param dt: Timestep value\n :param a: Quadratic coefficient\n :return: Model variances\n \"\"\"\n return a / self._n_o[diff_regime:] * dt**2\n\n self._popt, _ = curve_fit(_model_variance, self.dt[diff_regime:], self._v[diff_regime:])\n self._model_v = _model_variance(self.dt[diff_regime:], *self._popt)\n self._covariance_matrix = _populate_covariance_matrix(self._model_v, self._n_o[diff_regime:])\n self._npd_covariance_matrix = self._covariance_matrix\n return find_nearest_positive_definite(self._covariance_matrix)", "title": "" }, { "docid": "0c9b8e8136e29e2cae460cee5fde7375", "score": "0.51567024", "text": "def calculate_covariace(matrixX):\n # Calculate covariave matrix\n covariance = (matrixX * np.transpose(matrixX)) / (np.shape(matrixX)[1]-1)\n\n return covariance", "title": "" }, { "docid": "eca8111407e7b628db0a343483243056", "score": "0.5142162", "text": "def calculate_new_k(self):\n self.k = calculate_k_vector(self.vx, self.vy, self.vz, self.w)", "title": "" }, { "docid": "28616b2946e5d1644207a354dbfc8e34", "score": "0.51415783", "text": "def G0_krho_xx(k_rho,k): \n kz=np.sqrt(k**2-k_rho**2)\n Gxx_s=1j/(8*np.pi*k**2)*k_rho*k**2/kz\n Gxx_p=-1j/(8*np.pi*k**2)*k_rho*kz\n return Gxx_p,Gxx_s", "title": "" }, { "docid": "23a4e45ac3acb088f52a5357a0a22afd", "score": "0.514062", "text": "def covariance_matrix(self, 
endog_expval, index):\n raise NotImplementedError", "title": "" }, { "docid": "93b2738a954fa072c7bad0a4b815a0c4", "score": "0.513444", "text": "def K_gradients(self, X1, X2):\n \n r = self.r(X1, X2)\n K_r = self.K_r(r)\n \n # W.r.t variance\n dK_dv = K_r/self.variance\n \n # W.r.t lengthscales\n dK_dr = self.dK_dr(r)\n dr_dl = self.dr_dl(X1, X2)\n \n dK_dl = [dK_dr*dr_dl_i for dr_dl_i in dr_dl]\n\n return [dK_dv] + dK_dl", "title": "" }, { "docid": "32eb32181f0d6f220d0e9ce7e3845fb7", "score": "0.513428", "text": "def covariance(self) -> NONEARRAY:\n\n if self.save_residuals:\n return np.nan*np.zeros((2, 2), dtype=np.float64)\n else:\n return None", "title": "" }, { "docid": "bd6d775e45dfdc0803e1fe75b7959776", "score": "0.5121511", "text": "def eval_uncertainty(self, params=None, sigma=1, **kwargs):\n userkws = self.userkws.copy()\n userkws.update(kwargs)\n if params is None:\n params = self.params\n\n nvarys = self.nvarys\n # ensure fjac and df2 are correct size if independent var updated by kwargs\n feval = self.model.eval(params, **userkws)\n ndata = len(feval.view('float64')) # allows feval to be complex\n covar = self.covar\n if any(p.stderr is None for p in params.values()):\n return np.zeros(ndata)\n\n # '0' would be an invalid prefix, here signifying 'Full'\n fjac = {'0': np.zeros((nvarys, ndata), dtype='float64')}\n df2 = {'0': np.zeros(ndata, dtype='float64')}\n\n for comp in self.model.components:\n label = comp.prefix if len(comp.prefix) > 1 else comp._name\n fjac[label] = np.zeros((nvarys, ndata), dtype='float64')\n df2[label] = np.zeros(ndata, dtype='float64')\n\n # find derivative by hand!\n pars = params.copy()\n for i in range(nvarys):\n pname = self.var_names[i]\n val0 = pars[pname].value\n dval = pars[pname].stderr/3.0\n pars[pname].value = val0 + dval\n res1 = {'0': self.model.eval(pars, **userkws)}\n res1.update(self.model.eval_components(params=pars, **userkws))\n\n pars[pname].value = val0 - dval\n res2 = {'0': self.model.eval(pars, **userkws)}\n res2.update(self.model.eval_components(params=pars, **userkws))\n\n pars[pname].value = val0\n for key in fjac:\n fjac[key][i] = (res1[key].view('float64')\n - res2[key].view('float64')) / (2*dval)\n\n for i in range(nvarys):\n for j in range(nvarys):\n for key in fjac:\n df2[key] += fjac[key][i] * fjac[key][j] * covar[i, j]\n\n if sigma < 1.0:\n prob = sigma\n else:\n prob = erf(sigma/np.sqrt(2))\n\n scale = t.ppf((prob+1)/2.0, self.ndata-nvarys)\n\n # for complex data, convert back to real/imag pairs\n if feval.dtype in ('complex64', 'complex128'):\n for key in fjac:\n df2[key] = df2[key][0::2] + 1j * df2[key][1::2]\n\n self.dely = scale * np.sqrt(df2.pop('0'))\n\n self.dely_comps = {}\n for key in df2:\n self.dely_comps[key] = scale * np.sqrt(df2[key])\n return self.dely", "title": "" }, { "docid": "00775e138b697d5239f8795c34638a65", "score": "0.5120621", "text": "def covariance(atm,rho):\n c1 = (24*spc.gamma(6/5)/5)**(5/6)\n c2 = spc.gamma(11/6)/(2**(5/6)*np.pi**(8/3)) \n c3 = spc.gamma(11/6)*spc.gamma(5/6)/(2*np.pi**(8/3))\n L0r0ratio= (atm.L0/atm.r0)**(5/3) \n \n if not np.isscalar(rho):\n cov = c1*c3*L0r0ratio*np.ones(rho.shape) \n index = rho!=0\n u = 2*np.pi*rho[index]/atm.L0\n cov[index] = c1*c2*L0r0ratio*u**(5/6)*spc.kv(5/6,u)\n else:\n if rho==0:\n cov = c1*c3*L0r0ratio\n else:\n u = 2*np.pi*rho/atm.L0\n cov = c1*c2*L0r0ratio*u**(5/6)*spc.kv(5/6,u)\n \n return cov", "title": "" }, { "docid": "a5ac711dd1d025f7fd2e4a5d91a710f0", "score": "0.5112961", "text": "def __call__(self, pst):\n # Evaluate vector K_*\n Kst = 
np.array([self.cov(pst, p1, self.hyperparams) for p1 in self.params])\n\n # Evaluate y_*\n return np.dot(Kst, self.Kinv_dot_y)", "title": "" }, { "docid": "4c1ffb9f4e9a846355acea2cfdb9e097", "score": "0.5112682", "text": "def calc_constraints_forces(self, constraint):\n Fk = constraint._force_vector_a2b\n if constraint.b is self:\n Fk = -Fk\n self.total_forces = self.total_forces + Fk\n return Fk", "title": "" }, { "docid": "d4aa0889494245740126c269689a95cd", "score": "0.51103", "text": "def mgcyc(self, k, gamma, ukm, fk, nu1, nu2 \\\n , smoother=None, coarse_solver=None):\n if smoother is None:\n smoother = self.smoother\n if coarse_solver is None:\n coarse_solver = self.coarse_solver\n\n nlevels = self.nlevels + 1\n\n lvl = self.levels[::-1][nlevels-k]\n lvl1 = self.levels[::-1][nlevels-k-1]\n\n Rk = lvl.R\n Pk = lvl.P\n Lk = lvl1.A\n Lk1 = lvl.A\n\n # ----------------------------\n # presmoothing\n # ----------------------------\n ukm_s = lvl1.smoother(ukm, fk, nu1)\n# ukm_s = smoother(nu1, ukm, Lk, fk)\n # ----------------------------\n\n # ----------------------------\n # coarse grid correction\n # ----------------------------\n # Compute the defect\n dkm = fk - Lk.dot(ukm_s)\n # Restrict the defect\n dk1m = Rk.dot(dkm)\n # Compute an approximate solution vk1m of the defect equation on Omega_{k-1}\n # if k = 1, use a direct or fast iterative solver, by calling\n if k == 1:\n # TODO : enlever le guess\n guess = np.zeros_like(dk1m)\n vk1m = lvl.solve(dk1m)\n# vk1m = coarse_solver(Lk1, guess, dk1m)\n if k > 1:\n a = np.zeros_like(dk1m)\n vk1m_ = dk1m\n for i in range(0, gamma):\n dk1m_ = vk1m_\n vk1m_, err_ = self.mgcyc(k-1, gamma, a, dk1m_, nu1, nu2 \\\n , smoother=smoother \\\n , coarse_solver=coarse_solver)\n vk1m = vk1m_\n\n # Interpolate the correction\n# print \"vk1m : \", vk1m.__class__.__name__, vk1m.shape\n# print \"Pk : \", Pk.__class__.__name__, Pk.shape\n vkm = Pk.dot(vk1m)\n # Compute the corrected approximation\n ukm += vkm\n # ----------------------------\n\n # ----------------------------\n # postsmoothing\n # ----------------------------\n ukp1m = lvl1.smoother(ukm, fk, nu2)\n# ukp1m = smoother(nu2, ukm, Lk, fk)\n # ----------------------------\n\n err = residual_norm(Lk, ukp1m, fk)\n\n return ukp1m, err", "title": "" }, { "docid": "b89ba8536b312710d0ee19ca6d0ddefc", "score": "0.50979984", "text": "def cov(self, m, n):\n # see [1]_ for the formula and [2]_ for implementation\n # cov( x_i,x_j ) = -n * (M-n)/(M-1) * (K_i*K_j) / (M**2)\n M, m, n, _, _, mncond = self._process_parameters(m, n)\n # check for empty arrays\n if m.size != 0:\n M = M[..., np.newaxis, np.newaxis]\n n = n[..., np.newaxis, np.newaxis]\n cond = (M == 0) & (M-1 == 0)\n M = np.ma.masked_array(M, mask=cond)\n output = (-n * (M-n)/(M-1) *\n np.einsum(\"...i,...j->...ij\", m, m) / (M**2))\n # check for empty arrays\n if m.size != 0:\n M, n = M[..., 0, 0], n[..., 0, 0]\n cond = cond[..., 0, 0]\n dim = m.shape[-1]\n # diagonal entries need to be computed differently\n for i in range(dim):\n output[..., i, i] = (n * (M-n) * m[..., i]*(M-m[..., i]))\n output[..., i, i] = output[..., i, i] / (M-1)\n output[..., i, i] = output[..., i, i] / (M**2)\n if m.size != 0:\n mncond = (mncond[..., np.newaxis, np.newaxis] |\n np.zeros(output.shape, dtype=np.bool_))\n return self._checkresult(output, mncond, np.nan)", "title": "" }, { "docid": "9e1a67b034ef7149f787dd4a11523a0d", "score": "0.5088745", "text": "def __variable_density(self, x, mean_standard_deviation):\n\n n = len(x)\n\n Kck = np.zeros((n,1))\n\n for i in 
range(0,n):\n\n gaussian_kernel_sum = 0\n\n for j in range(0,n):\n\n gaussian_kernel_sum = gaussian_kernel_sum + self.__gaussian_kernel(x[i], x[j], n, mean_standard_deviation)\n\n Kck[i] = 1/n * gaussian_kernel_sum\n\n return(Kck)", "title": "" }, { "docid": "62dee25d095c82810401704e7e89c5d4", "score": "0.50866556", "text": "def qKq(self) -> np.float:\n raise NotImplementedError", "title": "" }, { "docid": "ce012dc7285ed49c6c5bdf5f57be43fc", "score": "0.5084312", "text": "def pc_calc_helk(var,k):\n\n aa_k = pc_calc_Ak(var,k)\n bb_k = pc.curl(aa_k,var.dx,var.dy,var.dz)\n \n hel = (pc.dot(aa_k,bb_k))[3:-3,3:-3,3:-3]\n mag = (pc.dot2(bb_k))[3:-3,3:-3,3:-3]\n vol = (var.x[-3]-var.x[3])*(var.y[-3]-var.y[3])*(var.z[-3]-var.z[3])\n\n # NB: this assumes cubic zones\n dk = FourierFilter(aa_k[0]).dk * 2*na.pi/var.dx\n if k == 0:\n k = 1\n\n # i think it should be k, and not dk; but i'm not sure yet\n return hel.mean()/k, mag.mean()/(2.*k)\n #return hel.mean()*vol/dk, mag.mean()*vol/dk", "title": "" }, { "docid": "715fd1bb79b15d117b22021d4916aec6", "score": "0.5083044", "text": "def cdf(k, r, p):\n with mp.extradps(5):\n r, p = _validate_params(r, p)\n k = _validate_k(k)\n if k < 0:\n return mp.zero\n return mp.betainc(k + 1, r, p, 1, regularized=True)", "title": "" } ]
7f7d4113fc302ccc6ad2684a5d6e1a10
Nothing should be checked w/no Manifest files.
[ { "docid": "03e3aaa9c501bc5a9576d6fe39437b18", "score": "0.679476", "text": "def testNoManifests(self):\n self.file_mock.return_value = [\n '/foo/bar.txt',\n '/readme.md',\n '/manifest',\n '/Manifest.txt',\n ]\n ret = pre_upload._check_manifests('PROJECT', 'COMMIT')\n self.assertIsNone(ret)", "title": "" } ]
[ { "docid": "e5c8ce4283e35b3c3f0bc3eb507785a3", "score": "0.6807853", "text": "def _can_run(self, manifest):\n pass", "title": "" }, { "docid": "3a57e0dad785654907e4bb733f8d9293", "score": "0.6682673", "text": "def manifest(self):", "title": "" }, { "docid": "2a325555b1c46eb60967baa677b0722d", "score": "0.66454595", "text": "def test_manifest006(self, ret, stdout, stderr):\n self.assertNotIn(\"correct\", stderr)\n self.assertIn(\"WARNING userland.manifest006.0 \"\n \"file usr/lib/python/without.py doesn't have corresponding .pyc file\\n\", stderr)\n self.assertIn(\"ERROR userland.manifest006.9 \"\n \"file usr/lib/python/orphan.pyc doesn't have corresponding .py source file\\n\", stderr)\n self.assertIn(\"ERROR userland.manifest006.1 \"\n \"bad pyc magic number in usr/lib/python/__pycache__/magicmismatch.cpython-37.pyc\\n\", stderr)\n\n self.assertIn(\"ERROR userland.manifest006.5 \"\n \"bytecode is stale in usr/lib/python/stale2.pyc\\n\", stderr)\n self.assertIn(\"ERROR userland.manifest006.5 \"\n \"bytecode is stale (timestamp) in usr/lib/python/__pycache__/stale3.cpython-39.pyc\\n\", stderr)", "title": "" }, { "docid": "4a2fd073c007cda468f75c904310d910", "score": "0.6596255", "text": "def file_required(self):\n pass", "title": "" }, { "docid": "7cbc24f45e078bee7a80735be45bbcfb", "score": "0.6511075", "text": "def Manifest(self) -> _n_9_t_1:", "title": "" }, { "docid": "9efd4e1b299b45c2f45c6af75db8bbce", "score": "0.6506366", "text": "def test_blank_manifest(self):\n # element = self._get_to_manifest_upload_point()\n # self.dtol_manifest.values()\n # print(len(self.dtol_manifest))\n\n pass", "title": "" }, { "docid": "6369105a3b42621adc0c2d8a5cec7f45", "score": "0.6451423", "text": "def test_multiple_load(self):\n m = gemato.manifest.ManifestFile()\n m.load(io.StringIO(TEST_MANIFEST))\n m.load(io.StringIO(TEST_DEPRECATED_MANIFEST))\n self.assertIsNone(m.find_timestamp())\n self.assertIsNone(m.find_path_entry('eclass/Manifest'))", "title": "" }, { "docid": "5e67593d05e2f76226ca0e7f14fa6027", "score": "0.63563204", "text": "def test_empty(self):\n manifest = self.getXmlManifest(\n '<?xml version=\"1.0\" encoding=\"UTF-8\"?>' \"<manifest></manifest>\"\n )\n self.assertEqual(manifest.remotes, {})\n self.assertEqual(manifest.projects, [])", "title": "" }, { "docid": "a8cb5918e55e9fabc0ab25bd38414067", "score": "0.6342134", "text": "def test_manifest001_empty(self, ret, stdout, stderr):\n self.assertEqual(ret, 0)\n self.assertEqual(stderr, \"\")", "title": "" }, { "docid": "21b64a0db2c8a5b038840d6371e610d2", "score": "0.6332339", "text": "def validate_manifests(path):\n manifest = Manifest(path=path)\n\n assert_true(manifest.data)\n assert_true(manifest.data['provider'])\n assert_true(manifest.data['bootstrapper'])\n assert_true(manifest.data['image'])\n assert_true(manifest.data['volume'])\n assert_true(manifest.data['system'])", "title": "" }, { "docid": "d2b004ced30e9e852d1693417d03a60f", "score": "0.6269182", "text": "def test_destiny2_get_destiny_manifest(self):\n pass", "title": "" }, { "docid": "68c9588a3d71876db17746465d193921", "score": "0.6265405", "text": "def is_manifest(self):\n xml = self.read()\n return xml is not None \\\n and xml.documentElement.tagName == 'extension'", "title": "" }, { "docid": "e79f1017a24c5be030acca5ea1d07c50", "score": "0.62611353", "text": "def _validate_manifest(opts, project, pkgflags, mappings):\n warnings = []\n errors = []\n\n mf = project.manifest\n if not mf:\n return warnings, errors # hrm, shouldn't have gotten this far\n\n # perform simple 
checks\n pkg_id = mf.package_name\n if not pkg_id:\n warnings.append(\"manifest is lacking a <manifest ml:package> attribute\")\n else:\n # make sure package name is legitimate\n pkg_id_regex = r'[a-z0-9_]+(\\.[a-z0-9_]+)*(-[a-zA-Z0-9]*)?'\n if not re.fullmatch(pkg_id_regex, pkg_id):\n warnings.append(\"<manifest ml:package> '{}' does not follow convention (regex: {})\".format(\n pkg_id, pkg_id_regex))\n elif len(pkg_id) > 127:\n warnings.append(\"<manifest ml:package> is too long (maximum 127 characters)\", )\n\n report_contents = False\n if pkgflags.validate_manifest:\n # make sure all the binaries mentioned in the manifest actually ship\n all_remotes = [remote.replace('\\\\', '/') for (local, remote)\n in mappings.build_mapping + mappings.data_mapping if remote]\n\n for component in mf.components:\n report = errors if pkgflags.enforce_manifest and pkgflags.validate_manifest else warnings\n if not component.name:\n report.append(\"<component> missing a ml:name attribute\")\n if not component.binary_file:\n report.append(\n \"<component ml:name=\\\"{0}\\\"> missing the ml:binary_name attribute\".format(component.name))\n else:\n # make sure it's recorded\n bin_found = component.binary_file in all_remotes or \\\n (\"./\" + component.binary_file) in all_remotes\n if not bin_found:\n report.append(\n \"<component ml:name=\\\"{}\\\"> has ml:binary_name \\\"{}\\\" which is not bundled in this package\"\n .format(component.name, component.binary_file))\n report_contents = True\n\n if report_contents:\n report.append(\"these are the package's files (see --print-outputs):\\n\"\n \"\\t{}\".format(\"\\n\\t\".join(all_remotes)))\n\n # now, validate the schema\n vwarnings, verrors = project.manifest.validate_schema(verbose=opts.verbose, strict=pkgflags.strict_manifest)\n if pkgflags.enforce_manifest:\n # problems are errors\n warnings += vwarnings\n errors += verrors\n else:\n # problems are just warnings\n warnings += vwarnings + verrors\n\n return warnings, errors", "title": "" }, { "docid": "e5e2dcebab1475878a72774d9fc4da25", "score": "0.62604743", "text": "def only_template_manifest_does_not_exist(args):\n if args == Path(\"/any/existing/local/path/manifest.yaml\"):\n return False\n return True", "title": "" }, { "docid": "44a59760fc97d3b9c6300fadfe79f7cb", "score": "0.6226091", "text": "def _file_check():\n pass", "title": "" }, { "docid": "a3656a4d5c09b453e2a8587b5351e691", "score": "0.6184747", "text": "def dir_has_manifest(models_dir):\n return os.path.isfile(ModelsManifest.make_manifest_path(models_dir))", "title": "" }, { "docid": "a20bd8372aefe5bba42ef6c05cd00794", "score": "0.6177393", "text": "def check_for_setup_error(self):", "title": "" }, { "docid": "819925b915997a7c1557a03a6891bfde", "score": "0.6120706", "text": "def test_bad_name_checks(self):\n\n def parse(name):\n name = self.encodeXmlAttr(name)\n # Setup target of the include.\n with open(\n os.path.join(self.manifest_dir, \"target.xml\"),\n \"w\",\n encoding=\"utf-8\",\n ) as fp:\n fp.write(f'<manifest><include name=\"{name}\"/></manifest>')\n\n manifest = self.getXmlManifest(\n \"\"\"\n<manifest>\n <remote name=\"default-remote\" fetch=\"http://localhost\" />\n <default remote=\"default-remote\" revision=\"refs/heads/main\" />\n <include name=\"target.xml\" />\n</manifest>\n\"\"\"\n )\n # Force the manifest to be parsed.\n manifest.ToXml()\n\n # Handle empty name explicitly because a different codepath rejects it.\n with self.assertRaises(error.ManifestParseError):\n parse(\"\")\n\n for path in INVALID_FS_PATHS:\n if not 
path:\n continue\n\n with self.assertRaises(error.ManifestInvalidPathError):\n parse(path)", "title": "" }, { "docid": "e84c825c9298e4c7b0aa0818a77aa9fe", "score": "0.6055252", "text": "def check_if_required(self):", "title": "" }, { "docid": "f107da661fb6414acb2a9df55dc6e3c3", "score": "0.60428727", "text": "def accepts(self, manifest: Any) -> bool:\n return True", "title": "" }, { "docid": "f107da661fb6414acb2a9df55dc6e3c3", "score": "0.60428727", "text": "def accepts(self, manifest: Any) -> bool:\n return True", "title": "" }, { "docid": "45a3e10f127de197f39b60786441fd86", "score": "0.60044044", "text": "def _check_integrity(self):\n return True", "title": "" }, { "docid": "7cce1678964081bf259fb98911615a87", "score": "0.5990659", "text": "def _check_before_run(self):\n if not osp.exists(self.data_list):\n raise RuntimeError(\"'{}' is not available\".format(self.data_list))\n \"\"\"\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))\n if not osp.exists(self.probe_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.probe_dir))\n \"\"\"", "title": "" }, { "docid": "9d8f5a863926827d55b1e446a8d9024e", "score": "0.59790415", "text": "def validate(self):\n pass\n # TODO: This has to check both the local directory and the\n # directory in the source distribution", "title": "" }, { "docid": "8f09c83742370a9372a28c8ad975abb4", "score": "0.5971247", "text": "def test_manifest003_missing_incorporate(self, ret, stdout, stderr):\n package = \"pkg:/library/[email protected],11.4-11.4.33.0.0.92.0\"\n\n self.assertIn(\"ERROR userland.manifest003.2 \"\n f\"package {package} depends on CFFI, but does not incorporate it (should be at\", stderr)", "title": "" }, { "docid": "78e51cc3093f0b608e0eef8a6255ff52", "score": "0.596226", "text": "def __validate(data = {}):\n #check for the required parameters describing an application\n \n print data.keys()\n for rf in ManifestParser.REQUIRED_FIELDS:\n if not data.has_key(rf):\n raise Exception(\"Missing field from manifest : <%s>.\" % rf)", "title": "" }, { "docid": "6c3b8681e837a7ff9d724399737be5b8", "score": "0.5947659", "text": "def test_missing_manifest_elements():\n\n with open(\"jetpack/addon-sdk/python-lib/cuddlefish/\"\n \"app-extension/bootstrap.js\") as bootstrap_file:\n bootstrap = bootstrap_file.read()\n bootstrap_hash = hashlib.sha256(bootstrap).hexdigest()\n\n harnessoptions = {\n \"sdkVersion\": \"foo\",\n \"jetpackID\": \"foobar\",\n \"bundleID\": \"\",\n \"manifest\":\n {\"resource://bootstrap.js\":\n {\"requirements\": {},\n \"packageName\": \"addon-kit\",\n \"moduleName\": \"drawing\",\n \"jsSHA256\": bootstrap_hash,\n \"docsSHA256\": bootstrap_hash}}}\n\n with open(\"jetpack/addon-sdk/python-lib/cuddlefish/\"\n \"app-extension/components/harness.js\") as harness_file:\n harness = harness_file.read()\n\n err = _do_test(MockXPI({\"bootstrap.js\": bootstrap,\n \"resources/bootstrap.js\": bootstrap,\n \"components/harness.js\": harness,\n \"harness-options.json\":\n json.dumps(harnessoptions)}))\n print err.print_summary(verbose=True)\n assert err.failed()", "title": "" }, { "docid": "b498cb747170f5777eee74958c3ab3f1", "score": "0.5935656", "text": "def test_manifest002_allowed_publishers(self, ret, stdout, stderr):\n self.assertEqual(ret, 0)\n self.assertEqual(stderr, \"\")", "title": "" }, { "docid": "197e5e647f3dd17df03a513eac786d91", "score": "0.5926795", "text": "def test_corrupt_manifest(self):\n collector = NpmCollector()\n collector.parse_and_collect(\n 
MANIFEST_START.replace('\"repository\": {', '') + DEP_1 + MANIFEST_END, True)\n packages = dict(collector.counter.most_common())\n assert packages == {\n 'body-parser': 1\n }", "title": "" }, { "docid": "944022e1071fdc418b19ac6102b69f3a", "score": "0.59155476", "text": "def CheckFile(self):\n raise Exception('need to be implemented')", "title": "" }, { "docid": "550307f72db10d505932da99e6f4b81a", "score": "0.59019893", "text": "def check_for_setup_error(self):\r\n pass", "title": "" }, { "docid": "550307f72db10d505932da99e6f4b81a", "score": "0.59019893", "text": "def check_for_setup_error(self):\r\n pass", "title": "" }, { "docid": "550307f72db10d505932da99e6f4b81a", "score": "0.59019893", "text": "def check_for_setup_error(self):\r\n pass", "title": "" }, { "docid": "550307f72db10d505932da99e6f4b81a", "score": "0.59019893", "text": "def check_for_setup_error(self):\r\n pass", "title": "" }, { "docid": "542ea57170fa23bcbb1b28a160a0e9ec", "score": "0.58889806", "text": "def test_manifest001_missing_license(self, ret, stdout, stderr):\n self.assertIn(\"ERROR userland.manifest001.0 \"\n \"missing license action in pkg:/library/[email protected]\", stderr)\n self.assertIn(\"ERROR userland.manifest001.0 \"\n \"missing ARC data (org.opensolaris.arc-caseid) in pkg:/library/[email protected]\", stderr)", "title": "" }, { "docid": "7ef7b4b738abd5440d292de8914c78ca", "score": "0.587596", "text": "def testAcNameTypeChecking(self):\n with self.assertRaises(ValueError):\n manifest = copy.deepcopy(SIMPLE_MANIFEST)\n manifest['apps'][0]['name'] = u'我喜欢乌龟'\n self._generator.GetSandboxSpec(manifest, SANDBOX_NAME)\n with self.assertRaises(ValueError):\n manifest = copy.deepcopy(SIMPLE_MANIFEST)\n manifest['apps'][0]['image']['name'] = u'我是一只猫'\n self._generator.GetSandboxSpec(manifest, SANDBOX_NAME)", "title": "" }, { "docid": "b0459a18fe16c2dd1cc3bb360afd54d0", "score": "0.58683264", "text": "def test_manifest005(self, ret, stdout, stderr):\n package = \"pkg:/library/[email protected],11.4-11.4.33.0.0.92.0\"\n\n self.assertIn(\"ERROR userland.manifest005.1 \"\n f\"Package {package} is being published for wrong architecture {{'arm64'}} instead of {platform.processor()}:\\n\"\n \"('variant.arch', {'arm64'})\", stderr)\n self.assertIn(\"ERROR userland.manifest005.2 \"\n f\"The manifest {package} contains action with wrong architecture '['arm64']' (instead of '{platform.processor()}'):\\n\"\n \"file NOHASH group=bin mode=0444 owner=root path=usr/lib/pkglinttest/xyz.py variant.arch=arm64\", stderr)\n\n # the following assertions differ based on the architecture\n if platform.processor() == \"i386\":\n self.assertIn(\"ERROR userland.manifest005.2 \"\n f\"The manifest {package} contains action with wrong architecture '['sparc']' (instead of 'i386'):\\n\"\n \"file NOHASH group=bin mode=0444 owner=root path=usr/lib/pkglinttest/foo.py variant.arch=sparc\", stderr)\n else:\n self.assertIn(\"ERROR userland.manifest005.2 \"\n f\"The manifest {package} contains action with wrong architecture '['i386']' (instead of 'sparc'):\\n\"\n \"file NOHASH group=bin mode=0444 owner=root path=usr/lib/pkglinttest/bar.py variant.arch=i386\", stderr)", "title": "" }, { "docid": "03e3aaf404fd44bf0447bb6551f1b22d", "score": "0.5856059", "text": "def test_manifest(self):\n from ambry.warehouse.manifest import Manifest\n from ambry.util import get_logger\n from ambry.util import print_yaml\n\n m = Manifest(self.m,get_logger('TL') )\n\n self.assertEqual(self.m_contents.strip(), str(m).strip())\n\n\n l = self.get_library()\n 
l.put_bundle(self.bundle)\n\n for k, ident in l.list().items():\n print ident\n\n w = self.get_warehouse(l, 'sqlite')\n print 'Installing to ', w.database.path\n\n w.title = \"This is the Warehouse!\"\n\n w.about = \"A Warehouse full of wonder\"\n\n w.install_manifest(m)\n\n extracts = w.extract(force=True)\n\n print print_yaml(extracts)", "title": "" }, { "docid": "646ab760f5970db780cd96604c6f3bfb", "score": "0.58514285", "text": "def _read_manifest(self):\n logging.debug('STARTUP: Reading manifest')\n lines = self._raw_manifest()\n\n # Remove trailing newlines, they do not help keep absolute position.\n while lines and lines[-1] == \"\":\n lines = lines[:-1]\n\n for num, line in enumerate(lines):\n if not line:\n continue\n\n # Remove duplicates\n if line in lines[0:num]:\n lines[num] = \"\"\n logging.warning(\"Bundle %s: duplicate entry in MANIFEST: %s\"\n % (self._name,line))\n continue\n \n # Remove MANIFEST\n if line == \"MANIFEST\":\n lines[num] = \"\"\n logging.warning(\"Bundle %s: MANIFEST includes itself: %s\"\n % (self._name,line))\n \n # Remove invalid files\n if not self.is_file(line):\n lines[num] = \"\"\n logging.warning(\"Bundle %s: invalid entry in MANIFEST: %s\"\n % (self._name,line))\n\n return lines", "title": "" }, { "docid": "9e8fc814ef4a7931903662fe0dbe8fe7", "score": "0.5841003", "text": "def _create_manifest(self):\n raise NotImplementedError()", "title": "" }, { "docid": "9bdc973d3c04e5a160db4c4046c9b6aa", "score": "0.5828794", "text": "def check_requirements_is_only_for_applications() -> None: # pragma: no cover\n _check_requirements_is_only_for_applications_impl()", "title": "" }, { "docid": "59fafeaf8223b851c4852392548a2eb5", "score": "0.5822405", "text": "def check_for_setup_error(self):\r\n return", "title": "" }, { "docid": "8ff63e37f47bdfb8d7ea4ab114046051", "score": "0.5820257", "text": "def clean(self):\n return not os.path.exists(self.module_path()) and not os.path.exists(\n self.modulefile_path()\n )", "title": "" }, { "docid": "6aff598a543ecc75e5ac976a0fef07dc", "score": "0.5818763", "text": "def check_for_setup_error(self):\n pass", "title": "" }, { "docid": "6aff598a543ecc75e5ac976a0fef07dc", "score": "0.5818763", "text": "def check_for_setup_error(self):\n pass", "title": "" }, { "docid": "6aff598a543ecc75e5ac976a0fef07dc", "score": "0.5818763", "text": "def check_for_setup_error(self):\n pass", "title": "" }, { "docid": "6aff598a543ecc75e5ac976a0fef07dc", "score": "0.5818763", "text": "def check_for_setup_error(self):\n pass", "title": "" }, { "docid": "6aff598a543ecc75e5ac976a0fef07dc", "score": "0.5818763", "text": "def check_for_setup_error(self):\n pass", "title": "" }, { "docid": "6aff598a543ecc75e5ac976a0fef07dc", "score": "0.5818763", "text": "def check_for_setup_error(self):\n pass", "title": "" }, { "docid": "6aff598a543ecc75e5ac976a0fef07dc", "score": "0.5818763", "text": "def check_for_setup_error(self):\n pass", "title": "" }, { "docid": "603eebb14c308a5a1d1e13cd5491dadb", "score": "0.58148795", "text": "def check_requirements_integrity() -> None: # pragma: no cover\n _check_requirements_integrity_impl()", "title": "" }, { "docid": "94a5339402de940c8d94af1f3707cfec", "score": "0.5812505", "text": "def _check_before_run(self):\r\n if not osp.exists(self.root):\r\n raise RuntimeError(\"'{}' is not available\".format(self.root))\r\n # if not osp.exists(self.train_dir):\r\n # raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\r\n if not osp.exists(self.query_dir):\r\n raise RuntimeError(\"'{}' is not 
available\".format(self.query_dir))\r\n if not osp.exists(self.gallery_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "title": "" }, { "docid": "5c569fe96611ef72d674b8585663221b", "score": "0.57960975", "text": "def test_manifest001_license_only(self, ret, stdout, stderr):\n self.assertEqual(ret, 0)\n self.assertEqual(stderr, \"\")", "title": "" }, { "docid": "33d95c4d911ca8322ee3929328fddf10", "score": "0.57920164", "text": "def _check_before_run(self):\n\t\tif not osp.exists(self.root):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.root))\n\t\tif not osp.exists(self.train_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n\t\tif not osp.exists(self.query_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n\t\tif not osp.exists(self.gallery_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "title": "" }, { "docid": "f8b2e19c5c0aa08627f74866734e81ad", "score": "0.5790237", "text": "def _check_before_run(self):\r\n if not osp.exists(self.root):\r\n raise RuntimeError(\"'{}' is not available\".format(self.root))\r\n if not osp.exists(self.data_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.data_dir))\r\n if not osp.exists(self.split_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.split_dir))", "title": "" }, { "docid": "3a107316a974115ac1c3f24cd953df05", "score": "0.5784733", "text": "def check_main(self):\n self.print_check_file(MAIN_FILE_NAME)\n\n pathh = self.path + \"/\" + MAIN_FILE_NAME\n\n if pathh in self.array_of_files:\n if os_path.isfile(pathh):\n return True\n\n return False", "title": "" }, { "docid": "2afb1586640d01c1427c26abbbcbf078", "score": "0.57765776", "text": "def __check_input(self, file_name):\n # check file exist\n if not os.access(file_name, os.F_OK):\n print('File not found')\n exit(1)\n # check permistion of file\n if not os.access(file_name, os.R_OK):\n print('Permistion denined')\n exit(1)", "title": "" }, { "docid": "0a0704c5e1e3ae3e94d9e5077bfe2c2a", "score": "0.57734084", "text": "def __verify__(cls):\n\n return os.path.exists(INIT_D)", "title": "" }, { "docid": "e28026e91f99cdbce838d3ddac07fa6b", "score": "0.57720006", "text": "def main():\n check_for_runestone_package()", "title": "" }, { "docid": "766623748ffb5972fc866a027c206f84", "score": "0.5771993", "text": "def verify_setup(self):", "title": "" }, { "docid": "70d0823cbe5e0c885f5a80c312174cc0", "score": "0.5770649", "text": "def check_existence(self):\n\n return False", "title": "" }, { "docid": "5ef180b2f3d82d5f02a45bf01c4c3a74", "score": "0.57593447", "text": "def test_signed_files_bad(self):\n with pytest.raises(argparse.ArgumentError) as argexc:\n ba.signed_file_args([None])\n assert \"requires\" in str(argexc.value)", "title": "" }, { "docid": "a40c83cbba3be7e7c933560d7ec93115", "score": "0.57553804", "text": "def _check_integrity(self) -> bool:\n filepath = os.path.join(self.root, self.directory)\n if not os.path.exists(filepath):\n return False\n\n return True", "title": "" }, { "docid": "1d01a02eaf85888b32bb9fe6524504d1", "score": "0.57550555", "text": "def test_write_manifest_skips_non_utf8_filenames(self):\n dist = Distribution(SETUP_ATTRS)\n dist.script_name = 'setup.py'\n mm = manifest_maker(dist)\n mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')\n os.mkdir('sdist_test.egg-info')\n\n # Latin-1 filename\n filename = os.path.join(b('sdist_test'), LATIN1_FILENAME)\n\n # Add filename with 
surrogates and write manifest\n with quiet():\n mm.run()\n u_filename = filename.decode('utf-8', 'surrogateescape')\n mm.filelist.append(u_filename)\n # Re-write manifest\n mm.write_manifest()\n\n manifest = open(mm.manifest, 'rbU')\n contents = manifest.read()\n manifest.close()\n\n # The manifest should be UTF-8 encoded\n contents.decode('UTF-8')\n\n # The Latin-1 filename should have been skipped\n assert posix(filename) not in contents\n\n # The filelist should have been updated as well\n assert u_filename not in mm.filelist.files", "title": "" }, { "docid": "d0a2b0124eff10d5c5ea5daec48decec", "score": "0.5748207", "text": "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "title": "" }, { "docid": "26b34a2b4b31758df9394b0d2c86c54e", "score": "0.57395595", "text": "def _check_before_run(self):\n if not osp.exists(self.train_dir): raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.gallery_dir): raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))\n if not osp.exists(self.query_dir): raise RuntimeError(\"'{}' is not available\".format(self.query_dir))", "title": "" }, { "docid": "165cffad3d77171c1082d992e7360a6b", "score": "0.5739133", "text": "def _check_integrity_light(self) -> bool:\n for filename in self.light_filenames:\n filepath = os.path.join(self.root, filename)\n if not os.path.exists(filepath):\n return False\n return True", "title": "" }, { "docid": "c82412f82325c0cb4bf652ef0d2fd65f", "score": "0.5731707", "text": "def validate_(manifest: Pathlike, read_data: bool):\n data = load_manifest(manifest)\n validate(data, read_data=read_data)", "title": "" }, { "docid": "aad6fd360441ce039ecad6ec7e294f13", "score": "0.5730514", "text": "def check_load():\n pass", "title": "" }, { "docid": "d4c418323c6ac3d54e213b6f2e9c3753", "score": "0.5696295", "text": "def check_compatible(self):\n ...", "title": "" }, { "docid": "6c1b22e8147ee644a5f7927993155595", "score": "0.569214", "text": "async def check_files(hass: HomeAssistant) -> bool:\r\n\r\n # Verify that the user downloaded all files.\r\n base = f\"{hass.config.path()}/custom_components/{DOMAIN}\"\r\n missing = []\r\n for file in REQUIRED_FILES:\r\n fullpath = f\"{base}/{file}\"\r\n if not os.path.exists(fullpath):\r\n missing.append(file)\r\n\r\n if missing:\r\n _LOGGER.critical(\"The following files are missing: %s\", str(missing))\r\n return False\r\n return True", "title": "" }, { "docid": "4401193a9d5386d6b51f816e8653cead", "score": "0.5688131", "text": "def _validate_manifest(self):\n file_entries = set()\n for manifest_file_entry in self._json_manifest['files']:\n entry_name = manifest_file_entry['name']\n if entry_name in file_entries:\n msg = 'The output parameter %s appears multiple times in the results manifest' % entry_name\n raise InvalidResultsManifest(msg)\n file_entries.add(entry_name)\n\n if 'path' in manifest_file_entry and 'paths' in manifest_file_entry:\n msg = 'The output parameter %s cannot have both path and paths objects in the results manifest'\n raise InvalidResultsManifest(msg % entry_name)\n if 'path' not in manifest_file_entry and 'paths' not in manifest_file_entry:\n msg = 'The output parameter %s must have either a path or paths object in 
the results manifest'\n raise InvalidResultsManifest(msg % entry_name)", "title": "" }, { "docid": "58d703f06962fa9a3d4849ac68755078", "score": "0.5656309", "text": "def test_manifest(self):\n\n m_contents = None\n\n with open(self.mf) as f:\n m_contents = f.read()\n mf = Manifest(self.mf, get_logger('TL'))\n\n orig_mf = m_contents.replace('\\n', '').strip()\n conv_mf = str(mf).replace('\\n', '').strip()\n\n self.assertEqual(orig_mf, conv_mf)", "title": "" }, { "docid": "b7a4517b7981669e86679122eedff4b3", "score": "0.56539387", "text": "def test_file_exists_good(self):\n assert ba.file_exists(__file__)", "title": "" }, { "docid": "ce708a232959302036ef773c8175a676", "score": "0.56503326", "text": "def test_toxml_empty(self):\n manifest = self.getXmlManifest(\n '<?xml version=\"1.0\" encoding=\"UTF-8\"?>' \"<manifest></manifest>\"\n )\n self.assertEqual(\n manifest.ToXml().toxml(), '<?xml version=\"1.0\" ?><manifest/>'\n )", "title": "" }, { "docid": "d2f02857f1d7358788f7258cec85128f", "score": "0.5631409", "text": "def test_app_dirs_and_loaders(self):\n self.assertEqual(self.func(None), [E001])", "title": "" }, { "docid": "42f190872fe96ff5e1c54e35948fccf2", "score": "0.5619142", "text": "def _check_integrity(self) -> bool:\n for filename in self.splits:\n filepath = os.path.join(self.root, self.directory, filename)\n if not os.path.exists(filepath):\n return False\n return True", "title": "" }, { "docid": "1e20aa29648f8284e67783834c5443fd", "score": "0.5618163", "text": "def check(self):\n pass", "title": "" }, { "docid": "1e20aa29648f8284e67783834c5443fd", "score": "0.5618163", "text": "def check(self):\n pass", "title": "" }, { "docid": "b6e4765b50c962489ec7cdf98834955f", "score": "0.561263", "text": "def validate_manifests(self, manifests):\n for manifest in manifests:\n validate = self._validate(manifest + '.pp', sp_paths['manifests'])\n if not validate:\n return manifest", "title": "" }, { "docid": "1731e55099ea87060a45e22b3f5e9a68", "score": "0.5611092", "text": "def _check_before_run(self):\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "title": "" }, { "docid": "b0f9cb7eaf06808b4060623f787577e8", "score": "0.56075245", "text": "def test_middleware_not_installed(self):\n self.assertEqual(base.check_xframe_deny(None), [])", "title": "" }, { "docid": "28ce5221cd5cd5d36f4922895ab89a5d", "score": "0.5600471", "text": "def verify_install(self):\n self.installed = all(\n os.access(\n os.path.join(self.target_dir, executable), (os.X_OK | os.R_OK)) for executable in self.executables\n )\n return self.installed", "title": "" }, { "docid": "3ec697cb3575ee1f77fe818ab41eb80f", "score": "0.55989313", "text": "def __len__(self):\n return len(self.manifests)", "title": "" }, { "docid": "28ac15e9527c1b26a11a80a7ec9e49cc", "score": "0.55965245", "text": "def required(self):\n return True", "title": "" }, { "docid": "df43bd22e0a0e7e1e980c4399cbae372", "score": "0.5595777", "text": "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.que_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.que_dir))\n if not osp.exists(self.gal_dir):\n raise RuntimeError(\"'{}' is not 
available\".format(self.gal_dir))", "title": "" }, { "docid": "44aa985f866c654ca480635b2d751ef1", "score": "0.55895346", "text": "def check_process(self):\n return True", "title": "" }, { "docid": "44aa985f866c654ca480635b2d751ef1", "score": "0.55895346", "text": "def check_process(self):\n return True", "title": "" }, { "docid": "b5359d2b5e3e201019fdb8f2696a39cf", "score": "0.5586998", "text": "def testRejectMissingUseManifests(self):\n self.content_mock.return_value = self.GetLayoutConf(\n filters=['use-manifests'])\n self.assertRejected(['metadata/layout.conf'])", "title": "" }, { "docid": "09190291acd8558ebc02af0c3f0d4230", "score": "0.55777234", "text": "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(f'{self.dataset_dir} is not available')\n if not osp.exists(self.train_dir):\n raise RuntimeError(f'{self.train_dir} is not available')\n if not osp.exists(self.query_dir):\n raise RuntimeError(f'{self.query_dir} is not available')\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(f'{self.gallery_dir} is not available')", "title": "" }, { "docid": "9d77db4aa956e7e9593ddd9cbeecd6a9", "score": "0.5572583", "text": "def test_no_scripts(wheel_paths):\n path = next(path for path in wheel_paths if 'complex_dist' in path)\n for entry in ZipFile(path).infolist():\n assert '.data/scripts/' not in entry.filename", "title": "" }, { "docid": "5f6bd92cafd7bbdd3b0f4213f11fd095", "score": "0.5571424", "text": "def any_version_installed(self):\n if self.pseudo:\n return True\n\n candidates = []\n for fname in mod_folder.files:\n if os.path.isfile(mod_folder.file_path(fname)) and fname[0] != \".\" and not fname.endswith(\".json\"):\n candidates.append(fname)\n\n return self.name in candidates", "title": "" }, { "docid": "d050a5fb245ea85586dbaa32c6dd1423", "score": "0.55706817", "text": "def confirm_directory():\n assert('xml' in os.listdir() and 'raw_txt' in os.listdir())", "title": "" }, { "docid": "8c35f54b8f3e2a00cd6bd034d4e6e35d", "score": "0.55694216", "text": "def check_before_run(self, required_files):\n for f in required_files:\n if not osp.exists(f):\n raise RuntimeError('\"{}\" is not found'.format(f))", "title": "" }, { "docid": "3574d0647113215f8f44c255d233aeb6", "score": "0.55651754", "text": "def _check_before_run(self):\r\n if not osp.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\r\n if not osp.exists(self.train_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\r\n if not osp.exists(self.query_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\r\n if not osp.exists(self.gallery_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "title": "" }, { "docid": "d5de8e11d84b07fa5db2dfa4a2262876", "score": "0.5564517", "text": "def check_permission(self):\n pass", "title": "" }, { "docid": "755187838698c142930087165ded41dc", "score": "0.5561944", "text": "def is_check(fn):\n if not fn[-3:] == \".py\":\n return False\n\n if fn[-11:] == \"__init__.py\":\n return False\n\n if \"inprogress\" in fn:\n return False\n\n return True", "title": "" }, { "docid": "0a54639d7b83c110ed23ce71d8cccce9", "score": "0.556148", "text": "def check_dependencies(self):\r\n return True", "title": "" }, { "docid": "38f0daf3f64833ac5b2d4a5e6e97b42a", "score": "0.55591047", "text": "def testNoUpdate(self):\n self._WriteManifest()\n revision = self._RunAndExtractRevision()\n self.assertEqual(revision, self.current_revision)", "title": 
"" }, { "docid": "4ad70c80a6d68199fc18f33577c89304", "score": "0.5553992", "text": "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "title": "" } ]
d908c79cf431b7186e893f10afbe7144
Add a new permission on the host to the group in the system.
[ { "docid": "6b3f4d4434a976a4e1a139caaabe4aa1", "score": "0.6871351", "text": "def add_host_permission(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" } ]
[ { "docid": "91f301f95dfc9b19905b0ce6fd7c4a77", "score": "0.76909554", "text": "def add(self, group_name, permission_name):\n\n self.permissions[group_name].append(permission_name)", "title": "" }, { "docid": "17571a8f960466ab9f49a77f66a96c26", "score": "0.74283016", "text": "def add_group_permission(sender, instance, **kwargs):\n _process_permission(instance)", "title": "" }, { "docid": "75b96ee4bac2c46e4cfe8c107d16f92b", "score": "0.7174189", "text": "def add_perrmission_to_group(permission_code_name, group):\n\n try:\n #Filter if permission is added\n permiss_obj = Permission.objects.get(codename=permission_code_name)\n group.permissions.add(permiss_obj)\n group.save()\n logger.info('Added permission %s into group %s', permission_code_name, group.name)\n except ObjectDoesNotExist:\n logger.error('Missing permission %s', permission_code_name)", "title": "" }, { "docid": "8d4fadd7261974a5b78a0da18badab8c", "score": "0.7161086", "text": "def add_permission(self, permission):\n uri = '/PermissionGroupPermissionEntry/{}/{}/'.format(self._group_name, \n permission)\n DynectSession.get_session().execute(uri, 'POST')\n self._permission.append(permission)", "title": "" }, { "docid": "51d4ff1e12035e717de5e893aef973e2", "score": "0.68456477", "text": "def add_permission(self, permission):\r\n self.add_permissions([permission])", "title": "" }, { "docid": "37e845117bd20d2abd20eb18a65c8a59", "score": "0.67212224", "text": "async def permissions_add_permission(self, ctx, permission, *, command):\r\n # Guard Clause\r\n if (\r\n ctx.guild == None # Not in a guild means DM or Group chat.\r\n or Database.Bot[\"sleeping\"] # If the bot is sleeping, don't do anything.\r\n or not self.valid_command(ctx.guild, command) # Check if the command is valid\r\n ):\r\n return\r\n\r\n handle = self.perm_handle(ctx, command)\r\n\r\n # Check to make sure it's a valid permission\r\n if permission in Permissions.perms_list:\r\n\r\n if permission not in handle[\"_permissions\"]:\r\n handle[\"_permissions\"].append(permission)\r\n if \"none\" in handle[\"_permissions\"]:\r\n handle[\"_permissions\"].remove(\"none\")\r\n self.save_permissions(ctx)\r\n\r\n await ctx.message.add_reaction(\"\\u2611\") # ballot_box_with_check\r\n\r\n elif permission == \"none\": # Disable default permissions\r\n if permission not in handle[\"_permissions\"]:\r\n handle[\"_permissions\"].append(permission)\r\n self.save_permissions(ctx)\r\n\r\n await ctx.message.add_reaction(\"\\u2611\") # ballot_box_with_check\r\n\r\n else:\r\n await ctx.message.add_reaction(\"🚫\")", "title": "" }, { "docid": "e175bc28de7ac809bf38f5cb9a9de2e1", "score": "0.6665427", "text": "def test_add_group_permission(self):\n self.request.named_subpaths = {\n \"type\": \"group\",\n \"package\": \"p\",\n \"name\": \"g\",\n \"permission\": \"read\",\n }\n self.request.method = \"PUT\"\n AdminEndpoints(self.request).edit_permission()\n self.access.edit_group_permission.assert_called_with(\"p\", \"g\", \"read\", True)", "title": "" }, { "docid": "fcea15f20784f946a6e7c61916feada4", "score": "0.6620034", "text": "def test_permissions_create_permission_for_group(self):\n pass", "title": "" }, { "docid": "926cac6e39c5244593b44bceaa2e8ad7", "score": "0.65885043", "text": "def add_group_level(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n 
return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "926cac6e39c5244593b44bceaa2e8ad7", "score": "0.65885043", "text": "def add_group_level(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "638e3f92ac466d97a482cea4fe7f80ad", "score": "0.64539033", "text": "def add_permission(self, perm_name):\n try:\n perm_set = self.permissions[perm_name]\n except KeyError:\n self.permissions[perm_name] = set()\n else:\n raise PermissionError(\"Permission Exists\")", "title": "" }, { "docid": "148f38674ada48c90f9c81be5809fe19", "score": "0.64152783", "text": "def add_permission(self, ip):\n new_p = Permission(ip, TIME_TO_EXPIRY_PERMISSION)\n for p in self.__permissions:\n if p == new_p:\n p.refresh()\n return\n self.__permissions.append(new_p)", "title": "" }, { "docid": "d02c849bcf6fcf20a96873aa12cd6e43", "score": "0.630783", "text": "def add_cluster_permission(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "d02c849bcf6fcf20a96873aa12cd6e43", "score": "0.630783", "text": "def add_cluster_permission(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "1bd42314858359079fced61aea5f6dc1", "score": "0.62403935", "text": "def add_group(self, path):\n raise NotImplementedError(\"Group adding not implemented!\")", "title": "" }, { "docid": "4d4211820ed9fa9cde96bcf9ca78cfaf", "score": "0.620096", "text": "def replace_permissions(self, permission=None):\n api_args = {}\n if permission is not None:\n api_args['permission'] = permission\n uri = '/PermissionGroupPermissionEntry/{}/'.format(self._group_name)\n DynectSession.get_session().execute(uri, 'PUT', api_args)\n if permission:\n self._permission = permission\n else:\n self._permission = []", "title": "" }, { "docid": "3f183ceebe635abe52773089a2d31b27", "score": "0.6197173", "text": "def _add_group(self, group):\n\t\tself.groups.add(group)", "title": "" }, { "docid": "b6e0e0961ec9e4f3e21b200c3adee222", "score": "0.6186415", "text": "def add_permission(self, permission, value=1):\n role_permission = RolesHasPermission(value=value)\n role_permission.permission = permission\n self.permissions.append(role_permission)", "title": "" }, { "docid": "bdb8db4d2579d790e28efba5e738a293", "score": "0.61641794", "text": "def add_permission(self, permission, value=1):\n user_permission = UsersHasPermission(value=value)\n user_permission.permission = permission\n self.permissions.append(user_permission)", "title": "" }, { "docid": "b0e726833983e0016999b9cceecae3da", "score": "0.6111041", "text": "def for_groups(self, *args) -> 'Permission':\n args = 
self._process_groups(args)\n self.groups.update(args)\n return self", "title": "" }, { "docid": "dc253843a1af790c1b1c7e768e06caa6", "score": "0.60926586", "text": "def test_has_perm_by_global_permission_on_group(self):\n\n tjibbe = User.objects.create(username=\"Tjibbe\")\n tjibbes = Group.objects.create(name=\"Tjibbes\")\n\n self.assertFalse(self.backend.has_perm(tjibbe, \"app.do_something\"))\n\n tjibbes.permissions.add(self.perm)\n\n self.assertFalse(self.backend.has_perm(tjibbe, \"app.do_something\"))\n\n tjibbe.groups.add(tjibbes)\n\n self.assertTrue(self.backend.has_perm(tjibbe, \"app.do_something\"))\n\n tjibbe.groups.remove(tjibbes)\n\n self.assertFalse(self.backend.has_perm(tjibbe, \"app.do_something\"))", "title": "" }, { "docid": "a4daad7d5017bf2fcab7727ab9af0bb4", "score": "0.6089048", "text": "def add_data_center_permission(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "a4daad7d5017bf2fcab7727ab9af0bb4", "score": "0.6089048", "text": "def add_data_center_permission(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "a79f82179887355fbb05ce8d385db36a", "score": "0.60835165", "text": "def create_permission(self, permission):\n raise exception.NotImplemented()", "title": "" }, { "docid": "3775149f50d3cd01f05582d97b37b80c", "score": "0.6069194", "text": "def grant_permission(self, user_obj, rules, name='Test Group'):\n grant_permission(user_obj, rules, name)", "title": "" }, { "docid": "ab0344293cdc64441a67c352edb8fa66", "score": "0.6045531", "text": "def add_permissions(self, permissions):\r\n if self.permissions_mask is None:\r\n self.permissions_mask = UserPermission.get_mask(permissions)\r\n else:\r\n self.permissions_mask |= UserPermission.get_mask(permissions)", "title": "" }, { "docid": "7c2c6eb2340be394356417407be4c0e7", "score": "0.60379094", "text": "def add_group(cmd):\n exaconf = read_exaconf(cmd.exaconf)\n exaconf.add_group(cmd.name, cmd.id)", "title": "" }, { "docid": "2af7384a5b09e6793e4f7d11b4bba822", "score": "0.603144", "text": "def add_vm_pool_permission(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "2af7384a5b09e6793e4f7d11b4bba822", "score": "0.603144", "text": "def add_vm_pool_permission(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": 
"62af2201e78863fbe642faecf15cddad", "score": "0.59764665", "text": "def add_group(self, *args, **kwargs):\n self.groups.append(Group(*args, **kwargs))", "title": "" }, { "docid": "aa909e4bf6710da5de491939d83b1c6f", "score": "0.5972471", "text": "def add_user_to_group(self, user: User, group: Group) -> None: # pragma: no cover\n ...", "title": "" }, { "docid": "0201ad7053becb611c3e56b5be8b75de", "score": "0.5947228", "text": "def create(self):\n\n _permission_cache = {}\n log.info(\"Starting permission creation.\")\n\n self._create_missing_groups()\n self._create_missing_permissions()\n\n for group_name, permissions in self.permissions.iteritems():\n group = self._group_cache[group_name]\n\n for permission_name in permissions:\n permission = _permission_cache.get(permission_name)\n\n if permission not in group.permissions:\n group.permissions.append(permission)\n\n session.commit()\n log.info(\"Successfully created permissions.\")", "title": "" }, { "docid": "842acb2a4afefb39e2d9dfc18d8fdb9a", "score": "0.5930441", "text": "def add_vm_permission(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "842acb2a4afefb39e2d9dfc18d8fdb9a", "score": "0.5930441", "text": "def add_vm_permission(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "b11c3fc852c436154bc6359c2b168a30", "score": "0.59126115", "text": "async def permissions_add(self, ctx):\r\n # Guard Clause\r\n if (\r\n ctx.guild == None # Not in a guild means DM or Group chat.\r\n or Database.Bot[\"sleeping\"] # If the bot is sleeping, don't do anything.\r\n or ctx.invoked_subcommand is not None # A subcommand was used as intended.\r\n ):\r\n return\r\n\r\n await ctx.send_help(ctx.command)", "title": "" }, { "docid": "9bee9ef8876a0101e6a74eae74f871e9", "score": "0.59044486", "text": "def test_permissions_modify_permission(self):\n pass", "title": "" }, { "docid": "837e90468538bd9bd0ab67bc9d63113b", "score": "0.5890819", "text": "def add_user_permission(sender, instance, **kwargs):\n _process_permission(instance)", "title": "" }, { "docid": "c992f15d1a246246af058b1ff839455f", "score": "0.58734524", "text": "def principal_permission_grant(principal, permission):", "title": "" }, { "docid": "2bc4e7038d523c51cac64ea1b71f0979", "score": "0.586376", "text": "def add_permission(self, label, aws_account_id, action_name):\n return self.connection.add_permission(self, label, aws_account_id,\n action_name)", "title": "" }, { "docid": "8119356575d8765bb8869bed447dc93f", "score": "0.5843098", "text": "def test_has_perm_by_global_role_on_group(self):\n\n tjibbe = User.objects.create(username=\"Tjibbe\")\n tjibbes = Group.objects.create(name=\"Tjibbes\")\n\n self.assertFalse(self.backend.has_perm(tjibbe, \"app.do_something\"))\n\n assign_global_role(tjibbes, self.owner)\n\n self.assertFalse(self.backend.has_perm(tjibbe, \"app.do_something\"))\n\n tjibbe.groups.add(tjibbes)\n\n 
self.assertTrue(self.backend.has_perm(tjibbe, \"app.do_something\"))\n\n tjibbe.groups.remove(tjibbes)\n\n self.assertFalse(self.backend.has_perm(tjibbe, \"app.do_something\"))", "title": "" }, { "docid": "7d16313f1782611d8c06fb9c241d2b50", "score": "0.58363974", "text": "def setGroupPermission( path, groupName, mode ):\n from grp import getgrnam\n\n Any.requireIsTextNonEmpty( path ) # do not check if exists\n Any.requireIsTextNonEmpty( groupName )\n Any.requireIsIntNotZero( mode )\n\n groupID = getgrnam( groupName ).gr_gid\n\n try:\n os.chmod( path, mode )\n except OSError:\n pass\n\n try:\n os.chown( path, -1, groupID ) # -1 == don't change userID\n except OSError:\n pass", "title": "" }, { "docid": "a887d5e43f87727b8a36a26a6c7d3cc6", "score": "0.58356875", "text": "def test_permissions_create_permission_for_system(self):\n pass", "title": "" }, { "docid": "fc918d8bfa4c54f7beb1d4b9718698ae", "score": "0.5833883", "text": "def add_storage_domain_permission(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "fc918d8bfa4c54f7beb1d4b9718698ae", "score": "0.5833883", "text": "def add_storage_domain_permission(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "62d8beab20c8bc2f099e1bc03a681f8c", "score": "0.5833158", "text": "def add(self, group: \"Group\") -> None:\n # only add the group if it doesn't exist\n if not self.__contains__(group):\n self.append(group)", "title": "" }, { "docid": "7a21d15063b9b7ca34b50e1c6fbb18db", "score": "0.5803711", "text": "def add_subgroup(self, name):\n uri = '/PermissionGroupSubgroupEntry/{}/{}/'.format(self._group_name, \n name)\n DynectSession.get_session().execute(uri, 'POST')\n self._subgroup.append(name)", "title": "" }, { "docid": "f4bf2a9011d641889e0ffde52bcf5868", "score": "0.5802535", "text": "def replace_permissions_group(self, groups=None):\n api_args = {}\n if groups is not None:\n api_args['groups'] = groups\n self.groups = groups\n else:\n self.groups = []\n uri = '/UserGroupEntry/{}/'.format(self._user_name)\n DynectSession.get_session().execute(uri, 'PUT', api_args)", "title": "" }, { "docid": "ccf7e3af496c7cc990537e0870fe60ae", "score": "0.5761337", "text": "def add_group_to_user(sender, instance, created, using, **kwargs):\n if created:\n user = instance\n\n # Filter all those groups with assignable attribute set to True\n groups = list(Group.objects.filter(is_auto_assign=True))\n user.groups.add(*groups)\n\n if SET_STAFF_ON_REGISTRATION:\n user.is_staff = True\n user.save()", "title": "" }, { "docid": "09c5b45b08216385d4b5c4aace2f43f6", "score": "0.57244855", "text": "def add(name, gid=None, **kwargs):\n if salt.utils.data.is_true(kwargs.pop(\"system\", False)):\n log.warning(\"solaris_group module does not support the 'system' argument\")\n if kwargs:\n log.warning(\"Invalid kwargs passed to group.add\")\n\n cmd = \"groupadd \"\n if gid:\n cmd += \"-g {} \".format(gid)\n cmd += name\n\n 
ret = __salt__[\"cmd.run_all\"](cmd, python_shell=False)\n\n return not ret[\"retcode\"]", "title": "" }, { "docid": "65d3d656e00e21b01755adaad7e16e7e", "score": "0.5721241", "text": "def _set_global_permission(self, msg):\n # Check what the permission is on the workspace\n workspace_id = msg['wsid']\n is_public = is_workspace_public(workspace_id)\n # Push the event to the elasticsearch writer queue\n self.sock.send_json({\n '_action': 'set_global_perm',\n 'workspace_id': workspace_id,\n 'is_public': is_public\n })", "title": "" }, { "docid": "3a6cb63bec48fb207336e506ad68a75e", "score": "0.5710357", "text": "def test_group_permissions_extend_to_user(self):\r\n\r\n layer = Layer.objects.all()[0]\r\n backend = get_backends()[0]\r\n # Set the default permissions\r\n layer.set_default_permissions()\r\n\r\n # Test that LEVEL_READ is set for ANONYMOUS_USERS and AUTHENTICATED_USERS\r\n self.assertEqual(layer.get_gen_level(ANONYMOUS_USERS), layer.LEVEL_READ)\r\n self.assertEqual(layer.get_gen_level(AUTHENTICATED_USERS), layer.LEVEL_READ)\r\n\r\n # Test that the default perms give Norman view permissions but not write permissions\r\n read_perms = backend.objects_with_perm(self.norman, 'layers.view_layer', Layer)\r\n write_perms = backend.objects_with_perm(self.norman, 'layers.change_layer', Layer)\r\n self.assertTrue(layer.id in read_perms)\r\n self.assertTrue(layer.id not in write_perms)\r\n\r\n # Make sure Norman is not in the bar group.\r\n self.assertFalse(self.bar.user_is_member(self.norman))\r\n\r\n # Add norman to the bar group.\r\n self.bar.join(self.norman)\r\n\r\n # Ensure Norman is in the bar group.\r\n self.assertTrue(self.bar.user_is_member(self.norman))\r\n\r\n # Test that the bar group has default permissions on the layer\r\n bar_read_perms = backend.objects_with_perm(self.bar, 'layers.view_layer', Layer)\r\n bar_write_perms = backend.objects_with_perm(self.bar, 'layers.change_layer', Layer)\r\n self.assertTrue(layer.id in bar_read_perms)\r\n self.assertTrue(layer.id not in bar_write_perms)\r\n\r\n # Give the bar group permissions to change the layer.\r\n layer.set_group_level(self.bar, Layer.LEVEL_WRITE)\r\n bar_read_perms = backend.objects_with_perm(self.bar, 'layers.view_layer', Layer)\r\n bar_write_perms = backend.objects_with_perm(self.bar, 'layers.change_layer', Layer)\r\n self.assertTrue(layer.id in bar_read_perms)\r\n self.assertTrue(layer.id in bar_write_perms)\r\n\r\n # Test that the bar group perms give Norman view and change permissions\r\n read_perms = backend.objects_with_perm(self.norman, 'layers.view_layer', Layer)\r\n write_perms = backend.objects_with_perm(self.norman, 'layers.change_layer', Layer)\r\n self.assertTrue(layer.id in read_perms)\r\n self.assertTrue(layer.id in write_perms)", "title": "" }, { "docid": "b49821ddca2387821166425fab9fca69", "score": "0.568041", "text": "def add_permission(self, resource: str, actions: list):\n _actions = self.permissions.get(resource, [])\n _actions.extend(actions)\n self.permissions[resource] = _actions", "title": "" }, { "docid": "8a2ef009351564888b1d8a9a8386cef6", "score": "0.5673735", "text": "def add(group, name, value=None, p_type=None,\n apply=None, on_change=None):\n\n __instance.add(group, name, value, p_type, apply, on_change)", "title": "" }, { "docid": "11edbdaefa58d32225ea2f963bb98010", "score": "0.56724155", "text": "def test_patch_iam_permission(self):\n pass", "title": "" }, { "docid": "771cc77fa385adca884cec89d64a74c7", "score": "0.56651706", "text": "def add_user_level(\n self,\n permission,\n 
headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "771cc77fa385adca884cec89d64a74c7", "score": "0.56651706", "text": "def add_user_level(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "baa84ef2c197447c6bf5cd71045dad02", "score": "0.56619537", "text": "def grant_permission_to_role(permission_id, role_id):", "title": "" }, { "docid": "bd59707a3dc6f5cd3f70807a69ed8f79", "score": "0.5660978", "text": "def init_permissions():\n try:\n # Get or Create the default group\n default_group, created = Group.objects.get_or_create(name=main_rights.default_group)\n\n # Get custom queries permissions\n custom_queries_access_perm = Permission.objects.get(codename=custom_queries_rights.custom_queries_access)\n\n # Add permissions to default group\n default_group.permissions.add(custom_queries_access_perm)\n\n except Exception, e:\n print('ERROR : Impossible to init the permissions : ' + e.message)", "title": "" }, { "docid": "8c0e12c8ef590727821ee36f6ad85c15", "score": "0.56425565", "text": "def create_permission(self):\n\t\tentry = Permission()\n\t\tform = PermissionCreateForm()\n\t\tgroup_id = self.request.matchdict.get('group')\n\t\t\n\t\tif group_id:\n\t\t\tif self.request.method = 'POST' and form.validate:\n\t\t\t\tform_populate.populate_obj(entry)\n\t\t\t\t\n\t\t\t\t# From the form url, set the group_id of the new entry\n\t\t\t\tentry.group_id = group_id\n\n\t\t\t\tDBSession.add(entry)\n\t\t\t\treturn HTTPFound(location=self.request.route_url(\n\t\t\t\t\t\t'group',\n\t\t\t\t\t\tgroup_id=group_id\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\t# We need a dynamic list of choices for the form.\n\t\t\t\t# Step 1: get all of the permissions\n\t\t\t\tpowers = PowerRecordService.all()\n\t\t\t\t\n\t\t\t\t# Step 2: create a list in the correct scope.\n\t\t\t\tchoices = list()\n\t\t\t\t\n\t\t\t\t# Step 3: populate the choices list.\n\t\t\t\tfor power in powers:\n\t\t\t\t\tchoice = (power.power_id, power.title)\n\t\t\t\t\tchoices.append(choice)\n\t\t\t\t\n\t\t\t\t# Step 4: set the form power_id choices.\n\t\t\t\tform.power_id.choices = choices\n\t\t\t\t\n\t\t\t\t# Get the group from the group id.\n\t\t\t\tgroup = GroupRecordService.by_id(group_id)\n\t\t\t\t\n\t\t\t\treturn {\n\t\t\t\t\t'form': form, \n\t\t\t\t\t'action': self.request.matchdict('action'),\n\t\t\t\t\t'group': group\n\t\t\t\t}\n\t\telse:\n\t\t\treturn HTTPNotFound()", "title": "" }, { "docid": "6feeb4b5a495dd51067ae91ad4baf18c", "score": "0.562737", "text": "def test_add_user_permission(self):\n self.request.named_subpaths = {\n \"type\": \"user\",\n \"package\": \"p\",\n \"name\": \"u\",\n \"permission\": \"read\",\n }\n self.request.method = \"PUT\"\n AdminEndpoints(self.request).edit_permission()\n self.access.edit_user_permission.assert_called_with(\"p\", \"u\", \"read\", True)", "title": "" }, { "docid": "3ac94386124d25439d6467a1c4bd7f88", "score": "0.5614305", "text": "def test_add_group_member(self):\n 
self.request.named_subpaths = {\"username\": \"a\", \"group\": \"b\"}\n self.request.method = \"PUT\"\n AdminEndpoints(self.request).mutate_group_member()\n self.access.edit_user_group.assert_called_with(\"a\", \"b\", True)", "title": "" }, { "docid": "59b976c49b934bc8731e39f21797a69a", "score": "0.5603628", "text": "def grant_permission_to_principal(permission_id, principal_id):", "title": "" }, { "docid": "5570b494d6275dff7738d1e75b37552e", "score": "0.55947745", "text": "def add_to_group(self, who, where):\n return ad_add_members_to_groups(self.__conn, who, where)", "title": "" }, { "docid": "9545a11d4eaaa081eea40ded4e3fed95", "score": "0.55908716", "text": "def add_member_cli(api_client, parent_name, user_name, group_name):\n GroupsApi(api_client).add_member(parent_name=parent_name,\n user_name=user_name,\n group_name=group_name)", "title": "" }, { "docid": "34d4337712c77a2086a606ad3e368681", "score": "0.55639124", "text": "def addGroup(self, group):\n self.repartition.append(group)", "title": "" }, { "docid": "275da7917a47670d303c44d37a69a8dd", "score": "0.55625546", "text": "def test_user_has_access_user_in_group(self):\n group = mommy.make('groups.Group', private=True)\n resource = mommy.make(Resource, groups=[group])\n user = self.create_user()\n user.add_to_group(group.pk)\n self.assertTrue(resource.user_can_download(user.pk))", "title": "" }, { "docid": "f93d09c1b2a1ae2d4fa62a9eadea118d", "score": "0.5561041", "text": "def add_user(self, group, user):\n raise NotImplementedError", "title": "" }, { "docid": "f9cbed4b10c98615db778856d76fadf1", "score": "0.55608976", "text": "def test_add_and_remove_privilege(self):\n\n self.create_common_users_and_groups()\n\n sgp = SetGroupPrivilegesAPI(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n sgp.add_privilege(\n groups=[\"Group 1\", \"Group 2\"], privilege=Privileges.CAN_USE_SPOTIQ\n )\n\n privs1 = sgp.get_privileges_for_group(\"Group 1\")\n self.assertTrue(Privileges.CAN_USE_SPOTIQ in privs1)\n privs2 = sgp.get_privileges_for_group(\"Group 2\")\n self.assertTrue(Privileges.CAN_USE_SPOTIQ in privs2)\n\n sgp.remove_privilege(\n groups=[\"Group 1\"], privilege=Privileges.CAN_USE_SPOTIQ\n )\n privs1 = sgp.get_privileges_for_group(\"Group 1\")\n self.assertFalse(Privileges.CAN_USE_SPOTIQ in privs1)\n privs2 = sgp.get_privileges_for_group(\"Group 2\")\n self.assertTrue(Privileges.CAN_USE_SPOTIQ in privs2)", "title": "" }, { "docid": "db4283eac9b3c55ed168d0f184c18d3d", "score": "0.55599", "text": "def subgroup_create_perm(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n allowed = False # Init allowed flag\n\n if \"group_id\" in request.view_args:\n group_id = request.view_args['group_id']\n group = db.session.query(Group).filter_by(id=group_id).one_or_none()\n\n # Check condition 1\n ancestors = group.path_to_root().all()\n for ancestor in ancestors[1:]:\n if current_user in ancestor.members:\n allowed = True\n kwargs['group'] = group\n break\n\n # If condition 1 doesn't meet, check condition 2\n if not allowed:\n perms = (db.session.query(CreateGroupPerm)\n .join(CreateGroupPerm.group)\n .filter(CreateGroupPerm.target_group_id == group_id)\n .filter(Group.members.any(User.id == current_user.id)).all())\n if perms:\n allowed = True\n kwargs['group'] = group\n\n if not allowed:\n flash('Permission denied')\n return redirect(request.referrer)\n\n return func(*args, **kwargs)\n\n return wrapper", "title": "" }, { "docid": "34bdfa346cddd65198781b9ae8ad807a", "score": "0.5555068", "text": "def 
test_permissions_get_permissions_for_group(self):\n pass", "title": "" }, { "docid": "aab545e20df27a0b47bd4f74f11b8e81", "score": "0.5555065", "text": "def add_canvas_permission(profile_path, canvas_allowed_hosts):\n connect_to_db = sqlite3.connect\n perm_db = connect_to_db(join(profile_path, \"permissions.sqlite\"))\n cursor = perm_db.cursor()\n # http://mxr.mozilla.org/mozilla-esr31/source/build/automation.py.in\n cursor.execute(\"PRAGMA user_version=3\")\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS moz_hosts (\n id INTEGER PRIMARY KEY,\n host TEXT,\n type TEXT,\n permission INTEGER,\n expireType INTEGER,\n expireTime INTEGER,\n appId INTEGER,\n isInBrowserElement INTEGER)\"\"\")\n for host in canvas_allowed_hosts:\n if host:\n qry = \"\"\"INSERT INTO 'moz_hosts'\n VALUES(NULL,'%s','canvas/extractData',1,0,0,0,0);\"\"\" % host\n cursor.execute(qry)\n perm_db.commit()\n cursor.close()", "title": "" }, { "docid": "0d12cbbb319e5a5927e4daf836181133", "score": "0.55357313", "text": "def test_has_perm_on_object_with_userpermission(self):\n\n tjibbe = User.objects.create(username=\"Tjibbe\")\n\n content = Group.objects.create(name=\"Tjibbes\")\n\n self.assertFalse(self.backend.has_perm(tjibbe, \"app.do_something\",\n obj=content))\n\n tjibbe.user_permissions.add(self.perm)\n\n self.assertTrue(self.backend.has_perm(tjibbe, \"app.do_something\",\n obj=content))\n\n tjibbe.user_permissions.remove(self.perm)\n\n self.assertFalse(self.backend.has_perm(tjibbe, \"app.do_something\",\n obj=content))", "title": "" }, { "docid": "d4b0e59de2214df6a3e884c2d36878b4", "score": "0.552847", "text": "def test_create_iam_permission(self):\n pass", "title": "" }, { "docid": "c07c5a479a1fae2c56fa16a2b0d8e760", "score": "0.55024195", "text": "def add(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "c07c5a479a1fae2c56fa16a2b0d8e760", "score": "0.55024195", "text": "def add(\n self,\n permission,\n headers=None,\n query=None,\n wait=True,\n **kwargs\n ):\n # Check the types of the parameters:\n Service._check_types([\n ('permission', permission, types.Permission),\n ])\n\n # Build the URL:\n query = query or {}\n\n # Send the request and wait for the response:\n return self._internal_add(permission, headers, query, wait)", "title": "" }, { "docid": "239cb71226ea5622eb535e10860e3234", "score": "0.54922014", "text": "def add_group(self, group: ParticleGroup):\n self.groups.append(group)", "title": "" }, { "docid": "7a47b768eb9b1f93ab85ee2c419f5c77", "score": "0.548695", "text": "def setPermission(self, puff_number, value):\r\n getattr(self.RPKoheron, 'set_fast_permission_%d' % puff_number)(int(value))", "title": "" }, { "docid": "ef0169a4e3c8069ddbc31463e970bcb4", "score": "0.548225", "text": "def add_user(self, user):\n self.groups.add_user(user)", "title": "" }, { "docid": "12501fa0a9234b37b877ceded4b938da", "score": "0.5464892", "text": "def AddSubGroup(self, group):\n self.groups.append(group)\n self.all_sub_names.add(group.name)\n self._parser.usage = usage_text.GenerateUsage(self, self._ai)", "title": "" }, { "docid": "a530795bff23b68a36422c2d472c6144", "score": "0.54603386", "text": "def _set_user_group(self, user):\n\n if self.group_name:\n group = 
Group.query.filter_by(group_name=self.group_name).one()\n user.groups.append(group)", "title": "" }, { "docid": "beb2454db57e1d05b4bc73f1b94acc9f", "score": "0.5455339", "text": "def group_user_add(self, user_id, group_id):\n self.admin.group_user_add(user_id, group_id)", "title": "" }, { "docid": "d01f410be3d8ae9e0c93d900880f1583", "score": "0.5453057", "text": "def add_to_group(account_id, group_id):\n\n sql = 'insert into GroupMembers (accountID, groupID) values (?, ?)'\n cursor.execute(sql, (account_id, group_id))\n database.commit()", "title": "" }, { "docid": "05cfb5ef208812edab1ea6b1d37033b9", "score": "0.5446834", "text": "async def permissions_add_role(self, ctx, role, *, command):\r\n # Guard Clause\r\n if (\r\n ctx.guild == None # Not in a guild means DM or Group chat.\r\n or Database.Bot[\"sleeping\"] # If the bot is sleeping, don't do anything.\r\n or not self.valid_command(ctx.guild, command) # Check if the command is valid\r\n ):\r\n return\r\n\r\n handle = self.perm_handle(ctx, command)\r\n\r\n if (role == \"any\") or (role == \"none\"):\r\n if role not in handle[\"_roles\"]:\r\n handle[\"_roles\"].append(role)\r\n self.save_permissions(ctx)\r\n\r\n elif role == \"everyone\": # This only fires if everyone isn't mentioned,\r\n # if @everyone is mentioned it is caught in the else.\r\n\r\n if ctx.guild.id not in handle[\"_roles\"]: # Everyone role uses the guild id\r\n handle[\"_roles\"].append(ctx.guild.id)\r\n self.save_permissions(ctx)\r\n\r\n else:\r\n # Make sure the role passed is a role object, unless it's any of everyone\r\n role = await commands.RoleConverter().convert(ctx, role)\r\n\r\n if role.id not in handle[\"_roles\"]:\r\n handle[\"_roles\"].append(role.id)\r\n self.save_permissions(ctx)\r\n\r\n # If none is in the list, go ahead and remove it.\r\n if \"none\" in handle[\"_roles\"]:\r\n handle[\"_roles\"].remove(\"none\")\r\n self.save_permissions(ctx)\r\n\r\n await ctx.message.add_reaction(\"\\u2611\") # ballot_box_with_check\r", "title": "" }, { "docid": "c564dc8fa42031f99c5d16839ff5efa2", "score": "0.5444934", "text": "def test_create_permission_set(self):\n pass", "title": "" }, { "docid": "9a47590350b0eb6d888594e3bb4fa785", "score": "0.54347533", "text": "def group_add(group):\n\n # Get form data\n name = request.forms.get('name')\n\n # Check if group doesn't exist\n if not group_exists(group):\n return 'Group does not exist'\n\n # Check for if account doesn't exist.\n if not account_exists(name):\n return 'That user does not exist'\n\n group_id = get_group_id(group)\n uid = get_user_id(name)\n\n # Check if user is already in the group.\n if user_in_group(uid, group_id):\n redirect('/messenger/group/{}'.format(group))\n\n # Add the user to the group\n add_to_group(uid, group_id)\n redirect('/messenger/group/{}'.format(group))", "title": "" }, { "docid": "1f391f2db192069453b6a97eb152202f", "score": "0.5432471", "text": "def group_role_add(self, role_name, group_id):\n role = self.role_get(role_name)\n self.admin.assign_group_realm_roles(group_id, [role])", "title": "" }, { "docid": "854ee70164a04ec4f364e2a3b2feab5d", "score": "0.54245013", "text": "def add_permissions(self, *names):\n for name in names:\n ct, action = resolve_permission_ct(name)\n obj_perm = ObjectPermission(name=name, actions=[action])\n obj_perm.save()\n obj_perm.users.add(self.user)\n obj_perm.object_types.add(ct)", "title": "" }, { "docid": "df593636ed835a55f768687a4c17f798", "score": "0.5422264", "text": "def add_app_permissions(cls, sender, **kwargs):\n app = sender.name\n counter = 
0\n app_permissions = cls.GROUP_PERMISSIONS[app]\n for group, verbs in app_permissions.items():\n print(f\"Assigning '{app}' application's permissions for '{group}' group.\")\n group = Group.objects.get(name=group)\n for verb, models in verbs.items():\n for model in models:\n permission_code = ('_'.join([verb, model, INSTANCE]))\n assign_perm(f'{app}.{permission_code}', group)\n print(f'Assigned permission: {app}.{permission_code}')\n counter += 1\n print(f\"{counter} '{app}' group permissions assigned.\")", "title": "" }, { "docid": "8cf0bbe66cdd84b93bc2848e90c715a2", "score": "0.54212505", "text": "def _add_access_rights_for_share_group(self, cr, uid, group_id, mode, fields_relations, context=None):\n model_access_obj = self.pool.get('ir.model.access')\n user_obj = self.pool.get('res.users')\n target_model_ids = [x[1].id for x in fields_relations]\n perms_to_add = (mode == 'readonly') and READ_ONLY_ACCESS or READ_WRITE_ACCESS\n current_user = user_obj.browse(cr, uid, uid, context=context)\n\n current_user_access_map = self._get_access_map_for_groups_and_models(cr, uid,\n [x.id for x in current_user.groups_id], target_model_ids, context=context)\n group_access_map = self._get_access_map_for_groups_and_models(cr, uid,\n [group_id], target_model_ids, context=context)\n _logger.debug(\"Current user access matrix: %r\", current_user_access_map)\n _logger.debug(\"New group current access matrix: %r\", group_access_map)\n\n # Create required rights if allowed by current user rights and not\n # already granted\n for dummy, model in fields_relations:\n # mail.message is transversal: it should not received directly the access rights\n if model.model in ['mail.message']: continue\n values = {\n 'name': _('Copied access for sharing'),\n 'group_id': group_id,\n 'model_id': model.id,\n }\n current_user_access_line = current_user_access_map.get(model.model,set())\n existing_group_access_line = group_access_map.get(model.model,set())\n need_creation = False\n for perm in perms_to_add:\n if perm in current_user_access_line \\\n and perm not in existing_group_access_line:\n values.update({perm:True})\n group_access_map.setdefault(model.model, set()).add(perm)\n need_creation = True\n if need_creation:\n model_access_obj.create(cr, UID_ROOT, values)\n _logger.debug(\"Creating access right for model %s with values: %r\", model.model, values)", "title": "" }, { "docid": "709f693a79000c89f947904392333e94", "score": "0.5417232", "text": "def addGroup(self, group):\n if self._illegalGroupName(group):\n raise ValueError('illegal group name %s' % group)\n\n if group not in self._group:\n self._group[group] = []\n else:\n raise ValueError('group %s exists' % group)", "title": "" }, { "docid": "cd1d552ba2e35f9d5d126f9ec294d86e", "score": "0.54152226", "text": "def has_add_permission(self, request, obj=None):\n return False", "title": "" }, { "docid": "cd1d552ba2e35f9d5d126f9ec294d86e", "score": "0.54152226", "text": "def has_add_permission(self, request, obj=None):\n return False", "title": "" }, { "docid": "cd1d552ba2e35f9d5d126f9ec294d86e", "score": "0.54152226", "text": "def has_add_permission(self, request, obj=None):\n return False", "title": "" }, { "docid": "164cc2f3b2bdd37435349cdea036e1d6", "score": "0.5413139", "text": "async def permissions_add_user(self, ctx, member, *, command):\r\n\r\n # Guard Clause\r\n if (\r\n ctx.guild == None # Not in a guild means DM or Group chat.\r\n or Database.Bot[\"sleeping\"] # If the bot is sleeping, don't do anything.\r\n or not self.valid_command(ctx.guild, command) # 
Check if the command is valid\r\n ):\r\n return\r\n\r\n # Read in any mentions\r\n mentions = ctx.message.mentions\r\n\r\n # Check if a user was mentioned, if not convert member to a mention\r\n if len(mentions) == 0:\r\n mentions.append(await commands.MemberConverter().convert(ctx, member))\r\n\r\n # Now that we have our mentioned user\r\n if len(mentions) == 1:\r\n # We have our user and a valid command, add them to the database\r\n member = mentions[0]\r\n\r\n handle = self.perm_handle(ctx, command)\r\n\r\n if member.id not in handle[\"_users\"]:\r\n handle[\"_users\"].append(member.id)\r\n self.save_permissions(ctx)\r\n\r\n await ctx.message.add_reaction(\"\\u2611\") # ballot_box_with_check\r\n\r\n else:\r\n # More then one user was mentioned for some reason\r\n raise commands.BadArgument", "title": "" }, { "docid": "c12d31e3cacfc2738ceacd017ae1a769", "score": "0.5395695", "text": "def owner_add(course_key, instance_key, **kwargs):\n course_instance = kwargs['course_instance']\n\n group_id = request.args.get('group_id')\n owner_type = request.args.get('owner_type')\n group = db.session.query(Group).filter_by(id=group_id).one_or_none()\n\n manage_course_perm = ManageCoursePerm(course_instance=course_instance, group=group,\n type=CourseOwnerType[owner_type])\n\n try:\n course_instance.owners.append(group)\n db.session.add(manage_course_perm)\n db.session.commit()\n except SQLAlchemyError as e:\n db.session.rollback()\n error_message = dumps({'message': 'Error occurs and could not remove the owner. Error:%s' % e})\n abort(Response(error_message, 501))\n\n return jsonify(status='success')", "title": "" }, { "docid": "adcd234f271ee5b9dbcea967d95dead7", "score": "0.5386709", "text": "def add_attribute_group(self, group_name, group: 'AttributeGroup'):\n if group_name in self._groups.keys():\n raise ValueError(f'Group {group_name} already exists!')\n self._groups[group_name] = group\n try:\n setattr(self, group_name, group)\n except:\n print(f'Warning - could not set dynamic group for {group}')", "title": "" }, { "docid": "5b98dbcda7c05a472a5a328c775de6b4", "score": "0.53810763", "text": "def group_add(self, group, channel, expiry=None):\n self.group_model.objects.update_or_create(\n group=group,\n channel=channel,\n defaults={\"expiry\": now() + datetime.timedelta(seconds=expiry or self.expiry)},\n )", "title": "" }, { "docid": "6bbea9bab7fedc0496c786e7cef39e26", "score": "0.5378149", "text": "async def group_add(self, group, channel):\n # Check the inputs\n assert self.valid_group_name(group), \"Group name not valid\"\n assert self.valid_channel_name(channel), \"Channel name not valid\"\n # Add to group dict\n self.groups.setdefault(group, {})\n self.groups[group][channel] = time.time()", "title": "" }, { "docid": "239bef56e0741b307f6d453e8ae9b7e6", "score": "0.5369136", "text": "def add_access_to_resource(self, resource):\n LOG.debug(\"[Role] Adding access for role {} to resource {}\".format(\n self.name, resource.__name__\n ))\n self.accesses.append(resource.__name__)\n LOG.debug(\"[Role] Access added for role {}\".format(self.name))", "title": "" } ]
409a756a82c98341bd6b8d5cb1648861
Return Position instance for given node (or None if no node).
[ { "docid": "054171e61f10cfe8177f27ab038fc20a", "score": "0.8076458", "text": "def _make_position(self, node):\n return self.Position(self, node) if node is not None else None", "title": "" } ]
[ { "docid": "4a6fb3b81190a656753251f00e63b148", "score": "0.73565936", "text": "def newPosition(self, node):\n return self.Position(node, self) if node is not None else None", "title": "" }, { "docid": "7ef673963e5db890c4f42bcebc464d13", "score": "0.6869997", "text": "def _make_position(self, node):\n if node is self._header or node is self._trailer:\n return None\n else:\n return self.Position(self, node)", "title": "" }, { "docid": "7ef673963e5db890c4f42bcebc464d13", "score": "0.6869997", "text": "def _make_position(self, node):\n if node is self._header or node is self._trailer:\n return None\n else:\n return self.Position(self, node)", "title": "" }, { "docid": "3ee03e9a7efa6348104e5c37f0a6aa23", "score": "0.6666255", "text": "def _make_position(self,node):\n\t\treturn self.Position(self,node)", "title": "" }, { "docid": "276a65e21d80f15688cf2442af29754a", "score": "0.6219851", "text": "def check_position(self, p):\n if p.container is not self:\n raise ValueError(\"Position does not belong to this tree\")\n if not isinstance(p, self.Position):\n raise TypeError(\"Must be Position type\")\n if p.node.parent is p.node: # when a node is deleted, its child takes its place\n raise ValueError(\"Deleted node\")\n return p.node", "title": "" }, { "docid": "88f2423decd2d908280ae5af6ee12741", "score": "0.61537135", "text": "def _node(self, type_, pos_from_node, **kwargs):\n # type: (Any, Any, Any) -> Any\n\n # Some nodes (like Module) dont have position\n lineno = getattr(pos_from_node, \"lineno\", 1)\n col_offset = getattr(pos_from_node, \"col_offset\", 0)\n\n if PY30_37:\n # No end_lineno or end_pos_offset\n return type_(lineno=lineno, col_offset=col_offset, **kwargs)\n\n # Py38+\n end_lineno = getattr(pos_from_node, \"end_lineno\", 1)\n end_col_offset = getattr(pos_from_node, \"end_col_offset\", 0)\n\n return type_(\n lineno=lineno, end_lineno=end_lineno, col_offset=col_offset, end_col_offset=end_col_offset, **kwargs\n )", "title": "" }, { "docid": "edf7bec0fb23317422f561042bd13d82", "score": "0.6073126", "text": "def get_node_at_point(self, pos):\n hit_items = self.items(pos)\n if not len(hit_items):\n return\n hit_item = None\n for hi in hit_items:\n if isinstance(hi, GungNode):\n hit_item = hi\n break\n\n return hit_item", "title": "" }, { "docid": "ed38a623890554a3d51a4091dad3e90a", "score": "0.59443504", "text": "def p_node(self):\n for node in self.neighbors:\n if node.type == \"PNode\":\n return node\n return None", "title": "" }, { "docid": "f879c19f92cc62498a9e84769961cf6e", "score": "0.5928719", "text": "def get_position(self, position):\n if position > 0:\n current = self.head\n counter = 1\n while current:\n if counter == position:\n return current\n current = current.next\n counter += 1\n # position too large\n return None\n else:\n # position too small\n return None", "title": "" }, { "docid": "5e360bdf43193d7b232b3e49a0ebfdcf", "score": "0.5901272", "text": "def get_position(self, position):\n print position = LinkedList.self\n if Element.position:\n print Element.position\n else:\n return None", "title": "" }, { "docid": "b7b12325e3f6bfe57ef68cda403b5f73", "score": "0.5898462", "text": "def validatePosition(self, p):\n if not isinstance(p, self.Position):\n raise TypeError(\"p must be a proper Position type.\")\n if p.tree() is not self:\n raise ValueError(\"p does not belong to this tree.\")\n if p.node().parent() is p.node():\n raise ValueError(\"P is no longer valid\")\n # if p.node().parent() is None and p != self.root():\n # raise TreeException(\"P is no more 
valid.\")\n\n return p.node()", "title": "" }, { "docid": "1c6626a420255c0ae4faa7bb387ed94d", "score": "0.5879422", "text": "def get_node_as_point(self, node_name):\n node_data = self.nodes[node_name]\n return node_data[self.nodes_geometry_key]", "title": "" }, { "docid": "7bc4596c8262aa37fd0e96db6e06bdc2", "score": "0.58563256", "text": "def __select(self, node, pos):\n if node == None:\n return None\n\n size_left = self.__size(node['left'])\n if size_left > pos:\n return self.__select(node['left'], pos)\n elif size_left < pos:\n return self.__select(node['right'], (pos - size_left - 1))\n else:\n return node", "title": "" }, { "docid": "155b2cf66024947351f8a766e2232843", "score": "0.5847577", "text": "def get_object_at_position(self, x, y):\n\n if (x, y) in self._position_object_dict:\n return self._position_object_dict[(x, y)]\n else:\n return None", "title": "" }, { "docid": "3c2db3a61844d2bdae928a11b77fdd2f", "score": "0.58345085", "text": "def get_position(self, position):\n current = self.head\n currentPos = 1\n while currentPos < position:\n if current.next:\n current = current.next\n currentPos += 1\n else:\n return None\n return current", "title": "" }, { "docid": "0de18f8cc28facd4925e9bf4f95ea93d", "score": "0.5780734", "text": "def find_node(self, node) -> Node:\n return self.root.look_down(node)", "title": "" }, { "docid": "8d6ae4065ac7078833e426e664b07a8e", "score": "0.57699746", "text": "def node_index(self):\n if 'node' in self.name:\n return int(self.name.split('node')[1])\n else:\n return None", "title": "" }, { "docid": "5a759f36345fc2e716bd70d7512818b9", "score": "0.570083", "text": "def get_position(maze, element):\n if element == 'pony':\n return int(maze['pony'][0])\n elif element == 'domokun':\n return int(maze['domokun'][0])\n elif element == 'end-point':\n return int(maze['end-point'][0])\n return None", "title": "" }, { "docid": "233d5c21184eddb229bc7ff6a3f3e0d5", "score": "0.56903076", "text": "def _get_position(self):\n return self.__position", "title": "" }, { "docid": "fb5caa1f89258a7e989e65a302a453ab", "score": "0.5672333", "text": "def _get_node(self, node):\n return self.nodes[self.nodes_index.index(node.index)]", "title": "" }, { "docid": "5ef3218e4d211fab8aa819c76a48c90f", "score": "0.5672326", "text": "def get_valid_node(node):\n try:\n PyNode = pm.PyNode(node)\n except pm.MayaNodeError:\n print(\"Error: no node named : %s\" % node)\n return None\n\n return PyNode", "title": "" }, { "docid": "056d1f0727aff6d0b0eb2025488ebf6b", "score": "0.5668393", "text": "def get_index_from_node(self, node):\n try:\n return self.__nodes.index(node)\n except ValueError as _:\n return -1", "title": "" }, { "docid": "15f08745e8788c308fc981f2d95be17b", "score": "0.5661258", "text": "def get_position(self, position):\n counter = 1\n current = self.head\n if counter < 1:\n return None\n while counter <= position and current:\n if counter == position:\n return current\n current = current.next\n counter += 1\n return None", "title": "" }, { "docid": "4a349f2009dda6c1b400e4c89ec35008", "score": "0.56539154", "text": "def get_position(self):\r\n position = Vector3()\r\n if 'position' in self.data:\r\n position.x = self.data['position'][0]\r\n position.y = self.data['position'][1]\r\n position.z = self.data['position'][2]\r\n else:\r\n position.x = self.data['x']\r\n position.y = self.data['y']\r\n position.z = self.data['z']\r\n\r\n return position", "title": "" }, { "docid": "b3b66f3930e6b1da9ce4c9588e13ec78", "score": "0.56536806", "text": "def _get_min_node(self, node):\n\n 
if node.left is None:\n return node\n return self._get_min_node(node.left)", "title": "" }, { "docid": "46b3e11cc5fa28392b480b18f1a84fe3", "score": "0.5650953", "text": "def _validate(self, p):\n if not isinstance(p, self.Position):\n raise TypeError('p must be proper Position type')\n if p._container is not self:\n raise ValueError('p does not belong to this container')\n if p._node._parent is p._node: # convention for deprecated nodes\n raise ValueError('p is no longer valid')\n return p._node", "title": "" }, { "docid": "cc5580f18b6164ef2f058bdf5ff0aded", "score": "0.56467086", "text": "def _validate(self, p):\n if not isinstance(p, self.Position):\n raise TypeError('p must be proper Position type')\n if p._container is not self:\n raise ValueError('p does not belong to this container')\n if p._node._parent is p._node: # convention for deprecated nodes\n raise ValueError('p is no longer valid')\n return p._node", "title": "" }, { "docid": "6df0358313db871d0c2d0d01af247d5d", "score": "0.5639297", "text": "def parent_position(self, pos):\n primary_pos = self.tree_walker.parent_position(pos[0])\n if primary_pos is None:\n return None\n return (primary_pos,)", "title": "" }, { "docid": "bfdee94be3b994a18756607c273386a7", "score": "0.55942875", "text": "def getNode(self):\n self.node = self.rect.x / 10, self.rect.y / 10\n return self.node", "title": "" }, { "docid": "b3c7148f97f008f08abd3c1d4fb72570", "score": "0.5571014", "text": "def parent_position(self, pos):\n return None", "title": "" }, { "docid": "b3c7148f97f008f08abd3c1d4fb72570", "score": "0.5571014", "text": "def parent_position(self, pos):\n return None", "title": "" }, { "docid": "055b85b7d813c8b3bc6a1469e338d2e2", "score": "0.55538267", "text": "def min(self, node):\n if node is None:\n return None\n if node.left is not None:\n return self.min(node.left)\n else:\n return node", "title": "" }, { "docid": "bc71f9fa7450237094b59051c6a9eec7", "score": "0.5512815", "text": "def get_nearby_node(self, node, nodes_list):\n\n # Stores all the nearby nodes in the range of current node\n nearby_nodes = []\n for n_node in nodes_list:\n if node is not n_node and check_dict_within_range(node.get_pos(), node.range, n_node.get_pos()):\n nearby_nodes.append(n_node)\n\n if len(nearby_nodes) == 0:\n return None\n\n # Returns the minimum distance node amont the nearby nodes\n min_dist = float('inf')\n min_dist_node = None\n for n_node in nearby_nodes:\n cur_dist = get_dict_distance(node.get_pos(), n_node.get_pos())\n if cur_dist < min_dist:\n min_dist = cur_dist\n min_dist_node = n_node\n return min_dist_node", "title": "" }, { "docid": "2f0336202025f01bb62bd6e231baec68", "score": "0.5507213", "text": "def get_node(self, node_name: str) -> Optional[Node]:\n Validator.check_value_type(\"node_name\", node_name, [str], \"SymbolTree\")\n node_impl = self._symbol_tree.get_node(node_name)\n if node_impl is None:\n return None\n return Node(node_impl)", "title": "" }, { "docid": "2206406b09f697d1ece6cf307aae65b4", "score": "0.5490293", "text": "def get_position(self, include_location=False):\n try:\n location = next(iter(self.positions.keys()))\n position = self.positions[location]\n except StopIteration:\n location = position = None\n\n if include_location:\n return position, location\n else:\n return position", "title": "" }, { "docid": "2206406b09f697d1ece6cf307aae65b4", "score": "0.5490293", "text": "def get_position(self, include_location=False):\n try:\n location = next(iter(self.positions.keys()))\n position = self.positions[location]\n except 
StopIteration:\n location = position = None\n\n if include_location:\n return position, location\n else:\n return position", "title": "" }, { "docid": "46f8c8b695bc5976a2deaa411a52f940", "score": "0.5478241", "text": "def get_node_by_coord(self, coord, relative=False):\n if not isinstance(coord, (list, tuple)) or False in list(map(lambda i: type(i)==int, coord)):\n logger.warning(\"%s.get_node_by_coord: node«%s», arg «coord»=«%s», «coord» must be list or tuple of integers.\" % (self.__class__.__name__, self.name, coord))\n return None\n if relative:\n _node = self\n else:\n _node = self._root # _node = self.get_rootnode()\n for idx in coord:\n _node = _node.childs[idx]\n if _node is None:\n logger.warning(\"%s.get_node_by_coord: node«%s», arg «coord»=«%s» not valid.\" % (self.__class__.__name__, self.name, coord))\n return None\n return _node", "title": "" }, { "docid": "5f23c2eaaf1e864e365b8765147b1184", "score": "0.5468953", "text": "def get_position(self):\n return self.position()", "title": "" }, { "docid": "7e961ffc792ffde979f4463742e62ad2", "score": "0.5461513", "text": "def get_node(self, node_id: int) -> \"Node\":\n if node_id not in self.node_map:\n v = Node(node_id)\n self.node_map[node_id] = v\n else:\n v = self.node_map[node_id]\n return v", "title": "" }, { "docid": "cee6aa7fe56ba9f19257909da6f51ae1", "score": "0.54507107", "text": "def get_object_position(self, obj):\n\n if obj in self._object_position_dict:\n return self._object_position_dict[obj]\n else:\n return None", "title": "" }, { "docid": "e9b4284aa12298f0dd7278681d80af26", "score": "0.54402345", "text": "def _validate(self,p):\n\t\tif not isinstance(p,self.Position):\n\t\t\traise TypeError(\"p must be proper Position type\")\n\t\tif p._container is not self:\n\t\t\traise ValueError(\"p does not belong to this container\")\n\t\tif p._node._parent is p._node: #convention for deprecated nodes\n\t\t\traise ValueError(\"p is no longer valid\")\n\t\treturn p._node", "title": "" }, { "docid": "3dfc048d0b5279fa0a593f05fb936fb4", "score": "0.543912", "text": "def getposition(self):\n return self.position.get()", "title": "" }, { "docid": "e2c284f6b6111ac1a6832e709e1c8386", "score": "0.54384875", "text": "def get_node(self):\n self._update_node_cloudstate()\n return self.node", "title": "" }, { "docid": "6fb15bfc5f8e432acfb44a4b46e4125c", "score": "0.54361975", "text": "def _validate(self, p):\n if not isinstance(p, self.Position):\n raise TypeError('p must be propper Position type')\n if p._container is not self:\n raise ValueError('p doesnt belowng to this container')\n if p._node._next is None: # as with deprecated nodes\n raise ValueError('p is no longer valid')\n return p._node", "title": "" }, { "docid": "f1ffc5b149ca10814335dc8941221f82", "score": "0.5434235", "text": "def get_position(self):\n return self.position", "title": "" }, { "docid": "f1ffc5b149ca10814335dc8941221f82", "score": "0.5434235", "text": "def get_position(self):\n return self.position", "title": "" }, { "docid": "0b1cf861707faaffc9f25949d14c6834", "score": "0.54060775", "text": "def _node(self):\n return self._graph.node[self.node_id]", "title": "" }, { "docid": "5be685f4ea46e7da4bf362de1763ab71", "score": "0.54003096", "text": "def get_position(self):\n return self.getPosition()", "title": "" }, { "docid": "fc9a80a4294db9c0893084505c92d3ca", "score": "0.5394454", "text": "def getChild(self, node):\r\n if node.hasChildNodes():\r\n return node.first()\r\n else:\r\n return None", "title": "" }, { "docid": "c34461d7c38c0f238e6df41019515f0f", "score": 
"0.538894", "text": "def get_child(self, column):\n return GameTree._Position(self._node._children[column])", "title": "" }, { "docid": "354084cb16d80dcc848e4164f5d445e1", "score": "0.5378677", "text": "def _confirm_pos(self, pos):\n candidate = None\n if self._get_node(self._treelist, pos) is not None:\n candidate = pos\n return candidate", "title": "" }, { "docid": "354084cb16d80dcc848e4164f5d445e1", "score": "0.5378677", "text": "def _confirm_pos(self, pos):\n candidate = None\n if self._get_node(self._treelist, pos) is not None:\n candidate = pos\n return candidate", "title": "" }, { "docid": "354c4b31eda4c5a87e9e8dc46bcf9ff2", "score": "0.5372442", "text": "def find(self, data: any, node: Optional[Node] = None):\n\n node = self.root if node is None else node\n\n if data == node.data:\n return node\n elif data < node.data:\n if node.left is None:\n return None\n return self.find(data, node.left)\n else:\n if node.right is None:\n return None\n return self.find(data, node.right)", "title": "" }, { "docid": "926d08668d06b0333d3f392a5285fe92", "score": "0.5364302", "text": "def first_child_position(self, pos):\n return None", "title": "" }, { "docid": "926d08668d06b0333d3f392a5285fe92", "score": "0.5364302", "text": "def first_child_position(self, pos):\n return None", "title": "" }, { "docid": "23f094d89026ea3cc4eb2cba046f34a9", "score": "0.5360973", "text": "def pointInOpenList(self, position, open_list):\n for node in open_list:\n if node.position == position:\n return node\n return None", "title": "" }, { "docid": "73e6d1a02ff2a2967ee1ec578cbc7f08", "score": "0.5353287", "text": "def get_node(self, nid: NodeId) -> Optional[Node]:\n node_addr = self._node_address(nid)\n if node_addr == 0:\n return None\n ans, _ = self._read_node_from_properties(node_addr)\n return ans", "title": "" }, { "docid": "77aefd704b16214a7ed3285951622fab", "score": "0.53532827", "text": "def get_position(self):\n \n return self._position", "title": "" }, { "docid": "ea38a81f14b13d3f5ace4e488252b1c0", "score": "0.5341228", "text": "def position(self) -> Optional[pulumi.Input['EnterpriseCrmEventbusProtoCoordinateArgs']]:\n return pulumi.get(self, \"position\")", "title": "" }, { "docid": "ea38a81f14b13d3f5ace4e488252b1c0", "score": "0.5341228", "text": "def position(self) -> Optional[pulumi.Input['EnterpriseCrmEventbusProtoCoordinateArgs']]:\n return pulumi.get(self, \"position\")", "title": "" }, { "docid": "784d5e02b6e5d74d4051f0151149d336", "score": "0.5328469", "text": "def node(self):\n self._verify_in_node_context()\n return self._node", "title": "" }, { "docid": "d07b9e8fd5a9cd1dfb9b669a34eac233", "score": "0.5321495", "text": "def nearest_node(self, point):\n # Find the nearest node in the rTree index\n idx = self.make_tree()[0]\n node_nearest_point = list(idx.nearest((point.x, point.y)))\n return node_nearest_point", "title": "" }, { "docid": "f97212fac51df289d2031c61b9e31edc", "score": "0.53181875", "text": "def get_node_at(self, node_position, reverse=False):\r\n\r\n if reverse:\r\n result = self.nodes[0 - node_position]\r\n else:\r\n result = self.nodes[node_position -1]\r\n\r\n return result", "title": "" }, { "docid": "b65d4433aceea4be13447ea1980ca9b9", "score": "0.530839", "text": "def __init__(self, node: Optional[Node] = None):\n self._node = node if node is not None else Node(parent=None)", "title": "" }, { "docid": "3f417204843e2e833c8d1c807f45ba87", "score": "0.53023344", "text": "def get_node(self):\r\n node = self.xmlnode.prop(\"node\")\r\n if not node:\r\n return None\r\n return 
node.decode(\"utf-8\")", "title": "" }, { "docid": "f92ac0c313833835e5b8a3b6c166d638", "score": "0.5301097", "text": "def findnode(self,item,node=None):\n if node is None: node = self.root\n while not node.isleaf():\n if item == node.value: return node\n node = (node.l if item < node.value else node.r)\n return None", "title": "" }, { "docid": "313ca22a3df6efa57591765792c42915", "score": "0.5293896", "text": "def get_node_as_parent(self, node_key):\n try:\n return self._index[node_key]\n except KeyError:\n return None", "title": "" }, { "docid": "b55eb2700456933631d892710d2c56e7", "score": "0.5293305", "text": "def getPosition(): \n if myro.globvars.robot:\n return myro.globvars.robot.getPosition()\n else:\n raise AttributeError, \"need to initialize robot\"", "title": "" }, { "docid": "1677ee430588bd44123d7285f0e5d596", "score": "0.5289629", "text": "def get_position(self, addr=0):\n return self.get_axis_parameter(1,addr=addr)", "title": "" }, { "docid": "7e855b21b20ddf6897da0d035c0b38ac", "score": "0.52833396", "text": "def get_node(self):\r\n node = self.xmlnode.prop(\"node\")\r\n if node is None:\r\n return None\r\n return node.decode(\"utf-8\")", "title": "" }, { "docid": "09d890d8e4d03fcd92c00779f9137729", "score": "0.52780837", "text": "def getPosition(self):\n return self._position", "title": "" }, { "docid": "2db5fa5ef7675fd17003f0779e3bd339", "score": "0.52742004", "text": "def findAtomAt(self, pos):\n key = (pos.x() - pos.x()%AtomPair.XSIZE, pos.y() - pos.y()%AtomPair.YSIZE)\n if key in self.selection_atoms:\n return self.selection_atoms[key]\n elif key in self.surface_atoms:\n return self.surface_atoms[key]\n else:\n return None", "title": "" }, { "docid": "957c767509cadfccb6e217a600bca88f", "score": "0.5271602", "text": "def get_position(self) -> Vector:\r\n return self.position", "title": "" }, { "docid": "bcde87650ee4b9ce49a72b4363dc9a39", "score": "0.5269476", "text": "def init(self, scene, node):\n if node is not None and scene is not None:\n self.set_npos(node.get_value('x'), node.get_value('y'))\n self.update()\n self._sibling = self._axis.get_sibling_ref(self)", "title": "" }, { "docid": "b7dcf3a43afec517bf8b89e8765339a7", "score": "0.52660954", "text": "def get_position(self) -> Vector:\n return self._position", "title": "" }, { "docid": "c820cc50f01ab39a115b0d60735fd6ba", "score": "0.5263065", "text": "def nodeAt(self, *args):\n item = QtWidgets.QGraphicsScene.itemAt(self, *args)\n if hasattr(item, 'node'):\n if hasattr(item.node, 'node_class'):\n item = item.node\n return item", "title": "" }, { "docid": "f8bb5886f218ede3f51c9c3156b73449", "score": "0.5259743", "text": "def get_pos(self):\n return self.position", "title": "" }, { "docid": "780e6ef54ccd175455583e9a33cd3d9e", "score": "0.5258354", "text": "def get_node_by_name(self, node_name: str) -> Optional[onnx.NodeProto]:\n if self._node_name_to_node is None:\n self._update_node_names()\n return self._node_name_to_node[node_name] if node_name in self._node_name_to_node else None", "title": "" }, { "docid": "60dac2a783494adf80ec79c731f15e70", "score": "0.52554566", "text": "def getposition(self):\n return self.__position", "title": "" }, { "docid": "295e1729d5c741afe52c318bf1563300", "score": "0.5246691", "text": "def getPosition(self):\r\n return self.position", "title": "" }, { "docid": "9abd4636465b7ee606cc8181baf34bd9", "score": "0.52438104", "text": "def getPosition(self):\n \n return self._position", "title": "" }, { "docid": "b403daedd3c13327a4b384db608063ba", "score": "0.5239151", "text": "def __min(self, 
node):\n if node['left'] == None:\n return node\n else:\n return self.__min(node['left'])", "title": "" }, { "docid": "ba9a5b26cd8b44ba8b87ef8c84414a0e", "score": "0.5227347", "text": "def position(da: Union[sc.DataArray, sc.Dataset]) -> sc.Variable:\n return _derived_coord(da, 'position')", "title": "" }, { "docid": "1482b81e81b839301852c1a6d09a898e", "score": "0.5226246", "text": "def get_next_position(self):\r\n if len(self.playing) == 0:\r\n return None\r\n \r\n next_player = self.get_next_player()\r\n return None if next_player is None else next_player.position", "title": "" }, { "docid": "4f0207ed373a686ab4fc139ecf0bdf21", "score": "0.5221137", "text": "def node(self, node_id):\n return self._get(['node', node_id])", "title": "" }, { "docid": "5d1cb3c891ccfa475d0ece1061661c53", "score": "0.52139527", "text": "def get_node(self):\r\n\r\n node=self.xmlnode.prop(\"node\")\r\n if not node:\r\n return None\r\n return node.decode(\"utf-8\")", "title": "" }, { "docid": "d15f7267371f1cb7d513203e316597df", "score": "0.5213563", "text": "def calculate_location(win, node):\r\n id_ = []\r\n while node != win.top:\r\n idx = node.parent().indexOfChild(node)\r\n id_.insert(0, idx)\r\n node = node.parent()\r\n return tuple(id_)", "title": "" }, { "docid": "5b326b64d272fe62e99054fcbc1eba39", "score": "0.5201837", "text": "def min_node(self, node):\n if node is None:\n return\n curr = node\n while curr.left is not None:\n curr = curr.left\n return curr", "title": "" }, { "docid": "71cca54443c847f70fad967f3ff10bf6", "score": "0.5198642", "text": "def _get_position(move):\n match = POSITION_REGEX.search(move)\n return Position(col=COL_MAP[match.group(1)], row=ROW_MAP[match.group(2)])", "title": "" }, { "docid": "e4e8f147aba477bbd2f087d44ddd42d4", "score": "0.51917624", "text": "def get_position(self):\n return self._send_message('get', '/position')", "title": "" }, { "docid": "e4e8f147aba477bbd2f087d44ddd42d4", "score": "0.51917624", "text": "def get_position(self):\n return self._send_message('get', '/position')", "title": "" }, { "docid": "edbd554911b67c7fa218d221944bb14c", "score": "0.51913095", "text": "def get_path_position(self, pos, units='relative'):\r\n if (units == \"relative\"):\r\n pos = pos * self.path_length\r\n for (dist, this_pos) in self.path:\r\n if (dist >= pos):\r\n return this_pos\r\n return None", "title": "" }, { "docid": "7eb4d13c04997a2cc0895f11e2d451b3", "score": "0.5185098", "text": "def from_node(self) -> INode:\n return self.node_store[self.from_node_name]", "title": "" }, { "docid": "8962706ae57e40a3a17afa228c6834a8", "score": "0.5184165", "text": "def get_node(name):\n nodes = list_nodes()\n if name in nodes:\n return nodes[name]\n return None", "title": "" }, { "docid": "4fe8bdd041f994d8b9e018293b3417f1", "score": "0.51779526", "text": "def position(self) -> Point:\n return Point(self._x_position, self._y_position)", "title": "" }, { "docid": "0289c303598bb4dde0177cb6e6aeba1c", "score": "0.51753414", "text": "def get_node_from_point(self, point, max_depth=None):\n def node_matches():\n if node.get_origin() == point:\n return True\n elif node.is_leaf():\n return True\n else:\n return False\n\n half_size = self.size / 2.0\n for i in xrange(self.DIMENSIONS):\n if abs(point[i]) > half_size:\n return None\n\n node = self._create_node_proxy(self._data, parent=None, index=0)\n depth = 0\n max_depth = max_depth if max_depth is not None else self.max_depth\n while depth < max_depth:\n if node_matches():\n return node\n node = node.get_closest_child(point)\n depth += 1\n\n return 
node", "title": "" }, { "docid": "9e2aff663d8600c16d24f4cc9d6a3c10", "score": "0.5175134", "text": "def get_position(self):\r\n pass", "title": "" }, { "docid": "db240cddc2ad01dbcce654d618eee039", "score": "0.5172795", "text": "def get_position(self) -> tuple:\n return self.__position.get_coordinates()", "title": "" }, { "docid": "d9937c68325eb8d5584a7d2a5efc8487", "score": "0.5168302", "text": "def get_position(self):\r\n position = Vector3()\r\n position.x = self.data['x']\r\n position.y = self.data['y']\r\n position.z = self.data['z']\r\n return position", "title": "" } ]
762d615cea8ef6f6790668f7190a5359
Reprioritise tasks to pri if current date is days before due date. Will not reprioritise tasks if they are already higher priority than pri.
[ { "docid": "50fea5512faf7e418884eb9e87225891", "score": "0.56365365", "text": "def main(todo_file, todo_full_sh, days=1, pri=\"A\", list_tasks=False):\n days = int(days)\n tasks_with_due_date = []\n\n # Open todo.txt file\n with open(todo_file, \"r\") as f:\n content = f.readlines()\n date = datetime.today()\n\n # Loop through content and look for due dates, assuming standard date format\n due_key = os.getenv(\"TODO_TXT_DUE_KEY\", \"due\")\n\n for i, task in enumerate(content):\n task = task.replace(\"\\n\", \"\")\n match = re.findall(r\"%s:(\\d{4}-\\d{2}-\\d{2})\" % due_key, task)\n\n if match:\n date = datetime.strptime(match[0], \"%Y-%m-%d\").date()\n tasks_with_due_date.append((i+1, task, date))\n\n # only non completed tasks\n non_complete_tasks_with_due_date = []\n for task in tasks_with_due_date:\n if (task[1][0] == \"x\") and (task[1][1] == \" \"):\n continue\n non_complete_tasks_with_due_date.append(task)\n\n # get tasks within set days\n tasks_within_days = []\n for task in non_complete_tasks_with_due_date:\n if task[2] < datetime.today().date() + timedelta(days+1):\n tasks_within_days.append(task)\n\n # get tasks within priority\n tasks_within_pri = []\n for task in tasks_within_days:\n match = re.search(r\"\\(([A-Z])\\)\\s\", task[1])\n if not match:\n tasks_within_pri.append(task)\n else:\n task_pri = task[1][1]\n if task_pri > pri:\n tasks_within_pri.append(task)\n\n repri_tasks = True\n if list_tasks:\n tasks_to_print = []\n zero_pad = int(math.log10(len(content))) + 1\n for task in tasks_within_pri:\n tasks_to_print.append(str(task[0]).zfill(zero_pad) + \" \" + task[1])\n # Print to console\n if len(tasks_to_print) > 0:\n print(\"Tasks to reprioritise\")\n print(\"=====================\\n\")\n for task in tasks_to_print:\n print(task)\n while True:\n user_input = input(\"\\nReprioritise tasks?(y/n)\")\n if user_input == \"y\":\n break\n if user_input == \"n\":\n repri_tasks = False\n break\n\n if repri_tasks:\n for task in tasks_within_pri:\n os.system(todo_full_sh + \" pri \" + str(task[0]) + \" \" + pri)", "title": "" } ]
[ { "docid": "f48dcd47e2fc719ffe12251564205dd0", "score": "0.6258063", "text": "def change_task_prio():\n\n user = current_user.self\n fields = 'proj_name', 'task_name', 'dir'\n fields = proj_name, task_name, dir_ = [request.args.get(i) for i in fields]\n\n if not all(fields) or not dir_ in ('1', '-1'):\n return dumps(dict(status='fail'))\n\n proj = Project.get_or_none(\n Project.owner == user and Project.name == proj_name)\n if not proj:\n return dumps(dict(status='fail'))\n\n task = Task.get_or_none(Task.project == proj and Task.name == task_name)\n if not task:\n return dumps(dict(status='fail'))\n\n i = task.priority\n swap = (Task\n .select()\n .where(Task.project == proj\n and Task.priority > i if dir_ == '1' else Task.priority < i)\n .order_by(Task.priority if dir_ == '1' else Task.priority.desc()))\n\n swap = swap.get() if swap.exists() else None\n if not swap:\n return dumps(dict(status='fail'))\n\n with db.atomic() as tract:\n try:\n\n tmp = task.priority\n swap.priority, task.priority = -1, swap.priority\n\n if not (swap.save() and task.save()):\n raise PeeweeException('failed to change tasks order')\n\n swap.priority = tmp\n\n if not swap.save():\n raise PeeweeException('failed to change tasks order')\n\n query = (Task\n .select()\n .where(Task.project == proj)\n .order_by(Task.priority.desc()))\n\n return dumps(dict(status='success',\n tasks=[get_task(i) for i in query]))\n\n except PeeweeException:\n tract.rollback()\n return dumps(dict(status='fail'))", "title": "" }, { "docid": "27f7e241b75b229b52fa8e0bd1a6d4b0", "score": "0.61331284", "text": "def change_priority(self, old, new):\r\n self.__current = self.__head\r\n found = 0\r\n priority = self.__current.get_priority()\r\n if(priority == old):\r\n # in case the task in the head is the one that its priority\r\n # needs to changed\r\n self.__head.get_task().set_priority(new)\r\n task_to_insert = self.deque()\r\n self.enque(task_to_insert)\r\n # the new priority may be lower than the old one\r\n found = 1\r\n while(not found):\r\n if (not self.__current.has_next()):\r\n # in case the priority isn't in the queue\r\n break\r\n priority = self.__current.get_next().get_priority()\r\n if(priority == old):\r\n self.__current.get_next().get_task().set_priority(new)\r\n if(self.__current.get_next().has_next()):\r\n if(self.__current.get_next().get_next().get_task()\r\n .get_priority() != new):\r\n # in case the new priority is not as the same as\r\n # the priority of the next link there is no need\r\n # to re enque the task\r\n task_to_insert = self.__current.get_next().get_task()\r\n self.__current.set_next(self.__current.get_next()\r\n .get_next())\r\n self.enque(task_to_insert)\r\n else:\r\n # in case the task is the last in the queue\r\n task_to_insert = self.__current.get_next().get_task()\r\n self.__current.set_next(None)\r\n self.enque(task_to_insert)\r\n found = 1\r\n else:\r\n # if the priority wasn't found the function\r\n # continues to the next task\r\n self.__current = self.__current.get_next()", "title": "" }, { "docid": "b9f61a3d43c2c7e700541fb06fbe898a", "score": "0.5849588", "text": "def df_rt_tasks(self, min_prio=100):\n df = self.trace.df_event('sched_switch')\n\n # Filters tasks which have a priority bigger than threshold\n df = df[df.next_prio <= min_prio]\n\n # Filter columns of interest\n rt_tasks = df[['next_pid', 'next_prio']]\n rt_tasks = rt_tasks.drop_duplicates()\n\n # Order by priority\n rt_tasks.sort_values(\n by=['next_prio', 'next_pid'], ascending=True, inplace=True)\n rt_tasks.rename(\n 
columns={'next_pid': 'pid', 'next_prio': 'prio'}, inplace=True)\n\n rt_tasks.set_index('pid', inplace=True)\n rt_tasks['comm'] = rt_tasks.index.map(self._get_task_pid_name)\n\n return rt_tasks", "title": "" }, { "docid": "f891608447f17caec401280bf2b453b8", "score": "0.5847407", "text": "def __default_prio_assign(self, func=None):\r\n # Iterate for max instances of task in taskset.\r\n # Priority minimum to maximum.\r\n taskset_schedulable = False\r\n total_intf = 0\r\n taskset_len = len(self.taskset)\r\n taskset_copy = copy(self.taskset)\r\n priority_vals = [i for i in range(taskset_len)]\r\n\r\n if func is None:\r\n func = self.__default_rtb\r\n # Pick each task, check if its eligible for lowest prio. if not push to\r\n # end of queue, pick a new one. repeat till all tasks are assigned priorities or\r\n # taskset is unschedulable.\r\n for prio in priority_vals:\r\n eligible = False\r\n task_pick = taskset_copy.popfront()\r\n taskset_len = len(taskset_copy)\r\n for var in range(taskset_len):\r\n total_intf = func(taskset_copy)\r\n if self.is_eligible(task_pick, total_intf):\r\n eligible = True\r\n self.prio_assigned_taskset[var].pr_lo = prio\r\n break\r\n else:\r\n taskset_copy.push(task_pick)\r\n task_pick = taskset_copy.popfront()\r\n if not eligible:\r\n taskset_schedulable = False\r\n break\r\n return taskset_schedulable", "title": "" }, { "docid": "559325e6725c1e943095f7ecb1ef74ae", "score": "0.5824812", "text": "def test_preemptive_priorities(self):\n\n # First without preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.Sequential([7.5, float('inf')])],\n 'Class 1': [ciw.dists.Sequential([1.5, 3.5, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(4)], \n 'Class 1': [ciw.dists.Deterministic(5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [False])\n )\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n recs.sort(key=lambda r: r.arrival_date)\n self.assertEqual(recs[0].arrival_date, 1.5)\n self.assertEqual(recs[1].arrival_date, 5)\n self.assertEqual(recs[2].arrival_date, 7.5)\n self.assertEqual(recs[0].waiting_time, 0)\n self.assertEqual(recs[1].waiting_time, 1.5)\n self.assertEqual(recs[2].waiting_time, 4)\n self.assertEqual(recs[0].service_start_date, 1.5)\n self.assertEqual(recs[1].service_start_date, 6.5)\n self.assertEqual(recs[2].service_start_date, 11.5)\n self.assertEqual(recs[0].service_end_date, 6.5)\n self.assertEqual(recs[1].service_end_date, 11.5)\n self.assertEqual(recs[2].service_end_date, 15.5)\n\n # Now with preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.Sequential([7.5, float('inf')])],\n 'Class 1': [ciw.dists.Sequential([1.5, 3.5, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(4)], \n 'Class 1': [ciw.dists.Deterministic(5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"resample\"])\n )\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(20)\n all_recs = Q.get_all_records()\n recs = [r for r in all_recs if r.record_type == 'service']\n recs.sort(key=lambda r: r.arrival_date)\n self.assertEqual(recs[0].arrival_date, 1.5)\n self.assertEqual(recs[1].arrival_date, 5)\n self.assertEqual(recs[2].arrival_date, 7.5)\n self.assertEqual(recs[0].waiting_time, 0)\n self.assertEqual(recs[1].waiting_time, 6.5)\n self.assertEqual(recs[2].waiting_time, 0)\n self.assertEqual(recs[0].service_start_date, 1.5)\n 
self.assertEqual(recs[1].service_start_date, 11.5)\n self.assertEqual(recs[2].service_start_date, 7.5)\n self.assertEqual(recs[0].service_end_date, 6.5)\n self.assertEqual(recs[1].service_end_date, 16.5)\n self.assertEqual(recs[2].service_end_date, 11.5)\n\n # Test there are interrupted service data records\n interrupted_recs = [r for r in all_recs if r.record_type == 'interrupted service']\n self.assertEqual(len(interrupted_recs), 1)\n self.assertEqual(interrupted_recs[0].arrival_date, 5)\n self.assertEqual(interrupted_recs[0].service_start_date, 6.5)\n self.assertEqual(interrupted_recs[0].waiting_time, 1.5)\n self.assertEqual(interrupted_recs[0].exit_date, 7.5)\n self.assertEqual(interrupted_recs[0].service_time, 5)\n self.assertTrue(isnan(interrupted_recs[0].service_end_date))", "title": "" }, { "docid": "e6f1e25f3c7efc193a15a65ec3160389", "score": "0.5799506", "text": "def set_fixed_priorities(task_set, mode='dm'):\n if mode == 'rm':\n tasks = sorted(task_set.tasks, key=lambda t: t.period, reverse=True)\n elif mode == 'dm':\n tasks = sorted(task_set.tasks, key=lambda t: t.deadline, reverse=True)\n for idx, task in enumerate(tasks):\n task.static_prio = idx", "title": "" }, { "docid": "936f8f16e39864458f857312d3888aa6", "score": "0.5653596", "text": "def run_due_tasks(self):\n for task in self.tasks:\n task.run_if_due()\n self.remove_completed_tasks()", "title": "" }, { "docid": "989795b425d5bac1a5e8b74483540c50", "score": "0.5637353", "text": "def increase_task(self, i, task):\n #consider when to update the value of a task based on different criteria such as duration.\n #compare the recent task versus the parent and did swap if neccessary.\n if task[2] < self.heap[i][2]:\n print('new task\\'s duration is smaller than the current task\\'duration ')\n self.heap[i] = task\n while i > 0 and self.heap[parent(i)][2] < self.heap[i][2]:\n j = parent(i)\n holder = self.heap[j]\n self.heap[j] = self.heap[i]\n self.heap[i] = holder\n i = j", "title": "" }, { "docid": "f06d37348ad88594de0627ab00181f31", "score": "0.5448477", "text": "def schedule(self):\n \n #for tihs algorithm we travel the entire array anyway so no need to \n #actually start from curridx\n \n #if the current is not completed \n if self.tasks[self.curridx].STATE == STATE_RUN:\n min_prio = self.tasks[self.curridx].prio\n min_dead = self.tasks[self.curridx].dead\n schedule_this = self.curridx\n #else take them from IDLE\n #the effect is that if idle is the only one in the run queue it will keep running\n #else it will preempted by any possible task\n else:\n min_prio = self.tasks[-1].prio\n min_dead = self.tasks[-1].dead\n schedule_this = self.idle_id\n \n\n\n for tnext in self.idx_needs_schedul: \n \n \n tprio = self.tasks[tnext].prio\n tdead = self.tasks[tnext].dead\n \n \n if tprio == min_prio:\n #if the next deadline is shorter schedule this \n if tdead < min_dead:\n schedule_this = tnext\n \n #there is a task with higher priority \n if tprio < min_prio:\n #update the min prio\n min_prio = tprio\n min_dead = tdead\n schedule_this = tnext\n \n\n\n print(\"Schedule from {} to {}\".format( self.tasks[self.curridx].ID, self.tasks[schedule_this].ID ) ) \n self.curridx = schedule_this", "title": "" }, { "docid": "c9cf6037e5d67a751d299bc4d3de8a44", "score": "0.53451604", "text": "def priq_pri_1():\n from priorityq import PriQ\n new_priq = PriQ()\n new_priq.insert(7, 1)\n return new_priq", "title": "" }, { "docid": "a4241eac6bfed8bffa74be45a5ae8e88", "score": "0.5289745", "text": "def main(todo_file, future_days=1):\n # Prepare lists 
to store tasks\n overdue = list()\n due_today = list()\n due_tmr = list()\n due_future = list()\n tasks_with_date = list()\n\n # Open todo.txt file\n with open(todo_file, \"r\") as f:\n content = f.readlines()\n date = datetime.today()\n\n # Loop through content and look for due dates, assuming standard date format\n key = os.getenv(\"TODO_TXT_DUE_KEY\", \"due\")\n\n for i, task in enumerate(content):\n match = re.findall(r\"%s:(\\d{4}-\\d{2}-\\d{2})\" % key, task)\n\n if match:\n date = datetime.strptime(match[0], \"%Y-%m-%d\").date()\n tasks_with_date.append((i, task, date))\n\n # Sort tasks with a due date: regex by date, then priority\n sorted_tasks = sorted(tasks_with_date, key=lambda tup: (tup[2], tup[1]))\n zero_pad = int(math.log10(len(content))) + 1\n\n # Append to relevant lists for output\n for task in sorted_tasks:\n # Add matching tasks to list with line number\n if task[2] < datetime.today().date():\n overdue.append(str(task[0] + 1).zfill(zero_pad) + \" \" + task[1])\n elif task[2] == datetime.today().date():\n due_today.append(str(task[0] + 1).zfill(zero_pad) + \" \" + task[1])\n elif task[2] == datetime.today().date() + timedelta(days=1):\n due_tmr.append(str(task[0] + 1).zfill(zero_pad) + \" \" + task[1])\n elif task[2] < datetime.today().date() + timedelta(days=future_days + 1):\n due_future.append(str(task[0] + 1).zfill(zero_pad) + \" \" + task[1])\n\n # Print to console\n if len(overdue) > 0:\n print(\"===================================\")\n print(\"Overdue tasks:\")\n print(\"===================================\")\n for task in overdue:\n task_print(task)\n if len(due_today) > 0:\n print(\"\\n===================================\")\n print(\"Tasks due today:\")\n print(\"===================================\")\n for task in due_today:\n task_print(task)\n if len(due_tmr) > 0 and future_days >= 1:\n print(\"\\n===================================\")\n print(\"Tasks due tomorrow:\")\n print(\"===================================\")\n for task in due_tmr:\n task_print(task)\n if len(due_future) > 0:\n print(\"\\n===================================\")\n print(f\"Tasks due in the next {str(future_days)} days:\")\n print(\"===================================\")\n for task in due_future:\n task_print(task)", "title": "" }, { "docid": "872142e7e829c0e0ae8057146c621008", "score": "0.52759415", "text": "def find_missing_priorities (self):\r\n\r\n all_tasks = self.todo.values()\r\n present_priorities = [t[0] for t in all_tasks]\r\n missing_priorities = set(range(1,max(present_priorities))) - set(present_priorities)\r\n return missing_priorities", "title": "" }, { "docid": "2fac65037c9efd261f4048c9924f82d7", "score": "0.5255127", "text": "def work_prioritizer():\n\tdef signal_handler(sig, frame):\n\t\tlogging.info('work_prioritizer is being terminated')\n\t\tsys.exit(0)\n\n\t# handle CTRL-C to stop subprocess\n\tsignal.signal(signal.SIGINT, signal_handler)\n\n\t# instantiate the queue in interface\n\tqueue = PubSub.PubSubFactory.get_queue()\n\n\t# topics are arranged highest to lowest\n\ttr = TopicReader.Topics()\n\tif not tr:\n\t\tlogging.error('No topics found')\n\t\texit(-1)\n\n\t# get the topic where work to be prioritized is queued\n\tpriority_topic = tr.get_priority_topic()\n\n\twhile True:\n\t\t# TODO: always load the topics in case they have changed? 
wait until using memory cache\n\n\t\t# pull next work to prioritize\n\t\tlogging.debug('Pulling work from priority_topic: ' + priority_topic)\n\t\tmessages = queue.pull(priority_topic, 1)\n\n\t\tif not messages: # if there are no messages on that queue, move to next one.\n\t\t\tlogging.debug('no work found on prioritization queue')\n\t\t\ttime.sleep(10)\n\t\t\tcontinue # while loop\n\n\t\t# If we got any messages\n\t\tfor message in messages: # loop through all of the messages and process each one\n\t\t\tlogging.debug('message: ' + str(message) + ' pulled from: ' + str(priority_topic))\n\n\t\t\t# use the message to extract a priority. This is done in the user specific MyWork.py.\n\t\t\tscore = MyWork.prioritize(message)\n\t\t\ttopic_to_publish_on = tr.get_topic(score)\n\t\t\tif topic_to_publish_on:\n\t\t\t\tlogging.info('publishing: ' + str(message) + ' on topic: ' + str(topic_to_publish_on))\n\t\t\t\tqueue.publish(topic_to_publish_on, message)\n\t\t\telse:\n\t\t\t\tlogging.error('could not find a topic to send work to for score: ' + str(score))\n\t\t\t\tqueue.log_failed_work(message)\n\n\t\t\tqueue.ack(message) # make sure it doesn't get processed again", "title": "" }, { "docid": "4258794045746d00312b4c446cf7164b", "score": "0.5206503", "text": "def priq_1():\n from priorityq import PriQ\n new_priq = PriQ()\n new_priq.insert(7)\n return new_priq", "title": "" }, { "docid": "9f2e3488e10ab6e80c41032305e8cd05", "score": "0.5192854", "text": "def add_task(self, task):\n self.tasks.append(task)\n self.tasks.sort(key=lambda t: t.due_at)", "title": "" }, { "docid": "ed081420bc93ba94737fa7e02fa9e5f2", "score": "0.5158653", "text": "def assign_next_task(self,group_names=['main']):\n from datetime import datetime\n db = self.db\n queued = (db.task_scheduled.status==QUEUED)\n allocated = (db.task_scheduled.status==ALLOCATED)\n due = (db.task_scheduled.enabled==True)\n due &= (db.task_scheduled.group_name.belongs(group_names))\n due &= (db.task_scheduled.next_run_time<datetime.now())\n assigned_to_me = (db.task_scheduled.assigned_worker_name==self.worker_name)\n not_assigned = (db.task_scheduled.assigned_worker_name=='')|\\\n (db.task_scheduled.assigned_worker_name==None)\n # grab all queue tasks\n counter = db(queued & due & (not_assigned|assigned_to_me)).update(\n assigned_worker_name=self.worker_name,status=ALLOCATED)\n db.commit()\n if counter:\n # pick the first\n row = db(allocated & due & assigned_to_me).select(\n orderby=db.task_scheduled.next_run_time,limitby=(0,1)).first()\n # release others if any\n if row:\n row.update_record(status=RUNNING,last_run_time=datetime.now())\n db(allocated & due & assigned_to_me).update(\n assigned_worker_name=None,status=QUEUED)\n db.commit()\n else:\n row = None\n return row", "title": "" }, { "docid": "9dafc67995a164d7d4733b6221f6ccaf", "score": "0.5154344", "text": "def test_collect_before_due_date(self, exchange, course_dir, cache, db, before_duedate):\n # Release assignment\n self._release_and_fetch(\"ps1\", exchange, course_dir)\n\n # Submit something, wait, submit again. 
Due date is between.\n if before_duedate != 'nofirst':\n # We don't submit first assignment.\n self._submit(\"ps1\", exchange, cache)\n time.sleep(.05)\n time_duedate = datetime.datetime.utcnow()\n time.sleep(.05)\n self._submit(\"ps1\", exchange, cache)\n\n # Set the due date\n with Gradebook(db) as gb:\n gb.update_or_create_assignment('ps1', duedate=time_duedate)\n\n # Collect\n flags = ['--db', db]\n if before_duedate != 'no':\n flags.append('--before-duedate')\n self._collect(\"ps1\", exchange, flags=flags)\n\n root = os.path.os.path.join(os.path.join(course_dir, \"submitted\", get_username(), \"ps1\"))\n timestamp = self._read_timestamp(root)\n # Test both ways: with --before-duedate flag and without\n if before_duedate == 'yes':\n assert timestamp < time_duedate\n else: # 'no', 'nofirst'\n assert timestamp > time_duedate", "title": "" }, { "docid": "f6ba628404b87e8b05a2919d43d2a052", "score": "0.51210564", "text": "def sort_by_prio(self, mode=\"low\"):\r\n new_taskset = []\r\n if mode == \"low\":\r\n # Sort for low criticality.\r\n new_taskset = copy(self.taskset)\r\n new_taskset = sorted(new_taskset, key=lambda x: x.pr_lo, reverse=True)\r\n else:\r\n for task in self.taskset:\r\n if task.crit:\r\n new_taskset.append(task)\r\n new_taskset = sorted(new_taskset, key=lambda x: x.pr_hi, reverse=True)\r\n return new_taskset", "title": "" }, { "docid": "249d946030640bb8e55cf8e0dfc34771", "score": "0.5078571", "text": "def get_task_priority(tasks):\r\n priority = []\r\n for i, task in enumerate(tasks):\r\n if task == 'discrete_num' or 'string': # classification goes first\r\n priority = [i] + priority\r\n elif task == 'continuous_num': # regression goes last\r\n priority = priority + [i]\r\n return priority", "title": "" }, { "docid": "bbcd1437c6113b8b2d68ef3cb842e790", "score": "0.50658137", "text": "def ageQueue(self):\n [t.setPriority(t.getPriority() + 1) for t in self._queue]", "title": "" }, { "docid": "387946e641d9e0ced594ae644774bdb1", "score": "0.505955", "text": "def test_preemptive_priorities_at_class_change(self):\n # First without preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.NoArrivals()],\n 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(2.5)], \n 'Class 1': [ciw.dists.Deterministic(2.5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [False]),\n class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]\n )\n Q = ciw.Simulation(N, exact=26)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n recs.sort(key=lambda r: r.arrival_date)\n self.assertEqual(float(recs[0].arrival_date), 2)\n self.assertEqual(float(recs[1].arrival_date), 4)\n self.assertEqual(float(recs[2].arrival_date), 6)\n self.assertEqual(float(recs[3].arrival_date), 8)\n self.assertEqual(float(recs[4].arrival_date), 10)\n self.assertEqual(float(recs[0].waiting_time), 0)\n self.assertEqual(float(recs[1].waiting_time), 0.5)\n self.assertEqual(float(recs[2].waiting_time), 1)\n self.assertEqual(float(recs[3].waiting_time), 1.5)\n self.assertEqual(float(recs[4].waiting_time), 2)\n self.assertEqual(float(recs[0].service_start_date), 2)\n self.assertEqual(float(recs[1].service_start_date), 4.5)\n self.assertEqual(float(recs[2].service_start_date), 7)\n self.assertEqual(float(recs[3].service_start_date), 9.5)\n self.assertEqual(float(recs[4].service_start_date), 12)\n self.assertEqual(float(recs[0].service_end_date), 4.5)\n 
self.assertEqual(float(recs[1].service_end_date), 7)\n self.assertEqual(float(recs[2].service_end_date), 9.5)\n self.assertEqual(float(recs[3].service_end_date), 12)\n self.assertEqual(float(recs[4].service_end_date), 14.5)\n\n # Now with preemption:\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.NoArrivals()],\n 'Class 1': [ciw.dists.Sequential([2, 2, 2, 2, 2, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(2.5)], \n 'Class 1': [ciw.dists.Deterministic(2.5)]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"resample\"]),\n class_change_time_distributions=[[None, None], [ciw.dists.Deterministic(1.2), None]]\n )\n Q = ciw.Simulation(N, exact=26)\n Q.simulate_until_max_time(20)\n all_recs = Q.get_all_records()\n recs = [r for r in all_recs if r.record_type == 'service']\n recs.sort(key=lambda r: r.arrival_date)\n self.assertEqual(float(recs[0].arrival_date), 2)\n self.assertEqual(float(recs[1].arrival_date), 4)\n self.assertEqual(float(recs[2].arrival_date), 6)\n self.assertEqual(float(recs[3].arrival_date), 8)\n self.assertEqual(float(recs[4].arrival_date), 10)\n self.assertEqual(float(recs[0].waiting_time), 0)\n self.assertEqual(float(recs[1].waiting_time), 0.5)\n self.assertEqual(float(recs[2].waiting_time), 5.7)\n self.assertEqual(float(recs[3].waiting_time), 1.2)\n self.assertEqual(float(recs[4].waiting_time), 4.2)\n self.assertEqual(float(recs[0].service_start_date), 2)\n self.assertEqual(float(recs[1].service_start_date), 4.5)\n self.assertEqual(float(recs[2].service_start_date), 11.7)\n self.assertEqual(float(recs[3].service_start_date), 9.2)\n self.assertEqual(float(recs[4].service_start_date), 14.2)\n self.assertEqual(float(recs[0].service_end_date), 4.5)\n self.assertEqual(float(recs[1].service_end_date), 7)\n self.assertEqual(float(recs[2].service_end_date), 14.2)\n self.assertEqual(float(recs[3].service_end_date), 11.7)\n self.assertEqual(float(recs[4].service_end_date), 16.7)\n\n # Test interrupted service data records\n interrupted_recs = [r for r in all_recs if r.record_type == 'interrupted service']\n self.assertEqual(len(interrupted_recs), 1)\n self.assertEqual(float(interrupted_recs[0].arrival_date), 6)\n self.assertEqual(float(interrupted_recs[0].service_start_date), 7)\n self.assertEqual(float(interrupted_recs[0].waiting_time), 1)\n self.assertEqual(float(interrupted_recs[0].exit_date), 9.2)\n self.assertEqual(float(interrupted_recs[0].service_time), 2.5)\n self.assertTrue(isnan(interrupted_recs[0].service_end_date))", "title": "" }, { "docid": "274d5be51c3c83d17926e184a6d7b0f5", "score": "0.50162035", "text": "def run_if_due(self):\n now = time.time()\n due = now >= self.due_at\n if due and not self.completed:\n self.action()\n self.completed = True\n #", "title": "" }, { "docid": "1de8500068166a8ad23207f707910a5a", "score": "0.5015696", "text": "def min_priority_per_job(self) -> int:\n return pulumi.get(self, \"min_priority_per_job\")", "title": "" }, { "docid": "edad78d581f32100e8591436a546bc66", "score": "0.50047827", "text": "def tasksToday(self, projects, day, added=[]):\n entries = [entry for entry in self.entries if entry.getDay() == day]\n if entries == []:\n return []\n head, *tail = entries \n tasks = [(p, sp, i, task) for p in projects \n for sp in p.getSubprojects() \n for i, task in enumerate(sp.getAllTasks()) \n if not task.isDone() and not (p,sp,i,task) in added\n ]\n tasks = sorted(tasks, key=lambda x: x[3].getDueDate())\n l = [(p, sp, i, t) for p,sp,i,t in tasks 
\n if head.getProject().getName() == p.getName() and head.getSubproject().getName() == sp.getName()\n ]\n if l == []:\n return Schedule.fromDict(dict(\n self.toDict(), entries=[e.toDict() for e in tail]\n )).tasksToday(projects, day, added)\n first, *rest = l\n fp, fsp, _, ft = first\n if head.getDuration() > ft.getExpectedTime():\n return [(fp, fsp, ft)] + Schedule.fromDict(dict(\n self.toDict(), \n entries=[dict(head.toDict(), duration=head.getDuration() - ft.getExpectedTime())] + [\n e.toDict() for e in tail\n ]\n )).tasksToday(projects, day, [first] + added)\n elif head.getDuration() == ft.getExpectedTime():\n return [(fp, fsp, ft)] + Schedule.fromDict(dict(\n self.toDict(), \n entries=[e.toDict() for e in tail]\n )).tasksToday(projects, day, [first] + added)\n else:\n return [(fp, fsp, ft)] + Schedule.fromDict(dict(\n self.toDict(), entries=[e.toDict() for e in tail]\n )).tasksToday(projects, day, [first] + added)", "title": "" }, { "docid": "993ccabe7f59e3f45222c83f2de1bee3", "score": "0.4994197", "text": "def archive(self):\n for date in self.get_due_dates():\n # Add done tasks to archive list\n done_tasks = [task for task in self._tasks[date] if task.done]\n self._archive_tasks.extend(done_tasks)\n\n # Remove done_tasks from task_list\n self._tasks[date] = [task for task in self._tasks[date]\n if task not in done_tasks]", "title": "" }, { "docid": "706b7521cb11eddacd17c224d57a4419", "score": "0.4983851", "text": "def apply_task_defaults(subliminal, superliminal, pipeline, superliminal_before_tasks, superliminal_after_tasks):\n pipeline[__TASKS] = __apply_task_defaults(\n subliminal, superliminal, pipeline.get(__TASKS, []), superliminal_before_tasks, superliminal_after_tasks\n )\n\n return pipeline", "title": "" }, { "docid": "bd419819ccb3ba4cf33e8b5e7540e4be", "score": "0.4973389", "text": "def sort_by_crit(self):\r\n prev_ind = 0\r\n crit_prev = 0\r\n crit_curr = 0\r\n tasklen = len(self.taskset)\r\n prio_indices = []\r\n new_taskset = copy(self.taskset)\r\n new_taskset.sort(lambda x: x.crit, reverse=True)\r\n for i in range(tasklen):\r\n crit_curr = new_taskset[tasklen].crit\r\n if crit_curr != crit_prev:\r\n prio_indices.append((prev_ind, i))\r\n crit_prev = crit_curr\r\n prev_ind = i\r\n for ind in prio_indices:\r\n new_taskset[ind[0]:ind[1]] = sorted(new_taskset[ind[0]:ind[1]], key=lambda x: x.dl_lo, reverse=True)\r\n return new_taskset", "title": "" }, { "docid": "25a49d4dfbd5f09a32ed4486d0aa77f5", "score": "0.49663004", "text": "def get_due_dates(self):\n\n # None cannot be compared to datetime.date\n if None in self._tasks:\n sorted_keys = [None] + sorted(\n [i for i in self._tasks.iterkeys() if i is not None])\n else:\n sorted_keys = sorted(self._tasks.keys())\n return sorted_keys", "title": "" }, { "docid": "a5864794cfdccac628aedef94f26c470", "score": "0.49629995", "text": "def dirty_wait_scheduler(self):\n\n # During the search minimal average runtime can change (I dont take global lock on queues)\n # but it doesnt bother us. Process with lower higher wait will have higher priorities\n # Indeed this scheduler is a bad idea. The process with low waittime will never run\n # and has no chances to change this waittime.\n # Th e solution is - update waittime on every iteration of scheduling. 
Thats done in\n # dirty wait scheduler\n max_avg = 0.0\n # if all the ques are empty or working return the first user\n user = random.randrange(len(self.tasks))\n for u, task in self.tasks.iteritems():\n if not task.queue.q.empty() and task.queue.order_lock.acquire(False):\n if task.dirty_wait.value >= max_avg:\n user = u\n max_avg = task.dirty_wait.value\n task.queue.order_lock.release()\n return user", "title": "" }, { "docid": "8b0dbffead4f3af5714327bfdbca4c35", "score": "0.4961606", "text": "def update_task(connection, task):\n sql = ''' UPDATE tasks\n SET priority = ? ,\n begin_date = ? ,\n end_date = ?\n WHERE id = ?'''\n cur = connection.cursor()\n cur.execute(sql, task)\n connection.commit()", "title": "" }, { "docid": "7fdee5428aa2910f089b148ff00ed089", "score": "0.49532998", "text": "def synchronize_low_priority_email_scheduler():\n lock_id = 'synchronize_low_priority_email_scheduler'\n\n if acquire_lock(lock_id): # this should always work, but just in case..\n try:\n task_logger.info('synchronize scheduler starting')\n\n # Find email accounts which authentication info should be OK.\n email_accounts = EmailAccount.objects.filter(\n auth_ok__in=(OK_EMAILACCOUNT_AUTH, UNKNOWN_EMAILACCOUNT_AUTH),\n is_deleted=False,\n ).order_by('-last_sync_date')\n for email_account in email_accounts:\n email_address = email_account.email.email_address\n\n task_logger.info('attempting sync for %s', email_address)\n\n locked, status = lock_task('retrieve_low_priority_emails_for', email_account.id)\n if not locked:\n task_logger.info('skipping task \"retrieve_low_priority_emails_for\" for %s', email_address)\n continue\n\n task_logger.info('syncing %s', email_address)\n\n retrieve_low_priority_emails_for.apply_async(\n args=(email_account.id,),\n max_retries=1,\n default_retry_delay=100,\n kwargs={'status_id': status.pk},\n )\n\n task_logger.info('synchronize scheduler finished')\n finally:\n release_lock(lock_id)\n else:\n task_logger.info('synchronize scheduler already running')", "title": "" }, { "docid": "1b8df0699a3530e3811eeccf1c07b7f7", "score": "0.4951777", "text": "def set_prior_info(self, t):\n if self.professional:\n prior = self.operator.expected_fare_total_demand_per_zone_over_days(t)\n else:\n prior = None\n return prior", "title": "" }, { "docid": "2716644699e898b148d78d35ff7393c0", "score": "0.49332964", "text": "def skip_remaining_tasks(self):\n return self.exit or self.detours or self.additions or self.defuse_children or self.defuse_workflow", "title": "" }, { "docid": "a7997f656ebc177851cf698de68f1348", "score": "0.48904115", "text": "def addHardMinRequired_Task_onDay(self, nworkers, rtask, rshift, iday):\n\n if nworkers>0:\n print (\"debug.Assigning %i workers to day %i at task %s and shift %s\" %(nworkers, iday, self.nameTasks[rtask], self.nameShifts[rshift]))\n\n # set the number os tasks to do on this day\n self.solver.Add(self.num_workers_task_day[(rtask, rshift, iday)] == nworkers)", "title": "" }, { "docid": "ba1bd87610c246c9a7047ef1bda093fb", "score": "0.48886338", "text": "def change_priority(self, priority):\n old = self.priority\n self.priority = priority\n try:\n self.clean()\n except Exception as e:\n self.priority = old\n raise e\n self.save()", "title": "" }, { "docid": "fb9da0a088d93f286aa2302f5150db24", "score": "0.48857227", "text": "def reschedule(self) -> List[Task]:\n today = Today()\n task_list = list()\n for task in self.__db.get_selected(\"completed\", \"False\").to_list():\n if self.__calendar.is_past(DueDate(task.due_date), today) and task.deleted is 
False:\n task.due_date = today.to_date_string()\n task.due_date_timestamp = today.to_timestamp()\n task_list.append(task)\n\n return task_list", "title": "" }, { "docid": "c8d29d9e182e42d5693a7957105fb677", "score": "0.48846006", "text": "def _reschedule(self):\n new_tasks = []\n while True:\n to_run = super(RBRS,self).get_tasks_eligible_to_run()\n\n if len(to_run) == 1:\n new_tasks.append(self.allocate_resources(to_run[0]))\n del self.tasks_to_do[to_run[0].id]\n continue\n\n if len(to_run) < 1:\n break\n\n max = 0\n for task in to_run:\n if task.mean > max:\n max = task.mean\n ws = []\n for task in to_run:\n ws.append(max - task.mean)\n\n c = 1 / sum(ws)\n\n ps = []\n\n for w in ws:\n ps.append(w * c)\n\n the_task = util.choice(to_run, ps)\n new_tasks.append(self.allocate_resources(the_task))\n del self.tasks_to_do[the_task.id]\n if len(new_tasks) > 0:\n return new_tasks", "title": "" }, { "docid": "506f71d8fc91e6b01c9b5070501cecd4", "score": "0.48713562", "text": "def task_with_lowest_share(\n tasks: List[Task],\n allocated_tasks: List[Task],\n task_core_considered: DefaultDict[Task, Set[Core]],\n ) -> Optional[Tuple[Task, Core]]:\n lowest_requirement: Optional[float] = None\n task_lowest: Optional[Task] = None # task with the lowest requirement\n core_lowest: Optional[\n Core\n ] = None # core on which `task_lowest` should be executed\n\n for task in tasks:\n if task not in allocated_tasks:\n for core_id in range(task.core_count):\n core = Core(core_id)\n\n if core in task_core_considered[task]:\n continue\n utilization: float = task.get_utilization(core)\n if lowest_requirement is None or utilization < lowest_requirement:\n lowest_requirement = utilization\n task_lowest = task\n core_lowest = core\n\n if core_lowest:\n assert task_lowest is not None\n return (task_lowest, core_lowest)\n else:\n print(\"Task could not be assigned\")\n return None", "title": "" }, { "docid": "4de712f7c4629a53c9ba22e3fc4016cb", "score": "0.48552465", "text": "def test_preemptive_priorities_resume_options_due_to_schedule(self): \n # Testing under restart\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.Sequential([3, float(\"inf\")])],\n 'Class 1': [ciw.dists.Sequential([1, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(10)], \n 'Class 1': [ciw.dists.Sequential([6, 3])]},\n number_of_servers=[[[1, 5], [2, 100]]],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"restart\"]),\n )\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n r2, r1 = [r for r in recs if r.record_type == \"service\"]\n self.assertEqual(r1.arrival_date, 3)\n self.assertEqual(r1.service_start_date, 3)\n self.assertEqual(r1.service_end_date, 13)\n self.assertEqual(r1.service_time, 10)\n self.assertEqual(r1.waiting_time, 0)\n\n self.assertEqual(r2.arrival_date, 1)\n self.assertEqual(r2.service_start_date, 5)\n self.assertEqual(r2.service_end_date, 11)\n self.assertEqual(r2.service_time, 6)\n self.assertEqual(r2.waiting_time, 4)\n\n # Testing under continue\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.Sequential([3, float(\"inf\")])],\n 'Class 1': [ciw.dists.Sequential([1, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(10)], \n 'Class 1': [ciw.dists.Sequential([6, 3])]},\n number_of_servers=[[[1, 5], [2, 100]]],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"continue\"]),\n )\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n r2, r1 = 
[r for r in recs if r.record_type == \"service\"]\n self.assertEqual(r1.arrival_date, 3)\n self.assertEqual(r1.service_start_date, 3)\n self.assertEqual(r1.service_end_date, 13)\n self.assertEqual(r1.service_time, 10)\n self.assertEqual(r1.waiting_time, 0)\n\n self.assertEqual(r2.arrival_date, 1)\n self.assertEqual(r2.service_start_date, 5)\n self.assertEqual(r2.service_end_date, 9)\n self.assertEqual(r2.service_time, 4)\n self.assertEqual(r2.waiting_time, 4)\n\n # Testing under resample\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.Sequential([3, float(\"inf\")])],\n 'Class 1': [ciw.dists.Sequential([1, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(10)], \n 'Class 1': [ciw.dists.Sequential([6, 3])]},\n number_of_servers=[[[1, 5], [2, 100]]],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"resample\"]),\n )\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n r2, r1 = [r for r in recs if r.record_type == \"service\"]\n self.assertEqual(r1.arrival_date, 3)\n self.assertEqual(r1.service_start_date, 3)\n self.assertEqual(r1.service_end_date, 13)\n self.assertEqual(r1.service_time, 10)\n self.assertEqual(r1.waiting_time, 0)\n\n self.assertEqual(r2.arrival_date, 1)\n self.assertEqual(r2.service_start_date, 5)\n self.assertEqual(r2.service_end_date, 8)\n self.assertEqual(r2.service_time, 3)\n self.assertEqual(r2.waiting_time, 4)", "title": "" }, { "docid": "8e6b0e5ff85c5ac0f7a0d0f3b1d5f4ee", "score": "0.4851495", "text": "def weekly_tasks(dt: datetime):\n pass", "title": "" }, { "docid": "8ec917c542c50b79e6edf0839237e83c", "score": "0.48404855", "text": "def priority(self) -> int:\n ...", "title": "" }, { "docid": "be581af35cb819b71c45572dd8e75d64", "score": "0.48370177", "text": "def auto_assign():\n\n # Determine the due date, which the next Tuesday at 5 PM.\n this_time = pdt_now()\n while this_time.weekday() != 1: # 0 == Monday, 1 == Tuesday...\n this_time += timedelta(days=1) \n due_date = datetime(this_time.year, this_time.month, this_time.day, 17, 0)\n\n # Determine the homeworks to assign, which are the two last things due\n hw_a, hw_b = get_last_two_due_homeworks()\n \n print assign_tasks(hw_a.id, due_date, send_emails=False)\n print assign_tasks(hw_b.id, due_date, send_emails=True)", "title": "" }, { "docid": "a8d2377ebe0b7aea2777d071c5d029d0", "score": "0.4830657", "text": "def test_priority_change_while_waiting(self):\n N = ciw.create_network(\n arrival_distributions={'Class 0': [ciw.dists.Deterministic(4)],\n 'Class 1': [ciw.dists.Deterministic(3)]},\n service_distributions={'Class 0': [ciw.dists.Deterministic(4.5)],\n 'Class 1': [ciw.dists.Deterministic(4.5)]},\n number_of_servers=[1],\n class_change_time_distributions=[\n [None, None],\n [ciw.dists.Deterministic(7), None]],\n priority_classes={'Class 0': 0, 'Class 1': 1}\n )\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(26)\n recs = Q.get_all_records()\n self.assertEqual(len(recs), 5)\n # Customer 1\n self.assertEqual(recs[0].arrival_date, 3)\n self.assertEqual(recs[0].waiting_time, 0)\n self.assertEqual(recs[0].service_start_date, 3)\n self.assertEqual(recs[0].service_end_date, 7.5)\n self.assertEqual(recs[0].customer_class, 1)\n self.assertEqual(recs[0].original_customer_class, 1)\n # Customer 2\n self.assertEqual(recs[1].arrival_date, 4)\n self.assertEqual(recs[1].waiting_time, 3.5)\n self.assertEqual(recs[1].service_start_date, 7.5)\n self.assertEqual(recs[1].service_end_date, 12)\n 
self.assertEqual(recs[1].customer_class, 0)\n self.assertEqual(recs[1].original_customer_class, 0)\n # Customer 3\n self.assertEqual(recs[2].arrival_date, 8)\n self.assertEqual(recs[2].waiting_time, 4)\n self.assertEqual(recs[2].service_start_date, 12)\n self.assertEqual(recs[2].service_end_date, 16.5)\n self.assertEqual(recs[2].customer_class, 0)\n self.assertEqual(recs[2].original_customer_class, 0)\n # Customer 4\n self.assertEqual(recs[3].arrival_date, 12)\n self.assertEqual(recs[3].waiting_time, 4.5)\n self.assertEqual(recs[3].service_start_date, 16.5)\n self.assertEqual(recs[3].service_end_date, 21)\n self.assertEqual(recs[3].customer_class, 0)\n self.assertEqual(recs[3].original_customer_class, 0)\n # Customer 5\n self.assertEqual(recs[4].arrival_date, 6)\n self.assertEqual(recs[4].waiting_time, 15)\n self.assertEqual(recs[4].service_start_date, 21)\n self.assertEqual(recs[4].service_end_date, 25.5)\n self.assertEqual(recs[4].customer_class, 0)\n self.assertEqual(recs[4].original_customer_class, 1)", "title": "" }, { "docid": "5d9f262135a3e09229f82dfe5a25c8ec", "score": "0.48276615", "text": "def trysuperseed(self, desire):\n if not self.isperforming:\n return\n\n if desire._priority < self.currentpriority:\n self.robot.cancelall()\n self.robot.invalid_context = True\n self.done.acquire()\n self.done.wait()\n self.done.release()", "title": "" }, { "docid": "3ba969f6629a6d8b3808aebb9ea7f611", "score": "0.48274118", "text": "def pid_priority(self, pid):\n self.writeCommand('pid_priority', pid)\n return self", "title": "" }, { "docid": "1691e5cc03be943dffcf21cdcada7951", "score": "0.48117495", "text": "def set_priority(self, name, priority):", "title": "" }, { "docid": "fca03d7b41fd2909e2a09b29e53228d9", "score": "0.47969598", "text": "def priq_2_same():\n from priorityq import PriQ\n new_priq = PriQ()\n new_priq.insert(7, 1)\n new_priq.insert(10, 2)\n new_priq.insert(14, 2)\n return new_priq", "title": "" }, { "docid": "23741578612e624c12df9e0134f77443", "score": "0.47952002", "text": "def preempt(self, other_task: Task):\n assert other_task.process_id == self.process_id\n assert other_task is not self\n\n self.is_running = False\n self.was_preempted = True\n\n self.times_preempted += 1\n\n self.slot = other_task.slot", "title": "" }, { "docid": "4dde855787c8a26425ecbb94a36be12a", "score": "0.47874683", "text": "def _skip_schedule_tags_to_date(\n schedule_tags: Dict[str, Dict],\n skip_to_date: datetime,\n schedule_next_run_times: Dict[str, datetime],\n) -> None:\n tags_moved = []\n for prev_tag in schedule_tags:\n if schedule_next_run_times[prev_tag] < skip_to_date:\n log.info(\n f\"Skipping schedule(s) for {prev_tag} from {schedule_next_run_times[prev_tag]} \"\n f\"to {skip_to_date}...\"\n )\n # Ensure that tzinfo is set so that isoformat() returns a format that Vault accepts\n if skip_to_date.tzinfo is None:\n skip_to_date = skip_to_date.replace(tzinfo=timezone.utc)\n endtoend.core_api_helper.update_account_schedule_tag(\n account_schedule_tag_id=prev_tag,\n schedule_status_override_start_timestamp=datetime.min.replace(\n tzinfo=timezone.utc\n ).isoformat(),\n schedule_status_override_end_timestamp=datetime.max.replace(\n tzinfo=timezone.utc\n ).isoformat(),\n schedule_status_override=\"ACCOUNT_SCHEDULE_TAG_SCHEDULE_STATUS_OVERRIDE_TO_SKIPPED\",\n test_pause_at_timestamp=skip_to_date.isoformat(),\n )\n tags_moved.append(prev_tag)\n if tags_moved:\n # Set the time element to start of day to ensure any schedule for that day is included\n # as we support a maximum frequency of DAILY\n 
skip_to_date = skip_to_date + relativedelta(\n hour=0, minute=0, second=0, microsecond=0\n )\n wait_for_schedule_operation_events(\n tag_names=tags_moved,\n use_e2e_mapping=False,\n wait_for_timestamp=skip_to_date,\n inter_message_timeout=0,\n matched_message_timeout=0,\n )", "title": "" }, { "docid": "eeda94e47ebac8a06c305876571a8cb6", "score": "0.47789925", "text": "def update_priority_fn(self, priority_fn):\n # This is particularly useful for Bayesian search that will use the information gained from evaluation to determine what the next step shoudl be\n # TODO determine whether this is the right way to do it or not.\n pass", "title": "" }, { "docid": "d51a19dfd68fdeed7a9599615218793d", "score": "0.47780627", "text": "def lower_priority(self):\n return self._lower_priority", "title": "" }, { "docid": "705d2d6db1745a3411cce4594f54c37b", "score": "0.4756274", "text": "def test_preemptive_priorities_resume_options(self): \n # Testing under restart\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.Sequential([3, float(\"inf\")])],\n 'Class 1': [ciw.dists.Sequential([1, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(10)], \n 'Class 1': [ciw.dists.Sequential([6, 3])]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"restart\"]),\n )\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n r1, r2 = [r for r in recs if r.record_type == \"service\"]\n self.assertEqual(r1.arrival_date, 3)\n self.assertEqual(r1.service_start_date, 3)\n self.assertEqual(r1.service_end_date, 13)\n self.assertEqual(r1.service_time, 10)\n self.assertEqual(r1.waiting_time, 0)\n\n self.assertEqual(r2.arrival_date, 1)\n self.assertEqual(r2.service_start_date, 13)\n self.assertEqual(r2.service_end_date, 19)\n self.assertEqual(r2.service_time, 6)\n self.assertEqual(r2.waiting_time, 12)\n\n # Testing under continue\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.Sequential([3, float(\"inf\")])],\n 'Class 1': [ciw.dists.Sequential([1, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(10)], \n 'Class 1': [ciw.dists.Sequential([6, 3])]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"continue\"]),\n )\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n r1, r2 = [r for r in recs if r.record_type == \"service\"]\n self.assertEqual(r1.arrival_date, 3)\n self.assertEqual(r1.service_start_date, 3)\n self.assertEqual(r1.service_end_date, 13)\n self.assertEqual(r1.service_time, 10)\n self.assertEqual(r1.waiting_time, 0)\n\n self.assertEqual(r2.arrival_date, 1)\n self.assertEqual(r2.service_start_date, 13)\n self.assertEqual(r2.service_end_date, 17)\n self.assertEqual(r2.service_time, 4)\n self.assertEqual(r2.waiting_time, 12)\n\n # Testing under resample\n N = ciw.create_network(\n arrival_distributions={\n 'Class 0': [ciw.dists.Sequential([3, float(\"inf\")])],\n 'Class 1': [ciw.dists.Sequential([1, float('inf')])]},\n service_distributions={\n 'Class 0': [ciw.dists.Deterministic(10)], \n 'Class 1': [ciw.dists.Sequential([6, 3])]},\n number_of_servers=[1],\n priority_classes=({'Class 0': 0, 'Class 1': 1}, [\"resample\"]),\n )\n Q = ciw.Simulation(N)\n Q.simulate_until_max_time(20)\n recs = Q.get_all_records()\n r1, r2 = [r for r in recs if r.record_type == \"service\"]\n self.assertEqual(r1.arrival_date, 3)\n self.assertEqual(r1.service_start_date, 3)\n self.assertEqual(r1.service_end_date, 
13)\n self.assertEqual(r1.service_time, 10)\n self.assertEqual(r1.waiting_time, 0)\n\n self.assertEqual(r2.arrival_date, 1)\n self.assertEqual(r2.service_start_date, 13)\n self.assertEqual(r2.service_end_date, 16)\n self.assertEqual(r2.service_time, 3)\n self.assertEqual(r2.waiting_time, 12)", "title": "" }, { "docid": "277287cc438feff559dc8c13b28b952d", "score": "0.4753008", "text": "def leastInterval(self, tasks, n):\n def fix_order(l, pos):\n for i in range(pos, len(l) - 1):\n if l[i][1] < l[i+1][1]:\n temp = l[i]\n l[i] = l[i+1]\n l[i+1] = temp\n else:\n return\n \n intervals = 0\n remaining = len(tasks)\n order = [[t, num, 1] for t, num in Counter(tasks).most_common()]\n while remaining:\n intervals += 1\n # see which task we can do next\n for i in range(len(order)):\n # if no task is available\n if not order[i][1]:\n break\n \n # if we found a task to assign\n if order[i][2] <= intervals:\n order[i][1] -= 1\n order[i][2] = intervals + n + 1\n remaining -= 1\n fix_order(order, i)\n break\n \n return intervals", "title": "" }, { "docid": "00ab074bf794ca931da309b1e1d57cf2", "score": "0.47465175", "text": "def preprocess_task(self, task, global_sweep_points, sweep_points=None,\n **kw):\n task = super().preprocess_task(task, global_sweep_points, sweep_points,\n **kw)\n sweep_points = task['sweep_points']\n task['first_delay_point'] = sweep_points.get_sweep_params_property(\n 'values', 0, 'pulse_delay')[0]\n\n return task", "title": "" }, { "docid": "69cc6073652b959b5931b568448f5f7d", "score": "0.47252285", "text": "def make_due(self):\n # Update timestamp\n self.document.datetime = datetime.utcnow()\n # TODO: Notify reviewer of due date\n pass", "title": "" }, { "docid": "ed235f8ea403135e19633f0342d2e745", "score": "0.47249448", "text": "def period_reduce():\n date_entry1 = input('Enter a \"since\" date in YYYY-MM-DD format. ')\n date_entry2 = input('Enter a \"until\" date in YYYY-MM-DD format. ')\n try:\n SINCE = datetime.strptime(date_entry1, '%Y-%m-%d')\n UNTIL = datetime.strptime(date_entry2, '%Y-%m-%d')\n except ValueError:\n print(\"Invald date. 
Try again.\\n\")\n return period_reduce()\n print('\\nBeginning harvest...\\n\\n')\n limited_posts = takewhile(lambda p: p.date > UNTIL,\n dropwhile(lambda p: p.date > SINCE,\n posts))\n return limited_posts", "title": "" }, { "docid": "762a4e0887411332cb2d79c0499fb790", "score": "0.4724782", "text": "def priq_3_diff():\n from priorityq import PriQ\n new_priq = PriQ()\n new_priq.insert(7, 1)\n new_priq.insert(10, 2)\n new_priq.insert(14, 3)\n return new_priq", "title": "" }, { "docid": "275b1f4e4a0675a00b223e3e6df3b8a3", "score": "0.4687151", "text": "def verify_repo_priority(self, repoid, required_repos):\n res = True\n required_pri = self._limit_pri(required_repos)\n new_pri = OTHER_PRIORITY\n if self._get_pri(repoid) <= required_pri:\n if required_pri >= new_pri:\n new_pri = min(99, required_pri+10)\n # self._set_pri(repoid, new_pri)\n self.resolved_repos[repoid] = new_pri\n res = False\n return res", "title": "" }, { "docid": "e37c4b70aa7f97bebf635f1f11747fcd", "score": "0.4684523", "text": "def mark_due(self):\n # TODO: Notify reviewer who pays.\n pass", "title": "" }, { "docid": "770e573105f3bd8a3316e9cc6a5b90ab", "score": "0.4681931", "text": "def decrease(self, today):\n streak = max(0, self.current_streak - 1)\n self.set_streak(streak)\n self.item.update(due={'string': 'ev day starting {}'.format(today)})", "title": "" }, { "docid": "a662cea801d9c434d75c66b6072742f7", "score": "0.46774432", "text": "def get_priority(self, name):", "title": "" }, { "docid": "3c7ccd7e2ff1ad3e86e570e6b9405ec7", "score": "0.46655235", "text": "def generate_priority(self, feats):\n raise NotImplementedError()", "title": "" }, { "docid": "074b9b32aa1a0b31fa5403445b99b0c7", "score": "0.46642584", "text": "def _startTask(self, availableTasks):\n c = self._database[self.TASK_COLLECTION]\n now = datetime.utcnow()\n \n # Ok... 
since we can't use the full index with find_and_modify, we'll\n # just use the find / update operations over and over\n task = None\n updates = dict(\n state = 'working'\n , tsStart = now\n , host = socket.gethostname()\n )\n while True:\n # SLOW\n #task = c.find_and_modify(\n # {\n # 'taskClass': { '$in': availableTasks.keys() }\n # , 'state': 'request'\n # , 'tsRequest': { '$lte': now }\n # }\n # , {\n # '$set': updates\n # }\n # , new = True\n # , sort = [ ('priority', -1), ('tsRequest', 1) ]\n #)\n #return task\n task = c.find_one(\n {\n 'taskClass': { '$in': availableTasks.keys() }\n , 'state': 'request'\n , 'tsRequest': { '$lte': now }\n }\n , sort = [ ('priority', -1), ('tsRequest', 1) ]\n )\n if task is None:\n # No tasks are waiting to run\n break\n\n newUpdated = updates\n splinterUpdated = None\n if task.get('batch') or task.get('schedule'):\n # For batch and scheduled tasks, we'll need to create a task\n # that we're actually going to run, and point back to that from\n # the batch / scheduled task.\n splinterUpdated = updates.copy()\n splinterUpdated['splinterOf'] = task['_id']\n\n splinterTask = self._createTask(\n now\n , task['taskClass']\n , splinterUpdated\n , task['kwargs']\n )\n\n newUpdated = updates.copy()\n newUpdated['splinterId'] = splinterTask\n splinterUpdated['_id'] = splinterTask\n\n r = c.update(\n { '_id': task['_id'], 'state': 'request' }\n , { '$set': newUpdated }\n , safe = True\n )\n if r.get('updatedExisting') == True:\n # Successfully acquired the task\n if splinterUpdated is not None:\n task.update(splinterUpdated)\n else:\n task.update(newUpdated)\n break\n\n return task", "title": "" }, { "docid": "bfd37ec5c5e13b0c55fbd302a9631b32", "score": "0.46567732", "text": "def set_prior(self):", "title": "" }, { "docid": "bfd37ec5c5e13b0c55fbd302a9631b32", "score": "0.46567732", "text": "def set_prior(self):", "title": "" }, { "docid": "b0f8100924c4babe190e2445a237b678", "score": "0.46479672", "text": "def reevaluate_schedule(self, nexttime): \n future = self.vm_scheduler.get_future_reschedulable_leases()\n for l in future:\n # We can only reschedule leases in the following four states\n if l.get_state() in (Lease.STATE_PREPARING, Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_SUSPENDED_SCHEDULED):\n # For each reschedulable lease already scheduled in the\n # future, we cancel the lease's preparantion and\n # the last scheduled VM.\n vmrr = l.get_last_vmrr()\n self.preparation_scheduler.cancel_preparation(l)\n self.vm_scheduler.cancel_vm(vmrr)\n l.remove_vmrr(vmrr)\n if l.get_state() in (Lease.STATE_READY, Lease.STATE_SCHEDULED, Lease.STATE_PREPARING):\n l.set_state(Lease.STATE_PENDING)\n elif l.get_state() == Lease.STATE_SUSPENDED_SCHEDULED:\n l.set_state(Lease.STATE_SUSPENDED_PENDING)\n\n # At this point, the lease just looks like a regular\n # pending lease that can be handed off directly to the\n # __schedule_lease method.\n # TODO: We should do exception handling here. 
However,\n # since we can only reschedule best-effort leases that were\n # originally schedule in the future, the scheduling function \n # should always be able to schedule the lease (worst-case \n # scenario is that it simply replicates the previous schedule)\n self.__schedule_lease(l, nexttime)", "title": "" }, { "docid": "51799e0abf8fd4b838cf61bbc9f0a618", "score": "0.46352315", "text": "def with_lower_priority(self, lower_priority):\n assert isinstance(lower_priority, bool)\n ret = self._copy()\n ret._lower_priority = lower_priority\n return ret", "title": "" }, { "docid": "976b875169562f2bd35d407f4d6c5389", "score": "0.46279812", "text": "def add_task (self,priority,task):\r\n self.todo[self.uid] = (priority, task)\r\n self.uid += 1", "title": "" }, { "docid": "208f19abd7310d2183674249439378a3", "score": "0.46267503", "text": "def PURGE(config, tasks):\n currentTasks = []\n for task in tasks:\n taskDTG = datetime.datetime.strptime(task['DTS'], ISODTSFormat)\n if taskDTG > config['meta']['bkwdDTG']:\n currentTasks.append(task)\n\n return(currentTasks)", "title": "" }, { "docid": "208f19abd7310d2183674249439378a3", "score": "0.46267503", "text": "def PURGE(config, tasks):\n currentTasks = []\n for task in tasks:\n taskDTG = datetime.datetime.strptime(task['DTS'], ISODTSFormat)\n if taskDTG > config['meta']['bkwdDTG']:\n currentTasks.append(task)\n\n return(currentTasks)", "title": "" }, { "docid": "cdce8d5685f4de9c22e727ca680ea41a", "score": "0.46172267", "text": "def set_thread_priority(self, *args, **kwargs):\n return _bs_swig.ec_invert_sync_sptr_set_thread_priority(self, *args, **kwargs)", "title": "" }, { "docid": "4076a9433647fd065adcb0ed822b6264", "score": "0.46087345", "text": "def _upcoming(self):\n actions = ScheduledAction.select()\n _nexttimes = []\n for a in actions:\n _next = CronHandler(a.cron).nextenabled()\n if _next:\n _nexttimes.append((_next, a))\n if _nexttimes:\n return list(sorted(_nexttimes))[0] #return the first time for action along with the action", "title": "" }, { "docid": "a3a64678d94233a7f900562adbe9bc02", "score": "0.46041405", "text": "def update_priorities(self, idxs, priorities):\r\n\r\n self._is_accessed = True\r\n\r\n self.priorities[idxs] = priorities\r\n self.accesses[idxs] = 1.", "title": "" }, { "docid": "5906feecf18e1431616fbb52448f8442", "score": "0.45941284", "text": "def __lt__(self, other):\n return self.priority < other.priority", "title": "" }, { "docid": "2fe7ceaff749f6ae9dcf7c0f6fd6d611", "score": "0.45937002", "text": "def test_due_date_in_past(self):\n time = timezone.now() - datetime.timedelta(days=1)\n past_task = Task(date_due = time.date())\n self.assertEqual(past_task.to_show(), False)", "title": "" }, { "docid": "dd680568ed99206157b0d8f34a38f893", "score": "0.45902607", "text": "def temp_lower_than(temp_thr):\n\n return lambda step, curr_obj, curr_optimized_obj, curr_temp: curr_temp<temp_thr", "title": "" }, { "docid": "d5b9ee7d68a698e2a1095568adcddd1d", "score": "0.45880526", "text": "def update_priority_question():\n try:\n id = request.form['id']\n priority = request.form['priority']\n priority = float(priority)\n question = models.Question.objects.get(id=id)\n question.priority = priority\n question.save()\n return template_modify_all_tags(question)\n except:\n print traceback.print_exc()", "title": "" }, { "docid": "feff16812b32bbe8f1b23272ad15e809", "score": "0.45880187", "text": "def find_precal(self,date,threshold,**kwargs):\n override = kwargs.get('override')\n tag = kwargs.get('tag')\n nitestring = \"%s-%s-%s\" % 
(date[:4],date[4:6],date[6:])\n nite = datetime.strptime(nitestring,\"%Y-%m-%d\")\n days=1\n while days <= threshold:\n find_precal = \"select distinct unitname,reqnum,attnum from pfw_attempt where unitname='%s'\" % str((nite - timedelta(days=days)).date()).replace('-','')\n self.cur.execute(find_precal)\n results = self.cur.fetchall()\n max = len(results) - 1\n if len(results) != 0:\n precal_unitname,precal_reqnum,precal_attnum = results[max][0],results[max][1],results[max][2]\n status_precal = \"select distinct status from task where id in (select distinct task_id from pfw_attempt where unitname='%s' and reqnum=%s and attnum=%s)\" % (precal_unitname,precal_reqnum,precal_attnum)\n self.cur.execute(status_precal)\n status = self.cur.fetchone()[0] \n break\n elif len(results) == 0 or status == 1 or status is None:\n days +=1\n if days > threshold:\n break\n if days > threshold:\n if override is True:\n if tag is None:\n print \"Must specify tag if override is True!\"\n exit(0)\n else:\n max_tagged = \"select distinct unitname,reqnum, attnum from ops_proctag where tag = '%s' and unitname in (select max(unitname) from ops_proctag where tag = '%s' and unitname < %s)\" % (tag,tag,date)\n self.cur.execute(max_tagged)\n last_results = self.cur.fetchall()\n try:\n precal_unitname,precal_reqnum,precal_attnum = last_results[0][0],last_results[0][1],last_results[0][2]\n except:\n print \"No tagged precals found. Please check tag or database section used...\"\n exit(0)\n elif override is False or override is None:\n if results is None:\n print \"No precals found. Please manually select input precal...\"\n exit(0)\n precal_nite = precal_unitname\n precal_run = 'r%sp0%s' % (precal_reqnum,precal_attnum)\n return precal_nite, precal_run", "title": "" }, { "docid": "1d8ee8d1fa66ab721a46672f6b4fe690", "score": "0.4584149", "text": "def update_priorities(self, priorities: np.ndarray):\n # add a small number to ensure every transition can be sampled\n tempered_priorities = priorities + self.priority_eps\n for idx, priority in zip(self.sampled_indices, tempered_priorities):\n assert priority > 0\n assert 0 <= idx < self.num_stored\n\n self.sum_tree[idx] = priority ** self.alpha\n self.min_tree[idx] = priority ** self.alpha\n\n self.max_priority = max(self.max_priority, priority)\n self.sampled_indices = None", "title": "" }, { "docid": "6c239ec5f2178a5243399c72eb50f9ba", "score": "0.45767507", "text": "def add(self, task, priority=0):\n if task in self.entries:\n self.remove(task)\n\n count = next(self.counter)\n # weight = -priority since heap is a min-heap\n entry = [-priority, count, task]\n self.entries[task] = entry\n heapq.heappush(self.heap, entry)\n pass", "title": "" }, { "docid": "74d6b18d245ab6fb0422d9937f5d1717", "score": "0.45748064", "text": "def setPrio(self, prio):\n self.setByName(prio, 'prio')", "title": "" }, { "docid": "141dfc3fbcd43e0c9fe782373c855f7f", "score": "0.45747882", "text": "def test3():\r\n q = make_priority_queue()\r\n count = 0\r\n while True:\r\n if count == 10:\r\n break\r\n i = 5\r\n task = \"Task\" + str(count + 1)\r\n enqueue(q, Task(task, i))\r\n count += 1\r\n print(\"Created Queue: \", q)\r\n t = front(q)\r\n print(\"Highest priority task is\", t.name, \"with priority\", t.priority)\r\n t = back(q)\r\n print(\"Lowest priority task is\", t.name, \"with priority\", t.priority)\r\n while not is_empty(q):\r\n t = front(q)\r\n dequeue(q)\r\n if is_empty(q) is True:\r\n print(\"Dequeue Success? - True\")\r\n else:\r\n print(\"Dequeue Success? 
- False\")", "title": "" }, { "docid": "8939bd22b0f76678c58d32da915b3d4c", "score": "0.4567063", "text": "def test5():\r\n q = make_priority_queue()\r\n enqueue(q, Task(\"Task1\", 1))\r\n enqueue(q, Task(\"Task2\", 2))\r\n enqueue(q, Task(\"Task3\", 3))\r\n enqueue(q, Task(\"Task4\", 4))\r\n enqueue(q, Task(\"Task5\", 5))\r\n enqueue(q, Task(\"Task6\", 5))\r\n enqueue(q, Task(\"Task7\", 4))\r\n enqueue(q, Task(\"Task8\", 3))\r\n enqueue(q, Task(\"Task9\", 2))\r\n enqueue(q, Task(\"Task10\", 1))\r\n print(\"Created Queue: \", q)\r\n t = front(q)\r\n print(\"Highest priority task is\", t.name, \"with priority\", t.priority)\r\n t = back(q)\r\n print(\"Lowest priority task is\", t.name, \"with priority\", t.priority)\r\n while not is_empty(q):\r\n t = front(q)\r\n dequeue(q)\r\n if is_empty(q) is True:\r\n print(\"Dequeue Success? - True\")\r\n else:\r\n print(\"Dequeue Success? - False\")", "title": "" }, { "docid": "50129cbb6b9fb20c2ed610e14d9fd9de", "score": "0.4562539", "text": "def test_priority_order_up_ok(self):\n self.execute('priority order critical up')\n rv, output = self.execute('priority list')\n self.assertEqual(0, rv, output)\n self.assertExpectedResult(output)", "title": "" }, { "docid": "0977a9423fe84559346241823d1e45c3", "score": "0.4556731", "text": "def addHardWorkersMustBeAssignedToAllowedTasks(self):\n #Example:\n #At least 2 M shifts must be set on day 0\n #exp1 = [self.shifts[(w, 0)] == 1 for w in range(self.num_workers)]\n #self.solver.Add(self.solver.Sum(exp1) >= 3)\n #numero de supervisores assignados =1 en turno manana\n #exp2 = [self.tasks[(w, 0)] == 1 for w in range(self.num_workers)]\n #self.solver.Add(self.solver.Sum(exp2) == 1)\n\n exp1 = [(self.task[(w, 0)] == 1) * (self.shift[(w, 0)] == 1) for w in range(self.num_workers)]\n exp2 = [(self.task[(w, 0)] == 2) * (self.shift[(w, 0)] == 1) for w in range(self.num_workers)]\n self.solver.Add(self.solver.Sum(exp1) >= 4)\n self.solver.Add(self.solver.Sum(exp2) >= 2)", "title": "" }, { "docid": "9876c4dc7a660bff4587aaeca7c837b8", "score": "0.4556545", "text": "def changeTask(title, description, due_date, deadline, children, state, completion_date, readback=True):\n rmResp = remove.removeTask(title, readback)\n if rmResp == 200:\n addResp = add.addTask(title=title, description=description, due_date=due_date, deadline=deadline, children=children, state=state, completion_date=completion_date, readback=readback)\n if addResp != 200:\n print(col.FAIL + \"Error while changing (adding) task. Error code: \" + str(addResp))\n else:\n print(col.FAIL + \"Error while changing (removing) task. 
Error code: \" + str(rmResp))\n return rmResp == addResp == 200", "title": "" }, { "docid": "8d0f89d972e83b9f514a158360c64179", "score": "0.4551205", "text": "def change_priority(arr, i, p):\n arr[i][1] += p\n arr = sift_down(arr, i, len(arr)-1)", "title": "" }, { "docid": "f8291778dd983f204b573198943ef3e1", "score": "0.45486617", "text": "def ensure_priority_combinations(self, num: int):\n self.combinator.ensure_priorities(num)", "title": "" }, { "docid": "78ba2bf3afd43831ce0de2bf33f1f6af", "score": "0.45472017", "text": "def prioritize(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prioritize\")", "title": "" }, { "docid": "78ba2bf3afd43831ce0de2bf33f1f6af", "score": "0.45472017", "text": "def prioritize(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prioritize\")", "title": "" }, { "docid": "3b7c06e4fc46849990c26dd2c23bf899", "score": "0.45469624", "text": "def is_due(self):\n due = self.next_due()\n now = time.time()\n if now >= due:\n _log.info('%s was due at %s', self.jobname, _fmt_time(due))\n self.touch(now)\n return True\n else:\n _log.info('%s is due at %s', self.jobname, _fmt_time(due))\n return False", "title": "" }, { "docid": "9ab1afaa90664aa19ba1afcade2fa59e", "score": "0.4543918", "text": "def rta_fixpoint(tk, tasks, no_cpus, min_delta=None):\n # response time iteration, start with cost\n last, resp = tk.cost, response_estimate(tk, tasks, no_cpus, tk.cost)\n\n while last != resp and resp <= tk.deadline:\n if resp > last and resp - last < min_delta:\n resp = min(last + min_delta, tk.deadline)\n last, resp = resp, response_estimate(tk, tasks, no_cpus, resp)\n\n return resp", "title": "" }, { "docid": "47f5cfacfc27b7f52c0c980b46bc1a1d", "score": "0.45406285", "text": "def delete(self, postpone: bool = True):\n if not postpone:\n # reduce the tasks duration\n\n # make sure this task is locked (on supported db backends)\n task = Task.objects.select_for_update().filter(pk=self.task_id).first()\n\n task.duration -= self.duration\n if task.duration <= 0:\n task.delete()\n else:\n task.save(update_fields=('duration',))\n super().delete()", "title": "" }, { "docid": "fc2229eb27a1b1f059b4502e26eaa6ff", "score": "0.45368075", "text": "def get_next(self):\n\n self.update_tasks_status()\n\n if self.dirty:\n self.tsort()\n self.dirty = False\n\n for key, task in self.tasks.iteritems():\n if task.is_new() and task.has_resolved_dependencies():\n return task\n\n return None", "title": "" }, { "docid": "4689806bdef8636ce06fb749e4648923", "score": "0.45301598", "text": "def __lt__(self, other): # overload the < operator\n return self.getPriority() < other.getPriority()", "title": "" }, { "docid": "1a0f05851c9d49496e1855f872c2d673", "score": "0.45258898", "text": "def addHardAllowedTasksForWorker(self, iworker, atasks):\n\n print (\"debug.Setting allowed tasks for worker %s are %s\" %(self.nameWorkers[iworker]['Name'], str(atasks)))\n\n for i in range(self.num_days):\n exp = [self.task[iworker, i] == atasks[t] for t in range(len(atasks))]\n self.solver.Add(self.solver.Max(exp) == 1)", "title": "" }, { "docid": "44f3f2c7e485209f0304fd4fdadd09f6", "score": "0.45243683", "text": "def test_due_date_in_future(self):\n time = timezone.now() + datetime.timedelta(days=5)\n future_task = Task(date_due = time.date())\n self.assertEqual(future_task.to_show(), True)", "title": "" }, { "docid": "215f1c1ad9aa97cfd30c2ea485dac744", "score": "0.4523343", "text": "def update_prior_mastery(self, new_prior_mastery):\n raise NotImplementedError", "title": "" }, { "docid": 
"3427afad31a9d699190ec9e0914fe927", "score": "0.45177287", "text": "def max_waittime_scheduler(self):\n # During the search minimal average runtime can change (I dont take global lock on queues)\n # but it doesnt bother us. Process with lower runtime still will have higher priorities\n max_time = float('inf')\n # if all the ques are empty or working return the first user\n user = random.randrange(len(self.tasks))\n for u, task in self.tasks.iteritems():\n if not task.queue.q.empty() and task.queue.order_lock.acquire(False):\n if task.last_task_finished.value < max_time:\n user = u\n max_time = task.last_task_finished.value\n task.queue.order_lock.release()\n return user", "title": "" }, { "docid": "ff7878d5e18f5e05a4f337df9c9460fe", "score": "0.4514913", "text": "def thread_priority(self):\n return _bs_swig.ec_invert_sync_sptr_thread_priority(self)", "title": "" } ]
b088dddebe4dd8c763a816df39d4bd07
Given a boolean array, we select generators from a barcode basis.
Example
>>> print(base3)
Barcode basis
[[0 2]
 [0 1]
 [1 3]]
>>> base5 = base3.bool_select([True,False,True])
>>> print(base5)
Barcode basis
[[0 2]
 [1 3]]
[ { "docid": "c6cf5734120bfe5000e95948f7e28282", "score": "0.6890537", "text": "def bool_select(self, selection):\n if self.prev_basis is None:\n return barcode_basis(self.barcode[selection])\n\n return barcode_basis(self.barcode[selection], self.prev_basis,\n self.coordinates[:, selection])", "title": "" } ]
[ { "docid": "c9c6f4e162dc8602c6845ab763118195", "score": "0.5711035", "text": "def seqselect(test, list): \n selected = [ ]\n for item in list: \n if test(item) == True: \n selected.append(item)\n return selected", "title": "" }, { "docid": "9dc0b1d43ce0cbc171247b9ced451517", "score": "0.56585324", "text": "def randbool(self, name, **kwargs): #539 (line in Coconut source) #540 (line in Coconut source)\n return bool(self.choice(name, [False, True], **kwargs)) #541 (line in Coconut source)", "title": "" }, { "docid": "a8f683d98ae31f03bd5abbdb523b7ce3", "score": "0.5611374", "text": "def masked_select(self, mask):\n return array_ops.masked_select(self, mask)", "title": "" }, { "docid": "a5616a8760058fab87c33e4dfc3a0468", "score": "0.5286663", "text": "def get_bprop_select(self):\n select = P.Select()\n\n def bprop(cond, x, y, out, dout):\n return zeros_like(cond), select(cond, dout, zeros_like(x)), select(cond, zeros_like(y), dout)\n return bprop", "title": "" }, { "docid": "509faebf07288944446b0ccf76b4ef3a", "score": "0.5277187", "text": "def random_boolean_list(self, num_of_bools):\n return [random.choice([True, False]) for i in range(0, num_of_bools)]", "title": "" }, { "docid": "c384258967c1dfd01f256e23496b34e4", "score": "0.5228981", "text": "def select_action(\n self, state: np.ndarray, deterministic: bool = False\n ) -> np.ndarray:\n raise NotImplementedError", "title": "" }, { "docid": "6b3dc6d2eb083c27588f43023ae106bc", "score": "0.520966", "text": "def test_select_bool(self):\n\n # with class\n res = self.app.select_many('GtkButton', visible=True)\n self.assertGreater(len(res), 2)\n\n res = self.app.select_many('GtkAboutDialog', visible=False)\n self.assertGreater(len(res), 0)\n\n # without class\n res = self.app.select_many(visible=True)\n self.assertGreater(len(res), 5)\n\n res = self.app.select_many(visible=False)\n self.assertGreater(len(res), 4)", "title": "" }, { "docid": "2f8e7ab85610dafe8455c0119e50f714", "score": "0.51447254", "text": "def select(condlist, choicelist, default=0):\n # Check the size of condlist and choicelist are the same, or abort.\n if len(condlist) != len(choicelist):\n raise ValueError(\n 'list of cases must be same length as list of conditions')\n\n # Now that the dtype is known, handle the deprecated select([], []) case\n if len(condlist) == 0:\n raise ValueError(\"select with an empty condition list is not possible\")\n\n choicelist = [np.asarray(choice) for choice in choicelist]\n\n try:\n intermediate_dtype = np.result_type(*choicelist)\n except TypeError as e:\n msg = f'Choicelist elements do not have a common dtype: {e}'\n raise TypeError(msg) from None\n default_array = np.asarray(default)\n choicelist.append(default_array)\n\n # need to get the result type before broadcasting for correct scalar\n # behaviour\n try:\n dtype = np.result_type(intermediate_dtype, default_array)\n except TypeError as e:\n msg = f'Choicelists and default value do not have a common dtype: {e}'\n raise TypeError(msg) from None\n\n # Convert conditions to arrays and broadcast conditions and choices\n # as the shape is needed for the result. 
Doing it separately optimizes\n # for example when all choices are scalars.\n condlist = np.broadcast_arrays(*condlist)\n choicelist = np.broadcast_arrays(*choicelist)\n\n # If cond array is not an ndarray in boolean format or scalar bool, abort.\n for i, cond in enumerate(condlist):\n if cond.dtype.type is not np.bool_:\n raise TypeError(\n 'invalid entry {} in condlist: should be boolean ndarray'.format(i))\n\n if choicelist[0].ndim == 0:\n # This may be common, so avoid the call.\n result_shape = condlist[0].shape\n else:\n result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape\n\n result = np.full(result_shape, choicelist[-1], dtype)\n\n # Use np.copyto to burn each choicelist array onto result, using the\n # corresponding condlist as a boolean mask. This is done in reverse\n # order since the first choice should take precedence.\n choicelist = choicelist[-2::-1]\n condlist = condlist[::-1]\n for choice, cond in zip(choicelist, condlist):\n np.copyto(result, choice, where=cond)\n\n return result", "title": "" }, { "docid": "95e9bfed961677e6e98e4b23293001d5", "score": "0.51291037", "text": "def rand_bool_array(nb_true, out_shape):\n nb_element = 1\n for i in out_shape:\n nb_element = nb_element * i\n arr = np.zeros(nb_element, dtype=bool)\n nb_true = int(nb_true)\n arr[:nb_true] = True\n np.random.shuffle(arr)\n arr = arr.reshape(out_shape)\n\n return arr", "title": "" }, { "docid": "fee0974a0ccca1e29ffad4d69b17a6cc", "score": "0.50732875", "text": "def frombools(cls, bools=()):\n return cls.fromint(sum(compress(cls._atoms, bools)))", "title": "" }, { "docid": "8e8a36bca065a4a5ebf78a5cb8c88cee", "score": "0.4964432", "text": "def global_is_selected(bool):\n return 'selected' if bool else ''", "title": "" }, { "docid": "1cde7fb1a91c871019b7cebb87e9ecf4", "score": "0.4943031", "text": "def index_select(self, dim, index):\n return array_ops.index_select(self, dim, index)", "title": "" }, { "docid": "2e80c8ba403113046b123d909699fc6c", "score": "0.49364904", "text": "def select(self, choice_bit, x, y): # noqa:E501\n\n # TODO[Morten] optimize select when choice_bit is a public tensor\n\n # TODO[Morten]\n # these assertions should ideally be enabled but requires lifting to be\n # applied to the inputs first; make sure that's fixed during refactoring\n #\n # assert x.backing_dtype == y.backing_dtype\n # assert x.is_scaled == y.is_scaled\n # assert not choice_bit.is_scaled\n\n with tf.name_scope(\"select\"):\n return (y - x) * choice_bit + x", "title": "" }, { "docid": "5dfd9cabdfa7f8a745964e431a8c3010", "score": "0.49263495", "text": "def Select(array, index):\n return get_env().formula_manager.Select(array, index)", "title": "" }, { "docid": "1c779fc4a027367bbb593fb53cc95777", "score": "0.49091804", "text": "def _transform_selected(X, transform, selected=\"all\", copy=True):\n X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)\n\n if isinstance(selected, six.string_types) and selected == \"all\":\n return transform(X)\n\n if len(selected) == 0:\n return X\n\n n_features = X.shape[1]\n ind = np.arange(n_features)\n sel = np.zeros(n_features, dtype=bool)\n sel[np.asarray(selected)] = True\n not_sel = np.logical_not(sel)\n n_selected = np.sum(sel)\n\n if n_selected == 0:\n # No features selected.\n return X\n elif n_selected == n_features:\n # All features selected.\n return transform(X)\n else:\n X_sel = transform(X[:, ind[sel]])\n X_not_sel = X[:, ind[not_sel]]\n\n if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):\n return sparse.hstack((X_sel, 
X_not_sel))\n else:\n return np.hstack((X_sel, X_not_sel))", "title": "" }, { "docid": "6b646acdd56006cf7d9a334f532fa6b2", "score": "0.48926842", "text": "def bool_(self):\n return array_ops.cast(self, 'bool', self)", "title": "" }, { "docid": "c828ee692bf96cc62d833f2dfe68373c", "score": "0.48309216", "text": "def is_select(a):\n return is_app_of(a, Z3_OP_SELECT)", "title": "" }, { "docid": "475e57adcd1430eb67f453d6c49b3ae7", "score": "0.48201862", "text": "def asbool(arr):\n if arr.dtype in (np.uint8, np.int8, np.bool_):\n return arr.view(np.bool_)\n else:\n return arr.astype(np.bool_)", "title": "" }, { "docid": "8c5359a0fa03368f482b8880893df3da", "score": "0.48162824", "text": "def make_bool_mask(mask_binary):\n mask_bool = (mask_binary > 0.5)\n return mask_bool", "title": "" }, { "docid": "81b0bba17168b4bb01558e4acd3ac7b1", "score": "0.48128545", "text": "def select(arrays, index):\n if arrays is None or any(i is None for i in arrays):\n return arrays\n return tuple(i.ravel()[index] for i in arrays)", "title": "" }, { "docid": "826ed0d60727a50b50b33b6a0c8b4160", "score": "0.4751948", "text": "def bool2option(b):\n\n return 'yes' if b else 'no'", "title": "" }, { "docid": "0c51e304612693c5487c661a9f7e9b8e", "score": "0.47092107", "text": "def select(iterable, *predicates, **kwargs):\n update_dict(kwargs, dict(operator=all))\n predicate = CompoundPredicate(*predicates, **kwargs)\n return tuple(filter(predicate, iterable))", "title": "" }, { "docid": "b34b9a1dee8e9016e8bb1a275cb2a75c", "score": "0.46962547", "text": "def is_bool_array(idx):\n if isinstance(idx,list):\n return len(idx)>0 and isinstance(idx[0],bool)\n if isinstance(idx,np.ndarray):\n return idx.dtype==\"bool\"\n return False", "title": "" }, { "docid": "0243096b84aa760faa4129f32f892df7", "score": "0.46881524", "text": "def genT(self):\n\n R = [(b, a) for a, b in self.R]\n return BoolM(R)", "title": "" }, { "docid": "becfee63ed48cf3c6fc45f9a45893623", "score": "0.46688822", "text": "def _boolean_unmask(data_array, bool_array):\n assert data_array.ndim in (1, 2)\n assert bool_array.ndim == 1\n assert bool_array.sum() == data_array.shape[-1]\n\n unmasked_data = np.full(\n shape=bool_array.shape + data_array.T.shape[1:],\n fill_value=np.nan,\n dtype=data_array.dtype,\n )\n unmasked_data[bool_array] = data_array\n unmasked_data = unmasked_data.T\n return unmasked_data", "title": "" }, { "docid": "becfee63ed48cf3c6fc45f9a45893623", "score": "0.46688822", "text": "def _boolean_unmask(data_array, bool_array):\n assert data_array.ndim in (1, 2)\n assert bool_array.ndim == 1\n assert bool_array.sum() == data_array.shape[-1]\n\n unmasked_data = np.full(\n shape=bool_array.shape + data_array.T.shape[1:],\n fill_value=np.nan,\n dtype=data_array.dtype,\n )\n unmasked_data[bool_array] = data_array\n unmasked_data = unmasked_data.T\n return unmasked_data", "title": "" }, { "docid": "14c5d20df88e089bcae1e6c6055367c6", "score": "0.46596858", "text": "def select(self, **kwargs):\n time_selectors = ['dumps', 'timerange', 'scans', 'compscans', 'targets']\n freq_selectors = ['channels', 'freqrange']\n corrprod_selectors = ['corrprods', 'ants', 'inputs', 'pol']\n # Check if keywords are valid and raise exception only if this is explicitly enabled\n valid_kwargs = time_selectors + freq_selectors + corrprod_selectors + \\\n ['spw', 'subarray', 'weights', 'flags', 'reset', 'strict']\n # Check for definition of strict\n strict = kwargs.get('strict', True)\n if strict and set(kwargs.keys()) - set(valid_kwargs):\n raise TypeError(\"select() got 
unexpected keyword argument(s) %s, valid ones are %s\" %\n (list(set(kwargs.keys()) - set(valid_kwargs)), valid_kwargs))\n # If select() is called without arguments, reset all selections\n reset = 'TFB' if not kwargs else kwargs.pop('reset', 'auto')\n kwargs['spw'] = spw = kwargs.get('spw', self.spw)\n if spw >= len(self.spectral_windows):\n raise IndexError('Data set has %d spectral window(s): spw should be in range 0..%d, is %d instead' %\n (len(self.spectral_windows), len(self.spectral_windows) - 1, spw))\n kwargs['subarray'] = subarray = kwargs.get('subarray', self.subarray)\n if subarray >= len(self.subarrays):\n raise IndexError('Data set has %d subarray(s): subarray should be in range 0..%d, is %d instead' %\n (len(self.subarrays), len(self.subarrays) - 1, subarray))\n # In 'auto' mode, only reset flags for those dimensions that will be affected by selectors\n if reset == 'auto':\n reset = 'T' if set(kwargs.keys()).intersection(time_selectors) else ''\n reset += 'F' if set(kwargs.keys()).intersection(freq_selectors) else ''\n reset += 'B' if set(kwargs.keys()).intersection(corrprod_selectors) else ''\n # Change spectral window and/or subarray\n if spw != self.spw:\n reset += 'TF'\n self.spw = spw\n if subarray != self.subarray:\n reset += 'TB'\n self.subarray = subarray\n # Reset the selection flags on the appropriate dimensions\n if 'T' in reset:\n self._time_keep[:] = True\n self._time_keep &= (self.sensor.get('Observation/spw_index') == spw)\n self._time_keep &= (self.sensor.get('Observation/subarray_index') == subarray)\n for key in time_selectors:\n self._selection.pop(key, None)\n # Since the number of freqs / corrprods may change due to spw / subarray, create these flags afresh\n if 'F' in reset:\n self._freq_keep = np.ones(self.spectral_windows[self.spw].num_chans, dtype=bool)\n for key in freq_selectors:\n self._selection.pop(key, None)\n if 'B' in reset:\n self._corrprod_keep = np.ones(len(self.subarrays[self.subarray].corr_products), dtype=bool)\n for key in corrprod_selectors:\n self._selection.pop(key, None)\n # Now add the new selection criteria to the list (after the existing ones were kept or culled)\n self._selection.update(kwargs)\n\n for k, v in self._selection.items():\n # Selections that affect time axis\n if k == 'dumps':\n if np.asarray(v).dtype == bool:\n self._time_keep &= v\n else:\n dump_keep = np.zeros(len(self._time_keep), dtype=bool)\n dump_keep[v] = True\n self._time_keep &= dump_keep\n elif k == 'timerange':\n start_time = katpoint.Timestamp(v[0]).secs + 0.5 * self.dump_period\n end_time = katpoint.Timestamp(v[1]).secs - 0.5 * self.dump_period\n self._time_keep &= (self.sensor.timestamps[:] >= start_time)\n self._time_keep &= (self.sensor.timestamps[:] <= end_time)\n elif k in ('scans', 'compscans'):\n scans = _selection_to_list(v)\n scan_keep = np.zeros(len(self._time_keep), dtype=bool)\n scan_sensor = self.sensor.get('Observation/scan_state' if k == 'scans' else 'Observation/label')\n scan_index_sensor = self.sensor.get(f'Observation/{k[:-1]}_index')\n for scan in scans:\n if isinstance(scan, numbers.Integral):\n scan_keep |= (scan_index_sensor == scan)\n elif scan[0] == '~':\n scan_keep |= ~(scan_sensor == scan[1:])\n else:\n scan_keep |= (scan_sensor == scan)\n self._time_keep &= scan_keep\n elif k == 'targets':\n targets = v if is_iterable(v) else [v]\n target_keep = np.zeros(len(self._time_keep), dtype=bool)\n target_index_sensor = self.sensor.get('Observation/target_index')\n for t in targets:\n try:\n if isinstance(t, numbers.Integral):\n 
target_index = t\n elif isinstance(t, katpoint.Target) or isinstance(t, str) and ',' in t:\n target_index = self.catalogue.targets.index(t)\n else:\n target_index = self.catalogue.targets.index(self.catalogue[t])\n except (KeyError, ValueError):\n # Warn here, in case the user gets the target subtly wrong and wonders why it is not selected\n logger.warning(\"Skipping unknown selected target '%s'\", t)\n continue\n target_keep |= (target_index_sensor == target_index)\n self._time_keep &= target_keep\n # Selections that affect frequency axis\n elif k == 'channels':\n if np.asarray(v).dtype == bool:\n self._freq_keep &= v\n else:\n chan_keep = np.zeros(len(self._freq_keep), dtype=bool)\n chan_keep[v] = True\n self._freq_keep &= chan_keep\n elif k == 'freqrange':\n start_freq = v[0] + 0.5 * self.spectral_windows[self.spw].channel_width\n end_freq = v[1] - 0.5 * self.spectral_windows[self.spw].channel_width\n self._freq_keep &= (self.spectral_windows[self.spw].channel_freqs >= start_freq)\n self._freq_keep &= (self.spectral_windows[self.spw].channel_freqs <= end_freq)\n # Selections that affect corrprod axis\n elif k == 'corrprods':\n if v == 'auto':\n self._corrprod_keep &= [(inpA[:-1] == inpB[:-1])\n for inpA, inpB in self.subarrays[self.subarray].corr_products]\n elif v == 'cross':\n self._corrprod_keep &= [(inpA[:-1] != inpB[:-1])\n for inpA, inpB in self.subarrays[self.subarray].corr_products]\n else:\n v = np.asarray(v)\n if v.ndim == 2 and v.shape[1] == 2:\n all_corrprods = self.subarrays[self.subarray].corr_products\n v = v.tolist()\n v = np.array([list(cp) in v for cp in all_corrprods])\n if np.asarray(v).dtype == bool:\n self._corrprod_keep &= v\n else:\n cp_keep = np.zeros(len(self._corrprod_keep), dtype=bool)\n cp_keep[v] = True\n self._corrprod_keep &= cp_keep\n elif k == 'ants':\n ants = _selection_to_list(v)\n ant_names = [(ant.name if isinstance(ant, katpoint.Antenna) else ant) for ant in ants]\n if _is_deselection(ant_names):\n ant_names = [ant_name[1:] for ant_name in ant_names]\n self._corrprod_keep &= [(inpA[:-1] not in ant_names and inpB[:-1] not in ant_names)\n for inpA, inpB in self.subarrays[self.subarray].corr_products]\n else:\n self._corrprod_keep &= [(inpA[:-1] in ant_names and inpB[:-1] in ant_names)\n for inpA, inpB in self.subarrays[self.subarray].corr_products]\n elif k == 'inputs':\n inps = _selection_to_list(v)\n self._corrprod_keep &= [(inpA in inps and inpB in inps)\n for inpA, inpB in self.subarrays[self.subarray].corr_products]\n elif k == 'pol':\n pols = _selection_to_list(v)\n # Lower case and strip out empty strings\n pols = [i.lower() for i in pols if i]\n\n # Proceed if we have a selection\n if len(pols) > 0:\n # If given a selection assume we keep nothing\n keep = np.zeros(self._corrprod_keep.shape, dtype=bool)\n\n # or separate polarisation selections together\n for polAB in pols:\n polAB = polAB * 2 if polAB in ('h', 'v') else polAB\n keep |= [(inpA[-1] == polAB[0] and inpB[-1] == polAB[1])\n for inpA, inpB in self.subarrays[self.subarray].corr_products]\n\n # and into final corrprod selection\n self._corrprod_keep &= keep\n\n # Selections that affect weights and flags\n elif k == 'weights':\n self._weights_keep = v\n elif k == 'flags':\n self._flags_keep = v\n\n # Update the relevant data members based on selection made\n # These would all be more efficient as properties, but at the expense of extra lines of code...\n self.shape = (self._time_keep.sum(), self._freq_keep.sum(), self._corrprod_keep.sum())\n self.size = np.prod(self.shape, 
dtype=np.int64) * np.dtype('complex64').itemsize\n if not self.size:\n logger.warning('The selection criteria resulted in an empty data set')\n # This is quicker than indexing np.arange()\n self.dumps = self._time_keep.nonzero()[0]\n self.channels = self._freq_keep.nonzero()[0]\n self.freqs = self.channel_freqs = self.spectral_windows[self.spw].channel_freqs[self._freq_keep]\n self.channel_width = self.spectral_windows[self.spw].channel_width\n self.corr_products = self.subarrays[self.subarray].corr_products[self._corrprod_keep]\n self.inputs = sorted(set(np.ravel(self.corr_products)))\n input_ants = {inp[:-1] for inp in self.inputs}\n self.ants = [ant for ant in self.subarrays[self.subarray].ants if ant.name in input_ants]\n # Ensure that updated selections make their way to sensor cache, as\n # well as any underlying datasets and data lazy indexers that need it\n self._set_keep(self._time_keep, self._freq_keep, self._corrprod_keep,\n self._weights_keep, self._flags_keep)\n # Figure out which scans, compscans and targets are included in selection\n self.scan_indices = sorted(set(self.sensor['Observation/scan_index']))\n self.compscan_indices = sorted(set(self.sensor['Observation/compscan_index']))\n self.target_indices = sorted(set(self.sensor['Observation/target_index']))", "title": "" }, { "docid": "d8a333fe7da1caf23f736696c20368ac", "score": "0.4658625", "text": "def get_bool_selection_to_keep(big_selection, small_selection):\n assert big_selection.size >= small_selection.size()\n result = flex.bool(big_selection.size(), False)\n i_in_big = 0\n i_in_small = 0\n size_small = small_selection.size()\n size_big = big_selection.size()\n n_matches = 0\n nw = 0\n while (i_in_big < size_big) and (i_in_small < size_small):\n if big_selection[i_in_big] == small_selection[i_in_small]:\n result[i_in_big] = True\n i_in_big += 1\n i_in_small += 1\n n_matches += 1\n elif big_selection[i_in_big] > small_selection[i_in_small]:\n i_in_small += 1\n nw += 1\n else:\n i_in_big += 1\n # this assert is optional, in general case it is not guaranteed that\n # all numbers from small selection are present in big selection.\n assert n_matches == size_small, \"%d %d\" % (n_matches, size_small)\n return result", "title": "" }, { "docid": "c69dc621cc67de4096d474c85ce7373d", "score": "0.46452928", "text": "def subset_generator(a_generator):\n return a_generator", "title": "" }, { "docid": "b761f54647a120ff90dc0e8f930f4753", "score": "0.4627758", "text": "def bool(self):\n return array_ops.cast(self, 'bool')", "title": "" }, { "docid": "986ddfac5d87d0a0d55769160d3a4765", "score": "0.46202126", "text": "def choose(\n a: ndarray,\n choices: Sequence[ndarray],\n out: Optional[ndarray] = None,\n mode: BoundsMode = \"raise\",\n) -> ndarray:\n return a.choose(choices=choices, out=out, mode=mode)", "title": "" }, { "docid": "9a7ea57569b6ccef55094ef4453d8d56", "score": "0.46200755", "text": "def select_action(self, value):\n arg_select = torch.argmax(value, dim=-1)\n mask = torch.rand(arg_select.shape, device=value.device) < self.epsilon\n arg_rand = torch.randint(low=0, high=value.shape[-1], size=(mask.sum(),), device=value.device)\n arg_select[mask] = arg_rand\n return arg_select", "title": "" }, { "docid": "31cb3827ec5c3f982eef9de5d038261f", "score": "0.46191847", "text": "def get_boolean_true_false():\r\n faker = Faker()\r\n b = faker.words(1, ['True', 'False'], True)\r\n return b[0]", "title": "" }, { "docid": "154eb33273ae7be3bb77d3d6ca2e36b6", "score": "0.4615286", "text": "def select_all_items(self, selected: bool = 
True) -> None:\r\n ...", "title": "" }, { "docid": "19e4909200921417218438b2de272f1d", "score": "0.46107706", "text": "def _determine_select(self,bright=True,type=None,dr=None,\n interp_degree=_INTERPDEGREEBRIGHT,\n interp_type='tanh',\n robust=False,\n binedges=None):\n if bright:\n self.type_bright= type\n plateindx= self.brightplateindx\n else:\n self.type_faint= type\n plateindx= self.faintplateindx\n if type.lower() == 'platesn_r': #plateSN_r dependent r selection\n #Divide up plates in bins\n nbins= len(binedges)-1\n plate_in_bins= [[] for ii in range(nbins)]\n platebin_dict= {}\n theseplates= self.plates[plateindx]\n thisplatestr= self.platestr[plateindx]\n for ii in range(len(theseplates)):\n kk= 0\n while kk < nbins \\\n and thisplatestr[ii].platesn_r > binedges[kk+1]:\n kk+=1\n plate_in_bins[kk].append(theseplates[ii])\n #Also create dictionary with bin for each plate\n platebin_dict[str(theseplates[ii])]= kk \n #For each set of plates, instantiate new selection object\n platesn_sfs= []\n for kk in range(nbins):\n if bright:\n type_faint= 'constant'\n type_bright= 'r'\n else:\n type_faint= 'r'\n type_bright= 'constant'\n platesn_sfs.append(segueSelect(sample=self.sample,\n plates=plate_in_bins[kk],\n select=self.select,\n type_bright=type_bright,\n dr_bright=dr,\n interp_type_bright='tanh',\n interp_degree_bright=interp_degree,\n robust_bright=robust,\n type_faint=type_faint,\n dr_faint=dr,\n interp_type_faint='tanh',\n interp_degree_faint=interp_degree,\n robust_faint=robust,\n _platephot=copy.copy(self.platephot),\n _platespec=copy.copy(self.platespec)\n ,_spec=copy.copy(self.spec)))\n if bright:\n self.platesn_plate_in_bins_bright= plate_in_bins\n self.platesn_platebin_dict_bright= platebin_dict\n self.platesn_sfs_bright= platesn_sfs\n else:\n self.platesn_plate_in_bins_faint= plate_in_bins\n self.platesn_sfs_faint= platesn_sfs\n self.platesn_platebin_dict_faint= platebin_dict\n return None #Done here!\n #First determine the total weight for each plate\n if not hasattr(self,'weight'): self.weight= {}\n for ii in range(len(self.plates)):\n if bright and 'faint' in self.platestr[ii].programname: continue\n elif not bright \\\n and not 'faint' in self.platestr[ii].programname: continue\n plate= self.plates[ii]\n self.weight[str(plate)]= len(self.platespec[str(plate)])\\\n /float(len(self.platephot[str(plate)]))\n if type.lower() == 'constant':\n return #We're done!\n if type.lower() == 'sharprcut' or type.lower() == 'tanhrcut':\n #For each plate cut at the location of the faintest object\n if not hasattr(self,'rcuts'): self.rcuts= {}\n if not hasattr(self,'rcuts_correct'): self.rcuts_correct= {}\n for ii in range(len(self.plates)):\n if bright and 'faint' in self.platestr[ii].programname: continue\n elif not bright \\\n and not 'faint' in self.platestr[ii].programname: continue\n p= self.plates[ii]\n if self.weight[str(p)] == 0.:\n self.rcuts[str(p)]= 0.\n self.rcuts_correct[str(p)]= 0.\n continue\n self.rcuts[str(p)]= numpy.amax(self.platespec[str(p)].dered_r)\n denom= float(numpy.sum((self.platephot[str(p)].r <= self.rcuts[str(p)])))\n if denom == 0.: self.rcuts_correct[str(p)]= 0.\n else:\n self.rcuts_correct[str(p)]= \\\n float(len(self.platephot[str(p)]))/denom\n elif type.lower() == 'tanhrcut+brightsharprcut':\n #For each plate cut at the location of the brightest and faintest object\n if not hasattr(self,'rcuts_faint'): self.rcuts_faint= {}\n if not hasattr(self,'rcuts_bright'): self.rcuts_bright= {}\n if not hasattr(self,'rcuts_correct'): self.rcuts_correct= {}\n for ii 
in range(len(self.plates)):\n if bright and 'faint' in self.platestr[ii].programname: continue\n elif not bright \\\n and not 'faint' in self.platestr[ii].programname: continue\n p= self.plates[ii]\n if self.weight[str(p)] == 0.:\n self.rcuts_bright[str(p)]= 0.\n self.rcuts_faint[str(p)]= 0.\n self.rcuts_correct[str(p)]= 0.\n continue\n self.rcuts_bright[str(p)]= numpy.amin(self.platespec[str(p)].dered_r)\n self.rcuts_faint[str(p)]= numpy.amax(self.platespec[str(p)].dered_r)\n denom= float(numpy.sum((self.platephot[str(p)].r <= self.rcuts_faint[str(p)])*(self.platephot[str(p)].r > self.rcuts_bright[str(p)])))\n if denom == 0.: self.rcuts_correct[str(p)]= 0.\n else:\n self.rcuts_correct[str(p)]= \\\n float(len(self.platephot[str(p)]))/denom\n elif type.lower() == 'r':\n #Determine the selection function in bins in r, for bright/faint\n nrbins= int(math.floor((17.8-self.rmin)/dr))+1\n s_one_r= numpy.zeros((nrbins,len(self.plates)))\n s_r= numpy.zeros((nrbins,len(self.plates)))\n #Determine s_1(r) for each plate separately first\n weights= numpy.zeros(len(self.plates))\n if not bright:\n thisrmin, thisrmax= 17.8, self.rmax+dr/2. #slightly further to avoid out-of-range errors\n else:\n thisrmin, thisrmax= self.rmin-dr/2., 17.8 #slightly further to avoid out-of-range errors\n for ii in range(len(self.plates)):\n plate= self.plates[ii]\n if bright and 'faint' in self.platestr[ii].programname: \n continue\n elif not bright \\\n and not 'faint' in self.platestr[ii].programname: \n continue\n nspecr, edges = numpy.histogram(self.platespec[str(plate)].dered_r,bins=nrbins,range=[thisrmin,thisrmax])\n nphotr, edges = numpy.histogram(self.platephot[str(plate)].r,\n bins=nrbins,\n range=[thisrmin,thisrmax])\n nspecr= numpy.array(nspecr,dtype='float64')\n nphotr= numpy.array(nphotr,dtype='float64')\n nonzero= (nspecr > 0.)*(nphotr > 0.)\n s_r[nonzero,ii]= nspecr[nonzero].astype('float64')/nphotr[nonzero]\n weights[ii]= float(numpy.sum(nspecr))/float(numpy.sum(nphotr))\n nspecr/= float(numpy.sum(nspecr))\n nphotr/= float(numpy.sum(nphotr))\n s_one_r[nonzero,ii]= nspecr[nonzero]/nphotr[nonzero]\n if bright:\n self.s_r_plate_rs_bright= \\\n numpy.linspace(self.rmin+dr/2.,17.8-dr/2.,nrbins)\n self.s_r_plate_bright= s_r\n self.s_one_r_plate_bright= s_one_r\n else:\n self.s_r_plate_rs_faint= \\\n numpy.linspace(17.8+dr/2.,self.rmax-dr/2.,nrbins)\n self.s_r_plate_faint= s_r\n self.s_one_r_plate_faint= s_one_r\n s_one_r_plate= s_one_r\n s_r_plate= s_r\n fromIndividual= False\n if fromIndividual:\n #Mean or median?\n median= False\n if median:\n s_one_r= numpy.median(s_one_r_plate[:,plateindx],axis=1)\n else:\n if bright:\n s_one_r= numpy.sum(s_one_r_plate,axis=1)/self.nbrightplates\n else:\n s_one_r= numpy.sum(s_one_r_plate,axis=1)/self.nfaintplates\n else:\n s_one_r= \\\n numpy.sum(s_r_plate[:,plateindx],axis=1)\\\n /numpy.sum(weights)\n if bright:\n self.s_one_r_bright= s_one_r\n self.s_r_bright= s_r\n else:\n self.s_one_r_faint= s_one_r\n self.s_r_faint= s_r\n #Bootstrap an uncertainty on the selection function\n if bright: nplates= self.nbrightplates\n else: nplates= self.nfaintplates\n jack_samples= numpy.zeros((nplates,len(s_one_r)))\n jack_s_r_plate= s_r_plate[:,plateindx]\n jack_s_r_weights= weights[plateindx]\n for jj in range(nplates):\n boot_indx= numpy.array([True for ii in range(nplates)],\\\n dtype='bool')\n boot_indx[jj]= False\n if fromIndividual:\n #Mean or median?\n if median:\n jack_samples[jj,:]= numpy.median(s_one_r_plate[:,plateindx[boot_indx]],\n axis=1)\n else:\n jack_samples[jj,:]= 
numpy.sum(s_one_r_plate[:,plateindx[boot_indx]],\n axis=1)/nplates\n else:\n jack_samples[jj,:]= \\\n numpy.sum(jack_s_r_plate[:,boot_indx],axis=1)\\\n /numpy.sum(jack_s_r_weights[boot_indx])\n #Compute jackknife uncertainties\n s_one_r_err= numpy.sqrt((nplates-1)*numpy.var(jack_samples,\n axis=0))\n s_one_r_err[(s_one_r_err == 0.)]= 0.01\n if bright:\n self.s_one_r_jack_samples_bright= jack_samples\n self.s_one_r_err_bright= s_one_r_err\n else:\n self.s_one_r_jack_samples_faint= jack_samples\n self.s_one_r_err_faint= s_one_r_err\n if bright: self.interp_type_bright= interp_type\n else: self.interp_type_faint= interp_type\n if bright:\n w= numpy.zeros(len(self.s_one_r_bright))+10000.\n yfunc= numpy.zeros(len(w))-20.\n nonzero= (self.s_one_r_bright > 0.)\n w[nonzero]= \\\n self.s_one_r_bright[nonzero]/self.s_one_r_err_bright[nonzero]\n yfunc[nonzero]= numpy.log(self.s_one_r_bright[nonzero])\n self.interp_rs_bright= \\\n numpy.linspace(self.rmin+1.*dr/2.,17.8-1.*dr/2.,nrbins)\n if interp_type.lower() == 'spline':\n self.s_one_r_bright_interpolate= interpolate.splrep(\\\n self.interp_rs_bright,yfunc,\n k=interp_degree,w=w)\n #Continue along the derivative for out of bounds\n minderiv= interpolate.splev(self.interp_rs_bright[0],\n self.s_one_r_bright_interpolate,\n der=1)\n self.s_one_r_bright_minderiv= minderiv\n self.s_one_r_bright_minxo= self.interp_rs_bright[0]\n self.s_one_r_bright_minyo= yfunc[0]\n elif interp_type.lower() == 'tanh':\n #Fit a tanh to s_1(r)\n params= numpy.array([17.7,numpy.log(0.1),\n numpy.log(3.)])\n params= optimize.fmin_powell(_sf_tanh_minusloglike,\n params,\n args=(self.interp_rs_bright,\n self.s_one_r_bright,\n self.s_one_r_err_bright,\n numpy.zeros(len(self.interp_rs_bright))+(self.interp_rs_bright[1]-self.interp_rs_bright[0])/2.,\n robust))\n self.s_one_r_tanh_params_bright= params\n else:\n w= numpy.zeros(len(self.s_one_r_faint))+10000.\n yfunc= numpy.zeros(len(w))-20.\n nonzero= (self.s_one_r_faint > 0.)\n w[nonzero]= \\\n self.s_one_r_faint[nonzero]/self.s_one_r_err_faint[nonzero]\n yfunc[nonzero]= numpy.log(self.s_one_r_faint[nonzero])\n self.interp_rs_faint= \\\n numpy.linspace(17.8+1.*dr/2.,self.rmax-dr/2.,nrbins)\n if interp_type.lower() == 'spline':\n self.s_one_r_faint_interpolate= interpolate.splrep(\\\n self.interp_rs_faint,yfunc,\n k=interp_degree,w=w)\n #Continue along the derivative for out of bounds\n minderiv= interpolate.splev(self.interp_rs_faint[0],\n self.s_one_r_faint_interpolate,\n der=1)\n self.s_one_r_faint_minderiv= minderiv\n self.s_one_r_faint_minxo= self.interp_rs_faint[0]\n self.s_one_r_faint_minyo= yfunc[0]\n elif interp_type.lower() == 'tanh':\n #Fit a tanh to s_1(r)\n params= numpy.array([18.7,numpy.log(0.1),\n numpy.log(3.)])\n params= optimize.fmin_powell(_sf_tanh_minusloglike,\n params,\n args=(self.interp_rs_faint,\n self.s_one_r_faint,\n self.s_one_r_err_faint,\n numpy.zeros(len(self.interp_rs_faint))+(self.interp_rs_faint[1]-self.interp_rs_faint[0])/2.,robust))\n self.s_one_r_tanh_params_faint= params\n return None", "title": "" }, { "docid": "639adae57c78e715f454bb3f6f06a0ba", "score": "0.46009785", "text": "def select(condlist, choicelist, default=0):\n from astropy.utils.masked import Masked\n\n condlist = [c.unmasked if isinstance(c, Masked) else c for c in condlist]\n\n data_list, mask_list = _get_data_and_masks(*choicelist)\n default = Masked(default) if default is not np.ma.masked else Masked(0, mask=True)\n return (\n (condlist, data_list, default.unmasked),\n (condlist, mask_list, default.mask),\n {},\n None,\n )", 
"title": "" }, { "docid": "2a987756684d6a1510f86176035d2f65", "score": "0.45810536", "text": "def get_selected(self, is_selected):\n self.is_selected = is_selected", "title": "" }, { "docid": "1670f0191bed4527001045a226ba0004", "score": "0.4575272", "text": "def apply_selection(iteration, data_reader, data_list,\n select, species, extensions):\n # Create the array that determines whether the particle\n # should be selected or not.\n Ntot = len(data_list[0])\n select_array = np.ones(Ntot, dtype='bool')\n\n # Loop through the selection rules, and aggregate results in select_array\n for quantity in select.keys():\n q = data_reader.read_species_data(\n iteration, species, quantity, extensions)\n # Check lower bound\n if select[quantity][0] is not None:\n select_array = np.logical_and(\n select_array,\n q > select[quantity][0])\n # Check upper bound\n if select[quantity][1] is not None:\n select_array = np.logical_and(\n select_array,\n q < select[quantity][1])\n\n # Use select_array to reduce each quantity\n for i in range(len(data_list)):\n if len(data_list[i]) > 1: # Do not apply selection on scalar records\n data_list[i] = data_list[i][select_array]\n\n return(data_list)", "title": "" }, { "docid": "98de7e332c37f7e605d4b7bed5191b28", "score": "0.45668942", "text": "def random_flag():\r\n return bool(np.random.random_integers(0, 1, 1))", "title": "" }, { "docid": "bd47d49cb8acdc734916d5271b5cc462", "score": "0.45395306", "text": "def select(self, eval):\n selection = np.random.choice(self.P, math.floor( (1-self.r)*self.p ), replace=False, p=eval)\n return selection.tolist()", "title": "" }, { "docid": "9485efcba9fdf47df500b09321d92748", "score": "0.45332232", "text": "def activate_boolean_map(bool_map, dilation_width=7):\r\n # use the boolean map as a mask for flood filling\r\n activation=np.array(bool_map, dtype=np.uint8)\r\n mask_shape=(bool_map.shape[0] + 2, bool_map.shape[1] + 2)\r\n ffill_mask=np.zeros(mask_shape, dtype=np.uint8)\r\n\r\n # top and bottom rows\r\n for i in range(0, activation.shape[0]):\r\n for j in [0, activation.shape[1] - 1]:\r\n if activation[i,j]:\r\n cv2.floodFill(activation, ffill_mask, (j, i), 0)\r\n\r\n # left and right columns\r\n for i in [0, activation.shape[0] - 1]:\r\n for j in range(0, activation.shape[1]):\r\n if activation[i,j]:\r\n cv2.floodFill(activation, ffill_mask, (j, i), 0)\r\n\r\n ret=activation!=1\r\n map1=ret * bool_map\r\n map2=ret * (1-bool_map)\r\n kernal=np.ones((dilation_width,dilation_width),np.uint8);\r\n \r\n if dilation_width > 0:\r\n map1= cv2.dilate(map1.astype('uint8'),kernal,1)\r\n map2= cv2.dilate(map2.astype('uint8'),kernal,1)\t\r\n activation=(map1+map2)/2\r\n \r\n return activation", "title": "" }, { "docid": "40c12b1213842dc39cd51c516d2995eb", "score": "0.4528164", "text": "def select(self, *outsig):\n pass", "title": "" }, { "docid": "7c42c1a26e12d20b3cb2ef9f8b151239", "score": "0.45258164", "text": "def select(self, flags):\n\t\tif isinstance(flags, basestring): flags = [flags]\n\t\t# Build bitfield\n\t\tpos = np.zeros(self.nbyte, np.uint8)\n\t\tneg = pos.copy()\n\t\tfor flag in flags:\n\t\t\tinverse = flag.startswith(\"~\") or flag.startswith(\"!\")\n\t\t\tif inverse: flag = flag[1:]\n\t\t\ttry:\n\t\t\t\ti = self.flag_names.index(flag)\n\t\t\t\tbyte = i//8\n\t\t\t\tbit = 1<<(i%8)\n\t\t\t\tif not inverse: pos[byte] |= bit\n\t\t\t\telse: neg[byte] |= bit\n\t\t\texcept ValueError:\n\t\t\t\ti = self.derived_names.index(flag)\n\t\t\t\tif not inverse:\n\t\t\t\t\tpos |= self.derived_masks[i,0]\n\t\t\t\t\tneg |= 
self.derived_masks[i,1]\n\t\t\t\telse:\n\t\t\t\t\tneg |= self.derived_masks[i,0]\n\t\t\t\t\tpos |= self.derived_masks[i,1]\n\t\tres = self.copy()\n\t\tres.flag_stack = self.flag_stack & pos | ~self.flag_stack & neg\n\t\treturn res", "title": "" }, { "docid": "fdaa3d0330b0297df1ee16c26e98d882", "score": "0.4518224", "text": "def bool_to_sf(b: bool) -> Generator[None, None, None]:\n if b:\n yield", "title": "" }, { "docid": "326c0c6605bac1708139808614ec3f87", "score": "0.45162117", "text": "def variant_bool(b):\n if not isinstance(b, bool):\n raise TypeError(\"Only booleans are supported\")\n\n return GLib.Variant.new_boolean(b)", "title": "" }, { "docid": "5ac6216dfe282038801b22d761e575ba", "score": "0.45094672", "text": "def batched_index_select(t, dim, inds):\n dummy = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), t.size(2))\n out = t.gather(dim, dummy) # b x e x f\n return out", "title": "" }, { "docid": "f5618de45b1d16b2131d917353c10e46", "score": "0.45077157", "text": "def any_is_true(generator):\n for flag in generator:\n if flag:\n return True\n return False", "title": "" }, { "docid": "d80d03462164129a9cf36e0444c77056", "score": "0.44987136", "text": "def _choose_masked(ar, choices):\n mask = ar == -1\n ar[mask] == 0 # we fill it in with some values, doesn't matter, since it is masked\n ar = choices[ar]\n return np.ma.array(ar, mask=mask)", "title": "" }, { "docid": "aeee326f6ab2df8e574bafaad356e3a0", "score": "0.44982395", "text": "def bool_mapper(attribute):\r\n def _fn(values):\r\n if values['bool'].lower() in ('yes', '1', 'on', 'true'):\r\n return {attribute: True}\r\n elif values['bool'].lower() in ('no', '0', 'off', 'false'):\r\n return {attribute: False}\r\n return {}\r\n return _fn", "title": "" }, { "docid": "71fb052be06120fe95b56f3c49f51388", "score": "0.449672", "text": "def source_selection_all(N):\n return np.ones(N) >= 0", "title": "" }, { "docid": "210c4bbc020e850f37b59dfaa7816bab", "score": "0.44788986", "text": "def make_nd_bin_selection(\n *argv : Tuple[str, Iterable[float]],\n features_mapping : List[Tuple[str, str]] = default_features_mapping\n ) -> Tuple[Tuple[Callable[[np.ndarray], np.ndarray]], Tuple[Tuple[float, float]]]:\n\n variables, bin_edges = zip(*argv)\n cuts = [zip(single_var_bin_edges[:-1],\n single_var_bin_edges[1: ]) for single_var_bin_edges in bin_edges]\n\n selection = [\n ((lambda single_cut_capture: (\n lambda array: _vararg_logical_and(*(\n _is_between(get_feature(array, var_name, features_mapping), *single_var_cut)\n for var_name, single_var_cut in zip(variables, single_cut_capture)\n ))\n ))(single_cut), single_cut)\n for single_cut in product(*cuts)\n ]\n\n selection_funcs, bins = zip(*selection)\n return selection_funcs, bins", "title": "" }, { "docid": "6526097dfce645b796631c6828fd5158", "score": "0.44786382", "text": "def bool_item(bool_item_spec):\n return from_specification(bool_item_spec)['my_bool']", "title": "" }, { "docid": "23c28b2e94bc5af4f2757a8ea4612d4e", "score": "0.44612077", "text": "def make_binary_mask(data, mask_bool):\n data = np.asarray(data)\n mask_bool = np.asarray(mask_bool)\n assert(len(data.shape) == len(mask_bool.shape)),\\\n \"Data and mask shape differ \\n\" \\\n + \"Data dim is: %s\\nMask dim is: %s\" \\\n %(len(data.shape), len(mask_bool.shape))\n assert(all(data.shape[i] >= mask_bool.shape[i] \\\n for i in range(len(data.shape)))),\\\n \"Data and mask shape are not compatible\"\\\n +\"Data shape is: %s\\nMask shape is: %s\"\\\n %(data.shape, mask_bool.shape)\n new_mask = np.zeros(data.shape)\n 
new_mask[mask_bool] = 1\n return new_mask", "title": "" }, { "docid": "6b40be86653eb19c4fe65c743c625917", "score": "0.44570872", "text": "def prim_if(self, boolean, blklist):\n if boolean:\n self.icall(self.evline, blklist[:])\n yield True\n self.ireturn()\n yield True", "title": "" }, { "docid": "886a1d79932c0b011df565c193aebe95", "score": "0.44539976", "text": "def __getitem__(self, arg):\n return _array_select(self, arg)", "title": "" }, { "docid": "8c6885d3aa6f7043a6451401958f8194", "score": "0.4453249", "text": "def bool_yield_wrapper(gen):\n def wrapped_func(*args, **kwargs):\n return Bool_Yield_Wrapper(gen(*args, **kwargs))\n\n return wrapped_func", "title": "" }, { "docid": "382f2f1f8798edf75c91f0921f99a1eb", "score": "0.4451909", "text": "def getVselResult(tensor_mask, tensor_x):\n dtype_x = tensor_x.dtype\n op_name = \"emit_insn_elewise_multiple_sel\"\n mode = 'bit'\n shape_condition = te.lang.cce.util.shape_to_list(tensor_mask.shape)\n shape = shape_condition\n shape[-1] = shape[-1] * 8\n\n def get_indice(indice):\n \"\"\"\n get indice\n \"\"\"\n res_index = []\n for i, value in enumerate(indice):\n if i == len(indice) - 1:\n res_index.append(value // 8)\n else:\n res_index.append(value)\n return res_index\n\n zero_tensor = tvm.const(0, dtype_x)\n\n def _compute(*indice):\n res_index = get_indice(indice)\n return tvm.select(tensor_mask(*res_index).astype('bool'), tensor_x(*indice), zero_tensor)\n\n op_name = op_name + '|' + mode\n with tvm.tag_scope(op_name):\n tensor_x_ub_sel = tvm.compute(shape, _compute, name=\"tensor_x_ub_sel\")\n return tensor_x_ub_sel", "title": "" }, { "docid": "d0048a8b5810f051e0e13be6d64e04b4", "score": "0.44509357", "text": "def setBool(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "003cd41ec3dbee6109c1315a1f8d0fa3", "score": "0.44479716", "text": "def reset_booleans(self, subset=None, step=None): \n step_0, step_1, step_2 = self._get_steps(step=step)\n args = [step_0, subset, step_1, step_2]\n \n bool_dict = self.booleans\n for key in args:\n if key:\n bool_dict = bool_dict[key]\n else:\n bool_dict = self._get_default_boolean()\n break", "title": "" }, { "docid": "4720dbe2c1c174347d3d700f89c2bc8b", "score": "0.44438305", "text": "def select(a,b):\n return [ a[i] for i in b ]", "title": "" }, { "docid": "234e8b38adddb9d45053c5d86d8f1355", "score": "0.4443385", "text": "def convertToBool(sailAST):\n\t# Get the type of the term\n\ttry:\n\t\tifType = sailAST[0].getType()\n\texcept ValueError:\n\t\t# If we can't get the type, assume it's a bool\n\t\tifType = Sail_t_bool()\n\n\t# Modify the term term based on its type\n\tif isinstance(ifType, (Sail_t_bool, Sail_t_unknown)):\n\t\ttoReturn = sailAST\n\telif isinstance(ifType, Sail_t_option):\n\t\ttoReturn = [SailApp(\n\t\t\tfn=SailHandwrittenFn(\n\t\t\t\tname='is_some',\n\t\t\t\ttyp=Sail_t_fn([], Sail_t_bool()) # Sort of\n\t\t\t),\n\t\t\tactuals=sailAST\n\t\t)]\n\telse:\n\t\tsys.exit(f\"Error: don't know how to handle term of type {ifType} in `convertToBool`\")\n\n\treturn toReturn", "title": "" }, { "docid": "f30a62f7d6ea2fb4e2d6617a8c8c66cf", "score": "0.4437633", "text": "def __init__(self, indicies):\n if isinstance(indicies, tuple):\n assert len(indicies) == 2\n assert indicies[0].ndim == 1\n self.shape = [indicies[0].size, indicies[1]]\n int_idx = indicies[0]\n else:\n assert indicies.ndim == 1\n assert indicies.dtype == bool\n self.shape = [np.count_nonzero(indicies), indicies.size]\n int_idx = np.nonzero(indicies)[0]\n\n nnz = self.shape[0]\n self.sel = 
sparse.csr_matrix((np.ones(nnz,dtype=bool),(np.arange(nnz),int_idx)), shape=self.shape, dtype=bool)\n self.sel_T = self.sel.T # testing has shown the precomputing the transpose saves lots of time\n return", "title": "" }, { "docid": "712f8758c5ff269dcd0268bb2fb876c9", "score": "0.44339335", "text": "def select_parameters(df, params):\n sel = np.zeros(len(df), dtype=bool)\n for key, val in params.items():\n sel = sel & (df[key] == val)\n return sel", "title": "" }, { "docid": "969258c798da080ca03721535f2a39d8", "score": "0.4426958", "text": "def scan(bool_matrix, goal=4):\n N = len(bool_matrix.shape)\n for i in xrange(1, 2 ** N):\n translation = [int(x) for x in bin(i)[2:]]\n # Pad with zeros if it's to small: a translation must have N-length\n longer = N - len(translation)\n if longer:\n translation = [0] * longer + translation\n print 'scan ', i, translation\n cells = iter_matrix_translation(bool_matrix, translation)\n if cells is not False:\n return cells\n # Nothing found\n return False", "title": "" }, { "docid": "803951894b91f4267a74f1efa4ab68d8", "score": "0.4418969", "text": "def select_choice(current,choices):\n return [(tag,text,(current == tag and \"ON\") or \"OFF\") for tag,text in choices]", "title": "" }, { "docid": "a039ecd03ba68f1373b3b4e12cfb1dbc", "score": "0.4418139", "text": "def GetSelectedIndices(basis, scenariosList):\n scenariosList.sort()\n checklist = np.zeros(basis.shape[1], dtype = np.bool)\n checklist[scenariosList] = True\n return ((basis > 0) == checklist).prod(1) > 0", "title": "" }, { "docid": "318c0c1e38834ea7802e22502767b308", "score": "0.44130853", "text": "def _select_next(self, X, gain, idx):\n\n\t\tfor function in self.functions:\n\t\t\tfunction._select_next(X, gain, idx)\n\n\t\tsuper(MixtureSelection, self)._select_next(X, gain, idx)", "title": "" }, { "docid": "4aadadb75160e808cb1833098d904c94", "score": "0.44105154", "text": "def subset2bitvector(subset, superset_list):\n assert isinstance(superset_list, list)\n return [(1 if s in subset else 0) for s in superset_list]", "title": "" }, { "docid": "11c8e718faec3ae92a2130775f53796e", "score": "0.4407591", "text": "def Select(a, *args):\n args = _get_args(args)\n if z3_debug():\n _z3_assert(is_array_sort(a), \"First argument must be a Z3 array expression\")\n return a[args]", "title": "" }, { "docid": "c57157cd7423917dc241b2915172fc48", "score": "0.43957874", "text": "def get_bool_index(length, int_index):\n result = np.zeros(length, dtype=bool)\n result[int_index] = True\n return result", "title": "" }, { "docid": "50ab3e1b40ae372be0d4c182abeb11bb", "score": "0.43957523", "text": "def test_bool_predicate_on_bool_variant(self):\n self._perform_test(\n props={55: True},\n teilenummer=\"9502661\",\n expected=True\n )", "title": "" }, { "docid": "dd71e03842cd67dbaa882c772a4d5764", "score": "0.4393562", "text": "def subset_by_ids(data, sel_profid):\n\n lgc = np.ones((len(data.index)), dtype=bool)\n for i in range(0, len(data.index)):\n lgc[i] = data.index[i] in sel_profid\n subset = data[lgc]\n\n return subset", "title": "" }, { "docid": "0bc4b7013819d045040cfb2549fbcd68", "score": "0.43838203", "text": "def masked_select(self, x, mask):\n ndim = x.ndim\n B = x.shape[0]\n m = x.shape[1]\n mask = np.diag(mask)\n\n x_ = x.reshape(B, -1)\n x_ = ops.matmul(mask, x_)\n\n if ndim == 4:\n # x shape = (B, m, n, 0)\n n = x.shape[2]\n return x_.reshape(B, m, n, -1)\n if ndim == 2:\n # x shape = (B, m)\n return x_.reshape(B, m)\n # x shape = (B, m, n)\n return x_.reshape(B, m, -1)", "title": "" }, { "docid": 
"6d90553f90870789e03ac2026034e2e1", "score": "0.43833464", "text": "def returnwhich(retvals, bools):\r\n if len(retvals) != len(bools):\r\n raise IOError(\"CANNOT RETURN\")\r\n toreturn = [r for r, b in zip(retvals, bools) if b]\r\n if len(toreturn) > 1:\r\n return toreturn\r\n if len(toreturn) == 1:\r\n return toreturn[0]\r\n if len(toreturn) == 0:\r\n raise ValueError(\"NOTHING TO RETURN\")", "title": "" }, { "docid": "3a366ae81b848ec6ba562596a7a73821", "score": "0.43805042", "text": "def row_subset(self, boolean_list):\n if len(boolean_list) != self.len():\n return None\n new_dict = OrderedDict()\n for column_name in self.column_names():\n zipped = zip(self.column(column_name), boolean_list)\n new_list = [item for (item, boolean) in zipped if boolean is True]\n new_dict[column_name] = new_list\n return MiniDataFrame(new_dict)", "title": "" }, { "docid": "54ff807a063c429cd13e7ae839547ecc", "score": "0.43796033", "text": "def getboolean(self, section, option):\n if self.combined is None:\n self.combine()\n return self.combined.getboolean(section, option)", "title": "" }, { "docid": "1f9c640b20dcfffe47291163854d9b2c", "score": "0.43783128", "text": "def _const_bool(cls, boolean):\n return ir.IntType(1)(boolean)", "title": "" }, { "docid": "19cb29ebe5a27f301efe711f5e1669df", "score": "0.43677413", "text": "def choose(a, choices, out=None, mode=\"raise\"):\n from astropy.utils.masked import Masked\n\n a_data, a_mask = Masked._get_data_and_mask(a)\n if a_mask is not None and mode == \"raise\":\n # Avoid raising on masked indices.\n a_data = a.filled(fill_value=0)\n\n kwargs = {\"mode\": mode}\n if out is not None:\n if not isinstance(out, Masked):\n raise NotImplementedError\n kwargs[\"out\"] = out.unmasked\n\n data, masks = _get_data_and_masks(*choices)\n data_chosen = np.choose(a_data, data, **kwargs)\n if out is not None:\n kwargs[\"out\"] = out.mask\n\n mask_chosen = np.choose(a_data, masks, **kwargs)\n if a_mask is not None:\n mask_chosen |= a_mask\n\n return Masked(data_chosen, mask_chosen) if out is None else out", "title": "" }, { "docid": "b4948b9aaef23819d988b55be0fb1dfe", "score": "0.43630788", "text": "def test_flats_selector2(self): \n to_include = [True,False]\n for boolean in to_include:\n try:\n actual = boolean\n expected = None\n \n if boolean is True:\n expected = True\n else:\n expected = False\n \n self.assertEqual(actual,expected)\n except:\n print(\"There is something wrong with the variable include.\")", "title": "" }, { "docid": "32fc7372e0be79999d228744fc1c0ec9", "score": "0.4347789", "text": "def bool_array_to_rgb(array: np.ndarray) -> np.ndarray:\n assert array.dtype == np.bool, \"array must be boolean\"\n array = array.astype(np.uint8) * (2 ** 8 - 1)\n empty = np.zeros_like(array)\n return np.stack([array, empty, empty], axis=2)", "title": "" }, { "docid": "876645300e84d251fd3b521fc7beef6f", "score": "0.43402678", "text": "def _get_filter_bool(self):\n and_filter = self._get_and_filter()\n for f in and_filter:\n if f.has_key(\"bool\"):\n return f\n\n bools = {\"bool\":{}}\n and_filter.append(bools)\n\n return bools", "title": "" }, { "docid": "add7ecfdc229a92f737c067689445b33", "score": "0.43380266", "text": "def filter_bool(self, output_filename, mask):\n assert output_filename != self.filename, \"output filename should be different from the input filename\"\n assert len(mask) == len(self), \"list of bool must be the same size as BAM file\"\n self.reset()\n with pysam.AlignmentFile(output_filename, \"wb\", template=self.data) as fh:\n for read, keep in 
zip(self.data, mask):\n if keep:\n fh.write(read)", "title": "" }, { "docid": "d62fa5dc8f3f001a5d5216a7f2bbbb6f", "score": "0.4329721", "text": "def sample_indices(self, n_galaxies, selection_mask=None):\n if selection_mask is None:\n selection_mask = np.ones(len(self), dtype=np.bool_)\n return np.random.choice(\n np.where(selection_mask)[0], size=n_galaxies, replace=True\n )", "title": "" }, { "docid": "64f03f3881d525b340d305b83a56d9b7", "score": "0.43267846", "text": "def match_bool_indices(*args):\n mn = min([a.sum() for a in args])\n d = [a.sum()-mn for a in args]\n for i,d_ in enumerate(d):\n if d_>0:\n lastix = np.where(args[i])[0][-d_:]\n args[i][lastix] = False", "title": "" }, { "docid": "8c90f496900c5ea0fd47d4ce8875eac7", "score": "0.43222347", "text": "def check_toy_box(self, toy_array):\r\n\t\tif self.toy_choice_box.get() == \"Yes\":\r\n\t\t\ttry:\r\n\t\t\t\tself.toys_selected = True\r\n\t\t\t\tcall([\"python\", \"life-generator.py\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\"])\r\n\t\t\t\tself.read_file(\"output.csv\", toy_array, \"toy_file\")\r\n\t\t\texcept FileNotFoundError:\r\n\t\t\t\tself.toys_selected = False\r\n\t\telse:\r\n\t\t\tself.toys_selected = False", "title": "" }, { "docid": "7885aa8057c41bd57b42d571894940c9", "score": "0.43191737", "text": "def get_mask_array_from_cube(self, select=None):\n drp_mask = self.cube.copy_to_array(attr='mask')\n if select is not None:\n drp_mask = drp_mask[select,:]\n\n # Initialize\n mask = numpy.zeros(drp_mask.shape, dtype=self.bitmask.minimum_dtype())\n\n # Consolidate pixels flagged to be excluded from any\n # stacking into DIDNOTUSE\n flags = self.cube.do_not_stack_flags()\n indx = self.cube.bitmask.flagged(drp_mask, flag=flags)\n mask[indx] = self.bitmask.turn_on(mask[indx], 'DIDNOTUSE')\n\n # Propagate the FORESTAR flags\n indx = self.cube.bitmask.flagged(drp_mask, flag='FORESTAR')\n mask[indx] = self.bitmask.turn_on(mask[indx], 'FORESTAR')\n\n return mask", "title": "" }, { "docid": "60c352a0e7366287b3ff39e8b9b7dcce", "score": "0.43156642", "text": "def from_boolean_union(cls, brep_a, brep_b):\n raise NotImplementedError", "title": "" }, { "docid": "af3eb624fd1cf06c15bca66b08aca341", "score": "0.4315596", "text": "def _select_subset(selection, ymin, ymax):\n try:\n np.random.shuffle(selection)\n subset = []\n counter = 5\n for i in selection:\n if i >= ymin and i <= ymax:\n subset.append(i)\n counter -= 1\n if counter == 0:\n break\n except: # if there isn't any cell firing\n subset = [500, 2000, 4000, 6000, 7500]\n return subset", "title": "" }, { "docid": "ce57f426af424934d2deeabfb2412a22", "score": "0.43150014", "text": "def asBool(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "ce57f426af424934d2deeabfb2412a22", "score": "0.43150014", "text": "def asBool(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "59c44d1688b1c75ae6b3c6467b01ee68", "score": "0.4314794", "text": "def randomizeBool(name, fastDeviceWeight, slowDeviceWeight, **kwargs):\n randomizer.add(name[-1], fastDeviceWeight, slowDeviceWeight)\n parser.add_argument(*name, action='store_true', default=False, **kwargs)", "title": "" }, { "docid": "cb168e879626331bfc5deb75d307493e", "score": "0.431043", "text": "def select_actions(self, states) -> np.ndarray:\n states = torch.as_tensor(states, dtype=torch.float32, device=self.device).detach_()\n actions = self.act(states)\n return actions.cpu().numpy() # -1 < action < +1", "title": "" }, { "docid": "7b8ddf4c9da14872f6a96e35780a0ea6", "score": "0.43056843", "text": "def preprocess_checkbox(input):\r\n if 
input == \"on\":\r\n return 1\r\n else:\r\n return 0", "title": "" }, { "docid": "72a5fc5ef9905c0def967b0ef114f0c5", "score": "0.43036327", "text": "def filter(self):\n while True:\n a = (yield)\n # good = np.ones(a.shape, dtype=bool)\n in_poly_mask = self.filter_mask(a) \n self.target.send(a[in_poly_mask])", "title": "" }, { "docid": "d19c9743f12e7a26b18493fede1f399f", "score": "0.42972913", "text": "def select_from_array(cls, array, identifier):\n\n base_array = np.zeros(array.shape)\n array_coords = np.where(array == identifier)\n base_array[array_coords] = 1\n\n return cls(base_array)", "title": "" }, { "docid": "210cc8daefd50b4b7107321f215c67f3", "score": "0.4297139", "text": "def selected(self) -> bool:\r\n ...", "title": "" }, { "docid": "210cc8daefd50b4b7107321f215c67f3", "score": "0.4297139", "text": "def selected(self) -> bool:\r\n ...", "title": "" }, { "docid": "5fb79327d399f2dcf2635b334dff07df", "score": "0.42931432", "text": "def boolean_indexed(lst, boolean):\n updated_list = list(np.array(lst)[boolean])\n return updated_list", "title": "" }, { "docid": "b072a206489aead4496a26ceb4990a38", "score": "0.42904854", "text": "def select(data, field, values, return_indices=False):\n indices = []\n for v in values:\n j = np.nonzero(data[field] == v)[0]\n if len(j) == 0:\n continue\n assert len(j) == 1\n indices += [j[0]]\n if return_indices == True:\n return np.array(indices)\n else:\n return data[np.array(indices)]", "title": "" }, { "docid": "f3f4f08a33652e6a4d9201cd460ce07d", "score": "0.42896044", "text": "def _checkbox_to_bool(field_name):\n def _checkbox_to_bool_inner(d):\n if field_name in d and d[field_name] == \"on\":\n return True\n return _checkbox_to_bool_inner", "title": "" } ]
1732c168881102eb89121f0aa6d38507
Create virtualenv for running unit tests
[ { "docid": "201d6b11ddbc6da6d2c9fc87dd73c84b", "score": "0.6648784", "text": "def create_venv_dir():\n SHELL_EXEC = []\n SHELL_EXEC.append(F'set -e')\n SHELL_EXEC.append(F'virtualenv {VENV_DIR}')\n\n return_code = os.system(';'.join(SHELL_EXEC))\n return return_code", "title": "" } ]
[ { "docid": "189f06ad80b7b2ec683c7774ef1026f2", "score": "0.77171963", "text": "def create_virtualenv():\n require('virtualenv_root', provided_by=('staging', 'production'))\n args = '--clear --distribute --no-site-packages'\n try:\n run('rm -rf %s' % env.virtualenv_root)\n except:\n pass\n \n run('virtualenv %s %s' % (args, env.virtualenv_root))\n run('mkdir -p %s' % env.git_dir)\n with cd(\"%s\" % env.git_dir):\n run('git clone git://github.com/fxdgear/beersocial.git')", "title": "" }, { "docid": "18da46370eaa62c02262397fe20e948f", "score": "0.7416132", "text": "def create_virtualenv():\n run_local(\"python3 -m venv venv\")\n run_local(\"venv/bin/pip install -U pip wheel setuptools\")\n if os.path.exists(\"requirements.txt\"):\n run_local(\"venv/bin/pip install -r requirements.txt\")\n else:\n run_local(\"venv/bin/pip install -U -r requirements-to-freeze.txt\")\n execute(\"local.freeze\")", "title": "" }, { "docid": "6f1fc1bfac089c0dbf970b9950dea141", "score": "0.72929984", "text": "def tox_testenv_create(venv, action):", "title": "" }, { "docid": "a94fd85fe14f5f07025019a2f0f20e8b", "score": "0.7277896", "text": "def setup_virtualenv():\n run('virtualenv -p %(python)s --no-site-packages %(env_path)s;' % env)\n run('source %(env_path)s/bin/activate; easy_install -U setuptools; easy_install -U pip;' % env)", "title": "" }, { "docid": "0491d7d9ef1e2c43cc3a8dc6b3bdb21d", "score": "0.7235514", "text": "def setup_virtual_environment(self):\n if not os.path.exists(self.temp_dir):\n os.makedirs(self.temp_dir)\n\n if not os.path.exists(self.virtualenv_dir):\n virtualenv_setup_path = os.path.join(self.temp_dir, 'virtualenv.py')\n urllib.urlretrieve('https://raw.github.com/pypa/virtualenv/master/virtualenv.py', virtualenv_setup_path)\n virtualenv_prompt = '\\({0}\\)'.format(project_name) if os.name == 'posix' else '({0})'.format(project_name)\n os.system('python {0} --prompt={1} env'.format(virtualenv_setup_path, virtualenv_prompt))\n self.activate_virtual_environment()\n easy_install_setup_path = os.path.join(self.temp_dir, 'ez_setup.py')\n urllib.urlretrieve('http://peak.telecommunity.com/dist/ez_setup.py', easy_install_setup_path)\n os.system('python {0} -U setuptools'.format(easy_install_setup_path))\n os.system('pip install --requirement=requirements.txt --download-cache={0}'.format(self.temp_dir))", "title": "" }, { "docid": "7c7e5890699fe417ac10989ab0be6840", "score": "0.7226679", "text": "def bare_virtualenv():\n with pytest_virtualenv.VirtualEnv(args=(\n '--no-wheel',\n '--no-pip',\n '--no-setuptools',\n )) as venv:\n yield venv", "title": "" }, { "docid": "722ece6ca229ec30b3bc4b78429335a1", "score": "0.7157971", "text": "def test_create():\n\t\n\twith workspace() as ws:\n\t\tws.run(\n\t\t\t'venv --create --no-activate')\n\t\t\n\t\tws.check_venv()", "title": "" }, { "docid": "f0fc28883b4839aacb0fc5a541ed277e", "score": "0.70844185", "text": "def setup():\n run('mkdir -p %(path)s' % env)\n with cd(env.path):\n # Create the virtualenv\n run('virtualenv .venv')\n run('mkdir releases; mkdir shared;')\n _clone_repo()\n _checkout_latest()\n _install_requirements()", "title": "" }, { "docid": "ee5d3cc4266798dfa0ef1ebaa757be0e", "score": "0.6645186", "text": "def setup_virtualenv():\n virtualenv.cli_run([VENV_PATH])\n check_call([PIP_PATH, \"install\",\n \"flake8=={0}\".format(FLAKE8_VERSION),\n \"flake8-diff=={0}\".format(FLAKE8_DIFF_VERSION)])", "title": "" }, { "docid": "0591c2eec5873281bc76ea095de0bca3", "score": "0.6631612", "text": "def create_virtualenv():\n command = 
COMMANDS['venv'].split()\n\n if os.path.exists('venv'):\n print('Directory already exists.')\n else:\n if subprocess.call(command) == 0:\n return True\n else:\n return False", "title": "" }, { "docid": "44d0386a912ed44ea6cec674b2689848", "score": "0.66167635", "text": "def create_build_virtualenv(self):\n log('Creating virtualenv')\n\n # Create clean virtualenv\n if not os.path.exists(self.venv_paths.python):\n self._cmd(['virtualenv', self.venv_paths.venv])\n\n log('Upgrading pip')\n self._cmd([self.venv_paths.pip, 'install', '--upgrade', 'pip'])\n\n log('Installing pip dependencies')\n # Install things\n for dep in self.deploy.pip:\n log('Installing %s' % (dep))\n self._cmd([self.venv_paths.pip, 'install', '--upgrade', dep])", "title": "" }, { "docid": "18111cd71ed81e30c782cc16aaf841e0", "score": "0.66126734", "text": "def prepare_virtualenv(packages=()):\n vroot = get_vroot()\n env_key = get_env_key(packages)\n vdir = os.path.join(vroot, env_key)\n\n vbin = os.path.join(vdir, ('bin', 'Scripts')[_windows])\n vpython = os.path.join(vbin, 'python' + get_exe_suffix())\n vpip = os.path.join(vbin, 'pip' + get_exe_suffix())\n\n vpip_install = [vpip, \"install\"]\n if (2, 5) <= sys.version_info < (2, 6):\n vpip_install.append(\"--insecure\")\n\n venv_description = VirtualEnvDescription(home_dir=vdir, bin_dir=vbin, python=vpython, pip=vpip, packages=packages)\n\n env = get_clean_system_environment()\n env['PIP_DOWNLOAD_CACHE'] = os.path.abspath(os.path.join(vroot, \"pip-download-cache\"))\n\n # Cache environment\n done_flag_file = os.path.join(vdir, \"done\")\n if not os.path.exists(done_flag_file):\n if os.path.exists(vdir):\n shutil.rmtree(vdir)\n\n virtualenv.create_environment(vdir)\n\n for package_spec in packages:\n rc = subprocess.call(vpip_install + [package_spec], env=env)\n if rc != 0:\n raise Exception(\"Unable to install \" + package_spec + \" to \" + vroot)\n\n open(done_flag_file, 'a').close()\n\n subprocess.call([vpython, \"setup.py\", \"install\"], env=env)\n\n return venv_description", "title": "" }, { "docid": "c4f3985dd87986be6ec714ce0478fc1d", "score": "0.6568133", "text": "def cli(project: Project, testenv: str, pyver: str) -> None:\n project.add_ci_testenv(testenv, pyver)", "title": "" }, { "docid": "a9e53e9a2a1aa1bf38b767d23adb1ef4", "score": "0.653555", "text": "def virtual_env():\n if not exists(VIRTUAL_ENVIRONMENT, use_sudo=True):\n\t run(\"virtualenv MadMachinesNLP01\")\n with cd(VIRTUAL_ENVIRONMENT):\n #put(PATH, VIRTUAL_ENVIRONMENT)\n\t\t env.run(\"git clone https://github.com/kaali-python/MadMachinesNLP01.git\")\n with prefix(\"source bin/activate\"):\n\t\t\t if confirm(\"Do you want to install requirements.txt again??\"):\n\t\t env.run(\"pip install pyopenssl ndg-httpsclient pyasn1\")\n env.run(\"pip install numpy\")\n env.run(\"pip install -r MadMachinesNLP01/requirements.txt\")", "title": "" }, { "docid": "f1a40c88790669d95ecb28de270dbcd4", "score": "0.6515186", "text": "def create(self):\n env = dict(os.environ)\n env.pop('PYTHONDONTWRITEBYTECODE', None)\n\n args = [sys.executable, self.virtualenv_script_path,\n self.virtualenv_root]\n\n result = subprocess.call(args, stdout=self.log_handle,\n stderr=subprocess.STDOUT, env=env)\n\n if result:\n raise Exception('Error creating virtualenv.')\n\n return self.virtualenv_root", "title": "" }, { "docid": "673d465cc09a8938a7faeb08fd84db54", "score": "0.65003335", "text": "def setup():\n sudo('aptitude install -y python-setuptools apache2 libapache2-mod-wsgi')\n sudo('easy_install pip')\n sudo('pip install 
virtualenv')\n sudo('pip install virtualenvwrapper')\n put('.bash_profile', '~/.bash_profile')\n run('mkdir -p %(workon_home)s' % env)\n with settings(warn_only=True):\n # just in case it already exists, let's ditch it\n run('rmvirtualenv %(project_name)s' % env)\n run('mkvirtualenv --no-site-packages %(project_name)s' % env)\n # [... plus other useful stuff.]", "title": "" }, { "docid": "49fabe6cbb57973af62ffd4a5b1d6e71", "score": "0.6487062", "text": "def bootstrap():\n print green(\"Bootstrapping %s on %s..\" % (env.project_name, env.environment))\n require('root', provided_by=('production', 'test'))\n run(\"git clone %s %s\" % (env.project_repo, env.path))\n with cd(env.path):\n run(\"mkvirtualenv --no-site-packages %s\" % env.project_name)\n run(\"source %(virtualenv_path)s%(project_name)s/bin/activate && pip install -r requirements.txt\")", "title": "" }, { "docid": "24116b17d45791feeace3d8f64a46d7a", "score": "0.64812005", "text": "def setup_virtualenv():\n require.python.package('virtualenv', use_sudo=True)\n django_version = prompt('Django version to install [None]:')\n with cd(env.PROJECT_PATH):\n sudo('virtualenv env --system-site-packages')\n install_cmd = '{0} install -U django'.format(env.PIP_BIN)\n if django_version:\n install_cmd = install_cmd + '=={0}'.format(django_version)\n sudo(install_cmd)\n sudo('{0} install -U south'.format(env.PIP_BIN))", "title": "" }, { "docid": "b5280e2feb4f9f400d399270e04958af", "score": "0.64806294", "text": "def create_environment(self, **kwargs):\n # Delete old environment\n self._delete(self.venv_directory)\n\n # Create file\n self._create_batch_environment_file()\n\n # Run file\n self._run_batch_file(self.batch_file_create_venv)\n\n self._create_pth_file()\n\n # Install python packages\n # self.install_packages()", "title": "" }, { "docid": "0185e0f115a292e1a6de38fc1e686c10", "score": "0.64375496", "text": "def test_clean_env_install(bare_virtualenv):\n bare_virtualenv.run(' && '.join((\n 'cd {source}',\n 'python setup.py install',\n )).format(source=SOURCE_DIR))", "title": "" }, { "docid": "fd6ebca5275f74a1ac6d96b3de2d4ba8", "score": "0.64133835", "text": "def make_env(env, base_data_folder, base_work_folder, user, host):\n make_environment(env, base_data_folder, base_work_folder, user, None, host)", "title": "" }, { "docid": "1afe4567f5d60442995ab443491ad169", "score": "0.64024884", "text": "def test_virtualrunenv_not_applied(client):\n conanfile = textwrap.dedent(\"\"\"\n from conan import ConanFile\n import platform\n\n class ConanFileToolsTest(ConanFile):\n settings = \"os\"\n generators = \"VirtualBuildEnv\", \"VirtualRunEnv\"\n requires = \"foo/1.0\"\n \"\"\")\n\n client.save({\"conanfile.py\": conanfile})\n client.run(\"install . 
\")\n extension = \"bat\" if platform.system() == \"Windows\" else \"sh\"\n exists_file = os.path.exists(os.path.join(client.current_folder,\n \"conanrun.{}\".format(extension)))\n assert exists_file\n\n global_env = client.load(\"conanbuild.{}\".format(extension))\n assert \"conanrunenv\" not in global_env", "title": "" }, { "docid": "920bccd066099a4dec221d3e593ca1a3", "score": "0.639617", "text": "def generate(config, spec):\n spec.macros['venv_cmd'] = '{0} {1}'.format(\n config.python_venv.cmd,\n ' '.join(\n config.python_venv.flags if config.python_venv.flags else ()\n ),\n )\n if config.python_venv.python:\n\n spec.macros['venv_cmd'] = '{0} --python={1}'.format(\n spec.macros['venv_cmd'],\n config.python_venv.python,\n )\n spec.macros['venv_name'] = config.python_venv.name\n spec.macros['venv_install_dir'] = '{0}/%{{venv_name}}'.format(\n config.python_venv.path,\n )\n spec.macros['venv_dir'] = '%{buildroot}/%{venv_install_dir}'\n spec.macros['venv_bin'] = '%{venv_dir}/bin'\n spec.macros['venv_python'] = '%{venv_bin}/python'\n spec.macros['venv_pip'] = (\n '%{{venv_python}} %{{venv_bin}}/pip install {0}'.format(\n ' '.join(\n config.python_venv.pip_flags\n if config.python_venv.pip_flags\n else ()\n ),\n )\n )\n spec.macros['__prelink_undo_cmd'] = \"%{nil}\"\n\n spec.globals['__os_install_post'] = (\n \"%(echo '%{__os_install_post}' | sed -e \"\n \"'s!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:\"\n \"]].*$!!g')\"\n )\n\n spec.tags['AutoReq'] = 'No'\n spec.tags['AutoProv'] = 'No'\n\n spec.blocks.prep.append(\n 'mkdir -p %{buildroot}/%{venv_install_dir}',\n )\n\n spec.blocks.files.append('/%{venv_install_dir}')\n\n spec.blocks.install.append('%{venv_cmd} %{venv_dir}')\n for requirement in config.python_venv.requirements:\n\n spec.blocks.install.extend((\n 'cd %{SOURCE0}',\n '%{{venv_pip}} -r {0}'.format(requirement),\n 'cd -',\n ))\n\n if config.python_venv.require_setup_py:\n spec.blocks.install.append('cd %{SOURCE0}')\n\n if config.python_venv.use_pip_install:\n spec.blocks.install.append('%{venv_pip} .')\n else:\n spec.blocks.install.append('%{venv_python} setup.py install')\n\n spec.blocks.install.append('cd -')\n\n if config.python_venv.remove_pycache:\n spec.blocks.install.append(\n r'find %{venv_dir} -type d -name \"__pycache__\" -print0 | '\n r'xargs -0 rm -rf'\n )\n\n spec.blocks.install.extend((\n '# RECORD files are used by wheels for checksum. They contain path'\n ' names which',\n '# match the buildroot and must be removed or the package will '\n 'fail to build.',\n 'find %{buildroot} -name \"RECORD\" -exec rm -rf {} \\\\;',\n '# Change the virtualenv path to the target installation '\n 'direcotry.',\n 'venvctrl-relocate --source=%{venv_dir}'\n ' --destination=/%{venv_install_dir}',\n ))\n\n if config.python_venv.strip_binaries:\n spec.blocks.install.extend((\n '# Strip native modules as they contain buildroot paths in'\n 'their debug information',\n 'find %{venv_dir}/lib -type f -name \"*.so\" | xargs -r strip',\n ))\n else:\n spec.macros[\"debug_package\"] = \"debug_package %{nil}\"\n spec.macros[\"__strip\"] = \"/bin/true\"\n # Several RPM bundles and dev packages, like redhat-rpm-config and\n # rpm-devel inject macros into every RPM spec by modifying the\n # global RPM configuration. 
This disables the known macros from\n # those packages that inject an additional binary strip step.\n # See https://github.com/kevinconway/rpmvenv/pull/93 for details.\n\n return spec", "title": "" }, { "docid": "06f559e32b65dd3771f7ae42cb992441", "score": "0.639008", "text": "def virtualenv():\n if not exists(\"source ~/.virtualenvs/{}/bin/activate\".format(env.virtualenv_name)):\n run(\"mkdir source ~/.virtualenvs/{}/bin/activate\".format(env.virtualenv_name))\n with cd(env.virtualenv_name):\n with prefix(\"source ~/.virtualenvs/{}/bin/activate\".format(env.virtualenv_name)):\n yield", "title": "" }, { "docid": "ea609252bb6083b572c81188649b2826", "score": "0.6360661", "text": "def prepare_environment(self):\n self.create_build_virtualenv()\n self.workspace.make_build_dir()\n self.put_env_variables()", "title": "" }, { "docid": "d04b57d6733f8cd177281328ab783a78", "score": "0.6357932", "text": "def setup(branch=None):\n require('path')\n with env.cd_cmd(PROJECT_PATH):\n env.gitbranch = branch or local('git rev-parse --abbrev-ref HEAD', capture=True)\n\n make_dirs()\n\n # create user & add to group\n sudo('grep \"{server_owner:s}\" /etc/passwd > /dev/null || adduser {server_owner:s} && usermod -G {server_group:s} {server_owner:s}'.format(**env))\n\n # create virtualenv, clone repo\n sudo('test -e /srv/myapp/env/bin/activate || /usr/local/bin/virtualenv /srv/myapp/env'.format(**env), user=env.server_owner)\n\n if env.gitsource:\n with env.cd_cmd('/srv/myapp/repo'):\n sudo('test -e /srv/myapp/repo/.git || /usr/bin/git clone -q -b {gitbranch:s} {gitsource:s} .'.format(**env))\n\n # install requirements into virtualenv - do it from the repo on setup; subsequent installs should be from the release\n deploy_secrets(False)\n deploy_manage_wrapper()\n deploy()\n\n restart_webserver()\n restart_supervisor()\n start_celery()", "title": "" }, { "docid": "0ba6a650f3efd6b6d2a4849de2754490", "score": "0.6354789", "text": "def setup_base(self):\n cli_args = [\n '-mvirtualenv',\n # Append the positional destination argument\n \"$READTHEDOCS_VIRTUALENV_PATH\",\n ]\n\n self.build_env.run(\n self.config.python_interpreter,\n *cli_args,\n # Don't use virtualenv bin that doesn't exist yet\n bin_path=None,\n # Don't use the project's root, some config files can interfere\n cwd=None,\n )", "title": "" }, { "docid": "ff6abc8d7f6a05248f04c1a65ccf2161", "score": "0.63431704", "text": "def _setup_test_env(self, create_folder=True, path=None, **kwargs):\r\n MultiProductSystem.FakePermClass = MockPerm\r\n kwargs.setdefault('enable', ['trac.*', 'multiproduct.*'])\r\n self.env = env = EnvironmentStub(**kwargs)\r\n if create_folder:\r\n if path is None:\r\n env.path = tempfile.mkdtemp(prefix='bh-tempenv-')\r\n else:\r\n env.path = path\r\n if not os.path.exists(env.path):\r\n os.mkdir(env.path)\r\n conf_dir = os.path.join(env.path, 'conf')\r\n if not os.path.exists(conf_dir):\r\n os.mkdir(conf_dir)\r\n return env", "title": "" }, { "docid": "4924f88a2d506cf49649f0b05a17961e", "score": "0.63157016", "text": "def test_usability():\n\t\n\twith workspace(virtualenvs = ['venv'], dummy_project = True) as ws:\n\t\tws.run(\n\t\t\t'venv',\n\t\t\t'python setup.py install')\n\t\t\n\t\t# For good measure.\n\t\tws.run(\n\t\t\t'venv',\n\t\t\t'python setup.py develop')", "title": "" }, { "docid": "84e835c953980c459c0a0c4d9d5865f7", "score": "0.6293455", "text": "def setup(upgrade=False, upgrade_strategy='only-if-needed'):\n venv_root = ctx('virtualenv.dirs.root')\n venv_name = ctx('virtualenv.name')\n venv_path = os.path.join(venv_root, 
venv_name)\n py = 'python{}'.format(ctx('python.version'))\n env.venv_path = venv_path\n\n if not fabtools.deb.is_installed('python-virtualenv'):\n fabtools.deb.install('python-virtualenv')\n # Experimental\n require.python.virtualenv(\n venv_path, python_cmd=py, use_sudo=True, venv_python=py)\n with _virtualenv(venv_path):\n require.python.pip()\n require.python.setuptools()\n execute(install_requirements, upgrade=upgrade, upgrade_strategy=upgrade_strategy)\n # /Experimental\n\n # lib_root = os.path.join(venv_root, venv_name, 'lib')\n # if not files.exists(lib_root, use_sudo=True):\n # print(cyan(\"Setuping virtualenv on {}\".format(env.stage)))\n # with cd(venv_root):\n # sudo('virtualenv --python=python{version} {name}'.format(\n # version=ctx('python.version'),\n # name=ctx('virtualenv.name')))\n # pip('install -U setuptools pip') # Just avoiding some headaches..", "title": "" }, { "docid": "1196c2c319cbb57e3325ecdcecabbe62", "score": "0.6283571", "text": "def make_env(py_exe, *packages):\n py_exe = py_exes.get(py_exe, py_exe)\n\n if not os.path.exists(env_root):\n os.makedirs(env_root)\n\n env = os.path.join(env_root, os.path.basename(py_exe))\n py = pjoin(env, 'bin', 'python')\n # new env\n if not os.path.exists(py):\n run('virtualenv {} -p {}'.format(\n pipes.quote(env),\n pipes.quote(py_exe),\n ))\n py = pjoin(env, 'bin', 'python')\n run([py, '-V'])\n install(py, 'pip', 'setuptools')\n install(py, *packages)\n return py", "title": "" }, { "docid": "e76ea69f8b9e5d9a490299a46fb1ad49", "score": "0.62740517", "text": "def _setup_venv(self):\n python.setup_virtualenv(\n self.venv_path, sudo_user=self.user, python_version=3)\n packages = [\n \"Radicale\", \"radicale-dovecot-auth\", \"pytz\"\n ]\n python.install_packages(packages, self.venv_path, sudo_user=self.user)\n python.install_package_from_repository(\n \"radicale-storage-by-index\",\n \"https://github.com/tonioo/RadicaleStorageByIndex\",\n venv=self.venv_path, sudo_user=self.user)", "title": "" }, { "docid": "707109ab7a3abb18272c13fbcf1992e2", "score": "0.62720925", "text": "def _make_empty_env(self):\n\n # Create our environment in the proper directory, with the name 'test'\n self._xpkg_cmd(['init', self.env_dir, 'test'])", "title": "" }, { "docid": "a897b012e2348c9cbe283da256a3ec68", "score": "0.62339693", "text": "def withVirtualenv(env_name, base_dir=None, python=None, delete=True, style=None):\n\n if style is not None:\n my_print(\"Creating a virtualenv:\")\n\n if python is None:\n python = sys.executable\n\n # Avoid symlinks on Windows, they won't work for virtualenv e.g.\n python = os.path.join(\n getDirectoryRealPath(os.path.dirname(python)),\n os.path.basename(python),\n )\n\n if base_dir is not None:\n env_dir = os.path.join(base_dir, env_name)\n else:\n env_dir = env_name\n\n removeDirectory(env_dir, ignore_errors=False)\n\n with withDirectoryChange(base_dir, allow_none=True):\n command = [python, \"-m\", \"virtualenv\", env_name]\n if style is not None:\n my_print(\"Executing: %s\" % \" \".join(command), style=style)\n check_call(command)\n\n yield Virtualenv(env_dir)\n\n if delete:\n removeDirectory(env_dir, ignore_errors=False)", "title": "" }, { "docid": "e7eb500990bb007fada3eea0673f0733", "score": "0.62227184", "text": "def bootstrap_virtualenv(self):\n\n chdir(self.basedir)\n packages_to_install = ['six']\n packages_to_install.extend(getattr(self, 'install_extra_packages', []))\n\n paver.virtual._create_bootstrap(script_name=\"bootstrap.py\",\n packages_to_install=packages_to_install,\n paver_command_line=None,\n 
install_paver=False,\n dest_dir=self.basedir / 'virtualenv')\n\n try:\n check_output([sys.executable, self.basedir / \"bootstrap.py\"], stderr=STDOUT, cwd=self.basedir)\n except CalledProcessError as err:\n # print(err.output)\n raise", "title": "" }, { "docid": "88d889e36372216c244e493f28ae0dcb", "score": "0.6211898", "text": "def test_create_non_default_name():\n\t\n\twith workspace() as ws:\n\t\tws.run(\n\t\t\t'venv --create --no-activate venv2')\n\t\t\n\t\tws.check_dir(['venv2'])\n\t\tws.check_venv('venv2')", "title": "" }, { "docid": "68ed65954ed4052258f4fe3faf31281f", "score": "0.6196145", "text": "def bootstrap(git_tag=None):\n require('environment', provided_by=[staging, production])\n if (exists('%(project_path)s' % env)):\n print ('The staging environment already exists at %(project_path)s. Please clean it up manually and try again.'\n % env)\n return\n\n # Set up directory\n sudo('mkdir %(project_path)s' % env)\n \n # Set up python virtual env\n sudo('virtualenv -p %(python)s --no-site-packages %(project_path)s/env'\n % env)\n with prefix('source %(project_path)s/env/bin/activate' % env):\n sudo('easy_install pip' % env)\n \n # Create the log dir\n sudo('mkdir %(project_path)s/logs' % env)\n sudo('chown wwwrun:www %(project_path)s/logs' % env)\n \n # Create the catalogs dir\n sudo('mkdir %(project_path)s/catalogs' % env)\n sudo('chown wwwrun:www %(project_path)s/catalogs' % env)\n \n # Create the scripts dir\n sudo('mkdir %(project_path)s/scripts' % env)\n\n # Create the settings dir\n sudo('mkdir %(project_path)s/settings' % env)\n \n print(\"The environment is prepared.\")", "title": "" }, { "docid": "cc8ecd7bc661dbca6cda40438e5c1739", "score": "0.6176357", "text": "def setup(properties):\n\n try:\n if not os.path.exists(consts.ENV):\n call([\"virtualenv\", consts.ENV])\n except:\n print \"Error creating virtual environment. 
Make sure viurtualenv is installed.\"\n sys.exit(1)\n \n try:\n call([\"cp\", os.path.join(consts.RESOURCES, 'req.pip'), \"./\"])\n call([os.path.join(consts.ENV, 'bin', 'pip'), \"install\", \"-r\", os.path.join(consts.PATH, 'req.pip')])\n except:\n print \"Error installing django or south\"\n sys.exit(2)\n\n try:\n print \"Building project\"\n os.chdir(consts.ENV)\n call([os.path.join(consts.ENV, 'bin', 'django-admin.py'), \"startproject\", properties[\"website\"][\"name\"]])\n except OSError as e:\n print \"Error creating django project\", e\n sys.exit(3)\n\n try:\n call([\"git\", \"init\", consts.PATH])\n except:\n print \"Failed to initialize git\"\n sys.exit(4)", "title": "" }, { "docid": "99374979f580bbec308646593751fbb5", "score": "0.60996616", "text": "def test_recreate():\n\t\n\twith workspace(virtualenvs = ['venv']) as ws:\n\t\tws.create_file('venv/dummy')\n\t\t\n\t\tws.run(\n\t\t\t'venv --recreate --no-activate')\n\t\t\n\t\tws.check_file('venv/dummy', exists = False)", "title": "" }, { "docid": "5d2211965b42c45722feaf765a4d4d20", "score": "0.609088", "text": "def test_install_target_venv(mock_sys, mock_popen, mock_env_copy, mock_venv) -> None:\n target = \"target_folder\"\n with pytest.raises(AssertionError):\n package.install_package(TEST_NEW_REQ, False, target=target)", "title": "" }, { "docid": "e8f37f64df3b8a63ec2914081202822f", "score": "0.6090118", "text": "def test_not_recreated():\n\t\n\twith workspace(virtualenvs = ['venv']) as ws:\n\t\tws.create_file('venv/dummy')\n\t\t\n\t\tws.run(\n\t\t\t'venv --no-activate')\n\t\t\n\t\tws.check_file('venv/dummy')", "title": "" }, { "docid": "ad70ad7d7900f7aa13d0067fc91734fd", "score": "0.60891944", "text": "def test_create_via_no_activate():\n\t\n\twith workspace() as ws:\n\t\tws.run(\n\t\t\t'venv --no-activate')\n\t\t\n\t\tws.check_venv()", "title": "" }, { "docid": "71aa36c5a862796df2c4f8c19c39b3cd", "score": "0.60762507", "text": "def create_or_update_virtualenvs(virtualenv_name, requirements_file):\n execute(_create_or_update_virtualenv, env.virtualenv_path, virtualenv_name,\n requirements_file, hosts=env.storm_workers)", "title": "" }, { "docid": "ed953df67442fd71ac7fcd1a35210640", "score": "0.6059149", "text": "def tox_testenv_install_deps(venv, action):", "title": "" }, { "docid": "5c57f5db3329761f125de2c9867d3bb3", "score": "0.60588413", "text": "def setup():\n if os.path.exists(\"venv\"):\n puts(red(\"It seems that this project is already set up, aborting.\"))\n return 1\n\n if not os.path.exists(\"venv\"):\n execute(\"local.create_virtualenv\")\n execute(\"local.frontend_tools\")\n if not os.path.exists(\".env\"):\n execute(\"local.create_dotenv\")\n execute(\"local.create_database\")\n\n puts(green(\"Initial setup has completed successfully!\", bold=True))\n puts(green(\"Next steps:\"))\n puts(green(\"- Update the README: edit README.rst\"))\n puts(green(\"- Create a superuser: venv/bin/python manage.py createsuperuser\"))\n puts(green(\"- Run the development server: fab dev\"))\n puts(green(\"- Create a Bitbucket repository: fab git.init_bitbucket\"))\n puts(green(\"- Configure a server for this project: fab server.setup\" % env))", "title": "" }, { "docid": "1dfa127290b58d97100ac461c0e9c5bd", "score": "0.6030788", "text": "def virtualenv(directory, system_site_packages=False, venv_python=None,\n use_sudo=False, user=None, clear=False, prompt=None,\n virtualenv_cmd='virtualenv', pip_cmd='pip', python_cmd='python'):\n\n package('virtualenv', use_sudo=True, pip_cmd=pip_cmd, python_cmd=python_cmd)\n\n if not 
virtualenv_exists(directory):\n create_virtualenv(\n directory,\n system_site_packages=system_site_packages,\n venv_python=venv_python,\n use_sudo=use_sudo,\n user=user,\n clear=clear,\n prompt=prompt,\n virtualenv_cmd=virtualenv_cmd,\n )", "title": "" }, { "docid": "127bcde0e44d9d0d2c922fd373465ccf", "score": "0.6025764", "text": "def make_env(self, pybinary, path,\n system_site_packages=False,\n symlinks=False,\n copies=False,\n with_pip=True,\n prompt=None\n ):\n assoc_path = self._assoc_path(path)\n self._root.mkdir(exist_ok=True)\n assoc_path.mkdir(exist_ok=True)\n\n venv_path = assoc_path.joinpath(pybinary)\n\n cmd = ['/usr/bin/env', pybinary, '-m', 'venv', '--clear']\n if system_site_packages is True:\n cmd.append('--system-site-packages')\n\n if symlinks is True:\n cmd.append('--symlinks')\n\n if copies is True:\n cmd.append('--copies')\n\n if with_pip is False:\n cmd.append('--without-pip')\n\n cmd.append(str(venv_path))\n\n subprocess.run(cmd)\n\n binname = get_bin_name()\n binpath = venv_path.joinpath(binname)\n\n script = make_activate_script(\n path=ACTIVATE_PATH,\n prompt=prompt,\n bin_name=binname,\n )\n\n open(binpath.joinpath('activate'), 'w').write(script)", "title": "" }, { "docid": "8ef6d31df215b98349178eda7971b5c9", "score": "0.60154057", "text": "async def virtualenv(python_interpreter, env):\n with TemporaryDirectory() as virtualenv_dir:\n await check_call(\n [\"virtualenv\", virtualenv_dir, \"-p\", python_interpreter],\n env=env,\n cwd=virtualenv_dir,\n )\n\n # Figure out what environment variables we need to set\n output_bytes = await check_output(\n f\". {os.path.join(virtualenv_dir, 'bin', 'activate')}; env\",\n shell=True,\n cwd=virtualenv_dir,\n )\n output = output_bytes.decode()\n yield virtualenv_dir, dict(\n line.split(\"=\", 1) for line in output.splitlines() if \"=\" in line\n )", "title": "" }, { "docid": "6a710a8a5c1ffe0c7f806d75ed62746e", "score": "0.6007486", "text": "def test_venvs_default_name(self):\n\n virtualenv = self.locator.for_directory(Path.cwd())\n self.assertFalse(virtualenv.exists_on(self.filesystem))\n self.run_cli([])\n self.assertTrue(virtualenv.exists_on(self.filesystem))", "title": "" }, { "docid": "9504112fe8d4720e6733ae3bb9f5b9dd", "score": "0.5981723", "text": "def create(\n env_dir,\n system_site_packages=False,\n clear=False,\n symlinks=False,\n with_pip=False,\n prompt=None,\n):\n builder = ExtendedEnvBuilder(\n system_site_packages=system_site_packages,\n clear=clear,\n symlinks=symlinks,\n with_pip=with_pip,\n prompt=prompt,\n )\n builder.create(env_dir)\n return builder.context", "title": "" }, { "docid": "c844b703bdb686ee6669ddae6672259a", "score": "0.59812224", "text": "def prepare_environment(base_path):\n if not os.path.exists(base_path):\n os.makedirs(base_path)", "title": "" }, { "docid": "ecc2a4c0a7785bc21cd46945e6c1e59e", "score": "0.5972399", "text": "def setup_with_production_data():\n if os.path.exists(\"venv\"):\n puts(red(\"It seems that this project is already set up, aborting.\"))\n return 1\n\n execute(\"git.add_remote\")\n execute(\"local.create_virtualenv\")\n execute(\"local.frontend_tools\")\n execute(\"local.create_dotenv\")\n execute(\"local.pull_database\")\n execute(\"local.reset_passwords\")\n\n puts(green(\"Setup with production data has completed successfully!\", bold=True))\n puts(green(\"Next steps:\"))\n puts(green(\"- Create a superuser: venv/bin/python manage.py createsuperuser\"))\n puts(green(\"- Run the development server: fab dev\"))", "title": "" }, { "docid": 
"6d2ecb4e07022ee053924d63b6d51457", "score": "0.5950397", "text": "def _create_or_update_virtualenv(virtualenvs_path, virtualenv_name,\n requirements_file):\n if not exists(\"{}/{}\".format(virtualenvs_path, virtualenv_name)):\n puts(\"virtualenv not found for {}, creating one.\".format(virtualenvs_path))\n run(\"virtualenv {}/{}\".format(virtualenvs_path, virtualenv_name))\n\n puts(\"Uploading requirements.txt to temporary file.\")\n tmpfile = run(\"mktemp /tmp/streamparse_requirements-XXXXXXXXX.txt\")\n put(requirements_file, tmpfile)\n\n puts(\"Updating virtualenv: {}\".format(virtualenv_name))\n cmd = \"source {}/{}/bin/activate\".format(virtualenvs_path, virtualenv_name)\n with prefix(cmd):\n run(\"pip install streamparse\")\n run(\"pip install -r {}\".format(tmpfile))\n\n run(\"rm {}\".format(tmpfile))", "title": "" }, { "docid": "7da106ae52b2624807f84eb1edf42be2", "score": "0.5941383", "text": "def virtualenv(path):\n with prefix('. %s' % mkpath(path).child('bin', 'activate')):\n yield", "title": "" }, { "docid": "89d56203853cbcc5b8b6272ce923bb29", "score": "0.5925416", "text": "def run(self):\r\n #self.upgrade_system()\r\n self.install_prereqs()\r\n self.create_user()\r\n self.checkout_project()\r\n self.create_virtualenv()\r\n self.create_symlink()\r\n self.install_nginx()\r\n self.install_gunicorn()\r\n self.run_tests()", "title": "" }, { "docid": "0277c12043001caf98757a52eaa4c065", "score": "0.59184045", "text": "def test_gtest_with_vpython(self):\n args = mock.MagicMock()\n args.test_exe = 'base_unittests'\n args.test_launcher_summary_output = None\n args.trace_dir = None\n args.runtime_deps_path = None\n args.path_to_outdir = self._tmp_dir\n args.vpython_dir = self._tmp_dir\n args.logs_dir = self._tmp_dir\n\n # With vpython_dir initially empty, the test_runner should error out\n # due to missing vpython binaries.\n gtest = test_runner.GTestTest(args, None)\n with self.assertRaises(test_runner.TestFormatError):\n gtest.build_test_command()\n\n # Create the two expected tools, and the test should be ready to run.\n with open(os.path.join(args.vpython_dir, 'vpython3'), 'w'):\n pass # Just touch the file.\n os.mkdir(os.path.join(args.vpython_dir, 'bin'))\n with open(os.path.join(args.vpython_dir, 'bin', 'python3'), 'w'):\n pass\n gtest = test_runner.GTestTest(args, None)\n gtest.build_test_command()", "title": "" }, { "docid": "175692e279168ca38b9382cd2f901638", "score": "0.5865143", "text": "def test_initialise_after_new(tmp_path):\n\n # go one level up to the cwd so we are are the root where\n # setup.py exits\n os.chdir(\"../\")\n # create the dist folder with shoypo-<version>.tar.gz file\n subprocess.check_call([sys.executable, \"setup.py\", \"sdist\"])\n # store all path names to be used later\n dist_path = os.path.join(os.getcwd(), \"dist\")\n shopyo_dist_name = f\"shopyo-{__version__}.tar.gz\"\n project_path = tmp_path / \"foo\"\n # copy the shopyo dist to the test project path\n copytree(dist_path, os.path.join(project_path, \"dist\"))\n # change cwd to that of test project\n os.chdir(project_path)\n # create a new virtual environment(venv)\n subprocess.check_call([sys.executable, \"-m\", \"venv\", \"env\"])\n # store path for python and shopyo executable of venv for the case when OS\n # is Unix\n python_env = os.path.join(os.getcwd(), \"env\", \"bin\", \"python\")\n shopyo_env = os.path.join(os.getcwd(), \"env\", \"bin\", \"shopyo\")\n # if OS is Windows, update the python and shopyo executable\n if sys.platform == \"win32\":\n python_env = os.path.join(os.getcwd(), 
\"env\", \"Scripts\", \"python\")\n shopyo_env = os.path.join(os.getcwd(), \"env\", \"Scripts\", \"shopyo\")\n # update pip of venv\n subprocess.check_call(\n [python_env, \"-m\", \"pip\", \"install\", \"--upgrade\", \"pip\"]\n )\n # install the shopyo package from dist added earlier\n subprocess.check_call(\n [\n python_env,\n \"-m\",\n \"pip\",\n \"install\",\n os.path.join(\"dist\", shopyo_dist_name)\n ]\n )\n # run shopyo help command followed by new command\n subprocess.check_call([\"shopyo\", \"--help\"])\n subprocess.check_call([shopyo_env, \"new\"])\n # change the cwd to the newly created shopyo project\n os.chdir(os.path.join(project_path, \"foo\"))\n # initialise the project\n subprocess.check_call(\n [shopyo_env, \"initialise\"]\n )\n\n assert os.path.exists(\"shopyo.db\")\n assert os.path.exists(\"migrations\")", "title": "" }, { "docid": "3c63a788388181dc3e7a5660a7e4080d", "score": "0.5858013", "text": "def build(self):\n\n self.create()\n\n # We need to populate the virtualenv using the Python executable in\n # the virtualenv for paths to be proper.\n\n args = [self.python_path, __file__, 'populate', self.topsrcdir,\n self.topobjdir, self.virtualenv_root, self.manifest_path]\n\n result = subprocess.call(args, stdout=self.log_handle,\n stderr=subprocess.STDOUT, cwd=self.topsrcdir)\n\n if result != 0:\n raise Exception('Error populating virtualenv.')\n\n os.utime(self.activate_path, None)\n\n return self.virtualenv_root", "title": "" }, { "docid": "d0988c9a290cf8004c81536bcc921189", "score": "0.5846211", "text": "def setup():\n global TestPath\n TestPath = os.path.dirname(__file__)\n pythonPath = os.path.join(os.path.dirname(os.path.abspath(TestPath)), \"python\")\n if \"PYTHONPATH\" in os.environ:\n pythonPath += \":\" + os.environ[\"PYTHONPATH\"]\n os.environ[\"PYTHONPATH\"] = pythonPath", "title": "" }, { "docid": "d276495dff9f8eefc36622f8a70a0123", "score": "0.5840953", "text": "def prepare_environment(base_path):\n if os.path.exists(base_path):\n shutil.rmtree(base_path)\n os.makedirs(base_path)", "title": "" }, { "docid": "b288944dfea0e7af428018bdaaaa98a1", "score": "0.58191466", "text": "def init_project():\n with cd(code_dir):\n with prefix('source bin/activate'):\n run('DJANGO_SETTINGS_MODULE=teaching.settings_deploy python teaching/manage.py syncdb')\n run('DJANGO_SETTINGS_MODULE=teaching.settings_deploy python teaching/manage.py migrate')\n run('DJANGO_SETTINGS_MODULE=teaching.settings_deploy python teaching/manage.py collectstatic --noinput')", "title": "" }, { "docid": "20c08cd017be8d5368d0dc0f6a9a6983", "score": "0.58123636", "text": "def create_environment(home_dir, site_packages=True, clear=False,\r\n unzip_setuptools=False, use_distribute=False,\r\n prompt=None, search_dirs=None, never_download=False):\r\n home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)\r\n\r\n py_executable = os.path.abspath(install_python(\r\n home_dir, lib_dir, inc_dir, bin_dir,\r\n site_packages=site_packages, clear=clear))\r\n\r\n install_distutils(home_dir)\r\n\r\n if use_distribute or os.environ.get('VIRTUALENV_USE_DISTRIBUTE'):\r\n install_distribute(py_executable, unzip=unzip_setuptools, \r\n search_dirs=search_dirs, never_download=never_download)\r\n else:\r\n install_setuptools(py_executable, unzip=unzip_setuptools, \r\n search_dirs=search_dirs, never_download=never_download)\r\n\r\n install_pip(py_executable, search_dirs=search_dirs, never_download=never_download)\r\n\r\n install_activate(home_dir, bin_dir, prompt)", "title": "" }, { "docid": 
"536db7849be278dbbde04db22957dcdd", "score": "0.5795029", "text": "def main(args):\n name = get_topology_definition(args.name)[0]\n config = get_config()\n config[\"virtualenv_specs\"] = config[\"virtualenv_specs\"].rstrip(\"/\")\n create_or_update_virtualenvs(args.environment, name,\n \"{}/{}.txt\".format(config[\"virtualenv_specs\"],\n name))", "title": "" }, { "docid": "9b8b8a7e5271c6278228f70de2f16b60", "score": "0.5785122", "text": "def test_environment():\n\tpass", "title": "" }, { "docid": "a6e18b939ec948e5ada35f66c2a48dd4", "score": "0.5765499", "text": "def createProjectArea(version):\n\n print(\"Setting up CMSSW_\" + version + \" environment\")\n execCmd(\"scramv1 project CMSSW CMSSW_\" + version)\n os.chdir(\"CMSSW_\" + version + \"/src\")", "title": "" }, { "docid": "b47777bcbaed25d197aa983fc6fb8e11", "score": "0.5762175", "text": "def test_create_project(self):\n pass", "title": "" }, { "docid": "b47777bcbaed25d197aa983fc6fb8e11", "score": "0.5762175", "text": "def test_create_project(self):\n pass", "title": "" }, { "docid": "d40a761836085cbcc7c569f17cae6f69", "score": "0.57619125", "text": "def setup(days: int):\n\n\tif TestEnvironment.exists():\n\t\traise click.ClickException(\"Test environment is already set up.\")\n\n\tTestEnvironment.setup(days=days)", "title": "" }, { "docid": "7638c086ef2ea4fc655ce67b6fedd7af", "score": "0.5742991", "text": "def setup_directories():\n run('mkdir -p %(path)s' % env)", "title": "" }, { "docid": "fe374f254ef2a1fa420e9826cc006e86", "score": "0.5739442", "text": "def setup_env():\n\n print('+ Creating directory structure')\n if files.exists(env.host_site_path):\n if console.confirm('Remove existing directory %s' % env.host_site_path):\n with hide('running', 'stdout'):\n run('rm -rf %s' % env.host_site_path)\n else:\n print('+ Directory not removed and recreated')\n return\n with hide('running', 'stdout'):\n run('mkdir -p %s' % env.host_site_path)\n with cd(env.host_site_path):\n with hide('stdout'):\n run('mkdir changesets files private')\n print('+ Cloning repository: %s' % env.repo_url)\n run('ssh-keyscan -H github.com >> ~/.ssh/known_hosts')\n run('ssh-keyscan -H bitbucket.org >> ~/.ssh/known_hosts')\n run('%s clone --quiet %s private/repo' % (env.repo_type,\n env.repo_url))\n run('chmod g+w private/repo')\n print('+ Site directory structure created at: %s' % env.host_site_path)", "title": "" }, { "docid": "e31b8cf8dc2ec45d93481cb2e377b999", "score": "0.57354295", "text": "def setUp(self):\n\n # Create temp dir\n self.work_dir = tempfile.mkdtemp(suffix = '-testing-xpkg')\n print self.work_dir\n\n # Create the user cache dir\n self.user_cache_dir = os.path.join(self.work_dir, 'user_cache')\n\n # Mock the user dir\n os.environ[core.xpkg_local_cache_var] = self.user_cache_dir\n\n # Create the env_dir\n self.env_dir = os.path.join(self.work_dir, 'env')\n\n # Create our binary package repository dir\n self.repo_dir = os.path.join(self.work_dir, 'repo')\n util.ensure_dir(self.repo_dir)\n\n # Save the environment\n self._envStorage = util.EnvStorage(store = True)", "title": "" }, { "docid": "34656fb6abf365e35d977f75a69b7856", "score": "0.5728862", "text": "def test_install_special_deps(toxconfig, mocker, actioncls, tmpdir):\n action = actioncls()\n p = tmpdir.join(\"tox.ini\")\n p.write(toxconfig)\n with tmpdir.as_cwd():\n config = parseconfig([])\n\n for env, envconfig in config.envconfigs.items():\n session = Session(config)\n venv = tox.venv.VirtualEnv(envconfig, session=session)\n mocker.patch(\"subprocess.Popen\")\n result = 
session.runtestenv(venv)\n assert result == True\n assert subprocess.Popen.call_count == 1\n call_list = [sys.executable, \"-m\", \"pipenv\", \"install\", \"--dev\"]\n call_list.extend([package for package in venv._getresolvedeps()])\n assert subprocess.Popen.call_args_list[0][0] == (call_list,)", "title": "" }, { "docid": "ce752d5410454fbdd85a82f341fa69a6", "score": "0.57255477", "text": "def bootstrap():\n require('root', provided_by=('staging', 'production'))\n run('mkdir -p %(root)s' % env)\n run('mkdir -p %s' % os.path.join(env.home, 'log'))\n create_virtualenv()\n deploy()\n run('export PYTHONPATH=/home/nick/code/python/beersocial/src/beersocial:$PYTHONPATH')\n run('/home/nick/code/python/beersocial/bin/python %s syncdb --settings=%s' %\n (os.path.join(env.git_dir, 'beersocial', 'manage.py'), env.settings) )\n run('/home/nick/code/python/beersocial/bin/python %s migrate --settings=%s' %\n (os.path.join(env.git_dir, 'beersocial', 'manage.py'), env.settings) )", "title": "" }, { "docid": "addb9ab149af5b1e2dcf9f6baa0867da", "score": "0.5711274", "text": "def setUpModule():\n TMPDIR.mkdir(mode=0o755, parents=True, exist_ok=True)", "title": "" }, { "docid": "25b4196c9c0f7aa0c9ac48f72de1dd45", "score": "0.57066864", "text": "def create_environment(home_dir, site_packages=False, clear=False,\r\n unzip_setuptools=False,\r\n prompt=None, search_dirs=None, never_download=False,\r\n no_setuptools=False, no_pip=False, symlink=True):\r\n home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)\r\n\r\n py_executable = os.path.abspath(install_python(\r\n home_dir, lib_dir, inc_dir, bin_dir,\r\n site_packages=site_packages, clear=clear, symlink=symlink))\r\n\r\n install_distutils(home_dir)\r\n\r\n if not no_setuptools:\r\n install_sdist('Setuptools', 'setuptools-*.tar.gz', py_executable, search_dirs)\r\n if not no_pip:\r\n install_sdist('Pip', 'pip-*.tar.gz', py_executable, search_dirs)\r\n\r\n install_activate(home_dir, bin_dir, prompt)", "title": "" }, { "docid": "5752224e8328c6671c346a2540c7859c", "score": "0.5700057", "text": "def setUp(self):\n test_env_setup()", "title": "" }, { "docid": "5752224e8328c6671c346a2540c7859c", "score": "0.5700057", "text": "def setUp(self):\n test_env_setup()", "title": "" }, { "docid": "5752224e8328c6671c346a2540c7859c", "score": "0.5700057", "text": "def setUp(self):\n test_env_setup()", "title": "" }, { "docid": "1d9c1201a8dfb9942423b50f5cd75567", "score": "0.56800276", "text": "def test_deploy(environment):\n pass", "title": "" }, { "docid": "15091de4e811569597cccbca22197d28", "score": "0.56781286", "text": "def build(self):\n self.prepare_environment()\n self.run_buildscript()\n self.copy_files()\n self.freeze_virtualenv()\n self.create_postinstall_script()", "title": "" }, { "docid": "2361f24168a48b1ea20f5b91addd2d84", "score": "0.567502", "text": "def create_env(self, env_def):", "title": "" }, { "docid": "cc527bdeb312569887b3b04fa2db231c", "score": "0.5646349", "text": "def setup_package():\n\n global TEST_WORKSPACE\n TEST_WORKSPACE = env.get_workspace('db_cleanup')\n\n # Set the TEST_WORKSPACE used by the tests.\n os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE\n\n # Create a basic CodeChecker config for the tests, this should\n # be imported by the tests and they should only depend on these\n # configuration options.\n codechecker_cfg = {\n 'suppress_file': None,\n 'skip_list_file': None,\n 'check_env': env.test_env(TEST_WORKSPACE),\n 'workspace': TEST_WORKSPACE,\n 'checkers': [],\n 'viewer_host': 'localhost',\n 'viewer_product': 'db_cleanup',\n 
'reportdir': os.path.join(TEST_WORKSPACE, 'reports')\n }\n\n env.export_test_cfg(TEST_WORKSPACE, {'codechecker_cfg': codechecker_cfg})", "title": "" }, { "docid": "ec12e3f88cb738aaee107e7559f6462d", "score": "0.563471", "text": "def run(self):\n\n assert os.getenv('VIRTUAL_ENV'), 'You should be in a virtualenv!'\n develop.run(self)\n self.spawn(('pip', 'install', '--upgrade', '--requirement', 'requirements-dev.txt'))", "title": "" }, { "docid": "00230a1633d7c2d494f2393b3072e1e7", "score": "0.563054", "text": "def make_dockerenv_dir():\n mkdir(dockerenv_dir())", "title": "" }, { "docid": "3dbcdab4140622e95999db9edcbd3b57", "score": "0.56304234", "text": "def test_clone_exists(self):\n # run virtualenv-clone\n sys.argv = ['virtualenv-clone', venv_path, clone_path]\n clonevirtualenv.main()\n\n # verify cloned venv exists at the path specified\n assert os.path.exists(clone_path), 'Cloned Virtualenv does not exists'", "title": "" }, { "docid": "00c475f2a4e3560ae1909bb1ff794730", "score": "0.5624174", "text": "def config_virtualenv(virtualenv_config):\n pass", "title": "" }, { "docid": "1c239a185786b284559f403d0405c5f6", "score": "0.5618027", "text": "def make_test_environ_builder(app, path='/', base_url=None, *args, **kwargs):\r\n http_host = app.config.get('SERVER_NAME')\r\n app_root = app.config.get('APPLICATION_ROOT')\r\n if base_url is None:\r\n base_url = 'http://%s/' % (http_host or 'localhost')\r\n if app_root:\r\n base_url += app_root.lstrip('/')\r\n return EnvironBuilder(path, base_url, *args, **kwargs)", "title": "" }, { "docid": "a3426aa867bc2ad513d52d03c99c3d96", "score": "0.5612252", "text": "def web_prepare_site(domain, py_version='2'):\n\n web_create_site_directory(domain)\n web_create_virtual_env(domain, py_version)\n web_create_site_log_file(domain)", "title": "" }, { "docid": "6371c9f93ec5ee8d3ea2964622c0d78e", "score": "0.5606645", "text": "def prepare_sandbox():\n tmp_dir = tempfile.mkdtemp(prefix=\"taichi-\")\n atexit.register(shutil.rmtree, tmp_dir)\n print(f\"[Taichi] preparing sandbox at {tmp_dir}\")\n os.mkdir(os.path.join(tmp_dir, \"runtime/\"))\n return tmp_dir", "title": "" }, { "docid": "b7ffb7b2b0f5e9e4f8297e4e034036d9", "score": "0.5601085", "text": "def setup_sandbox(sandboxDir):\n if os.path.isdir(sandboxDir):\n shutil.rmtree(sandboxDir)\n os.mkdir(sandboxDir)", "title": "" }, { "docid": "822acbde8d141cd56779696a69c3e0d3", "score": "0.55877227", "text": "def create_venv(\n path: str, packages: list[str] = [], pip_opts: list[str] = []\n) -> list[str]:\n logging.info(\"Creating virtual environment...\")\n venv.create(path, clear=True, symlinks=False, with_pip=True)\n\n if os.name == \"nt\":\n python = os.path.join(path, \"Scripts\", \"python.exe\")\n else:\n python = os.path.join(path, \"bin\", \"python\")\n\n n = 1\n err = []\n for pkg in packages:\n logging.info(\n \"Installing {0}...{1:>{2}}[{3}/{4}]\".format(\n pkg, \" \", abs(25 - len(pkg)), n, len(packages)\n )\n )\n pip = run(\n [python, \"-m\", \"pip\", \"install\", *pip_opts, pkg],\n stdout=PIPE,\n stderr=STDOUT,\n universal_newlines=True,\n )\n if pip.returncode != 0:\n logging.warning(\"{0} install failed.\".format(pkg))\n logging.debug(\"Error message:\\n===\\n{0}===\".format(pip.stdout))\n err.append(pkg)\n n += 1\n return err", "title": "" }, { "docid": "2947ba02dc59303ec9c2d611a19581fa", "score": "0.5568108", "text": "def set_up_framework_and_tests():\n framework = '{{ cookiecutter.framework }}'\n if framework == '(none)':\n return\n\n LOG.info('Moving files for %s project ...', framework)\n 
framework_folder = join('_', 'frameworks', framework)\n for file_or_folder in listdir(framework_folder):\n shutil.move(join(framework_folder, file_or_folder), '.')\n\n if framework in ['Django', 'Flask']:\n LOG.info('Moving test setup for %s project ...', framework)\n testing_folder = join('_', 'testing', 'python')\n for file_or_folder in listdir(testing_folder):\n shutil.move(join(testing_folder, file_or_folder), '.')", "title": "" }, { "docid": "755383716b168fb1b2057241763e80de", "score": "0.55601054", "text": "def _prepare_project_deployment(cont, util, py_util, py_cont):\n with util.Task(\"\"\"Installing version bumper\"\"\"):\n py_util.pip_install(cont, util, \"travis-bump-version\")\n\n with py_cont.deactivated(util):\n py_util.pip_install(cont, util, \"travis-bump-version\")", "title": "" }, { "docid": "d42885fa0b232ce6925362c958271f0f", "score": "0.55578244", "text": "def test_create_app(self):\n pass", "title": "" }, { "docid": "d42885fa0b232ce6925362c958271f0f", "score": "0.55578244", "text": "def test_create_app(self):\n pass", "title": "" }, { "docid": "1b1f934b0d46938e872b96f68c7800d6", "score": "0.55387825", "text": "def test_create_project(self):\n self._create_project()", "title": "" }, { "docid": "b41f315d9e74502b348a4fbf14fb5be9", "score": "0.55381155", "text": "def test_serve_build_with_project(self):\n pass", "title": "" }, { "docid": "fe69f40fe034ef373e5aa80bc80fa0a2", "score": "0.553659", "text": "def setup():\n\n with settings(warn_only=True):\n sudo('service nginx stop')\n\n # install packages\n sudo('apt-get install build-essential python python-dev')\n sudo('apt-get install python-pip supervisor')\n sudo('pip install virtualenv')\n sudo('apt-get install git unzip socket')\n\n # create application directory if it doesn't exist yet\n with settings(warn_only=True):\n if run(\"test -d %s\" % env.code_dir).failed:\n # create project folder\n sudo('mkdir -p ' + env.code_dir)\n sudo('mkdir -p %s/api' % env.code_dir)\n sudo('mkdir %s/instance' % env.code_dir)\n sudo('mkdir %s/election_results' % env.code_dir)\n if run(\"test -d %s/env\" % env.code_dir).failed:\n # create virtualenv\n sudo('virtualenv --no-site-packages %s/env' % env.code_dir)\n\n # install the necessary Python packages\n with virtualenv():\n put('requirements/base.txt', '/tmp/base.txt')\n put('requirements/production.txt', '/tmp/production.txt')\n sudo('pip install -r /tmp/production.txt')\n\n # install nginx\n sudo('apt-get install nginx')\n # restart nginx after reboot\n sudo('update-rc.d nginx defaults')\n sudo('service nginx start')\n\n set_permissions()\n return", "title": "" }, { "docid": "757cb4741a8c5a02668f43cc1b4645fe", "score": "0.5512299", "text": "def prepare_environment(base_path, backup_path_dir):\n if not os.path.exists(base_path):\n os.makedirs(base_path)\n else:\n shutil.rmtree(os.path.dirname(base_path))\n os.makedirs(base_path)\n if not os.path.exists(backup_path_dir):\n os.makedirs(backup_path_dir)", "title": "" }, { "docid": "e15a65deb7a6a2f11c855976abc9eb85", "score": "0.5511577", "text": "def setGTestEnv(env, pdir):\n env['GTM_ROOT'] = pdir\n env['GTEST'] = \"%s/googletest\" % pdir\n env['GMOCK'] = \"%s/googlemock\" % pdir\n env['GTM_CPPPATH'] = \"%s/include %s/include\" % (env['GTEST'], env['GMOCK'])\n bld_vdir = env.get('build_vdir','sbuild/dbg')\n env['GTM_LIBPATH'] = \"%s/%s/lib\" % (pdir, bld_vdir)", "title": "" } ]
8f3322cbece87d4cd2d4aed1847f9ac1
Returns a list of words contained by an identifier.
[ { "docid": "ebf7095286e0d19491a13934e837cdfb", "score": "0.80292684", "text": "def identifier_to_words(identifier):\n identifier = identifier.strip('_')\n\n if not identifier:\n return []\n\n lowered = identifier.lower()\n\n split = lowered.split('_')\n if len(split) > 1:\n return split\n\n index = 1\n next_split_start = 0\n words = []\n\n while index < len(identifier):\n if identifier[index] != lowered[index]:\n words.append(lowered[next_split_start:index])\n next_split_start = index\n index += 1\n\n words.append(lowered[next_split_start:index])\n return words", "title": "" } ]
[ { "docid": "a7bcb6fba7f8bad6e6745830aff48630", "score": "0.6666058", "text": "def get_hate_word_list():\n return list(i['word'] for i in db.hateword.find())", "title": "" }, { "docid": "6f0239e2d00df96a06ea7ff27bfc076d", "score": "0.64950097", "text": "def get_words(data):\n return [item[\"words\"] for item in data]", "title": "" }, { "docid": "188fa447a6f88beea1a083dabe781c7e", "score": "0.64908314", "text": "def Ids2Words(ids_list, vocab):\n assert isinstance(ids_list, list), '%s is not a list' % ids_list\n return [vocab.IdToWord(i) for i in ids_list]", "title": "" }, { "docid": "4a61868951bc63f0b768cb1c819e4929", "score": "0.6381122", "text": "def all_words(self):\n return [\n i\n for i in self.data_dir.child(\"word_list\").getContent().split(b\"\\n\")\n if b\"#\" not in i\n ]", "title": "" }, { "docid": "d6db70fee02aeca76b8c27db5975f81e", "score": "0.63494414", "text": "def get_ntlk_words() -> List[str]:\n from nltk.corpus import words\n return words.words()", "title": "" }, { "docid": "be010ea643fc9a7950c2e0753ad8709f", "score": "0.6213325", "text": "def lookup(self, word):\n return list(self.lexicon.get(word, []))", "title": "" }, { "docid": "4ce498a80255b4c9a35cff6793bfcb22", "score": "0.62124246", "text": "def word_list(self, tag):\n matches = self.find(tag, before=0, after=0, printing=False)\n return set(item[0].lower().split()[0] for item in matches)", "title": "" }, { "docid": "6c9e7259f383e05128effc1970a962d1", "score": "0.61928254", "text": "def get_words(self):\n string_words = []\n for word in self.words:\n string_words.append(word.word + ' ')\n\n return string_words", "title": "" }, { "docid": "0fbb8ecb2552a188eb9143d9b1d42c0d", "score": "0.6167937", "text": "def listwords(fn):\n f = open(fn)\n text = f.read()\n splitted = text.split()\n return [word.lower() for word in splitted\n if wordlike.match(word) and not is_stopword(word)]", "title": "" }, { "docid": "a1011231592419090f8610fd60657fad", "score": "0.61484295", "text": "def get_word_list():\n\ttextfile = open(\"pg32325.txt\")\n\tfull_text = textfile.read()\n\tno_punctuation = full_text.translate(None, string.punctuation)\n\tno_intro = strip_headers(no_punctuation).strip()\n\tconvert_ascii = no_intro.encode(\"ascii\")\n\tconvert_lowercase = string.lower(convert_ascii)\n\tlist_split = convert_lowercase.split()\n\treturn list_split", "title": "" }, { "docid": "dc8d7e2cb9366bc46576b5aa2aed39cf", "score": "0.61422116", "text": "def words(context):\n return \" \".join(context['word'])", "title": "" }, { "docid": "027eea716beee31df4b90136a1246167", "score": "0.6139208", "text": "def word_ids(self, token_ids):\n word_ids = []\n curr_id = 0\n\n special_mask = self.tokenizer.get_special_tokens_mask(token_ids, already_has_special_tokens=True)\n regular_token = True\n\n for i, token_id in enumerate(token_ids):\n if special_mask[i]:\n word_ids.append(None)\n curr_id += 1\n elif token_id in self.whitespace_ids:\n if regular_token:\n regular_token = False\n curr_id += 1\n word_ids.append(curr_id)\n else:\n regular_token = True\n word_ids.append(curr_id)\n\n return word_ids", "title": "" }, { "docid": "eca8bd6352ce36623da510851b05ad4d", "score": "0.61327726", "text": "def get_wordlist(filename):\n with open(filename, encoding=\"utf-8\") as f:\n text = f.read()\n return text.split()", "title": "" }, { "docid": "40a13f0f280f6b1a4f76c01944f0eba5", "score": "0.61252034", "text": "def get_list_words(file_name):\n fin = open(file_name)\n return clean_words(list(fin))", "title": "" }, { "docid": "5ae9fad228e99d102dea20a1ee10b124", "score": 
"0.61231506", "text": "def get_words(letter):", "title": "" }, { "docid": "23fae022c38fb8d423cbeb862a85460e", "score": "0.6091063", "text": "def words():\n return [word.lower() for (word, _) in entries()]", "title": "" }, { "docid": "2fde4c384623b55c68434824f3dffcf4", "score": "0.60898584", "text": "def getWords(allwords,wordlength):\n wlist = [w for w in allwords if len(w) == wordlength]\n return wlist", "title": "" }, { "docid": "9d27c9304a6fefc39c73d1c4b577a4c4", "score": "0.6085532", "text": "def word_ids_to_words(data, id_to_word):\r\n return [id_to_word[i] for i in data]", "title": "" }, { "docid": "25b4dcdce0861e697414d76dc6b3b910", "score": "0.60697514", "text": "def get_words(xmlelement):\n return [word_tag.text.strip().lower()\n for word_tag in xmlelement.find_all('w')]", "title": "" }, { "docid": "87080fc085004a0549eb7dd2d2e7c783", "score": "0.6068264", "text": "def get_words_in_book(filename):\n f = open(filename, \"r\")\n content = f.read()\n f.close()\n wds = text_to_words(content)\n return wds", "title": "" }, { "docid": "87080fc085004a0549eb7dd2d2e7c783", "score": "0.6068264", "text": "def get_words_in_book(filename):\n f = open(filename, \"r\")\n content = f.read()\n f.close()\n wds = text_to_words(content)\n return wds", "title": "" }, { "docid": "703ace91f220a0e92f4bd0d6230773ee", "score": "0.60471636", "text": "def get_words(text):\n return re.compile('\\w+').findall(text)", "title": "" }, { "docid": "bbfb224239aba87e8bc24bf342e9ff64", "score": "0.603643", "text": "def extract_text_words_by_token_ids(input_ids, tokenizer, max_sequence_length):\n extracted_text_tokens = tokenizer.convert_ids_to_tokens(input_ids)\n # There is no decode method provided in FullTokenizer\n # which is usually used with bert. Thus, we join word pieces and remove `##`\n # symbols that define boundaries between word pieces to form words from token\n # indices.\n text = \" \".join(extracted_text_tokens)\n text = text.replace(\" ##\", \"\")\n words = text.split(\" \")\n words_padded = _pad_input_sequence(words, max_sequence_length)\n return words_padded", "title": "" }, { "docid": "a7e174b32b7df42b9d0bc1b947296d78", "score": "0.60207546", "text": "def retrieve_words_names(text):\n sents = list(tok.tokenize_and_tag(text))\n return [retrieve_words_only(sents), retrieve_names_only(sents)]", "title": "" }, { "docid": "200c12b7db0b5c4d0a5833680fe2474f", "score": "0.5990594", "text": "def get_wordlist(mnemonic: str) -> list:\n language = get_language(mnemonic)\n mnemo = Mnemonic(language)\n wordlist = mnemo.wordlist\n return wordlist", "title": "" }, { "docid": "20b90c76594e6b64b4ee1d88bb9ff3e5", "score": "0.5984162", "text": "def getSpaCyWords():\r\n \r\n # Format the vocabulary for use in the distance function\r\n ids = [x for x in nlp.vocab.vectors.keys()]\r\n vocabulary = [nlp.vocab.vectors[x] for x in ids]\r\n vocabulary = np.array(vocabulary)\r\n\r\n return vocabulary, ids", "title": "" }, { "docid": "a6a274cda5f900ccdc6216cfc8ab5520", "score": "0.5975361", "text": "def words(self, fileids=None, **kwargs):\n for fileid in fileids:\n if not os.path.exists(os.path.join(self.add_root(fileid), 'ann_words.xml')):\n return []\n return concat([self._view(self.add_root(fileid),\n mode=NKJPCorpusReader.WORDS_MODE, **kwargs).handle_query()\n for fileid in fileids])", "title": "" }, { "docid": "2639c9d3672f8fefd82c9282f86bb32d", "score": "0.5970246", "text": "def filter_by_word(\n self, word: str, ids_to_skip: set[int], max_length: int\n ) -> list[SentenceTranslations]:\n raise NotImplementedError()", 
"title": "" }, { "docid": "07ffe6c2ef01530a43f7e1a953a6e0ae", "score": "0.5967889", "text": "def get_word(table):\r\n\r\n return list(table.scan()['Items'])", "title": "" }, { "docid": "ec57eeb6d53b663e6aa272103b6768e2", "score": "0.5959668", "text": "def convert_ids_to_words(ids, vocab):\n\n assert isinstance(ids, list), '%s is not a list' % ids\n\n return [vocab.id_to_word(_id) for _id in ids]", "title": "" }, { "docid": "28bcbe51fd3e530199537c62ff8f41d0", "score": "0.5944947", "text": "def extract_words(text):\n return R.findall(text)", "title": "" }, { "docid": "5fb9ec21451e5bba700b252cb26448e5", "score": "0.59324205", "text": "def words_list(text):\r\n return re.findall(\"[a-zA-Z]+\", text)", "title": "" }, { "docid": "e4692d2576738444bcb9afca7702eb39", "score": "0.5912572", "text": "def get_words(self):\n self.words = [word\n for my_dict in self.tweet_nested_list\n for status in my_dict['statuses']\n for word in status['text'].split()\n ]", "title": "" }, { "docid": "919b2c8665b71b22fa07a0a730d43f1c", "score": "0.59097385", "text": "def get_words(self):\n return self.words", "title": "" }, { "docid": "ddd2a799116a6a29d4f2953f88559b4a", "score": "0.5901209", "text": "def get_words():\n filename = \"10k_english_words.txt\"\n return Path(filename).read_text().split()", "title": "" }, { "docid": "77b2e542cd7878e4f5fb640a9467c524", "score": "0.5896755", "text": "def word_list(self):\n return self._word_list", "title": "" }, { "docid": "6bed6e675becea86701467ad439d12aa", "score": "0.5888216", "text": "def all_words(self):\n words = [(word, player.id, i) for player in self.players.values() for (i, word) in enumerate(player.words)]\n\n if self.bot:\n words.extend([(word, self.bot.id, i) for (i, word) in enumerate(self.bot.words)])\n\n return words", "title": "" }, { "docid": "69a873f4997037d072d9b3aa4e2c5e1d", "score": "0.587961", "text": "def words(self):\n return self._data.split()", "title": "" }, { "docid": "ba67c21a08fcc20b77720ecb78549cd7", "score": "0.5876669", "text": "def load_words(dictionary_filename):\n\t\twith open(dictionary_filename) as file:\n\t\t\treturn file.read().splitlines()", "title": "" }, { "docid": "f27930ffbf770c953a008fb3af515a04", "score": "0.58572376", "text": "def get_all() -> list:\n all_words = Word.query.filter_by(user_id=current_user.id).all()\n return all_words", "title": "" }, { "docid": "5b4cafb351baed9db2a010521e193cec", "score": "0.5852354", "text": "def getList(self):\n return self.word_list", "title": "" }, { "docid": "ddf7db3379cf1522ff188cfa29861e1c", "score": "0.5840506", "text": "async def listwords(ctx):\n\n guild_id = ctx.guild.id\n swears = db.get(\"blacklist\", fields=(\"term\",), conditions=(\"guildid\",), values=(guild_id,))\n # a list comprehension since the fetchall method returns a tuple in the form of (word,)\n words = [swear[0] for swear in swears]\n if not words:\n await ctx.send('There are no blacklisted words')\n return\n # basic message formatting for the swearwords\n wordlist = ''.join([f'-{word}\\n' for word in words])\n await ctx.send(f'the swearwords in this server are:\\n{wordlist}')", "title": "" }, { "docid": "59f6b9fc17f62adf8b8cc9a7cba94619", "score": "0.5838622", "text": "def splitToWords(self):\n self.wordsList = re.findall(r'\\w+', self.doc)", "title": "" }, { "docid": "f5a069ace07f0a6544afd8c78ebaa67c", "score": "0.5832269", "text": "def as_wordlist(self,**filters):\n return self.get_queryset().filter(**filters).values_list('word',flat=True)", "title": "" }, { "docid": "cc0ad0f78a5885e5302af7cf45769691", "score": 
"0.5830912", "text": "def extract_words(dirc):\n words = []\n for filename in os.listdir(dirc):\n with open(os.path.join(dirc, filename)) as f:\n tokens = tokenize(f.read())\n words = words + tokens\n\n print \"Diretorio: \", dirc, \" / Arquivos: \", len(os.listdir(dirc))\n return words", "title": "" }, { "docid": "7aed39dcf29a5fe2f3938875972cdb56", "score": "0.5826378", "text": "def load_words():\r\n word_list = [\"navgurukul\", \"learning\", \"kindness\",\"laptop\",\"anything\",]\r\n return word_list", "title": "" }, { "docid": "8938688a18e12c28ce2e933fea7e37e1", "score": "0.579945", "text": "def extract_words_from_document(self, sub_class, d):\n\n W = []\n\n for w in d.split():\n if w in self.V:\n if sub_class == 'binarized' and w in W:\n pass\n else:\n W.append(w)\n\n return W", "title": "" }, { "docid": "9df78f34ef4717f430ac60d69c66d160", "score": "0.5799285", "text": "def words(text):\n return re.findall('[a-z]+', text.lower())", "title": "" }, { "docid": "8a14125f23ddf216b024b3a217614c63", "score": "0.5798594", "text": "def find_words(self, prefix):\n v = self.find_final_node(prefix)\n wList = self.build_word_list(v, prefix)\n if(v and v.word):\n #v exists and the prefix is itself a word; add it to the list.\n wList.append(prefix)\n\n return wList", "title": "" }, { "docid": "b96fad63053616aec07ed4b15ee494e7", "score": "0.579546", "text": "def words(self):\n w = []\n for p in self.players:\n w += self.players[p]['words']\n return w", "title": "" }, { "docid": "82956484e1340f7ffa7f2c43f2b2e14b", "score": "0.5784997", "text": "def get_words(file_path):\n with open(file_path, encoding='utf-8') as hfile:\n return hfile.read().lower().split()", "title": "" }, { "docid": "c9b4b92862a40cf926316dad14c649a6", "score": "0.57823074", "text": "def wordlist(self):\n return self._wordlist", "title": "" }, { "docid": "9ce0429d8ba8725ee331258c985c8180", "score": "0.57777905", "text": "def convert_to_list_of_words(lyrics):\n return lyrics.replace(',','').lower().strip().split()", "title": "" }, { "docid": "bb353a46f4150bf8508624c5437692e6", "score": "0.57555765", "text": "def find_words_from_sentence(sentence: str) -> list:\n pass", "title": "" }, { "docid": "2e0c0dfb9b83ad61a3df176dd6f21589", "score": "0.57519406", "text": "def indices2words(self, word_ids: List[int]):\n return [self.id2word[w_id] for w_id in word_ids]", "title": "" }, { "docid": "5026823161655076c072d54fbe183e13", "score": "0.5748753", "text": "def findwordlist(template, closewordind, vocab, numwords=10, addeos=False):\n if isinstance(template, str):\n template = template.split()\n templateind = closewordind.new_tensor([vocab.stoi[w] for w in template])\n # subvocab = closewordind[templateind, :numwords].flatten().cpu() # torch.flatten() only exists from PyTorch 0.4.1\n subvocab = closewordind[templateind, :numwords].view(-1).cpu()\n if addeos:\n subvocab = torch.cat([subvocab, torch.LongTensor([vocab.stoi['<eos>']])])\n subvocab = subvocab.unique(sorted=True)\n word_list = [vocab.itos[i] for i in subvocab]\n \n return word_list, subvocab", "title": "" }, { "docid": "a3baf84f4a31f57469d94697e3996898", "score": "0.5741877", "text": "def words_by_value(self,value):\n return [letter for letter in self.words if self.words[letter] == value]", "title": "" }, { "docid": "8ac8e31cce932310127caeb4c608c4b2", "score": "0.5713383", "text": "def words(self) -> List[str]:\n return shamir_share.decrypt_mnemonic(self.encrypted_mnemonic, self._get_password())", "title": "" }, { "docid": "ee7ea33264bd839645e9177fbeb58c51", "score": "0.5695588", 
"text": "def words(self):\n return self._words", "title": "" }, { "docid": "ee7ea33264bd839645e9177fbeb58c51", "score": "0.5695588", "text": "def words(self):\n return self._words", "title": "" }, { "docid": "ee7ea33264bd839645e9177fbeb58c51", "score": "0.5695588", "text": "def words(self):\n return self._words", "title": "" }, { "docid": "2bd5de04a069740783ea3e10c4497dcc", "score": "0.56955373", "text": "def words_addr(self,uu,rr,dd):\n ret = []\n addr = [ x if 'word' in x else None for x in self.r(uu,rr,dd)]\n if any(addr):\n for xx in addr:\n if not xx is None:\n t,nm = xx.split('_')\n ret.append([xx,t[-1] in 'wW',nm])\n return ret", "title": "" }, { "docid": "32d1c51f49c379043b0c3c936d005403", "score": "0.5683354", "text": "def words(line):\n return line.strip().split()", "title": "" }, { "docid": "bcaf5b7e785556ed1754313cae181c9c", "score": "0.56822544", "text": "def get_en_words(self):\n return list(set(self.en_map.keys()))", "title": "" }, { "docid": "1f93a4429f4c66aed9e2cb1d92d7c629", "score": "0.5671825", "text": "def get_synonyms(self, prefix: str, identifier: str) -> List[str]:\n logger.warning(f\"getting synonyms is not yet implemented for {self.__class__}\")\n return []", "title": "" }, { "docid": "4a0c21d4ec8dea2e7a36720c611701a6", "score": "0.56712633", "text": "def getAllWord():\n allWord = set()\n Dataset = loadSenseval2Format()\n for lemma in Dataset.values():\n for instence in lemma.instances:\n for word in instence['context'].split():\n allWord.add(word.strip(punctuation))\n\n for synset in wn.synsets(lemma.lemma, pos=lemma.pos):\n for word in synset.definition().split():\n allWord.add(word.strip(punctuation))\n\n return allWord", "title": "" }, { "docid": "3e0a5d5c256401190f0a50997883f30a", "score": "0.56613094", "text": "def load_doc_words(filename):\n with open(filename, 'r', encoding= 'ascii') as file:\n words = [word.lower() for word in re.findall(r'[A-Za-z]+', file.read())]\n return words", "title": "" }, { "docid": "7cc4f3d43084170e485397d5918009b6", "score": "0.56605417", "text": "def words(self):\n\n return self._words", "title": "" }, { "docid": "a0d0ce791689d6ad4c88889b0dca70ee", "score": "0.56511474", "text": "def getWordsForDisplay(text):\n text = re.sub( \"\"\"['\"/-]\"\"\", \"\", text );\n text = re.sub( '[^a-zA-Z ]', \" \", text).lower()\n words = text.split()\n for i in range( 0, len(words) -1 ):\n if not (words[i] in stopWords or len(words[i]) <= 1):\n builder = [words[i]]\n j = i + 1\n while j < len(words) and (words[j] in stopWords or len(words[i]) <= 1):\n builder += [words[j]]\n j = j + 1;\n if j < len(words):\n builder += [words[j]]\n words[i] = tuple(builder)\n words = [x for x in words if not (x in stopWords or len(x) <= 1)]\n return words[:-1]", "title": "" }, { "docid": "05e63b307f4f2e1f4ffe42270cd7a721", "score": "0.5648641", "text": "def show_word_to_list(self):\n print(self._word_to_id)", "title": "" }, { "docid": "75ef0afe15b6f001082744c06e5108db", "score": "0.5643566", "text": "def indices2words(self, word_ids):\n return [self.id2word[w_id] for w_id in word_ids]", "title": "" }, { "docid": "0971ac4760c6220373a4bd862b5e406e", "score": "0.5639695", "text": "def get_wordlist(self):\n \n ## init\n if( self.wordlist == None ):\n pass\n elif( self.wordlist.__len__() > 0):\n vout = self.wordlist\n elif( self.wordlist.__len__() == 0):\n vout = []\n ##;;\n \n ## process\n if(vout.__len__() == 0):\n aawords = [] \n ## extract\n for objj in [datetime,itertools,os,random,re,string,sys,]:\n aawords.extend(\n 
re.findall('[a-zA-Z0-9]+',str(getattr(objj,vxx).__doc__))\n for vxx in dir(objj)\n if(re.findall('^[a-z]',vxx))\n )\n aawords = list(itertools.chain(*aawords))\n aawords = [vxx.replace('_','') for vxx in aawords]\n ##;;\n \n ## transform\n aawords = [vxx for vxx in aawords if(vxx)] ##;; remove_empty\n aawords = [str(vxx) for vxx in aawords if(vxx)] ##;; stringify \n aawords = [vxx.lower() for vxx in aawords] ##;; lowercase\n ## extend\n aawords.extend( [vxx[::-1] for vxx in aawords] ) ##;; reversal\n aawords.extend( [self.wordnumberize(vxx[0:7]) for vxx in aawords[0:10000]] ) ##;; wordnumberize \n ##;;\n \n ## remove_dupes\n aawords = sorted(set(aawords))\n vout = aawords\n self.wordlist = vout\n ##;;\n ##endif\n \n ## return \n vout = self.listshuffle(vout)\n return vout\n ##;;", "title": "" }, { "docid": "209e79d585b7d40e93c8d174ea366869", "score": "0.56249774", "text": "def known(words):\n return set(w for w in words if w in WORDS)", "title": "" }, { "docid": "7b3d137c867e6455eef1b770202b17a0", "score": "0.5623789", "text": "def words_in_file(self):\n return self.words", "title": "" }, { "docid": "f6ce2d6c8722f9742d9e25cad6f76456", "score": "0.56214", "text": "def read_word_file(file_name):\r\n words = []\r\n\r\n with open(file_name) as file:\r\n for line in file:\r\n for word in re.findall(r'\\w[^\\d\\W]+\\b', line):\r\n words.append(word)\r\n\r\n return words", "title": "" }, { "docid": "1dc65eb5b2169c86edd0323997ed489c", "score": "0.5621055", "text": "def find_words(letters: str) -> list[Word]:\n\n center_letter = letters[0]\n words = word_list(\n includes=center_letter,\n min_length=4,\n )\n letter_set = set(letters)\n\n return [Word(word) for word in words if set(word) <= letter_set]", "title": "" }, { "docid": "695d14f87d361e162d8dc4ae56d77026", "score": "0.5616774", "text": "def load_word_list(filename):\n handle = open('data.txt', 'r')\n # Load a list of whitespace-delimited words from the specified file\n raw_text = handle.read().strip().split()\n # Strip non-alphanumeric characters from each word\n alphanumeric_words = map(lambda word: ''.join(char for char in word if char.isalnum()), raw_text)\n # Filter out words that are now empty (e.g. 
strings that only contained non-alphanumeric chars)\n alphanumeric_words = filter(lambda word: len(word) > 0, alphanumeric_words)\n # Convert each word to lowercase and return the result\n\n handle.close()\n return list(map(lambda word: word.lower(), alphanumeric_words))", "title": "" }, { "docid": "082fa8a0a90305b050b567964cff7c17", "score": "0.56162053", "text": "def get_word_list(file_name):\n exclude = set(string.punctuation) # makes set of punctuation characters\n file1 = file_name\n s = ''.join(ch for ch in file1 if ch not in exclude) # for each character of the story creates a new string without any of the pucntiation\n startBook = s.index('CHAPTER I') + 9\n s = s.lower()\n s = s[startBook:]\n lis_File = s.split() #breaks up string at the spaces creates a list of elements\n return lis_File", "title": "" }, { "docid": "ce3586ec2ae80fd632274021d8936a35", "score": "0.5608422", "text": "def get_words_list(self, widget):\n options=widget.get(1.0, END)# get lines into string\n #Remove spaces in list of lines\n return [i.strip() for i in options.splitlines()]", "title": "" }, { "docid": "7600b1c4451b7e3911cd66755c6e12ff", "score": "0.56058943", "text": "def getWordList(self, lc = False, uc = False, includeNumbers = True):\n\t\tif includeNumbers == True:\n\t\t\treturn self.tokens(lc = lc, uc = uc)\n\t\telif includeNumbers == False:\n\t\t\twordList = []\n\n\t\t\tfor w in self.tokens(lc = lc, uc = uc):\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tnumber = int(w)\n\t\t\t\texcept(ValueError):\n\t\t\t\t\twordList.append(w)\n\n\t\t\treturn wordList", "title": "" }, { "docid": "4b7a2a2264c8e0d95b06a2351a631c51", "score": "0.5602784", "text": "def load_words():\n data = open(DICTIONARY).read().split()\n return data", "title": "" }, { "docid": "0553aa39fda4530d6c4cbdcfc59844ae", "score": "0.55960995", "text": "def words_from_node_db(cursor, node):\n\n all_prefixed_words = []\n if not node:\n return []\n explore = deque([node])\n while explore:\n node = explore.pop()\n if node[SQL_Vars.count]:\n all_prefixed_words.append([node[SQL_Vars.word], node[SQL_Vars.count]])\n cursor.execute(\"\"\"SELECT * FROM Trie WHERE p_id = ?\"\"\", \n (node[SQL_Vars.id],))\n children = cursor.fetchall()\n for child in children:\n explore.appendleft(child)\n return all_prefixed_words", "title": "" }, { "docid": "98dbab9dacc78addd2300f0bd0968430", "score": "0.5595113", "text": "def get_words(self, lst, indices=None):\n words = list()\n if indices is not None:\n pass\n else:\n indices = raw_input().split()\n for i in indices:\n words.append(self.get_word_at(int(i), lst))\n return words", "title": "" }, { "docid": "e3cbb65810b6a8fedd57b9757fb34895", "score": "0.5592416", "text": "def known(words,wordCounts):\n return { w for w in words if w in wordCounts }", "title": "" }, { "docid": "4a55fcc84f486fccc64605fcb16228b9", "score": "0.55889904", "text": "def get_words_from_line_list(text):\n\ttext = text.translate(translation_table)\n\tword_list = [x for x in text.split() if x not in set(stopwords.words('english'))]\n\t\n\treturn word_list", "title": "" }, { "docid": "85c86e42fc92413e99b0d466fbfbe7b9", "score": "0.5586325", "text": "def get_word_list(file_name):\n\tfin = open(file_name, 'r+')\n\tstrngs = ''\n\twords = []\n\tfor strng in fin.read().split(\"\\n\"):\n\t\tstrngs += strng.strip(string.whitespace+string.punctuation)+\"\"\n\t\tfor word in strng.split():\n\t\t\tword = word.strip(string.punctuation+string.whitespace)\n\t\t\tword = word.lower()\n\t# does not append words in the list useless_words\n\t\t\tif word not in 
useless_words:\n\t\t\t\twords.append(word)\n\t\t\telse:\n\t\t\t\tpass\n\treturn words", "title": "" }, { "docid": "8c960d9f263c4305c9291d2e9e860582", "score": "0.55846775", "text": "def words_to_word_ids(data=[], word_to_id={}, unk_key = 'UNK'):\r\n # if isinstance(data[0], six.string_types):\r\n # print(type(data[0]))\r\n # # exit()\r\n # print(data[0])\r\n # print(word_to_id)\r\n # return [word_to_id[str(word)] for word in data]\r\n # else:\r\n\r\n word_ids = []\r\n for word in data:\r\n if word_to_id.get(word) is not None:\r\n word_ids.append(word_to_id[word])\r\n else:\r\n word_ids.append(word_to_id[unk_key])\r\n return word_ids\r\n # return [word_to_id[word] for word in data] # this one\r\n\r\n # if isinstance(data[0], str):\r\n # # print('is a string object')\r\n # return [word_to_id[word] for word in data]\r\n # else:#if isinstance(s, bytes):\r\n # # print('is a unicode object')\r\n # # print(data[0])\r\n # return [word_to_id[str(word)] f\r", "title": "" }, { "docid": "beb9161625f2f945f23c18395a2a1feb", "score": "0.5572725", "text": "def load_words():\n lst = list()\n with open(DICTIONARY, 'r') as f:\n for line in f:\n lst.append(line.strip())\n return lst", "title": "" }, { "docid": "b6ff7839963b5143ee8d481d43bd55bc", "score": "0.55724114", "text": "def Words(self):\n return self.__bag_of_words.keys()", "title": "" }, { "docid": "07787c63aa83b006c79d7707d4cb2e52", "score": "0.5562035", "text": "def load_words():\n lst = []\n with open(DICTIONARY, \"r\") as file: \n data = file.readlines() \n for line in data: \n word = line.strip() \n lst.append(word) \n return lst", "title": "" }, { "docid": "69eaab3bb453487a124bcee1d60e531d", "score": "0.5561707", "text": "def words(\n num_words,\n min_length=DEFAULT_MIN_LENGTH,\n max_length=DEFAULT_MAX_LENGTH,\n dictionary=DICT_BRITISH_KEY,\n):\n mod = getattr(fictionary.models, dictionary)\n return [\n mod.random_word(min_length=min_length, max_length=max_length)\n for _ in range(num_words)\n ]", "title": "" }, { "docid": "d1d7116ac5aeef744f1aed5906790391", "score": "0.5560075", "text": "def findWords(line):\n return re.findall(r'[a-z]*[A-Z]+', line, re.I)", "title": "" }, { "docid": "f94f3381bdada2e30c736f938d54196e", "score": "0.5555437", "text": "def retrieve_word_list(self):\n split_list = {}\n try:\n with open(os.path.join(os.getcwd(), 'sources', 'diceware.wordlist.asc')) as list_file:\n for line in list_file.readlines():\n pattern = re.compile(r\"[\\d]{5}\")\n result = pattern.match(line)\n if result is not None and len(str(result)) > 0:\n key_value = tuple(line.split('\\t'))\n split_list[key_value[0]] = key_value[1].replace('\\n', '')\n self.word_list = split_list\n except FileNotFoundError:\n raise(FileNotFoundError('Please install the dicelist sources.'))", "title": "" }, { "docid": "7c64f7bbc1352bd3c1f660cd00a39413", "score": "0.55367196", "text": "def get_strings(data):\n return [\" \".join(item[\"words\"]) for item in data]", "title": "" }, { "docid": "81df71a898f7d4e990fd02b1964336cf", "score": "0.5536499", "text": "def _get_words_from_dataset(dataset):\n\n # Words may be either a string or a list of tokens. 
Return an iterator\n # of tokens accordingly\n def tokenize(words):\n if isinstance(words, str):\n return word_tokenize(words, include_punc=False)\n else:\n return words\n\n all_words = chain.from_iterable(tokenize(words) for words, _ in dataset)\n return set(all_words)", "title": "" }, { "docid": "c002e071237f840e012520683c7d2ada", "score": "0.5533537", "text": "def contains(self, word):\n return [list_word for list_word in self.ls if re.search(f\".*{word}.*\", list_word)]", "title": "" }, { "docid": "0397850a4cb5e388bf22ce571cf56602", "score": "0.55289453", "text": "def filter_words(self, words: List[Token]) -> List[Token]:\n raise NotImplementedError", "title": "" }, { "docid": "77c903b8e6d70b7bb9fba8230bb5d2a6", "score": "0.5528795", "text": "def get_words() -> List[str]:\n with open(INFILE, \"r\", newline=\"\") as infile:\n reader = csv.reader(infile)\n items = [row[0] for row in reader]\n return items", "title": "" }, { "docid": "508ad5577975b5114f24038f5198f65d", "score": "0.5525946", "text": "def sandwich(self, word):\n result = set()\n for i in range(1, len(word)):\n result.update(\n [\n list_word\n for list_word in self.ls\n if re.search(f\"^{word[:i]}.+{word[i:]}$\", list_word)\n ]\n )\n return list(result)", "title": "" }, { "docid": "efa372a1307b73c115301a67637c984a", "score": "0.5524657", "text": "def load_words(): \n inFile = open(\"words.txt\", \"r\")\n line = inFile.read()\n word_list = line.split()\n\n return word_list", "title": "" } ]
bd00d22d75d3e8398074e98ff4793c1e
Adds a step to the factory to update the script folder.
[ { "docid": "74f798e23fbee8505eca1454de0ef392", "score": "0.56004137", "text": "def AddUpdateScriptStep(self, gclient_jobs=None):\n # This will be run in the '..' directory to udpate the slave's own script\n # checkout.\n command = [chromium_utils.GetGClientCommand(self._target_platform),\n 'sync', '--verbose']\n if gclient_jobs:\n command.append('-j%d' % gclient_jobs)\n self._factory.addStep(shell.ShellCommand,\n name='update_scripts',\n description='update_scripts',\n locks=[self.slave_exclusive_lock],\n timeout=60*5,\n workdir='..',\n command=command)", "title": "" } ]
[ { "docid": "da6f256ea87182c3396057839a71cae9", "score": "0.607602", "text": "def test_steps_add(self):\n support.create_project(self, 'bob')\n project = cauldron.project.get_internal_project()\n\n r = support.run_command('steps add first.py')\n self.assertFalse(r.failed, 'should not have failed')\n self.assertTrue(os.path.exists(\n os.path.join(project.source_directory, 'S02-first.py')\n ))", "title": "" }, { "docid": "565ef8e2b0cce7d099d6cde66892f394", "score": "0.60713434", "text": "def test_steps_modify(self):\n response = support.create_project(self, 'lindsey')\n self.assertFalse(\n response.failed,\n Message(\n 'should not have failed to create project',\n response=response\n )\n )\n\n project = cauldron.project.get_internal_project()\n directory = project.source_directory\n\n r = support.run_command('steps add first.py')\n self.assertFalse(r.failed, 'should not have failed')\n self.assertTrue(os.path.exists(os.path.join(directory, 'S02-first.py')))\n\n r = support.run_command('steps modify S02-first.py --name=\"second.py\"')\n self.assertFalse(r.failed, 'should not have failed')\n self.assertFalse(\n os.path.exists(os.path.join(directory, 'S02-first.py'))\n )\n self.assertTrue(\n os.path.exists(os.path.join(directory, 'S02-second.py'))\n )", "title": "" }, { "docid": "3dcc7ae805dc2fad23ebe888fc71a64e", "score": "0.58751434", "text": "def test_move_step_later(self):\n support.create_project(self, 'edina')\n support.add_step(self, 'first')\n support.add_step(self, 'second')\n support.add_step(self, 'third')\n\n project = cauldron.project.get_internal_project()\n\n r = step_actions.modify_step(\n response=Response(),\n project=project,\n name=project.steps[1].filename,\n position='3'\n )\n\n self.assertTrue(r.success)\n self.assertEqual(\n ['S01.py', 'S02-second.py', 'S03-first.py', 'S04-third.py'],\n [s.filename for s in project.steps]\n )", "title": "" }, { "docid": "c139b05be7325bbceec750763f05ab95", "score": "0.57512367", "text": "def update_json_file(path, step):\n content = read_data(path)\n content = json.loads(content[0])\n content[\"step\"] = step\n\n write_data(path, json.dumps(content))", "title": "" }, { "docid": "7adcfa252d9f3641b7c261038a7d0dc5", "score": "0.57066226", "text": "def trigger_update(self):\n self.path_util.set_path(Path(self.path) / self.executable)", "title": "" }, { "docid": "4cbb61b6faed6bec743777e878dfbdb3", "score": "0.5609095", "text": "def script_init(self) -> bool:\n # if output directory given create the script there\n if len(self.output_dir) > 0:\n self.full_output_path = os.path.join(self.output_dir, self.dir_name)\n\n # will create the script under the Scripts directory of the pack\n elif os.path.isdir(SCRIPTS_DIR):\n self.full_output_path = os.path.join('Scripts', self.dir_name)\n\n # if non of the conditions above apply - create the integration in the local directory\n else:\n self.full_output_path = self.dir_name\n\n if not self.create_new_directory():\n return False\n\n hello_world_path = os.path.normpath(os.path.join(__file__, \"..\", \"..\", 'common', 'templates',\n self.HELLO_WORLD_SCRIPT))\n\n copy_tree(str(hello_world_path), self.full_output_path)\n if self.id != self.HELLO_WORLD_SCRIPT:\n # note rename does not work on the yml file - that is done in the yml_reformatting function.\n self.rename(current_suffix=self.HELLO_WORLD_SCRIPT)\n self.yml_reformatting(current_suffix=self.HELLO_WORLD_SCRIPT)\n self.fix_test_file_import(name_to_change=self.HELLO_WORLD_SCRIPT)\n\n print_color(f\"Finished creating script: {self.full_output_path}\", 
LOG_COLORS.GREEN)\n\n return True", "title": "" }, { "docid": "fea36eb5655bff470d6844e47b65b85c", "score": "0.55533916", "text": "def set_step(self):\n regex_step = re.compile(r'nStep\\s*=.*\\d+.*;')\n step_setting = 'nStep = {};'.format(env.config.nstep)\n with open(self.main_filepath) as file:\n old_file = file.read()\n new_file = re.sub(regex_step, step_setting, old_file)\n with open(self.main_filepath, 'w') as file:\n file.write(new_file)\n print('\"{}\" written to \"{}\"'.format(step_setting, self.main_filepath))", "title": "" }, { "docid": "8a3527a77a4a5ff9ee73c9e1344b7185", "score": "0.5526343", "text": "def update_script(self):\n\n self.script = self.script_imports + self.script_bright_config + self.script_variables + self.script_execution", "title": "" }, { "docid": "05f80efbd5861a0be1377220fef4c15d", "score": "0.5452806", "text": "def test_update_folder(self):\n pass", "title": "" }, { "docid": "d2a319f7c892008bbd41c3004534cc3a", "score": "0.5365407", "text": "def run(self):\n log(\"\")\n log(\n 'Refresh step \"{step}\" in sub-directory \"{directory}\" of project dir \"{project_dir}\"',\n args={\"step\": self.step, \"directory\": self.directory, \"project_dir\": self.directory},\n )\n\n dest_dir = os.path.join(self.args.project_directory, self.directory)\n if not assume_path_existing(dest_dir):\n return 1 # pragma: nocover\n\n # Load project-wide configuration and check that the step already exists.\n try:\n config_yaml = self._load_config_yaml()\n except RefreshStepAppException:\n return 1\n\n # Re-setup the step sub directory.\n self._resetup_step_dir(dest_dir, config_yaml)\n\n log(\"all done, have a nice day!\", level=LVL_SUCCESS)", "title": "" }, { "docid": "b2e71a64e8271c8b8db3bdb4097bd030", "score": "0.5344231", "text": "def update_julia_script(self, script):\n self.iter.update_julia_script(script)\n # update script in iter, update script in db", "title": "" }, { "docid": "3dc0e81d075e10da336b4a1939497789", "score": "0.5340129", "text": "def setup(self, script):\n raise NotImplementedError()", "title": "" }, { "docid": "5ff403402ff709b57b2f85ed3597f463", "score": "0.5332206", "text": "def ops_scripts_sync_post(self, kwargs):\r\n kwargs['target_folder'] = self.base_kit.target_folder\r\n return CustomizerControls(self.tools).sync_scripts(kwargs)", "title": "" }, { "docid": "f3068cfe38de370ec36cc680215250ea", "score": "0.53213704", "text": "def main():\n parse_args()\n dest = os.path.abspath(os.path.join(os.path.dirname(__file__),\n \"../templates/addons\"))\n update_addons(dest)", "title": "" }, { "docid": "4820c8ba290591084fad22638e901dff", "score": "0.5321231", "text": "def setup_method(self, method):\n TestMflag.directory = tempfile.mkdtemp(TestMflag.__name__)\n TestMflag.script = os.path.join(os.path.dirname(__file__),\n TestMflag.SCRIPT_NAME)\n TestMflag.original_script = TestMflag.script + \".orig\"\n shutil.copy(TestMflag.script, TestMflag.original_script)", "title": "" }, { "docid": "786380810d8ad72796d908aca2d87a27", "score": "0.5265873", "text": "def add_scripts_to_package():\n global setup_arguments\n\n if os.path.isdir('scripts'):\n setup_arguments['scripts'] = [\n os.path.join('scripts', f) for f in os.listdir('scripts')\n ]", "title": "" }, { "docid": "a4041a18e35d113616bae68983e133e7", "score": "0.524094", "text": "def put(self):\n args = self.parser_step.parse_args()\n step = args['step']\n self.case.set_step(step)\n return step, 201", "title": "" }, { "docid": "3161a57dc4b6dbede2dca36730d2acc0", "score": "0.5229666", "text": "def putScript(self, content: 
str):\n cluster = cluster_factory(provisioner='aws', zone=self.zone, clusterName=self.clusterName)\n leader = cluster.getLeader()\n\n self.sshUtil(['mkdir', '-p', self.scriptDir])\n\n with tempfile.NamedTemporaryFile(mode='w') as t:\n # use appliance ssh method instead of sshutil so we can specify input param\n t.write(content)\n # This works to make writes visible on non-Windows\n t.flush()\n leader.injectFile(t.name, self.script(), 'toil_leader')", "title": "" }, { "docid": "cfd5c816839b577251b82055bdfa533c", "score": "0.51992387", "text": "def _addScript(self, read_store, write_store, script):\n # Convert path from tuple to dot-separated list.\n path = self.PATH_SEPARATOR.join(script.getPhysicalPath())\n title = self.getScriptTitle(script)\n # And save information about all found.\n if path in read_store:\n info = read_store[path]\n info.title = title # Updated cached script title.\n else:\n info = ScriptInfo(title)\n write_store[path] = info", "title": "" }, { "docid": "eb0e5e0dbef745263dd35a38ee55f070", "score": "0.51853144", "text": "def add_script(script_name, template):\n base_path = find_root_folder(Path(os.getcwd()))\n if not base_path:\n raise click.ClickException(\"There are no scripts folder.\")\n project_name = str(base_path).split('/')[-1]\n\n script_content = load_template(\"scripts/\" + template,\n PROJECT_NAME=project_name,\n SCRIPT_NAME=script_name)\n\n if (base_path / \"scripts\" / (script_name + \".py\")).exists():\n raise click.ClickException(f\"The script `{script_name}` already exists.\")\n with open(base_path / \"scripts\" / (script_name + \".py\"), 'w') as script_file:\n script_file.write(script_content)\n click.echo(f\"Script {script_name} successfully created.\")", "title": "" }, { "docid": "05ad9b0ca1d864a3d4b809be5d78746a", "score": "0.5171756", "text": "def install_script(self) -> str:\n return os.path.join(self.config_dir, 'install')", "title": "" }, { "docid": "8b973f987e4ed1497b0a4f8742561f84", "score": "0.513154", "text": "def __call__(self):\n update_addons(self.dest)", "title": "" }, { "docid": "b2442a23d27efbfaf9f604f4f5ca844f", "score": "0.5113782", "text": "def update_script_dir(self, dir_selected):\n if dir is None:\n return\n dir_selected = os.path.abspath(dir_selected)\n self.script_dir = dir_selected\n self.script_validatior.validate_scripts(self.script_dir)\n self.prof_selection.update(dir_selected)", "title": "" }, { "docid": "b3fb3e075a795d4007a65475fbef220f", "score": "0.5061775", "text": "def setUp(self):\n if os.path.exists(self.script_path):\n os.remove(self.script_path)", "title": "" }, { "docid": "d9339b25dde5eeb9d63a0ab03c1e7cd8", "score": "0.50022084", "text": "def test_fail_remove_old_step(self):\n\n support.create_project(self, 'bloomington')\n support.add_step(self, 'first')\n\n project = cauldron.project.get_internal_project()\n step = project.steps[0]\n\n with patch.object(project, 'remove_step') as remove_step:\n remove_step.return_value = None\n r = step_actions.modify_step(Response(), project, step.filename)\n\n self.assertFalse(r.success)\n self.assert_has_error_code(r, 'NO_SUCH_STEP')", "title": "" }, { "docid": "0165c8ec81700c8d0c8c62a5b20fe0fe", "score": "0.49964595", "text": "def test_no_existing_source_file(self):\n support.create_project(self, 'richfield')\n support.add_step(self, 'first')\n\n project = cauldron.project.get_internal_project()\n step = project.steps[0]\n\n self.assertTrue(\n systems.remove(step.source_path),\n 'should have removed source file'\n )\n\n r = step_actions.modify_step(\n response=Response(),\n 
project=project,\n name=step.filename,\n new_name='solo',\n title='Only Step'\n )\n\n new_step = project.steps[0]\n self.assertTrue(r.success)\n self.assertTrue(os.path.exists(new_step.source_path))", "title": "" }, { "docid": "5cbb1e57c1474ffba3744641fb8ca3b8", "score": "0.49818686", "text": "def buildInstallScript(self): \n installData = self.extract('install')\n\n if installData is not None:\n script = \"\"\n \n # We automatically change in the copied directory\n script += str(\"cd \") + self._tempDirAfterSetup + str(\"\\n\")\n \n for cmd in installData:\n script += str(cmd) + str(\"\\n\")\n\n self._script_install = self.convertToFile(script, \"install.sh\")", "title": "" }, { "docid": "582730cde5f4d2199c66b142cbc7fa9c", "score": "0.49579304", "text": "def add_run_folder(self, folder):\n self.run_folder = folder\n self.check_for_read_until_files()", "title": "" }, { "docid": "b94ae6aacc3baebba5395174a8e393db", "score": "0.49433732", "text": "def execute_upgrade_step(self, step):\n request = self.layer['request']\n request.form['profile_id'] = self.profile_id\n request.form['upgrades'] = [step['id']]\n self.setup.manage_doUpgrades(request=request)", "title": "" }, { "docid": "a084fd0c40a44e95e22d806180223061", "score": "0.4924313", "text": "def setup_method(self, method):\n self.work_path = join(save_path, \"Material\")\n # Delete old test if needed\n if isdir(self.work_path):\n rmtree(self.work_path)\n mkdir(self.work_path)", "title": "" }, { "docid": "b6465b84768b72426d662af71869e2a9", "score": "0.4922456", "text": "def add_step(self, step):\n # Add the node to the DAG.\n self.add_node(step.real_name, step)\n LOGGER.info(\n \"Adding step '%s' to study '%s'...\", step.name, self.name)\n # Apply the environment to the incoming step.\n step.__dict__ = \\\n apply_function(step.__dict__, self.environment.apply_environment)\n\n # If the step depends on a prior step, create an edge.\n if \"depends\" in step.run and step.run[\"depends\"]:\n for dependency in step.run[\"depends\"]:\n LOGGER.info(\"{0} is dependent on {1}. 
Creating edge (\"\n \"{1}, {0})...\".format(step.real_name, dependency))\n if \"*\" not in dependency:\n self.add_edge(dependency, step.real_name)\n else:\n self.add_edge(\n re.sub(ALL_COMBOS, \"\", dependency),\n step.real_name\n )\n else:\n # Otherwise, if no other dependency, just execute the step.\n self.add_edge(SOURCE, step.real_name)", "title": "" }, { "docid": "d07275fbb428015fcfead85556af5617", "score": "0.49162108", "text": "def test_scripts_command_inserted():\n pass", "title": "" }, { "docid": "80109850a479f3fa5f044dfffb321331", "score": "0.4904645", "text": "def create(self, addPath):\n import tempfile\n if self.have():\n self.log.info(_(\"Init script {name} already present\").format(\n name = self._name))\n return self.SUCCESS\n\n text = self.getInitScript(addPath)\n tmpf, tmpnam = tempfile.mkstemp()\n try:\n os.write(tmpf, text)\n os.close(tmpf)\n except OSError:\n self.log.error(_(\"Could not create temporary file\"))\n return self.ERROR\n\n try:\n mvCmd = Command(['mv', tmpnam, self._scriptName])\n mvCmd()\n except CommandError:\n self.log.error(_(\"Could not create init script\"))\n os.unlink(tmpnam)\n return self.ERROR\n\n try:\n self.insserv()\n except CommandError:\n self.log.error(_(\"Could not install init script\"))\n return self.ERROR\n # Make sure init script configuration is on persistent storage\n os.system(\"sync\")\n return self.SUCCESS", "title": "" }, { "docid": "73c3745d597e6238f983b41fde78b855", "score": "0.48861125", "text": "def init_script(self):\n logging.info(\"Nuke Script Name is {0}\".format(nuke.root().name()))\n # project settings\n self.set_project_settings()", "title": "" }, { "docid": "c5d7ce37915277562f0eecfa9a13a72f", "score": "0.48859352", "text": "def test_simple_update(self):\n self.environment.directory.add_to_env = Mock()\n self.environment.run_feature(\"simple_update\", \"sync\")\n self.environment.directory.add_to_env.assert_any_call(\"export A=b\")\n self.environment.directory.add_to_env.assert_any_call(\"export C=dd\")\n self.environment.directory.add_to_env.assert_any_call(\"export E=fgh\")", "title": "" }, { "docid": "818b86fe81daa7e43a1d7cc441f94589", "score": "0.48825595", "text": "def install(editable):\n script_dir = str(Path(__file__).parent.abspath())\n click.secho(script_dir, fg='blue')\n if editable:\n sh('pip3 install --editable {0} --upgrade'.format(script_dir))\n else:\n sh('pip3 install {0} --upgrade'.format(script_dir))", "title": "" }, { "docid": "ac6f111ab2c160f5ae6dab697c26c506", "score": "0.4880588", "text": "def step_impl(context):\n # os.system('python3.6 /home/arnab/PythonProjects/restAPI/restAPI.py')\n print('Server up and running')", "title": "" }, { "docid": "2cc729ddc8b4aa5731b2c263e15a8f33", "score": "0.48804852", "text": "def update():\n print(f\"{BANNER}\")\n try:\n with open(os.path.join(OUTPUT_DIR, \"launchr.json\"), \"r\") as f:\n context = json.loads(f.read())\n\n # create a cookiecutter template based on the context provided.\n # use a temporary directory, we are going to sync them later on\n temp_dir = tempfile.mkdtemp()\n cookiecutter(\n template=TEMPLATE_DIR,\n no_input=True,\n extra_context=context,\n overwrite_if_exists=False,\n output_dir=temp_dir\n )\n\n # some directories don't make sense to copy over\n omit_directories = []\n if click.prompt(\n \"Exclude .env/ files? (Recommended)\",\n default=\"y\",\n type=click.BOOL\n ):\n omit_directories += [\".env/\"]\n\n if click.prompt(\n \"Exclude template files? 
(Recommended)\",\n default=\"y\",\n type=click.BOOL\n ):\n omit_directories += [\"templates/\"]\n\n if click.prompt(\n \"Exclude migration files? (Recommended)\",\n default=\"y\",\n type=click.BOOL\n ):\n omit_directories += [\"migrations/\"]\n\n if click.prompt(\n \"Exclude test files? (Recommended)\",\n default=\"y\",\n type=click.BOOL\n ):\n omit_directories += [\"tests/\"]\n raise NotImplementedError\n\n except IOError:\n print(f\"{RED}Error:{RESET} unable to find launchr.json in the project directory.\")\n sys.exit(1)\n except ValueError:\n print(f\"{RED}Error:{RESET} unable to load launchr.json. Make sure it is valid json.\")\n sys.exit(1)", "title": "" }, { "docid": "3786dcbd200e88ea5d80e8f5bfbbfe32", "score": "0.48786873", "text": "def _publishScript(item, entity, context):\n\n ## @todo There is assorted peril here as this may be the same\n # file that is already being used by a previous version of an asset\n # but, if its not a path managing asset system, what can we do?\n\n return entity.registerItem(item, context)", "title": "" }, { "docid": "1b738992b92b23055dcb9bf5d72d7d82", "score": "0.48619395", "text": "def on_replace_plugin_dir_changed(self, event):\n\n if not self.replace_plugin_update:\n pth = event.target\n if pth is not None and os.path.exists(pth):\n self.replace_plugin_update = True\n self.m_replace_textbox.safe_set_value(pth)\n self.replace_plugin_update = False\n event.Skip()", "title": "" }, { "docid": "b48bb8ebe992dee89ec53ddfe477721a", "score": "0.48491406", "text": "def write_file(self, script_name, script_content):\n if not os.path.exists(jamf_info.script_dir):\n os.makedirs(jamf_info.script_dir)\n first_line = script_content.split('\\n', 1)[0]\n if 'bash' in first_line:\n script_name = script_name + '.sh'\n elif 'python' in first_line:\n script_name = script_name + '.py'\n print('Syncing ' + script_name)\n f = open(self.script_dir + script_name, 'w+')\n f.write(script_content)", "title": "" }, { "docid": "2a4ebc42b2ff2ccaebef0d851f31dfdb", "score": "0.48488238", "text": "async def setup_script(hass, notify_q, now, source):\n scripts = [\n \"/some/config/dir/pyscripts/hello.py\",\n ]\n integration = loader.Integration(\n hass,\n \"config.custom_components.pyscript\",\n pathlib.Path(\"config/custom_components/pyscript\"),\n {\n \"name\": \"pyscript\",\n \"dependencies\": [],\n \"requirements\": [],\n \"domain\": \"automation\",\n },\n )\n\n with patch(\n \"homeassistant.loader.async_get_integration\", return_value=integration,\n ), patch(\n \"config.custom_components.pyscript.os.path.isdir\", return_value=True\n ), patch(\n \"config.custom_components.pyscript.glob.iglob\", return_value=scripts\n ), patch(\n \"config.custom_components.pyscript.open\",\n mock_open(read_data=source),\n create=True,\n ), patch(\n \"config.custom_components.pyscript.trigger.dt_now\", return_value=now\n ):\n assert await async_setup_component(hass, \"pyscript\", {})\n\n #\n # I'm not sure how to run the mock all the time, so just force the dt_now()\n # trigger function to return the given list of times in now.\n #\n def return_next_time():\n nonlocal now\n if isinstance(now, list):\n if len(now) > 1:\n return now.pop(0)\n return now[0]\n return now\n\n trigger.__dict__[\"dt_now\"] = return_next_time\n\n if notify_q:\n\n async def state_changed(event):\n var_name = event.data[\"entity_id\"]\n if var_name != \"pyscript.done\":\n return\n value = event.data[\"new_state\"].state\n await notify_q.put(value)\n\n hass.bus.async_listen(EVENT_STATE_CHANGED, state_changed)", "title": "" }, { 
"docid": "a8a1b6591064a835bdb356a90f6e1384", "score": "0.48388514", "text": "def run_step(self):\n pass", "title": "" }, { "docid": "5549263747ff81f3f158c9e4a1819c60", "score": "0.4833667", "text": "def build(self, step, workingDir, **args):\n stepName = nodeName(step)\n stepWorkingArea = \"%s/%s\" % (workingDir, stepName)\n self.installWorkingArea(step, stepWorkingArea)\n print(\"Builders.CMSSW.build called on %s\" % stepName)", "title": "" }, { "docid": "d68164150a5bb4b983adaa2e5c648796", "score": "0.48253542", "text": "def AddRunHooksStep(self, env=None, timeout=None):\n env = env or {}\n env['DEPOT_TOOLS_UPDATE'] = '0'\n if timeout is None:\n # svn timeout is 2 min; we allow 5\n timeout = 60*5\n self._factory.addStep(\n shell.ShellCommand,\n haltOnFailure=True,\n name='runhooks',\n description='gclient hooks',\n env=env,\n locks=[self.slave_exclusive_lock],\n timeout=timeout,\n command=['gclient', 'runhooks'])", "title": "" }, { "docid": "7db1477c3df0d7023fa32cbe35525b4d", "score": "0.48073763", "text": "def write_script(script):\n\n if len(script['steps']) < 1:\n print(\"No steps found. Exiting.\")\n sys.exit()\n\n print(f\"Generating script for {script['name']}.\")\n print(f\"Outputting to {script['filename']}.\")\n print(\"Script steps:\")\n script['code_body'] = ''\n script['execname'] = script['name']\n\n for i, step in enumerate(script['steps']):\n print(f\"{i+1}. {step}\")\n script['code_body'] += f\"{myh.tabspace}print(f' {i+1}. {step}')\\n\"\n\n # TODO add in render with parts from script dict\n template = jinja2.Template(myh.file_template)\n donothing_script = template.render(**script)\n print(donothing_script)\n\n with open(script[\"filename\"], 'w') as f:\n f.write(donothing_script)", "title": "" }, { "docid": "fe96173a9530011adfa47ee0d5890ee3", "score": "0.47896266", "text": "def create_suite():\n click.echo(\"Updating test files...\")\n create_all()\n click.echo(\"Done.\")", "title": "" }, { "docid": "7d6d60551b055818ca891df3a29d6e67", "score": "0.47727522", "text": "def upload_environment(\n client: python_pachyderm.Client, repo: str, config: PpsConfig, script: bytes\n) -> str:\n\n def entrypoint():\n \"\"\"Entrypoint used by the PPS extension.\"\"\"\n print(\"Greetings from the Pachyderm PPS Extension\")\n\n from pathlib import Path\n from subprocess import run\n\n reqs = Path(__file__).parent.joinpath(\"requirements.txt\")\n if reqs.exists():\n run([\"pip\", \"--disable-pip-version-check\", \"install\", \"-r\", reqs.as_posix()])\n print(\"Finished installing requirements\")\n \n print(\"running user code\")\n import user_code # This runs the user's code.\n\n entrypoint_script = (\n f'{dedent(getsource(entrypoint))}\\n\\n'\n 'if __name__ == \"__main__\":\\n'\n ' entrypoint()\\n'\n )\n project = config.pipeline.project.name\n with client.commit(repo, \"master\", project_name=project) as commit:\n # Remove the old files.\n for item in client.list_file(commit, \"/\"):\n client.delete_file(commit, item.file.path)\n\n # Upload the new files.\n client.put_file_bytes(commit, f\"/user_code.py\", script)\n if config.requirements:\n with open(config.requirements, \"rb\") as reqs_file:\n client.put_file_bytes(commit, \"/requirements.txt\", reqs_file)\n client.put_file_bytes(commit, \"/entrypoint.py\", entrypoint_script.encode('utf-8'))\n\n # Use the commit ID in the branch name to avoid name collisions.\n branch_name = f\"commit_{commit.id}\"\n client.create_branch(repo, branch_name, commit, project_name=project)\n\n return branch_name", "title": "" }, { "docid": 
"a39029c35a11da39f81af260a0bee690", "score": "0.4759596", "text": "def create_setup_script(sourcerepo):\n if isinstance(sourcerepo, (str,unicode)):\n repo = SourceRepository.objects.get(name=sourcerepo)\n elif isinstance(sourcerepo, int):\n repo = SourceRepository.objects.get(pk=sourcerepo)\n elif isinstance(sourcerepo, SourceRepository):\n repo = sourcerepo\n tpl = loader.get_template('setup_py.txt')\n \n site = Site.objects.get_current()\n authors = repo.owners()\n try:\n version = repo.metadata_set.get(key=VERSION_KEY).value\n except:\n version = '0.0.1'\n description = repo.summary or ''\n long_description = repo.description or ''\n context = Context({\n 'name': repo.name,\n 'version': version,\n 'description': mkdn2rest(description),\n 'long_description': mkdn2rest(long_description),\n 'author': \", \".join([author.get_full_name() for author in authors]),\n 'author_email': \", \".join([author.email for author in authors]),\n 'url': \"%s%s\" % (site.domain,repo.get_absolute_url()),\n 'classifications': [md.key for md in repo.metadata_set.all() if md.key != VERSION_KEY],\n })\n return tpl.render(context)", "title": "" }, { "docid": "04ce02e2d43683b9365031915be98522", "score": "0.4759242", "text": "def new_folder(self):\r\n datalist = [(translate('Explorer', 'Folder name'), ''),\r\n (translate('Explorer', 'Python package'), False),]\r\n answer = fedit( datalist, title=translate('Explorer', \"New folder\"),\r\n parent=self, icon=get_icon('spyder.svg') )\r\n if answer is not None:\r\n dirname, pack = answer\r\n dirname = osp.join(self.get_dirname(), dirname)\r\n try:\r\n os.mkdir(dirname)\r\n except EnvironmentError, error:\r\n QMessageBox.critical(self,\r\n translate('Explorer', \"New folder\"),\r\n translate('Explorer',\r\n \"<b>Unable to create folder <i>%1</i></b>\"\r\n \"<br><br>Error message:<br>%2\") \\\r\n .arg(dirname).arg(str(error)))\r\n finally:\r\n if pack:\r\n create_script( osp.join(dirname, '__init__.py') )\r\n self.refresh_folder(osp.dirname(dirname))", "title": "" }, { "docid": "e752966ab2cb1ad70312573b21e8a82c", "score": "0.47574785", "text": "def script_update_version(root_dir, script, template, tag):\n filename = os.path.join(root_dir, 'scripts', script)\n\n with open(filename) as dev_file:\n lines = dev_file.readlines()\n\n output = []\n for line in lines:\n line = template.sub(r'\\g<1>{}\\g<2>'.format(tag), line)\n\n output.append(line)\n\n # Write updated Dockerfile.\n with open(filename, 'w') as dev_file:\n dev_file.write(''.join(output))", "title": "" }, { "docid": "2516036817cfa03decda88e5eaede2f6", "score": "0.47493583", "text": "def update_transformer_script(self, script=None):\n self.transformer_script = script", "title": "" }, { "docid": "45898db9b506750ec637d004bf3c7e0c", "score": "0.47445706", "text": "def edit_test_script_status(self, test_run_key, test_case_key, step, status, **kwargs):\n self.logger.debug(\"edit_test_script_status(\\\"%s\\\", \\\"%s\\\", \\\"%i\\\", \\\"%s\\\")\", test_run_key, test_case_key, step, status)\n\n comment = kwargs.pop(\"comment\", None)\n environment = kwargs.pop(\"environment\", None)\n\n assert not kwargs, \"Unknown arguments: %r\" % kwargs\n\n test_result = self.get_test_result(test_run_key, test_case_key)\n\n script_results = test_result.get(\"scriptResults\", [])\n\n for script_result in script_results:\n # keep relevant fields only (to make PUT pass)\n for key in list(script_result.keys()):\n if key not in [\"index\", \"status\", \"comment\"]:\n script_result.pop(key)\n\n # update given step\n if 
script_result.get(\"index\") == step - 1:\n script_result[\"status\"] = status\n if comment is not None:\n script_result[\"comment\"] = comment\n\n request_url = self.adaptavist_api_url + \"/testrun/\" + test_run_key + \"/testcase/\" + test_case_key + \"/testresult\"\n\n request_data = {}\n request_data[\"environment\"] = environment\n request_data[\"executedBy\"] = get_executor()\n request_data[\"assignedTo\"] = request_data[\"executedBy\"]\n request_data[\"status\"] = test_result.get(\"status\") # mandatory, to keep test result status unchanged\n request_data[\"scriptResults\"] = script_results\n\n try:\n request = requests.put(request_url,\n auth=self.authentication,\n headers=self.headers,\n data=json.dumps(request_data))\n request.raise_for_status()\n except HTTPError as ex:\n # HttpPut: in case of status 400 request.text contains error messages\n self.logger.error(\"request failed. %s %s\", ex, request.text)\n return None\n except (requests.exceptions.ConnectionError, requests.exceptions.RequestException) as ex:\n self.logger.error(\"request failed. %s\", ex)\n return None\n\n response = request.json()\n\n return response[\"id\"]", "title": "" }, { "docid": "3e3c26fd40805f066d94223d920ed25d", "score": "0.4737971", "text": "def post_task_hook(self):\n\n self.git_add(self.destination)", "title": "" }, { "docid": "8f3a0ff1547019e1e607de18961d78c9", "score": "0.4737877", "text": "def test_nameless_step(self):\n\n support.create_project(self, 'columbia-heights')\n project = cauldron.project.get_internal_project()\n project.naming_scheme = None\n\n support.add_step(self)\n support.add_step(self)\n\n with patch('cauldron.session.naming.explode_filename') as func:\n func.return_value = dict(\n extension='py',\n index=1,\n name=''\n )\n r = step_actions.modify_step(\n response=Response(),\n project=project,\n name=project.steps[0].filename,\n new_name='',\n position=2\n )\n\n self.assertFalse(r.failed)", "title": "" }, { "docid": "068f63b89fe3ddc9b78b6a3aeaef1f7e", "score": "0.47315612", "text": "def fixture_sleep_script(tmpdir):\n script_path = tmpdir.join(\"sleep_script.py\")\n script_path.write(\n \"\"\"\nimport time\n\ndef init(subarray_id):\n pass\n\ndef main(secs):\n time.sleep(secs)\n\"\"\"\n )\n return FileSystemScript(f\"file://{str(script_path)}\")", "title": "" }, { "docid": "3346b170f50198308be57abc46a44170", "score": "0.47309467", "text": "def script(self):\n pass", "title": "" }, { "docid": "2293388d62c1d730d091141b2c73de7c", "score": "0.47220066", "text": "def main():\n jss_sync = JamfSync(jamf_info.script_api_url, jamf_info.script_dir,\n jamf_info.api_user, jamf_info.api_pw)\n\n jss_sync.get_script_catalog(jamf_info.resource_url + 'scripts')\n for script in jss_sync.script_catalog:\n id = str(script['id'])\n script_url = jamf_info.url + id\n script_name, script_content = jss_sync.get_script_info(script_url)\n jss_sync.write_file(script_name, script_content)", "title": "" }, { "docid": "d73f46d014a968e2d2dd754525108228", "score": "0.47209692", "text": "def setup_new_eval(self):\n name = ProjectManager.now\n if self.postfix is not None:\n name = name + \"_\" + self.postfix\n ProjectManager.latest_eval = os.path.join(ProjectManager.eval, name)\n os.makedirs(ProjectManager.latest_eval)", "title": "" }, { "docid": "3df772fc1f6edd37fb63700499f92034", "score": "0.47209638", "text": "def setUp(self):\n self.example_dir = ExampleDirSpiders()\n self.wd = self.example_dir.create_directory()", "title": "" }, { "docid": "e8a77065a82b2386450b483af68e19b1", "score": "0.47204733", "text": 
"def updateState(self, plugin):\n # Get the filename for the new entry\n if plugin.enabled == \"Active\":\n newentry = \"%s.py\" % plugin.name\n else:\n newentry = \"#%s.py\" % plugin.name \n \n if plugin.name in self.all:\n # Plugin exists in the management file\n item = self.all[plugin.name]\n # TODO: Unicode issues with the following line??\n self.text = \"%s%s%s\" % (\n self.text[:item.start()],\n str(newentry),\n self.text[item.end():]) \n else:\n # Plugin doesn't exist - add it at a suitale place\n self.text = \"%s%s\\n%s\" % (\n self.text[:self.manager.start()],\n str(newentry),\n self.text[self.manager.start():])\n \n self.writeFile(g.os_path_join(g.app.loadDir,\"..\",\"plugins\"))", "title": "" }, { "docid": "3cfd51428bd5a3ed99e0b1e5563680a0", "score": "0.4719082", "text": "def update(self, pkg_fullname):", "title": "" }, { "docid": "91291bab6661709c15c36616500cff57", "score": "0.4718498", "text": "def test_writeScript(self):\n testHeaderShell = \"/usr/env/python\"\n testHeaderScheduler = ['-G pam', '-N fam']\n\n testDetailsPath = self.pav_cfg.working_dir/'testPath'\n testDetailsGroup = self._other_group()\n testDetailsPerms = 0o760\n\n testComposer = scriptcomposer.ScriptComposer()\n\n testComposer.header.shell_path = testHeaderShell\n testComposer.header.scheduler_headers = testHeaderScheduler\n\n testComposer.details.path = testDetailsPath\n testComposer.details.group = testDetailsGroup\n testComposer.details.perms = testDetailsPerms\n\n testDir = os.getcwd()\n\n testComposer.write()\n\n self.assertTrue(testDetailsPath.exists())\n\n with testDetailsPath.open() as testFile:\n testLines = testFile.readlines()\n\n for i in range(0, len(testLines)):\n testLines[i] = testLines[i].strip()\n\n self.assertEqual(testLines[0], \"#!/usr/env/python\")\n self.assertEqual(testLines[1], \"# -G pam\")\n self.assertEqual(testLines[2], \"# -N fam\")\n self.assertEqual(testLines[3], \"\")\n self.assertEqual(testLines[4], \"\")\n\n self.assertEqual(len(testLines), 5)\n\n testStat = testDetailsPath.stat()\n expectedStat=stat.S_IFREG + stat.S_IRWXU + stat.S_IRGRP + stat.S_IWGRP\n\n self.assertEqual(testStat.st_mode, expectedStat)\n\n testGID = testStat.st_gid\n\n testGID = grp.getgrgid(testGID).gr_name\n\n self.assertEqual(testGID, testDetailsGroup)\n\n testDetailsPath.unlink()", "title": "" }, { "docid": "e1cc3629afee9c2b68ba414b7d1f98d5", "score": "0.47123078", "text": "def setUp(self):\r\n self.process1 = Process.objects.create(\r\n script=Script.objects.all()[0],\r\n folder=os.path.abspath(os.path.dirname(__file__)),\r\n )", "title": "" }, { "docid": "c68645ec4b624a18e9b5a9c25ff574ce", "score": "0.4706343", "text": "def _writeScriptAsset(item, entity, context):\n\n workingEntity = entity.preflightItem(item, context)\n\n workingPath = workingEntity.resolve()\n if not workingPath:\n raise RuntimeError(\"Entity failed to provide a working path %s\" % entity)\n\n nuke.scriptSave(filename=workingPath)\n\n item.path = workingPath\n\n return workingEntity.registerItem(item, context)", "title": "" }, { "docid": "4b0e4bce21704b48666215592f4ad948", "score": "0.47063345", "text": "def update(self):\n if self.running:\n cur_pos = self.steps_names.index(self.current_step[\"name\"])\n self.steps[cur_pos][\"function\"](self)\n\n next_pos = self.steps_names.index(self.current_step[\"name\"])\n self.current_step[\"first_call\"] = next_pos != cur_pos", "title": "" }, { "docid": "45e605cd6cce269aa7bef4e3552664f8", "score": "0.47056177", "text": "def build_step(self):\n pass", "title": "" }, { "docid": 
"7c8740384ae99b63b22184e2a11eda8d", "score": "0.47039607", "text": "def script(self):\n raise NotImplementedError", "title": "" }, { "docid": "c94cecc0445f96a0a5d3e6a67cedfb42", "score": "0.46981898", "text": "def create_script_file(script_type, script=\"\", entity_context=\"\"):\n\n # Use task context for unique names\n file_name = entity_context\n scripts_dir = get_scripts_dir()\n\n if script_type == \"sh\":\n file_name += \".sh\"\n\n elif script_type == \"npsscript\":\n file_name += \".ps1\"\n\n elif script_type == \"static\":\n file_name += \".py\"\n\n else:\n raise TypeError(\"Script Type {} not supported\".format(script_type))\n\n file_location = os.path.join(scripts_dir, file_name)\n with open(file_location, \"w+\") as fd:\n fd.write(script)\n\n dsl_file_location = \"os.path.join('{}', '{}')\".format(\n get_scripts_dir_key(), file_name\n )\n return dsl_file_location", "title": "" }, { "docid": "879aaa6c03a960a195ae43522d111248", "score": "0.46920487", "text": "def add_install_execution(self):\n build_path = self.create_path(\"build\")\n build = self.root.find(build_path)\n plugins_path = self.create_path(\"plugins\")\n plugins = build.find(plugins_path)\n\n ### make xml wrapper - add plugin, executions ###\n plugin = etree.Element(\"plugin\")\n group_id = etree.SubElement(plugin, 'groupId')\n group_id.text = \"org.apache.maven.plugins\"\n artifact = etree.SubElement(plugin, 'artifactId')\n artifact.text = \"maven-install-plugin\"\n executions = etree.SubElement(plugin, 'executions')\n ### end xml wrapper - add plugin, executions ###\n\n ### change configuration_list\n for execution_id, Configuration in enumerate(self.configuration_list.get_list()):\n file_path = str(self.repo_name) + str(self.path_split_char) + str(Configuration.filename)\n print(\"adding jar: \", file_path)\n executions.append(self.add_install_plugin_to_pom_xml(\n configuration=Configuration, file_path=file_path, execution_id=execution_id)\n )\n # append plugin result\n plugins.append(plugin)\n self.write_to_pom_file(minidom.parseString(etree.tostring(self.root)))", "title": "" }, { "docid": "9a9e87a739bb674b80e9c6affdb64119", "score": "0.4691964", "text": "def addScript(self, script):\n self._addScript(self.data, self.data, script)", "title": "" }, { "docid": "f9d482b97693154cffac94c71b3a66bf", "score": "0.46882093", "text": "def update_scripts(scriptpath):\n for script in [\"onto_metrics_calc.pl\", \"obo_def_comp.pl\", \"obo_track_new.pl\"]:\n subprocess.run(\"rm %s\" % os.path.join(scriptpath, script), shell=True)\n subprocess.run(\"rm %s\" % os.path.join(scriptpath, \"OboModel.pm\"), shell=True)\n\n wget.download((\"https://raw.githubusercontent.com/FlyBase/drosophila-anatomy-developmental-ontology/\"\n \"master/tools/release_and_checking_scripts/releases/%s\" % script),\n os.path.join(scriptpath, script))\n wget.download((\"https://raw.githubusercontent.com/FlyBase/drosophila-anatomy-developmental-ontology/\"\n \"master/tools/perl_modules/releases/OboModel.pm\"),\n os.path.join(scriptpath, \"OboModel.pm\"))", "title": "" }, { "docid": "a7041ca8710c0f6539c44c6fa4d2ce5a", "score": "0.46801165", "text": "def run_steps(properties, stream_engine, step_runner, universe_view,\n emit_initial_properties=False):\n with stream_engine.make_step_stream('setup_build') as s:\n if emit_initial_properties:\n for key in sorted(properties.iterkeys()):\n s.set_build_property(key, json.dumps(properties[key], sort_keys=True))\n\n engine = RecipeEngine(\n step_runner, properties, os.environ, universe_view)\n\n # Create all API 
modules and top level RunSteps function. It doesn't launch\n # any recipe code yet; RunSteps needs to be called.\n api = None\n\n assert 'recipe' in properties\n recipe = properties['recipe']\n\n root_package = universe_view.universe.package_deps.root_package\n run_recipe_help_lines = [\n 'To repro this locally, run the following line from the root of a %r'\n ' checkout:' % (root_package.name),\n '',\n '%s run --properties-file - %s <<EOF' % (\n os.path.join( '.', root_package.relative_recipes_dir, 'recipes.py'),\n recipe),\n ]\n run_recipe_help_lines.extend(\n json.dumps(properties, indent=2).splitlines())\n run_recipe_help_lines += [\n 'EOF',\n '',\n 'To run on Windows, you can put the JSON in a file and redirect the',\n 'contents of the file into run_recipe.py, with the < operator.',\n ]\n\n with s.new_log_stream('run_recipe') as l:\n for line in run_recipe_help_lines:\n l.write_line(line)\n\n # Find and load the recipe to run.\n try:\n recipe_script = universe_view.load_recipe(recipe, engine=engine)\n s.write_line('Running recipe with %s' % (properties,))\n\n api = loader.create_recipe_api(\n universe_view.universe.package_deps.root_package,\n recipe_script.LOADED_DEPS,\n recipe_script.path,\n engine,\n recipe_test_api.DisabledTestData())\n\n s.add_step_text('running recipe: \"%s\"' % recipe)\n except (loader.LoaderError, ImportError, AssertionError) as e:\n for line in str(e).splitlines():\n s.add_step_text(line)\n s.set_step_status('EXCEPTION')\n return result_pb2.Result(\n failure=result_pb2.Failure(\n human_reason=str(e),\n exception=result_pb2.Exception(\n traceback=traceback.format_exc().splitlines()\n )))\n\n # The engine will use step_runner to run the steps, and the step_runner in\n # turn uses stream_engine internally to build steam steps IO.\n return engine.run(recipe_script, api)", "title": "" }, { "docid": "877b7888c43b20136f8e6cd04d8c679e", "score": "0.4670764", "text": "def test_update_with_locally_modified_file(project_with_template, rendered_template_with_update, with_injection):\n (project_context.path / \"requirements.txt\").write_text(\"Local modification\")\n\n with with_injection():\n actions = get_file_actions(\n rendered_template=rendered_template_with_update, template_action=TemplateAction.UPDATE, interactive=False\n )\n\n assert FileAction.KEEP == actions[\"requirements.txt\"]", "title": "" }, { "docid": "8d6a494eed67a219de6ee9f82fc92903", "score": "0.4666327", "text": "def test_steps_muting(self):\n support.create_project(self, 'larry')\n\n support.run_command('steps add first.py')\n\n r = support.run_command('steps mute S02-first.py')\n self.assertFalse(r.failed, 'should not have failed')\n\n r = support.run_command('steps unmute S02-first.py')\n self.assertFalse(r.failed, 'should nto have failed')", "title": "" }, { "docid": "a943caa82beaaaca8457906241cc10e6", "score": "0.46558276", "text": "def update(self):\n random_agent = self.model.get_random_agent()\n rule = random_agent.rule\n #log.info(\"entering copying step %s with agent %s\", self._timestep, random_agent)\n rule.step(random_agent, self._timestep)\n\n #log.info(\"entering mutation rule\")\n # choose a different random agent, pass it to the innovation rule and see if it triggers this timestep\n self.innovation_rule.step(self.model.get_random_agent(), self._timestep)\n\n # increment the time in our dynamics\n self._timestep += 1\n return self._timestep", "title": "" }, { "docid": "7d973c7e0e121a69aa6061e007c1b328", "score": "0.46553344", "text": "def buildSetupScript(self): \n path = 
self.extract('path')\n git = self.extract('git')\n\n if git != None and path != None:\n error.fatal(\"package file invalid: 'path' and 'git' are mutually \"\\\n \"exclusive\")\n \n script = \"\"\n dest = os.path.join(str(self._tempDir), str(self._packageName))\n if path:\n script += str(\"cp -r \") + str(path) + str(\" \") + str(dest)\n elif git:\n script += str(\"git clone '\") + str(git) + str(\"' \") + str(dest)\n else:\n error.fatal(\"package file invalid: neither 'path' nor 'git' are\"\\\n \" provided\")\n \n self._tempDirAfterSetup = dest\n self._script_setup = self.convertToFile(script, \"setup.sh\")", "title": "" }, { "docid": "632036faf91310901441a9618239976f", "score": "0.4653602", "text": "def test_withRelativeTrialPath(self):\n self.assertDestinationAdded(\"./trial\")", "title": "" }, { "docid": "f52ab4c9adfd63faa10d7964cdc43616", "score": "0.4649928", "text": "def _resetup_step_dir(self, dest_dir, config_yaml):\n create_directory(dest_dir, exist_ok=True)\n create_directory(os.path.join(dest_dir, \"slurm_log\"), exist_ok=True)\n\n create_from_tpl(\n src_path=os.path.join(os.path.dirname(__file__), \"tpls\", \"step_config.yaml\"),\n dest_path=os.path.join(dest_dir, CONFIG_FILENAME),\n format_args={\"step_name\": self.step, \"step_version\": 1, \"config_subdir\": CONFIG_SUBDIR},\n message=\"creating step-wide configuration in {path}\",\n message_args={\"path\": os.path.join(dest_dir, CONFIG_FILENAME)},\n )\n\n create_from_tpl(\n src_path=os.path.join(os.path.dirname(__file__), \"tpls\", FILENAME_PIPELINE_JOB_SH),\n dest_path=os.path.join(dest_dir, FILENAME_PIPELINE_JOB_SH),\n format_args={\n \"line_m\": (\n \"##SBATCH --mail-type ALL\" if not self.args.email else \"#SBATCH --mail-type ALL\"\n ),\n \"line_M\": (\n \"##SBATCH --mail-user [email protected]\"\n if not self.args.email\n else \"##SBATCH --mail-user {}\".format(self.args.email)\n ),\n \"partition\": self.args.partition,\n \"step_name\": self.step,\n },\n message=\"creating SGE job shell file in {path}\",\n message_args={\"path\": os.path.join(dest_dir, FILENAME_PIPELINE_JOB_SH)},\n )", "title": "" }, { "docid": "f9b91c5c9089a8ff8f8756e5a19a3bf0", "score": "0.46468812", "text": "def __update_task(self):\n try:\n temp_var = snakemake.input[0]\n except NameError:\n Update(\"Guppy\", self.iteration, self.number_of_files)\n self.iteration += 1", "title": "" }, { "docid": "89d768b47aa86e59b080e9015042e201", "score": "0.46356544", "text": "def run_task(self, fw_spec):\n print 'ho modificato!!!!'\n pass", "title": "" }, { "docid": "b37de9432782f58e6636328de4537d57", "score": "0.46223196", "text": "def test_update_file():\n fileid=4\n filecontent = \"import testupdated\"\n\n print(dm.update_file(fileid, filecontent))", "title": "" }, { "docid": "646527891aa0fec499d0ea98acec0a4f", "score": "0.4621585", "text": "def configure_step(self):\n pass", "title": "" }, { "docid": "0b5b2daa5009966f78bd4f6380c58901", "score": "0.46199626", "text": "def add_definition_to_solution(sln: str, definition: str):\n projects = list_projects_in_solution(sln)\n for project in projects:\n add_definition_to_project(project, definition)", "title": "" }, { "docid": "49f03dc6089567dc5a181ceb3e53549f", "score": "0.46162263", "text": "def InScriptRunnerCommand():\r\n pass", "title": "" }, { "docid": "952abf7c20b53388b65c339340e35dd0", "score": "0.46126872", "text": "def generate_script(\n repo_dir,\n output_dir,\n path,\n id,\n env,\n map,\n scenario,\n alg,\n cur,\n joint_curiosity,\n curiosity_state_rep_size,\n count_key_dim,\n eta,\n seed,\n lr,\n 
curiosity_lr,\n dropout_p,\n partial_observable,\n sparse_rewards,\n no_rewards,\n no_exploration\n):\n if env == \"mape\":\n assert(scenario is not None)\n run_name = env + \"_\" + scenario\n else:\n assert(map is not None)\n run_name = env + \"_\" + map\n run_name += \"_seed\" + str(seed) + \"_\" + alg\n if cur is not None:\n run_name += \"_\" + str(cur)\n if joint_curiosity:\n run_name += \"_joint\"\n if partial_observable:\n run_name += \"_partialobservable\"\n if sparse_rewards:\n run_name += \"_sparserewards\"\n if no_rewards:\n run_name += \"_norewards\"\n if no_exploration:\n run_name += \"_noexploration\"\n run_name += \"_id\" + str(id)\n\n job_name = \"job_\" + run_name\n job_path = os.path.join(path, job_name + \".sh\")\n output_path = os.path.join(output_dir, job_name + \".log\")\n with open(job_path, \"w\") as job_file:\n parameter_chain = \"\"\n parameter_chain += \"--alg=%s \" % alg\n if cur is not None:\n parameter_chain += \"--curiosity=%s \" % cur\n if joint_curiosity:\n parameter_chain += \"--joint_curiosity \"\n if curiosity_state_rep_size is not None:\n parameter_chain += \"--curiosity_state_rep_size=%s \" % curiosity_state_rep_size\n if count_key_dim is not None:\n parameter_chain += \"--count_key_dim=%s \" % count_key_dim\n if eta is not None:\n parameter_chain += \"--eta=%d \" % eta\n if seed is not None:\n parameter_chain += \"--seed=%d \" % seed\n if lr is not None:\n parameter_chain += \"--lr=%f \" % lr\n if curiosity_lr is not None:\n parameter_chain += \"--curiosity_lr=%f \" % curiosity_lr\n if dropout_p is not None:\n parameter_chain += \"--dropout_p=%f \" % dropout_p\n if partial_observable:\n parameter_chain += \"--partial_observable \"\n if sparse_rewards:\n parameter_chain += \"--sparse_rewards \"\n if no_rewards:\n parameter_chain += \"--no_rewards \"\n if no_exploration:\n parameter_chain += \"--no_exploration \"\n\n parameter_chain += \"--run=%s\" % run_name\n\n job_file.write(\"cd \" + repo_dir + \"\\n\")\n pipe_output = \" > \" + output_path + \" 2>&1\"\n if env == \"mape\":\n job_file.write(\"python3 mape_train.py --scenario=\" + str(scenario) + \" \" + parameter_chain + pipe_output + \"\\n\")\n elif env == \"smac\":\n job_file.write(\"python3 smac_train.py --map=\" + str(map) + \" \" + parameter_chain + pipe_output + \"\\n\")\n job_file.write(\"cd \" + path + \"\\n\")\n\n return job_path", "title": "" }, { "docid": "9f251b62357096ed1c7aeaf0000a36f1", "score": "0.46106994", "text": "def update_manifest(ctx):\n spec = json.load(open('manifest.json', 'r'))\n sys.path.append(os.path.dirname(__file__))\n spec['version'] = __import__(\"hwfloe_mod\").__version__\n json.dump(spec, open('manifest.json', 'w'))", "title": "" }, { "docid": "0b5c5ea9d3912faebf858ec097c7439a", "score": "0.46096626", "text": "def add_script(self):\n\n args = {}\n webbrowser.open(url_with_params(\"scriptable:///add\", args))", "title": "" }, { "docid": "64ac8eb3b21faf1d77132f70099b4ecf", "score": "0.46061668", "text": "def InScriptRunnerCommand():\r\n pass", "title": "" }, { "docid": "68581fdcbac50985905786868ae875f3", "score": "0.46027097", "text": "def update_folder(self):\n\t\t# import os.path\n\t\tfrom os import mkdir\n\n\t\tif settings.SHINY_LOCAL_ENABLE:\n\t\t\tsafe_rm(self._folder_path_base, ignore_errors=True)\n\t\t\tmkdir(self._folder_path_base, self.FS_ACL)\n\t\t\tmkdir(self._link_holder_path(), self.FS_ACL)\n\t\t\tmkdir(self.folder_path, self.FS_ACL)\n\t\t\tmkdir('%s%s/' % (self.folder_path, self.RES_FOLDER), self.FS_ACL)\n\n\t\tif 
self.make_remote_too:\n\t\t\tsafe_rm(self.__folder_path_base_gen(True), ignore_errors=True)\n\t\t\tmkdir(self.__folder_path_base_gen(True), self.FS_REMOTE_ACL)\n\t\t\tmkdir(self.report_link('', remote=True), self.FS_REMOTE_ACL)\n\t\t\tmkdir(self.__folder_path_gen(True), self.FS_REMOTE_ACL)\n\t\t\tmkdir('%s%s/' % (self.__folder_path_gen(True), self.RES_FOLDER), self.FS_REMOTE_ACL)", "title": "" }, { "docid": "b231e785c9f16ba834a87a44ffe91a23", "score": "0.4600287", "text": "def add_line_to_ddns_update(step, line):\n world.nsupdate_commands.append(line)", "title": "" }, { "docid": "093657cdec820f9a7f1ea27774d57f27", "score": "0.4582384", "text": "def get_set_up_script(self, workspace, deploy):\n return ''", "title": "" }, { "docid": "b1235410fb1381012a4a659fe0e1ceb4", "score": "0.456224", "text": "def _gen_script(script, directory):\r\n with open(script, 'wb') as f:\r\n f.write('CD /D %s\\n' % directory)", "title": "" }, { "docid": "47129fc6d827d468c425b697daf984f4", "score": "0.45564333", "text": "def test_update_plugin(self):\n pass", "title": "" }, { "docid": "eedf11a564c60a6a8d5d2aa7556d2b8f", "score": "0.4556287", "text": "def update(self, name: str, path: Path) -> None:", "title": "" }, { "docid": "741dbb54391b159b7f196eb66892e6f7", "score": "0.45541054", "text": "def step_function(self):\n pass", "title": "" }, { "docid": "cee891e1689cc4297a18d20a415efa7a", "score": "0.4551406", "text": "def install_and_run_script(self,script:str,script_path:str):\n self.upload(script,script_path)\n # make executable\n self.dc.container.execute([\"chmod\",\"+x\",script_path])\n self.dc.container.execute([script_path],tty=True)", "title": "" }, { "docid": "257bfd99bd9b69dd399503637733352b", "score": "0.45490015", "text": "def __update_task(self):\n try:\n temp_var = snakemake.input[0]\n except NameError:\n Update(\"MiniMap2\", self.iteration, self.number_of_files)\n self.iteration += 1", "title": "" }, { "docid": "fe7d1516b7d7a5b5d072ad0c68e8eabb", "score": "0.4548365", "text": "def installPlugin(self):\n\n filename = filedialog.askopenfilename(defaultextension='.py\"',\n initialdir=os.getcwd(),\n filetypes=[(\"python\",\"*.py\")],\n parent=self.main)\n if filename:\n import shutil\n shtutil.copy(filename, self.pluginpath)\n self.updatePluginMenu()\n return", "title": "" }, { "docid": "2b8600d8e050ab4076274d66dbd58893", "score": "0.45409134", "text": "def test_step(self):\n pass", "title": "" } ]
e65d8166479645f4b886344fd1e0c04b
Convert a pair of rgb and depth map to colored point cloud
[ { "docid": "7349737b67d3843bc44a0a1fc241a082", "score": "0.6349855", "text": "def rgbd_to_colored_pc(self, depth, rgb, f_x, f_y, c_x, c_y, cap=100):\r\n\r\n rgb_height, rgb_width, _ = rgb.shape\r\n x_map, y_map = np.meshgrid(np.arange(rgb_width), np.arange(rgb_height))\r\n xyz_rgb = np.concatenate(\r\n [x_map[:, :, None], y_map[:, :, None], depth[:, :, None], rgb],\r\n axis=2\r\n )\r\n xyz_rgb[:, :, 0] = (xyz_rgb[:, :, 0] - c_x) * xyz_rgb[:, :, 2] / f_x\r\n xyz_rgb[:, :, 1] = (xyz_rgb[:, :, 1] - c_y) * xyz_rgb[:, :, 2] / f_y\r\n points = xyz_rgb[:, :, :3].reshape(-1, 3)\r\n colors = xyz_rgb[:, :, 3:].reshape(-1, 3) / 255.\r\n cap_ind = np.logical_and(points[:, 2] < cap, points[:, 2] > 1)\r\n points = points[cap_ind]\r\n colors = colors[cap_ind]\r\n return points, colors", "title": "" } ]
[ { "docid": "a5754be415e7011d3d6a8987eeae038a", "score": "0.7130225", "text": "def world_to_color(params, pcloud, color):\n x, y, z = pcloud[..., 0], pcloud[..., 1], pcloud[..., 2]\n x = x * params['fx_rgb'] / z + params['cx_rgb']\n y = y * params['fy_rgb'] / z + params['cy_rgb']\n x1, xw1, x2, xw2, inv1 = linear_interpolation(x, color.shape[1])\n y1, yw1, y2, yw2, inv2 = linear_interpolation(y, color.shape[0])\n invalid = np.logical_or(inv1, inv2)\n depth_color = color[y1, x1] * xw1 * yw1 + \\\n color[y2, x1] * xw1 * yw2 + \\\n color[y1, x2] * xw2 * yw1 + \\\n color[y2, x2] * xw2 * yw2\n depth_color[invalid] = 0\n return depth_color", "title": "" }, { "docid": "e5f70a0f5854996dae49c7251ee967ea", "score": "0.6833774", "text": "def generate_pointcloud(pil_rgb_img, pil_depth_img, ply_file):\n\n if rgb.size != depth.size:\n raise Exception(\"Color and depth image do not have the same resolution.\")\n if rgb.mode != \"RGB\":\n raise Exception(\"Color image is not in RGB format\")\n if depth.mode != \"I\":\n raise Exception(\"Depth image is not in intensity format\")\n\n points = []\n for v in range(rgb.size[1]):\n for u in range(rgb.size[0]):\n color = rgb.getpixel((u, v))\n Z = depth.getpixel((u, v)) / scalingFactor\n if Z == 0: continue\n X = (u - centerX) * Z / focalLength\n Y = (v - centerY) * Z / focalLength\n points.append(\"%f %f %f %d %d %d 0\\n\" % (X, Y, Z, color[0], color[1], color[2]))\n\n with open(ply_file, \"w\") as ply:\n ply.write(\"ply\\n\" + \\\n \"format ascii 1.0\\n\" + \\\n \"element vertex {}\\n\".format(len(points)) + \\\n \"property float x\\n\" + \\\n \"property float y\\n\" + \\\n \"property float z\\n\" + \\\n \"property uchar red\\n\" + \\\n \"property uchar green\\n\" + \\\n \"property uchar blue\\n\" + \\\n \"property uchar alpha\\n\" + \\\n \"end_header\\n\" + \\\n \"{}\".format(\"\".join(points)))", "title": "" }, { "docid": "c73377cee31e50adc2ef08da0b6c429d", "score": "0.6309965", "text": "def make_point_cloud(image, depth, intrinsics, max_depth=5.0):\n colors = image.permute(1,2,0).view(-1,3)\n colors = colors[...,[2,1,0]] / 255.0\n clr = colors.cpu().numpy()\n\n inv_depth = 1.0 / depth[None,None]\n points = pops.iproj(inv_depth, intrinsics[None,None])\n points = (points[..., :3] / points[..., 3:]).view(-1,3)\n pts = points.cpu().numpy()\n\n # open3d point cloud\n pc = o3d.geometry.PointCloud()\n\n keep = pts[:,2] < max_depth\n pc.points = o3d.utility.Vector3dVector(pts[keep])\n pc.colors = o3d.utility.Vector3dVector(clr[keep])\n\n return pc", "title": "" }, { "docid": "30e6b1677110ffea8597604e38c40af8", "score": "0.6208426", "text": "def XYZ_to_XYZRGB(XYZ_cloud, color):\n XYZRGB_cloud = pcl.PointCloud_PointXYZRGB()\n points_list = []\n\n float_rgb = rgb_to_float(color)\n\n for data in XYZ_cloud:\n points_list.append([data[0], data[1], data[2], float_rgb])\n\n XYZRGB_cloud.from_list(points_list)\n return XYZRGB_cloud", "title": "" }, { "docid": "630aafd9e82fbd1df47fad23d5a40daa", "score": "0.61021364", "text": "def XYZRGB_to_XYZ(XYZRGB_cloud):\n XYZ_cloud = pcl.PointCloud()\n points_list = []\n\n for data in XYZRGB_cloud:\n points_list.append([data[0], data[1], data[2]])\n\n XYZ_cloud.from_list(points_list)\n return XYZ_cloud", "title": "" }, { "docid": "630aafd9e82fbd1df47fad23d5a40daa", "score": "0.61021364", "text": "def XYZRGB_to_XYZ(XYZRGB_cloud):\n XYZ_cloud = pcl.PointCloud()\n points_list = []\n\n for data in XYZRGB_cloud:\n points_list.append([data[0], data[1], data[2]])\n\n XYZ_cloud.from_list(points_list)\n return XYZ_cloud", "title": "" }, { 
"docid": "7114f29c8f1628aba6cce1fbe46053ac", "score": "0.6044905", "text": "def get_color(pts3d, K, image):\n pts2d = K.dot(pts3d.T)\n pts2d /= pts2d[2, :]\n pts2d = pts2d.astype(int)\n colors = image[pts2d[1, :], pts2d[0, :]]\n return colors", "title": "" }, { "docid": "bebe2e83fe29081ecfb20d52aab75b1d", "score": "0.60015565", "text": "def pointcloud_to_rgb(self,pointcloud):\n\t\tpointcloud_rgb = np.array(pointcloud)[:,3:6]\n\t\treturn pointcloud_rgb", "title": "" }, { "docid": "191ad8e833d855a724c94e6609cc5744", "score": "0.5974968", "text": "def color_depths(depth, offset=None, scale=None):\n if offset is None:\n offset = depths.min()\n if scale is None:\n scale = depths.max() - offset\n\n values = ((depth.flatten() - offset) / scale).clamp_(min=0, max=1).to(_color_map_depths.device)\n # for each value, figure out where they fit in in the bincenters: what is the last bincenter smaller than this value?\n lower_bin = (values.view(-1,1) >= _color_map_bincenters.view(1,-1)).max(dim=1)[1]\n lower_bin_value = _color_map_bincenters[lower_bin]\n higher_bin_value = _color_map_bincenters[lower_bin + 1]\n alphas = (values - lower_bin_value) / (higher_bin_value - lower_bin_value)\n colors = _color_map_depths[lower_bin] * (1-alphas).view(-1,1) + _color_map_depths[lower_bin + 1] * alphas.view(-1,1)\n return colors", "title": "" }, { "docid": "6536ba660cd55271a9869e9c0765c0de", "score": "0.59396636", "text": "def pointCloud2Points(pointCloud):\n pc = np.asarray(pointCloud.points)\n\n # Color\n if len(pointCloud.colors) == len(pointCloud.points):\n pc = np.hstack((pc, np.asarray(pointCloud.colors)))\n\n return pc", "title": "" }, { "docid": "ea559f3059eec82456e76deaafed8873", "score": "0.5933892", "text": "def XYZRGB_to_XYZ(XYZRGB_cloud):\n \tXYZ_cloud = pcl.PointCloud()\n \tpoints_list = []\n\n \tfor data in XYZRGB_cloud:\n points_list.append([data[0], data[1], data[2]])\n\n \tXYZ_cloud.from_list(points_list)\n \treturn XYZ_cloud", "title": "" }, { "docid": "30f37bf3f52836a57233340d0c1d9c3e", "score": "0.5890032", "text": "def depth_to_img(self, depth):\r\n vis_depth = 1 / (depth + 1e-3)\r\n vmax = np.percentile(vis_depth, 90)\r\n normalizer = mpl.colors.Normalize(vmin=0, vmax=vmax)\r\n mapper = mpl.cm.ScalarMappable(norm=normalizer, cmap='magma')\r\n colormapped_img = (mapper.to_rgba(vis_depth)[:, :, :3] * 255).astype(np.uint8)\r\n\r\n return colormapped_img", "title": "" }, { "docid": "b7439e68d455ea35d0f25f2838b8c567", "score": "0.5843018", "text": "def get_associated_colors(points_on_image: np.ndarray, src_image: np.ndarray) -> np.ndarray:\n src_colors = np.zeros((points_on_image.shape[0], 4), dtype=points_on_image.dtype)\n src_colors[:, :3] = src_image[points_on_image[:, 1], points_on_image[:, 0]]\n # Copies over point indices\n src_colors[:, 3] = points_on_image[:, 4]\n return src_colors", "title": "" }, { "docid": "57fe30cfb9e4a7a1ff3f134efe64d7a4", "score": "0.5839135", "text": "def map_to_color(map, crange, args):\n\tmap = ((map.T-crange[0])/(crange[1]-crange[0])).T # .T ensures broadcasting for rgb case\n\tif args.reverse_color: map = 1-map\n\tif args.rgb: m_color = colorize.colorize(map, desc=args.color, driver=args.method, mode=args.rgb_mode)\n\telse: m_color = colorize.colorize(map[0], desc=args.color, driver=args.method)\n\tm_color = enmap.samewcs(np.rollaxis(m_color,2), map)\n\treturn m_color", "title": "" }, { "docid": "51fc12b5a63e704abe005e83af1871f1", "score": "0.57770497", "text": "def calculate_color_features(data):\n\n rgb = rescale_intensity(data[:, 3:6], 
out_range=\"uint8\").astype(np.uint8)\n lab = rgb2lab(np.array([rgb]))[0].reshape(-1, 3)\n ngrdvi = calculate_ngrdvi(data).reshape(-1, 1)\n return np.hstack([lab[:, 1:3], ngrdvi])", "title": "" }, { "docid": "5d99c21d0e9389155bb67b7205ebd9ae", "score": "0.573266", "text": "def draw_point_cloud_color(points_history_coord, color_map, stage_index):\n for key in points_history_coord.keys():\n point = Point(points_history_coord[key][stage_index][0],\n points_history_coord[key][stage_index][1],\n points_history_coord[key][stage_index][2]\n )\n p = PointArtist(point, name=key.strip(\"()\"), color=color_map[key][stage_index], layer='Stage_'+str(stage_index))\n p.draw()", "title": "" }, { "docid": "0b335535a9cca17fe7a2048128185e67", "score": "0.5694677", "text": "def data_function_color_depth_and_color_gt(self, element):\n # decide on a central view\n center_view = random.randint(0, self.adapter.nr_views-1)\n\n # get the central image, estimate and GT depth map\n center_image = self.cache.get(self.adapter.get_single_image, (element, center_view))\n center_estimate = self.cache.get(self.adapter.get_single_depth_map, (element, center_view, False))\n center_depth_map = self.cache.get(self.adapter.get_single_depth_map, (element, center_view, True))\n\n return ((center_image[None], center_estimate[None]), (center_image[None], center_depth_map[None],))", "title": "" }, { "docid": "8ec2743fd624cff929d46565a105abfe", "score": "0.5694155", "text": "def points2PointCloud(points):\n if isinstance(points, o3d.geometry.PointCloud):\n return points\n\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(points[..., 0:3])\n if points.shape[-1] > 3:\n pcd.colors = o3d.utility.Vector3dVector(points[..., 3:6])\n return pcd", "title": "" }, { "docid": "52a13372575e149a42868db1b5f6769e", "score": "0.5668824", "text": "def _pascal_color_map(N=256, normalized=False):\n def bitget(byteval, idx):\n return (byteval & (1 << idx)) != 0\n\n dtype = 'float32' if normalized else 'uint8'\n cmap = np.zeros((N, 3), dtype=dtype)\n for i in range(N):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7 - j)\n g = g | (bitget(c, 1) << 7 - j)\n b = b | (bitget(c, 2) << 7 - j)\n c = c >> 3\n\n cmap[i] = np.array([r, g, b])\n\n cmap = cmap / 255 if normalized else cmap\n return cmap", "title": "" }, { "docid": "575107355ce9a01abe83dd841c1697ab", "score": "0.5666342", "text": "def _pascal_color_map(N=256, normalized=False):\n\n def bitget(byteval, idx):\n return (byteval & (1 << idx)) != 0\n\n dtype = 'float32' if normalized else 'uint8'\n cmap = np.zeros((N, 3), dtype=dtype)\n for i in range(N):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7 - j)\n g = g | (bitget(c, 1) << 7 - j)\n b = b | (bitget(c, 2) << 7 - j)\n c = c >> 3\n\n cmap[i] = np.array([r, g, b])\n\n cmap = cmap / 255 if normalized else cmap\n return cmap", "title": "" }, { "docid": "ecba7f86109e0520bfd723960fa03c55", "score": "0.565362", "text": "def cart2cmap(xyz, cmap):\n \n rthetaphi = cart2sph(xyz)\n phi = rthetaphi[1]\n theta = rthetaphi[2] + 180.0\n rgb = cmap[int(phi), int(theta)]\n\n return rgb", "title": "" }, { "docid": "7d9f246f3ffe2938806696974526cbf0", "score": "0.5648252", "text": "def initColorMap(self):\n\n colors = {\n \"black\": [1, [0.00, 0.00, 0.0]],\n \"lightGrey\": [2, [0.75, 0.75, 0.75]],\n \"darkGrey\": [3, [0.50, 0.50, 0.50]],\n \"fusia\": [4, [0.80, 0.00, 0.20]],\n \"blueDark\": [5, [0.00, 0.00, 0.40]],\n \"blue\": [6, [0.00, 0.00, 1.00]],\n \"green\": [7, [0.00, 0.30, 0.00]],\n 
\"purpleDark\": [8, [0.20, 0.00, 0.30]],\n \"magenta\": [9, [0.80, 0.00, 0.80]],\n \"brownLight\": [10, [0.60, 0.30, 0.20]],\n \"brownDark\": [11, [0.25, 0.13, 0.13]],\n \"orange\": [12, [0.70, 0.20, 0.00]],\n \"red\": [13, [1.00, 0.00, 0.00]],\n \"greenBright\": [14, [0.00, 1.00, 0.00]],\n \"blueMedium\": [15, [0.00, 0.30, 0.60]],\n \"white\": [16, [1.00, 1.00, 1.00]],\n \"yellow\": [17, [1.00, 1.00, 0.00]],\n \"greenBlue\": [18, [0.00, 1.00, 1.00]],\n \"turqoise\": [19, [0.00, 1.00, 0.80]],\n \"pink\": [20, [1.00, 0.70, 0.70]],\n \"peach\": [21, [0.90, 0.70, 0.50]],\n \"yellowLight\": [22, [1.00, 1.00, 0.40]],\n \"turqoiseDark\": [23, [0.00, 0.70, 0.40]],\n \"brownMuted\": [24, [0.60, 0.40, 0.20]],\n \"yellowMuted\": [25, [0.63, 0.63, 0.17]],\n \"greenMuted\": [26, [0.40, 0.60, 0.20]],\n \"turqoiseMuted\": [27, [0.20, 0.63, 0.35]],\n \"blueLightMuted\": [28, [0.18, 0.63, 0.63]],\n \"blueDarkMuted\": [29, [0.18, 0.40, 0.63]],\n \"purpleLight\": [30, [0.43, 0.18, 0.63]],\n \"mutedMagenta\": [31, [0.63, 0.18, 0.40]]\n }\n\n return colors", "title": "" }, { "docid": "994c2038c1381e2878c64648e2d2f1d0", "score": "0.5639653", "text": "def cmap(start=0.5, rot=-1.5, gamma=1.0, reverse=False, nlev=256.,\n minSat=1.2, maxSat=1.2, minLight=0., maxLight=1.,\n **kwargs):\n\n# override start and rot if startHue and endHue are set\n if kwargs is not None:\n if 'startHue' in kwargs:\n start = (kwargs.get('startHue') / 360. - 1.) * 3.\n if 'endHue' in kwargs:\n rot = kwargs.get('endHue') / 360. - start / 3. - 1.\n if 'sat' in kwargs:\n minSat = kwargs.get('sat')\n maxSat = kwargs.get('sat')\n\n# set up the parameters\n fract = np.linspace(minLight, maxLight, int(nlev))\n angle = 2.0 * pi * (start / 3.0 + rot * fract + 1.)\n fract = fract**gamma\n\n satar = np.linspace(minSat, maxSat, int(nlev))\n amp = satar * fract * (1. 
- fract) / 2.\n\n# compute the RGB vectors according to main equations\n red = fract + amp * (-0.14861 * np.cos(angle) + 1.78277 * np.sin(angle))\n grn = fract + amp * (-0.29227 * np.cos(angle) - 0.90649 * np.sin(angle))\n blu = fract + amp * (1.97294 * np.cos(angle))\n\n# find where RBB are outside the range [0,1], clip\n red[np.where((red > 1.))] = 1.\n grn[np.where((grn > 1.))] = 1.\n blu[np.where((blu > 1.))] = 1.\n\n red[np.where((red < 0.))] = 0.\n grn[np.where((grn < 0.))] = 0.\n blu[np.where((blu < 0.))] = 0.\n\n# optional color reverse\n if reverse is True:\n red = red[::-1]\n blu = blu[::-1]\n grn = grn[::-1]\n\n# put in to tuple & dictionary structures needed\n rr = []\n bb = []\n gg = []\n for k in range(0, int(nlev)):\n rr.append((float(k) / (nlev - 1.), red[k], red[k]))\n bb.append((float(k) / (nlev - 1.), blu[k], blu[k]))\n gg.append((float(k) / (nlev - 1.), grn[k], grn[k]))\n\n cdict = {'red': rr, 'blue': bb, 'green': gg}\n return LSC('cubehelix_map', cdict)", "title": "" }, { "docid": "d594647a19d10244c3e06d0d0e42f319", "score": "0.5639222", "text": "def rgb_to_xyz(image: torch.Tensor) -> torch.Tensor:\n r: torch.Tensor = image[..., 0, :, :]\n g: torch.Tensor = image[..., 1, :, :]\n b: torch.Tensor = image[..., 2, :, :]\n\n x: torch.Tensor = 0.412453 * r + 0.357580 * g + 0.180423 * b\n y: torch.Tensor = 0.212671 * r + 0.715160 * g + 0.072169 * b\n z: torch.Tensor = 0.019334 * r + 0.119193 * g + 0.950227 * b\n\n out: torch.Tensor = torch.stack([x, y, z], -3)\n\n return out", "title": "" }, { "docid": "b843d307a05011453c1e4747dbebde62", "score": "0.5637516", "text": "def rgb_colormaps(color):\n\n if color == \"red\":\n cdict = {'red': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),\n\n 'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),\n\n 'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))}\n elif color == \"green\":\n cdict = {'red': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),\n\n 'green': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),\n\n 'blue': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0))}\n elif color == \"blue\":\n cdict = {'red': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),\n\n 'green': ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0)),\n\n 'blue': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))}\n else:\n raise ValueError(\"Wrong color specified. Allowed colors are 'red', 'green', 'blue'.\")\n\n cmap = LinearSegmentedColormap('BlueRed2', cdict)\n\n return cmap", "title": "" }, { "docid": "bf243f4a717f9f8eb77de4d5b165b98d", "score": "0.5599674", "text": "def depth_map_to_3d(self, depth, cam_K, cam_W):\n \n assert(depth.size(1) == 1)\n batch_size, _, N, M = depth.size()\n device = depth.device\n # Turn depth around. 
This also avoids problems with inplace operations\n depth = -depth .permute(0, 1, 3, 2)\n \n zero_one_row = torch.tensor([[0., 0., 0., 1.]])\n zero_one_row = zero_one_row.expand(batch_size, 1, 4).to(device)\n\n # add row to world mat\n cam_W = torch.cat((cam_W, zero_one_row), dim=1)\n\n # clean depth image for mask\n mask = (depth.abs() != float(\"Inf\")).float()\n depth[depth == float(\"Inf\")] = 0\n depth[depth == -1*float(\"Inf\")] = 0\n\n # 4d array to 2d array k=N*M\n d = depth.reshape(batch_size, 1, N * M)\n\n # create pixel location tensor\n px, py = torch.meshgrid([torch.arange(0, N), torch.arange(0, M)])\n px, py = px.to(device), py.to(device)\n\n p = torch.cat((\n px.expand(batch_size, 1, px.size(0), px.size(1)), \n (M - py).expand(batch_size, 1, py.size(0), py.size(1))\n ), dim=1)\n p = p.reshape(batch_size, 2, py.size(0) * py.size(1))\n p = (p.float() / M * 2) \n \n # create terms of mapping equation x = P^-1 * d*(qp - b)\n P = cam_K[:, :2, :2].float().to(device) \n q = cam_K[:, 2:3, 2:3].float().to(device) \n b = cam_K[:, :2, 2:3].expand(batch_size, 2, d.size(2)).to(device)\n Inv_P = torch.inverse(P).to(device) \n\n rightside = (p.float() * q.float() - b.float()) * d.float()\n x_xy = torch.bmm(Inv_P, rightside)\n \n # add depth and ones to location in world coord system\n x_world = torch.cat((x_xy, d, torch.ones_like(d)), dim=1)\n\n # derive loactoion in object coord via loc3d = W^-1 * x_world\n Inv_W = torch.inverse(cam_W)\n loc3d = torch.bmm(\n Inv_W.expand(batch_size, 4, 4),\n x_world\n ).reshape(batch_size, 4, N, M)\n\n loc3d = loc3d[:, :3].to(device)\n mask = mask.to(device)\n return loc3d, mask", "title": "" }, { "docid": "715c86b48daba8f4082c2d93a9597563", "score": "0.5589014", "text": "def create_colors():\n colors_T = {}\n cm_data = np.loadtxt(\"/Users/akobsch/Dropbox/Recherche/PhD-MD-silicates/simulations/scripts/Analysis/ScientificColourMaps6/romaO/romaO.txt\")\n cm_data = cm_data[::-1] #for reverse colors\n new_map = LinearSegmentedColormap.from_list('new', cm_data)\n temperatures = ['T2','T2.5','T3','T3.5','T4','T4.5','T5','T5.5','T6','T6.5','T7','T7.5','T7.7']\n color = iter(new_map(np.linspace(0,1,len(temperatures)))) #Creation of the color list \n for T in temperatures:\n c = next(color)\n colors_T[T] = c \n return colors_T", "title": "" }, { "docid": "0264d3c9fc90de695f7a3a3e2a2dc585", "score": "0.55340934", "text": "def convert_to_color(arr_2d, palette=GLOB.palette):\n arr_3d = np.zeros((arr_2d.shape[0], arr_2d.shape[1], 3), dtype=np.uint8)\n\n for c, i in palette.items():\n m = arr_2d == c\n arr_3d[m] = i\n\n return arr_3d", "title": "" }, { "docid": "f66b151a8e7383c3d807acda09032ba1", "score": "0.5523656", "text": "def get_pixel_map(class_map, colormap):\n \"\"\"\n Function to generate a pixel map (from network output) which can be plotted by OpenCV\n :param network_output: output from network (inference on GPU or NCS)\n :return: an array of tuples to be plotted by OpenCV. The tuples define pixel values\n \"\"\"\n\n # Convert to format suitable for plotting with OpenCV, i.e. 
array of pixels\n pixel_map = np.array([[colormap[class_num] for class_num in row] for row in class_map], dtype=np.uint8)\n return pixel_map", "title": "" }, { "docid": "90aeef890b286ea04904d32f24ce43f2", "score": "0.55092305", "text": "def create_rgb(self):\n nnear = 5\n values = np.isnan(self.point_r) == 0\n red = np.flipud(idw(self.point_x[values], self.point_y[values], self.point_r[values], self.grid_x, self.grid_y,\n nnear).reshape(self.grid_x.shape))\n green = np.flipud(idw(self.point_x[values], self.point_y[values], self.point_g[values], self.grid_x,\n self.grid_y, nnear).reshape(self.grid_x.shape))\n\n blue = np.flipud(idw(self.point_x[values], self.point_y[values], self.point_b[values], self.grid_x,\n self.grid_y, nnear).reshape(self.grid_x.shape))\n\n [red, green, blue] = VegetationIndices.convert_colors(red, green, blue)\n\n rgb = np.array([red, green, blue])\n rgb = np.rollaxis(rgb.T, 2).T\n self.rgb = rgb", "title": "" }, { "docid": "11822c1cf44dc8514376a4274958efe3", "score": "0.54990226", "text": "def mapping(loc,nx,ny,data):\n data -= data.min()\n data *= data.max()\n allpix = np.zeros((ny*nx,4))\n for i in np.arange(len(loc)):\n allpix[loc[i]] = np.array(plt.cm.jet(data[i]))*data[i]\n allpixsquare = np.reshape(allpix,(nx,ny,4))\n return allpixsquare", "title": "" }, { "docid": "f5620533fb4a724fe7a701b2c2fd9af3", "score": "0.54967284", "text": "def readDepthMap(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "324010bd46f3bf028846e6906d76e8d6", "score": "0.5495344", "text": "def convert_to_color(self, arr_2d):\n arr_3d = np.zeros((arr_2d.shape[0], arr_2d.shape[1], 3), dtype=np.uint8)\n\n for c, i in self.palette.items():\n m = arr_2d == c\n arr_3d[m] = i\n\n return arr_3d", "title": "" }, { "docid": "34f936f9963df40222940ec9cbde1501", "score": "0.54952884", "text": "def convert_from_color(arr_3d, n_classes):\n arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8)\n if n_classes == 6:\n palette = {0: (255, 255, 255), # Impervious surfaces (white)\n 1: (0, 0, 255), # Buildings (blue)\n 2: (0, 255, 255), # Low vegetation (cyan)\n 3: (0, 255, 0), # Trees (green)\n 4: (255, 255, 0), # Cars (yellow)\n 5: (255, 0, 0), # Clutter (red)\n 6: (0, 0, 0)} # Undefined (black)\n\n invert_palette = {v: k for k, v in palette.items()}\n for c, i in invert_palette.items():\n m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)\n arr_2d[m] = i\n elif n_classes == 2:\n arr_2d[np.all(arr_3d == (255, 255, 255), axis=2)] = 1\n elif n_classes == 4:\n palette = {0: (255, 0, 0), # Impervious surfaces (white)\n 1: (0, 255, 0), # Buildings (blue)\n 2: (0, 0, 255), # Low vegetation (cyan)\n 3: (0, 0, 0), # Undefined (black)\n }\n invert_palette = {v: k for k, v in palette.items()}\n for c, i in invert_palette.items():\n m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)\n arr_2d[m] = i\n return arr_2d", "title": "" }, { "docid": "5f70dcd5d6351c5043dcc91e6f2cefdd", "score": "0.5493525", "text": "def cloud(arr):\n return _capture_bits(arr, 4, 4)", "title": "" }, { "docid": "c641b18f9abd918bf49e574c5990656a", "score": "0.5482777", "text": "def apply_color_map_2(gray, cmap):\n\n lut = build_lut(cmap)\n lut2 = np.reshape(lut, (256, 1, 3))\n im_color = cv2.applyColorMap(gray, lut2)\n return im_color", "title": "" }, { "docid": "d377d40e35ba343bb0cb7329e3d9265a", "score": "0.5454616", "text": "def convert_to_color(arr_2d, palette=palette):\n arr_3d = np.zeros((arr_2d.shape[0], arr_2d.shape[1], 3), dtype=np.uint8)\n\n for c, i in palette.items():\n m = arr_2d == c\n 
arr_3d[m] = i\n\n return arr_3d", "title": "" }, { "docid": "d377d40e35ba343bb0cb7329e3d9265a", "score": "0.5454616", "text": "def convert_to_color(arr_2d, palette=palette):\n arr_3d = np.zeros((arr_2d.shape[0], arr_2d.shape[1], 3), dtype=np.uint8)\n\n for c, i in palette.items():\n m = arr_2d == c\n arr_3d[m] = i\n\n return arr_3d", "title": "" }, { "docid": "ce4fdf7abe939c03b6d0ccd712da31db", "score": "0.5452771", "text": "def create_cdict(r, g, b):\n i = np.linspace(0, 1, r0.size)\n \n cdict = dict(\n (name, list(zip(i, el/255.0, el/255.0)))\n for el, name in [(r, 'red'), (g, 'green'), (b, 'blue')]\n )\n return cdict", "title": "" }, { "docid": "0e0480f48a42153db8f74fd80f46d89c", "score": "0.54487014", "text": "def print_projection_cv2(points, color, image):\n assert points.shape[1] == 2, points.shape\n\n hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n points = points.astype(np.int32).tolist()\n color = color.astype(np.int32).tolist()\n\n for (x,y),c in zip(points,color):\n if x < 0 or y <0 or x >= hsv_image.shape[1] or y >= hsv_image.shape[0]:\n continue\n cv2.circle(hsv_image, (x, y), 2, (c, 255, 255), -1)\n # cv2.rectangle(hsv_image,(x-1,y-1),(x+1,y+1),(c,255,255),-1)\n\n return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)", "title": "" }, { "docid": "bc4090fdb9f40dc2f2a308ea060077f0", "score": "0.54446167", "text": "def interpolated_colormap(colors, resolution=64, space='lch-ab'):\n import colorsys\n import matplotlib as mpl\n\n colors_inputs = colors\n\n if isinstance(colors_inputs, dict):\n colors_inputs = [(f, c) for f, c in sorted(colors_inputs.items())]\n else:\n if len(colors_inputs[0]) != 2:\n fracs = np.linspace(0, 1, len(colors_inputs))\n colors_inputs = list(zip(fracs, colors_inputs))\n\n # print('colors_inputs = {!r}'.format(colors_inputs))\n import kwplot\n colors = [kwplot.Color(c) for f, c in colors_inputs]\n fracs = [f for f, c in colors_inputs]\n\n basis = np.linspace(0, 1, resolution)\n fracs = np.array(fracs)\n indices = np.searchsorted(fracs, basis)\n indices = np.maximum(indices, 1)\n cpool = []\n\n from colormath import color_conversions\n # FIXME: need to ensure monkeypatch for networkx 2.0 in colormath\n # color_conversions._conversion_manager = color_conversions.GraphConversionManager()\n from colormath import color_objects\n def new_convertor(target_obj):\n source_obj = color_objects.sRGBColor\n def to_target(src_tup):\n src_tup = src_tup[0:3]\n src_co = source_obj(*src_tup)\n target_co = color_conversions.convert_color(src_co, target_obj)\n target_tup = target_co.get_value_tuple()\n return target_tup\n\n def from_target(target_tup):\n target_co = target_obj(*target_tup)\n src_co = color_conversions.convert_color(target_co, source_obj)\n src_tup = src_co.get_value_tuple()\n return src_tup\n return to_target, from_target\n\n def from_hsv(rgb):\n return colorsys.rgb_to_hsv(*rgb[0:3])\n\n def to_hsv(hsv):\n return colorsys.hsv_to_rgb(*hsv[0:3].tolist())\n\n classnames = {\n # 'AdobeRGBColor',\n # 'BaseRGBColor',\n 'cmk': 'CMYColor',\n 'cmyk': 'CMYKColor',\n 'hsl': 'HSLColor',\n 'hsv': 'HSVColor',\n 'ipt': 'IPTColor',\n 'lch-ab': 'LCHabColor',\n 'lch-uv': 'LCHuvColor',\n 'lab': 'LabColor',\n 'luv': 'LuvColor',\n # 'SpectralColor',\n 'xyz': 'XYZColor',\n # 'sRGBColor',\n 'xyy': 'xyYColor'\n }\n\n conversions = {k: new_convertor(getattr(color_objects, v))\n for k, v in classnames.items()}\n\n from_rgb, to_rgb = conversions['hsv']\n from_rgb, to_rgb = conversions['xyz']\n from_rgb, to_rgb = conversions['lch-uv']\n from_rgb, to_rgb = conversions['lch-ab']\n 
from_rgb, to_rgb = conversions[space]\n # from_rgb, to_rgb = conversions['lch']\n # from_rgb, to_rgb = conversions['lab']\n # from_rgb, to_rgb = conversions['lch-uv']\n\n for idx2, b in zip(indices, basis):\n idx1 = idx2 - 1\n f1 = fracs[idx1]\n f2 = fracs[idx2]\n\n c1 = colors[idx1].as01('rgb')\n c2 = colors[idx2].as01('rgb')\n # from_rgb, to_rgb = conversions['lch']\n h1 = np.array(from_rgb(c1))\n h2 = np.array(from_rgb(c2))\n alpha = (b - f1) / (f2 - f1)\n new_h = h1 * (1 - alpha) + h2 * (alpha)\n new_c = np.clip(to_rgb(new_h), 0, 1)\n # print('new_c = %r' % (new_c,))\n cpool.append(new_c)\n\n cpool = np.array(cpool)\n cmap = mpl.colors.ListedColormap(cpool, 'indexed')\n return cmap", "title": "" }, { "docid": "7c3e32c63d7783f3dc7bcc75b27a0815", "score": "0.5444128", "text": "def print_projection_cv2(points, color, image, black_background=False):\n if black_background:\n image = np.zeros_like(image)\n hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n for i in range(points.shape[1]):\n cv2.circle(\n hsv_image, (np.int32(\n points[0][i]), np.int32(\n points[1][i])), 2, (int(\n color[i]), 255, 255), -1)\n\n return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)", "title": "" }, { "docid": "88e888603bcacb8e0ee0600e0997c060", "score": "0.54419863", "text": "def data_function_all_color_depth_and_gt(self, element):\n # get the central image, estimate and GT depth map\n center_images = self.cache.get(self.adapter.get_element_images, (element, ))\n center_estimates = self.cache.get(self.adapter.get_element_depth_maps, (element, False)) \n center_depth_maps = self.cache.get(self.adapter.get_element_depth_maps, (element, True)) \n\n if self.restricted_nr_views != 0:\n center_images = center_images[:self.restricted_nr_views]\n center_estimates = center_estimates[:self.restricted_nr_views]\n center_depth_maps = center_depth_maps[:self.restricted_nr_views]\n\n return ((center_images, center_estimates), (center_depth_maps,))", "title": "" }, { "docid": "1eda237b695ca4ff77b20294001e28b0", "score": "0.5437818", "text": "def get_color_for_depth(depth, max_distance):\n\n cmap = matplotlib.cm.get_cmap(\"plasma\")\n color = cmap(depth / max_distance)\n\n return color[:, 0:3] * 255", "title": "" }, { "docid": "8b3123e71c03eceae694ed43d7cb181d", "score": "0.5425268", "text": "def calc_jab_colors():\n rgb_colors = np.empty((256 ** 3, 3), dtype=np.uint8)\n jab_colors = np.empty((256 ** 3, 3), dtype=np.float32)\n deut_jab_colors = jab_colors.copy()\n prot_jab_colors = jab_colors.copy()\n trit_jab_colors = jab_colors.copy()\n c = 0\n for i in range(256 ** 3):\n r = i % 256\n g = (i // 256) % 256\n b = i // (256 ** 2)\n rgb_linear = color_conversions.sRGB1_to_sRGB1_linear(\n np.array((r / 255, g / 255, b / 255))\n )\n jab = color_conversions.rgb_linear_to_jab(rgb_linear)\n if jab[0] >= MIN_J and jab[0] <= MAX_J:\n rgb_colors[c] = np.array((r, g, b))\n jab_colors[c] = jab\n deut_jab_colors[c] = color_conversions.rgb_linear_to_jab(\n color_conversions.CVD_forward_deuteranomaly(rgb_linear, CVD_SEVERITY)\n )\n prot_jab_colors[c] = color_conversions.rgb_linear_to_jab(\n color_conversions.CVD_forward_protanomaly(rgb_linear, CVD_SEVERITY)\n )\n trit_jab_colors[c] = color_conversions.rgb_linear_to_jab(\n color_conversions.CVD_forward_tritanomaly(rgb_linear, CVD_SEVERITY)\n )\n c += 1\n rgb_colors = rgb_colors[:c]\n jab_colors = jab_colors[:c]\n deut_jab_colors = deut_jab_colors[:c]\n prot_jab_colors = prot_jab_colors[:c]\n trit_jab_colors = trit_jab_colors[:c]\n return rgb_colors, jab_colors, deut_jab_colors, 
prot_jab_colors, trit_jab_colors", "title": "" }, { "docid": "1b6404177eb700086b7674f382ec1963", "score": "0.5421071", "text": "def rgb_to_world(p, depth, K, cam_R, split):\n\n n = p.size(0)\n\n K_ex = torch.cat([K[index].expand(interval[1] - interval[0], -1, -1) for index, interval in enumerate(split)], 0)\n cam_R_ex = torch.cat([cam_R[index].expand(interval[1] - interval[0], -1, -1) for index, interval in enumerate(split)], 0)\n\n x_temp = (p[:, 0] - K_ex[:, 0, 2]) / K_ex[:, 0, 0]\n y_temp = (p[:, 1] - K_ex[:, 1, 2]) / K_ex[:, 1, 1]\n z_temp = 1\n ratio = depth / torch.sqrt(x_temp ** 2 + y_temp ** 2 + z_temp ** 2)\n x_cam = x_temp * ratio\n y_cam = y_temp * ratio\n z_cam = z_temp * ratio\n\n # transform to toward-up-right coordinate system\n x3 = z_cam\n y3 = -y_cam\n z3 = x_cam\n\n p_cam = torch.stack((x3, y3, z3), 1).view(n, 3, 1) # n x 3\n p_world = torch.bmm(cam_R_ex, p_cam)\n return p_world", "title": "" }, { "docid": "3ca0797053a92dcbe03d68687d6bec24", "score": "0.5410481", "text": "def draw_map(a, b, image, images, red=255, green=0, blue=0, ns = 224):\n normal_size = ns\n for i in range(a, a + normal_size, 1):\n for j in range(b, b + normal_size, 1):\n # print(a,b,i,j)\n if images[i - a][j - b] > 0:\n # print(i,j)\n image[i][j][0] = blue\n image[i][j][1] = green\n image[i][j][2] = red\n return image", "title": "" }, { "docid": "ec63be67af8527eb6077a9ab2914af4d", "score": "0.54076976", "text": "def _peaks_colors_from_points(points, colors=None, points_per_line=2):\n num_pnts = len(points)\n num_lines = num_pnts // points_per_line\n colors_are_scalars = False\n global_opacity = 1\n if colors is None or colors == 'rgb_standard':\n # Automatic RGB colors\n colors = np.asarray((0, 0, 0))\n color_array = numpy_to_vtk_colors(np.tile(255 * colors, (num_pnts, 1)))\n elif type(colors) is tuple:\n global_opacity = 1 if len(colors) == 3 else -1\n colors = np.asarray(colors)\n color_array = numpy_to_vtk_colors(np.tile(255 * colors, (num_pnts, 1)))\n else:\n colors = np.asarray(colors)\n if len(colors) == num_lines:\n pnts_colors = np.repeat(colors, points_per_line, axis=0)\n if colors.ndim == 1: # Scalar per line\n color_array = numpy_support.numpy_to_vtk(pnts_colors, deep=True)\n colors_are_scalars = True\n elif colors.ndim == 2: # RGB(A) color per line\n global_opacity = 1 if colors.shape[1] == 3 else -1\n color_array = numpy_to_vtk_colors(255 * pnts_colors)\n elif len(colors) == num_pnts:\n if colors.ndim == 1: # Scalar per point\n color_array = numpy_support.numpy_to_vtk(colors, deep=True)\n colors_are_scalars = True\n elif colors.ndim == 2: # RGB(A) color per point\n global_opacity = 1 if colors.shape[1] == 3 else -1\n color_array = numpy_to_vtk_colors(255 * colors)\n\n color_array.SetName('colors')\n return color_array, colors_are_scalars, global_opacity", "title": "" }, { "docid": "21103b57119b24ee225a6f838f13263b", "score": "0.53914535", "text": "def spatial_map(icc, thr, mode='+'):\n return thr_img(icc_img_to_zscore(icc), thr=thr, mode=mode).get_data()", "title": "" }, { "docid": "687fa58dcb600bfa707c790529ff10e2", "score": "0.5382458", "text": "def map_to_colors(self, mmap, color_map, rgb_arr, orientation=\"UP\"):\n x_len = mmap.shape[0]\n y_len = mmap.shape[1]\n if orientation == \"UP\":\n for row_elem in range(x_len):\n for col_elem in range(y_len):\n rgb_arr[row_elem, col_elem, :] = color_map[mmap[row_elem, col_elem]]\n elif orientation == \"LEFT\":\n for row_elem in range(x_len):\n for col_elem in range(y_len):\n rgb_arr[row_elem, col_elem, :] = color_map[mmap[col_elem, x_len - 1 
- row_elem]]\n elif orientation == \"DOWN\":\n for row_elem in range(x_len):\n for col_elem in range(y_len):\n rgb_arr[row_elem, col_elem, :] = color_map[\n mmap[x_len - 1 - row_elem, y_len - 1 - col_elem]\n ]\n elif orientation == \"RIGHT\":\n for row_elem in range(x_len):\n for col_elem in range(y_len):\n rgb_arr[row_elem, col_elem, :] = color_map[mmap[y_len - 1 - col_elem, row_elem]]\n else:\n raise ValueError(\"Orientation {} is not valid\".format(orientation))\n\n return rgb_arr", "title": "" }, { "docid": "5485dd65e73d395b702d55f4974c87f5", "score": "0.53740644", "text": "def convert_to_world_point(x, y, depth_map, \n image_sensor_width=32.0*1e-3,\n image_sensor_height = 18.0*1e-3,\n focal_length = 10.0*1e-3):\n image_center_x = 1024 / 2.0 #depth_map.shape[1] / 2.0\n image_center_y = 512 / 2.0 # depth_map.shape[0] / 2.0\n px_x = x - image_center_x\n px_z = image_center_y - y\n\n sensor_x = px_x * (image_sensor_width / 1024)\n sensor_z = px_z * (image_sensor_height / 512)\n \n d = depth_map[y, x]\n world_y = d\n world_x = (world_y * sensor_x) / focal_length\n world_z = (world_y * sensor_z) / focal_length\n \n return (world_x, world_y, world_z)", "title": "" }, { "docid": "bf7e53d39e0275e08f6e0e432ef4fd98", "score": "0.5352273", "text": "def __MapRGB(self,aName,aRange):\n colors=self.LUTS[aName].GetRGBValues()\n mapCoords=self.LUTS[aName].GetMapCoordinates()\n nColors=len(colors)\n coord0=float(aRange[0])\n coordDelta=float(aRange[1])-float(aRange[0])\n mappedColors=[]\n i=0\n while(i<nColors):\n x=coord0+coordDelta*mapCoords[i]\n val=[x]+colors[i]\n mappedColors+=val\n i=i+1\n return mappedColors", "title": "" }, { "docid": "0ce30519042729a9e485d7737554ef8f", "score": "0.53489554", "text": "def colour_code_segmentation(image):\n\n w = image.shape[0]\n h = image.shape[1]\n x = np.zeros([w,h,3])\n for i in range(0, w):\n for j in range(0, h):\n x[i, j, :] = colour_dict(image[i, j, 0])\n return x", "title": "" }, { "docid": "e31b3ee56f05e3a021cb3c173902f88f", "score": "0.5344241", "text": "def cvt_pcd(self,pcd):\n # The [N,3] downsampled array\n pts_3d = np.asarray(pcd.points)\n # Create a [N,1] array\n one_mat = np.ones((pts_3d.shape[0], 1),dtype=np.float64)\n # Concat and change shape from [N,4] to [4,N]\n xyz_v = np.concatenate((pts_3d, one_mat), axis=1).T\n return xyz_v", "title": "" }, { "docid": "56ac27ee3e986045f877330b57921469", "score": "0.5342287", "text": "def draw_point_cloud(img, depth_pred, seg_pred, f_len, uncertainty_threshold=0.0, apply_depth_mask=False):\n depth_pred = np.transpose(depth_pred, (1, 2, 0))\n depth_pred = depth_pred[:, :, 0]\n\n if apply_depth_mask:\n # mask high gradient regions ~ these are usually not as accurate\n grad = np.asarray(np.gradient(depth_pred))\n grad = np.abs(grad[0, :, :]) + np.abs(grad[1, :, :])\n grad_mask = grad < 0.95\n\n # focus on the immediate surroundings: mask everything that is farther than 50m\n depth_mask = (depth_pred < 50.0) * (depth_pred > 5.0)\n depth_pred = depth_pred * depth_mask * grad_mask\n\n # mask out pixels where the certainty of the class prediction is lower than the uncertainty threshold\n uc = np.max(seg_pred, 0)\n uc_mask = uc > uncertainty_threshold\n seg_pred = np.argmax(seg_pred, 0) + 1\n seg_pred *= uc_mask\n mask = np.zeros(shape=(seg_pred.shape[0], seg_pred.shape[1], 3))\n for key in CLASSES:\n class_mask = np.isin(seg_pred, np.asarray(key))\n mask[:, :, 0] += class_mask * CLASS_COLORS[key][0]\n mask[:, :, 1] += class_mask * CLASS_COLORS[key][1]\n mask[:, :, 2] += class_mask * CLASS_COLORS[key][2]\n mask = 
np.clip(mask, 0, 1)\n mask = (img / 255.0 * 0.7) + (mask * 0.3)\n\n # generate 3D points\n x = []\n y = []\n z = []\n colors = []\n idx = 0\n for i in range(depth_pred.shape[0]):\n for j in range(depth_pred.shape[1]):\n idx += 1\n # focus on the immediate surroundings: mask everything that is farther than 50m\n # also, mask out things that are too close, this might be noise\n if depth_pred[i, j] > 50.0 or depth_pred[i, j] < 2.0:\n continue\n # if the pixel is classified as sky or if it is invalid, skip\n if seg_pred[i, j] == 5 or seg_pred[i, j] == 0:\n continue\n # only show every 2nd pixel, this is more than enough for visualization\n if idx % 2 == 1:\n continue\n\n z.append(depth_pred[i, j])\n y.append(i * depth_pred[i, j] / f_len)\n x.append((-160) + j * depth_pred[i, j] / f_len)\n\n # color based on mask (0.7 * pixel color + 0.3 * label color)\n r, g, b = int(mask[i, j][0] * 255), int(mask[i, j][1] * 255), int(mask[i, j][2] * 255)\n colors.append(rgb2hex(r, g, b))\n\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(x, y, z, c=colors, marker=',', s=5)\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n ax.view_init(elev=-37., azim=-117.)\n\n plt.draw()", "title": "" }, { "docid": "9e33cc28756c37e548792f7a83c517b1", "score": "0.5338409", "text": "def print_projection_plt(points, color, image, black_background=False):\n if black_background:\n image = np.zeros_like(image)\n hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n for i in range(points.shape[1]):\n cv2.circle(\n hsv_image, (np.int32(\n points[0][i]), np.int32(\n points[1][i])), 2, (int(\n color[i]), 255, 255), -1)\n\n return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2RGB)", "title": "" }, { "docid": "c4df694d7491d0d20421d806dd46b282", "score": "0.5330301", "text": "def depth_color(val, min_d=0, max_d=120):\n np.clip(val, 0, max_d, out=val) # max distance is 120m but usually not usual\n return (((val - min_d) / (max_d - min_d)) * 120).astype(np.uint8)", "title": "" }, { "docid": "bdc8e88c9151b3acdf6719f3d278129c", "score": "0.5315751", "text": "def print_projection_plt(points, color, image):\n hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n for i in range(points.shape[1]):\n cv2.circle(hsv_image, (int(points[0][i]), int(points[1][i])), 2, (int(color[i]), 255, 255), -1)\n\n return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2RGB)", "title": "" }, { "docid": "c793bed94fae07f913757e4aa9525e05", "score": "0.53091055", "text": "def project_to_point_cloud(points, surface):\n diff = (np.expand_dims(points, 1) - np.expand_dims(surface, 0)) ** 2\n diff = np.sum(diff, 2)\n return surface[np.argmin(diff, 1)]", "title": "" }, { "docid": "44c5a1b2b1aab5af39f81a848913e015", "score": "0.5308899", "text": "def color_map(cat):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval( cat, -1, 1, 0, 255)\n return int(color_code)", "title": "" }, { "docid": "5efb7d4a595a09aef7ad8d61cfb93536", "score": "0.53027254", "text": "def main(self, rgb, depth, flow, poses_pred, poses_opt, lc_pairs):\r\n\r\n rgb = rgb[:, :, ::-1] # bgr->rgb\r\n\r\n # get the point cloud of the current frame\r\n points, colors = self.rgbd_to_colored_pc(depth, rgb, self.K[2], self.K[3], self.K[0], self.K[1])\r\n point_cloud = [points, colors]\r\n\r\n # covert depth and optical flow into pseud colored images and concatenate with the color image into one image\r\n colormapped_depth = cv2.resize(self.depth_to_img(depth), (self.img_w, self.img_h))\r\n colormapped_flow = 
cv2.resize(self.flow_to_image(flow), (self.img_w, self.img_h))\r\n rgb = cv2.resize(rgb, (self.img_w, self.img_h))\r\n\r\n # get the loop closure pairs\r\n xyz_1 = []\r\n xyz_2 = []\r\n if len(lc_pairs) != 0:\r\n cur_pose = poses_pred[len(poses_pred) - 1].pose\r\n for i in range(0, len(lc_pairs)):\r\n pose_1 = np.linalg.inv(cur_pose) @ np.asarray(poses_pred[lc_pairs[i][0]].pose)\r\n pose_2 = np.linalg.inv(cur_pose) @ np.asarray(poses_pred[lc_pairs[i][1]].pose)\r\n xyz_1.append(pose_1[:3, 3])\r\n xyz_2.append(pose_2[:3, 3])\r\n\r\n lc_xyz = [np.array(xyz_1), np.array(xyz_2)]\r\n\r\n # get the relative camera poses for visualization\r\n T_c_pre_pred = []\r\n T_c_pre_opt = []\r\n pose_num = len(poses_pred)\r\n\r\n cur_pose = poses_pred[pose_num - 1].pose\r\n for i in range(pose_num):\r\n pre_pose = poses_pred[i].pose\r\n T_c_pre_pred.append(np.linalg.inv(cur_pose) @ pre_pose)\r\n\r\n cur_pose = poses_opt[pose_num - 1].pose\r\n for i in range(pose_num):\r\n pre_pose = poses_opt[i].pose\r\n T_c_pre_opt.append(np.linalg.inv(cur_pose) @ pre_pose)\r\n\r\n # check the keyboard interface\r\n self.interface()\r\n\r\n # check if the pose is paused\r\n if self.pause:\r\n while True: # iterative draw all the items until button \"c\" is pressed\r\n # terminate the process from the visualization interface\r\n if pangolin.ShouldQuit(): \r\n self.should_quit = True\r\n\r\n self.draw(rgb, colormapped_depth, colormapped_flow, point_cloud, T_c_pre_pred, T_c_pre_opt, lc_xyz)\r\n self.interface()\r\n if not self.pause:\r\n break\r\n else: # else only draw all the items one time\r\n # terminate the process from the visualization interface\r\n if pangolin.ShouldQuit(): \r\n self.should_quit = True\r\n \r\n self.draw(rgb, colormapped_depth, colormapped_flow, point_cloud, T_c_pre_pred, T_c_pre_opt, lc_xyz)", "title": "" }, { "docid": "2e8e5b32ce3bffb3c2a0b36b23d4c0f1", "score": "0.5285112", "text": "def depth2xyz(depth):\n H,W = depth.shape\n xx,yy = np.meshgrid(np.arange(W),np.arange(H))\n X = (xx-W/2) * depth / DepthCamera.f\n Y = (yy-H/2) * depth / DepthCamera.f\n return np.dstack([X,Y,depth.copy()])", "title": "" }, { "docid": "710af6f5f9cacef6ae89404caf88ec94", "score": "0.52849287", "text": "def create_custom_colormap():\n\n cmap = np.zeros((180,360,3))\n\n x, y = np.mgrid[0:180:1, 0:360:1]\n pos = np.dstack((x, y))\n rv = multivariate_normal([0, 0], 10000* np.asarray([[2.0, 0.], [0., 0.5]])).pdf(pos)\n rv += multivariate_normal([0, 360], 10000* np.asarray([[2.0, -0.], [-0., 0.50]])).pdf(pos)\n cmap[:,:,2] = rv / np.max(rv)\n\n rv = multivariate_normal([0, 120], 10000* np.asarray([[2.5, 0.], [0., 0.5]])).pdf(pos)\n cmap[:,:,1] = rv / np.max(rv)\n\n rv = multivariate_normal([180, 120], 10000* np.asarray([[0.5, 0.], [0., 40]])).pdf(pos)\n cmap[:,:,0] = rv / np.max(rv)\n\n return cmap", "title": "" }, { "docid": "6e94a82d59e2d65b9d10999d196aaddb", "score": "0.5284359", "text": "def apply_color_map_1(gray, cmap):\n\n lut = build_lut(cmap)\n s0, s1 = gray.shape\n out = np.empty(shape=(s0, s1, 3), dtype=np.uint8)\n\n for i in range(3):\n out[..., i] = cv2.LUT(gray, lut[:, i])\n return out", "title": "" }, { "docid": "6ececf23b8d8aad98abf7274919872fd", "score": "0.526989", "text": "def lin_srgb_to_xyz(rgb):\n\n return util.dot(RGB_TO_XYZ, rgb)", "title": "" }, { "docid": "ed8892ab317c4bf698f3765f30c6e21a", "score": "0.526319", "text": "def grayworld(rgb):\n\n ind = rgb.sum(2) > 0\n fac = np.array([i[ind].mean() for i in rgb.transpose(2, 0, 1)])\n fac /= fac.max()\n\n return (rgb / fac).clip(0, 1)", "title": "" }, { 
"docid": "75fbb2bd63e474089680566a51f11f17", "score": "0.526081", "text": "def project_pc_to_image(points, resolution=64):\n img = []\n for i in range(3):\n canvas = np.zeros((resolution, resolution))\n axis = [0, 1, 2]\n axis.remove(i)\n proj_points = (points[:, axis] + 1) / 2 * resolution\n proj_points = proj_points.astype(np.int)\n canvas[proj_points[:, 0], proj_points[:, 1]] = 1\n img.append(canvas)\n img = np.concatenate(img, axis=1)\n return img", "title": "" }, { "docid": "4ead666b0ac24441587246f9b89e0289", "score": "0.52517754", "text": "def data_function_color_depth_and_gt(self, element, center_view=None):\n # decide on a central view\n if center_view is None:\n center_view = random.randint(0, self.adapter.nr_views-1)\n\n # get the central image, estimate and GT depth map\n center_image = self.cache.get(self.adapter.get_single_image, (element, center_view))\n center_estimate = self.cache.get(self.adapter.get_single_depth_map, (element, center_view, False))\n center_depth_map = self.cache.get(self.adapter.get_single_depth_map, (element, center_view, True))\n\n return ((center_image[None], center_estimate[None]), (center_depth_map[None],))", "title": "" }, { "docid": "62ce49737fd82a0ade3ce3588dbac5c7", "score": "0.52498025", "text": "def convert_from_color(arr_3d, palette={v: k for k, v in palette.items()}):\n arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8)\n\n for c, i in palette.items():\n m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)\n arr_2d[m] = i\n\n return arr_2d", "title": "" }, { "docid": "bb3ea4b4272c2b376d3b2f1ef63f8966", "score": "0.5240316", "text": "def pix_to_3dpt(depth_im, rs, cs, intrinsic_mat, depth_map_factor, reduce=None, k=5):\n assert isinstance(rs, int) or isinstance(rs, list) or isinstance(rs, np.ndarray)\n assert isinstance(cs, int) or isinstance(cs, list) or isinstance(cs, np.ndarray)\n if isinstance(rs, int):\n rs = [rs]\n if isinstance(cs, int):\n cs = [cs]\n if isinstance(rs, np.ndarray):\n rs = rs.flatten()\n if isinstance(cs, np.ndarray):\n cs = cs.flatten()\n R, C = depth_im.shape\n if reduce == \"none\" or reduce is None:\n depth_im = depth_im[rs, cs]\n elif reduce == \"mean\":\n depth_im = np.array(\n [\n np.mean(\n depth_im[\n max(i - k, 0) : min(i + k, R), max(j - k, 0) : min(j + k, C)\n ]\n )\n for i, j in zip(rs, cs)\n ]\n )\n elif reduce == \"max\":\n depth_im = np.array(\n [\n np.max(\n depth_im[\n max(i - k, 0) : min(i + k, R), max(j - k, 0) : min(j + k, C)\n ]\n )\n for i, j in zip(rs, cs)\n ]\n )\n elif reduce == \"min\":\n depth_im = np.array(\n [\n np.min(\n depth_im[\n max(i - k, 0) : min(i + k, R), max(j - k, 0) : min(j + k, C)\n ]\n )\n for i, j in zip(rs, cs)\n ]\n )\n else:\n raise ValueError(\n \"Invalid reduce name provided, only the following\"\n \" are currently available: [{}, {}, {}, {}]\".format(\n \"none\", \"mean\", \"max\", \"min\"\n )\n )\n\n depth = depth_im.reshape(-1) / depth_map_factor\n img_pixs = np.stack((rs, cs)).reshape(2, -1)\n img_pixs[[0, 1], :] = img_pixs[[1, 0], :]\n uv_one = np.concatenate((img_pixs, np.ones((1, img_pixs.shape[1]))))\n\n intrinsic_mat_inv = np.linalg.inv(intrinsic_mat)\n uv_one_in_cam = np.dot(intrinsic_mat_inv, uv_one)\n pts_in_cam = np.multiply(uv_one_in_cam, depth)\n pts_in_cam = np.concatenate((pts_in_cam, np.ones((1, pts_in_cam.shape[1]))), axis=0)\n return pts_in_cam", "title": "" }, { "docid": "e7d04ba68fd239031f6eafaf3dd0f2f1", "score": "0.5237445", "text": "def create_map_coloring_csp():\n csp = CSP()\n provinces = ['WA', 'NT', 'Q', 'NSW', 'V', 
'SA', 'T']\n neighbors = {\n 'SA' : ['WA', 'NT', 'Q', 'NSW', 'V'],\n 'NT' : ['WA', 'Q'],\n 'NSW' : ['Q', 'V']\n }\n colors = ['red', 'blue', 'green']\n def are_neighbors(a, b):\n return (a in neighbors and b in neighbors[a]) or \\\n (b in neighbors and a in neighbors[b])\n\n # Add the variables and binary factors\n for p in provinces:\n csp.add_variable(p, colors)\n for p1 in provinces:\n for p2 in provinces:\n if are_neighbors(p1, p2):\n # Neighbors cannot have the same color\n csp.add_binary_factor(p1, p2, lambda x, y : x != y)\n return csp", "title": "" }, { "docid": "c752275015e5ee172a30a58a5c43bfe8", "score": "0.52290297", "text": "def __colormap(self, N):\n cmap = np.zeros((N, 3), dtype = np.uint8)\n\n def uint82bin(n, count=8):\n \"\"\"returns the binary of integer n, count refers to amount of bits\"\"\"\n return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])\n\n for i in range(N):\n r = 0\n g = 0\n b = 0\n idx = i\n for j in range(7):\n str_id = uint82bin(idx)\n r = r ^ ( np.uint8(str_id[-1]) << (7-j))\n g = g ^ ( np.uint8(str_id[-2]) << (7-j))\n b = b ^ ( np.uint8(str_id[-3]) << (7-j))\n idx = idx >> 3\n cmap[i, 0] = r\n cmap[i, 1] = g\n cmap[i, 2] = b\n return cmap", "title": "" }, { "docid": "130e7e47c4b2faf71fa0291e9e8c8113", "score": "0.522223", "text": "def toimage(arr, high=..., low=..., cmin=..., cmax=..., pal=..., mode=..., channel_axis=...):\n ...", "title": "" }, { "docid": "37f25a399b8e75ed39dc99499056b720", "score": "0.52137864", "text": "def get_color_features(self,image):\n ch1_featr = image[:,:,0].ravel()\n ch2_featr = image[:,:,1].ravel()\n ch3_featr = image[:,:,2].ravel()\n\n return np.hstack((ch1_featr, ch2_featr, ch3_featr))", "title": "" }, { "docid": "44bd8f65f03e4390116ce8828b6bb0f5", "score": "0.5213656", "text": "def convert_from_color(arr_3d, palette=GLOB.invert_palette):\n arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8)\n\n for c, i in palette.items():\n m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)\n arr_2d[m] = i\n\n return arr_2d", "title": "" }, { "docid": "d868ee3dfb45b8e322e81474adc4169c", "score": "0.5209106", "text": "def lin_p3(rgb):\n\n return lin_srgb(rgb) # same as sRGB", "title": "" }, { "docid": "fb35143f0d8b353a16871ed032e8a966", "score": "0.52077246", "text": "def __projectPoints__(self, X, K, R, t, Kd):\n\n x = np.dot(R, X) + t # panoptic to kinect color scaling\n depth_val = x[2:3,:].transpose()\n\n x[0:2, :] = x[0:2, :] / (x[2, :] + 1e-5)\n\n r = x[0, :] * x[0, :] + x[1, :] * x[1, :]\n\n # 去畸变\n x[0, :] = x[0, :] * (1 + Kd[0] * r + Kd[1] * r * r + Kd[4] * r * r * r\n ) + 2 * Kd[2] * x[0, :] * x[1, :] + Kd[3] * (\n r + 2 * x[0, :] * x[0, :])\n x[1, :] = x[1, :] * (1 + Kd[0] * r + Kd[1] * r * r + Kd[4] * r * r * r\n ) + 2 * Kd[3] * x[0, :] * x[1, :] + Kd[2] * (\n r + 2 * x[1, :] * x[1, :])\n\n x[0, :] = K[0, 0] * x[0, :] + K[0, 1] * x[1, :] + K[0, 2]\n x[1, :] = K[1, 0] * x[0, :] + K[1, 1] * x[1, :] + K[1, 2]\n\n return x, depth_val", "title": "" }, { "docid": "8590ae31f514a40264f540fd33811fde", "score": "0.51977795", "text": "def decode(self, loc3d, c, z):\n rgb = self.decoder(loc3d, c, z)\n return rgb", "title": "" }, { "docid": "7dffd8ce28ee793b8c8bc2b92bf2cebe", "score": "0.5192522", "text": "def _get_grid_colors(grid_proba, grid_pred, class_values, colors):\n nclasses = len(class_values)\n class_colors = np.array(colors['classes'][nclasses])\n\n grid_pred_colors = class_colors[grid_pred] # color for each prediction in grid\n\n color_map = {v: class_colors[i] for i, v in 
enumerate(class_values)}\n # multiply each probability vector times rgb color for each class then add\n # together to get weighted color\n rgb = np.array([ImageColor.getcolor(c, mode=\"RGB\") for c in class_colors])\n grid_proba_colors = grid_proba @ rgb\n grid_proba_colors /= 255 # get in [0..1]\n grid_proba_colors = [Color(rgb=c).hex for c in grid_proba_colors]\n return color_map, grid_pred_colors, grid_proba_colors", "title": "" }, { "docid": "f0b1373a2127121485e02c18afd9356d", "score": "0.51911354", "text": "def check_color_set(rgb_colors):\n min_dist = 100\n deut_jab_test = np.empty((NUM_COLORS, 3), dtype=np.float32)\n prot_jab_test = deut_jab_test.copy()\n trit_jab_test = deut_jab_test.copy()\n for severity in range(1, CVD_SEVERITY):\n for i in range(NUM_COLORS):\n rgb_linear = color_conversions.sRGB1_to_sRGB1_linear(rgb_colors[i] / 255)\n deut_jab_test[i] = color_conversions.rgb_linear_to_jab(\n color_conversions.CVD_forward_deuteranomaly(rgb_linear, severity)\n )\n prot_jab_test[i] = color_conversions.rgb_linear_to_jab(\n color_conversions.CVD_forward_protanomaly(rgb_linear, severity)\n )\n trit_jab_test[i] = color_conversions.rgb_linear_to_jab(\n color_conversions.CVD_forward_tritanomaly(rgb_linear, severity)\n )\n for pair in COMBINATIONS:\n min_dist = min(\n min_dist,\n color_conversions.cam02de(\n deut_jab_test[pair[0]], deut_jab_test[pair[1]]\n ),\n )\n min_dist = min(\n min_dist,\n color_conversions.cam02de(\n prot_jab_test[pair[0]], prot_jab_test[pair[1]]\n ),\n )\n min_dist = min(\n min_dist,\n color_conversions.cam02de(\n trit_jab_test[pair[0]], trit_jab_test[pair[1]]\n ),\n )\n if min_dist < MIN_COLOR_DIST:\n return False\n return True", "title": "" }, { "docid": "87ded7c0ef72b826497ac204fcc58161", "score": "0.5190932", "text": "def map(self, from_type, to_type):\n result = None\n if from_type == 'color' and to_type == 'camera':\n map_ary = np.empty((COLOR_HEIGHT, COLOR_WIDTH, 3), np.float32)\n if self._kinect.get_map_color_to_camera(map_ary):\n result = map_ary\n elif from_type == 'depth' and to_type == 'camera':\n map_ary = np.empty((DEPTH_HEIGHT, DEPTH_WIDTH, 3), np.float32)\n if self._kinect.get_map_depth_to_camera(map_ary):\n result = map_ary\n elif from_type == 'depth' and to_type == 'color':\n map_ary = np.empty((DEPTH_HEIGHT, DEPTH_WIDTH, 2), np.float32)\n if self._kinect.get_map_depth_to_color(map_ary):\n result = map_ary\n elif from_type == 'color' and to_type == 'depth':\n map_ary = np.empty((COLOR_HEIGHT, COLOR_WIDTH, 2), np.float32)\n if self._kinect.get_map_color_depth(map_ary):\n result = map_ary\n return result", "title": "" }, { "docid": "12d81d9c85bb110ed47373bcf15d80ec", "score": "0.51813275", "text": "def sample_cmap(cmap, n_pts, bottom=0.0, top=1.0, gamma=1):\r\n assert top <= 1\r\n assert bottom >= 0\r\n assert top >= bottom\r\n rng = float(top) - float(bottom)\r\n return map(cmap, bottom + rng*np.power(np.linspace(0, 1, n_pts), 1))", "title": "" }, { "docid": "e47adf5fac5fae1ffff29925b8382db4", "score": "0.5169544", "text": "def colors_from_colormap_continuous(num, cmap): \n ints = np.arange(0,num+1,1)\n norm = clr.Normalize(vmin=ints[0], vmax=ints[-1], clip=True)\n mapper = cm.ScalarMappable(norm=norm, cmap=cmap)\n color_list = []\n for i in ints:\n color_list.append(mapper.to_rgba(i))\n \n return color_list", "title": "" }, { "docid": "2e7ad23fe43afebd8c5a9cabfbad35d7", "score": "0.5164136", "text": "def RGB_to_rgb(RGB: ArrayLike) -> NDArrayFloat:\n\n rgb = vector_dot(matrix_dot(MATRIX_XYZ_TO_HPE, CAT_INVERSE_CAT02), RGB)\n\n return rgb", "title": 
"" }, { "docid": "95727acef12f9091351e481f8ffad71e", "score": "0.516384", "text": "def planeColor(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "a7e7b54756e07edaf7feb792e41daac5", "score": "0.51625353", "text": "def get_color_data_about_image(px_handle, w: int, h: int) -> tuple:\n R = [0 for i in range(256)]\n G = R.copy()\n B = R.copy()\n\n for i in range(w):\n for j in range(h):\n color = px_handle[i, j]\n R[color[0]] += 1 # count pixels define certain cast\n G[color[1]] += 1\n B[color[2]] += 1\n\n M = [(R[i] + G[i] + B[i]) // 3 for i in range(256)]\n return R, G, B, M", "title": "" }, { "docid": "e424fe40d9e5a19efbaa124fecb49331", "score": "0.51604", "text": "def process_rgb(rgb):\r\n global surface\r\n array = np.frombuffer(rgb.raw_data, dtype=np.dtype(\"uint8\"))\r\n array = np.reshape(array, (rgb.height, rgb.width, 4))\r\n array = array[:, :, :3]\r\n array = array[:, :, ::-1] # switch r,g,b\r\n array = array.swapaxes(0, 1) # exchange the width and height\r\n surface = pygame.surfarray.make_surface(array) # Copy an array to a new surface\r\n\r\n # rgb.save_to_disk('D:\\\\mb95541\\\\aeroplane\\\\data\\\\rgb\\\\%d' % rgb.frame)\r", "title": "" }, { "docid": "999af928c56308561b2208b862040529", "score": "0.51546156", "text": "def convert_from_color(arr_3d, palette=invert_palette):\n arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8)\n\n for c, i in palette.items():\n m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)\n arr_2d[m] = i\n\n return arr_2d", "title": "" }, { "docid": "999af928c56308561b2208b862040529", "score": "0.51546156", "text": "def convert_from_color(arr_3d, palette=invert_palette):\n arr_2d = np.zeros((arr_3d.shape[0], arr_3d.shape[1]), dtype=np.uint8)\n\n for c, i in palette.items():\n m = np.all(arr_3d == np.array(c).reshape(1, 1, 3), axis=2)\n arr_2d[m] = i\n\n return arr_2d", "title": "" }, { "docid": "082b90608d05bebba2cbc16db242db49", "score": "0.5145349", "text": "def color_image(color_points: np.ndarray, shape: tuple) -> np.ndarray:\n img = np.zeros(shape, dtype=np.uint8)\n # Makes it all white\n img.fill(255)\n\n img[color_points[:, 1], color_points[:, 0]] = color_points[:, 4:]\n return img", "title": "" }, { "docid": "95e9e40c5dba2781042e10f890960d30", "score": "0.51449716", "text": "def purpleMain(cam, color_image, depth_image):\n # init\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03),\n cv2.COLORMAP_JET)\n images = np.hstack([depth_colormap, color_image])\n\n # get xyd and xyz\n xyd = getPurpleXYD(color_image, depth_image)\n if xyd is None:\n return False, images\n xyz = cam.getXYZ(*xyd)\n\n print(\"xyd\", f\"{xyd[0]:04.0f} {xyd[1]:04.0f} {xyd[2]:04.0f}\",\n \"xyz\", f\"{xyz[0]:04.0f} {xyz[1]:04.0f} {xyz[2]:04.0f}\")\n\n # get arm xyz\n axyz = Arm.xyzFromCamera(*xyz)\n print(\"arm xyz\", f\"{axyz[0]:.03f} {axyz[1]:.03f} {axyz[2]:.03f}\")\n\n # get arm angle\n try:\n angle = Arm.xyz2Angle(*axyz)\n print(\"Angle\", angle)\n except ValueError as e:\n print(e)\n\n images = np.hstack([depth_colormap, color_image])\n return False, images", "title": "" }, { "docid": "99ff36a26b0f14dfd5f277e96912208a", "score": "0.5141466", "text": "def colorspace(self, conv, **kwargs):\n\n # TODO other color cases\n # TODO check conv is valid\n\n # TODO conv string parsing\n\n # ensure floats? 
unsure if cv.cvtColor operates on ints\n imf = self.float()\n\n out = []\n for im in imf:\n if conv == 'xyz2bgr':\n # note that using cv.COLOR_XYZ2RGB does not seem to work\n BGR_raw = cv.cvtColor(im.bgr, cv.COLOR_XYZ2BGR, **kwargs)\n\n # desaturate and rescale to constrain resulting RGB values\n # to [0,1]\n B = BGR_raw[:, :, 0]\n G = BGR_raw[:, :, 1]\n R = BGR_raw[:, :, 2]\n add_white = -np.minimum(np.minimum(np.minimum(R, G), B), 0)\n B += add_white\n G += add_white\n R += add_white\n\n # inverse gamma correction\n B = self._gammacorrection(B)\n G = self._gammacorrection(G)\n R = self._gammacorrection(R)\n\n out.append(np.dstack((B, G, R))) # BGR\n\n elif conv == 'Lab2bgr':\n # convert source from Lab to xyz\n\n # in colorspace.m, image was parsed into a (251001,1,3)\n labim = np.reshape(im.image,\n (im.shape[0], 1, im.shape[1]))\n\n fY = (labim[:, :, 0] + 16) / 116\n fX = fY + labim[:, :, 1] / 500\n fZ = fY - labim[:, :, 2] / 200\n # cie xyz whitepoint\n WhitePoint = np.r_[0.950456, 1, 1.088754]\n\n xyz = np.zeros(labim.shape)\n xyz[:, :, 0] = WhitePoint[0] * self._invf(fX)\n xyz[:, :, 1] = WhitePoint[1] * self._invf(fY)\n xyz[:, :, 2] = WhitePoint[2] * self._invf(fZ)\n\n # then call function again with conv = xyz2bgr\n xyz = self.__class__(xyz)\n\n out.append(xyz.colorspace('xyz2bgr').image)\n\n else:\n raise ValueError('other conv options not yet implemented')\n # TODO other color conversion cases\n # out.append(cv.cvtColor(np.float32(im), **kwargs))\n\n return self.__class__(out)", "title": "" }, { "docid": "d10eacc1ace63d349ed56dfda35772cb", "score": "0.5140953", "text": "def get_colors(num_colors,colormap='jet',flag_plot=False):\n\n len_cmap=500\n cmap = plt.cm.get_cmap(colormap, len_cmap)\n \n step=int(len_cmap/(num_colors))\n \n \n rgb_list=[]\n index=int(step/2)\n for kk in range(num_colors):\n print(index)\n rgb_list.append(cmap(index)[:3])\n index+=step\n \n \n if flag_plot:\n \n x=np.linspace(1,10,num_colors)\n y=np.ones((num_colors,1))\n \n for kk in range(num_colors):\n plt.plot(x[kk],y[kk],'o',color=rgb_list[kk])\n \n return rgb_list", "title": "" }, { "docid": "2265ab34c41d766ac8e7bb7f8c3e2954", "score": "0.51328754", "text": "def map_colors(src_dst_colormap, img):\n # Preconditions\n dic = src_dst_colormap\n assert ((type(dic) is bidict) or (type(dic) is dict) and \n type(img) is np.ndarray), \\\n 'type(src_dst_colormap) = {}, type(img) = {}'.format(\n type(dic), type(img))\n assert unique_color_set(img) <= set(map( tuple, dic.keys() )), \\\n (' img = {} > {} = dic \\n It means some pixels in img' \n +' cannot be mapped with this rgb<->1hot dict').format( \n unique_color_set(img), set(map( tuple, dic.keys() )))\n\n h,w,_ = img.shape\n some_dst_color = next(iter(src_dst_colormap.values()))\n c_dst = len(some_dst_color)\n\n ret_img = np.zeros((h,w,c_dst), dtype=img.dtype)\n for c, (src_bgr, dst_color) in enumerate(src_dst_colormap.items()):\n mapped = map_pixels(img, src_bgr, dst_color)\n ret_img += mapped\n\n return ret_img", "title": "" }, { "docid": "8eef9f9b7cb0c93587a8ea10b41e9ee0", "score": "0.51313835", "text": "def colorsPerVertex(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "461e62a21a0b83dda1e90efc99171134", "score": "0.5109865", "text": "def opponent_colour_dimensions_forward(RGB: ArrayLike) -> NDArrayFloat:\n\n R, G, B = tsplit(RGB)\n\n a = R - 12 * G / 11 + B / 11\n b = (R + G - 2 * B) / 9\n\n ab = tstack([a, b])\n\n return ab", "title": "" }, { "docid": "491989ede30c211eb19f2cd0a3d5b8f1", "score": "0.5106167", "text": "def pointcloud_to_2D(self, 
pointcloud):\n\t\tpointcloud_2d = np.array(pointcloud)[:,:2]\n\t\t# print(pointcloud_2d)\n\t\treturn pointcloud_2d", "title": "" }, { "docid": "90267a17df7efdc9932f5d2dcd7dd0a3", "score": "0.5100035", "text": "def getPygletColorMap(colordict, height=1080, width=1440, suffix=\"\"):\n colormap = {}\n for (name, pattern) in colordict.items():\n r,g,b=pattern\n _temp = pyglet.image.SolidColorImagePattern((r,g,b,255))\n colormap[name] = _temp.create_image(width=width, height=height)\n \n return colormap", "title": "" }, { "docid": "db026bdb24222dfe941f41e76d57ebdd", "score": "0.50996774", "text": "def cmap(self, *args):\n return self.send({'cmd': 'cmap', 'args': args})", "title": "" }, { "docid": "8818c54f15bf291f1890f74723de91e0", "score": "0.50992817", "text": "def getColor(frame, coords, tolerance=40):\n cx, cy = coords\n\n # frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # use rgb img\n # frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # use rgb img\n \n # h_channel = frame_hsv[;,:,0]\n # s_channel = frame_hsv[;,:,1]\n # r_channel = frame_hsv[;,:,0]\n # g_channel = frame_hsv[;,:,1]\n # b_channel = frame_hsv[;,:,2]\n\n\n # yellow_mask = np.zeros_like(h_channel)\n # orange_mask = np.zeros_like(h_channel)\n # black_mask = np.zeros_like(h_channel)\n # green_mask = np.zeros_like(h_channel)\n # blue_mask = np.zeros_like(h_channel)\n # pink_mask = np.zeros_like(h_channel)\n # red_mask = np.zeros_like(h_channel)\n # purple_mask = np.zeros_like(h_channel)\n\n # yellow = np.array([30, ])\n # orange = np.array([20, ])\n # black = np.array([51,60,72])\n # green = np.array([90])\n # blue = np.array([160])\n # pink = np.array([])\n # red = np.array([])\n # purple = np.array([])\n \n\n # yellow_tol = 5\n # orange_tol = 5\n # green_tol =30\n # blue _tol = 30\n\n # y_mask_h = cv2.inRange(frame, lower, upper)\n\n # color_masks = [\n # ('yellow', yellow_mask),\n # ('orange', yellow_mask),\n # ('pink', yellow_mask),\n # ('black',yellow_mask),\n # ('red',yellow_mask),\n # ('purple',yellow_mask),\n # ('green',yellow_mask),\n # ('blue',yellow_mask)\n # ]\n \n # for (color, mask) in color_masks:\n # output = cv2.bitwise_and(frame, frame, mask = mask)\n # if np.all(output[cy][cx]):\n # return color\n\n # HSV\n # yellow = np.array([247,198,8])\n # orange = np.array([199,90,17])\n # pink = np.array([204,56,93])\n # black = np.array([33,26,27])\n # red = np.array([148,34,41])\n # purple = np.array([132,82,115])\n # green = np.array([101,125,99])\n # blue = np.array([80,96,149])\n \n\n # RGB\n yellow = np.array([213,217,220])\n orange = np.array([199,90,17])\n pink = np.array([218,86,129])\n black = np.array([51,60,72])\n red = np.array([205,198,195])\n purple = np.array([113,78,134])\n green = np.array([138,170,180])\n blue = np.array([85,94,160]) \n\n color_boundaries = [\n ('yellow', yellow-tolerance,yellow+tolerance),\n ('orange', orange-tolerance,orange+tolerance),\n ('pink', pink-tolerance, pink+tolerance),\n ('black',black-tolerance, black+tolerance),\n ('red',red-tolerance, red+tolerance),\n ('purple',purple-tolerance, purple+tolerance),\n ('green',green-tolerance, green+tolerance),\n ('blue',blue-tolerance, blue+tolerance)\n ]\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # use rgb img\n # frame_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # use HSV img\n \n\n for (color, lower, upper) in color_boundaries:\n mask = cv2.inRange(frame, lower, upper)\n output = cv2.bitwise_and(frame, frame, mask = mask)\n if np.all(output[cy][cx]):\n return color", "title": "" } ]
ff1b78c3ab6c17b32de402a44284ff0e
Return the default parameters to use for this instrument.
[ { "docid": "29ecb96dd0a73340f5b590baf576e00a", "score": "0.63064265", "text": "def default_pypeit_par(cls):\n par = super().default_pypeit_par()\n\n # Ignore PCA\n par['calibrations']['slitedges']['sync_predict'] = 'nearest'\n # Bound the detector with slit edges if no edges are found\n par['calibrations']['slitedges']['bound_detector'] = True\n\n # Always correct for flexure, starting with default parameters\n par['flexure']['spec_method'] = 'boxcar'\n # Set the default exposure time ranges for the frame typing\n par['calibrations']['biasframe']['exprng'] = [None, 1]\n par['calibrations']['darkframe']['exprng'] = [999999, None] # No dark frames\n par['calibrations']['pinholeframe']['exprng'] = [999999, None] # No pinhole frames\n par['calibrations']['pixelflatframe']['exprng'] = [0, None]\n par['calibrations']['traceframe']['exprng'] = [0, None]\n par['calibrations']['arcframe']['exprng'] = [None, 61]\n par['calibrations']['standardframe']['exprng'] = [1, 61]\n #\n par['scienceframe']['exprng'] = [61, None]\n return par", "title": "" } ]
[ { "docid": "a700ce8225135b8cb4346a035e703959", "score": "0.8122184", "text": "def getDefaultParameters():\n param = {}\n return param", "title": "" }, { "docid": "f076a91fe2bc8e56f0b1a28e10c6e71a", "score": "0.80809766", "text": "def _get_default_parameters(self):\n default_parameter = {\n \"variance\": np.ones(self.num_dim),\n \"proportions\": np.ones(2)/2.0,\n \"scales\": np.array([0.5, 1.5]),\n }\n return default_parameter", "title": "" }, { "docid": "be3c026f59c7ca56b3c44fd57daefef3", "score": "0.79400915", "text": "def get_default_param_values(cls):\n return dict(\n rename_dict={},\n show_pins=False,\n debug=False,\n clock_track=1,\n fb_idx=None,\n clk_rst_sp=2,\n power_width_ntr=None,\n\n )", "title": "" }, { "docid": "aee60ab98bce475463bcccbf7b92a169", "score": "0.79241955", "text": "def get_default_param_values(cls):\n return dict(\n rename_dict={},\n show_pins=False,\n g_space=0,\n ds_space=0,\n debug=False,\n power_width_ntr=None,\n )", "title": "" }, { "docid": "4a9a71ea783d0366871a7dde7e02d8f5", "score": "0.7919623", "text": "def default_params(self):\n params = {}\n for p in self.parms:\n params[p] = self.parms[p][\"default_value\"]\n return params", "title": "" }, { "docid": "61d386284f3e86d7caaf7d22d8c142bb", "score": "0.7866738", "text": "def _default_params(self) -> Mapping[str, Any]:\n return {\n \"tokens_to_generate\": self.tokens_to_generate,\n \"stop\": self.stop,\n \"logprobs\": self.logprobs,\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"top_k\": self.top_k,\n \"repetition_penalty\": self.repetition_penalty,\n \"random_seed\": self.random_seed,\n \"beam_search_diversity_rate\": self.beam_search_diversity_rate,\n \"beam_width\": self.beam_width,\n \"length_pentaly\": self.length_pentaly,\n }", "title": "" }, { "docid": "1f488f7be3485f9e82f16386e23f1238", "score": "0.7829876", "text": "def get_default_param_values(cls):\n return dict(\n nduml=4,\n ndumr=4,\n nsep=0,\n num_track_sep=1,\n io_width=1,\n rename_dict={},\n guard_ring_nf=0,\n show_pins=False,\n )", "title": "" }, { "docid": "81b905fbfb260f401e3949644b42ca40", "score": "0.78123194", "text": "def get_default_param_values(cls):\n return dict(\n rename_dict={},\n power_width_ntr=None,\n show_pins=False,\n )", "title": "" }, { "docid": "d849171b8be33f0a86c94380ac493ff9", "score": "0.77428854", "text": "def default_params():\n params = {}\n params['workload'] = '2drangeid-kron'\n params['domain'] = 256\n params['init_scale'] = 1.0\n\n return params", "title": "" }, { "docid": "659ca365d2859394594200c10676bc5d", "score": "0.77307826", "text": "def _get_default_params(self):\n\t\treturn {\n\t\t\t'apikey': self.api_key,\n\t\t\t}", "title": "" }, { "docid": "ab30cbb160c7d20b194fe036feb8b917", "score": "0.76607853", "text": "def default_parameters(cls):\n parameters = {\n \"changepoint_prior_scale\": 0.05,\n \"date_index\": None,\n \"seasonality_prior_scale\": 10,\n \"holidays_prior_scale\": 10,\n \"seasonality_mode\": \"additive\",\n \"stan_backend\": \"CMDSTANPY\",\n }\n return parameters", "title": "" }, { "docid": "dd6a9699d5be821207e841f27fe3e7b2", "score": "0.76483166", "text": "def get_default_params(self):\r\n # This function should return the default parameters to be used for training the model from scratch\r\n # The output of this function should be the same as the params parameter in the train function\r\n # Before submitting your work to gitlab, edit this function to return the optimized parameters for your model\r\n pass", "title": "" }, { "docid": "637292ce083c17308362e9f063b62616", 
"score": "0.76062214", "text": "def default_params(cls):\n ...", "title": "" }, { "docid": "f6dfe29f6f8773f6ac2aa71a3025a46e", "score": "0.7498749", "text": "def get_default_param_values(cls):\n return dict(\n debug=False,\n )", "title": "" }, { "docid": "4d31bf4f8d54717cd88e0a52d1688ba1", "score": "0.7482539", "text": "def get_default_params(self, params):\n params_default = {'fmin': 0.1, 'fmax': 150., 'df': 1.0/(2*np.pi),\n 'to_file': True, 'from_file': True}\n params_default.update(params)\n return params_default", "title": "" }, { "docid": "35709826027b6ca7bf42748b966af2f1", "score": "0.74705946", "text": "def defaults(self):\n return {name: par.default\n for name, par in inspect.signature(self.eval).parameters.items()}", "title": "" }, { "docid": "4950d00a549d970eda17f008b7219ba2", "score": "0.7467563", "text": "def default_parameter(self) -> Dict[str, Any]:\n return self._default_parameter", "title": "" }, { "docid": "590b780d39e53494a635cf9d9fa5f5ed", "score": "0.74221104", "text": "def default_parameters():\n\n print \"Loading default parameters\"\n params={}\n # filtering - select only a subset\n params['analysis_overlays']={}\n # queen signal\n params['analysis_overlays']['hist_queen']={}\n mydict = params['analysis_overlays']['hist_queen']\n mydict['channels'] = [0,1]\n mydict['bins'] = [16, 16, 16]\n mydict['colors']=[None, None, None]\n mydict['units_dx']=None\n\n # fluorescence\n params['analysis_overlays']['hist_channel']={}\n mydict = params['analysis_overlays']['hist_channel']\n mydict['channel'] = 0\n mydict['bins'] = [16,16,16]\n mydict['colors']=[None,None,None]\n mydict['units_dx']=None\n mydict['mode']='total_fl' # alternative value is \\'concentration\\'\n return params", "title": "" }, { "docid": "4d37f47109565bda6ce456767ab3e934", "score": "0.73541456", "text": "def carn_params(self):\n return Carn().DEFAULT_PARAMETERS", "title": "" }, { "docid": "ae44a1a23c21f2004da2488616eb7040", "score": "0.71787226", "text": "def defaults(self):\n return dict(max_steps=1000,\n energy_break_rounds=-1,\n energy_break_tol=-1,\n temp_tol=-1,\n verbose=0,\n debug=False,\n pickle=False,\n pickle_file=None)", "title": "" }, { "docid": "05474f995fc39eabf747723edd5c0669", "score": "0.7165386", "text": "def getDefaultParameters(self):\n #\n dp = AExpressionModule.getDefaultParameters(self)\n dp['initType'] = 'pca'\n dp['nIterations'] = 20\n dp['schedule'] = ['S','W','Alpha','Eps']\n dp['components'] = 5\n dp['priors'] = {}\n dp['name_str'] = {}\n return dp", "title": "" }, { "docid": "346e38ad377eec98249cd5e8cd9c1814", "score": "0.7154922", "text": "def defaults(self):\n\n return {\"stats_demographic_id\": FieldTemplate.dummy(\"parameter_id\"),\n }", "title": "" }, { "docid": "460d59ab613f7db53f760e7b24492b9a", "score": "0.7146495", "text": "def set_defaults(self):\n from numpy import isnan\n from timing_system import timing_system\n for key in self.parameters:\n if key in [\"pass_number\",\"image_number\"]: continue\n if isnan(self.parameters[key]):\n self.parameters[key] = timing_sequencer.get_default(key)", "title": "" }, { "docid": "3fdb6cb207e218c2431b2c08080230d6", "score": "0.7141171", "text": "def get_params(self):\n default_params = {\n \"exclude_tests\" : [],\n \"flatten_path\" : None,\n \"expected_count\" : 0, \n }\n\n params = self.apply_default_params(default_params)\n return params", "title": "" }, { "docid": "95df1c944d17151ddc160fbedd0a9e84", "score": "0.7103382", "text": "def get_default_param_dict(self):\n return {}", "title": "" }, { "docid": 
"16b7ceb8e2f263eab78ed49ebc23b89e", "score": "0.7044552", "text": "def herb_params(self):\n return Herb().DEFAULT_PARAMETERS", "title": "" }, { "docid": "4a7865ff4ea5d55330362f79d90b63e6", "score": "0.7013057", "text": "def defaults(self):\n\n #dummy = FieldTemplate.dummy\n\n return {\"stats_parameter_represent\": lambda v: \"\",\n }", "title": "" }, { "docid": "d66ef3da7ce251402f14411a9a85b72e", "score": "0.70034957", "text": "def default_params():\n return {\n # variable initializer\n \"init_scale\": 0.1,\n \"initializer\": \"uniform\",\n\n # vocabulary file\n \"disable_vocab_table\": False,\n \"vocab_source\": \"\",\n \"vocab_target\": \"\",\n \"special_vocabs\": {\n \"unk\": vocab_utils.UNK,\n \"sos\": vocab_utils.SOS,\n \"eos\": vocab_utils.EOS},\n \"pad_to_eight\": False,\n\n # embedding config\n \"embedding.dim\": 512,\n \"embedding.share\": False,\n \"embedding.num_shards\": 1,\n \"src.embedding.multiply_mode\": None,\n \"src.embedding.initializer\": None,\n \"tgt.embedding.multiply_mode\": None,\n \"tgt.embedding.initializer\": None,\n\n # encoder and decoder\n \"encoder.class\": \"\",\n \"encoder.params\": {},\n \"decoder.class\": \"\",\n \"decoder.params\": {},\n\n # beam search config\n \"inference.use_sampling\": False,\n \"inference.beam_search.return_top_beam\": True,\n \"inference.beam_search.keep_finished\": True,\n \"inference.beam_search.stop_early\": False,\n \"inference.beam_search.beam_width\": 4,\n \"inference.beam_search.length_penalty_weight\": 1.0,\n \"inference.beam_search.coverage_penalty_weight\": 0.0,\n\n # loss\n \"word_level_loss\": True,\n \"label_smoothing_factor\": 0.1,\n \"weight_tying\": False,\n \"softmax_bias\": False,\n\n # optimizer\n \"optimizer.name\": \"sgd\",\n \"optimizer.params\": {\n # Arbitrary parameters for the optimizer\n # for Momentum optimizer\n \"momentum\": 0.99,\n \"use_nesterov\": True,\n # for Adam optimizer, tf default values\n \"beta1\": 0.9,\n \"beta2\": 0.999,\n \"epsilon\": 1e-08,\n # for MultistepAdam\n \"accumulate_steps\": 1\n },\n\n # learning_rate\n \"learning_rate.decay_steps\": 10000,\n \"learning_rate.decay_rate\": 0.98,\n \"learning_rate.start_decay_step\": 0,\n \"learning_rate.stop_decay_at\": tf.int32.max,\n \"learning_rate.min_value\": 1e-12,\n \"learning_rate.decay_staircase\": True,\n \"learning_rate.warmup_steps\": 0,\n \"learning_rate.constant\": 1.0,\n \"learning_rate.schedule\": \"\",\n\n # gradients clip\n \"max_grad_norm\": None,\n\n # guided attention loss\n \"guided_attention.weight\": 0.,\n \"guided_attention.loss_type\": \"ce\", # ce, mse or sqrt_mse\n \"guided_attention.decay_steps\": 10000,\n \"guided_attention.decay_rate\": 0.95,\n \"guided_attention.start_decay_step\": 0, # default: no decay\n \"guided_attention.stop_decay_at\": tf.int32.max,\n\n # clip gemm: MatMul and BatchMatMul (optional)\n \"clip_gemm.value\": None,\n \"clip_gemm.decay_steps\": 10000,\n \"clip_gemm.decay_rate\": 0.95,\n \"clip_gemm.start_decay_step\": 0, # default: no decay\n \"clip_gemm.stop_decay_at\": tf.int32.max,\n \"clip_gemm.min_value\": 1.0,\n \"clip_gemm.staircase\": True,\n \"clip_gemm.batch_matmul\": False,\n\n # quantization\n \"quant_bits\": None, # 16 or 8\n \"quant16_mul_bits\": 9, # 10 in Marian but overflow, so we use 9\n\n # float16 training\n \"mixed_precision\": False,\n \"mixed_precision.params\": {\n \"init_loss_scale\": 2.0 ** 10,\n \"incr_every_n_steps\": 2000,\n \"decr_every_n_nan_or_inf\": 2,\n \"incr_ratio\": 2,\n \"decr_ratio\": 0.5,\n \"fix_loss_scale\": False,\n },\n\n # mixture of softmax:\n 
\"mos_n_experts\": 0, # recommend 15\n }", "title": "" }, { "docid": "3adb33d689e91ba55c7bd2505493485d", "score": "0.69414616", "text": "def default_options(cls):\n return {\n 'std': 1.0, # the proposal standard deviation\n 'show_progress': True, # whether to show the progress\n }", "title": "" }, { "docid": "8b90d9b5650057e038e23e35246b7218", "score": "0.69142276", "text": "def get_service_default_param()->Dict:\n return {\n \"pv_max_power\": PV_MAX_POWER,\n \"meter_max_power\": METER_MAX_POWER,\n \"meter_min_power\": METER_MIN_POWER,\n \"output_folder\": OUTPUT_FOLDER,\n \"delta_time\": DELTA_TIME }", "title": "" }, { "docid": "6089c16d9ea55d0b9f81918773c003b6", "score": "0.68877393", "text": "def defaultParameters(self):\n return set(self._defaultParameters.keys())", "title": "" }, { "docid": "e0b4429fc6664245543ce56d127cdab4", "score": "0.6853373", "text": "def defaults(self):\n return {\n 'kafka': {},\n 'operators': []\n }", "title": "" }, { "docid": "f393eca4d104c5d60945537bf9cb2a4e", "score": "0.6806658", "text": "def defaults(cls):\n return {\n cls.PUBLIC_ATTRS: {},\n cls.FORMATTERS: {},\n cls.USE_QUERY_PATHS: True,\n cls.HAS_DRIVER_QUERIES: False,\n cls.EFFECTIVE_ENV: None,\n cls.SUPPORTS_THREADING: False,\n cls.SUPPORTS_ASYNC: False,\n cls.MAX_PARALLEL: 4,\n cls.FILTER_ON_CONNECT: False,\n }", "title": "" }, { "docid": "b68b3ea68d1136641984c8951a3f61d9", "score": "0.6766289", "text": "def get_base_params():\n return merge_dicts(\n Flickr32Evaluator.get_base_params(),\n {\n 'thresholds': {\n 'detection': 0.3,\n 'iou': 0.3\n }\n }\n )", "title": "" }, { "docid": "3a8004bf26405692ff8023a572412577", "score": "0.6747507", "text": "def defaults(self, ):\n\t\tpass", "title": "" }, { "docid": "c72d9ec6a0a356d8b544267801433b92", "score": "0.6733028", "text": "def get_settings_defaults(self):\n\t\treturn dict(\n sensor_name = '28-031455b66cff'\n\t\t)", "title": "" }, { "docid": "261c0fdefcbddc531e68f361e189e835", "score": "0.6731734", "text": "def defaults(self):\n return {}", "title": "" }, { "docid": "f64afe5f10bdce86a6ec571b4a7931b5", "score": "0.673151", "text": "def get_default_param(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "bde39876ca104702e1da1a4e0e411ec3", "score": "0.6724649", "text": "def default_parameters(cls):\n defaults = {}\n for c in cls.component_graph:\n component = handle_component_class(c)\n if component.default_parameters:\n defaults[component.name] = component.default_parameters\n return defaults", "title": "" }, { "docid": "576f2ae0dae4d5dde60a779b3751463e", "score": "0.6692136", "text": "def default_pypeit_par(cls):\n par = super().default_pypeit_par()\n\n # Scienceimage default parameters\n # Set the default exposure time ranges for the frame typing\n par['calibrations']['biasframe']['exprng'] = [None, 1]\n par['calibrations']['darkframe']['exprng'] = [999999, None] # No dark frames\n par['calibrations']['pinholeframe']['exprng'] = [999999, None] # No pinhole frames\n par['calibrations']['pixelflatframe']['exprng'] = [0, None]\n par['calibrations']['traceframe']['exprng'] = [0, None]\n par['calibrations']['arcframe']['exprng'] = [None, None]\n par['calibrations']['standardframe']['exprng'] = [1, 200]\n par['scienceframe']['exprng'] = [200, None]\n\n # Do not sigmaclip the arc frames for better Arc and better wavecalib\n par['calibrations']['arcframe']['process']['clip'] = False\n # Do not sigmaclip the tilt frames\n par['calibrations']['tiltframe']['process']['clip'] = False\n\n return par", "title": "" }, { "docid": 
"73d8ecabc56260cfd4e8929917b0d41d", "score": "0.66879636", "text": "def LoadDefaultParameters(self):\n #Initialize with two sides\n self.SetParameterValue(NUSECTIONS_PAR, 2)\n self.SetParameterValue(DP_CORR_PAR, 'OnePhase')\n self.SetParameterValue(ENERGYMODEL_PAR, LINEARQ_MODEL)\n self.SetParameterValue(AV_ENERGYMODELS_PAR, \"%s %s %s\" %(LINEARQ_MODEL, LINEART_MODEL, EQUALU_MODEL))", "title": "" }, { "docid": "4bedef9a1be887b96701e90e8db92aa9", "score": "0.6680803", "text": "def default_kwargs(self):\n return self._default_kwargs", "title": "" }, { "docid": "da6cc1124c8949a244e9cb7244d6f3ca", "score": "0.6651387", "text": "def node_default_marker_params(cls):\n return cls.NODE_MARKER_PARAMS", "title": "" }, { "docid": "cc404d0718f6721bf4f4aa8a6c59e06a", "score": "0.6616346", "text": "def default_parameters() -> df.Parameters:\n parameters = df.Parameters(\"BasicSplittingSolver\")\n parameters.add(\"theta\", 0.5, 0., 1.)\n parameters.add(\"apply_stimulus_current_to_pde\", False)\n parameters.add(\"pde_solver\", \"bidomain\")\n\n # Add default parameters from ODE solver, but update for V space\n ode_solver_parameters = BasicCardiacODESolver.default_parameters()\n parameters.add(ode_solver_parameters)\n\n pde_solver_parameters = BasicBidomainSolver.default_parameters()\n pde_solver_parameters[\"polynomial_degree\"] = 1\n parameters.add(pde_solver_parameters)\n\n pde_solver_parameters = BasicMonodomainSolver.default_parameters()\n pde_solver_parameters[\"polynomial_degree\"] = 1\n parameters.add(pde_solver_parameters)\n return parameters", "title": "" }, { "docid": "d16adf6310ed4f5155101ba90167987b", "score": "0.66074693", "text": "def get_default_parameters(cls, config):\n return {\n k: config.get(v)\n for k, v in cls.expected_parameters.items()\n }", "title": "" }, { "docid": "5b38e0ad6bb085221a18c89e8f4abd06", "score": "0.66021353", "text": "def __init_params(self):\n\n default = {\n \"reaction_autonomy_level\": 100,\n \"storage_timeout\": 10,\n \"evaluation_period\": 1,\n \"default/min_reaction_interval\": 10,\n \"default/reaction_timeout\": 30\n }\n for param in default:\n if not rospy.has_param(helper.ARNI_CTM_CFG_NS + param):\n rospy.set_param(helper.ARNI_CTM_CFG_NS + param, default[param])", "title": "" }, { "docid": "18d61799ec5d3ad43954bde9217161fd", "score": "0.659259", "text": "def get_default_dataset_config() -> Dict[str, Any]:\n default_config = {\n \"GT_TRACKS\": None, # tracker_name -> seq id -> frames\n \"PREDICTED_TRACKS\": None, # tracker_name -> seq id -> frames\n \"SEQ_IDS_TO_EVAL\": None, # list of sequences ids to eval\n \"CLASSES_TO_EVAL\": None,\n \"TRACKERS_TO_EVAL\": None,\n \"OUTPUT_FOLDER\": None, # Where to save eval results (if None, same as TRACKERS_FOLDER)\n \"OUTPUT_SUB_FOLDER\": \"\", # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER\n \"ZERO_DISTANCE\": 2,\n }\n return default_config", "title": "" }, { "docid": "61337c797fda76e25b260e69eb9e7ac6", "score": "0.65885127", "text": "def init_params(self):\n return {}", "title": "" }, { "docid": "471ab0ea223154aa65b1e4aa0e5f6bb2", "score": "0.65864635", "text": "def get_default_config(self):\n return {}", "title": "" }, { "docid": "b2d2ad8828bc9c339fb5ef18fab1459d", "score": "0.65864086", "text": "def default_para_dict(self):\n self.ranknet_para_dict = dict(model_id=self.model_id, sigma=1.0)\n return self.ranknet_para_dict", "title": "" }, { "docid": "c45bf3dbddd374136f7a18b6fb8250c0", "score": "0.6576221", "text": "def _default_experiment_options(cls) -> Options:\n options = 
super()._default_experiment_options()\n\n options.schedule = None\n options.amp = 0.2\n options.duration = 160\n options.sigma = 40\n options.reps = [1, 3, 5]\n options.betas = np.linspace(-5, 5, 51)\n\n return options", "title": "" }, { "docid": "e24f4cd1b0d8820d3c8762003dd77fac", "score": "0.6564074", "text": "def get_base_parameters(cls):\n return {\n \"num_clusters\": None\n }", "title": "" }, { "docid": "e5763466bdc85f78c5019c7e648d6214", "score": "0.6548591", "text": "def default_parameters() -> df.Parameters:\n parameters = df.Parameters(\"SplittingSolver\")\n parameters.add(\"theta\", 0.5, 0, 1)\n parameters.add(\"apply_stimulus_current_to_pde\", False)\n parameters.add(\"pde_solver\", \"bidomain\", {\"bidomain\", \"monodomain\"})\n\n # Add default parameters from ODE solver\n multicell_ode_solver_parameters = MultiCellSolver.default_parameters()\n parameters.add(multicell_ode_solver_parameters)\n\n pde_solver_parameters = BidomainSolver.default_parameters()\n pde_solver_parameters[\"polynomial_degree\"] = 1\n parameters.add(pde_solver_parameters)\n return parameters", "title": "" }, { "docid": "d22ddd2398bdd23d6e376bd8dc055702", "score": "0.653413", "text": "def default_hparams():\n\n return {\n 'pretrained_model_name': 'roberta-base',\n 'embed': {\n 'dim': 768,\n 'name': 'word_embeddings'\n },\n 'vocab_size': 50265,\n 'position_embed': {\n 'dim': 768,\n 'name': 'position_embeddings'\n },\n 'position_size': 514,\n\n 'encoder': {\n 'dim': 768,\n 'embedding_dropout': 0.1,\n 'multihead_attention': {\n 'dropout_rate': 0.1,\n 'name': 'self',\n 'num_heads': 12,\n 'num_units': 768,\n 'output_dim': 768,\n 'use_bias': True\n },\n 'name': 'encoder',\n 'num_blocks': 12,\n 'eps': 1e-12,\n 'poswise_feedforward': {\n 'layers': [\n {\n 'kwargs': {\n 'in_features': 768,\n 'out_features': 3072,\n 'bias': True\n },\n 'type': 'Linear'\n },\n {\"type\": \"BertGELU\"},\n {\n 'kwargs': {\n 'in_features': 3072,\n 'out_features': 768,\n 'bias': True\n },\n 'type': 'Linear'\n }\n ]\n },\n 'residual_dropout': 0.1,\n 'use_bert_config': True\n },\n 'hidden_size': 768,\n 'initializer': None,\n 'name': 'roberta_encoder',\n '@no_typecheck': ['pretrained_model_name']\n }", "title": "" }, { "docid": "cc2dcf0ce7790545422f0fe9938ae622", "score": "0.6520811", "text": "def load_default_parameters(self, config=0) -> None:\n print(\"\\t@ParameterControl: Loading default model parameters for test purpose...\")\n print(f\"\\t\\tConfiguration={config}\")\n if config == 0:\n self.num_periods = 48\n self.f_horizon = 1\n self.learning_rate = 0.001\n self.epochs = int(input(\"Training epochs: \"))", "title": "" }, { "docid": "89fc78718cd480ccc3006bffcb15e361", "score": "0.64782923", "text": "def _test_parameters_overrides(self):\n return {}", "title": "" }, { "docid": "dfc9dfb4e0663c4fcbede238ddbec94e", "score": "0.64656043", "text": "def default_options(self) -> dict[str, Any] | None:\n return None", "title": "" }, { "docid": "d4511efec4d7c618dbdb37bd38cd8d34", "score": "0.64573395", "text": "def default_opts():\n return tf.contrib.training.HParams(\n summarize=True,\n evaluate=True,\n train=True,\n training_progress_interval=0,\n average_accuracy_interval=100,\n sequence='0123', # Specify a filename, string or \"random\"\n sequence_length=8, # Length of generated sequence if using \"random\" mode\n example_type='random', # Specify: random, same, specific\n specific_examples=[1, 3, 5, 7, 2, 0, 13, 15, 17, 4] # Used with \"specific\", indexed by label\n )", "title": "" }, { "docid": "43a53a6b080aecdba529b52d5fed74c0", 
"score": "0.64476454", "text": "def config_defaults(cls):\n return {}", "title": "" }, { "docid": "e2993b3e11e6c321d9e2bc3bca957308", "score": "0.643546", "text": "def default_parameters() -> df.Parameters:\n parameters = df.Parameters(\"SplittingSolver\")\n parameters.add(\"theta\", 0.5, 0, 1)\n parameters.add(\"apply_stimulus_current_to_pde\", False)\n # parameters.add(\"pde_solver\", \"bidomain\", {\"bidomain\", \"monodomain\"})\n parameters.add(\"pde_solver\", \"bidomain\")\n parameters.add(\n \"ode_solver_choice\",\n \"CardiacODESolver\"\n )\n\n # Add default parameters from ODE solver\n ode_solver_parameters = CardiacODESolver.default_parameters()\n ode_solver_parameters[\"scheme\"] = \"RK4\"\n parameters.add(ode_solver_parameters)\n\n # Add default parameters from ODE solver\n basic_ode_solver_parameters = BasicCardiacODESolver.default_parameters()\n parameters.add(basic_ode_solver_parameters)\n\n pde_solver_parameters = BidomainSolver.default_parameters()\n pde_solver_parameters[\"polynomial_degree\"] = 1\n parameters.add(pde_solver_parameters)\n\n pde_solver_parameters = MonodomainSolver.default_parameters()\n pde_solver_parameters[\"polynomial_degree\"] = 1\n parameters.add(pde_solver_parameters)\n return parameters", "title": "" }, { "docid": "c2f5e280210d0f42e30d1c0b28e4ba5b", "score": "0.6430796", "text": "def default_para_dict(self):\n temperature = 0.2\n d_epoches, g_epoches = 1, 1\n ad_training_order = 'DG'\n # ad_training_order = 'GD'\n\n self.ad_para_dict = dict(model_id=self.model_id, d_epoches=d_epoches, g_epoches=g_epoches,\n temperature=temperature, ad_training_order=ad_training_order, loss_type='svm')\n return self.ad_para_dict", "title": "" }, { "docid": "259cc6d702665f638c87a5cbfe32d5dc", "score": "0.6427048", "text": "def get_fixed_params(self):\n\n fixed_params = {\n 'total_time_steps': 50,\n 'num_epochs': 100,\n 'multiprocessing_workers': 5\n }\n\n return fixed_params", "title": "" }, { "docid": "e6f5f8173178f182c9bdff4cfd0caee4", "score": "0.6415213", "text": "def _get_default_parameters_prior(self):\n prior = {}\n return prior", "title": "" }, { "docid": "361777841a34c350e456ffe3a3778893", "score": "0.64098996", "text": "def default_config(cls):\n config = Config()\n config['inputs'] = {}\n config['initial_block'] = {}\n config['body'] = {}\n config['head'] = {}\n config['predictions'] = None\n config['output'] = None\n config['optimizer'] = ('Adam', dict())\n config['decay'] = (None, dict())\n config['scope'] = ''\n config['common'] = {'batch_norm': {'momentum': .1}}\n\n return config", "title": "" }, { "docid": "4a602d9085298fd08d267e0701d31639", "score": "0.6401111", "text": "def edge_default_marker_params(cls):\n return cls.EDGE_MARKER_PARAMS", "title": "" }, { "docid": "4a3f577e1b8bae8e313c0905bd334ed9", "score": "0.6400935", "text": "def get_params(self):\n return {\n \"time_horizon\": self.time_horizon,\n 'seasonality': self.seasonality,\n 'time_lags': self.time_lags,\n \"lambda0\": self.lambda0,\n \"learning_rate\": self.learning_rate,\n 'theta': self.theta,\n 'window': self.window,\n 'epsilon': self.epsilon,\n 'alpha': self.alpha,\n 'maxiter': self.maxiter,\n }", "title": "" }, { "docid": "5abfa14cb42054728c23a6556ebc7d49", "score": "0.6399218", "text": "def params(self):\n params = {}\n for p in self.parms:\n params[p] = {\"default\": self.parms[p][\"default_value\"],\n \"actual\": self.parms[p][\"actual_value\"],\n \"input\": self.parms[p][\"input_value\"]}\n return params", "title": "" }, { "docid": "d30ee1dd326263e5cd8047768c8ae3f4", "score": 
"0.639763", "text": "def add_default_params(self, params):\r\n params['client_id'] = self.user_id\r\n params['api_key'] = self.key\r\n return params", "title": "" }, { "docid": "6d2b2d130b243c18267ff7943aa609e8", "score": "0.6395527", "text": "def get_default_parameters(sim_options):\n default_keys = [key for key in DEFAULTS_PENDULUM_MPC if key not in sim_options]\n return default_keys", "title": "" }, { "docid": "7d866c67539b60cccc48e016b0095b2e", "score": "0.6375004", "text": "def returnInitialParameters(self):\n iniParDict = dict(list(self.initOptionDict.items()) + list({'returnType':self.__class__.returnType,'qualityEstType':self.__class__.qualityEstType,'Features':self.features,\n 'Target':self.target,'returnType':self.__class__.returnType}.items()) + list(self.__returnInitialParametersLocal__().items()))\n return iniParDict", "title": "" }, { "docid": "ba1632a7b016738377cfad062c6691a9", "score": "0.637435", "text": "def default_hparams():\n return {\n \"name\": \"embedder\"\n }", "title": "" }, { "docid": "f49e2ee181eb09853cfc30c2fbb22d65", "score": "0.63635844", "text": "def default(self, attrs):\n self._default = config.ParameterSet(attrs)", "title": "" }, { "docid": "7226ca8135bac35eec03f4ce305d90a0", "score": "0.63584924", "text": "def c_opts_default(self):\n return (self.aggression,)", "title": "" }, { "docid": "c469d3c1c4650f2808b7c82d5581d118", "score": "0.6346358", "text": "def getInitParams(self):\n paramDict = {}\n for variable in self.toBeSampled.items():\n paramDict[\"sampled variable: \"+variable[0]] = 'is sampled using the distribution ' +variable[1]\n paramDict['limit' ] = self.limit\n paramDict['initial seed' ] = self.initSeed\n paramDict.update(self.localGetInitParams())\n\n return paramDict", "title": "" }, { "docid": "8137ec72a3d4a9e2ead31f1886e4ba24", "score": "0.63047487", "text": "def default_pars(self, var=False):\n p = {}\n # NIR\n p['R_v'] = 3.1 + var * normal(0, 0.6)\n # Optical\n p['ospline_x'] = 1e4 / np.array([4100., 4670., 5470., 6000.,\n 12200, 26500, 3e8])\n p['ospline_x'][-1] = 0.\n\n # UV\n p['c3'] = 3.23 + var * normal(0, 1.0)\n p['c4'] = 0.41 + var * normal(0, 0.16)\n p['x0'] = 4.596 + var * normal(0, 0.025)\n p['gamma'] = 0.99 + var * normal(0, 0.1)\n\n self.pardict = p\n self.selfupdate()", "title": "" }, { "docid": "71fa7d394111e7b7decedc5954c6adb1", "score": "0.62978697", "text": "def default_options():\n return dict(\n # CA grid shape\n shape_grades=dict(\n fast=(1024*4,)*2,\n mixed=(1024*2,)*2,\n slow=(1024,)*2\n ),\n # number of generations to compute\n gens_grades=dict(\n fast=50,\n mixed=100,\n slow=100\n ),\n int_types=('u1', 'i1'),\n threads_per_block=(8, ) * 2, # CUDA's blockDim\n random_seed=0,\n run_func_prefix='run_ca_'\n )", "title": "" }, { "docid": "a360e3fa78d388ecea8dc34d26523125", "score": "0.62901664", "text": "def default_pars(self, var=False):\n p = {}\n # UV\n p['c2'] = 2.264 + var * normal(0, 0.040)\n p['c3'] = 0.389 + var * normal(0, 0.110)\n p['c4'] = 0.46 + var * normal(0, 0.079)\n p['x0'] = 4.60\n p['gamma'] = 1.0\n # Optical\n p['ospline_x'] = 1e4 / np.array([3300., 4000., 5330])\n p['ospline_k'] = (np.array([2.05, 1.32, 0.0]) +\n var * normal(0, 1, 3) *\n np.array([0.17, 0.026, 0.008]))\n # NIR\n p['alpha'] = -1.84\n p['R_v'] = 3.1 + var * normal(0, 0.6)\n\n self.pardict = p\n self.selfupdate()", "title": "" }, { "docid": "5b935071ef4b3928e3489c0e843980e2", "score": "0.6289246", "text": "def defaults():\n\n #dummy = S3ReusableField(\"dummy_id\", \"integer\",\n # readable = False,\n # writable = False)\n\n return {}", 
"title": "" }, { "docid": "439277ef16035070d4f773bd09692646", "score": "0.6287942", "text": "def get_default_config(self):\n return None", "title": "" }, { "docid": "c96dde7b28821f411a984d8cba38fa8a", "score": "0.62845683", "text": "def default_pars(self, var=False):\n p = {}\n # UV\n p['c2'] = 0.998 + var * normal(0, 0.027)\n p['c3'] = 2.719 + var * normal(0, 0.137)\n p['c4'] = 0.40 + var * normal(0, 0.036)\n p['x0'] = 4.579 + var * normal(0, 0.007)\n p['gamma'] = 0.934 + var * normal(0, 0.016)\n # Optical\n p['ospline_x'] = 1e4 / np.array([3300., 4000., 5330])\n p['ospline_k'] = (np.array([2.05, 1.32, 0.0]) +\n var * normal(0, 1, 3) *\n np.array([0.17, 0.026, 0.008]))\n # NIR\n p['alpha'] = -1.84\n p['R_v'] = 3.1 + var * normal(0, 0.6)\n\n self.pardict = p\n self.selfupdate()", "title": "" }, { "docid": "eff00524a00ea4699137bcf191988681", "score": "0.62784445", "text": "def defaults(self):\n if self._defaults is _missing:\n return {}\n return self._defaults", "title": "" }, { "docid": "1500be403f23a37613eb0e0395b9ca26", "score": "0.6274131", "text": "def get_defaults(self):\n return dict(((p,d) for p,t,d in self.settings\n if d is not None and '*' not in p))", "title": "" }, { "docid": "d24f1bf071f94f61938a2ba112cc10d9", "score": "0.62692755", "text": "def default_pypeit_par(cls):\n par = super().default_pypeit_par()\n\n # 1D wavelength solution\n par['calibrations']['wavelengths']['lamps'] = ['NeI', 'HgI', 'HeI', 'ArI']\n par['calibrations']['wavelengths']['rms_threshold'] = 0.20\n par['calibrations']['wavelengths']['sigdetect'] = 5.\n par['calibrations']['wavelengths']['use_instr_flag'] = True\n\n return par", "title": "" }, { "docid": "4d9cd0e1609148ca762fdc7e9592228a", "score": "0.6251331", "text": "def default_config(self, hyperparams: Optional[Dict[str, Any]]) -> Any:", "title": "" }, { "docid": "b8d22290c6aea6de3ffb4774b8e8c82f", "score": "0.624673", "text": "def c_opts_default(self):\n return self.aggression", "title": "" }, { "docid": "eb3cc02e5f5bcc681032c7383127f076", "score": "0.6246299", "text": "def defaults(self):\n return {\n 'hostname': 'localhost',\n 'port': 9092,\n 'topic': 'dirbs'\n }", "title": "" }, { "docid": "1c6a380bbbd687e92408f566460d22a0", "score": "0.6244005", "text": "def _get_record_defaults():\n return (\"\", \"PASS\", [], {})", "title": "" }, { "docid": "45b769f9f19d7047191ac7d4702dc1b3", "score": "0.6240824", "text": "def parameters(self):\n return set(self._defaultParameters.keys() + self._additionalParameters)", "title": "" }, { "docid": "12577ff3661edab5c3af4f3c764f8cdf", "score": "0.62405556", "text": "def preprocessing_defaults() -> Dict[str, Any]:\n raise NotImplementedError", "title": "" }, { "docid": "f2f4df78ad488444c437ed5cb140c94a", "score": "0.62358487", "text": "def get_parameters(self):\n d = Algorithm.get_parameters(self)\n d.update({\n 'lifetime': self.lifetime,\n 'area_limit': self.area_limit,\n 'local_seeding_changes': self.local_seeding_changes,\n 'global_seeding_changes': self.global_seeding_changes,\n 'transfer_rate': self.transfer_rate\n })\n return d", "title": "" }, { "docid": "005fbfd8daf334ed60cf3175f3df8094", "score": "0.62356347", "text": "def set_default_parameters_for_species(cls):\n cls.parameters = cls.DEFAULT_PARAMETERS.copy()\n cls._set_params_as_attributes()", "title": "" }, { "docid": "a3957606795e26f6801fc0dda558b0d4", "score": "0.6235391", "text": "def _default_options(cls):\n default_options = super()._default_options()\n default_options.data_processor = dp.DataProcessor(\n input_key=\"counts\", 
data_actions=[dp.Probability(\"1\"), dp.BasisExpectationValue()]\n )\n default_options.curve_plotter = \"mpl_multiv_canvas\"\n default_options.xlabel = \"Flat top width\"\n default_options.ylabel = \"<X(t)>,<Y(t)>,<Z(t)>\"\n default_options.xval_unit = \"s\"\n default_options.style = curve.visualization.PlotterStyle(\n figsize=(8, 10),\n legend_loc=\"lower right\",\n fit_report_rpos=(0.28, -0.10),\n )\n default_options.ylim = (-1, 1)\n\n return default_options", "title": "" }, { "docid": "4e8e38516fade099d61ba1a2a8e0c5ab", "score": "0.6234773", "text": "def default_pars(self, var=False):\n p = {}\n # UV\n p['c2'] = 0.81\n p['c3'] = 2.99\n p['c4'] = 0.32\n p['c5'] = 6.10\n p['x0'] = 4.59\n p['gamma'] = 0.90\n # Optical\n p['ospline_x'] = 1e4/np.array([3300., 4000., 5330])\n p['ospline_k'] = (np.array([2.05, 1.32, 0.0]))\n # NIR\n p['alpha'] = 0-1.84\n p['R_v'] = 3.1\n\n if var:\n draws = normal(0, 1, 10)\n p['c2'] += draws[0] * 0.25\n p['c3'] += draws[1] * 1.0\n p['c4'] += draws[2] * 0.16\n p['c5'] += draws[3] * 0.6\n p['x0'] += draws[4] * 0.025\n p['gamma'] += draws[5] * 0.1\n p['ospline_k'] += draws[6:9] * np.array([0.17, 0.026, 0.008])\n p['R_v'] += draws[9] * 0.6\n\n self.pardict = p\n self.selfupdate(var=var)", "title": "" }, { "docid": "810d05fc144cc774918d9b0a15ef92e4", "score": "0.62292403", "text": "def get_dataset_default_flags(self):\n return {}", "title": "" }, { "docid": "df474f00153f2c767713f61000682049", "score": "0.6229171", "text": "def sync_defaults_parameter(self):\n for key in self.parameters_keys:\n if self.config:\n self.config.setValue(\"Defaults/\" + key, self.parameters_values[key])\n if self.project:\n self.sync_project_hydraulic_parameters()\n\n # record cross section parameters\n group = \"Defaults/\"\n xsect_name = self.parameters_values[self.xsection_key]\n xsect_type = CrossSectionShape[xsect_name]\n self.config.setValue(group + self.keyprefix_conduit + \"SHAPE\", self.xsection.shape.name)\n key_prefix = group + self.keyprefix_conduit + \"GEOM\"\n self.config.setValue(key_prefix + \"1\", str(self.xsection.geometry1))\n self.config.setValue(key_prefix + \"2\", str(self.xsection.geometry2))\n self.config.setValue(key_prefix + \"3\", str(self.xsection.geometry3))\n self.config.setValue(key_prefix + \"4\", str(self.xsection.geometry4))", "title": "" }, { "docid": "40475243172e30008d23fbbdb1580236", "score": "0.6220815", "text": "def specify_params(self):\n return {}", "title": "" }, { "docid": "06579c321365a6e5297b3cd3f6c649b9", "score": "0.62172383", "text": "def _fill_defaults(cls, dataset_config):\n\n if \"image_ordering\" not in dataset_config:\n dataset_config[\"image_ordering\"] = cls.DEFAULT_IMAGE_ORDERING_PATH\n\n if \"il_raw_features_path\" not in dataset_config:\n dataset_config[\"il_raw_features_path\"] =\\\n cls.DEFAULT_IL_RAW_FEATURES_PATH\n\n if \"il_features_path\" not in dataset_config:\n dataset_config[\"il_features_path\"] = cls.DEFAULT_IL_FEATURES_PATH\n\n if \"index_features_path\" not in dataset_config:\n dataset_config[\"index_features_path\"] =\\\n cls.DEFAULT_INDEX_FEATURES_PATH\n\n if \"index_dir\" not in dataset_config:\n dataset_config[\"index_dir\"] = cls.DEFAULT_INDEX_DIR\n\n if \"il_n_processes\" not in dataset_config:\n dataset_config[\"il_n_processes\"] =\\\n BlackthornFeatures.DEFAULT_N_PROCESSES\n\n if \"il_n_feat_per_image\" not in dataset_config:\n dataset_config[\"il_n_feat_per_image\"] =\\\n BlackthornFeatures.DEFAULT_N_FEAT_PER_IMG\n\n if \"index_n_submat\" not in dataset_config:\n dataset_config[\"index_n_submat\"] = 
CollectionIndex.DEFAULT_N_SUBMAT\n\n return dataset_config", "title": "" }, { "docid": "943c5090f04f877c515ea18e17b96f3f", "score": "0.62140214", "text": "def returned_defaults(self):\r\n return self.context.returned_defaults", "title": "" }, { "docid": "41a7b8b7a571d714b6415fa190f3b9bd", "score": "0.6190649", "text": "def default_hparams():\n return {\n 'pretrained_model_name': None,\n 'name': \"pretrained_base\",\n '@no_typecheck': ['pretrained_model_name']\n }", "title": "" }, { "docid": "e786d3d32734eadf90ec2ab1178b4f4c", "score": "0.61902076", "text": "def __init__(self, defaults={}, data=None):\n\n super().__init__(defaults={**NVT_Parameters.parameters, **defaults}, data=data)", "title": "" } ]
2d61504f4f2f4eb5f0650c9977e4266b
Send all available pages to frontend
[ { "docid": "83b799e7681ebd13a03275005bae9630", "score": "0.5553395", "text": "def all_reed_pages(id):\n if not g.user:\n flash('You have to Login')\n return redirect('/')\n\n \n book = Book.query.get_or_404(id)\n\n if book.user_id != g.user.id:\n flash(\"This is not your Book\")\n return redirect('/books')\n else:\n \n pages = [page.serialize() for page in book.pages]\n\n\n return jsonify(pages=pages)", "title": "" } ]
[ { "docid": "391186b4f0f909aa1e6774c9c86f8638", "score": "0.63025695", "text": "def send_pages(self, response):\n # The internal 'overview' page\n pages = {\"main\": OVERVIEW_ID,\n \"pages\": [OVERVIEW_ID],\n \"names\": [OVERVIEW_ID.title()]}\n\n # Prepare the list of pages\n for page_id in sorted(self._pages):\n pages['pages'].append(page_id)\n pages['names'].append(self._pages[page_id].get_title())\n\n # Send the response as a JSON object\n response.send_content(200, json.dumps(pages), \"application/json\")", "title": "" }, { "docid": "2d1738fcd513cb07fb6ed26dd36eb820", "score": "0.6155635", "text": "def page_home():\n\n states = models.storage.all('State').values()\n states = sorted(states, key=lambda s: s.name)\n cities = {\n state.id: sorted(state.cities, key=lambda c: c.name)\n for state in states\n }\n amenities = models.storage.all('Amenity').values()\n amenities = sorted(amenities, key=lambda a: a.name)\n places = models.storage.all('Place').values()\n places = sorted(places, key=lambda p: p.name)\n ret = Response()\n ret.headers['Content-Type'] = 'text/html; charset=latin1'\n ret.data = render_template(\n '100-hbnb.html',\n states=states,\n cities=cities,\n amenities=amenities,\n places=places\n ).encode('latin1')\n return ret", "title": "" }, { "docid": "23dcb90f2ac4374855ce66eee2027ca4", "score": "0.5953611", "text": "def _visit_pages(self):\n html = self.get_page_content_str(self._seed_url)\n self._extract_data(html)", "title": "" }, { "docid": "2a56046caf74a4a703edb602125f52e5", "score": "0.59095293", "text": "def pages():\n print \"accessing the /pages route\"\n files = filter(os.listdir('./templates'), '*.html')\n pages = [{'name':file, 'html':render_template(file)} for file in files]\n return jsonify(collection=pages)", "title": "" }, { "docid": "dcc93ffb203af9fdd7784b1126b35412", "score": "0.5861242", "text": "def list_maff_pages(pages):\n return render_template('maff_index.html',\n sitename=host.name,\n is_local=is_local_access(),\n base=request.script_root,\n path=request.path,\n pages=pages,\n )", "title": "" }, { "docid": "dd2fe6413db0267e4d2728bede7d90cc", "score": "0.5820322", "text": "def render_pages(self, sender):\r\n mkdir_p(Page.out_dir)\r\n\r\n for page in self.pages:\r\n self.render_to(page.out, Page.template, page=page)\r\n logger.success(\"Pages rendered\")", "title": "" }, { "docid": "d18db2c342c2fce4012a18bc283248a0", "score": "0.58201826", "text": "def page_home():\n\n index_page = \"./client/build/index.html\"\n app.static_folder = \"./client/build/static\"\n # index_page = \"./user-interface/index.html\"\n with open(index_page, encoding=\"UTF8\") as f:\n return \"\\n\".join(f.readlines())", "title": "" }, { "docid": "c5faa97c1d47a461401f9334ea05d97e", "score": "0.58109266", "text": "def get_all():\n\n return render_template(\"index.html\")", "title": "" }, { "docid": "d6702d54d659a73361091bf03bffd2e1", "score": "0.5798605", "text": "def _visit_pages(self):\n for single_url in self._seed_url:\n html = self.get_page_content_str(single_url)\n self._extract_data(html)", "title": "" }, { "docid": "4a410e1239d47d255c52a0278387ecfd", "score": "0.5795683", "text": "def indexpage():\n return jsonify({\n 'gages': url_for('api.get_gages', _external=True),\n 'sensors': url_for('api.get_sensors', _external=True),\n 'samples': url_for('api.get_samples', _external=True),\n 'rivers': url_for('api.get_rivers', _external=True),\n 'sections': url_for('api.get_sections', _external=True),\n 'regions': url_for('api.get_regions', _external=True),\n })", "title": "" }, { 
"docid": "56b6a7d8ae1387f2795b341042635919", "score": "0.5770798", "text": "def web_pagina():\n seq = request.args.get(\"seq\", '')\n seq = seq.upper()\n if check_dna(seq):\n bio_dna = Seq(seq, generic_dna)\n return render_template(\"afvink4.html\", soort='DNA',\n een=(bio_dna.transcribe()),\n twee=(bio_dna.translate()))\n elif check_rna(seq):\n bio_rna = Seq(seq, generic_rna)\n return render_template(\"avink4.html\",\n soort='RNA',\n een=(bio_rna.back_transcribe()),\n twee=(bio_rna.translate()))\n\n elif check_eiwit(seq):\n return render_template(\"afvink4.html\",\n soort='Eiwit',\n een=\"Klik op de link en druk op BLAST.\",\n twee=\"https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastp&PAGE_TYPE=BlastSearch&QUERY=\" + str(seq))\n else:\n return render_template(\"afvink4.html\",\n soort = 'Geen DNA, RNA of eiwit',\n een='',\n twee='')", "title": "" }, { "docid": "619e6149776072bc20cf269d817b715b", "score": "0.5764181", "text": "def generate_webpages(self):\n self.webpage_object.generate_webpages()", "title": "" }, { "docid": "619e6149776072bc20cf269d817b715b", "score": "0.5764181", "text": "def generate_webpages(self):\n self.webpage_object.generate_webpages()", "title": "" }, { "docid": "619e6149776072bc20cf269d817b715b", "score": "0.5764181", "text": "def generate_webpages(self):\n self.webpage_object.generate_webpages()", "title": "" }, { "docid": "20567627697d93f3d5cce66e75df7974", "score": "0.57618296", "text": "def save_webpage(self, pages):", "title": "" }, { "docid": "b113acb6963b9e3eadd235fb60ca8160", "score": "0.5672078", "text": "def do_list(self, args):\n self.player.show_curr_page()", "title": "" }, { "docid": "d7c122c97a3bdad32138ae12d086a064", "score": "0.5660964", "text": "def home(request):\n # go to the home page\n defaultOffer()\n if request.method == 'GET':\n return render(request, '../templates/main/index.html', status = 200)\n else:\n return HttpResponse(BadRequestMessage, status = 405)", "title": "" }, { "docid": "dd346a71763e22f36b370b55491198d0", "score": "0.5618166", "text": "def index():\n return index_page()", "title": "" }, { "docid": "631442184d29a335a4c661f0606500b5", "score": "0.561458", "text": "def page_index(request):\n contribs = Contribution.objects.filter(published=True).filter(public_from__lte=timezone.now()).order_by('-public_from')[:5]\n return render(request, 'pastvina/index.html', {\n 'contribs': contribs,\n 'navbar_absolute_pos': True,\n })", "title": "" }, { "docid": "15af79abe85dde0a521025ab8ceff3ac", "score": "0.56079745", "text": "def index():\n litems = []\n # litems = rootItems()\n igay = __imgsearch__.replace('search.', 'fgaytube.')\n iph = __imgsearch__.replace('search.', 'fpornhub.')\n irt = __imgsearch__.replace('search.', 'fredtube.')\n isw = __imgsearch__.replace('search.', 'fspankwire.')\n it8 = __imgsearch__.replace('search.', 'ftube8.')\n ixt = __imgsearch__.replace('search.', 'fxtube.')\n iyp = __imgsearch__.replace('search.', 'fyouporn.')\n pgay = plugin.url_for(siteroot, sitename=\"gaytube\")\n pph = plugin.url_for(siteroot, sitename=\"pornhub\")\n prt = plugin.url_for(siteroot, sitename=\"redtube\")\n psw = plugin.url_for(siteroot, sitename=\"spankwire\")\n pt8 = plugin.url_for(siteroot, sitename=\"tube8\")\n pxt = plugin.url_for(siteroot, sitename=\"xtube\")\n pyp = plugin.url_for(siteroot, sitename=\"youporn\")\n\n DOSTR8 = plugin.get_setting(key='dostr8')\n if not (DOSTR8 == True or DOSTR8 == 'true'):\n # IF the STR8 Setting is turned on no point in including the GAYTUBE site as it's gay specific content only\n item = 
{'label': 'Gaytube', 'icon': igay, 'thumb': igay, 'path': pgay}\n litems.append(item)\n item = {'label': 'Pornhub', 'icon': iph,'thumb': iph, 'path': pph}\n litems.append(item)\n item = {'label': 'Redtube', 'icon': irt,'thumb': irt, 'path': prt}\n litems.append(item)\n item = {'label': 'Spankwire', 'icon': isw, 'thumb': isw, 'path': psw}\n litems.append(item)\n item = {'label': 'Tube8', 'icon': it8, 'thumb': it8, 'path': pt8}\n litems.append(item)\n item = {'label': 'Xtube', 'icon': ixt, 'thumb': ixt, 'path': pxt}\n litems.append(item)\n item = {'label': 'YouPorn', 'icon': iyp, 'thumb': iyp, 'path': pyp}\n litems.append(item)\n allitems = []\n for li in litems:\n li.setdefault(li.keys()[0])\n allitems.append(li)\n litems = sorted(allitems, key=lambda allitems: allitems['label'])\n return litems", "title": "" }, { "docid": "99f004007ce20ae945a4c1195f99fff4", "score": "0.5557354", "text": "def send_page_content(id):\n if not g.user:\n flash('You have to Login')\n return redirect('/')\n\n page = Page.query.get_or_404(id)\n book = Book.query.get_or_404(page.book_id)\n\n if book.user_id != g.user.id:\n flash(\"This is not your Book\")\n return redirect('/books')\n else:\n return jsonify(page=page.serialize())", "title": "" }, { "docid": "8e0e32c5657272c6e4c72d15b7d5f096", "score": "0.55360204", "text": "def home(request):\n less_resource.need()\n return {}", "title": "" }, { "docid": "6a6d567507df5b491d50d35f00b2cb4c", "score": "0.5531199", "text": "def web_pagina():\n seq = request.args.get(\"seq\", '')\n seq = seq.upper()\n # Checkt met andere funcie of het DNA is\n if check_dna(seq):\n bio_dna = Seq(seq, generic_dna)\n # Wanneer DNA, returnd hij dat het DNA is en\n # Geeft hij de bijbehoorende RNA en eiwit streng.\n return render_template(\"Afvink4.html\",\n soort='DNA',\n een=(bio_dna.transcribe()),\n twee=(bio_dna.translate()))\n # Wanner het geen DNA is kijkt hij of het RNA is\n elif check_rna(seq):\n bio_rna = Seq(seq, generic_rna)\n # Wanneer RNA, returnd hij dat het RNA is en\n # Geeft hij de bijbehoorende DNA en eiwit streng.\n return render_template(\"Afvink4.html\",\n soort='RNA',\n een=(bio_rna.back_transcribe()),\n twee=(bio_rna.translate()))\n\n # als het zowel geen DNA als RNA is kijkt hij of het een eiwit is\n elif check_eiwit(seq):\n # Wanneer eiwit, returnd hij dat het een eiwit is en\n # geeft hij een link naar de ncbi website met ingevulde resultaat zodat\n # je de eiwit sequentie kan blasten\n return render_template(\"Afvink4.html\",\n soort='Eiwit',\n een=\"klik op de link en druk op blast\",\n twee=\"https://blast.ncbi.nlm.nih.gov/Blast.cgi?PROGRAM=blastp&PAGE_TYPE=BlastSearch&QUERY=\" + str(seq))\n # Als het zowel geen DNA, RNA of eiwit is dan returnd hij dat het\n # geen DNA, RNA of eiwit is\n else:\n\n return render_template(\"Afvink4.html\",\n soort = 'Geen DNA, RNA of eiwit',\n een='',\n twee='')", "title": "" }, { "docid": "73b70ee9fd550f14823b20eb9c0f39cd", "score": "0.55172485", "text": "def get_all_pages() -> DISTILL_RETURN:\n for location in __get_all_files(Path(\"pydis_site\", \"apps\", \"content\", \"resources\")):\n yield {\"location\": location}", "title": "" }, { "docid": "21bc24e03909073d7dfd36bda5037b95", "score": "0.54927194", "text": "def index():\n redirect(URL('list'))", "title": "" }, { "docid": "ac5f9da1669ed74a09de606f6bd0ffd9", "score": "0.54749966", "text": "def index():\n return bad_request(gettext('This URL cannot be requested directly.'))", "title": "" }, { "docid": "4d7ecd4db3021ca173f4ddeb02085f95", "score": "0.54633296", "text": "def 
admin_page():\r\n pass", "title": "" }, { "docid": "85aeb84a137154865e2ba695c738a83c", "score": "0.54224724", "text": "def start_requests(self):\n\t\tlinks = ['web', 'url', 'list']\n\t\tfor link in links:\n\t\t\turl = link[0]\n\t\t\timage_name = 'file-name'\n\t\t\tyield SplashRequest(\n\t\t\t\t\t\t\t\t\turl=url,\n\t\t\t\t\t\t\t\t\tcallback=self.parse,\n\t\t\t\t\t\t\t\t\tdont_filter=True,\n\t\t\t\t\t\t\t\t\targs={\n\t\t\t\t\t\t\t\t\t\t\t\"html\": 1,\n\t\t\t\t\t\t\t\t\t\t\t\"png\": 1,\n\t\t\t\t\t\t\t\t\t\t\t'wait': 15,\n\t\t\t\t\t\t\t\t\t\t\t'url':url,\n\t\t\t\t\t\t\t\t\t\t\t'render_all': 1\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tendpoint='render.json',\n\t\t\t\t\t\t\t\t\tmeta={'image_name': image_name}\n\t\t\t\t\t\t\t\t)", "title": "" }, { "docid": "416bc11985fbd35a341e13f49c725c6c", "score": "0.54191816", "text": "def render_pages(self):\n for page in self.root.iter_pages():\n rendered_page = page.render()\n with open(self.output_folder / page.path.with_suffix('.html'), 'w') as f:\n f.write(rendered_page)", "title": "" }, { "docid": "fdb9f6484e5666ce62bb0a335c177b50", "score": "0.54188687", "text": "def do_GET(self):\n\t\t#print(\"GET RECEIVED\")\n\t\tif self.path[0] == \"/\":\n\t\t\tself.path = self.path[1:]\n\t\tif self.path == \"\":\n\t\t\tself.path = \"home\"\n\n\t\tpage,ext = self.pageToShow(self.path)\n\t\tif ext in (\".html\", \".css\"):\n\t\t\tself.send_response(200)\n\t\t\tself.send_header('Content-type', types_map[ext])\n\t\t\tself.end_headers()\n\t\t\tself.wfile.write(page)", "title": "" }, { "docid": "eee47c1b41fe5916a5054baab9c9a11d", "score": "0.54137254", "text": "def test_all_homepage_items(self):\n pass", "title": "" }, { "docid": "324681a0adb30974fa487bdb3ad8faae", "score": "0.54065853", "text": "def main_page(request):\n if request.method == 'GET':\n return render(request, 'inception/index.html')", "title": "" }, { "docid": "e133d3e5dc73ad54710e0dc7ba88bed1", "score": "0.54065543", "text": "def main():\n msg_body = fetch_and_preprocess_page()\n publish(msg_body)\n log(msg_body)", "title": "" }, { "docid": "c1969e2abb0d20d0d9be939c7d6febff", "score": "0.5403979", "text": "def instellingen():\n return Response(index_file, mimetype='text/html',\n headers=Headers({'Cache-Control': 'max-age=60'}))", "title": "" }, { "docid": "d871e2822920fca9ceb8fee6118245f6", "score": "0.5398492", "text": "def collect():\n all_page = PageInfo.query.filter_by().order_by(PageInfo.content_time.desc()).limit(3)\n page_list = index_deal_page(all_page, False)\n is_admin_page = PageInfo.query.filter_by(is_admin_good=True).order_by(PageInfo.content_time.desc()).limit(10)\n admin_good_page = index_deal_page(is_admin_page, True)\n return render_template('main/workplace.html', page_list=page_list, admin_good_page=admin_good_page)", "title": "" }, { "docid": "8cd072ad872ca1699d43de15c47c70f6", "score": "0.53970796", "text": "def getFacebookFanpage():", "title": "" }, { "docid": "ffb7a85f8b8ece1e918c59d904b92423", "score": "0.53892857", "text": "def index(request):\n\treturn render(request, 'ab_ui/index.html')", "title": "" }, { "docid": "6929fba7702531a0f41f7a03c2b4fd7e", "score": "0.53794736", "text": "def render_html():\n render('www/instances.json', 'in/index.html.mako', 'www/index.html')", "title": "" }, { "docid": "7cf903d91adbe0681545be4f5862fe28", "score": "0.5374177", "text": "def index(self):\n abort(404)", "title": "" }, { "docid": "a3fab4ef91e3fbe8fb4c1ce71a0f6f45", "score": "0.5373379", "text": "def index(request):\n post = Post.objects.all()\n public = Social_Amenities.objects.all()\n 
return render(request, 'all/index.html', {\"post\": post, \"public\": public})", "title": "" }, { "docid": "9ed3defa8c74244ec6c694544d2cf8c5", "score": "0.5371307", "text": "def landing_page(self, **kwargs) -> LandingPage:\n ...", "title": "" }, { "docid": "da6dee095a73ec6078ef2f2b588d84e9", "score": "0.5371292", "text": "def build_pages():\n legal_pages = [\"cookies\", \"copyright\", \"privacy\"]\n legal_pages_output_path = Path(current_app.config[\"SITE_PATH\"]).joinpath(\"legal\")\n\n print(f\"{len(legal_pages)} legal pages to generate.\")\n _legal_pages_count = 1\n for legal_page in legal_pages:\n print(f\"# Legal page {_legal_pages_count}/{len(legal_pages)}\")\n legal_page_output_path = legal_pages_output_path.joinpath(f\"{legal_page}/index.html\")\n legal_page_output_path.parent.mkdir(exist_ok=True, parents=True)\n\n with open(str(legal_page_output_path), mode=\"w\") as legal_page_file:\n legal_page_file.write(render_template(f\"app/_views/legal/{legal_page}.j2\"))\n print(f\"Ok. Generated legal page for '{legal_page}'.\")\n _legal_pages_count += 1\n print(f\"Ok. {len(legal_pages)} legal pages generated.\")\n\n feedback_page_output_path = Path(current_app.config[\"SITE_PATH\"]).joinpath(\"feedback/index.html\")\n feedback_page_output_path.parent.mkdir(exist_ok=True, parents=True)\n with open(str(feedback_page_output_path), mode=\"w\") as feedback_page_file:\n feedback_page_file.write(render_template(f\"app/_views/feedback.j2\"))\n print(f\"Ok. feedback page generated.\")", "title": "" }, { "docid": "6f630fdf5d5706c6f5ddbc52b70f9e12", "score": "0.53550714", "text": "def walk_pages(self):\n page = self.homepage\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n while page.next_page:\n page.set_active(False)\n page = page.next_page\n page.set_active()\n self.url_context.set_current_url(page.abs_url)\n self.file_context.set_current_path(page.input_path)\n yield page\n page.set_active(False)", "title": "" }, { "docid": "9e4701f7d8b4a4bd9d0e9469dca4a1bf", "score": "0.5340199", "text": "def html(self, page):\n\n # -- Parse accept-language header\n if not self.headers.has_key(\"accept-language\"):\n a = []\n else:\n a = self.headers[\"accept-language\"]\n a = a.split(',')\n a = [e.split(';q=') for e in a]\n a = [(lambda x: len(x)==1 and (1, x[0]) or\n (float(x[1]), x[0])) (e) for e in a]\n a.sort()\n a.reverse()\n a = [x[1] for x in a]\n # now a is an ordered list of preferred languages\n\n # -- Choose the appropriate translation dictionary (default is english)\n lang = \"en\"\n for l in a:\n if translations.has_key(l):\n lang = l\n break\n dico = copy.copy(translations[lang])\n\n # -- Set message and picture\n if message:\n dico[\"message\"] = ('<div id=\"message\">%s</div>' %\n message)\n else:\n dico[\"message\"] = \"\"\n\n if picture != None:\n dico[\"divpicture\"] = self.divpicture\n else:\n dico[\"divpicture\"] = \"\"\n\n # -- Possibly provide download links\n links = \"\"\n names = self.published_files()\n if names:\n for name in names:\n links += '<a href=\"/%s\">%s</a>' % (\n urllib.quote(name.encode('utf-8')),\n name)\n links = '<div id=\"files\">' + links + '</div>'\n dico[\"files\"] = links\n\n # -- Add a link to discover the url\n if self.client_address[0] == \"127.0.0.1\":\n dico[\"port\"] = self.server.server_port\n dico[\"ssl\"] = int(certfile is not None)\n dico[\"linkurl\"] = linkurltmpl % dico\n else:\n dico[\"linkurl\"] = \"\"\n\n return templates[page] % dico", "title": "" }, { 
"docid": "489ec0d0308520355b75d4454ddca879", "score": "0.5331332", "text": "def landingpage():\n TRENDING_LIMIT = 6\n trending_movies = get_trending_movies()[:TRENDING_LIMIT]\n movie_ids = [m.movie_id for m in trending_movies]\n movie_names = [m.movie_name for m in trending_movies]\n movie_image_urls = get_movie_poster_image_urls(trending_movies)\n\n trending_tvs = get_trending_movies(\"tv\")[:TRENDING_LIMIT]\n tv_ids = [m.movie_id for m in trending_tvs]\n tv_names = [m.movie_name for m in trending_tvs]\n tv_image_urls = get_movie_poster_image_urls(trending_tvs)\n\n if current_user.is_authenticated:\n favorite_movies = get_movie_favorites(TRENDING_LIMIT)\n fav_movie_ids = [m.movie_id for m in favorite_movies]\n fav_movie_names = [m.movie_name for m in favorite_movies]\n fav_movie_image_urls = get_movie_poster_image_urls(favorite_movies)\n\n favorite_tvs = get_tv_favorites(TRENDING_LIMIT)\n fav_tv_ids = [m.movie_id for m in favorite_tvs]\n fav_tv_names = [m.movie_name for m in favorite_tvs]\n fav_tv_image_urls = get_movie_poster_image_urls(favorite_tvs)\n\n return render_template(\n \"static.html\", no_of_movies=list(range(TRENDING_LIMIT)),\n no_of_fav_movies=list(range(len(favorite_movies))),\n no_of_fav_tvs=list(range(len(favorite_tvs))),\n movie_image_urls=movie_image_urls, movie_ids=movie_ids, movie_names=movie_names,\n tv_image_urls=tv_image_urls, tv_ids=tv_ids, tv_names=tv_names,\n fav_movie_image_urls=fav_movie_image_urls, fav_movie_ids=fav_movie_ids, fav_movie_names=fav_movie_names,\n fav_tv_image_urls=fav_tv_image_urls, fav_tv_ids=fav_tv_ids, fav_tv_names=fav_tv_names,\n )\n else:\n return render_template(\n \"static.html\", no_of_movies=list(range(TRENDING_LIMIT)),\n movie_image_urls=movie_image_urls, movie_ids=movie_ids, movie_names=movie_names,\n tv_image_urls=tv_image_urls, tv_ids=tv_ids, tv_names=tv_names,\n )", "title": "" }, { "docid": "38cb79e2d492bcfa50914bda2753dc5b", "score": "0.53256226", "text": "def all_hot(page):\n posts = misc.getPostList(\n misc.postListQueryBase(isSubMod=current_user.can_admin), \"hot\", page\n )\n\n return engine.get_template(\"index.html\").render(\n {\n \"posts\": posts,\n \"sort_type\": \"home.all_hot\",\n \"page\": page,\n \"subOfTheDay\": misc.getSubOfTheDay(),\n \"changeLog\": misc.getChangelog(),\n \"ann\": misc.getAnnouncement(),\n \"kw\": {},\n }\n )", "title": "" }, { "docid": "48000d0fe1633ec8a64047872e727afa", "score": "0.53233874", "text": "def scrapeResourcePages(self):\n return None", "title": "" }, { "docid": "ae65c51f3b956ce90a4a7a4ada0bb487", "score": "0.53183377", "text": "def do_GET(self):\n\n # Allow upstream web server to tell in which location our pages are\n prefix = self.headers.get('X-Status-Server-Location', \"\")\n\n # What pages we serve\n paths = {\n \"{}/\".format(prefix): report_generator.index,\n \"{}/accounts\".format(prefix): report_generator.accounts,\n \"{}/addresses\".format(prefix): report_generator.addresses,\n \"{}/transactions\".format(prefix): report_generator.transactions,\n \"{}/wallets\".format(prefix): report_generator.wallets,\n \"{}/network_transactions\".format(prefix): report_generator.network_transactions,\n \"{}/error\".format(prefix): report_generator.error,\n }\n\n func = paths.get(self.path)\n if not func:\n self.send_error(404)\n return\n\n buf = BytesIO()\n\n try:\n # http://www.macfreek.nl/memory/Encoding_of_Python_stdout\n writer = codecs.getwriter('utf-8')(buf, 'strict')\n self.nav(writer)\n func(writer)\n\n except Exception as e:\n logger.error(\"Could not process page %s: %s\", 
self.path, e)\n logger.exception(e)\n self.send_response(500, \"Internal server error\")\n self.send_header(\"Content-type\", \"text/html; charset=utf-8\")\n self.end_headers()\n return\n\n self.send_response(200, \"OK\")\n self.send_header(\"Content-type\", \"text/html; charset=utf-8\")\n self.end_headers()\n self.wfile.write(buf.getvalue())", "title": "" }, { "docid": "666d8bc1fe18bcd6e952fc38a007c426", "score": "0.5316845", "text": "def Home_Page():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/&lt;start&gt; where start is a date in YYYY-MM-DD format <br>\" \n f\"/api/v1.0/&lt;start&gt;/&lt;end&gt; where start and end are dates in YYYY-MM-DD format\"\n \n )", "title": "" }, { "docid": "6efdd6b06135dd1a996c0d58b92524b6", "score": "0.5313658", "text": "def allPlugs(self):\n \n pass", "title": "" }, { "docid": "86e82fa52737a7ab560777ac74adb1c9", "score": "0.52994764", "text": "def index(self):\n\t\tpage = '''\n\t\t\t<html>\n\t\t\t\t<title>Simple pokemon</title>\n\t\t\t\t<body>\n\t\t\t\t\tWelcome to the very simple pokemon webpage<br/>\n\t\t\t\t\t<br>\n\t\t\t\t\t<a href=\"generation\">Generations</a><br/>\n\t\t\t\t\t<a href=\"about\">About</a>\n\t\t\t\t</body>\n\t\t\t</html>'''\n\t\treturn page", "title": "" }, { "docid": "982e86c4e201ed0557a355063cc5a648", "score": "0.52958906", "text": "def dash_index(request, **kwargs):\n #print('##################after processing /viz/')\n return HttpResponse(dispatcher(request), content_type='text/html')", "title": "" }, { "docid": "9fe2b82ed398d2b86048bb83a1a2b756", "score": "0.5289438", "text": "def _page_instances(self):\n return self._open(self.app.page_instances)", "title": "" }, { "docid": "5b7c5a2ae95f3f8dd9b7a2ca092dcf2e", "score": "0.52831304", "text": "def scraper_dealer(url):\n url_pages = []\n http = tools.get_html(url)\n html = http[2]\n http_code = http[1]\n if html is not None:\n pages = html.cssselect('#main > div.module.filter.alpha_filter > div.page_nav > div > div.pages > ul > li')\n if pages:\n q = len(pages)\n else:\n q = 1\n for i in range(0, q):\n url_pages.append('{0}?view=condensed&page={1}'.format(url, i))\n return url_pages, http_code", "title": "" }, { "docid": "52e5394639254a7064dca0c796800465", "score": "0.5279099", "text": "def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://nso.nato.int/nso/nsdd/'\n starting_url = base_url + 'ListPromulg.html'\n\n global driver\n options = webdriver.ChromeOptions()\n options.add_argument('--headless')\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--disable-gpu\")\n options.add_argument(\"--start-maximized\")\n options.add_argument(\"--disable-dev-shm-usage\")\n options.add_argument(\"--disable-setuid-sandbox\")\n driver = webdriver.Chrome(options=options)\n yield starting_url", "title": "" }, { "docid": "f7453b6dae1a6ad05c1d560698669e89", "score": "0.5277642", "text": "def show_home():\n return redirect(url_for('winebrand_api.show'))", "title": "" }, { "docid": "3f039897ae32f1195577ca44e71ff9f8", "score": "0.52715397", "text": "def scrape_everything(self):", "title": "" }, { "docid": "542290903113fa1ecdeb131af7558bd3", "score": "0.52656376", "text": "def start_requests(self):\n urls = [\n \"https://applicationspub.unil.ch/interpub/noauth/php/Ud/index.php?v_langue=fr&v_isinterne=&v_ueid=174\"\n ]\n\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)", "title": "" }, { "docid": "4412b4e1e776715ac6b8e5674dce9290", "score": "0.5264705", 
"text": "def index():\n return send_from_directory('.', 'index.html')", "title": "" }, { "docid": "cf3d9a7b382a80dd0161c2f36e9827cd", "score": "0.5262661", "text": "def index():\r\n aaa.require(fail_redirect='/login')\r\n return {'title': 'Index page', 'user': aaa.current_user.username, 'content': 'Index page\\'s content'}", "title": "" }, { "docid": "6f1fd4bb7ae3d656538abc9f2d1fffea", "score": "0.52562535", "text": "def index():\n\n response.view = 'tnrs.html'\n view_dict = get_opentree_services_method_urls(request)\n #view_dict['message'] = \"This would appear at bottom of page..\"\n view_dict['maintenance_info'] = get_maintenance_info(request)\n view_dict['taxonSearchContextNames'] = fetch_current_TNRS_context_names(request)\n return view_dict", "title": "" }, { "docid": "f5497d4b75f0acab104177d53fdae6e1", "score": "0.5250924", "text": "def __init__(self):\n \n super().__init__()\n self.pages = []", "title": "" }, { "docid": "49cfe1e8562e29c0c5f9c8f8e8be98a4", "score": "0.5244985", "text": "def index(request):\n title = _(\"Home\")\n posts = Post.objects.all().order_by('-timestamp')[:5]\n suptechs = Suptech.objects.filter(status=\"En Attente\")[:15]\n return render(request, 'dashboard/index.html', locals())", "title": "" }, { "docid": "0342eebb6a126f773c127dbb9bfc2541", "score": "0.5242171", "text": "def home(self):\n return redirect(\"/static/demo.html\")", "title": "" }, { "docid": "9989f77c76c794d301e765d12185637a", "score": "0.522103", "text": "def get_all_page_urls(self):\n # create pages url list\n page_urls_list = []\n for i in range(1, int(self.get_max_page())+1):\n page_url = f\"https://streeteasy.com/buildings/nyc/new_development:new%20development%7Carea:100,300,400,200?page={i}\"\n page_urls_list.append(page_url)\n print(\"All pages identified, max page = \", self.get_max_page())\n return page_urls_list", "title": "" }, { "docid": "7ccdf58287bf320cb2eeae1378c6c679", "score": "0.52131414", "text": "def static_web_spider(self):\n response = requests.get(self.url).content\n soup = BeautifulSoup(response, 'html.parser', from_encoding='utf-8')\n self.content = soup.html", "title": "" }, { "docid": "bf99cf3f8c786a2af23d41eeb26a00d9", "score": "0.5212774", "text": "def mount(self, base_url):\n\n urls = []\n\n public_pages_url = url(r'%s(?P<page_url>.*)' % base_url, serve_page, name=\"contento-cms\")\n urls.append(public_pages_url)\n return urls", "title": "" }, { "docid": "023a6951a706ce61dff7d7a3053074a7", "score": "0.52111095", "text": "def index():\n return render_template('landing/index.html')", "title": "" }, { "docid": "23f8598fb0f63b2b422bd90c59ea0a96", "score": "0.5201761", "text": "def home():\n return default.send_static_file('index.html')", "title": "" }, { "docid": "94e22a25aa4bbc957c45ba64aef2dde7", "score": "0.51988184", "text": "def get_data(self):\n self.flag_page_loop = False\n for i in range(1, self.nb_page + 1):\n self.load_htlm(self.target.format(page = str(i), ref_url = self.url, start_d = self.start_d))\n self.feed(self.html)", "title": "" }, { "docid": "418f1ea05837c8f5589a72bd3b027595", "score": "0.51936024", "text": "def premier():\n return premier_page()", "title": "" }, { "docid": "751e8c3493902c64a1f8923f5bdd323c", "score": "0.5189809", "text": "def get(self):\n self.render('index.html', page='home')", "title": "" }, { "docid": "788e03385ba749d1bbfb2a5c64e32fe2", "score": "0.51882267", "text": "def pages(self):\n # a workaround to keep the old interface working\n return self", "title": "" }, { "docid": "26089e2cc4849721d04a718ca15c52ed", "score": 
"0.5183904", "text": "def getAllBerita(self, details, page, cat_link, offset, date=datetime.strftime(datetime.today(), '%Y-%m-%d')):\n\n print(\"page \", page)\n url = \"https://index.sindonews.com/index/\"+ str(cat_link)+ \"/\" + str(offset)+ \"?t=\"+ date\n print(url)\n\n # Make the request and create the response object: response\n try:\n response = requests.get(url)\n except ConnectionError:\n print(\"Connection Error, but it's still trying...\")\n time.sleep(5)\n details = self.getAllBerita(details, page, cat_link, offset, date)\n # Extract HTML texts contained in Response object: html\n html = response.text\n # Create a BeautifulSoup object from the HTML: soup\n soup = BeautifulSoup(html, \"html5lib\")\n\n contentDiv = soup.find('div', class_=\"indeks-news\")\n if contentDiv:\n for post in contentDiv.findAll('div', class_=\"indeks-title\"):\n link = [post.find('a', href=True)['href'], \"\"]\n detail = self.getDetailBerita(link)\n if detail:\n if self.insertDB(detail):\n details.append(detail)\n\n el_page = soup.find('div', class_=\"pagination\")\n if el_page:\n active_page = el_page.find('li', class_=\"active\").get_text(strip=True)\n max_page = el_page.findAll('a')[-1]\n if max_page:\n if active_page != max_page.get_text(strip=True):\n time.sleep(5)\n details = self.getAllBerita(details, page+1, cat_link, offset+10, date)\n # else:\n # max_page = page\n return 'berhasil ambil semua berita'", "title": "" }, { "docid": "fa5ab112e76b7206b6aa4b297998d411", "score": "0.51834315", "text": "def pages(self):\n return self._server.pages", "title": "" }, { "docid": "cae67dbce6f1626561b4eea0536a5f43", "score": "0.5175548", "text": "def process_index_page(self, response):\n logging.info('index: {}'.format(response.url))\n print('index: {}'.format(response.url))\n self.classification_file.write(\"index, {}\\n\".format(response.url))\n self.index_page_count += 1\n time.sleep(self.new_index_page_pause_time)", "title": "" }, { "docid": "f99703399b68850799d226ecd9d5e173", "score": "0.5166426", "text": "def navigate_pages(self):\n print('Retrieving viewing activity...')\n\n # List that is filled with strings of viewing activity\n self.activity_list = []\n\n done = False\n while not done:\n self.get_page_activity()\n try:\n self.driver.find_element_by_id('iyrNext').click()\n time.sleep(1)\n except WebDriverException:\n done = True\n\n common.output_activity(SERVICE, self.activity_list)", "title": "" }, { "docid": "c3a837c09cb5db30a24959ac184d8720", "score": "0.5164111", "text": "def set_pages(self, pages):\n self._server.pages = {\n page.absolute_path: page.bytes\n for page in pages\n }", "title": "" }, { "docid": "f16d52e1823c86e92fc56276948e374e", "score": "0.5162371", "text": "def homepage():\n return(\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end<br/>\")", "title": "" }, { "docid": "f3fa3c4f0872f183c67af8824d808148", "score": "0.5156019", "text": "def scrapeAllPages(self):\n nPages = 0\n while True:\n if self.nextPage is not None:\n self.scrapePage()\n nPages += 1\n self.url = self.nextPage\n self.getNextPage()\n else:\n self.scrapePage()\n nPages += 1\n break\n\n return f\"Scraped {nPages} pages, Total appartments: {len(self.data)}\"", "title": "" }, { "docid": "ca95e49207e1ff223c16a26533cebc9a", "score": "0.5153318", "text": "def index():\n videos_list = get_video_list()\n pictures = get_pictures_list()\n musics = get_music_list()\n documents = get_documents_list()\n 
applications = get_application_list()\n compressed = get_compressed_list()\n\n return render_template('index.html', section='index',\n videos=videos_list,\n pictures=pictures,\n documents=documents,\n musics=musics,\n compressed=compressed,\n applications=applications)", "title": "" }, { "docid": "cff354bc540625637268d04a07370c6f", "score": "0.51497096", "text": "def get_all_pages(id):\n\n if not g.user:\n flash('You have to Login')\n return redirect('/')\n\n book = Book.query.get_or_404(id)\n\n if book.user_id != g.user.id:\n flash(\"This is not your Book\")\n return redirect('/books')\n else:\n pages = [page.serialize() for page in book.pages]\n\n return jsonify(pages=pages)", "title": "" }, { "docid": "b661a4a3920f0bc9b48f5e320f3b2305", "score": "0.5147754", "text": "def index_alt():\n\n redirect(URL(f=\"household\", args=\"summary\"))", "title": "" }, { "docid": "e90296180ca13fa3e5aab09438aaf69c", "score": "0.51451993", "text": "def applicants(self):\n response = ''\n data = []\n for i in range(100):\n response = requests.get(self.url + 'applicants/page/' + str(i+1)+'/?apikey=' + self.api_token)\n if len(response.text) > 10:\n data.extend(json.loads(response.text))\n else:\n break\n return data", "title": "" }, { "docid": "25eaf0724b5180c6fbf99d815d728f3a", "score": "0.5143958", "text": "def home(request):\n return {}", "title": "" }, { "docid": "4f7435aa2d53877210b15ded613f365d", "score": "0.51407546", "text": "def home():\n table_ids = Table.get_all()\n return jsonify(success=True, data={'table_ids': table_ids})", "title": "" }, { "docid": "47138c88bed4d3fa51f08713d52d3043", "score": "0.51384395", "text": "def link_list(self):\n self.request.response.content_type = \"application/javascript\"\n try:\n language = self.request.language\n\n items = []\n for page in Page.all(self.session):\n translation = page.get_translation(language)\n item = unicode('[\"{}\",\"{}.html\"]')\n item = item.format(translation.title.replace('\"', \"'\"),\n translation.url)\n items.append(item)\n\n response = unicode('var tinyMCELinkList = new Array({});')\n response = response.format(', '.join(items))\n\n except Exception as e:\n log.exception(e)\n self.session.rollback()\n response = '/* ' + str(e) + ' */'\n self.request.response.status = 500\n\n else:\n self.session.commit()\n self.request.response.status = 200\n\n return response", "title": "" }, { "docid": "68795f0c43a58be5b02a48c9b9133586", "score": "0.5136742", "text": "def home():\n return app.send_static_file('index.html')\n #return render_template('index.html')", "title": "" }, { "docid": "b06d001d7d51d22c013d44d0460a226e", "score": "0.513592", "text": "def pages():\n def get_str(chart):\n return chart[\"str\"].upper()\n\n def mk_cap(mode):\n return mode.capitalize()\n\n idx = index.read()\n for name in set(key for mode in (game.mode.star, game.mode.pop) for key in idx[mode]):\n tabs = []\n clean_name = clean(name)\n if exists(path.db.star + clean_name + \".json\"): tabs.append(mk_cap(game.mode.star))\n if exists(path.db.nm + clean_name + \".json\"): tabs.append(get_str(game.chart.nm))\n if exists(path.db.hd + clean_name + \".json\"): tabs.append(get_str(game.chart.hd))\n if exists(path.db.mx + clean_name + \".json\"): tabs.append(get_str(game.chart.mx))\n if exists(path.db.ex + clean_name + \".json\"): tabs.append(get_str(game.chart.ex))\n if len(tabs) > 0:\n _page(tabs, name, \"./images/disc\")\n for name in (key for key in idx[game.mode.club]):\n if exists(path.db.club + clean(name) + \".json\"):\n _page([mk_cap(game.mode.club)], name, 
\"./images/club\")\n for name in (key for key in idx[game.mode.mission]):\n if exists(path.db.mission + clean(name) + \".json\"):\n _page([mk_cap(game.mode.mission)], name, \"./images/mission\")\n _page([mk_cap(game.mode.star), get_str(game.chart.nm), get_str(game.chart.hd), get_str(game.chart.mx), mk_cap(game.mode.pop), mk_cap(game.mode.club), mk_cap(game.mode.mission)], \"Master\")\n _index(idx)", "title": "" }, { "docid": "00484f183630a778b35ed759a1acffee", "score": "0.5135895", "text": "def home(request):\n assert isinstance(request, HttpRequest)\n search = moviedbAPIInterface()\n search.searchByTitle(\"titanic\")\n return render(request,\n 'app/index.html',\n {\n 'title':'Home Page',\n 'year':datetime.now().year,\n })", "title": "" }, { "docid": "5fa6ef4221bab85595ca34d7265212ca", "score": "0.51324296", "text": "async def root():\n html_content1 = \"\"\"\n <a href=\"/api/v1/docs\">/api/v1/docs</a>\n \"\"\"\n\n html_content2 = \"\"\"\n <a href=\"/maccounts/api/v1/docs\">/maccounts/api/v1/docs</a>\n \"\"\"\n try:\n return HTMLResponse(content=f\"{settings.DESCRIPTION} Visit {html_content1} for the platform endpoints, and{html_content2} for the marketplace api endpoints.\",\n status_code=status.HTTP_200_OK)\n except Exception as e:\n return JSONResponse(content=e, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)", "title": "" }, { "docid": "f4cbf3991dc6c3223cbc015a342d87cb", "score": "0.5130129", "text": "def home():\n\n with open(os.path.join(lib_path, \"failed_bfs.json\"), \"rb\") as sjh:\n contents = sjh.read().decode('utf-8')\n failed_bfs_root = json.loads(contents)\n\n query = failed_bfs_root['query']\n date = failed_bfs_root['date']\n failed_bfs = failed_bfs_root['bfs']\n\n bfs = analyzer.mongo_client.get_bfs()\n\n return render_template(\n 'index.html',\n title='Home Page',\n year=datetime.now().year,\n failed_bfs=bfs,\n query=query,\n date=date,\n bf_count=len(bfs))", "title": "" }, { "docid": "c2acddd59a86dec47ebe68d174163e30", "score": "0.5129384", "text": "def home():\n return app.send_static_file('index.html')", "title": "" }, { "docid": "afe70a033d585ae33e198d422eea32a6", "score": "0.51278186", "text": "def test_wiki_special_all_pages(self):\n response = self.client.get(\"/special/all_pages\", app.config['WIKI_SUBDOMAIN'])\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "d4146114344e3e43c0e403f09beb33ca", "score": "0.5125736", "text": "def send_urls():\n return jsonify(db.get_links())", "title": "" }, { "docid": "d9fd54090b9a2831c4b3888857db24df", "score": "0.51211464", "text": "def index_all(page_query=None):\n\n if page_query is None:\n page_query = Page.objects.live().all()\n\n for page in page_query.specific():\n index_page(page)", "title": "" }, { "docid": "e110e26f67032bc7191de9b809b56f48", "score": "0.5118122", "text": "def home(request):\n return {\n 'home': True,\n 'gh_media': GH_MEDIA,\n 'gh_static': GH_STATIC\n }", "title": "" }, { "docid": "a93fa3210adfebf1b85167c68d008fe5", "score": "0.51174754", "text": "def nav_to_games_list(self):\n self.browser.get(self.live_server_url)", "title": "" }, { "docid": "e30de10a65a3427225efa08f3898a62e", "score": "0.51161313", "text": "def index():\n codes = get_codes()\n return render_template('index.html', codes=codes)", "title": "" }, { "docid": "0892931fd85f81ca9c461ba575da1628", "score": "0.5111974", "text": "def visit_page(self, page_id=1):\r\n like_tieba_url = 'http://tieba.baidu.com/f/like/mylike?&pn=%d' % page_id\r\n fetchRequest = urllib2.Request(like_tieba_url)\r\n fetchResponse = 
urllib2.urlopen(fetchRequest).read()\r\n fetchPage = BeautifulSoup(fetchResponse, \"lxml\")\r\n # print fetchPage\r\n bar_boxs = fetchPage.find_all(has_title_but_no_class)\r\n if bar_boxs:\r\n temp_like_tieba = [{\r\n 'name': bar['title'].encode('utf-8'),\r\n 'link':'http://tieba.baidu.com'+bar['href']\r\n } for bar in bar_boxs]\r\n # each bar is a dict with name and link\r\n if temp_like_tieba:\r\n if not self.like_tiebas:\r\n self.like_tiebas = temp_like_tieba\r\n else:\r\n self.like_tiebas += temp_like_tieba\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "title": "" } ]
5e1a891d3b7866607a29f1974d1ce28e
DMLab-like interface for a Craft environment. Given a `scenario` (basically holding an initial world state), will provide the usual DMLab API for use in RL.
[ { "docid": "1768a3f30f25b192ca42f33c21871d27", "score": "0.0", "text": "def __init__(self,\n scenario,\n task_name,\n task,\n max_steps=100,\n visualise=False,\n render_scale=10,\n extra_pickup_penalty=0.3):\n\n self.world = scenario.world\n self.scenario = scenario\n self.task_name = task_name\n self.task = task\n self.max_steps = max_steps\n self._visualise = visualise\n self.steps = 0\n self._extra_pickup_penalty = extra_pickup_penalty\n self._current_state = self.scenario.init()\n\n # Rendering options\n self._render_state = {}\n self._width, self._height, _ = self._current_state.grid.shape\n self._render_scale = render_scale\n self._inventory_bar_height = 10\n self._goal_bar_height = 30\n self._render_width = self._width * self._render_scale\n self._render_height = (self._height * self._render_scale +\n self._goal_bar_height + self._inventory_bar_height)\n # Colors of entities for rendering\n self._colors = {\n 'player': sns.xkcd_palette(('red', ))[0],\n 'background': sns.xkcd_palette(('white', ))[0],\n 'boundary': sns.xkcd_palette(('black', ))[0],\n 'workshop0': sns.xkcd_palette(('blue', ))[0],\n 'workshop1': sns.xkcd_palette(('pink', ))[0],\n 'workshop2': sns.xkcd_palette(('violet', ))[0],\n 'water': sns.xkcd_palette(('water blue', ))[0],\n 'wood': sns.xkcd_palette(('sienna', ))[0],\n 'cloth': sns.xkcd_palette(('off white', ))[0],\n 'flag': sns.xkcd_palette(('cyan', ))[0],\n 'grass': sns.xkcd_palette(('grass', ))[0],\n 'iron': sns.xkcd_palette(('gunmetal', ))[0],\n 'stone': sns.xkcd_palette(('stone', ))[0],\n 'rock': sns.xkcd_palette(('light peach', ))[0],\n 'hammer': sns.xkcd_palette(('chestnut', ))[0],\n 'knife': sns.xkcd_palette(('greyblue', ))[0],\n 'slingshot': sns.xkcd_palette(('dusty orange', ))[0],\n 'bench': sns.xkcd_palette(('umber', ))[0],\n 'arrow': sns.xkcd_palette(('cadet blue', ))[0],\n 'bow': sns.xkcd_palette(('dark khaki', ))[0],\n 'gold': sns.xkcd_palette(('gold', ))[0],\n 'gem': sns.xkcd_palette(('bright purple', ))[0],\n 'bridge': sns.xkcd_palette(('grey', ))[0],\n 'stick': sns.xkcd_palette(('sandy brown', ))[0],\n 'bundle': sns.xkcd_palette(('toupe', ))[0],\n 'shears': sns.xkcd_palette(('cherry', ))[0],\n 'plank': sns.xkcd_palette(('brown', ))[0],\n 'ladder': sns.xkcd_palette(('metallic blue', ))[0],\n 'goldarrow': sns.xkcd_palette(('golden', ))[0],\n 'bed': sns.xkcd_palette(('fawn', ))[0],\n 'rope': sns.xkcd_palette(('beige', ))[0],\n 'axe': sns.xkcd_palette(('charcoal', ))[0]\n }", "title": "" } ]
[ { "docid": "bed98706b436b68588217f2ebed80aaa", "score": "0.5636561", "text": "def func_with_scenario():\n Scenario(test=my_scenario)()", "title": "" }, { "docid": "7cecb62700356ff3bdc1e0f8a36300d8", "score": "0.5341338", "text": "def _run_scenario(self, scenario):\r\n \r\n scenario_data = list()\r\n # Set boundaries for random actions\r\n low = np.array([-0.5, -0.02])\r\n high = np.array([0.5, 0.02])\r\n\r\n self._env.reset(scenario=scenario)\r\n done = False\r\n \r\n while done is False:\r\n action = np.random.uniform(low=low, high=high, size=(2, ))\r\n observation, reward, done, info = self._env.step(action)\r\n\r\n # Calculate the labels for the supervised setting\r\n action_labels = self._calc_labels(observation)\r\n\r\n # Save datum in data_scenario\r\n datum = dict()\r\n datum[\"observation\"] = observation\r\n datum[\"label\"] = action_labels\r\n scenario_data.append(datum)\r\n\r\n return scenario_data", "title": "" }, { "docid": "2cb81dfc403486db8bd99758c818311d", "score": "0.5285189", "text": "def main():\n # parse arguments from the command line (argparse validates arguments)\n args = _get_args()\n # build the environment with the given ID\n env = gym.make(args.env)\n if args.seed is not None:\n env.seed(args.seed)\n # wrap the environment with an action space if specified\n if args.actionspace != 'nes':\n # unwrap the actions list by key\n actions = _ACTION_SPACES[args.actionspace]\n # wrap the environment with the new action space\n env = JoypadSpace(env, actions)\n # play the environment with the given mode\n if args.mode == 'human':\n play_human(env)\n else:\n play_random(env, args.steps)", "title": "" }, { "docid": "4c37efafc86731b3c9445610ef91ae40", "score": "0.52791965", "text": "def __init__(self, scenario_name='', arglist=None, done=False, logging=False, benchmark=False):\n\n self._env = make_env(scenario_name, arglist, done, logging, benchmark)\n self.num_agents = self._env.n\n self.agent_ids = list(map(str, list(range(self.num_agents))))\n\n # self.obs_space = self._make_dict(self._env.observation_space)\n # self.action_space = self._make_dict(self._env.action_space)\n # self.obs_space = {'0': self._env.observation_space[0], '1': self._env.observation_space[1]}\n # self.obs_space = gym.spaces.Dict({'0': self._env.observation_space[0], '1': self._env.observation_space[1]})\n\n self.obs_space = my_box.Box(\n low=np.array([self._env.observation_space[0].low, self._env.observation_space[1].low]),\n high=np.array([self._env.observation_space[0].high, self._env.observation_space[1].high]),\n shape=(2, ),\n dtype=np.float32)\n self.action_space = gym.spaces.Tuple((self._env.action_space[0], self._env.action_space[1]))", "title": "" }, { "docid": "3b9e8d993d19dbd94ae149901f4a98fc", "score": "0.52537155", "text": "def run(self, scenario):\n assert isinstance(scenario, scenarios.Scenario)\n model = self.new_model(scenario)\n model.run(scenario, learn=True)\n return model", "title": "" }, { "docid": "7571ed3ecff6cae081aaabab4c75621f", "score": "0.51468784", "text": "def test_model_based(env: gym.Env) -> None:\n state = env.initial_state()\n assert env.state_space.contains(state)\n\n action = env.action_space.sample()\n new_state = env.transition(state, action)\n assert env.state_space.contains(new_state)\n\n reward = env.reward(state, action, new_state)\n assert isinstance(reward, float)\n\n done = env.terminal(state, 0)\n assert isinstance(done, bool)\n\n obs = env.obs_from_state(state)\n assert env.observation_space.contains(obs)\n next_obs = env.obs_from_state(new_state)\n 
assert env.observation_space.contains(next_obs)", "title": "" }, { "docid": "399721e87c26d36849610512f735408f", "score": "0.5136439", "text": "def make_env(stack=True, scale_rew=True, scenario = 'scenario'): #scenario = #'contest'\n #env = grc.RemoteEnv('tmp/sock')\n #env = make(game='SonicTheHedgehog-Genesis', state='SpringYardZone.Act1', bk2dir='videos', monitordir='logs',scenario=scenario)\n#'BustAMove.1pplay.Level1' #BustAMove.Challengeplay0\n #env = make(game='BustAMove-Snes', state='BustAMove.1pplay.Level1', bk2dir='videos', monitordir='logs',scenario=scenario)\n env = make(game='ContraIII-Snes', state='level1.1player.easy.100lives', bk2dir='videos', monitordir='logs',\n scenario=scenario)\n\n env = SonicDiscretizer(env)\n if scale_rew:\n env = RewardScaler(env)\n env = WarpFrame(env)\n if stack:\n env = FrameStack(env, 4)\n return env", "title": "" }, { "docid": "ae1a2100d4a979416037fa2160885a38", "score": "0.5109307", "text": "def __init__(self,\n base_env_name=None,\n batch_size=None,\n env_wrapper_fn=None,\n reward_range=(-np.inf, np.inf),\n discrete_rewards=True,\n parallelism=1,\n **env_kwargs):\n\n # Call the super's ctor.\n problem.Problem.__init__(self, was_reversed=False, was_copy=False)\n\n # Name for the base environment, will be used in `gym.make` in\n # the default implementation of `initialize_environments`.\n self._base_env_name = base_env_name\n\n # An env generates data when it is given actions by an agent which is either\n # a policy or a human -- this is supposed to be the `id` of the agent.\n #\n # In practice, this is used only to store (and possibly retrieve) history\n # to an appropriate directory.\n self._agent_id = \"default\"\n\n # We clip rewards to this range before processing them further, as described\n # in `process_rewards`.\n self._reward_range = reward_range\n\n # If set, we discretize the rewards and treat them as integers.\n self._discrete_rewards = discrete_rewards\n\n # Initialize the environment(s).\n\n # This can either be a list of environments of len `batch_size` or this can\n # be a Neural Network, in which case it will be fed input with first\n # dimension = `batch_size`.\n self._envs = None\n self._pool = None\n self._parallelism = parallelism\n\n self._observation_space = None\n self._action_space = None\n\n # A data structure to hold the `batch_size` currently active trajectories\n # and also the ones that are completed, i.e. 
done.\n self._trajectories = None\n\n self._batch_size = None\n\n self._env_wrapper_fn = env_wrapper_fn\n\n if batch_size is not None:\n self.initialize(batch_size=batch_size, **env_kwargs)", "title": "" }, { "docid": "d973e4a9144230b00770dd8959f926ef", "score": "0.50582916", "text": "def RunAssemblyScenario(scenarios, realize_connections):", "title": "" }, { "docid": "141c532813c217b70a2a530db96aa5a6", "score": "0.5020394", "text": "def load_scenario(self, scenario, agent=None):\n self._reset()\n self._agent = AgentWrapper(agent) if agent else None\n if self._agent is not None:\n self._sync_mode = True\n\n # original agent\n self.original_agent = agent\n\n self.scenario_class = scenario\n self.scenario = scenario.scenario\n self.scenario_tree = self.scenario.scenario_tree\n self.ego_vehicles = scenario.ego_vehicles\n self.other_actors = scenario.other_actors\n\n # To print the scenario tree uncomment the next line\n # py_trees.display.render_dot_tree(self.scenario_tree)\n\n if self._agent is not None:\n self._agent.setup_sensors(self.ego_vehicles[0], self._debug_mode)", "title": "" }, { "docid": "dd89f90afd0d2243d83b6aaa8665e614", "score": "0.5018974", "text": "def scenario_step(self):", "title": "" }, { "docid": "f5b64ab78bfece33eb05cf47486bcbb7", "score": "0.49979162", "text": "def add_scenario(self, scenario_name, scenario_script, rewrite=None):\n params = dict()\n \n params['scenario_name']=scenario_name\n\n params['scenario_script']=scenario_script\n\n \n if rewrite is not None:\n params['rewrite']=rewrite\n\n \n res = self._perform_request('AddScenario', params)\n if \"error\" in res:\n raise VoximplantException(res[\"error\"][\"msg\"])\n \n return res", "title": "" }, { "docid": "d5dc695422925f77e78822570f87236e", "score": "0.4994561", "text": "def _setup_world(self, env_name):\n if env_name in ['gym_reacher']:\n self._env = \\\n reacher.env(env_name, 1234, misc_info={'reset_type': 'gym'})\n elif env_name in ['gym_cheetah', 'gym_walker2d',\n 'gym_hopper', 'gym_swimmer', 'gym_ant']:\n self._env = \\\n walker.env(env_name, 1234, misc_info={'reset_type': 'gym'})\n elif env_name in ['gym_nostopslimhumanoid']:\n self._env = \\\n humanoid.env(env_name, 1234, misc_info={'reset_type': 'gym'})\n\n elif env_name in ['gym_pendulum']:\n self._env = \\\n pendulum.env(env_name, 1234, misc_info={'reset_type': 'gym'})\n elif env_name in ['gym_invertedPendulum']:\n self._env = invertedPendulum.env(env_name, 1234,\n misc_info={'reset_type': 'gym'})\n elif env_name in ['gym_acrobot']:\n self._env = \\\n acrobot.env(env_name, 1234, misc_info={'reset_type': 'gym'})\n elif env_name in ['gym_mountain']:\n self._env = mountain_car.env(env_name, 1234,\n misc_info={'reset_type': 'gym'})\n elif env_name in ['gym_cartpole']:\n self._env = \\\n cartpole.env(env_name, 1234, misc_info={'reset_type': 'gym'})\n elif env_name in ['gym_petsCheetah', 'gym_petsReacher', 'gym_petsPusher']:\n self._env = \\\n pets.env(env_name, 1234, misc_info={'reset_type': 'gym'})\n elif env_name in ['gym_cheetahO01', 'gym_cheetahO001',\n 'gym_cheetahA01', 'gym_cheetahA003']:\n self._env = noise_gym_cheetah.env(env_name, 1234,\n misc_info={'reset_type': 'gym'})\n elif env_name in ['gym_pendulumO01', 'gym_pendulumO001']:\n self._env = noise_gym_pendulum.env(env_name, 1234,\n misc_info={'reset_type': 'gym'})\n elif env_name in ['gym_cartpoleO01', 'gym_cartpoleO001']:\n self._env = noise_gym_cartpole.env(env_name, 1234,\n misc_info={'reset_type': 'gym'})\n elif env_name in ['gym_fwalker2d', 'gym_fant', 'gym_fhopper']:\n self._env = 
fixed_walker.env(\n env_name, 1234,\n misc_info={'reset_type': 'gym', 'no_termination': True}\n )\n elif env_name in ['gym_fswimmer']:\n self._env = fixed_swimmer.env(\n env_name, 1234,\n misc_info={'reset_type': 'gym', 'no_termination': True}\n )\n else:\n raise NotImplementedError\n\n self.x0 = [np.array(self._hyperparams['x0'])\n for _ in range(self._hyperparams['conditions'])]", "title": "" }, { "docid": "8b66d59164b09945cf9e6bb9fa50041f", "score": "0.49662867", "text": "def parse_scenario(string, language=None):\n feature_str = \"\"\"\n Функция: parse_scenario\n \"\"\"\n feature_str += string\n feature = Feature.from_string(feature_str, language=language)\n\n return feature.scenarios[0]", "title": "" }, { "docid": "cc9e75e9787fb1808eba23846bcddd82", "score": "0.4959656", "text": "def before_scenario(context: Context, scenario: Scenario):\n logging.debug('Starting scenario: %s', scenario.name)\n context.scenario_data = initialize_scenario_data()\n desired_capabilities = context.desired_capabilities\n desired_capabilities[\"name\"] = scenario.name\n if CONFIG[\"hub_url\"]:\n context.driver = webdriver.Remote(\n desired_capabilities=desired_capabilities,\n command_executor=CONFIG[\"hub_url\"])\n else:\n browser_name = CONFIG[\"environments\"][0][\"browser\"]\n drivers = {\n \"chrome\": webdriver.Chrome,\n \"edge\": webdriver.Edge,\n \"firefox\": webdriver.Firefox,\n \"ie\": webdriver.Ie,\n \"phantomjs\": webdriver.PhantomJS,\n \"safari\": webdriver.Safari,\n }\n # start the browser\n context.driver = drivers[browser_name.lower()]()\n context.driver.set_page_load_timeout(time_to_wait=27)\n try:\n context.driver.maximize_window()\n logging.debug(\"Maximized the window.\")\n except WebDriverException:\n logging.debug(\"Failed to maximize the window.\")\n try:\n context.driver.set_window_size(1600, 1200)\n logging.warning(\"Set window size to 1600x1200\")\n except WebDriverException:\n logging.warning(\"Failed to set window size, will continue as is\")\n logging.debug(\"Browser Capabilities: %s\", context.driver.capabilities)", "title": "" }, { "docid": "9d4325804e546cbdf47516067943cb07", "score": "0.4956983", "text": "def scenario(\n _func: Optional[FunctionType] = None,\n *,\n name: Optional[str] = None,\n infuse: bool = True,\n auto: bool = False,\n auto_run_queries: List[str] = [],\n):\n\n def decorator_scenario(func: FunctionType):\n\n if not isinstance(func, FunctionType):\n raise NotFunctionTypeError(func)\n\n __name: str = name or func.__name__\n func.__name__ = __name\n\n @functools.wraps(func)\n def wrapper_scenario(*args, ignore: bool = False, **kwargs):\n nonlocal func\n logger.debug(f\"Executing '{func.__name__}' scenario.\")\n\n ctx = get_context()\n\n # Connection is expected to be supplied as the first arrgument\n # to the executed function.\n connection: Optional[Connection] = None\n if not args:\n logger.debug(\n (\n \"No connection argument has been supplied to the \"\n f\"'{func.__name__}' scenario. 
Initiating a fresh \"\n \"connection.\"\n )\n )\n\n # Prepare context\n ctx.infuse()\n\n # Initiate a new connection\n connection = get_connection()\n\n else:\n connection = args[0]\n args = args[1:]\n\n if not isinstance(connection, Connection):\n raise ConnectionTypeError(connection)\n\n if connection._closed:\n raise ConnectionClosedError(__name)\n\n result: Any = None\n try:\n # Auto Run Queries.\n # If there is a list of query names that should be ran\n # automatically when this scenario is invoked, then run\n # them.\n query_names = ctx.scenarios[__name].auto_run_queries\n if query_names:\n logger.debug(\n f\"Execuing auto-run queries for scenario '{__name}': {query_names}\"\n )\n for q in query_names:\n with connection.cursor() as cur:\n ctx.queries[q].function(cur, ignore=ignore)\n\n if auto:\n connection.commit()\n else:\n result = func(connection, *args, **kwargs)\n\n except Exception as e:\n if ignore:\n logger.warning(\n f\"Error occured in scenario but was handled: {e}\"\n )\n else:\n raise ScenarioExecutionError(e) from None\n\n return result\n\n setattr(wrapper_scenario, \"_is_decorated_by_scenario\", True)\n\n ctx = get_context()\n ctx.register_scenario(\n wrapper_scenario,\n name=__name,\n infuse=infuse,\n auto=auto,\n auto_run_queries=auto_run_queries,\n )\n\n return wrapper_scenario\n\n if _func is None:\n return decorator_scenario\n else:\n return decorator_scenario(_func)", "title": "" }, { "docid": "c34aa8a3ef4cb0c93cd55e3fe66f24c8", "score": "0.4954952", "text": "def bind_scenario(self, scenario_id=None, scenario_name=None, rule_id=None, rule_name=None, application_id=None, application_name=None, bind=None):\n params = dict()\n \n passed_args = []\n if scenario_id is not None:\n passed_args.append('scenario_id')\n if scenario_name is not None:\n passed_args.append('scenario_name')\n \n if len(passed_args) > 1:\n raise VoximplantException(\", \". join(passed_args) + \" passed simultaneously into bind_scenario\")\n if len(passed_args) == 0:\n raise VoximplantException(\"None of scenario_id, scenario_name passed into bind_scenario\")\n \n \n passed_args = []\n if rule_id is not None:\n passed_args.append('rule_id')\n if rule_name is not None:\n passed_args.append('rule_name')\n \n if len(passed_args) > 1:\n raise VoximplantException(\", \". join(passed_args) + \" passed simultaneously into bind_scenario\")\n if len(passed_args) == 0:\n raise VoximplantException(\"None of rule_id, rule_name passed into bind_scenario\")\n \n \n passed_args = []\n if application_id is not None:\n passed_args.append('application_id')\n if application_name is not None:\n passed_args.append('application_name')\n \n if len(passed_args) > 1:\n raise VoximplantException(\", \". 
join(passed_args) + \" passed simultaneously into bind_scenario\")\n if len(passed_args) == 0:\n raise VoximplantException(\"None of application_id, application_name passed into bind_scenario\")\n \n \n \n if scenario_id is not None:\n params['scenario_id']=self._serialize_list(scenario_id)\n\n if scenario_name is not None:\n params['scenario_name']=self._serialize_list(scenario_name)\n\n if rule_id is not None:\n params['rule_id']=rule_id\n\n if rule_name is not None:\n params['rule_name']=rule_name\n\n if application_id is not None:\n params['application_id']=application_id\n\n if application_name is not None:\n params['application_name']=application_name\n\n if bind is not None:\n params['bind']=bind\n\n \n res = self._perform_request('BindScenario', params)\n if \"error\" in res:\n raise VoximplantException(res[\"error\"][\"msg\"])\n \n return res", "title": "" }, { "docid": "6bb0c89f4448ad064a23b866129d83a1", "score": "0.49103233", "text": "def CreateNewAssemblyScenario(name):", "title": "" }, { "docid": "38537b24ba4d01b908234ea28b32aa6d", "score": "0.48609015", "text": "def setup_scenario():\n world.headers = _get_default_headers(world.token_id, world.tenant_id)\n world.configured_node_list = list()\n world.software_to_generate = list()", "title": "" }, { "docid": "1e85bc088288f13e63f702dc5aa548fc", "score": "0.48501286", "text": "def __init__(self, world, ego_vehicles, config, randomize=False, debug_mode=False, criteria_enable=True,\n timeout=60):\n\n self.config = config\n\n # Call constructor of BasicScenario\n super(NewScenario, self).__init__(\n name=\"NewScenario\",\n ego_vehicles,\n config,\n world,\n debug_mode,\n criteria_enable=criteria_enable))", "title": "" }, { "docid": "8342eb58ebf1301271731b40e0873df3", "score": "0.4794908", "text": "def run():\n global my_scn\n if not my_scn:\n logging.error(\"No existing scenario to run. Please create a scenario\")\n return\n if not my_scn.has_testcase:\n logging.error(\"No testcase to run. Please add a testcase\")\n return\n my_scn.run()\n my_scn = None", "title": "" }, { "docid": "a4811712715f299ce9509f94cdaeb7df", "score": "0.4784103", "text": "def run_scenario(config_file):\n # Construct the scenario object\n print(Style.BRIGHT + Fore.GREEN +\n \"\\nLoading scenario config... 
\" +\n Style.RESET_ALL, end='')\n\n scenario = Scenario(config_file)\n print(Style.BRIGHT + Fore.GREEN + \"Done.\" +\n \"\\nInitiating model run...\\n\" + Style.RESET_ALL)\n code_start_time = time.time()\n\n # `IFSystem` object that contains a list of components\n infrastructure = ingest_spreadsheet(config_file)\n\n post_processing_list = calculate_response(scenario, infrastructure)\n # After the response has been calculated the post processing\n # will record the results\n post_processing(infrastructure, scenario, post_processing_list)", "title": "" }, { "docid": "38d1f3b1e76be31c20e6997887c0ff7a", "score": "0.4779238", "text": "def run(self, scenario, learn=True):\n\n assert isinstance(scenario, scenarios.Scenario)\n\n previous_match_set = None\n\n # Repeat until the scenario has run its course.\n while scenario.more():\n # Gather information about the current state of the\n # environment.\n situation = scenario.sense()\n\n # Determine which rules match the current situation.\n match_set = self.match(situation)\n\n # Select the best action for the current situation (or a random\n # one, if we are on an exploration step).\n match_set.select_action()\n\n # Perform the selected action\n # and find out what the received reward was.\n reward = scenario.execute(match_set.selected_action)\n\n # If the scenario is dynamic, don't immediately apply the\n # reward; instead, wait until the next iteration and factor in\n # not only the reward that was received on the previous step,\n # but the (discounted) reward that is expected going forward\n # given the resulting situation observed after the action was\n # taken. This is a classic feature of temporal difference (TD)\n # algorithms, which acts to stitch together a general picture\n # of the future expected reward without actually waiting the\n # full duration to find out what it will be.\n if learn:\n # Ensure we are not trying to learn in a non-learning\n # scenario.\n assert reward is not None\n\n if scenario.is_dynamic:\n if previous_match_set is not None:\n match_set.pay(previous_match_set)\n previous_match_set.apply_payoff()\n match_set.payoff = reward\n\n # Remember the current reward and match set for the\n # next iteration.\n previous_match_set = match_set\n else:\n match_set.payoff = reward\n match_set.apply_payoff()\n\n # This serves to tie off the final stitch. 
The last action taken\n # gets only the immediate reward; there is no future reward\n # expected.\n if learn and previous_match_set is not None:\n previous_match_set.apply_payoff()", "title": "" }, { "docid": "09098934584e2e59ba538e5577a5d56d", "score": "0.47694534", "text": "def load_scenario(self, scenario, agent, rep_number):\n\n GameTime.restart()\n self._agent = AgentWrapper(agent)\n self.scenario_class = scenario\n self.scenario = scenario.scenario\n self.scenario_tree = self.scenario.scenario_tree\n self.ego_vehicles = scenario.ego_vehicles\n self.other_actors = scenario.other_actors\n self.repetition_number = rep_number\n\n # To print the scenario tree uncomment the next line\n # py_trees.display.render_dot_tree(self.scenario_tree)\n\n self._agent.setup_sensors(self.ego_vehicles[0], self._debug_mode)", "title": "" }, { "docid": "7480d8cd6ef3497eafda7243ef860a8c", "score": "0.47645384", "text": "def run_model(self, scenario, policy):\n kwargs = scenario\n loc = kwargs.pop(\"lookup shortage loc\")\n speed = kwargs.pop(\"lookup shortage speed\")\n lookup = [self.f(x / 10, speed, loc) for x in range(0, 100)]\n kwargs[\"shortage price effect lookup\"] = lookup\n\n speed = kwargs.pop(\"lookup price substitute speed\")\n begin = kwargs.pop(\"lookup price substitute begin\")\n end = kwargs.pop(\"lookup price substitute end\")\n lookup = [self.priceSubstite(x, speed, begin, end) for x in range(0, 100, 10)]\n kwargs[\"relative price substitute lookup\"] = lookup\n\n scale = kwargs.pop(\"lookup returns to scale speed\")\n speed = kwargs.pop(\"lookup returns to scale scale\")\n lookup = [self.returnsToScale(x, speed, scale) for x in range(0, 101, 10)]\n kwargs[\"returns to scale lookup\"] = lookup\n\n scale = kwargs.pop(\"lookup approximated learning speed\")\n speed = kwargs.pop(\"lookup approximated learning scale\")\n start = kwargs.pop(\"lookup approximated learning start\")\n lookup = [self.approxLearning(x, speed, scale, start) for x in range(0, 101, 10)]\n kwargs[\"approximated learning effect lookup\"] = lookup\n\n super().run_model(kwargs, policy)", "title": "" }, { "docid": "9844b0316426516be95f4279b1c947b1", "score": "0.4763872", "text": "def make_environment(\n taskstr=\"gym,MountainCarContinuous-v0\"):\n\n # Load the gym environment.\n module, task = taskstr.split(\",\")\n\n if module == \"gym\":\n environment = gym.make(task)\n environment = wrappers.GymWrapper(environment) \n elif module == \"atari\":\n environment = gym.make(task, full_action_space=True)\n environment = gym_wrapper.GymAtariAdapter(environment)\n environment = atari_wrapper.AtariWrapper(environment, to_float=True, max_episode_len=5000, zero_discount_on_life_loss=True,\n)\n elif module == \"dm_control\":\n t1,t2 = task.split(\":\")\n environment = suite.load(t1, t2)\n elif module == \"bsuite\":\n environment = bsuite.load_and_record_to_csv(\n bsuite_id=task,\n results_dir=\"./bsuite_results\"\n )\n \n\n # Make sure the environment obeys the dm_env.Environment interface.\n environment = wrappers.SinglePrecisionWrapper(environment)\n\n return environment", "title": "" }, { "docid": "15a691e9a2c1fa012fbeae53a8e26156", "score": "0.47633588", "text": "def generic(self, node=\"clickhouse1\"):\n self.context.node = self.context.cluster.node(node)\n\n for scenario in loads(current_module(), Scenario, Suite):\n Scenario(run=scenario)", "title": "" }, { "docid": "6f5fb5f84b9c56789937eff721066741", "score": "0.4752578", "text": "def load(\n environment_name: Text,\n discount: types.Int = 1.0,\n max_episode_steps: 
Optional[types.Int] = None,\n gym_env_wrappers: Sequence[\n types.GymEnvWrapper\n ] = DEFAULT_ATARI_GYM_WRAPPERS,\n env_wrappers: Sequence[types.PyEnvWrapper] = (),\n spec_dtype_map: Optional[Dict[gym.Space, np.dtype]] = None,\n) -> py_environment.PyEnvironment:\n if spec_dtype_map is None:\n spec_dtype_map = {gym.spaces.Box: np.uint8}\n\n gym_spec = gym.spec(environment_name)\n gym_env = gym_spec.make()\n\n if max_episode_steps is None and gym_spec.max_episode_steps is not None:\n max_episode_steps = gym_spec.max_episode_steps\n\n return suite_gym.wrap_env(\n gym_env,\n discount=discount,\n max_episode_steps=max_episode_steps,\n gym_env_wrappers=gym_env_wrappers,\n time_limit_wrapper=atari_wrappers.AtariTimeLimit,\n env_wrappers=env_wrappers,\n spec_dtype_map=spec_dtype_map,\n auto_reset=False,\n )", "title": "" }, { "docid": "458e46f22811fb0b4979c58c7a7cce55", "score": "0.47137848", "text": "def before_scenario(context, scenario):\n context.client = TestClient(app)\n\n db = [v for v in get_db()][0]\n db.query(models.UserAddressMapping).delete()\n db.query(models.User).delete()\n db.query(models.Address).delete()\n db.query(models.Account).delete()\n db.commit()\n context.db = db", "title": "" }, { "docid": "f4310b13184ad81283f5f81bbd79023f", "score": "0.47114936", "text": "def env_step(instance_id):\r\n json = request.get_json()\r\n action = get_required_param(json, 'action')\r\n render = get_optional_param(json, 'render', False)\r\n [obs_jsonable, reward, done, info] = envs.step(instance_id, action, render)\r\n return jsonify(observation = obs_jsonable,\r\n reward = reward, done = done, info = info)", "title": "" }, { "docid": "7e10a1f0455c2b61ee56e7e39901ba3b", "score": "0.46995845", "text": "def environment_step(self, actions):\n raise NotImplementedError()", "title": "" }, { "docid": "00f71eaf4bda7ebcf88e0567a3d9e2c2", "score": "0.46739355", "text": "def test_add_env():\r\n model, env = make_model()\r\n assert len(model.envs) == 1\r\n assert env.id == 4 # Environment is added after agents (ID 1-3)\r\n assert env.color == 'green' # Attribute passed as kwarg\r\n assert type(env) == ap.Environment\r\n assert env == model.envs[0]\r\n assert model.agents == env.agents\r\n assert model.agents[0].envs == model.envs", "title": "" }, { "docid": "6de6f22ba68b51c0f56c03b8284bd0cf", "score": "0.466084", "text": "def create_scenario(self, name, description, content):\n \n self.__update_database(scenarioId, 'create', name, description)\n \n self.__update_test_file(scenarioId, 'create', content)\n \n pass", "title": "" }, { "docid": "641fffcbbbca9c88e0062604449b21dc", "score": "0.46170852", "text": "def readscenario(filename):\r\n f = open(filename)\r\n content = f.read()\r\n f.close()\r\n \r\n structure = json.loads(content)\r\n \r\n name = structure[\"name\"]\r\n del structure[\"name\"]\r\n structure[\"type\"] = \"Assembly\"\r\n \r\n env = popelements(name, structure)\r\n \r\n# pyvsim.System.save(env, name, \"json\")\r\n pyvsim.System.save(env, name)\r\n return env", "title": "" }, { "docid": "68597684e92d031acdc92ff85d557260", "score": "0.4610285", "text": "def make_test():\n\n # Make the environment\n env = gym_super_mario_bros.make('SuperMarioBros-1-4-v0')\n \n env = JoypadSpace(env, SIMPLE_MOVEMENT)\n\n # Scale the rewards\n #env = RewardScaler(env)\n\n # PreprocessFrame\n env = PreprocessFrame(env)\n\n # Stack 4 frames\n env = FrameStack(env, 4) # This can be changed. 
\n\n # Allow back tracking that helps agents are not discouraged too heavily\n # from exploring backwards if there is no way to advance\n # head-on in the level.\n env = AllowBacktracking(env)\n\n return env", "title": "" }, { "docid": "4ef41f89033ac649419d1f3628f51236", "score": "0.46031258", "text": "def update_scenario(self, scenarioId, name, description, content):\n \n self.__update_database(scenarioId, 'update', name, description)\n \n self.__update_test_file(scenarioId, 'update', content)", "title": "" }, { "docid": "699853cbf3024c2ac684f4df54aa40d7", "score": "0.45972872", "text": "def run_scenario(self, sc_dict):\n app_module = import_module(f\"apps.{self.app_name}\")\n app_region = app_module.app.get_region(self.region_name)\n\n build_model = app_region.build_model\n params = copy.deepcopy(app_region.params)\n params[\"default\"] = merge_dicts(self.root_model_params, params[\"default\"])\n\n # Create and run the optimisation scenario\n params[\"scenarios\"][1] = merge_dicts(sc_dict, params[\"default\"])\n opti_scenario = Scenario(build_model, idx=1, params=params)\n opti_scenario.run(base_model=self.root_model)\n\n return opti_scenario.model", "title": "" }, { "docid": "254f3fd4e85aebc39c2abad6c9830fd6", "score": "0.45766932", "text": "def main(process_id, udf_name, set_step_udf_value, step_name):\n lims = Lims(config.BASEURI, config.USERNAME, config.PASSWORD)\n step = Step(lims, id=process_id)\n \n next_step_uri = None\n\n # Get next steps\n for transition in step.configuration.transitions:\n if transition.get('name') == step_name:\n next_step_uri = transition['next-step-uri']\n\n if not next_step_uri:\n print \"Couldn't find the configured steps\"\n sys.exit(1)\n\n next_actions = step.actions.next_actions\n # Pre-cache everything\n artifacts = [Artifact(lims, uri=na['artifact-uri']) for na in next_actions]\n lims.get_batch(artifacts)\n lims.get_batch(artifact.samples[0] for artifact in artifacts)\n\n # For error reporting\n missing_values = []\n\n for na, artifact in zip(next_actions, artifacts):\n try:\n udf_value = artifact.samples[0].udf[udf_name]\n except KeyError:\n missing_values.append(artifact.name)\n continue\n \n if str(udf_value) == set_step_udf_value:\n na['action'] = 'nextstep'\n na['step-uri'] = next_step_uri\n\n if missing_values:\n print \"{0} not specified for samples: {1}\".format(udf_name, \", \".join(missing_values))\n sys.exit(1)\n\n step.actions.put()", "title": "" }, { "docid": "516a783e5ecf1d67c2b820cd4f77e477", "score": "0.45747122", "text": "def create_big_bank_workspace():\n\n workspace = Workspace(\n name=\"Big Bank plc\",\n description=(\n \"This is an example workspace to illustrate the key features of \"\n \"Structurizr, based around a fictional online banking system.\"\n ),\n )\n\n existing_system_tag = \"Existing System\"\n bank_staff_tag = \"Bank Staff\"\n web_browser_tag = \"Web Browser\"\n mobile_app_tag = \"Mobile App\"\n database_tag = \"Database\"\n failover_tag = \"Failover\"\n\n model = workspace.model\n views = workspace.views\n\n model.enterprise = Enterprise(name=\"Big Bank plc\")\n\n # people and software systems\n customer = model.add_person(\n location=Location.External,\n name=\"Personal Banking Customer\",\n description=\"A customer of the bank, with personal bank accounts.\",\n id=\"customer\",\n )\n\n internet_banking_system = model.add_software_system(\n location=Location.Internal,\n name=\"Internet Banking System\",\n description=(\n \"Allows customers to view information about \"\n \"their bank accounts, and make 
payments.\"\n ),\n id=\"internetBankingSystem\",\n )\n customer.uses(\n internet_banking_system, \"Views account balances, and makes payments using\"\n )\n\n mainframe_banking_system = model.add_software_system(\n location=Location.Internal,\n name=\"Mainframe Banking System\",\n description=(\n \"Stores all of the core banking information \"\n \"about customers, accounts, transactions, etc.\"\n ),\n id=\"mainframe\",\n )\n mainframe_banking_system.tags.add(existing_system_tag)\n internet_banking_system.uses(\n mainframe_banking_system,\n \"Gets account information from, and makes payments using\",\n )\n\n email_system = model.add_software_system(\n location=Location.Internal,\n name=\"E-mail System\",\n description=\"The internal Microsoft Exchange e-mail system.\",\n id=\"email\",\n )\n internet_banking_system.uses(\n destination=email_system,\n description=\"Sends e-mail using\",\n )\n email_system.tags.add(existing_system_tag)\n email_system.delivers(\n destination=customer,\n description=\"Sends e-mails to\",\n )\n\n atm = model.add_software_system(\n location=Location.Internal,\n name=\"ATM\",\n description=\"Allows customers to withdraw cash.\",\n id=\"atm\",\n )\n atm.tags.add(existing_system_tag)\n atm.uses(mainframe_banking_system, \"Uses\")\n customer.uses(atm, \"Withdraws cash using\")\n\n customer_service_staff = model.add_person(\n location=Location.Internal,\n name=\"Customer Service Staff\",\n description=\"Customer service staff within the bank.\",\n id=\"supportStaff\",\n )\n customer_service_staff.tags.add(bank_staff_tag)\n customer_service_staff.uses(mainframe_banking_system, \"Uses\")\n customer.interacts_with(\n customer_service_staff, \"Asks questions to\", technology=\"Telephone\"\n )\n\n back_office_staff = model.add_person(\n location=Location.Internal,\n name=\"Back Office Staff\",\n description=\"Administration and support staff within the bank.\",\n id=\"backoffice\",\n )\n back_office_staff.tags.add(bank_staff_tag)\n back_office_staff.uses(mainframe_banking_system, \"Uses\")\n\n # containers\n single_page_application = internet_banking_system.add_container(\n \"Single-Page Application\",\n (\n \"Provides all of the Internet banking functionality \"\n \"to customers via their web browser.\"\n ),\n \"JavaScript and Angular\",\n id=\"singlePageApplication\",\n )\n single_page_application.tags.add(web_browser_tag)\n mobile_app = internet_banking_system.add_container(\n \"Mobile App\",\n \"Provides a limited subset of the Internet banking functionality to \"\n \"customers via their mobile device.\",\n \"Xamarin\",\n id=\"mobileApp\",\n )\n mobile_app.tags.add(mobile_app_tag)\n web_application = internet_banking_system.add_container(\n \"Web Application\",\n \"Delivers the static content and the Internet banking single page \"\n \"application.\",\n \"Java and Spring MVC\",\n id=\"webApplication\",\n )\n api_application = internet_banking_system.add_container(\n \"API Application\",\n \"Provides Internet banking functionality via a JSON/HTTPS API.\",\n \"Java and Spring MVC\",\n id=\"apiApplication\",\n )\n database = internet_banking_system.add_container(\n \"Database\",\n \"Stores user registration information, hashed authentication credentials, \"\n \"access logs, etc.\",\n \"Relational Database Schema\",\n id=\"database\",\n )\n database.tags.add(database_tag)\n\n customer.uses(web_application, \"Uses\", technology=\"HTTPS\")\n customer.uses(single_page_application, \"Uses\")\n customer.uses(mobile_app, \"Uses\")\n web_application.uses(\n single_page_application, 
\"Delivers to the customers web browser\"\n )\n api_application.uses(database, \"Reads from and writes to\", technology=\"JDBC\")\n api_application.uses(mainframe_banking_system, \"Uses\", technology=\"XML/HTTPS\")\n api_application.uses(email_system, \"Sends e-mail using\", technology=\"SMTP\")\n\n # components\n # - for a real-world software system, you would probably want to extract the\n # components using static analysis/reflection rather than manually specifying\n # them all\n\n signin_controller = api_application.add_component(\n name=\"Sign In Controller\",\n description=\"Allows users to sign in to the Internet Banking System.\",\n technology=\"Spring MVC Rest Controller\",\n id=\"signinController\",\n )\n accounts_summary_controller = api_application.add_component(\n name=\"Accounts Summary Controller\",\n description=\"Provides customers with a summary of their bank accounts.\",\n technology=\"Spring MVC Rest Controller\",\n id=\"accountsSummaryController\",\n )\n reset_password_controller = api_application.add_component(\n name=\"Reset Password Controller\",\n description=\"Allows users to reset their passwords with a single use URL.\",\n technology=\"Spring MVC Rest Controller\",\n id=\"resetPasswordController\",\n )\n security_component = api_application.add_component(\n name=\"Security Component\",\n description=\"Provides functionality related to signing in, changing passwords, \"\n \"etc.\",\n technology=\"Spring Bean\",\n id=\"securityComponent\",\n )\n mainframe_banking_systemFacade = api_application.add_component(\n name=\"Mainframe Banking System Facade\",\n description=\"A facade onto the mainframe banking system.\",\n technology=\"Spring Bean\",\n id=\"mainframeBankingSystemFacade\",\n )\n email_component = api_application.add_component(\n name=\"E-mail Component\",\n description=\"Sends e-mails to users.\",\n technology=\"Spring Bean\",\n id=\"emailComponent\",\n )\n\n for component in api_application.components:\n if component.technology == \"Spring MVC Rest Controller\":\n single_page_application.uses(component, \"Makes API calls to\", \"JSON/HTTPS\")\n mobile_app.uses(component, \"Makes API calls to\", \"JSON/HTTPS\")\n\n signin_controller.uses(security_component, \"Uses\")\n accounts_summary_controller.uses(mainframe_banking_systemFacade, \"Uses\")\n reset_password_controller.uses(security_component, \"Uses\")\n reset_password_controller.uses(email_component, \"Uses\")\n security_component.uses(database, \"Reads from and writes to\", \"JDBC\")\n mainframe_banking_systemFacade.uses(mainframe_banking_system, \"Uses\", \"XML/HTTPS\")\n email_component.uses(email_system, \"Sends e-mail using\")\n\n # TODO:!\n # model.AddImplicitRelationships()\n\n # deployment nodes and container instances\n developer_laptop = model.add_deployment_node(\n environment=\"Development\",\n name=\"Developer Laptop\",\n description=\"A developer laptop.\",\n technology=\"Microsoft Windows 10 or Apple macOS\",\n )\n apache_tomcat = developer_laptop.add_deployment_node(\n name=\"Docker - Web Server\",\n description=\"A Docker container.\",\n technology=\"Docker\",\n ).add_deployment_node(\n name=\"Apache Tomcat\",\n description=\"An open source Java EE web server.\",\n technology=\"Apache Tomcat 8.x\",\n instances=1,\n properties={\"Xmx\": \"512M\", \"Xms\": \"1024M\", \"Java Version\": \"8\"},\n )\n apache_tomcat.add_container(web_application)\n apache_tomcat.add_container(api_application)\n\n developer_laptop.add_deployment_node(\n \"Docker - Database Server\", \"A Docker container.\", 
\"Docker\"\n ).add_deployment_node(\n \"Database Server\", \"A development database.\", \"Oracle 12c\"\n ).add_container(\n database\n )\n\n developer_laptop.add_deployment_node(\n \"Web Browser\", \"\", \"Chrome, Firefox, Safari, or Edge\"\n ).add_container(single_page_application)\n\n customer_mobile_device = model.add_deployment_node(\n \"Customer's mobile device\", \"\", \"Apple iOS or Android\", environment=\"Live\"\n )\n customer_mobile_device.add_container(mobile_app)\n\n customer_computer = model.add_deployment_node(\n \"Customer's computer\",\n \"\",\n \"Microsoft Windows or Apple macOS\",\n environment=\"Live\",\n )\n customer_computer.add_deployment_node(\n \"Web Browser\", \"\", \"Chrome, Firefox, Safari, or Edge\"\n ).add_container(single_page_application)\n\n big_bank_data_center = model.add_deployment_node(\n \"Big Bank plc\", \"\", \"Big Bank plc data center\", environment=\"Live\"\n )\n\n live_web_server = big_bank_data_center.add_deployment_node(\n \"bigbank-web***\",\n \"A web server residing in the web server farm, accessed via F5 BIG-IP LTMs.\",\n \"Ubuntu 16.04 LTS\",\n instances=4,\n properties={\"Location\": \"London and Reading\"},\n )\n live_web_server.add_deployment_node(\n \"Apache Tomcat\",\n \"An open source Java EE web server.\",\n \"Apache Tomcat 8.x\",\n instances=1,\n properties={\"Xmx\": \"512M\", \"Xms\": \"1024M\", \"Java Version\": \"8\"},\n ).add_container(web_application)\n\n live_api_server = big_bank_data_center.add_deployment_node(\n \"bigbank-api***\",\n \"A web server residing in the web server farm, accessed via F5 BIG-IP LTMs.\",\n \"Ubuntu 16.04 LTS\",\n instances=8,\n properties={\"Location\": \"London and Reading\"},\n )\n live_api_server.add_deployment_node(\n \"Apache Tomcat\",\n \"An open source Java EE web server.\",\n \"Apache Tomcat 8.x\",\n instances=1,\n properties={\"Xmx\": \"512M\", \"Xms\": \"1024M\", \"Java Version\": \"8\"},\n ).add_container(api_application)\n\n primary_database_server = big_bank_data_center.add_deployment_node(\n \"bigbank-db01\",\n \"The primary database server.\",\n \"Ubuntu 16.04 LTS\",\n instances=1,\n properties={\"Location\": \"London\"},\n ).add_deployment_node(\n \"Oracle - Primary\", \"The primary, live database server.\", \"Oracle 12c\"\n )\n primary_database_server.add_container(database)\n\n big_bank_db_02 = big_bank_data_center.add_deployment_node(\n \"bigbank-db02\",\n \"The secondary database server.\",\n \"Ubuntu 16.04 LTS\",\n instances=1,\n properties={\"Location\": \"Reading\"},\n )\n big_bank_db_02.tags.add(failover_tag)\n secondary_database_server = big_bank_db_02.add_deployment_node(\n \"Oracle - Secondary\",\n \"A secondary, standby database server, used for failover purposes only.\",\n \"Oracle 12c\",\n )\n secondary_database_server.tags.add(failover_tag)\n secondary_database = secondary_database_server.add_container(database)\n\n # model.Relationships.Where(r=>r.Destination.Equals(secondary_database)).ToList()\n # .ForEach(r=>r.tags.add(failover_tag))\n data_replication_relationship = primary_database_server.uses(\n secondary_database_server, \"Replicates data to\"\n )\n secondary_database.tags.add(failover_tag)\n\n # views/diagrams\n system_landscape_view = views.create_system_landscape_view(\n key=\"SystemLandscape\",\n description=\"The system landscape diagram for Big Bank plc.\",\n )\n system_landscape_view.add_all_elements()\n system_landscape_view.paper_size = PaperSize.A5_Landscape\n\n system_context_view = views.create_system_context_view(\n 
software_system=internet_banking_system,\n key=\"SystemContext\",\n description=\"The system context diagram for the Internet Banking System.\",\n )\n system_context_view.enterprise_boundary_visible = False\n system_context_view.add_nearest_neighbours(internet_banking_system)\n system_context_view.paper_size = PaperSize.A5_Landscape\n\n container_view = views.create_container_view(\n software_system=internet_banking_system,\n key=\"Containers\",\n description=\"The container diagram for the Internet Banking System.\",\n )\n container_view.add(customer)\n container_view.add_all_containers()\n container_view.add(mainframe_banking_system)\n container_view.add(email_system)\n container_view.paper_size = PaperSize.A5_Landscape\n\n component_view = views.create_component_view(\n container=api_application,\n key=\"Components\",\n description=\"The component diagram for the API Application.\",\n )\n component_view.add(mobile_app)\n component_view.add(single_page_application)\n component_view.add(database)\n component_view.add_all_components()\n component_view.add(mainframe_banking_system)\n component_view.add(email_system)\n component_view.paper_size = PaperSize.A5_Landscape\n\n # systemLandscapeView.AddAnimation(internet_banking_system, customer,\n # mainframe_banking_system, emailSystem)\n # systemLandscapeView.AddAnimation(atm)\n # systemLandscapeView.AddAnimation(customerServiceStaff, back_office_staff)\n\n # systemContextView.AddAnimation(internet_banking_system)\n # systemContextView.AddAnimation(customer)\n # systemContextView.AddAnimation(mainframe_banking_system)\n # systemContextView.AddAnimation(emailSystem)\n\n # containerView.AddAnimation(customer, mainframe_banking_system, emailSystem)\n # containerView.AddAnimation(webApplication)\n # containerView.AddAnimation(singlePageApplication)\n # containerView.AddAnimation(mobile_app)\n # containerView.AddAnimation(apiApplication)\n # containerView.AddAnimation(database)\n\n # componentView.AddAnimation(singlePageApplication, mobile_app)\n # componentView.AddAnimation(signinController, securityComponent, database)\n # componentView.AddAnimation(accountsSummaryController,\n # mainframe_banking_systemFacade, mainframe_banking_system)\n # componentView.AddAnimation(resetPasswordController, emailComponent, database)\n\n dynamic_view = views.create_dynamic_view(\n element=api_application,\n key=\"SignIn\",\n description=\"Summarises how the sign in feature works in the single-page application.\",\n )\n dynamic_view.add(\n single_page_application, signin_controller, \"Submits credentials to\"\n )\n dynamic_view.add(\n signin_controller, security_component, \"Calls isAuthenticated() on\"\n )\n dynamic_view.add(\n security_component, database, \"select * from users where username = ?\"\n )\n dynamic_view.paper_size = PaperSize.A5_Landscape\n\n development_deployment_view = views.create_deployment_view(\n software_system=internet_banking_system,\n key=\"DevelopmentDeployment\",\n description=\"An example development deployment scenario for the Internet \"\n \"Banking System.\",\n environment=\"Development\",\n )\n development_deployment_view.add(developer_laptop)\n development_deployment_view.paper_size = PaperSize.A5_Landscape\n\n live_deployment_view = views.create_deployment_view(\n software_system=internet_banking_system,\n key=\"LiveDeployment\",\n description=\"An example live deployment scenario for the Internet Banking \"\n \"System.\",\n environment=\"Live\",\n )\n live_deployment_view += big_bank_data_center\n live_deployment_view += 
customer_mobile_device\n live_deployment_view += customer_computer\n live_deployment_view += data_replication_relationship\n live_deployment_view.paper_size = PaperSize.A5_Landscape\n\n # colours, shapes and other diagram styling\n styles = views.configuration.styles\n styles.add(\n ElementStyle(tag=Tags.SOFTWARE_SYSTEM, background=\"#1168bd\", color=\"#ffffff\")\n )\n styles.add(ElementStyle(tag=Tags.CONTAINER, background=\"#438dd5\", color=\"#ffffff\"))\n styles.add(ElementStyle(tag=Tags.COMPONENT, background=\"#85bbf0\", color=\"#000000\"))\n styles.add(\n ElementStyle(\n tag=Tags.PERSON,\n background=\"#08427b\",\n color=\"#ffffff\",\n shape=Shape.Person,\n font_size=22,\n )\n )\n styles.add(\n ElementStyle(tag=existing_system_tag, background=\"#999999\", color=\"#ffffff\")\n )\n styles.add(ElementStyle(tag=bank_staff_tag, background=\"#999999\", color=\"#ffffff\"))\n styles.add(ElementStyle(tag=web_browser_tag, shape=Shape.WebBrowser))\n styles.add(ElementStyle(tag=mobile_app_tag, shape=Shape.MobileDeviceLandscape))\n styles.add(ElementStyle(tag=database_tag, shape=Shape.Cylinder))\n styles.add(ElementStyle(tag=failover_tag, opacity=25))\n styles.add(RelationshipStyle(tag=failover_tag, opacity=25, position=70))\n\n return workspace", "title": "" }, { "docid": "7cba24574d520cf0443f8e7dd481bcc3", "score": "0.4568673", "text": "def main():\n print(\"Env created !\")\n\n env = PandaReachGymEnv(renders=True)\n\n env.render(mode='rgb_array')\n\n model = DDPG.load(\"ddpg_panda_reach\")\n print(\"model loaded !\")\n\n while True:\n obs, done = env.reset(), False\n print(\"===================================\")\n print(\"obs\")\n print(obs)\n episode_rew = 0\n while not done:\n env.render(mode='rgb_array')\n action, _states = model.predict(obs)\n obs, rew, done, info = env.step(action)\n episode_rew += rew\n print(\"Episode reward\", episode_rew)", "title": "" }, { "docid": "58a23dfd1f6065ecf900811b90516faf", "score": "0.45644736", "text": "def step_impl(context):\n if hasattr(context, 'dburl') is False:\n assert False, \"Database URI not specified\"\n dbconn = fuzzdb.open_database(context)\n if dbconn is None:\n assert False, \"Cannot open database %s\" % context.dburl\n dbconn.close()", "title": "" }, { "docid": "1811db902eefc4f9c82fcdf1b582b4c9", "score": "0.45616004", "text": "def reset_data(scenario):\n world.absorb({}, 'scenario_dict')", "title": "" }, { "docid": "f5d93bda109360c30ffc4d0b40012cf1", "score": "0.45479956", "text": "def scenario(self, name=None):\n\n if name:\n cmd = self.command('set-scenario')\n cmd.tag('scenario').set('name', name)\n cmd.execute()\n else:\n resp = self.command('current-scenario').execute()\n name = resp.find('scenario').get('name')\n\n return name", "title": "" }, { "docid": "c298db99208385c3c88c6b3cd9c56351", "score": "0.45467633", "text": "def make_single_atari_env(gym_id,\n seed,\n idx,\n capture_video,\n run_name,\n use_episodic_life_env=True):\n\n def gen_env():\n game = pyspiel.load_game(\n \"atari\", {\n \"gym_id\": gym_id,\n \"seed\": seed,\n \"idx\": idx,\n \"capture_video\": capture_video,\n \"run_name\": run_name,\n \"use_episodic_life_env\": use_episodic_life_env\n })\n return Environment(\n game,\n chance_event_sampler=ChanceEventSampler(seed=seed),\n observation_type=ObservationType.OBSERVATION)\n\n return gen_env", "title": "" }, { "docid": "c79464d88579fce8c48abb92c0213501", "score": "0.45428926", "text": "def make(environment: str) -> gym.Env:\n return gym.make(environment)", "title": "" }, { "docid": "d39295711247cd49f4f8ea7752515557", 
"score": "0.4541374", "text": "def make_robotics_env(env_id, seed, rank=0):\n set_global_seeds(seed)\n env = gym.make(env_id)\n env = FlattenObservation(FilterObservation(env, ['observation', 'desired_goal']))\n env = Monitor(\n env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),\n info_keywords=('is_success',))\n env.seed(seed)\n return env", "title": "" }, { "docid": "4e683352b1b4064df6acfb54fd023a7e", "score": "0.4524054", "text": "def __update_database(self, scenarioId, action, name, description):\n\n pass", "title": "" }, { "docid": "a4f7c93ca89186f87304f292b31f8037", "score": "0.45088062", "text": "def scenario(self, scenario):\n super().scenario(scenario)\n Scenario(scenario).trigger(self.config)", "title": "" }, { "docid": "87c8eaf6250eff795a3c015cee693f1e", "score": "0.45084977", "text": "def run_scenario(uri, prj_name, script, args):\n __clone_from_git(uri)\n __open_prj(prj_name)\n __launch_script(script, args)", "title": "" }, { "docid": "e76c9a02447aa13fa966f8076d97ee80", "score": "0.4503313", "text": "def from_string(new_scenario, string,\n with_file=None,\n original_string=None,\n language=None,\n previous_scenario=None):\n # ignoring comments\n string = \"\\n\".join(strings.get_stripped_lines(string, ignore_lines_starting_with='#'))\n\n if not language:\n language = Language()\n\n splitted = strings.split_wisely(string, u\"(%s):\" % language.examples, True)\n string = splitted[0]\n keys = []\n outlines = []\n if len(splitted) > 1:\n parts = [l for l in splitted[1:] if l not in language.examples]\n part = \"\".join(parts)\n keys, outlines = strings.parse_hashes(strings.get_stripped_lines(part))\n\n lines = strings.get_stripped_lines(string)\n\n scenario_line = lines.pop(0).strip()\n\n for repl in (language.scenario_outline, language.scenario):\n scenario_line = strings.remove_it(scenario_line, u\"(%s): \" % repl).strip()\n\n\n\n scenario = new_scenario(\n name=scenario_line,\n remaining_lines=lines,\n keys=keys,\n outlines=outlines,\n with_file=with_file,\n original_string=original_string,\n language=language,\n previous_scenario=previous_scenario,\n )\n\n return scenario", "title": "" }, { "docid": "7d25bd545abdca73115a4f7020bc7cfb", "score": "0.45033", "text": "def testAddMaterialToScope(self):\n cmds.file(new=True, force=True)\n\n # Create a proxy shape with empty stage to start with.\n proxyShape = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()\n\n # Create a ContextOps interface for the proxy shape.\n proxyPathSegment = mayaUtils.createUfePathSegment(proxyShape)\n proxyShapePath = ufe.Path([proxyPathSegment])\n proxyShapeItem = ufe.Hierarchy.createItem(proxyShapePath)\n contextOps = ufe.ContextOps.contextOps(proxyShapeItem)\n\n # Create multiple objects to test with. 
\n cmd = contextOps.doOpCmd(['Add New Prim', 'Capsule'])\n ufeCmd.execute(cmd)\n\n rootHier = ufe.Hierarchy.hierarchy(proxyShapeItem)\n self.assertTrue(rootHier.hasChildren())\n self.assertEqual(len(rootHier.children()), 1)\n\n capsuleItem = rootHier.children()[0]\n\n capsulePrim = usdUtils.getPrimFromSceneItem(capsuleItem)\n self.assertFalse(capsulePrim.HasAPI(UsdShade.MaterialBindingAPI))\n\n contextOps = ufe.ContextOps.contextOps(capsuleItem)\n\n # Create a new material and apply it to our cube, sphere and capsule objects.\n cmdPS = contextOps.doOpCmd(['Assign New Material', 'USD', 'UsdPreviewSurface'])\n self.assertIsNotNone(cmdPS)\n ufeCmd.execute(cmdPS)\n\n scopeItem = rootHier.children()[-1]\n scopeHier = ufe.Hierarchy.hierarchy(scopeItem)\n self.assertTrue(scopeHier.hasChildren())\n self.assertEqual(len(scopeHier.children()), 1)\n\n scopeOps = ufe.ContextOps.contextOps(scopeItem)\n cmdAddSS = scopeOps.doOpCmd(['Add New Material', 'MaterialX', 'ND_standard_surface_surfaceshader'])\n ufeCmd.execute(cmdAddSS)\n\n # Should now be two materials in the scope.\n self.assertEqual(len(scopeHier.children()), 2)\n\n cmds.undo()\n\n self.assertEqual(len(scopeHier.children()), 1)\n\n cmds.redo()\n\n self.assertEqual(len(scopeHier.children()), 2)\n\n newMatItem = scopeHier.children()[-1]\n\n connectionHandler = ufe.RunTimeMgr.instance().connectionHandler(newMatItem.runTimeId())\n self.assertIsNotNone(connectionHandler)\n connections = connectionHandler.sourceConnections(newMatItem)\n self.assertIsNotNone(connectionHandler)\n conns = connections.allConnections()\n self.assertEqual(len(conns), 1)\n\n mxConn = conns[0]\n self.assertEqual(ufe.PathString.string(mxConn.src.path), \"|stage1|stageShape1,/mtl/standard_surface1/standard_surface1\")\n self.assertEqual(mxConn.src.name, \"outputs:out\")\n self.assertEqual(ufe.PathString.string(mxConn.dst.path), \"|stage1|stageShape1,/mtl/standard_surface1\")\n self.assertEqual(mxConn.dst.name, \"outputs:mtlx:surface\")", "title": "" }, { "docid": "7e47aee2b66c58bc6ab6914f2147ab5d", "score": "0.44995278", "text": "def loadEnv(args, verbose=True):\n print(\"\\n== Environment Information ==\")\n env_name = \"dubins_car-v1\"\n if args.forceCPU:\n device = torch.device(\"cpu\")\n else:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n env = gym.make(env_name, device=device, mode=\"RA\", doneType=\"toEnd\")\n\n if args.low:\n env.set_target(radius=0.4)\n env.set_radius_rotation(R_turn=0.75, verbose=False)\n else:\n env.set_target(radius=0.5)\n env.set_radius_rotation(R_turn=0.6, verbose=False)\n\n state_dim = env.state.shape[0]\n action_num = env.action_space.n\n print(\n \"State Dimension: {:d}, ActionSpace Dimension: {:d}\".format(\n state_dim, action_num\n )\n )\n\n print(\"Dynamic parameters:\")\n print(\" CAR\")\n print(\n \" Constraint radius: {:.1f}, \".format(env.car.constraint_radius)\n + \"Target radius: {:.1f}, \".format(env.car.target_radius)\n + \"Turn radius: {:.2f}, \".format(env.car.R_turn)\n + \"Maximum speed: {:.2f}, \".format(env.car.speed)\n + \"Maximum angular speed: {:.3f}\".format(env.car.max_turning_rate)\n )\n print(\" ENV\")\n print(\n \" Constraint radius: {:.1f}, \".format(env.constraint_radius)\n + \"Target radius: {:.1f}, \".format(env.target_radius)\n + \"Turn radius: {:.2f}, \".format(env.R_turn)\n + \"Maximum speed: {:.2f}\".format(env.speed)\n )\n print(env.car.discrete_controls)\n if 2 * env.car.R_turn - env.car.constraint_radius > env.car.target_radius:\n print(\"Type II Reach-Avoid Set\")\n else:\n 
print(\"Type I Reach-Avoid Set\")\n return env", "title": "" }, { "docid": "36b15300cc820df64441a381cd06114f", "score": "0.4497826", "text": "def load_scenario(name: str, job_gen_seed: Optional[int],\n override_env_params: Optional[Dict] = None) -> Scenario:\n assert name in SCENARIO_CONSTRUCTORS, f'No Scenario Named: {name}.'\n if override_env_params is not None:\n override_env_params['job_gen_seed'] = job_gen_seed\n else:\n override_env_params = dict()\n\n env_params = get_scenario_default_params(name, job_gen_seed)\n env_params.update(override_env_params)\n return Scenario(name, SCENARIO_CONSTRUCTORS[name](**env_params))", "title": "" }, { "docid": "f1b95a0de864bb41b3142a5422f9763f", "score": "0.44879678", "text": "def run(cmd):\n \n print('> > > - - - Run cmd={} start - - - > > > '.format(cmd)) \n ##############\n # Create the environment\n # Flags:\n # verbose - set to True to display additional output from the simulation\n # num_dummies - discrete number of dummy agents in the environment, default is 100\n # grid_size - discrete number of intersections (columns, rows), default is (8, 6)\n #env = Environment(verbose=True, num_dummies=5, grid_size=(4,2))\n env = Environment(verbose=True)\n \n ##############\n # Create the driving agent\n # Flags:\n # learning - set to True to force the driving agent to use Q-learning\n # * epsilon - continuous value for the exploration factor, default is 1\n # * alpha - continuous value for the learning rate, default is 0.5\n if cmd == 1 : # run no-learning\n agent = env.create_agent( LearningAgent, learning=False )\n elif cmd == 2 : # run default-learning\n agent = env.create_agent( LearningAgent, learning=True )\n agent.use_default_decay() # use default decay function e t+1 = e - 0.05 \n else: # 3 # run improved learning\n #agent = env.create_agent( LearningAgent, learning=True, epsilon=0.6, alpha=0.4 )\n agent = env.create_agent( LearningAgent, learning=True, epsilon=0.4, alpha=0.4 )\n \n ##############\n # Follow the driving agent\n # Flags:\n # enforce_deadline - set to True to enforce a deadline metric\n if cmd == 1:\n env.set_primary_agent(agent)\n else: # 2, 3\n env.set_primary_agent(agent, enforce_deadline=True)\n\n ##############\n # Create a Simulator\n # Flags:\n # update_delay - continuous time (in seconds) between actions, default is 2.0 seconds\n # display - set to False to disable the GUI if PyGame is enabled\n # log_metrics - set to True to log trial and simulation results to /logs\n # optimized - set to True to change the default log file name\n if cmd == 1: # run no-learning\n sim = Simulator(env, update_delay=0.01, log_metrics=True, display=True)\n elif cmd == 2: # default learning\n sim = Simulator(env, update_delay=0.01, log_metrics=True, display=False)\n else: # 3 improved learning\n sim = Simulator(env, update_delay=0.01, log_metrics=True, display=False, optimized=True )\n \n ##############\n # Run the simulator\n # Flags:\n # tolerance - epsilon tolerance before beginning testing, default is 0.05 \n # n_test - discrete number of testing trials to perform, default is 0\n if cmd == 1:\n sim.run(n_test=10) \n elif cmd == 2:\n sim.run(n_test=10) \n else: #3\n sim.run(n_test=25, tolerance=0.0875 ) # tolerance 0.0875 DA A+A+ log_11 0.875 A+A+A+A+ logs\n \n print('> > > - - - Run End - - - > > > ')", "title": "" }, { "docid": "2d0f12826c2fd118e7dda8ccde80fe77", "score": "0.44804627", "text": "def main(test_mode = True):\n\n bot = DragonBot()\n\n bot.setup(\"localhost\", port = 6667,\n password = \"hunter2\",\n nick = 
\"Toothless\")\n\n bot.admins([\"StoickTheVast\"])\n bot.devs([\"Hiccup\", \"Astrid\"])\n\n bot.skills(\"skills\", \"skills.db\")\n\n if not test_mode:\n\n bot.start_client()\n\n else:\n\n bot.start_shell()", "title": "" }, { "docid": "93d31aaadee83b2602616b0e99fe8427", "score": "0.44776383", "text": "def create_scenario(scn_descr: dict, common_options=0, log_level: int = logging.INFO):\n logging.basicConfig(level=log_level, format=\"[%(levelname)s] - %(message)s\")\n global my_scn\n if my_scn:\n logging.error(\"A scenario already exists. It will be overwritten\")\n my_scn = ObjScn(scn_descr, common_options)", "title": "" }, { "docid": "e8f0aa8a55568406ee4c0b6aa66a3ea4", "score": "0.4474218", "text": "def __init__(self,\n model,\n storyKind,\n # e.g. \"object model\", \"context\" ...\n ensureCheckAfter,\n infoIfCheckAdded,\n allowDefinition,\n allowAction,\n allowVerb,\n allowedIncludeKinds,\n getStoryId,\n astStory):\n #type: ('Model', Text, bool, bool, bool, bool, bool, List[Text], Optional[GetStoryFun], 'ASTStory') -> None\n self.astStory=astStory\n self.model=model\n\n self.storyKind=storyKind\n #type: Text\n # Some string like \"object model\" or \"context\".\n # This string is used in error message.\n\n self.ensureCheckAfter=ensureCheckAfter\n #type: bool\n\n self.infoIfCheckAdded=infoIfCheckAdded\n #type: bool\n # Indicates if an \"info\" issue must be created each time\n # an implicit check is added. See also noInfoForScenario.\n\n self.allowDefinition=allowDefinition\n #type: bool\n\n self.allowAction=allowAction\n # type: bool\n\n self.allowVerb=allowVerb\n # type: bool\n\n self.allowedIncludeKinds=allowedIncludeKinds\n # type: List[Text]\n # The kinds of storyId that are allowed.\n # For instance the 'scenarios' module set this\n # parameter to ['fragment', 'context']. See this module\n # for an example and the usage of the parameter below to\n # see how it works.\n\n self.getStoryId=getStoryId\n #type: Optional[GetStoryFun]\n # This function must convert (kind,name) from the story syntax\n # to a story model. It must be defined when \"allowIncluded\".\n # It must be None otherwise. The function return None in case\n # of a syntatical error in the id.\n\n self.storyContainer=None\n #type: Optional['StoryContainer']\n # The container of this story if any. In practice this\n # will be a scenarios.StoryContainer but the type is not\n # specified here because the whole module has been designed\n # to be independent from the module scenarios.\n # This variable is set directly by the scenario parser\n # after having created the Story.\n # In practice this variable is used to give better label\n # to step and in particular to object step. It is useful\n # as well for the computation of superSubjects/subjectLabel\n\n self.checkSteps=[]\n #type: List[CheckStep]\n # List of all CheckStep of the story, including implicit\n # (before/after) checks.\n\n self._is_check_needed=False\n # type: bool\n # This variable is a \"temporary\" variable with respect to\n # the construction of Story. It is used to control the\n # creation of implicit CheckStep.\n # These steps should be created only if at least an operation\n # was issued just before the potential check point.\n # Variable to keep along the creation of statements the\n # need to create an implict check statement. This is the\n # case when a new operation occur. 
Otherwise text block\n # do not count, and check statements set it to no.\n # See _add_check_if_needed.", "title": "" }, { "docid": "304ddaa5c60cb4fd24699ce3acbb3f2f", "score": "0.44702446", "text": "def wrap_env(env):\n env = EpisodicLifeEnv(env)\n env = MaxAndSkipEnv(env, skip=4)\n env = WarpFrame(env)\n env = NormalizedEnv(env)\n env = FrameStack(env, 4)\n env = ClipRewardEnv(env)\n return env", "title": "" }, { "docid": "d897f73a8d25c689862529ec2e0f3015", "score": "0.44648057", "text": "def __init__(self):\n self.env = gym.make('draughts-v0')\n self.save_game = GameMemory(self.env)", "title": "" }, { "docid": "edf4b465cc056309cb84b8763240ce8e", "score": "0.44647038", "text": "def save_scenario(scenario_name):\r\n\r\n # Create folder\r\n HERE = os.path.abspath(__file__)\r\n HERE_DIR = os.path.dirname(HERE)\r\n scenario_path = os.path.join(HERE_DIR, \"results\", scenario_name)\r\n os.makedirs(scenario_path, exist_ok=True)\r\n\r\n # Compute some additional info\r\n ARGMIN, _ = torch.solve(-F_B, F_A)\r\n MIN = F.func(ARGMIN)\r\n DIST_INIT = (THETA_INIT - ARGMIN.reshape(-1)).norm()\r\n LOSS_INIT = F.func(THETA_INIT)\r\n\r\n info_dict = {\r\n \"scenario_name\": scenario_name,\r\n \"F_A\": tensor_to_list(F_A),\r\n \"F_B\": tensor_to_list(F_B),\r\n \"F_C\": tensor_to_list(F_C),\r\n \"THETA_INIT\": tensor_to_list(THETA_INIT),\r\n \"BATCH_SIZE\": BATCH_SIZE,\r\n \"NOF_STEPS\": NOF_STEPS,\r\n \"NOF_RUNS\": NOF_RUNS,\r\n \"LR_SGD\": LR_SGD,\r\n \"LR_BDN\": LR_BDN,\r\n \"LR_CDN\": LR_CDN,\r\n \"CDN_DAMPINGS\": tensor_to_list(CDN_DAMPINGS),\r\n \"BDN_DAMPING_GRID\": tensor_to_list(BDN_DAMPING_GRID),\r\n \"EDN_DAMPING_START\": EDN_DAMPING_START,\r\n \"EDN_DAMPING_END\": EDN_DAMPING_END,\r\n \"GAMMAS_VARIANCE\": tensor_to_list(GAMMAS_VARIANCE),\r\n \"LAMBDAS_VARIANCE\": tensor_to_list(LAMBDAS_VARIANCE),\r\n \"ARGMIN\": tensor_to_list(ARGMIN.reshape(-1)),\r\n \"MIN\": MIN.cpu().item(),\r\n \"DIST_INIT\": DIST_INIT.cpu().item(),\r\n \"LOSS_INIT\": LOSS_INIT.cpu().item(),\r\n }\r\n\r\n # Save dictionary\r\n file_path = os.path.join(scenario_path, \"scenario_info.json\")\r\n with open(file_path, \"w\") as json_file:\r\n json.dump(info_dict, json_file, indent=4)", "title": "" }, { "docid": "e3e37f3ad96975d031f93070ffd46674", "score": "0.44637397", "text": "def __init__(self, args):\n #import pdb;pdb.set_trace()\n from doom_emulator import DoomEmulator\n #from ale_python_interface import ALEInterface\n #filename = args.rom_path + \"/\" + args.game + \".bin\"\n #ale_int = ALEInterface()\n #ale_int.loadROM(str.encode(filename))\n #self.num_actions = len(ale_int.getMinimalActionSet())\n\n # TODO: do better\n self.num_actions = 3\n #self.num_actions = 7\n self.create_environment = lambda i: DoomEmulator(i, args)", "title": "" }, { "docid": "a5f279a866e23a960d8cb16d810a878c", "score": "0.44520512", "text": "def run_para_scen(hazard_level, infrastructure, scenario):\n return infrastructure.expose_to(hazard_level, scenario)", "title": "" }, { "docid": "588ef38b9c6b4930f7786dae08d31708", "score": "0.44418946", "text": "def mobility(args):\n import cea.analysis.lca.mobility\n import cea.inputlocator\n cea.analysis.lca.mobility.lca_mobility(locator=cea.inputlocator.InputLocator(args.scenario))", "title": "" }, { "docid": "0604d9002f332ade44b030ad3a5940c4", "score": "0.44350544", "text": "def parser_scenario_step(self,scenario,goal):\n which_step_action=deepcopy(self.stepsList[goal['stepIndex']]['action'])\n which_step_name=deepcopy(self.stepsList[goal['stepIndex']]['name'])\n\n if scenario == 'receptionist':\n ##### 
rajouter des conditions si nouvelles actions importantes\n if which_step_action != '':\n if which_step_action=='askOpenDoor':\n self.dynamic_view(goal['stepIndex'],None,wait_for_event=True)\n\n elif which_step_action=='presentPerson':\n self.dynamic_view(goal['stepIndex'],goal['data'])\n\n elif which_step_action=='seatGuest':\n self.dynamic_view(goal['stepIndex'],goal['data'])\n\n elif which_step_action==\"pointTo\":\n self.dynamic_view(goal['stepIndex'],goal['data'])\n\n elif which_step_action=='askToFollow':\n self.dynamic_view(goal['stepIndex'],goal['data'])\n\n else:\n self.static_view(goal['stepIndex'])\n \n else:\n if \"Ask infos\" in which_step_name:\n self.load_multiple_views(goal['stepIndex'],procedure_type='guestInfos')\n\n else:\n self.static_view(goal['stepIndex'])\n\n elif scenario == 'cleanup':\n if 'Ask room' in which_step_name:\n self.load_multiple_views(goal['stepIndex'],procedure_type='chooseRoom')\n else:\n self.static_view(goal['stepIndex'])", "title": "" }, { "docid": "0f514bb660cd8b92bf402bb94e1f8883", "score": "0.4415957", "text": "def make_env(env_idx):\n\n\n # Make the environment\n\n\n levelList = ['SuperMarioBros-1-1-v0','SuperMarioBros-1-2-v0','SuperMarioBros-1-3-v0','SuperMarioBros-1-4-v0','SuperMarioBros-2-1-v0','SuperMarioBros-2-2-v0','SuperMarioBros-2-3-v0','SuperMarioBros-2-4-v0']\n\n\n # record_path = \"./records/\" + dicts[env_idx]['state']\n env = gym_super_mario_bros.make(levelList[env_idx])\n \n\n env = JoypadSpace(env, SIMPLE_MOVEMENT)\n\n #env = RewardScaler(env)\n\n # PreprocessFrame\n env = PreprocessFrame(env)\n\n\n # Stack 4 frames\n env = FrameStack(env,4)\n\n # Allow back tracking that helps agents are not discouraged too heavily\n # from exploring backwards if there is no way to advance\n # head-on in the level.\n env = AllowBacktracking(env)\n\n return env", "title": "" }, { "docid": "c4732280c3eba5f345575dbdfea523d3", "score": "0.44154653", "text": "def gym_env_creator(env_context: EnvContext, env_descriptor: str):\n import gym\n # Allow for PyBullet or VizdoomGym envs to be used as well\n # (via string). This allows for doing things like\n # `env=CartPoleContinuousBulletEnv-v0` or\n # `env=VizdoomBasic-v0`.\n try:\n import pybullet_envs\n pybullet_envs.getList()\n except (ModuleNotFoundError, ImportError):\n pass\n try:\n import vizdoomgym\n vizdoomgym.__name__ # trick LINTer.\n except (ModuleNotFoundError, ImportError):\n pass\n\n # Try creating a gym env. If this fails we can output a\n # decent error message.\n try:\n return gym.make(env_descriptor, **env_context)\n except gym.error.Error:\n raise EnvError(ERR_MSG_INVALID_ENV_DESCRIPTOR.format(env_descriptor))", "title": "" }, { "docid": "4d4582e2e1f419820cac0eebcec320d1", "score": "0.44148746", "text": "def run_adventure():\r\n \r\n #obtains user's name\r\n name_input()\r\n\r\n #obtains user's character's race\r\n character_create_race()\r\n\r\n #obtains user's character's class\r\n character_create_class()\r\n\r\n #starts the phoenix + warrior selection route\r\n phoenix_route_warrior_1()", "title": "" }, { "docid": "524d59fa798405a6eb33301895777c3b", "score": "0.44122776", "text": "def simulate(model, seed=None, video_env=None):\n if video_env is None:\n # Since we are using multiple processes, it is simpler if each worker\n # just creates their own copy of the environment instead of trying to\n # share the environment. 
This also makes the function \"pure.\"\n env = gym.make(\"LunarLander-v2\")\n else:\n env = video_env\n\n if seed is not None:\n env.seed(seed)\n\n action_dim = env.action_space.n\n obs_dim = env.observation_space.shape[0]\n model = model.reshape((action_dim, obs_dim))\n\n total_reward = 0.0\n impact_x_pos = None\n impact_y_vel = None\n all_y_vels = []\n obs = env.reset()\n done = False\n\n while not done:\n action = np.argmax(model @ obs) # Linear policy.\n obs, reward, done, _ = env.step(action)\n total_reward += reward\n\n # Refer to the definition of state here:\n # https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py#L306\n x_pos = obs[0]\n y_vel = obs[3]\n leg0_touch = bool(obs[6])\n leg1_touch = bool(obs[7])\n all_y_vels.append(y_vel)\n\n # Check if the lunar lander is impacting for the first time.\n if impact_x_pos is None and (leg0_touch or leg1_touch):\n impact_x_pos = x_pos\n impact_y_vel = y_vel\n\n # If the lunar lander did not land, set the x-pos to the one from the final\n # timestep, and set the y-vel to the max y-vel (we use min since the lander\n # goes down).\n if impact_x_pos is None:\n impact_x_pos = x_pos\n impact_y_vel = min(all_y_vels)\n\n # Only close the env if it was not a video env.\n if video_env is None:\n env.close()\n\n return total_reward, impact_x_pos, impact_y_vel", "title": "" }, { "docid": "7ffd33bccf2b5fc7dc0566ab27498358", "score": "0.4412109", "text": "def create_env():\n return aicrowd_gym.make(\"NetHackChallenge-v0\")", "title": "" }, { "docid": "2921190066ce10aac7fdc130b49d3ac1", "score": "0.4410043", "text": "def test_set_env(tmp_path, model_class):\n\n # use discrete for DQN\n env = DummyVecEnv([lambda: select_env(model_class)])\n env2 = DummyVecEnv([lambda: select_env(model_class)])\n env3 = select_env(model_class)\n env4 = DummyVecEnv([lambda: select_env(model_class) for _ in range(2)])\n\n kwargs = {}\n if model_class in {DQN, DDPG, SAC, TD3}:\n kwargs = dict(learning_starts=50, train_freq=4)\n elif model_class in {A2C, PPO}:\n kwargs = dict(n_steps=64)\n\n # create model\n model = model_class(\"MlpPolicy\", env, policy_kwargs=dict(net_arch=[16]), **kwargs)\n # learn\n model.learn(total_timesteps=64)\n\n # change env\n model.set_env(env2, force_reset=True)\n # Check that last obs was discarded\n assert model._last_obs is None\n # learn again\n model.learn(total_timesteps=64, reset_num_timesteps=True)\n assert model.num_timesteps == 64\n\n # change env test wrapping\n model.set_env(env3)\n # learn again\n model.learn(total_timesteps=64)\n\n # num_env must be the same\n with pytest.raises(AssertionError):\n model.set_env(env4)\n\n # Keep the same env, disable reset\n model.set_env(model.get_env(), force_reset=False)\n assert model._last_obs is not None\n # learn again\n model.learn(total_timesteps=64, reset_num_timesteps=False)\n assert model.num_timesteps == 2 * 64\n\n current_env = model.get_env()\n model.save(tmp_path / \"test_save.zip\")\n del model\n # Check that we can keep the number of timesteps after loading\n # Here the env kept its state so we don't have to reset\n model = model_class.load(tmp_path / \"test_save.zip\", env=current_env, force_reset=False)\n assert model._last_obs is not None\n model.learn(total_timesteps=64, reset_num_timesteps=False)\n assert model.num_timesteps == 3 * 64\n\n del model\n # We are changing the env, the env must reset but we should keep the number of timesteps\n model = model_class.load(tmp_path / \"test_save.zip\", env=env3, force_reset=True)\n assert model._last_obs is None\n 
model.learn(total_timesteps=64, reset_num_timesteps=False)\n assert model.num_timesteps == 3 * 64\n\n del model\n # Load the model with a different number of environments\n model = model_class.load(tmp_path / \"test_save.zip\", env=env4)\n model.learn(total_timesteps=64)\n\n # Clear saved file\n os.remove(tmp_path / \"test_save.zip\")", "title": "" }, { "docid": "6d9afafe3022f8650125d529b814b2ea", "score": "0.4405221", "text": "def scenarioCharged(self,json):\n rospy.loginfo(\"HRI : SCENARIO CHARGED\")\n self.json_for_GM=json\n self.scenario_loaded=True", "title": "" }, { "docid": "225854c35239757c2b0c1d69297304c0", "score": "0.43986684", "text": "def readinstructions(filename):\r\n f = open(filename)\r\n content = f.read()\r\n f.close()\r\n \r\n structure = json.loads(content)\r\n \r\n environment = pyvsim.System.load(structure[\"scenario\"])\r\n \r\n commands = structure[\"commands\"]\r\n \r\n for target, command in commands:\r\n action = Command(name = target, commands = command)\r\n environment.acceptVisitor(action)\r\n if not action.executed:\r\n raise ValueError(\"Object \" + action.name + \" not found!\")\r\n \r\n return environment", "title": "" }, { "docid": "bed5ca672ff5100bb438cc64c72e7080", "score": "0.4397274", "text": "def test07_add_environment(self):\r\n\r\n self.DUT.request_profile(0)\r\n\r\n (_results, _error_code, _last_id) = self.DUT.add_environment(0, 0, 1)\r\n self.assertTrue(_results)\r\n self.assertEqual(_error_code, 0)\r\n\r\n _mission = self.DUT.dicProfiles[0].dicMissions[0]\r\n _environment = _mission.dicPhases[1].dicEnvironments[_last_id]\r\n self.assertTrue(isinstance(_environment, Environment))", "title": "" }, { "docid": "7fc67e1ef8609852d0b1c0177b748401", "score": "0.43961832", "text": "def make_batch(env_id, batch_size, parallel=False):\n env_id = textlabs_agent_gym.make(env_id)\n batch_env_id = \"batch{}-\".format(batch_size) + env_id\n env_spec = spec(env_id)\n entry_point= 'textlabs_agent_gym.envs:BatchEnv'\n if parallel:\n entry_point = 'textlabs_agent_gym.envs:ParallelBatchEnv'\n\n register(\n id=batch_env_id,\n entry_point=entry_point,\n max_episode_steps=env_spec.max_episode_steps,\n max_episode_seconds=env_spec.max_episode_seconds,\n nondeterministic=env_spec.nondeterministic,\n reward_threshold=env_spec.reward_threshold,\n trials=env_spec.trials,\n # Setting the 'vnc' tag avoid wrapping the env with a TimeLimit wrapper. 
See\n # https://github.com/openai/gym/blob/4c460ba6c8959dd8e0a03b13a1ca817da6d4074f/gym/envs/registration.py#L122\n tags={\"vnc\": \"foo\"},\n kwargs={'env_id': env_id, 'batch_size': batch_size}\n )\n\n return batch_env_id", "title": "" }, { "docid": "301d9103e9306138936694221b2ff32f", "score": "0.43898782", "text": "def gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env,\n rendered_env_resize_to, sticky_actions, output_dtype):\n # rl_env_max_episode_steps is None or int.\n assert ((not rl_env_max_episode_steps) or\n isinstance(rl_env_max_episode_steps, int))\n\n wrap_with_time_limit = ((not rl_env_max_episode_steps) or\n rl_env_max_episode_steps >= 0)\n\n if wrap_with_time_limit:\n env = remove_time_limit_wrapper(env)\n\n if sticky_actions:\n env = StickyActionEnv(env)\n\n if maxskip_env:\n env = MaxAndSkipEnv(env) # pylint: disable=redefined-variable-type\n\n if rendered_env:\n env = RenderedEnv(\n env, resize_to=rendered_env_resize_to, output_dtype=output_dtype)\n\n if wrap_with_time_limit and rl_env_max_episode_steps is not None:\n env = gym.wrappers.TimeLimit(\n env, max_episode_steps=rl_env_max_episode_steps)\n return env", "title": "" }, { "docid": "df2ced01bdc226d04f76d65343e84e21", "score": "0.43846923", "text": "def step_impl(context):\n raise NotImplementedError", "title": "" }, { "docid": "76312f8c110ef0b89d6dc9d53d5ba8ba", "score": "0.4370503", "text": "def create_environment(config, visible=True):\n\n game = DoomGame()\n \n # Load the correct configuration\n game.load_config(config['game_config'])\n \n # Load the correct scenario (in our case basic scenario)\n game.set_doom_scenario_path(config['game_scenario'])\n \n game.set_window_visible(visible)\n\n game.init()\n \n # actions = [config['actions'][key] for key in config['actions']]\n\n actions = config['actions']\n\n return game, actions", "title": "" }, { "docid": "121a748b5c394d4334eb73f07bee6f66", "score": "0.43691945", "text": "def appendScenarios(story, text, default_language='en'):\n\n language = default_language\n scenarios = []\n scenario = None\n outline = None\n outline_variables = None\n outline_examples = 0\n previousStep = None\n\n for line in text.splitlines():\n\n languageMatch = language_regex.match(line)\n if languageMatch:\n language = languageMatch.group(1)\n\n scenarioMatch = scenario_regex.match(line, language)\n if scenarioMatch:\n scenario = Scenario(scenarioMatch.group(1), story=story)\n outline = None\n outline_variables = None\n outline_examples = 0\n previousStep = None\n scenarios.append(scenario)\n continue\n\n outlineMatch = outline_regex.match(line, language)\n if outlineMatch:\n scenario = None\n outline = Scenario(outlineMatch.group(1))\n outline_variables = None\n outline_examples = 0\n previousStep = None\n continue\n\n examplesMatch = examples_regex.match(line, language)\n if outline and examplesMatch:\n outline_variables = []\n continue\n\n if outline_variables is not None:\n values = examples_table_cell_regex.findall(line.strip())\n if values and not outline_variables:\n # First row is a table header\n outline_variables = [name.strip() for name in values]\n elif values:\n # Preceding rows are transformed into scenarios\n if len(values) < len(outline_variables):\n raise ValueError(\"Not enough values in %s\" % line)\n elif len(values) > len(outline_variables):\n raise ValueError(\"Too many values in %s\" % line)\n\n mapping = {}\n for idx in range(len(values)):\n mapping[outline_variables[idx]] = values[idx].strip()\n def translate(s, mapping):\n for key in 
mapping:\n s = s.replace(u\"<%s>\" % key, mapping[key])\n return s\n\n outline_examples += 1\n example = Scenario(\n u\"%s #%02d\" % (outline.name, outline_examples), story=story)\n previousStep = None\n for given in outline.givens:\n example.givens.append(given.__class__(\n translate(given.text, mapping), previousStep))\n previousStep = \"given\"\n for when in outline.whens:\n example.whens.append(when.__class__(\n translate(when.text, mapping), previousStep))\n previousStep = \"when\"\n for then in outline.thens:\n example.thens.append(then.__class__(\n translate(then.text, mapping), previousStep))\n previousStep = \"then\"\n scenarios.append(example)\n continue\n\n givenMatch = given_regex.match(line, language)\n if givenMatch:\n if previousStep:\n raise ValueError(\"Found %s, but previous step was %s\" % (line, previousStep,))\n\n if scenario:\n scenario.givens.append(Step(givenMatch.group(1), previousStep))\n elif outline:\n outline.givens.append(Step(givenMatch.group(1), previousStep))\n else:\n story.givens.append(Step(givenMatch.group(1), previousStep))\n previousStep = \"given\"\n continue\n\n whenMatch = when_regex.match(line, language)\n if whenMatch:\n if previousStep not in ('given', None):\n raise ValueError(\"Found %s, but previous step was %s\" % (line, previousStep,))\n\n if scenario:\n scenario.whens.append(Step(whenMatch.group(1), previousStep))\n elif outline:\n outline.whens.append(Step(whenMatch.group(1), previousStep))\n else:\n story.whens.append(Step(whenMatch.group(1), previousStep))\n previousStep = \"when\"\n continue\n\n thenMatch = then_regex.match(line, language)\n if thenMatch:\n if previousStep != 'when':\n raise ValueError(\"Found %s, but previous step was %s\" % (line, previousStep,))\n\n if scenario:\n scenario.thens.append(Step(thenMatch.group(1), previousStep))\n elif outline:\n outline.thens.append(Step(thenMatch.group(1), previousStep))\n else:\n story.thens.append(Step(thenMatch.group(1), previousStep))\n previousStep = \"then\"\n continue\n\n andMatch = and_regex.match(line, language) or but_regex.match(line, language)\n if andMatch:\n if previousStep is None:\n raise ValueError(\"Found %s, but no previous step found\" % line)\n\n if scenario:\n if previousStep == \"given\":\n scenario.givens.append(Step(andMatch.group(1), previousStep))\n elif previousStep == \"when\":\n scenario.whens.append(Step(andMatch.group(1), previousStep))\n elif previousStep == \"then\":\n scenario.thens.append(Step(andMatch.group(1), previousStep))\n elif outline:\n if previousStep == \"given\":\n outline.givens.append(Step(andMatch.group(1), previousStep))\n elif previousStep == \"when\":\n outline.whens.append(Step(andMatch.group(1), previousStep))\n elif previousStep == \"then\":\n outline.thens.append(Step(andMatch.group(1), previousStep))\n else:\n if previousStep == \"given\":\n story.givens.append(Step(andMatch.group(1), previousStep))\n elif previousStep == \"when\":\n story.whens.append(Step(andMatch.group(1), previousStep))\n elif previousStep == \"then\":\n story.thens.append(Step(andMatch.group(1), previousStep))\n continue\n\n story.scenarios.extend(scenarios)", "title": "" }, { "docid": "e4aa6800ea5d3215e3b5e0f23e68bc22", "score": "0.43653694", "text": "def test_auto_wrap(model_class):\n # Use different environment for DQN\n if model_class is DQN:\n env_id = \"CartPole-v1\"\n else:\n env_id = \"Pendulum-v1\"\n env = gym.make(env_id)\n model = model_class(\"MlpPolicy\", env)\n model.learn(100)", "title": "" }, { "docid": "319147b5c8ce09d22a30bc574139ebc5", 
"score": "0.43591344", "text": "def __init__(self, env, skip=5):\n\t\tgym.Wrapper.__init__(self, env)\n\t\tself._skip = skip", "title": "" }, { "docid": "ccfb3548fbe670b20287e8cd90fa8220", "score": "0.43589082", "text": "def run(parameter):\n print(json.dumps(parameter, indent=2))\n time.sleep(2)\n slot_set = pickle.load(file=open(parameter[\"slot_set\"], \"rb\"))\n action_set = pickle.load(file=open(parameter[\"action_set\"], \"rb\"))\n disease_symptom = pickle.load(file=open(parameter[\"disease_symptom\"], \"rb\"))\n steward = RunningSteward(parameter=parameter, checkpoint_path=parameter[\"checkpoint_path\"])\n\n print('action_set', action_set)\n warm_start = parameter.get(\"warm_start\")\n warm_start_epoch_number = parameter.get(\"warm_start_epoch_number\")\n train_mode = parameter.get(\"train_mode\")\n agent_id = parameter.get(\"agent_id\")\n simulate_epoch_number = parameter.get(\"simulate_epoch_number\")\n\n # Warm start.\n if warm_start == True and train_mode == True:\n print(\"warm starting...\")\n agent = AgentRule(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom,\n parameter=parameter)\n steward.dialogue_manager.set_agent(agent=agent)\n steward.warm_start(epoch_number=warm_start_epoch_number)\n # exit()\n if agent_id.lower() == 'agentdqn':\n agent = AgentDQN(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom,\n parameter=parameter)\n elif agent_id.lower() == 'agentrandom':\n agent = AgentRandom(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom,\n parameter=parameter)\n elif agent_id.lower() == 'agentrule':\n agent = AgentRule(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom,\n parameter=parameter)\n elif agent_id.lower() == 'agenthrl':\n agent = AgentHRL(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom,\n parameter=parameter)\n elif agent_id.lower() == 'agentwithgoaljoint':\n agent = AgentWithGoalJoint(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom,\n parameter=parameter)\n elif agent_id.lower() == 'agentwithgoal':\n agent = AgentWithGoal(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom,\n parameter=parameter)\n else:\n raise ValueError(\n 'Agent id should be one of [AgentRule, AgentDQN, AgentRandom, AgentHRL, AgentWithGoal, AgentWithGoalJoint].')\n\n steward.dialogue_manager.set_agent(agent=agent)\n if train_mode is True: # Train\n steward.simulate(epoch_number=simulate_epoch_number, train_mode=train_mode)\n else: # test\n for index in range(simulate_epoch_number):\n steward.evaluate_model(dataset='test', index=index)", "title": "" }, { "docid": "964f3c987640a87ff815049f2eb1a4f5", "score": "0.43517506", "text": "def __init__(self, name = \"Unnamed scenario\"):\r\n super(Scenario, self).__init__()\r\n \r\n self._name = name", "title": "" }, { "docid": "8a29385915f276967a94108365478864", "score": "0.4346478", "text": "def make_env(env_id, rank=0, seed=0, log_dir=None, wrapper_class=None, env_kwargs=None):\n if log_dir is not None:\n os.makedirs(log_dir, exist_ok=True)\n\n if env_kwargs is None:\n env_kwargs = {}\n\n def _init():\n set_random_seed(seed * 128 + rank)\n env = gym.make(env_id, **env_kwargs)\n\n # Wrap first with a monitor (e.g. 
for Atari env where reward clipping is used)\n log_file = os.path.join(log_dir, str(rank)) if log_dir is not None else None\n # Monitor success rate too for the real robot\n info_keywords = (\"is_success\",) if \"Neck\" in env_id else ()\n env = Monitor(env, log_file, info_keywords=info_keywords)\n\n # Dict observation space is currently not supported.\n # https://github.com/hill-a/stable-baselines/issues/321\n # We allow a Gym env wrapper (a subclass of gym.Wrapper)\n if wrapper_class:\n env = wrapper_class(env)\n\n env.seed(seed * 128 + rank)\n return env\n\n return _init", "title": "" }, { "docid": "1a8db18231c34a354141a940e4530901", "score": "0.43454957", "text": "def step_impl(context):\n\n if not hasattr(context, \"proxy_address\"):\n assert False, \"The feature file requires a proxy, but one has not \" \\\n \"been defined in environment.py.\"\n assert True", "title": "" }, { "docid": "67007f8506e5eb8e055fdbde2c677090", "score": "0.43422922", "text": "def __init__(self, config, **kwargs):\n self.config = config['environment']\n\n # Grid, positions\n self.grid_size = self.config['grid_size'] # 10\n self.grid = np.zeros([self.grid_size, self.grid_size])\n self.agent_start_location = [self.grid_size//2, self.grid_size//2] # Start at the middle of the grid\n self.position = self.agent_start_location\n self.goal_position = []\n self.window_name = 'Test env'\n\n # Gym-related part\n self.r = 0 # Total episode reward\n self.done = False # Termination\n self.episode = 0 # Episode number\n self.steps = 0 # Current step in the episode\n self.max_steps = self.config['step_limit']\n self.goals_reached = 0\n\n self.create_window()\n\n # Action and observation spaces\n self.action_space = gym.spaces.Discrete(4)\n\n if self.config['image_as_state']: # Image based (CNN)\n self.observation_space = gym.spaces.Box(shape=(self.grid_size, self.grid_size, 1), high=1, low=0, dtype=np.uint8)\n else: # Vector based (MLP)\n self.observation_space = gym.spaces.Box(shape=(4,), high=10, low=0, dtype=np.uint8)", "title": "" }, { "docid": "20b3c5169cf4b0b4fbbeb22447741fdb", "score": "0.43396318", "text": "def make_env_fn(\n config: \"DictConfig\",\n env_class: Union[Type[Env], Type[RLEnv]],\n dataset=None,\n) -> Union[Env, RLEnv]:\n if \"habitat\" in config:\n config = config.habitat\n if dataset is None:\n dataset = make_dataset(config.dataset.type, config=config.dataset)\n env = env_class(config=config, dataset=dataset)\n env.seed(config.seed)\n return env", "title": "" }, { "docid": "c2347620957f62693527cc49f0ecd909", "score": "0.43351537", "text": "def build_model():\n mod = Model(id=\"ACT-R Base\")\n mod_graph = Graph(id=\"actr_base\")\n mod.graphs.append(mod_graph)\n\n # Declarative memory\n dm_node = Node(id=\"declarative_memory\", parameters={\"chunks\": [], \"chunk_types\": []})\n dm_ip = InputPort(id=\"dm_input\")\n dm_node.input_ports.append(dm_ip)\n retrieval_f = Function(\n id=\"retrieve_chunk\",\n function=\"retrieve_chunk\",\n args={\"pattern\": dm_ip.id, \"dm_chunks\": \"chunks\", \"types\": \"chunk_types\"}\n )\n dm_node.functions.append(retrieval_f)\n dm_op = OutputPort(id=\"dm_output\", value=retrieval_f.id)\n dm_node.output_ports.append(dm_op)\n mod_graph.nodes.append(dm_node)\n\n # Retrieval buffer\n retrieval_node = Node(id=\"retrieval_buffer\")\n retrieval_ip = InputPort(id=\"retrieval_input\")\n retrieval_node.input_ports.append(retrieval_ip)\n retrieval_op = OutputPort(id=\"retrieval_output\", value=retrieval_ip.id)\n retrieval_node.output_ports.append(retrieval_op)\n 
mod_graph.nodes.append(retrieval_node)\n\n # Goal buffer with state\n goal_node = Node(id=\"goal_buffer\", parameters={\"first_goal\": {}})\n goal_ip = InputPort(id=\"goal_input\")\n goal_node.input_ports.append(goal_ip)\n goal_f = Function(\n id=\"change_goal\",\n function=\"change_goal\",\n args={\"pattern\": goal_ip.id, \"curr_goal\": \"goal_state\"}\n )\n goal_node.functions.append(goal_f)\n goal_state = State(\n id=\"goal_state\", \n default_initial_value=\"first_goal\", \n value=\"first_goal if %s == {} else %s\" % (goal_ip.id, goal_f.id))\n goal_node.states.append(goal_state)\n goal_op = OutputPort(id=\"goal_output\", value=goal_state.id)\n goal_node.output_ports.append(goal_op)\n mod_graph.nodes.append(goal_node)\n\n # Procedural memory\n pm_node = Node(id=\"procedural_memory\", parameters={\"productions\": []})\n pm_op = OutputPort(id=\"pm_output\", value=\"productions\")\n pm_node.output_ports.append(pm_op)\n mod_graph.nodes.append(pm_node)\n\n # Pattern matching\n pattern_node = Node(id=\"pattern_matching\")\n pattern_ip1 = InputPort(id=\"pattern_input_from_pm\")\n pattern_ip2 = InputPort(id=\"pattern_input_from_goal\")\n pattern_ip3 = InputPort(id=\"pattern_input_from_retrieval\")\n pattern_node.input_ports.extend([pattern_ip1, pattern_ip2, pattern_ip3])\n pattern_f = Function(\n id=\"pattern_matching_function\", \n function=\"pattern_matching_function\",\n args={\"productions\": pattern_ip1.id, \"goal\": pattern_ip2.id, \"retrieval\": pattern_ip3.id}\n )\n pattern_node.functions.append(pattern_f)\n pattern_op = OutputPort(id=\"pattern_output\", value=pattern_f.id)\n pattern_node.output_ports.append(pattern_op)\n mod_graph.nodes.append(pattern_node)\n\n # Conflict resolution\n conflict_node = Node(id=\"conflict_resolution\")\n conflict_ip = InputPort(id=\"conflict_input\")\n conflict_node.input_ports.append(conflict_ip)\n conflict_f = Function(\n id=\"conflict_resolution_function\", \n function=\"conflict_resolution_function\",\n args={\"productions\": conflict_ip.id}\n )\n conflict_node.functions.append(conflict_f)\n conflict_op1 = OutputPort(id=\"conflict_output_to_fire_prod\", value=conflict_f.id)\n conflict_op2 = OutputPort(id=\"conflict_output_to_check\", value=conflict_f.id)\n conflict_node.output_ports.extend([conflict_op1, conflict_op2])\n mod_graph.nodes.append(conflict_node)\n\n # Node for firing productions\n fire_prod_node = Node(id=\"fire_production\")\n fire_prod_ip = InputPort(id=\"fire_prod_input\")\n fire_prod_node.input_ports.append(fire_prod_ip)\n fire_prod_f1 = Function(\n id=\"update_goal\", \n function=\"update_goal\",\n args={\"production\": fire_prod_ip.id}\n )\n fire_prod_f2 = Function(\n id=\"update_retrieval\", \n function=\"update_retrieval\",\n args={\"production\": fire_prod_ip.id}\n )\n fire_prod_node.functions.extend([fire_prod_f1, fire_prod_f2])\n fire_prod_op1 = OutputPort(id=\"fire_prod_output_to_goal\", value=fire_prod_f1.id)\n fire_prod_op2 = OutputPort(id=\"fire_prod_output_to_retrieval\", value=fire_prod_f2.id)\n fire_prod_node.output_ports.extend([fire_prod_op1, fire_prod_op2])\n mod_graph.nodes.append(fire_prod_node)\n\n # Node to check termination\n check_node = Node(id=\"check_termination\")\n check_ip = InputPort(id=\"check_input\")\n check_node.input_ports.append(check_ip)\n check_f = Function(\n id=\"check_termination\", \n function=\"check_termination\",\n args={\"production\": check_ip.id}\n )\n check_node.functions.append(check_f)\n check_op = OutputPort(id=\"check_output\", value=check_f.id)\n 
check_node.output_ports.append(check_op)\n mod_graph.nodes.append(check_node)\n\n # Edges\n dm_to_retrieval = Edge(\n id=\"dm_to_pattern_edge\",\n sender=dm_node.id,\n sender_port=dm_op.id,\n receiver=retrieval_node.id,\n receiver_port=retrieval_ip.id,\n )\n mod_graph.edges.append(dm_to_retrieval)\n\n retrieval_to_pattern = Edge(\n id=\"retrieval_to_pattern_edge\",\n sender=retrieval_node.id,\n sender_port=retrieval_op.id,\n receiver=pattern_node.id,\n receiver_port=pattern_ip3.id,\n )\n mod_graph.edges.append(retrieval_to_pattern)\n\n goal_to_pattern = Edge(\n id=\"goal_to_pattern_edge\",\n sender=goal_node.id,\n sender_port=goal_op.id,\n receiver=pattern_node.id,\n receiver_port=pattern_ip2.id,\n )\n mod_graph.edges.append(goal_to_pattern)\n\n pm_to_pattern = Edge(\n id=\"pm_to_pattern_edge\",\n sender=pm_node.id,\n sender_port=pm_op.id,\n receiver=pattern_node.id,\n receiver_port=pattern_ip1.id,\n )\n mod_graph.edges.append(pm_to_pattern)\n\n pattern_to_conflict = Edge(\n id=\"pattern_to_conflict_edge\",\n sender=pattern_node.id,\n sender_port=pattern_op.id,\n receiver=conflict_node.id,\n receiver_port=conflict_ip.id,\n )\n mod_graph.edges.append(pattern_to_conflict)\n\n conflict_to_fire_prod = Edge(\n id=\"conflict_to_fire_prod_edge\",\n sender=conflict_node.id,\n sender_port=conflict_op1.id,\n receiver=fire_prod_node.id,\n receiver_port=fire_prod_ip.id,\n )\n mod_graph.edges.append(conflict_to_fire_prod)\n\n conflict_to_check = Edge(\n id=\"conflict_to_check_edge\",\n sender=conflict_node.id,\n sender_port=conflict_op1.id,\n receiver=check_node.id,\n receiver_port=check_ip.id,\n )\n mod_graph.edges.append(conflict_to_check)\n\n # Conditions\n cond_dm = Condition(type=\"Always\")\n cond_retrieval = Condition(type=\"JustRan\", dependency=dm_node.id)\n cond_goal = Condition(type=\"Always\")\n cond_pm = Condition(type=\"Always\")\n cond_pattern = Condition(\n type=\"And\",\n dependencies=[\n Condition(type=\"EveryNCalls\", dependency=retrieval_node.id, n=1),\n Condition(type=\"EveryNCalls\", dependency=goal_node.id, n=1),\n Condition(type=\"EveryNCalls\", dependency=dm_node.id, n=1)\n ]\n )\n cond_conflict = Condition(type=\"JustRan\", dependency=pattern_node.id)\n cond_fire_prod = Condition(type=\"JustRan\", dependency=conflict_node.id)\n cond_check = Condition(type=\"JustRan\", dependency=conflict_node.id)\n cond_term = Condition(\n type=\"While\",\n dependencies=[check_node.id]\n )\n mod_graph.conditions = ConditionSet(\n node_specific={\n dm_node.id: cond_dm, \n retrieval_node.id: cond_retrieval,\n goal_node.id: cond_goal,\n pm_node.id: cond_pm,\n pattern_node.id: cond_pattern,\n conflict_node.id: cond_conflict,\n fire_prod_node.id: cond_fire_prod,\n check_node.id: cond_check\n },\n termination={\"check_term_true\": cond_term}\n )\n\n return mod", "title": "" }, { "docid": "1c70d3f87859880edc37782ec387adfd", "score": "0.43304542", "text": "def given_a_dataset_with_location_id(t: Env):\n prj = preset.project_created(t)\n ds = preset.dataset_created(t, prj)\n\n ds.meta.description = \"# HELLO\"\n ds.meta.description_set = True\n\n t.given_events(prj, ds)\n\n t.scenario(\n when.view_dataset(ds.dataset_id),\n then.text(f'main #ds-{ds.dataset_id} .description h4', 'HELLO'),\n )", "title": "" }, { "docid": "50e9a35af46bb67e2cd1a0eb7db290ab", "score": "0.43283287", "text": "def step_impl(context):\n\n if context.radamsa_location is None:\n assert False, \"The feature file requires Radamsa, but the path is \" \\\n \"undefined.\"\n try:\n subprocess.check_output([context.radamsa_location, 
\"--help\"],\n stderr=subprocess.STDOUT)\n except (subprocess.CalledProcessError, OSError) as error:\n assert False, \"Could not execute Radamsa from %s: %s\" % (context.radamsa_location, error)\n assert True", "title": "" }, { "docid": "b943bc694d2ca35a00ba22aac9c930ba", "score": "0.4322604", "text": "def make_mujoco_env(env_id, seed, allow_early_resets=True):\n rank = MPI.COMM_WORLD.Get_rank()\n set_global_seeds(seed + 10000 * rank)\n env = gym.make(env_id)\n env.seed(seed)\n return env", "title": "" }, { "docid": "5411e7c4df958f24acdd89ffc9d1e278", "score": "0.43210062", "text": "def run_scenario(self, scenario_id, credentials):\n return self.post('/scenarios/%i/run' % (scenario_id,), credentials)", "title": "" }, { "docid": "8cc3fadfb9a0aaa9e4b0bf32bc8e60a5", "score": "0.43159288", "text": "def make_env(env_id, rank, seed=0):\n def _init():\n env = gym.make(env_id)\n env.seed(seed + rank)\n return env\n set_global_seeds(seed)\n return _init", "title": "" }, { "docid": "8cc3fadfb9a0aaa9e4b0bf32bc8e60a5", "score": "0.43159288", "text": "def make_env(env_id, rank, seed=0):\n def _init():\n env = gym.make(env_id)\n env.seed(seed + rank)\n return env\n set_global_seeds(seed)\n return _init", "title": "" }, { "docid": "b610cb60fd4683c438b69cbc1dd3bef1", "score": "0.4315188", "text": "def make_mujoco_env(env_id, seed, reward_scale=1.0):\r\n rank = MPI.COMM_WORLD.Get_rank()\r\n myseed = seed + 1000 * rank if seed is not None else None\r\n set_global_seeds(myseed)\r\n env = gym.make(env_id)\r\n env.seed(seed)\r\n\r\n return env", "title": "" }, { "docid": "5c7ffbbc4a16c627f044f1ef7fb4ca89", "score": "0.4310157", "text": "def download_scenario(user, scenario_id):\n create_dir()\n # Get the scenario object from the Database\n scenario = Scenario.objects.get(id=scenario_id)\n \n # the base of the TsuDAT user directory structures from settings.py \n TsuDATBase = settings.TSUDAT_BASE_DIR\n TsuDATMux = settings.TSUDAT_MUX_DIR\n\n # change setup value to one of expected strings\n print('original scenario.model_setup=%s' % scenario.model_setup)\n trial_edit = {'t': 'trial', 'T': 'trial', 'trial': 'trial', 'TRIAL': 'trial',\n 'f': 'final', 'F': 'final', 'final': 'final', 'FINAL': 'final'}\n actual_setup = trial_edit.get(scenario.model_setup, 'trial')\n print('actual_setup=%s' % actual_setup)\n\n # fake a prject name ##?\n if not scenario.project.name: ##?\n scenario.project.name = _slugify(scenario.name) ##?\n \n # create the user working directory\n (work_dir, raw_elevations, boundaries, meshes, polygons, gauges,\n topographies, user_dir) = run_tsudat.make_tsudat_dir(TsuDATBase, user.username,\n _slugify(scenario.project.name),\n _slugify(scenario.name),\n##? 
scenario.model_setup,\n actual_setup,\n scenario.event.tsudat_id)\n\n project_geom = scenario.project.geom\n project_extent = scenario.project.geom.extent\n centroid = project_geom.centroid\n\n # This somewhat naively assumes that the whole bounding polygon is in the same zone\n (UTMZone, UTMEasting, UTMNorthing) = LLtoUTM(23, centroid.coords[1], centroid.coords[0])\n if(len(UTMZone) == 3):\n utm_zone = int(UTMZone[0:2])\n else:\n utm_zone = int(UTMZone[0:1])\n if(centroid.coords[1] > 0):\n srid_base = 32600\n else:\n srid_base = 32700\n srid = srid_base + utm_zone\n scenario.project.srid = srid\n scenario.project.save()\n\n project_geom.transform(srid) \n\n # Polygons\n print polygons\n bounding_polygon_file = open(os.path.join(polygons, 'bounding_polygon.csv'), 'w')\n for coord in project_geom.coords[0][:-1]:\n bounding_polygon_file.write('%f,%f\\n' % (coord[0], coord[1]))\n bounding_polygon_file.close()\n \n # Internal Polygons \n internal_polygons = InternalPolygon.objects.filter(project=scenario.project).order_by('value')\n count = 0\n InteriorRegions = []\n for ip in internal_polygons:\n ipfile = open(os.path.join(polygons, 'ip%s.csv' % count), 'w')\n geom = ip.geom\n geom.transform(srid)\n for coord in geom.coords[0][:-1]:\n ipfile.write('%f,%f\\n' % (coord[0], coord[1]))\n if(ip.type == 1):\n type = \"resolution\"\n elif(ip.type == 2):\n type = \"friction\"\n elif(ip.type == 3):\n type = \"aoi\"\n InteriorRegions.append([type, ipfile.name, ip.value])\n ipfile.close()\n geom = ipfile = None\n count += 1\n\n # Raw Elevation Files\n RawElevationFiles = []\n elevation_files = []\n\n wcs_url = settings.GEOSERVER_BASE_URL + 'wcs'\n wcs = WebCoverageService(wcs_url, version='1.0.0')\n pds = ProjectDataSet.objects.filter(project=scenario.project).order_by('ranking')\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(srid)\n dst_wkt = srs.ExportToPrettyWkt()\n eResampleAlg = None\n create_options = None\n \n output_format = \"AAIGrid\"\n driver = gdal.GetDriverByName(output_format)\n \n for ds in pds:\n layer = Layer.objects.get(typename=ds.dataset.typename)\n elevation_files.append(layer.typename)\n logger.info(wcs.contents)\n metadata = wcs.contents[layer.typename]\n print metadata.grid\n resx = metadata.grid.offsetvectors[0][0]\n resy = abs(float(metadata.grid.offsetvectors[1][1]))\n formats = metadata.supportedFormats\n print formats\n cvg = wcs.getCoverage(identifier=layer.typename, \n format='GeoTIFF', \n crs=\"EPSG:4326\", \n bbox=(project_extent[0], \n project_extent[1], \n project_extent[2], \n project_extent[3]), \n resx=resx, \n resy=resy)\n # Need to make sure the ranking numbers are unique for each project (enforced with DB constraint?)\n tif_file_name = '%s.tif' % ds.ranking\n tif_file_path = os.path.join(raw_elevations, tif_file_name)\n asc_file_name = '%s.asc' % ds.ranking\n asc_file_path = os.path.join(raw_elevations, asc_file_name)\n out = open(tif_file_path, 'wb')\n out.write(cvg.read())\n out.close()\n \n # Warp to UTM\n cmd = \"/usr/bin/gdalwarp -srcnodata -9999 -dstnodata -9999 -t_srs EPSG:%d %s %s.tmp\" % (srid, tif_file_path, tif_file_path)\n os.system(cmd)\n # Convert to AAIGrid\n cmd = \"/usr/bin/gdal_translate -a_nodata -9999 -of %s %s.tmp %s\" % (output_format, tif_file_path, asc_file_path)\n os.system(cmd)\n # Remove Intermediate files\n #os.remove(tif_file_path)\n #os.remove(tif_file_path + \".tmp\")\n \n # Rename the .prj file to .prj.wkt\n shutil.move(asc_file_path.replace('.asc', '.prj'), asc_file_path.replace('.asc', '.prj.wkt'))\n \n # Generate a prj.adf 
style prj file\n # NOTE: Not sure if this will work in all cases?\n prj_file_name = '%s.prj' % ds.ranking\n prj_file = open(os.path.join(raw_elevations, prj_file_name), 'w')\n prj_file.write('Projection UTM\\n')\n prj_file.write('Zone %d\\n' % utm_zone)\n prj_file.write('Datum WGS1984\\n')\n prj_file.write('Zunits NO\\n')\n prj_file.write('Units METERS\\n')\n prj_file.write('Spheroid WGS_1984\\n')\n prj_file.write('Xshift 500000\\n')\n prj_file.write('Yshift 10000000\\n')\n prj_file.write('Parameters\\n')\n prj_file.write('NODATA_value -9999')\n prj_file.close() \n\n RawElevationFiles.append(asc_file_path)\n \n '''\n src_ds = gdal.Open( str(tif_file_path), GA_ReadOnly )\n dst_ds_tmp = driver.CreateCopy( str(asc_file_name + '.tmp'), src_ds, 0)\n dst_ds = driver.Create( str(asc_file_path), dst_ds_tmp.RasterXSize, dst_ds_tmp.RasterYSize)\n gdal.ReprojectImage(src_ds, dst_ds, None, dst_wkt)\n dst_ds = None\n dst_ds_tmp = None\n src_ds = None\n '''\n\n # Landward Boundary\n \n # Iterate over the in the project geometry and add a l or s flag and call landward.landward with them\n points_list = []\n for coord in project_geom.coords[0][:-1]:\n pnt_wkt = 'SRID=%s;POINT(%f %f)' % (srid, coord[0], coord[1])\n land = Land.objects.filter(the_geom__intersects=pnt_wkt)\n if(land.count() > 0):\n points_list.append((coord[0], coord[1], \"l\")) \n else:\n points_list.append((coord[0], coord[1], \"s\")) \n print('points_list=%s' % str(points_list))\n landward_points = landward.landward(points_list)\n print('landward_points=%s' % str(landward_points))\n \n # Write out the landward points to a file\n landward_boundary_file = open(os.path.join(boundaries, 'landward_boundary.csv'), 'w')\n for pt in landward_points:\n landward_boundary_file.write('%f,%f\\n' % (pt[0], pt[1]))\n landward_boundary_file.close()\n\n # Interior Hazard Points File\n interior_hazard_points_file = open(os.path.join(boundaries, 'interior_hazard_points.csv'), 'w')\n hps = HazardPoint.objects.filter(geom__intersects=project_geom).order_by('tsudat_id')\n for hp in hps:\n the_geom = hp.geom\n latitude=the_geom.coords[1]\n longitude=the_geom.coords[0]\n the_geom.transform(srid)\n interior_hazard_points_file.write('%d,%f,%f,%f,%f\\n' % (hp.tsudat_id,longitude,latitude,the_geom.coords[0], the_geom.coords[1]))\n interior_hazard_points_file.close()\n \n # Gauges\n gauge_file = open(os.path.join(gauges, 'gauges.csv'), 'w')\n gauge_file.write('easting,northing,name,elevation\\n')\n gauge_points = GaugePoint.objects.filter(project=scenario.project)\n for gauge in gauge_points:\n gauge_geom = gauge.geom\n gauge_geom.transform(srid)\n gauge_file.write('%f,%f,%s,%f\\n' % (gauge_geom.coords[0], gauge_geom.coords[1], gauge.name, 0.0))\n gauge_file.close()\n \n # Layers \n scenario_layers = scenario.output_layers.all()\n layers = []\n for layer in scenario_layers:\n layers.append(layer.name)\n\n # build the scenario json data file\n date_time = strftime(\"%Y%m%d%H%M%S\", gmtime()) \n json_file = os.path.join(work_dir, '%s.%s.json' % (_slugify(scenario.name), date_time))\n\n json_dict = {\n 'user': user.username,\n 'user_directory': user_dir,\n 'project': _slugify(scenario.project.name),\n 'project_id': scenario.project.id,\n 'scenario': _slugify(scenario.name),\n 'scenario_id': scenario.id,\n##? 
'setup': scenario.model_setup,\n 'setup': actual_setup,\n 'event_number': scenario.event.tsudat_id,\n 'working_directory': TsuDATBase,\n 'mux_directory': TsuDATMux,\n 'initial_tide': scenario.initial_tidal_stage,\n 'start_time': scenario.start_time,\n 'end_time': scenario.end_time,\n 'smoothing': scenario.smoothing_param,\n 'bounding_polygon_file': bounding_polygon_file.name,\n 'raw_elevation_directory': raw_elevations,\n 'elevation_data_list': RawElevationFiles,\n 'mesh_friction': scenario.default_friction_value,\n 'raster_resolution': scenario.raster_resolution,\n 'export_area': \"AOI\" if scenario.use_aoi == True else \"ALL\",\n 'gauge_file': gauge_file.name,\n 'bounding_polygon_maxarea': scenario.project.max_area,\n 'interior_regions_list': InteriorRegions,\n 'interior_hazard_points_file': interior_hazard_points_file.name, \n 'landward_boundary_file': landward_boundary_file.name,\n 'zone_number': utm_zone,\n 'layers_list': layers, \n 'get_results_max': True,\n 'get_timeseries': True \n }\n\n with open(json_file, 'w') as fd:\n json.dump(json_dict, fd, indent=2, separators=(',', ':'))\n\n scenario.tsudat_payload = json.dumps(json_dict) \n scenario.save()\n \n # now run the simulation\n run_tsudat.run_tsudat(json_file)\n scenario.anuga_status = \"QUEUE\"\n scenario.save()\n return True", "title": "" }, { "docid": "b3be2e9766170b099bdc7c20e7001e51", "score": "0.43089592", "text": "def run(args):\n\n env = gym.make(ENV_NAME)\n backtrack_logger = VisualizeScores(ENV_NAME)\n observation_space = env.observation_space.shape[0]\n action_space = env.action_space.n\n dqn_backtrack = backtrackDQN(observation_space, action_space, learning_rate=args.lr, batch_size=args.batch_size)\n episode = 0\n for epoch in range(args.num_epochs):\n step = 0\n total_reward = 0\n episode += 1\n state = env.reset()\n state = np.reshape(state, (1, observation_space))\n while True:\n step += 1\n env.render() # visualize the environment\n action = dqn_backtrack.chooseAction(state)\n next_state, reward, terminal, info = env.step(action)\n total_reward += reward\n next_state = np.reshape(next_state, (1, observation_space))\n # add the new information to memory\n dqn_backtrack.addToReplayBuffer(state, action, total_reward, next_state, terminal)\n state = next_state\n if terminal:\n print('Episode: {}, exploration: {}, score:'\\\n ' {}'.format(episode, np.round(dqn_backtrack.epsilon, 3), step))\n backtrack_logger.add_score(step, episode, output_path=os.path.join(args.output_dir,args.output_filename))\n break\n dqn_backtrack.replayBuffer()", "title": "" }, { "docid": "c0b6ab38fac9765a3c80dfe8d21e8178", "score": "0.43033996", "text": "def test_environment_network(environment_path, env):\n no_error_loop = False\n try:\n env_path = '.'.join([environment_path, env])\n env_module = importlib.import_module(env_path)\n e = env_module.Environment()\n es = {p: env_module.Environment() for p in e.players()}\n for _ in range(100):\n e.reset()\n for p, e_ in es.items():\n info = e.diff_info(p)\n e_.update(info, True)\n while not e.terminal():\n actions = {}\n for player in e.turns():\n assert set(e.legal_actions(player)) == set(es[player].legal_actions(player))\n action = random.choice(es[player].legal_actions(player))\n actions[player] = es[player].action2str(action, player)\n actions = {p: e.str2action(a, p) for p, a in actions.items()}\n e.step(actions)\n for p, e_ in es.items():\n info = e.diff_info(p)\n e_.update(info, False)\n e.reward()\n e.outcome()\n no_error_loop = True\n except Exception:\n traceback.print_exc()\n\n assert 
no_error_loop", "title": "" }, { "docid": "3ec6e90824c1fd96ca1e951371919fef", "score": "0.42974883", "text": "def run_learningtype_wizard(self):", "title": "" }, { "docid": "38f1865c54bec650a6c0a72b0f530087", "score": "0.42973298", "text": "def func_with_inline_scenario():\n with Scenario(\"my test\") as test:\n note(f\"hi from test {test.name}\")", "title": "" }, { "docid": "4d73ffa767e78835e3b514209f020966", "score": "0.4294382", "text": "def test_gym_env_load(self):\n curdir = os.getcwd()\n os.chdir(os.path.join(ROOT_DIR, \"examples\", \"gym_ex\"))\n gym_env_path = \"gyms.env.BanditNArmedRandom\"\n configuration = ConnectionConfig(\n connection_id=GymConnection.connection_id, env=gym_env_path\n )\n identity = Identity(\"name\", address=self.my_address)\n gym_con = GymConnection(\n gym_env=None, identity=identity, configuration=configuration\n )\n assert gym_con.channel.gym_env is not None\n os.chdir(curdir)", "title": "" }, { "docid": "e5f92ca304512cbbd6e2ebcf1a9c882a", "score": "0.42920578", "text": "def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None:\n parser.add_argument(\"tests\", nargs=\"*\", default=None, help=\"run specified tests\")\n args = parser.parse_args()\n\n c.up(\"redpanda\", \"postgres\", \"materialized\")\n\n c.up(\"testdrive\", persistent=True)\n\n for database_object in database_objects:\n if (\n args.tests is not None\n and len(args.tests) > 0\n and database_object.name not in args.tests\n ):\n continue\n\n print(f\"Running scenario {database_object.name} ...\")\n\n c.testdrive(\n dedent(\n \"\"\"\n $ postgres-execute connection=postgres://mz_system@materialized:6877/materialize\n DROP SCHEMA IF EXISTS public CASCADE;\n CREATE SCHEMA public;\n GRANT ALL PRIVILEGES ON SCHEMA public TO materialize;\n \"\"\"\n )\n )\n\n c.testdrive(database_object.testdrive)\n\n # Make sure the storage is fully accounted for\n print(\n f\"Sleeping for {COLLECTION_INTERVAL_SECS + 1} seconds so that collection kicks in ...\"\n )\n time.sleep(COLLECTION_INTERVAL_SECS + 1)\n\n c.testdrive(\n dedent(\n f\"\"\"\n $ set-regex match=\\d+ replacement=<SIZE>\n\n # Select the raw size as well, so if this errors in testdrive, its easier to debug.\n > SELECT size_bytes, size_bytes BETWEEN {database_object.expected_size//3} AND {database_object.expected_size*3}\n FROM mz_storage_usage\n WHERE collection_timestamp = ( SELECT MAX(collection_timestamp) FROM mz_storage_usage )\n AND object_id = ( SELECT id FROM mz_objects WHERE name = 'obj' );\n <SIZE> true\n \"\"\"\n )\n )", "title": "" } ]
edb5457970fd4d56ccfdc469aad81c35
DCN+ Dynamic Decoder. Builds a decoder graph that iterates over possible solutions to the problem until it returns the same answer in two consecutive iterations or reaches `max_iter` iterations.
[ { "docid": "7c7734fd4e07a8e8edda01b29ca9d7cc", "score": "0.5828644", "text": "def dcn_decode(encoding, document_length, state_size=100, pool_size=4, max_iter=4, keep_prob=1.0):\n\n with tf.variable_scope('decoder_loop', reuse=tf.AUTO_REUSE):\n batch_size = tf.shape(encoding)[0]\n lstm_dec = tf.contrib.rnn.LSTMCell(num_units=state_size)\n lstm_dec = tf.contrib.rnn.DropoutWrapper(lstm_dec, input_keep_prob=keep_prob)\n\n # initialise loop variables\n start = tf.zeros((batch_size,), dtype=tf.int32)\n end = document_length - 1\n answer = tf.stack([start, end], axis=1)\n state = lstm_dec.zero_state(batch_size, dtype=tf.float32)\n not_settled = tf.tile([True], (batch_size,))\n logits = tf.TensorArray(tf.float32, size=max_iter, clear_after_read=False)\n\n def calculate_not_settled_logits(not_settled, answer, output, prev_logit):\n enc_masked = tf.boolean_mask(encoding, not_settled)\n output_masked = tf.boolean_mask(output, not_settled)\n answer_masked = tf.boolean_mask(answer, not_settled)\n document_length_masked = tf.boolean_mask(document_length, not_settled)\n new_logit = decoder_body(enc_masked, output_masked, answer_masked, state_size, pool_size, document_length_masked, keep_prob)\n new_idx = tf.boolean_mask(tf.range(batch_size), not_settled)\n logit = tf.dynamic_stitch([tf.range(batch_size), new_idx], [prev_logit, new_logit]) # TODO test that correct\n return logit\n\n for i in range(max_iter):\n if i > 1:\n names = 'not_settles_iter_'+ str(i+1)\n #tf.summary.scalar(f'not_settled_iter_{i+1}', tf.reduce_sum(tf.cast(not_settled, tf.float32)))\n tf.summary.scalar(names, tf.reduce_sum(tf.cast(not_settled, tf.float32)))\n \n output, state = lstm_dec(start_and_end_encoding(encoding, answer), state)\n if i == 0:\n logit = decoder_body(encoding, output, answer, state_size, pool_size, document_length, keep_prob)\n else:\n prev_logit = logits.read(i-1)\n logit = tf.cond(\n tf.reduce_any(not_settled),\n lambda: calculate_not_settled_logits(not_settled, answer, output, prev_logit),\n lambda: prev_logit\n )\n start_logit, end_logit = logit[:, :, 0], logit[:, :, 1]\n start = tf.argmax(start_logit, axis=1, output_type=tf.int32)\n end = tf.argmax(end_logit, axis=1, output_type=tf.int32)\n new_answer = tf.stack([start, end], axis=1)\n if i == 0:\n not_settled = tf.tile([True], (batch_size,))\n else:\n not_settled = tf.reduce_any(tf.not_equal(answer, new_answer), axis=1)\n not_settled = tf.reshape(not_settled, (batch_size,)) # needed to establish dimensions\n answer = new_answer\n logits = logits.write(i, logit)\n\n return logits", "title": "" } ]
[ { "docid": "a0cc24fd0397519dd87475cb3d28dda1", "score": "0.5929235", "text": "def dynamic_bidecode(fw_decoder, bw_decoder,\n output_time_major=False,\n impute_finished=False,\n maximum_iterations=None,\n parallel_iterations=32,\n swap_memory=False,\n scope=None):\n if not isinstance(fw_decoder, Decoder):\n raise TypeError(\"Expected fw_decoder to be type Decoder, but saw: %s\" %\n type(fw_decoder))\n\n if not isinstance(bw_decoder, Decoder):\n raise TypeError(\"Expected bw_decoder to be type Decoder, but saw: %s\" %\n type(bw_decoder))\n\n with tf.variable_scope(scope,\"bi_decoder\") as scope:\n # Forward\n with tf.variable_scope(\"fw\") as fw_scope:\n fw_final_outputs, fw_final_state, fw_final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(\n fw_decoder, output_time_major=output_time_major, \n impute_finished=impute_finished, \n maximum_iterations=maximum_iterations,\n parallel_iterations=parallel_iterations, \n swap_memory=swap_memory,\n scope=fw_scope\n )\n\n # Backward direction\n if not output_time_major:\n time_dim = 1\n batch_dim = 0\n else:\n time_dim = 0\n batch_dim = 1\n\n def _reverse(input_, seq_lengths, seq_dim, batch_dim):\n if seq_lengths is not None:\n return tf.reverse_sequence(\n input=input_, seq_lengths=seq_lengths,\n seq_dim=seq_dim, batch_dim=batch_dim)\n else:\n return tf.reverse(input_, axis=[seq_dim])\n\n with tf.variable_scope(\"bw\") as bw_scope:\n bw_final_outputs, bw_final_state, bw_final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(\n bw_decoder, output_time_major=output_time_major, \n impute_finished=impute_finished, \n maximum_iterations=maximum_iterations,\n parallel_iterations=parallel_iterations, \n swap_memory=swap_memory,\n scope=bw_scope\n )\n \n if not isinstance(fw_decoder, tf.contrib.seq2seq.BeamSearchDecoder):\n # no beam search\n fw_rnn_output = fw_final_outputs.rnn_output\n bw_rnn_output = bw_final_outputs.rnn_output\n else:\n fw_rnn_output = tf.no_op()\n bw_rnn_output = tf.no_op()\n\n rnn_outputs = (fw_rnn_output, bw_rnn_output)\n output_states = (fw_final_state, bw_final_state)\n decoder_outputs = (fw_final_outputs, bw_final_outputs)\n\n return (rnn_outputs, output_states, decoder_outputs)", "title": "" }, { "docid": "1e0e4ade7ca0c98eb3722529b193fdef", "score": "0.5793611", "text": "def __call__(self, init_hidden, max_len, tau=1., **dec_kwargs):\n assert max_len > 1\n bs = init_hidden.size(0)\n device = init_hidden.device\n\n decoded_word_hots = []\n decoded_mask = []\n word_embds = []\n\n start_one_hot = T.zeros((bs, self.vocab_size), dtype=T.float32,\n device=device)\n start_one_hot[:, self.start_id] = 1.\n\n # the start symbol is not decoded so just appending corresponding\n # dummy mask and embedding\n decoded_word_hots.append(start_one_hot)\n word_embds.append(self._embed_one_hot(start_one_hot))\n decoded_mask.append(T.ones(bs, device=device))\n\n arts_coll = {} # additional artifacts collector\n\n # previous recurring items from the decoder\n # TODO: can I get rid off the hard-coded name?\n # TODO: E.g. 
use constants instead\n prev_recc = {\"init_hidden\": init_hidden}\n\n for t in range(1, max_len):\n\n # computing the soft mask\n mask = self._create_mask(prev_word_hots=decoded_word_hots[-1],\n prev_mask=decoded_mask[-1])\n decoded_mask.append(mask)\n\n # merging static and recurrent values together\n new_kwargs = merge_dicts(dec_kwargs, prev_recc)\n\n prev_words_embds = word_embds[-1].unsqueeze(1)\n mask = mask.unsqueeze(1)\n out = self.decoding_func(prev_words_embds, mask, **new_kwargs)\n assert isinstance(out, DecState)\n\n scores = out.word_scores\n\n # updating the recurring values so they would be fed in the next\n # loop\n prev_recc = out.rec_vals\n\n # collecting artifacts if produced by the decoder\n if out.coll_vals:\n collect_arts(arts_coll, out.coll_vals)\n\n scores = scores.squeeze(dim=1)\n curr_word_hots = gumbel_softmax(scores, hard=True, tau=tau)\n\n decoded_word_hots.append(curr_word_hots)\n word_embds.append(self._embed_one_hot(curr_word_hots))\n\n # converting to tensors\n decoded_word_hots = T.stack(decoded_word_hots, dim=1)\n word_embds = T.stack(word_embds, dim=1)\n decoded_mask = T.stack(decoded_mask, dim=1)\n\n if arts_coll:\n # concatenating artifacts produced by the decoder over sequences\n for k, ar in arts_coll.items():\n arts_coll[k] = T.cat(ar, dim=1)\n\n return decoded_word_hots, word_embds, decoded_mask, arts_coll\n\n return decoded_word_hots, word_embds, decoded_mask", "title": "" }, { "docid": "ea547d1eb2cff05912416dde75fa874d", "score": "0.5753391", "text": "def viterbi(posterior):\n states = len(posterior)\n kmers = len(posterior[0])\n # states = posterior.shape[0]\n # i.e. for 5mkers, 2**5 states\n # kmers = posterior.shape[1]\n # number of \"days\"\n V = [{}]\n for st in range(states):\n # initialise first \"day\" of dp table\n V[0][st] = {\"prob\": posterior[st][0], \"prev\": None}\n # posterior[0] : all the probabilities for A for three days\n # posterior[0][0]: probability for A for day 1.\n # V[0]: results for day 1\n # V[0][0] results for day 1 base 1\n # transition probabilities now count for something\n for t in range(1, kmers):\n V.append({})\n for st in range(states):\n # look at a single branch of probabilities out of states.\n # (i.e. 
A G T C)\n # see for this \"day\" which path has maximum probability\n # for that state\n max_prob = max(\n V[t-1][prev_st][\"prob\"]*transition(prev_st, st) for prev_st in range(states)\n )\n # print(\"max chosen out of\")\n # print(\n # [V[t-1][prev_st][\"prob\"]*transition(prev_st, st) for prev_st in range(states)])\n # print([transition(prev_st, st) for prev_st in range(states)])\n for prev_st in range(states):\n # find the correct st and prob to save in to the dpgraph\n if V[t-1][prev_st][\"prob\"]*transition(prev_st, st) == max_prob:\n max_prob_until = max_prob * posterior[st][t]\n V[t][st] = {\"prob\":max_prob_until, \"prev\":prev_st}\n break\n for line in dptable(V):\n print(line)\n for i in V:\n print(i)\n opt = []\n max_prob = max(value[\"prob\"] for value in V[-1].values())\n previous = None\n for st, data in V[-1].items():\n if data[\"prob\"] == max_prob:\n opt.append(st)\n previous = st\n break\n\n for t in range(len(V) - 2, - 1, - 1):\n opt.insert(0, V[t+1][previous][\"prev\"])\n previous = V[t + 1][previous][\"prev\"]\n\n print(\n 'The steps of states are ' + ' '.join(str(opt)) + ' with highest probability of %s' % max_prob)", "title": "" }, { "docid": "d2e0d6239337574bf283266ab3e1c967", "score": "0.56393856", "text": "def decoder2D(self):\n decoder_ipt = tf.keras.Input(shape=(self._latent_dim,))\n opt = tf.keras.layers.Dense(64, name='decoder_dense_1')(decoder_ipt)\n opt = tf.keras.layers.LeakyReLU(name='decoder_dense_leakyRelu_1')(opt)\n opt = tf.keras.layers.Dense(128, name='decoder_dense_2')(opt)\n opt = tf.keras.layers.LeakyReLU(name='decoder_dense_leakyRelu_2')(opt)\n opt = tf.keras.layers.Dense(14 * 14 * self._channels, name='decoder_dense_3')(opt)\n opt = tf.keras.layers.LeakyReLU(name='decoder_dense_leakyRelu_3')(opt)\n opt = tf.keras.layers.Reshape(target_shape=(14, 14, self._channels))(opt)\n opt = tf.keras.layers.Conv2DTranspose(filters=64, kernel_size=3, strides=1, padding='same', name='Cov2DT_1')(\n opt)\n opt = tf.keras.layers.LeakyReLU(name='Cov2DT_1_leakyRelu')(opt)\n opt = tf.keras.layers.Conv2DTranspose(filters=64, kernel_size=3, strides=2, padding='same', name='Cov2DT_2')(\n opt)\n opt = tf.keras.layers.LeakyReLU(name='Cov2DT_2_leakyRelu')(opt)\n opt = tf.keras.layers.Conv2DTranspose(filters=64, kernel_size=3, strides=2, padding='same', name='Cov2DT_3')(\n opt)\n opt = tf.keras.layers.LeakyReLU(name='Cov2DT_3_leakyRelu')(opt)\n opt = tf.keras.layers.Conv2D(filters=32, kernel_size=4, strides=1, padding='valid', name='Cov2D_5')(opt)\n opt = tf.keras.layers.LeakyReLU(name='Cov2D_5_leakyRelu')(opt)\n opt = tf.keras.layers.Conv2D(filters=32, kernel_size=4, strides=1, padding='valid', name='Cov2D_6')(opt)\n opt = tf.keras.layers.LeakyReLU(name='Cov2D_6_leakyRelu')(opt)\n opt = tf.keras.layers.Conv2DTranspose(filters=self._channels, kernel_size=3, strides=2,\n padding='same',\n activation='sigmoid',\n name='Cov2DT_7')(opt)\n decoder_model = tf.keras.models.Model(decoder_ipt, opt, name='decoder')\n return decoder_model", "title": "" }, { "docid": "b785e8797890b0822d620e9c73d4beb3", "score": "0.55947113", "text": "def inference_decoding_layer(self, embeddings, start_token, end_token, dec_cell, initial_state, output_layer,\n max_summary_length, batch_size):\n\n start_tokens = tf.tile(tf.constant([start_token], dtype=tf.int32), [batch_size], name='start_tokens')\n\n inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embeddings,\n start_tokens,\n end_token)\n\n inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,\n inference_helper,\n 
initial_state,\n output_layer)\n\n inference_logits, _, _ = tf.contrib.seq2seq.dynamic_decode(inference_decoder,\n output_time_major=False,\n impute_finished=True,\n maximum_iterations=max_summary_length)\n\n return inference_logits", "title": "" }, { "docid": "8259fadf9f1123628e28ba1752f2459e", "score": "0.5593506", "text": "def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,\n end_of_sequence_id, max_target_sequence_length,\n vocab_size, output_layer, batch_size, keep_prob):\n dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, \n output_keep_prob=keep_prob)\n \n helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings, \n tf.fill([batch_size], start_of_sequence_id), \n end_of_sequence_id)\n \n decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, \n helper, \n encoder_state, \n output_layer)\n \n outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, \n impute_finished=True, \n maximum_iterations=max_target_sequence_length)\n return outputs", "title": "" }, { "docid": "f4bf78ba8b975dd67eb658da58cc33df", "score": "0.55029535", "text": "def CreateDecodingGraph(self, params):\n\n out_embeddings = self.word_embedder.GetAllEmbeddings()\n\n # placeholders for decoder\n self.prev_word = tf.placeholder(tf.int32, (), name='prev_word')\n self.prev_c = tf.get_variable('prev_c', [1, params.cell_size], dtype=tf.float32,\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n self.prev_h = tf.get_variable('prev_h', [1, params.cell_size], dtype=tf.float32,\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n self.temperature = tf.placeholder_with_default([1.0], [1])\n\n # lookup embedding\n prev_embed = tf.nn.embedding_lookup(out_embeddings, self.prev_word)\n prev_embed = tf.expand_dims(prev_embed, 0)\n\n if params.use_softmax_adaptation:\n prev_embed = prev_embed[:, self.context_size:]\n\n # one iteration of recurrent layer\n state = tf.nn.rnn_cell.LSTMStateTuple(self.prev_c, self.prev_h)\n with tf.variable_scope('RNN', reuse=True):\n result, (self.next_c, self.next_h) = self.cell(prev_embed, state)\n\n proj_result = tf.matmul(result, self.linear_proj)\n if params.use_softmax_adaptation:\n proj_result = tf.concat(axis=1, values=[self.final_context_embed, proj_result])\n \n # softmax layer\n bias = self.base_bias\n if params.use_context_dependent_bias:\n hval = self.hash_func(self.all_ids, self.context_placeholders)\n bias += hval\n\n self.beam_size = tf.placeholder_with_default(1, (), name='beam_size')\n logits = tf.matmul(proj_result, out_embeddings, transpose_b=True) + bias\n self.next_prob = tf.nn.softmax(logits / self.temperature)\n #self.selected = tf.multinomial(logits / self.temperature, self.beam_size)\n self.selected = tf.squeeze(tf.multinomial(logits / self.temperature, self.beam_size))\n self.selected, _ = tf.unique(self.selected)\n self.selected_p = tf.nn.embedding_lookup(tf.transpose(self.next_prob), self.selected)\n \n assign1 = self.prev_c.assign(self.next_c)\n assign2 = self.prev_h.assign(self.next_h)\n self.assign_op = tf.group(assign1, assign2)\n\n # reset state\n assign1 = self.prev_c.assign(tf.zeros_like(self.prev_c))\n assign2 = self.prev_h.assign(tf.zeros_like(self.prev_h))\n self.reset_state = tf.group(assign1, assign2)", "title": "" }, { "docid": "0367e1ed60a254a7a9b47ac45e42f82a", "score": "0.54343235", "text": "def _decoder(self, enc_state, dec_embed_input):\n output_lengths = tf.ones([self.batch_size], tf.int32) * self.max_seq_len\n helper = tf.contrib.seq2seq.TrainingHelper(\n dec_embed_input,\n output_lengths,\n time_major=False)\n\n cells 
= [self._lstm_cell(self.hidden_size) for _ in range(self.num_layers)]\n dec_cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)\n\n decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, enc_state)\n\n dec_outputs = tf.contrib.seq2seq.dynamic_decode(\n decoder,\n output_time_major=False,\n impute_finished=True,\n maximum_iterations=self.max_seq_len, swap_memory=True)\n \n return dec_outputs", "title": "" }, { "docid": "057cfb637f01361416b2a49933637112", "score": "0.5386504", "text": "def build_program_decoder(token_emb_size, rnn_cell, just_tokens=False):\n decoder_rnn = td.ScopedLayer(\n rnn_cell,\n 'decoder'\n )\n decoder_rnn_output = td.RNN(\n decoder_rnn,\n initial_state_from_input=True\n ) >> td.GetItem(0)\n\n fc_layer = td.FC(\n token_emb_size,\n activation=tf.nn.relu,\n initializer=tf.contrib.layers.xavier_initializer(),\n name='encoder_fc' # this is fantastic\n )\n\n # un_normalised_token_probs = decoder_rnn_output >> td.Map(fc_layer)\n if just_tokens:\n return decoder_rnn_output >> td.Map(fc_layer)\n else:\n return decoder_rnn_output >> td.AllOf(td.Map(fc_layer), td.Identity())\n # return un_normalised_token_probs", "title": "" }, { "docid": "62f30b47b690ae37f44d06cf064376a8", "score": "0.53640705", "text": "def compute_D(max_n):\n max_e = max_n # TODO: think about that\n # The 'e' dimension needs additional max_n space, because we have the recursive step D(e+1, n-1)\n D = -np.ones([max_e + max_n+1, max_n+1]) # Initialize with -1\n D[0, :] = 0\n for e in range(D.shape[0]):\n D[e, 0] = 1\n\n # Use dynamic programming for a simple O(n^2) solution\n def get_value(e, n):\n if D[e, n] == -1:\n D[e, n] = get_value(e-1, n) + get_value(e, n-1) + get_value(e+1, n-1)\n return D[e, n]\n\n for e in range(max_e + 1):\n for n in range(max_n + 1):\n D[e, n] = get_value(e, n)\n return D", "title": "" }, { "docid": "f24de9b6ffca7133f9efd241213be44c", "score": "0.53211343", "text": "def decoder(inputs, initial_state, num_layers, width, embedding_matrix,\n vocab_size):\n # we are going to have to project it to vocab_size\n with tf.variable_scope('softmax_projection'):\n proj_W = tf.get_variable('weights', shape=[width, vocab_size])\n proj_b = tf.get_variable('bias', shape=[vocab_size])\n batch_size = inputs[0].get_shape()[0].value\n _, outputs, _ = gen_adv._recurrent_model(\n inputs, num_layers, width, batch_size, None,\n embedding_matrix=embedding_matrix, feed_previous=True,\n argmax=True, starting_state=initial_state,\n output_projection=(proj_W, proj_b), cell='gru')\n return outputs[0]", "title": "" }, { "docid": "378002bf145ae96f34e1b277b8ae529a", "score": "0.5319174", "text": "def max_pool_backward_naive(dout, cache):\n dx = None\n\n x, pool_param = cache\n N, C, H, W = x.shape\n\n # Accepts a volume of size W1*H1*D1\n # Requires three hyperparameters:\n # their spatial extent F,\n # the stride S,\n\n FH = pool_param['pool_height']\n FW = pool_param['pool_width']\n S = pool_param['stride']\n\n # Produces a volume of size W2*H2*D2 where:\n # W2=(W1-F)/S+1\n # H2=(H1-F)/S+1\n # D2=D1\n\n H_out = (H - FH)/S + 1\n W_out = (W - FW)/S + 1\n\n # the derivative of x will have the same size.\n dx = np.zeros(x.shape)\n\n # for each training example\n for n in xrange(N):\n # for each depth/color channel\n for c in xrange(C):\n # for the output pooled height\n for h_out in xrange(H_out):\n # for the output pooled width\n for w_out in xrange(W_out):\n # get the pool window\n window = x[n, c, h_out*S : h_out*S+FH, w_out*S : w_out*S+FW]\n # mask of the window where 1 = max_value, else 0\n mask 
= (window == np.max(window))\n # multiply the returned derivative from the level up by a binary mask of the window where 1 = max_value, else 0\n dx[n, c, h_out*S : h_out*S+FH, w_out*S : w_out*S+FW] += mask * dout[n, c, h_out, w_out]\n\n return dx", "title": "" }, { "docid": "a3e8bc335c3014a9664bec5b59ce4c61", "score": "0.531418", "text": "def make_decoder_7_deconvs(\n filters=(512, 256, 128, 128, 64, 32), num_latents=32, num_channels=3,\n activation='relu', batch_normalization=True, bn_momentum=0.1,\n bn_epsilon=1e-5):\n # Check arguments\n num_internal_deconvs = 6\n filters = _check_iterable_arg(filters, num_internal_deconvs)\n if batch_normalization is True:\n bn_momentums = _check_iterable_arg(bn_momentum, num_internal_deconvs)\n bn_epsilons = _check_iterable_arg(bn_epsilon, num_internal_deconvs)\n activations = [activation] * num_internal_deconvs\n paddings = ['valid'] * 2 + ['same'] * 4\n strides = [1] * 2 + [2] * 4\n filter_sizes = [1] + [4] * 5\n\n input_ = Input(shape=(num_latents,))\n z = Reshape((1, 1, num_latents))(input_)\n for idx in range(num_internal_deconvs):\n conv_layer = Conv2DTranspose(\n filters[idx], kernel_size=filter_sizes[idx],\n strides=strides[idx], padding=paddings[idx],\n activation=activations[idx], kernel_initializer='glorot_normal')\n z = conv_layer(z)\n if batch_normalization is True:\n bn_layer = BatchNormalization(\n axis=-1, momentum=bn_momentums[idx],\n epsilon=bn_epsilons[idx])\n z = bn_layer(z)\n y = Conv2DTranspose(\n num_channels, 4, strides=2, padding='same', activation='sigmoid')(z)\n decoder = Model(inputs=input_, outputs=y, name='decoder')\n return decoder", "title": "" }, { "docid": "07ae9aa87d4e8b0e72b45a13293ed1c5", "score": "0.52907914", "text": "def defineDecoder(self):\n if (self.x is None) or (self.h_dec is None):\n raise RuntimeError(\"To define a decoder you must define the encoder\")\n self.biases_decoder = []\n with self.graph.as_default():\n with tf.name_scope(self.prefix_name + \"-decoder\"):\n x_current = self.h_dec\n for current_layer in range(0, self.layers):\n layer = self.layers - (current_layer + 1)\n name_deconv = self.prefix_name + \"-decoder-deconvolution-%i\" % layer\n name_sum = self.prefix_name + \"-decoder-sum-%i\" % layer\n name_out = self.prefix_name + \"-decoder-out-%i\" % layer\n name_B = self.prefix_name + \"-dec-bias-%d\" % layer\n\n W = self.weights[layer]\n B = tf.Variable(tf.zeros([W.get_shape().as_list()[2]]), name=name_B)\n self.add2summary(B, name_B)\n\n self.biases_decoder.append(B)\n\n shape = self.shapes[layer]\n x_current = self.leakRelu(tf.add(\n tf.nn.conv2d_transpose(\n x_current, W, shape, strides=self.strides, padding=self.padding,\n name=name_deconv),\n B, name=name_sum\n ), name=name_out)\n # x_current = self.leakRelu(h_layer, name=name_out)\n if self.residual_learning:\n self.y = x_current + self.x\n self.addXsummary(x_current, self.prefix_name + '-y-residuals')\n else:\n self.y = x_current\n self.addXsummary(self.y, self.prefix_name + '-y')\n self.biases_decoder = self.biases_decoder[::-1]\n return self", "title": "" }, { "docid": "b555095047fdb34f2da49a2acb717db6", "score": "0.528749", "text": "def define_simple_decoder(hidden_size, input_vocab_len, output_vocab_len, max_length):\n\n # Write your implementation here\n decoder = model.DecoderRNN(hidden_size, output_vocab_len)\n # End of implementation\n\n return decoder", "title": "" }, { "docid": "1fb188a16d984bcbca49560465d266db", "score": "0.52495074", "text": "def rnn_decoder(decoder_inputs,\n initial_state,\n cell,\n loop_function=None,\n 
scope=None):\n with variable_scope.variable_scope(scope or \"rnn_decoder\"):\n state = initial_state\n outputs = []\n prev = None\n i = 0\n\n # 这里并没有将state返回,当然目前的这个模型中暂时用不到这个\n def map_fn_fn(inp, prev=prev, state=state, i=i):\n if loop_function is not None and prev is not None:\n with variable_scope.variable_scope(\"loop_function\", reuse=True):\n inp = loop_function(prev, i)\n if i > 0:\n variable_scope.get_variable_scope().reuse_variables()\n output, state = cell(inp, state)\n outputs.append(output)\n if loop_function is not None:\n prev = output\n i += 1\n\n tf.map_fn(fn=lambda inp: map_fn_fn(inp=inp, prev=prev, state=state, i=i)\n , elems=decoder_inputs)\n return outputs, state", "title": "" }, { "docid": "9d5398a303ffb243b1ad264b440677cf", "score": "0.52327067", "text": "def make_decoder(opt, embeddings, tgt_dict):\n bidirectional = True if opt.encoder_type == 'brnn' else False\n pad = tgt_dict.to_ind(markers.PAD)\n if \"multibank\" in opt.global_attention:\n return MultiAttnDecoder(opt.rnn_type, bidirectional,\n opt.dec_layers, opt.rnn_size,\n attn_type=opt.global_attention,\n dropout=opt.dropout,\n embeddings=embeddings,\n pad=pad)\n elif opt.decoder_type == \"transformer\":\n return TransformerDecoder(opt.dec_layers, opt.rnn_size,\n opt.global_attention, opt.copy_attn,\n opt.dropout, embeddings)\n elif opt.decoder_type == \"cnn\":\n return CNNDecoder(opt.dec_layers, opt.rnn_size,\n opt.global_attention, opt.copy_attn,\n opt.cnn_kernel_width, opt.dropout,\n embeddings)\n elif opt.input_feed:\n return InputFeedRNNDecoder(opt.rnn_type, bidirectional,\n opt.dec_layers, opt.rnn_size,\n attn_type=opt.global_attention,\n dropout=opt.dropout,\n embeddings=embeddings)\n else:\n return StdRNNDecoder(opt.rnn_type, bidirectional,\n opt.dec_layers, opt.rnn_size,\n attn_type=opt.global_attention,\n dropout=opt.dropout,\n embeddings=embeddings,\n pad=pad)", "title": "" }, { "docid": "16a6e7709f32b7b29c235377d332a0e3", "score": "0.5218031", "text": "def _complexity_optimize_differential(signal, delay_max=100, dimension_max=20, surrogate_iter=5):\n\n # Initalize vectors\n if isinstance(delay_max, int):\n tau_sequence = np.arange(1, delay_max)\n else:\n tau_sequence = np.array(delay_max)\n if isinstance(dimension_max, int):\n dimension_seq = np.arange(1, dimension_max + 1)\n else:\n dimension_seq = np.array(dimension_max)\n\n N = len(signal)\n\n surrogate_list = []\n optimal = {}\n\n for dimension in dimension_seq:\n optimal[dimension] = []\n # Calculate differential entropy for each embedded\n for tau in tau_sequence:\n signal_embedded = complexity_embedding(signal, delay=tau, dimension=dimension)\n signal_entropy = _complexity_optimize_get_differential(signal_embedded, k=1)\n\n # calculate average of surrogates entropy\n for i in range(surrogate_iter): # pylint: disable=W0612\n surrogate, _, __ = _complexity_optimize_iaaft(signal)\n surrogate_embedded = complexity_embedding(surrogate, delay=tau, dimension=dimension)\n surrogate_entropy = _complexity_optimize_get_differential(surrogate_embedded, k=1)\n surrogate_list.append(surrogate_entropy)\n surrogate_entropy_average = sum(surrogate_list) / len(surrogate_list)\n\n # entropy ratio for each set of d and tau\n entropy_ratio = signal_entropy / surrogate_entropy_average + (dimension * np.log(N)) / N\n optimal[dimension].append(entropy_ratio)\n\n # optimal dimension and tau is where entropy_ratio is minimum\n optimal_df = pd.DataFrame.from_dict(optimal)\n optimal_delay, optimal_dimension = np.unravel_index(np.nanargmin(optimal_df.values), 
optimal_df.shape)\n\n optimal_delay = optimal_delay + 1 # accounts for zero indexing\n\n return optimal_dimension, optimal_delay", "title": "" }, { "docid": "ef63d401333284cababc7020c6b54d6b", "score": "0.5217132", "text": "def rnn_decoder_search(cell,\n embedding_fn,\n output_layer,\n batch_size,\n maximum_iterations,\n start_id,\n end_id,\n swap_memory=True,\n greedy_search=True):\n # Initialise `AttentionWrapperState` with provided RNN state\n state_init = cell.zero_state(batch_size, tf.float32)\n start_ids = tf.tile([start_id], multiples=[batch_size])\n _dprint('rnn_decoder_search: Initial state: {}'.format(state_init))\n _dprint('rnn_decoder_search: Cell state size: {}'.format(cell.state_size))\n \n if greedy_search:\n logger.debug('Building subgraph V4 for Greedy Search.')\n helper_fn = tf.contrib.seq2seq.GreedyEmbeddingHelper\n else:\n logger.debug('Building subgraph V4 for Sample Search.')\n helper_fn = tf.contrib.seq2seq.SampleEmbeddingHelper\n helper = helper_fn(\n embedding=embedding_fn,\n start_tokens=start_ids,\n end_token=end_id)\n decoder = tf.contrib.seq2seq.BasicDecoder(\n cell=cell,\n helper=helper,\n initial_state=state_init,\n output_layer=output_layer)\n dec_outputs, dec_states, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder=decoder,\n output_time_major=True,\n impute_finished=False,\n maximum_iterations=maximum_iterations,\n parallel_iterations=1,\n swap_memory=swap_memory)\n \n # `dec_outputs` will be a `BasicDecoderOutput` object\n # `dec_states` may be a `AttentionWrapperState` object\n rnn_out = dec_outputs.rnn_output\n output_ids = dec_outputs.sample_id\n \n return output_ids, rnn_out, dec_states", "title": "" }, { "docid": "717bf2f481b3df40152fa221a9cce78c", "score": "0.5213082", "text": "def rnn_decoder_beam_search(cell,\n embedding_fn,\n output_layer,\n batch_size,\n beam_size,\n length_penalty_weight,\n maximum_iterations,\n start_id,\n end_id,\n swap_memory=True):\n logger.debug('Building subgraph V4 for Beam Search.')\n \n state_init = cell.zero_state(batch_size * beam_size, tf.float32)\n start_ids = tf.tile([start_id], multiples=[batch_size])\n _dprint('rnn_decoder_beam_search: Initial state: {}'.format(state_init))\n _dprint('rnn_decoder_beam_search: Cell state size: {}'.format(cell.state_size))\n \n # decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n decoder = BeamSearchDecoderMultiHead(\n cell=cell,\n embedding=embedding_fn,\n start_tokens=start_ids,\n end_token=end_id,\n initial_state=state_init,\n beam_width=beam_size,\n output_layer=output_layer,\n length_penalty_weight=length_penalty_weight,\n reorder_tensor_arrays=True) # r1.9 API\n dec_outputs, dec_states, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder=decoder,\n output_time_major=True,\n impute_finished=False,\n maximum_iterations=maximum_iterations,\n parallel_iterations=1,\n swap_memory=swap_memory)\n _dprint('rnn_decoder_beam_search: Final BeamSearchDecoderState: {}'.format(dec_states))\n \n # `dec_outputs` will be a `FinalBeamSearchDecoderOutput` object\n # `dec_states` will be a `BeamSearchDecoderState` object\n predicted_ids = dec_outputs.predicted_ids # (time, batch_size, beam_size)\n scores = dec_outputs.beam_search_decoder_output.scores # (time, batch_size, beam_size)\n # top_sequence = predicted_ids[:, :, 0]\n # top_score = scores[:, :, 0] # log-softmax scores\n return predicted_ids, scores, dec_states.cell_state", "title": "" }, { "docid": "de818fb33097ab0b1962f55bf9639cc1", "score": "0.5204731", "text": "def _build_decoder(self,\n encoder_outputs,\n enc_src_lengths,\n tgt_inputs = 
None,\n tgt_lengths = None,\n GO_SYMBOL = 1,\n END_SYMBOL = 2,\n out_layer_activation = None):\n def _add_residual_wrapper(cells, start_ind=1):\n for idx, cell in enumerate(cells):\n if idx>=start_ind:\n cells[idx] = tf.contrib.rnn.ResidualWrapper(cell)\n return cells\n\n with tf.variable_scope(\"Decoder\"):\n tgt_vocab_size = self.model_params['tgt_vocab_size']\n tgt_emb_size = self.model_params['tgt_emb_size']\n self._tgt_w = tf.get_variable(name='W_tgt_embedding',\n shape=[tgt_vocab_size, tgt_emb_size], dtype=getdtype())\n batch_size = self.model_params['batch_size']\n\n cell_params = copy.deepcopy(self.model_params)\n cell_params[\"num_units\"] = self.model_params['decoder_cell_units']\n decoder_cells = create_rnn_cell(cell_type=self.model_params['decoder_cell_type'],\n cell_params=cell_params,\n num_layers=self.model_params['decoder_layers'],\n dp_input_keep_prob=self.model_params[\n 'decoder_dp_input_keep_prob'] if self._mode == \"train\" else 1.0,\n dp_output_keep_prob=self.model_params[\n 'decoder_dp_output_keep_prob'] if self._mode == \"train\" else 1.0,\n residual_connections=False if self.model_params['attention_type'].startswith('gnmt')\n else self.model_params['decoder_use_skip_connections'],\n wrap_to_multi_rnn=not self.model_params['attention_type'].startswith('gnmt'))\n\n output_layer = layers_core.Dense(tgt_vocab_size, use_bias=False,\n activation = out_layer_activation)\n\n def attn_decoder_custom_fn(inputs, attention):\n # to make shapes equal for skip connections\n if self.model_params['decoder_use_skip_connections']:\n input_layer = layers_core.Dense(self.model_params['decoder_cell_units'], dtype=getdtype())\n return input_layer(tf.concat([inputs, attention], -1))\n else:\n return tf.concat([inputs, attention], -1)\n\n if self.mode == \"infer\":\n if self._decoder_type == \"beam_search\":\n self._length_penalty_weight = 0.0 if \"length_penalty\" not in self.model_params else self.model_params[\n \"length_penalty\"]\n # beam_width of 1 should be same as argmax decoder\n self._beam_width = 1 if \"beam_width\" not in self.model_params else self.model_params[\"beam_width\"]\n tiled_enc_outputs = tf.contrib.seq2seq.tile_batch(encoder_outputs, multiplier=self._beam_width)\n tiled_enc_src_lengths = tf.contrib.seq2seq.tile_batch(enc_src_lengths, multiplier=self._beam_width)\n attention_mechanism = self._build_attention(tiled_enc_outputs, tiled_enc_src_lengths)\n\n if self.model_params['attention_type'].startswith('gnmt'):\n attention_cell = decoder_cells.pop(0)\n attention_cell = tf.contrib.seq2seq.AttentionWrapper(\n attention_cell,\n attention_mechanism = attention_mechanism,\n attention_layer_size=None, # don't use attenton layer.\n output_attention=False,\n name=\"gnmt_attention\")\n attentive_decoder_cell = GNMTAttentionMultiCell(\n attention_cell, _add_residual_wrapper(decoder_cells),\n use_new_attention=(self.model_params['attention_type']=='gnmt_v2'))\n else:\n attentive_decoder_cell = tf.contrib.seq2seq.AttentionWrapper(cell=decoder_cells,\n attention_mechanism=attention_mechanism,\n cell_input_fn=attn_decoder_custom_fn)\n batch_size_tensor = tf.constant(batch_size)\n decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n cell=attentive_decoder_cell,\n embedding=self._tgt_w,\n start_tokens=tf.tile([GO_SYMBOL], [batch_size]),\n end_token=END_SYMBOL,\n initial_state=attentive_decoder_cell.zero_state(dtype=getdtype(),\n batch_size=batch_size_tensor * self._beam_width),\n beam_width=self._beam_width,\n output_layer=output_layer,\n 
length_penalty_weight=self._length_penalty_weight)\n else:\n attention_mechanism = self._build_attention(encoder_outputs, enc_src_lengths)\n if self.model_params['attention_type'].startswith('gnmt'):\n attention_cell = decoder_cells.pop(0)\n attention_cell = tf.contrib.seq2seq.AttentionWrapper(\n attention_cell,\n attention_mechanism = attention_mechanism,\n attention_layer_size=None,\n output_attention=False,\n name=\"gnmt_attention\")\n attentive_decoder_cell = GNMTAttentionMultiCell(\n attention_cell, _add_residual_wrapper(decoder_cells),\n use_new_attention=(self.model_params['attention_type']=='gnmt_v2'))\n else:\n attentive_decoder_cell = tf.contrib.seq2seq.AttentionWrapper(cell=decoder_cells,\n attention_mechanism=attention_mechanism,\n cell_input_fn=attn_decoder_custom_fn)\n helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n embedding=self._tgt_w,\n start_tokens=tf.fill([batch_size], GO_SYMBOL),\n end_token=END_SYMBOL)\n decoder = tf.contrib.seq2seq.BasicDecoder(\n cell=attentive_decoder_cell,\n helper=helper,\n initial_state=attentive_decoder_cell.zero_state(batch_size=batch_size, dtype=getdtype()),\n output_layer=output_layer)\n\n elif self.mode == \"train\":\n attention_mechanism = self._build_attention(encoder_outputs, enc_src_lengths)\n if self.model_params['attention_type'].startswith('gnmt'):\n attention_cell = decoder_cells.pop(0)\n attention_cell = tf.contrib.seq2seq.AttentionWrapper(\n attention_cell,\n attention_mechanism=attention_mechanism,\n attention_layer_size=None,\n output_attention=False,\n name=\"gnmt_attention\")\n attentive_decoder_cell = GNMTAttentionMultiCell(\n attention_cell, _add_residual_wrapper(decoder_cells),\n use_new_attention=(self.model_params['attention_type'] == 'gnmt_v2'))\n else:\n attentive_decoder_cell = tf.contrib.seq2seq.AttentionWrapper(cell=decoder_cells,\n attention_mechanism=attention_mechanism,\n cell_input_fn=attn_decoder_custom_fn)\n input_vectors = tf.nn.embedding_lookup(self._tgt_w, tgt_inputs)\n helper = tf.contrib.seq2seq.TrainingHelper(\n inputs = input_vectors,\n sequence_length = tgt_lengths)\n\n decoder = tf.contrib.seq2seq.BasicDecoder(\n cell=attentive_decoder_cell,\n helper=helper,\n output_layer=output_layer,\n initial_state=attentive_decoder_cell.zero_state(batch_size, dtype=getdtype()))\n else:\n raise NotImplementedError(\"Unknown mode\")\n\n final_outputs, final_state, final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(\n decoder = decoder,\n impute_finished=False if self._decoder_type == \"beam_search\" else True,\n maximum_iterations=tf.reduce_max(tgt_lengths) if self._mode == 'train' else tf.reduce_max(enc_src_lengths)*2,\n swap_memory = False if 'use_swap_memory' not in self.model_params else self.model_params['use_swap_memory'])\n\n return final_outputs, final_state, final_sequence_lengths", "title": "" }, { "docid": "88b74095a4b39fd0820bd5af5543ba20", "score": "0.51977587", "text": "def viterbi_decoding(self, observations):\n back_pointers = []\n # observation score for BEG tag\n init = observations[0]\n prev = init\n transition_T = dy.transpose(self.transitions)\n trans_exprs = [transition_T[idx] for idx in range(self.dim_ts_y + 2)]\n for obs in observations[1:]:\n bpts_t = []\n vvars_t = []\n for next_y in range(self.dim_ts_y + 2):\n # trans_exprs[next_y], transition probabilities that ends with next_y\n next_y_expr = prev + trans_exprs[next_y]\n next_y_arr = next_y_expr.npvalue()\n best_y = np.argmax(next_y_arr)\n bpts_t.append(best_y)\n vvars_t.append(dy.pick(next_y_expr, best_y))\n prev = 
dy.concatenate(vvars_t) + obs\n back_pointers.append(bpts_t)\n # end tags\n #terminal_expr = prev + trans_exprs[self.dim_ts_y+1]\n #terminal_arr = terminal_expr.npvalue()\n final = prev\n final_arr = final.npvalue()\n best_y = np.argmax(final_arr)\n assert best_y == (self.dim_ts_y + 1)\n path_score = dy.pick(final, best_y)\n # reverse over the backpointers to get the best path\n # backtracking\n best_path = []\n for bpts_t in reversed(back_pointers):\n best_y = bpts_t[best_y]\n best_path.append(best_y)\n # remove the beg label\n BEG = best_path.pop()\n best_path.reverse()\n assert BEG == self.dim_ts_y\n return best_path, path_score", "title": "" }, { "docid": "f970a5bae3d464eae0679a41f142ab38", "score": "0.5182165", "text": "def build_decoder(cfg):\n rnn_state = tf.keras.Input(shape=[cfg.num_rnn_units], name='rnn_state')\n latent_code = tf.keras.Input(shape=[cfg.latent_code_size], name='latent_code')\n hidden = layers.Concatenate()([rnn_state, latent_code])\n hidden = layers.Dense(128, **cfg.dense_layer_kwargs)(hidden)\n keypoints = layers.Dense(cfg.num_keypoints * 3, activation=tf.nn.tanh)(\n hidden)\n return tf.keras.Model(\n inputs=[rnn_state, latent_code], outputs=keypoints, name='decoder')", "title": "" }, { "docid": "a2423ab7847093632c9854e4e425c82a", "score": "0.516594", "text": "def _decode_infer(self, decoder, bridge, encoder_outputs):\n batch_size = tf.shape(encoder_outputs.outputs)[0]\n\n # if self.use_beam_search:\n # batch_size = self.beam_width\n # TODO: why?\n\n target_embedding = self._generate_target_embedding(reuse=True)\n\n helper_infer = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n # embedding=self.decoder_outputs_train.logits,\n embedding=target_embedding, # embedding of predicted labels\n # start_tokens=tf.fill([batch_size], self.sos_index),\n start_tokens=tf.tile([self.sos_index], [batch_size]),\n end_token=self.eos_index)\n # ex.)\n # Output tensor has shape [2, 3].\n # tf.fill([2, 3], 9) ==> [[9, 9, 9]\n # [9, 9, 9]]\n # TODO: beam_search_decoder\n\n decoder_initial_state = bridge(reuse=True)\n\n # Call decoder class\n (decoder_outputs, final_state) = decoder(\n initial_state=decoder_initial_state,\n helper=helper_infer,\n mode=tf.contrib.learn.ModeKeys.INFER)\n # NOTE: They are time-major if self.time_major is True\n\n return (decoder_outputs, final_state)", "title": "" }, { "docid": "bf04743ead5984bed52a4d5853fc7817", "score": "0.5157566", "text": "def _loop_body_vdecoder(self, time, memory_state, outputs, free_gates, allocation_gates, write_gates,\n read_weightings, write_weightings, usage_vectors, controller_state,\n outputs_cache, controller_hiddens,\n encoder_write_weightings,\n encoder_controller_hiddens,\n kl_losses, zs, dist1s, dist2s, mixturews, last_reads):\n\n # dynamic tensor array input\n if self.decoder_mode:\n def fn1():\n return tf.zeros([self.batch_size, self.output_size])\n def fn2():\n def fn2_1():\n return self.target_output[:, time - 1, :]\n\n def fn2_2():\n inds = tf.argmax(outputs_cache.read(time - 1), axis=-1)\n return tf.one_hot(inds, depth=self.output_size)\n\n if self.use_teacher:\n return tf.cond(self.teacher_force[time - 1], fn2_1, fn2_2)\n else:\n return fn2_2()\n\n feed_value = tf.cond(time>0,fn2,fn1)\n\n\n if not self.use_emb_decoder:\n r = tf.reshape(feed_value, [self.batch_size, self.input_decoder_size])\n step_input = r\n elif self.dual_emb:\n step_input = tf.matmul(feed_value, self.W_emb_decoder)\n else:\n step_input = tf.matmul(feed_value, self.W_emb_encoder)\n\n else:\n if self.use_emb_decoder:\n if self.dual_emb:\n step_input 
= tf.matmul(self.unpacked_input_decoder_data.read(time), self.W_emb_decoder)\n else:\n step_input = tf.matmul(self.unpacked_input_decoder_data.read(time), self.W_emb_encoder)\n else:\n step_input = self.unpacked_input_decoder_data.read(time)\n\n if self.gt_type=='bow':\n \"\"\"bag of word encoding of given data\"\"\"\n feed_value_gt = tf.reduce_sum(self.target_output[:,:time,:], axis=1)\n\n if self.use_emb_decoder:\n if self.dual_emb:\n step_gt = tf.matmul(feed_value_gt, self.W_emb_decoder)\n else:\n step_gt = tf.matmul(feed_value_gt, self.W_emb_encoder)\n else:\n step_gt = feed_value_gt\n else:\n \"\"\"rnn encoding of given data\"\"\"\n step_gt = self.encode_gt_rnn(time)\n\n # compute one step of controller\n output_list = self._step_op_vdecoder(time, step_input, step_gt, memory_state, controller_state)\n\n # update memory parameters\n\n # new_controller_state = tf.zeros(1)\n new_memory_state = tuple(output_list[0:7])\n new_controller_state = output_list[11] # state hidden values\n\n if self.nlayer > 1:\n try:\n controller_hiddens = controller_hiddens.write(time, new_controller_state[-1][-1])\n print('state include c and h')\n except:\n controller_hiddens = controller_hiddens.write(time, new_controller_state[-1])\n print('state include only h')\n else:\n controller_hiddens = controller_hiddens.write(time, new_controller_state[-1])\n print('single layer')\n\n\n\n outputs = outputs.write(time, output_list[7]) # new output is updated\n outputs_cache = outputs_cache.write(time, output_list[7]) # new output is updated\n # collecting memory view for the current step\n free_gates = free_gates.write(time, output_list[8])\n allocation_gates = allocation_gates.write(time, output_list[9])\n write_gates = write_gates.write(time, output_list[10])\n read_weightings = read_weightings.write(time, output_list[5])\n write_weightings = write_weightings.write(time, output_list[4])\n usage_vectors = usage_vectors.write(time, output_list[1])\n kl_losses = kl_losses.write(time, output_list[12])\n zs = zs.write(time, output_list[13])\n dist1s = dist1s.write(time, output_list[14])\n dist2s = dist2s.write(time, output_list[15])\n mixturews = mixturews.write(time, output_list[16])\n last_reads = last_reads.write(time, output_list[17])\n\n # all variables have been updated should be return for next step reference\n return (\n time + 1, # 0\n new_memory_state, # 1\n outputs, # 2\n free_gates, allocation_gates, write_gates, # 3 4 5\n read_weightings, write_weightings, usage_vectors, # 6 7 8\n new_controller_state, # 9\n outputs_cache, # 10\n controller_hiddens, # 11\n encoder_write_weightings, #12\n encoder_controller_hiddens, #13\n kl_losses, #14\n zs, #15\n dist1s, #16\n dist2s, #17\n mixturews, #18\n last_reads, #19\n )", "title": "" }, { "docid": "fc01d7829331d9027737509779f6eec6", "score": "0.5157338", "text": "def decoding_layer_infer(self, encoder_outputs, encoder_state, dec_cell,\n dec_embeddings, start_of_sequence_id,\n end_of_sequence_id, max_target_sequence_length,\n vocab_size, output_layer, batch_size, keep_prob,\n target_sequence_length, rnn_size):\n dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, \n output_keep_prob=keep_prob)\n \n infer_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings, \n tf.fill([batch_size], start_of_sequence_id), \n end_of_sequence_id)\n \n attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(rnn_size, encoder_outputs,\n memory_sequence_length=target_sequence_length)\n \n attention_cell = tf.contrib.seq2seq.AttentionWrapper(dec_cell, attention_mechanism,\n 
attention_layer_size=rnn_size/2)\n \n state = attention_cell.zero_state(dtype=tf.float32, batch_size=batch_size)\n state = state.clone(cell_state=encoder_state)\n \n decoder = tf.contrib.seq2seq.BasicDecoder(cell=attention_cell, helper=infer_helper, \n initial_state=state,\n output_layer=output_layer)\n \n outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True,\n maximum_iterations=max_target_sequence_length)\n \n return outputs", "title": "" }, { "docid": "c4485121c6e6eb2ddffdc23c38d9f719", "score": "0.51526666", "text": "def init_decoder(self, verbose=False):\n # TODO : construct automatically from input_shape\n #decoder = Model(inputs=self.autoencoder.get_layer('reshape', 6), outputs=self.autoencoder.get_layer('conv_3d', -1).output)\n encoded_input = Input(shape=(392,))\n deco = self.autoencoder.layers[-8](encoded_input)\n for i in range(-7, 0):\n deco = self.autoencoder.layers[i](deco)\n # create the decoder model\n decoder = Model(encoded_input, deco)\n print('decoder initialized.')\n if verbose:\n decoder.summary()\n return decoder", "title": "" }, { "docid": "34c067d6fe2211fb6a355c7d2b6bc002", "score": "0.51502675", "text": "def r2rtdecoder(self):\n\n lower_triangular_ones = tf.constant(\n np.tril(np.ones([self._max_length, self._max_length])), dtype=tf.float32)\n seqlen_mask = tf.slice(tf.gather(lower_triangular_ones, self.seqlen - 1),\n [0, 0], [self._batch_size2, self._max_length])\n\n # RNN\n state_size = self._emb_dim\n num_classes = self._class_num\n\n cell = tf.contrib.rnn.BasicRNNCell(state_size)\n\n init_state = tf.get_variable('init_state', [1, state_size],\n initializer=tf.constant_initializer(0.0))\n init_state = tf.tile(init_state, [self._batch_size2, 1])\n rnn_outputs, final_state = tf.nn.dynamic_rnn(\n cell, self.x_one_hot, sequence_length=self.seqlen, initial_state=init_state)\n\n y_reshaped = tf.reshape(self.y, [-1])\n\n \"\"\"\n decoder\n \n use the last step output of encoder as the input\n \"\"\"\n #en_last_output = self.last_relevant(rnn_outputs, self.seqlen)\n idx = tf.range(self._batch_size2) * \\\n tf.shape(rnn_outputs)[1] + (self.seqlen - 1)\n last_rnn_output = tf.gather(tf.reshape(\n rnn_outputs, [-1, state_size]), idx)\n\n with tf.variable_scope('decoder'):\n decoder_cell = tf.contrib.rnn.BasicRNNCell(self._emb_dim)\n dec_input = last_rnn_output\n dec_in_state = final_state\n dec_outputs = []\n with tf.variable_scope('multi_decoder') as scope:\n for id in range(self._max_length):\n if id > 0:\n scope.reuse_variables()\n dec_output, dec_out_state = seq2seq_lib.rnn_decoder(\n [dec_input], dec_in_state, decoder_cell)\n # variable_scope.get_variable_scope().reuse_variables()\n dec_input = dec_output[0]\n dec_in_state = dec_out_state\n dec_outputs += dec_output\n\n # dec_outputs: [batch_size, max_length, state_size]\n # [batch_size*maxlenth, state_size]\n dec_final_output = tf.concat(dec_outputs, axis=0)\n\n # Softmax layer\n # with tf.variable_scope('softmax'):\n # W = tf.get_variable('W', [state_size, num_classes])\n # b = tf.get_variable('b', [num_classes], initializer=tf.constant_initializer(0.0))\n # weight = tf.Variable([self._emb_dim, self._class_num])\n\n W = tf.Variable(tf.truncated_normal(\n [self._emb_dim, self._class_num], stddev=0.01))\n b = tf.Variable(tf.constant(0.1, shape=[self._class_num, ]))\n logits = tf.matmul(dec_final_output, W) + b\n\n # order not the same as y with tf.concat\n l1 = tf.reshape(logits, [self._max_length, -1, self._class_num])\n l2 = tf.transpose(l1, [1, 0, 2])\n logits = tf.reshape(l2, [-1, 
self._class_num])\n\n preds = tf.nn.softmax(logits)\n final_output = tf.argmax(preds, 1)\n \"\"\"\n Accuracy\n \"\"\"\n # To calculate the number of correctly predicted value(we want to count\n # padded steps as incorrect)\n correct = tf.cast(tf.equal(tf.cast(final_output, tf.int32), y_reshaped), tf.int32) * \\\n tf.cast(tf.reshape(seqlen_mask, [-1]), tf.int32)\n truevalue = y_reshaped\n # To calculate accuracy we want to divide by the number of non-padded time-steps,\n # rather than taking the mean\n accuracy = tf.reduce_sum(\n tf.cast(correct, tf.float32)) / tf.reduce_sum(tf.cast(self.seqlen, tf.float32))\n \"\"\"\n Loss function\n \"\"\"\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=y_reshaped, logits=logits)\n loss = loss * tf.reshape(seqlen_mask, [-1])\n\n # To calculate average loss, we need to divide by number of non-padded time-steps,\n # rather than taking the mean\n loss = tf.reduce_sum(loss) / tf.reduce_sum(seqlen_mask)\n optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)\n\n saver = tf.train.Saver()\n\n \"\"\"\n Training\n \"\"\"\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n e_loss = []\n e_acc = []\n learning_rate = 2 * 1e-3\n for epoch in range(self._epoch_num):\n total_loss = []\n total_acc = []\n for batch in range(self._batch_num):\n batch_X, batch_y, batch_len = self.getNextBacth(\n self._batch_size, batch)\n batch_size_2 = batch_X.shape[0]\n feed = {self.x: batch_X, self.y: batch_y, self.seqlen: batch_len,\n self._batch_size2: batch_size_2, self.learning_rate: learning_rate}\n cor, dec_out, y_re, log, acc, cost, _ = sess.run(\n [correct, dec_outputs, y_reshaped, logits, accuracy, loss, optimizer], feed_dict=feed)\n total_loss.append(cost)\n total_acc.append(acc)\n\n total_loss = np.sum(np.array(total_loss))\n total_acc = np.mean(np.array(total_acc))\n e_loss.append(total_loss)\n e_acc.append(total_acc)\n print(\"Epoch\" + str(epoch) + \":\")\n print(\"Loss: \" + str(total_loss) + \" \" +\n \"Accuracy: \" + str(total_acc))\n\n if total_loss < 1000:\n learning_rate = 1e-3\n if total_loss < 300:\n learning_rate = 1e-4\n #print(\"Learning rate changed.\")\n\n if epoch == self._epoch_num - 1 or total_loss < 0.5: # or total_acc>0.985:\n hidden_code = []\n rnn_code = []\n total_acc = []\n for test_batch in range(self._batch_num_test):\n if test_batch == self._batch_num_test - 1:\n a = 1\n batch_testX, batch_y, batch_testlen = self.getNextTestBatch(\n self._batch_size, test_batch)\n batch_testsize_2 = batch_testX.shape[0]\n\n feed = {self.x: batch_testX, self.y: batch_y, self.seqlen: batch_testlen,\n self._batch_size2: batch_testsize_2, self.learning_rate: learning_rate}\n last_rnno, rnno, t, f, code, acc = sess.run(\n [last_rnn_output, rnn_outputs, truevalue, final_output, final_state, accuracy], feed_dict=feed)\n code = code.reshape([-1, self._emb_dim])\n hidden_code.extend(code)\n total_acc.append(acc)\n\n #print(\"Batch: \"+str(test_batch))\n print(\"True\" + str(t[0:self._max_length]))\n print(\"Pred\" + str(f[0:self._max_length]))\n total_acc = np.mean(np.array(total_acc))\n print(\"Accuracy:\" + str(total_acc))\n codes = np.array(hidden_code).reshape(-1, self._emb_dim)\n df = pd.DataFrame(codes[0:len(self.testdata), :])\n # file_hidden=\"twornn_hidden\"+train_filename[4:len(train_filename)-4]+\"_\"+str(self._emb_dim)+\".csv\"\n file_hidden = \"hiddencode_50000_\" + \\\n str(self._emb_dim) + \".csv\"\n df.to_csv(file_hidden, float_format='%.5f')\n # df = pd.DataFrame(np.array(rnn_code).reshape(-1, 
self._emb_dim))\n # df.to_csv(\"twornn_output_airline12.csv\", float_format='%.5f')\n\n break\n # Save the variables to disk.\n save_path = saver.save(sess, \"savemodel/twornn3.ckpt\")\n print(\"Model saved in file: \" + save_path)\n\n self.plot(np.array(e_loss), np.array(e_acc))\n\n return", "title": "" }, { "docid": "ee9191d4a5bd43a2fc56fee300f548d5", "score": "0.5145198", "text": "def rnn_decoder_no_iteration(decoder_inputs,\n initial_state,\n cell,\n loop_function=None,\n scope=None):\n with variable_scope.variable_scope(scope or \"rnn_decoder\"):\n state = initial_state\n outputs = []\n prev = None\n # 注意,这里有loop_fun ,不需要再为每一个decoder单元提供输入,则不需要再用迭代循环decoder_inputs了\n # for i, inp in enumerate(decoder_inputs):\n for i in range(max_line_char_num):\n inp = decoder_inputs\n if loop_function is not None and prev is not None:\n with variable_scope.variable_scope(\"loop_function\", reuse=True):\n inp = loop_function(prev, i)\n # if i > 0:\n # variable_scope.get_variable_scope().reuse_variables()\n output, state = cell(inp, state)\n outputs.append(output)\n if loop_function is not None:\n prev = output\n return outputs, state", "title": "" }, { "docid": "d57093ea3c9f54b42da4251f952a1c25", "score": "0.5131037", "text": "def decoder46(**kwargs):\n return Decoder(DecoderBottleneck, [6, 4, 3], **kwargs)", "title": "" }, { "docid": "82aaa2bd3b5e479d2648c37c14edf730", "score": "0.50946945", "text": "def _inference_step(self, state):\n\n decoder_inputs = state[\"inputs\"]\n encoder_outputs = state[\"encoder_outputs\"]\n enc_dec_attention_bias = state[\"encoder_decoder_attention_bias\"]\n alignment_positions = state[\"alignment_positions\"]\n\n outputs = self._decode_pass(\n decoder_inputs=decoder_inputs,\n encoder_outputs=encoder_outputs,\n enc_dec_attention_bias=enc_dec_attention_bias,\n alignment_positions=alignment_positions\n )\n\n with tf.variable_scope(\"inference_step\"):\n next_inputs_mel = outputs[\"post_net_spec\"][:, -1:, :]\n next_inputs_mel = self._expand(next_inputs_mel, self.reduction_factor)\n next_inputs_mag = outputs[\"mag_spec\"][:, -1:, :]\n next_inputs_mag = self._expand(next_inputs_mag, self.reduction_factor)\n next_inputs = tf.concat([next_inputs_mel, next_inputs_mag], axis=-1)\n\n n_features = self.n_mel + self.n_mag\n next_inputs = self._shrink(next_inputs, n_features, self.reduction_factor)\n\n # Set zero if sequence is finished\n next_inputs = tf.where(\n state[\"finished\"],\n tf.zeros_like(next_inputs),\n next_inputs\n )\n next_inputs = tf.concat([decoder_inputs, next_inputs], 1)\n\n # Update lengths\n lengths = state[\"outputs\"][\"lengths\"]\n lengths = tf.where(\n state[\"finished\"],\n lengths,\n lengths + 1 * self.reduction_factor\n )\n outputs[\"lengths\"] = lengths\n\n # Update spec, post_net_spec and mag_spec\n for key in [\"spec\", \"post_net_spec\", \"mag_spec\"]:\n output = outputs[key][:, -1:, :]\n output = tf.where(state[\"finished\"], tf.zeros_like(output), output)\n outputs[key] = tf.concat([state[\"outputs\"][key], output], 1)\n\n # Update stop token logits\n stop_token_logits = outputs[\"stop_token_logits\"][:, -1:, :]\n stop_token_logits = tf.where(\n state[\"finished\"],\n tf.zeros_like(stop_token_logits) + 1e9,\n stop_token_logits\n )\n stop_prediction = tf.sigmoid(stop_token_logits)\n stop_prediction = tf.reduce_max(stop_prediction, axis=-1)\n\n # Uncomment next line if you want to use stop token predictions\n finished = tf.reshape(tf.cast(tf.round(stop_prediction), tf.bool), [-1])\n finished = tf.reshape(finished, [-1])\n\n stop_token_logits = 
tf.concat(\n [state[\"outputs\"][\"stop_token_logits\"], stop_token_logits],\n axis=1\n )\n outputs[\"stop_token_logits\"] = stop_token_logits\n\n with tf.variable_scope(\"alignments\"):\n forward = \"ForwardPass\" if self.mode == \"infer\" else \"ForwardPass_1\"\n weights = []\n\n for index in range(len(self.attentions)):\n op = forward + \"/centaur_decoder/while/attention_block_%d/attention/attention/attention_weights\" % index\n weights_operation = tf.get_default_graph().get_operation_by_name(op)\n weight = weights_operation.values()[0]\n weights.append(weight)\n\n weights = tf.stack(weights)\n outputs[\"alignments\"] = [weights]\n\n alignment_positions = tf.argmax(\n weights,\n axis=-1,\n output_type=tf.int32\n )[:, :, :, -1:]\n state[\"alignment_positions\"] = tf.concat(\n [state[\"alignment_positions\"], alignment_positions],\n axis=-1\n )\n\n state[\"iteration\"] = state[\"iteration\"] + 1\n state[\"inputs\"] = next_inputs\n state[\"finished\"] = finished\n state[\"outputs\"] = outputs\n\n return state", "title": "" }, { "docid": "7acd1cdacdae1af214edece06d0046ea", "score": "0.5092598", "text": "def build_graph(self):\n\n # make dynamic time step length tensor\n self.unpacked_input_encoder_data = utility.unpack_into_tensorarray(self.input_encoder, 1, self.sequence_length)\n\n # want to store all time step values of these variables\n eoutputs = tf.TensorArray(tf.float32, self.sequence_length)\n eoutputs_cache = tf.TensorArray(tf.float32, self.sequence_length)\n efree_gates = tf.TensorArray(tf.float32, self.sequence_length)\n eallocation_gates = tf.TensorArray(tf.float32, self.sequence_length)\n ewrite_gates = tf.TensorArray(tf.float32, self.sequence_length)\n eread_weightings = tf.TensorArray(tf.float32, self.sequence_length, clear_after_read=False)\n ewrite_weightings = tf.TensorArray(tf.float32, self.sequence_length, clear_after_read=False)\n eusage_vectors = tf.TensorArray(tf.float32, self.sequence_length, clear_after_read=False)\n econtroller_hiddens = tf.TensorArray(tf.float32, self.sequence_length, clear_after_read=False)\n\n # make dynamic time step length tensor\n self.unpacked_input_decoder_data = utility.unpack_into_tensorarray(self.input_decoder, 1, self.decode_length)\n\n # want to store all time step values of these variables\n doutputs = tf.TensorArray(tf.float32, self.decode_length)\n doutputs_cache = tf.TensorArray(tf.float32, self.decode_length)\n dfree_gates = tf.TensorArray(tf.float32, self.decode_length)\n dallocation_gates = tf.TensorArray(tf.float32, self.decode_length)\n dwrite_gates = tf.TensorArray(tf.float32, self.decode_length)\n dread_weightings = tf.TensorArray(tf.float32, self.decode_length)\n dwrite_weightings = tf.TensorArray(tf.float32, self.decode_length, clear_after_read=False)\n dusage_vectors = tf.TensorArray(tf.float32, self.decode_length)\n dcontroller_hiddens = tf.TensorArray(tf.float32, self.decode_length, clear_after_read=False)\n\n # inital state for RNN controller\n controller_state = self.controller.zero_state()\n print(controller_state)\n memory_state = self.memory.init_memory()\n if self.persist_mode:\n def p1():\n return memory_state, controller_state\n def p2():\n return (self.cur_mem_content, self.cur_u, self.cur_p,\n self.cur_L, self.cur_ww, self.cur_rw, self.cur_rv), \\\n tuple(self.cur_encoder_rnn_state)\n memory_state, controller_state=tf.cond(self.clear_mem, p1, p2)\n\n\n # final_results = None\n with tf.variable_scope(\"sequence_encoder_loop\"):\n time = tf.constant(0, dtype=tf.int32)\n\n # use while instead of scan --> suitable 
with dynamic time step\n encoder_results = tf.while_loop(\n cond=lambda time, *_: time < self.sequence_length,\n body=self._loop_body_encoder,\n loop_vars=(\n time, memory_state, eoutputs,\n efree_gates, eallocation_gates, ewrite_gates,\n eread_weightings, ewrite_weightings,\n eusage_vectors, controller_state,\n eoutputs_cache, econtroller_hiddens\n ), # do not need to provide intial values, the initial value lies in the variables themselves\n parallel_iterations=1,\n swap_memory=True\n )\n\n memory_state2 = self.memory.init_memory(self.read_heads_decode)\n if self.read_heads_decode!=self.read_heads:\n encoder_results_state=(encoder_results[1][0],encoder_results[1][1],encoder_results[1][2],\n encoder_results[1][3],encoder_results[1][4], memory_state2[5],memory_state2[6])\n else:\n encoder_results_state=encoder_results[1]\n with tf.variable_scope(\"sequence_decoder_loop\"):\n time = tf.constant(0, dtype=tf.int32)\n nstate = controller_state\n if self.pass_encoder_state:\n nstate = encoder_results[9]\n # use while instead of scan --> suitable with dynamic time step\n final_results = tf.while_loop(\n cond=lambda time, *_: time < self.decode_length,\n body=self._loop_body_decoder,\n loop_vars=(\n time, encoder_results_state, doutputs,\n dfree_gates, dallocation_gates, dwrite_gates,\n dread_weightings, dwrite_weightings,\n dusage_vectors, nstate,\n doutputs_cache, dcontroller_hiddens,\n encoder_results[7], encoder_results[11]\n ), # do not need to provide intial values, the initial value lies in the variables themselves\n parallel_iterations=1,\n swap_memory=True\n )\n\n if self.persist_mode:\n\n self.cur_mem_content, self.cur_u, self.cur_p, \\\n self.cur_L, self.cur_ww, self.cur_rw, self.cur_rv = encoder_results[1]\n try:\n self.cur_c = encoder_results[9][0][0]\n self.cur_h = encoder_results[9][0][1]\n self.cur_encoder_rnn_state = list(self.controller.zero_state())\n self.cur_encoder_rnn_state[0][0]=self.cur_c\n self.cur_encoder_rnn_state[0][1] = self.cur_h\n except:\n self.cur_c = encoder_results[9][0]\n self.cur_h = encoder_results[9][0]\n self.cur_encoder_rnn_state = list(self.controller.zero_state())\n self.cur_encoder_rnn_state[0] = self.cur_c\n\n\n\n\n dependencies = []\n if self.controller.has_recurrent_nn:\n # tensor array of pair of hidden and state values of rnn\n dependencies.append(self.controller.update_state(final_results[9]))\n\n with tf.control_dependencies(dependencies):\n # convert output tensor array to normal tensor\n self.packed_output = utility.pack_into_tensor(final_results[2], axis=1)\n self.packed_memory_view_encoder = {\n 'free_gates': utility.pack_into_tensor(encoder_results[3], axis=1),\n 'allocation_gates': utility.pack_into_tensor(encoder_results[4], axis=1),\n 'write_gates': utility.pack_into_tensor(encoder_results[5], axis=1),\n 'read_weightings': utility.pack_into_tensor(encoder_results[6], axis=1),\n 'write_weightings': utility.pack_into_tensor(encoder_results[7], axis=1),\n 'usage_vectors': utility.pack_into_tensor(encoder_results[8], axis=1),\n 'final_controller_ch': encoder_results[9],\n }\n self.packed_memory_view_decoder = {\n 'free_gates': utility.pack_into_tensor(final_results[3], axis=1),\n 'allocation_gates': utility.pack_into_tensor(final_results[4], axis=1),\n 'write_gates': utility.pack_into_tensor(final_results[5], axis=1),\n 'read_weightings': utility.pack_into_tensor(final_results[6], axis=1),\n 'write_weightings': utility.pack_into_tensor(final_results[7], axis=1),\n 'usage_vectors': utility.pack_into_tensor(final_results[8], axis=1),\n 
'final_controller_ch':final_results[9],\n }", "title": "" }, { "docid": "0759257b656c205ef007a1180ad86dc0", "score": "0.5069306", "text": "def decoder_body(encoding, state, answer, state_size, pool_size, document_length, keep_prob=1.0):\n maxlen = tf.shape(encoding)[1]\n \n def highway_maxout_network(answer):\n span_encoding = start_and_end_encoding(encoding, answer)\n r_input = convert_gradient_to_tensor(tf.concat([state, span_encoding], axis=1))\n r_input = tf.nn.dropout(r_input, keep_prob)\n r = tf.layers.dense(r_input, state_size, use_bias=False, activation=tf.tanh)\n r = tf.expand_dims(r, 1)\n r = tf.tile(r, (1, maxlen, 1))\n highway_input = convert_gradient_to_tensor(tf.concat([encoding, r], 2))\n logit = highway_maxout(highway_input, state_size, pool_size, keep_prob)\n #alpha = two_layer_mlp(highway_input, state_size, keep_prob=keep_prob)\n logit = _maybe_mask_score(logit, document_length, -1e30)\n return logit\n\n with tf.variable_scope('start'):\n alpha = highway_maxout_network(answer)\n\n with tf.variable_scope('end'):\n updated_start = tf.argmax(alpha, axis=1, output_type=tf.int32)\n updated_answer = tf.stack([updated_start, answer[:, 1]], axis=1)\n beta = highway_maxout_network(updated_answer)\n \n return tf.stack([alpha, beta], axis=2)", "title": "" }, { "docid": "b53d9103da8378d6e2d85a93056d9b9b", "score": "0.50678635", "text": "def fista_conv(X, D, lmbd, max_iter, z0=None, verbose=0):\n\n # Initiate the algorithm\n f_cost = ConvL2_z(X, D)\n if z0 is None:\n zk = f_cost.get_z0()\n else:\n zk = np.copy(z0)\n L = f_cost.L\n\n # Define _cost function using keras to get conscistent results\n # def _cost(z):\n # return f_cost(z) + lmbd * abs(z).mean(axis=0).sum()\n Dk = np.transpose(D, (2, 3, 0, 1))[::-1, ::-1]\n if zk.ndim == 5:\n zk_shape = (zk.shape[1],) + zk.shape[3:]\n _cost_model = cost_conv_network((X.shape[1:], zk_shape), Dk, lmbd)\n _cost = lambda zk: _cost_model([X, zk[:, :, 0]]).mean()\n else:\n _cost_model = cost_conv_network((X.shape[1:], zk.shape[1:]), Dk, lmbd)\n _cost = lambda zk: _cost_model([X, zk]).mean()\n\n momentum = 1\n y = zk\n c = _cost(zk)\n cost = [c]\n _log(0, c, verbose)\n for i in range(max_iter):\n\n z_old, momentum_1 = zk, momentum\n\n grad = f_cost.grad(y)\n zk = y - grad / L\n zk = soft_thresholding(zk, lmbd / L)\n momentum = (1 + np.sqrt(1 + 4 * momentum * momentum)) / 2\n y = zk + (momentum_1 - 1) / momentum * (zk - z_old)\n\n c = _cost(zk)\n if c >= cost[-1]:\n # Restart the momentum if cost increase\n zk = z_old - f_cost.grad(z_old) / L\n y = zk = soft_thresholding(zk, lmbd / L)\n c = _cost(zk)\n\n cost += [c]\n _log((i + 1) / max_iter, c, verbose)\n if verbose > 0:\n print()\n\n return zk, cost", "title": "" }, { "docid": "8654a8fc40c938bf12f10a733be694ad", "score": "0.5056519", "text": "def build_graph(self):\n\n # make dynamic time step length tensor\n self.unpacked_input_encoder_data = utility.unpack_into_tensorarray(self.input_encoder, 1, self.sequence_length)\n\n # want to store all time step values of these variables\n eoutputs = tf.TensorArray(tf.float32, self.sequence_length)\n eoutputs_cache = tf.TensorArray(tf.float32, self.sequence_length)\n efree_gates = tf.TensorArray(tf.float32, self.sequence_length)\n eallocation_gates = tf.TensorArray(tf.float32, self.sequence_length)\n ewrite_gates = tf.TensorArray(tf.float32, self.sequence_length)\n eread_weightings = tf.TensorArray(tf.float32, self.sequence_length, clear_after_read=False)\n ewrite_weightings = tf.TensorArray(tf.float32, self.sequence_length, clear_after_read=False)\n 
eusage_vectors = tf.TensorArray(tf.float32, self.sequence_length, clear_after_read=False)\n econtroller_hiddens = tf.TensorArray(tf.float32, self.sequence_length, clear_after_read=False)\n\n # make dynamic time step length tensor\n self.unpacked_input_decoder_data = utility.unpack_into_tensorarray(self.input_decoder, 1, self.decode_length)\n self.unpacked_target_data = utility.unpack_into_tensorarray(self.target_output, 1, self.decode_length)\n\n # want to store all time step values of these variables\n doutputs = tf.TensorArray(tf.float32, self.decode_length)\n doutputs_cache = tf.TensorArray(tf.float32, self.decode_length)\n dfree_gates = tf.TensorArray(tf.float32, self.decode_length)\n dallocation_gates = tf.TensorArray(tf.float32, self.decode_length)\n dwrite_gates = tf.TensorArray(tf.float32, self.decode_length)\n dread_weightings = tf.TensorArray(tf.float32, self.decode_length)\n dwrite_weightings = tf.TensorArray(tf.float32, self.decode_length, clear_after_read=False)\n dusage_vectors = tf.TensorArray(tf.float32, self.decode_length)\n dcontroller_hiddens = tf.TensorArray(tf.float32, self.decode_length, clear_after_read=False)\n dkl = tf.TensorArray(tf.float32, self.decode_length)\n zs = tf.TensorArray(tf.float32, self.decode_length)\n dist1s = tf.TensorArray(tf.float32, self.decode_length)\n dist2s = tf.TensorArray(tf.float32, self.decode_length)\n mixturews = tf.TensorArray(tf.float32, self.decode_length)\n last_reads = tf.TensorArray(tf.float32, self.decode_length)\n\n # inital state for RNN controller\n controller_state = self.controller.zero_state()\n print(controller_state)\n memory_state = self.memory.init_memory()\n\n\n # final_results = None\n with tf.variable_scope(\"sequence_encoder_loop\"):\n time = tf.constant(0, dtype=tf.int32)\n\n # use while instead of scan --> suitable with dynamic time step\n encoder_results = tf.while_loop(\n cond=lambda time, *_: time < self.sequence_length,\n body=self._loop_body_encoder,\n loop_vars=(\n time, memory_state, eoutputs,\n efree_gates, eallocation_gates, ewrite_gates,\n eread_weightings, ewrite_weightings,\n eusage_vectors, controller_state,\n eoutputs_cache, econtroller_hiddens\n ), # do not need to provide intial values, the initial value lies in the variables themselves\n parallel_iterations=1,\n swap_memory=True\n )\n\n single_kl = 0\n memory_state2 = self.memory.init_memory(self.read_heads_decode)\n if self.read_heads_decode!=self.read_heads:\n encoder_results_state=(encoder_results[1][0],encoder_results[1][1],encoder_results[1][2],\n encoder_results[1][3],encoder_results[1][4], memory_state2[5],memory_state2[6])\n else:\n encoder_results_state=encoder_results[1]\n with tf.variable_scope(\"sequence_vdecoder_loop\"):\n time = tf.constant(0, dtype=tf.int32)\n nstate = controller_state\n if self.pass_encoder_state:\n nstate = encoder_results[9]\n\n\n if self.single_KL:\n def cal_h_y():\n time = tf.constant(0, dtype=tf.int32)\n\n # use while instead of scan --> suitable with dynamic time step\n encoder_results_y = tf.while_loop(\n cond=lambda time, *_: time < self.decode_length,\n body=self._loop_body_vencoder,\n loop_vars=(\n time, memory_state, eoutputs,\n efree_gates, eallocation_gates, ewrite_gates,\n eread_weightings, ewrite_weightings,\n eusage_vectors, controller_state,\n eoutputs_cache, econtroller_hiddens, self.unpacked_target_data\n ), # do not need to provide intial values, the initial value lies in the variables themselves\n parallel_iterations=1,\n swap_memory=True\n )\n print(encoder_results[9])\n return 
encoder_results_y[9]\n\n def cal_h_wt_y():\n print(self.controller.zero_state())\n return self.controller.zero_state()\n\n nstate_y = tf.cond(self.testing_phase, cal_h_wt_y, cal_h_y)\n if self.nlayer==1:\n nstate=[nstate]\n nstate_y=[nstate_y]\n\n\n def compute_single_z_y():\n newnstate = []\n for ns, ns_y in zip(nstate, nstate_y):\n nh = tf.concat([ns[0]+ns_y[0],ns[1]+ns_y[1]],axis=-1)\n nh = tf.matmul(tf.reshape(nh, [self.batch_size, -1]), self.W_recog)\n dist = self.get_the_prior_dist_uni(tf.tanh(nh))\n z = self.sample_the_uni(dist)\n newnstate.append(LSTMStateTuple(ns[0],z))\n return newnstate, dist\n\n def compute_single_z():\n newnstate = []\n wm=None\n for ns in nstate:\n nh0 = tf.concat([ns[0], ns[1]], axis=-1)\n nh = tf.matmul(tf.reshape(nh0, [self.batch_size, -1]), self.W_prior)\n if self.read_heads_decode==1:\n dist = self.get_the_prior_dist_uni(nh)\n z = self.sample_the_uni(dist)\n else:\n dist = self.get_the_prior_dist(\n tf.reshape(nh,[self.batch_size,self.hidden_controller_dim*2,self.read_heads_decode]),\n self.hidden_controller_dim)\n dist = tf.sigmoid(dist)\n wm = tf.matmul(tf.reshape(nh0, [self.batch_size, -1]), self.W_modew)\n wm=tf.nn.softmax(wm, dim=-1)\n z = self.sample_the_mixture(dist, wm)\n newnstate.append(LSTMStateTuple(ns[0], z))\n return newnstate, dist, wm\n\n newnstate_y, dist_y = compute_single_z_y()\n newnstate_x, dist_x, wm = compute_single_z()\n\n def ns_y():\n return newnstate_y\n\n def ns_x():\n return newnstate_x\n\n newnstate = tf.cond(self.testing_phase, ns_x, ns_y)\n\n\n if self.read_heads_decode==1:\n single_kl = self.KL2gauss_log(dist_y, dist_x, self.hidden_controller_dim)\n else:\n single_kl = self.KLmixgauss_log(dist_y, dist_x, wm, self.hidden_controller_dim)\n if self.nlayer==1:\n newnstate=newnstate[0]\n nstate=tuple(newnstate)\n\n\n final_results = tf.while_loop(\n cond=lambda time, *_: time < self.decode_length,\n body=self._loop_body_decoder,\n loop_vars=(\n time, encoder_results_state, doutputs,\n dfree_gates, dallocation_gates, dwrite_gates,\n dread_weightings, dwrite_weightings,\n dusage_vectors, nstate,\n doutputs_cache, dcontroller_hiddens,\n encoder_results[7], encoder_results[11]\n ), # do not need to provide intial values, the initial value lies in the variables themselves\n parallel_iterations=1,\n swap_memory=True\n )\n else:\n # use while instead of scan --> suitable with dynamic time step\n final_results = tf.while_loop(\n cond=lambda time, *_: time < self.decode_length,\n body=self._loop_body_vdecoder,\n loop_vars=(\n time, encoder_results_state, doutputs,\n dfree_gates, dallocation_gates, dwrite_gates,\n dread_weightings, dwrite_weightings,\n dusage_vectors, nstate,\n doutputs_cache, dcontroller_hiddens,\n encoder_results[7], encoder_results[11],\n dkl, zs, dist1s, dist2s, mixturews, last_reads\n ), # do not need to provide intial values, the initial value lies in the variables themselves\n parallel_iterations=1,\n swap_memory=True\n )\n\n\n\n dependencies = []\n if self.controller.has_recurrent_nn:\n # tensor array of pair of hidden and state values of rnn\n dependencies.append(self.controller.update_state(final_results[9]))\n\n with tf.control_dependencies(dependencies):\n # convert output tensor array to normal tensor\n self.packed_output = utility.pack_into_tensor(final_results[2], axis=1)\n if self.single_KL:\n self.packed_kl_losses = single_kl\n else:\n self.packed_kl_losses = utility.pack_into_tensor(final_results[14], axis=1)\n\n self.packed_memory_view_encoder = {\n 'free_gates': 
utility.pack_into_tensor(encoder_results[3], axis=1),\n 'allocation_gates': utility.pack_into_tensor(encoder_results[4], axis=1),\n 'write_gates': utility.pack_into_tensor(encoder_results[5], axis=1),\n 'read_weightings': utility.pack_into_tensor(encoder_results[6], axis=1),\n 'write_weightings': utility.pack_into_tensor(encoder_results[7], axis=1),\n 'usage_vectors': utility.pack_into_tensor(encoder_results[8], axis=1),\n 'final_controller_ch': encoder_results[9],\n }\n if self.single_KL:\n self.packed_memory_view_decoder = {\n 'free_gates': utility.pack_into_tensor(final_results[3], axis=1),\n 'allocation_gates': utility.pack_into_tensor(final_results[4], axis=1),\n 'write_gates': utility.pack_into_tensor(final_results[5], axis=1),\n 'read_weightings': utility.pack_into_tensor(final_results[6], axis=1),\n 'write_weightings': utility.pack_into_tensor(final_results[7], axis=1),\n 'usage_vectors': utility.pack_into_tensor(final_results[8], axis=1),\n 'final_controller_ch': final_results[9],\n }\n else:\n self.packed_memory_view_decoder = {\n 'last_reads': utility.pack_into_tensor(final_results[19], axis=1),\n 'free_gates': utility.pack_into_tensor(final_results[3], axis=1),\n 'allocation_gates': utility.pack_into_tensor(final_results[4], axis=1),\n 'write_gates': utility.pack_into_tensor(final_results[5], axis=1),\n 'read_weightings': utility.pack_into_tensor(final_results[6], axis=1),\n 'write_weightings': utility.pack_into_tensor(final_results[7], axis=1),\n 'usage_vectors': utility.pack_into_tensor(final_results[8], axis=1),\n 'final_controller_ch':final_results[9],\n 'zs':utility.pack_into_tensor(final_results[15], axis=1),\n 'dist1s': utility.pack_into_tensor(final_results[16], axis=1),\n 'dist2s': utility.pack_into_tensor(final_results[17], axis=1),\n 'mixturews': utility.pack_into_tensor(final_results[18], axis=1)\n }", "title": "" }, { "docid": "f60be9b6f87b3bb33b419dcf7fe75650", "score": "0.5053913", "text": "def _loop_body_decoder(self, time, memory_state, outputs, free_gates, allocation_gates, write_gates,\n read_weightings, write_weightings, usage_vectors, controller_state,\n outputs_cache, controller_hiddens,\n encoder_write_weightings, encoder_controller_hiddens):\n\n # dynamic tensor array input\n if self.decoder_mode:\n def fn1():\n return tf.zeros([self.batch_size, self.output_size])\n def fn2():\n def fn2_1():\n return self.target_output[:, time - 1, :]\n\n def fn2_2():\n inds = tf.argmax(outputs_cache.read(time - 1), axis=-1)\n return tf.one_hot(inds, depth=self.output_size)\n\n if self.use_teacher:\n return tf.cond(self.teacher_force[time - 1], fn2_1, fn2_2)\n else:\n return fn2_2()\n\n feed_value = tf.cond(time>0,fn2,fn1)\n\n\n if not self.use_emb_decoder:\n r = tf.reshape(feed_value, [self.batch_size, self.input_decoder_size])\n step_input = r\n elif self.dual_emb:\n step_input = tf.matmul(feed_value, self.W_emb_decoder)\n else:\n step_input = tf.matmul(feed_value, self.W_emb_encoder)\n\n else:\n if self.use_emb_decoder:\n if self.dual_emb:\n step_input = tf.matmul(self.unpacked_input_decoder_data.read(time), self.W_emb_decoder)\n else:\n step_input = tf.matmul(self.unpacked_input_decoder_data.read(time), self.W_emb_encoder)\n else:\n step_input = self.unpacked_input_decoder_data.read(time)\n print(step_input.shape)\n print('ssss')\n\n # compute one step of controller\n if not self.use_mem and self.attend_dim > 0:\n print('normal attention or mix pointer mode without memory')\n output_list = self._step_op_decoder(time, step_input, memory_state, controller_state, 
encoder_controller_hiddens)\n elif self.use_mem and self.attend_dim > 0:\n print('attention and mix pointer mode with memory')\n output_list = self._step_op_decoder(time, step_input, memory_state, controller_state, encoder_write_weightings)\n else:\n output_list = self._step_op_decoder(time, step_input, memory_state, controller_state)\n # update memory parameters\n # update memory parameters\n\n # new_controller_state = tf.zeros(1)\n new_memory_state = tuple(output_list[0:7])\n new_controller_state = output_list[11] # state hidden values\n\n if self.nlayer>1:\n try:\n controller_hiddens = controller_hiddens.write(time, new_controller_state[-1][-1])\n print('state include c and h')\n except:\n controller_hiddens = controller_hiddens.write(time, new_controller_state[-1])\n print('state include only h')\n else:\n controller_hiddens = controller_hiddens.write(time, new_controller_state[-1])\n print('single layer')\n outputs = outputs.write(time, output_list[7]) # new output is updated\n outputs_cache = outputs_cache.write(time, output_list[7]) # new output is updated\n # collecting memory view for the current step\n free_gates = free_gates.write(time, output_list[8])\n allocation_gates = allocation_gates.write(time, output_list[9])\n write_gates = write_gates.write(time, output_list[10])\n read_weightings = read_weightings.write(time, output_list[5])\n write_weightings = write_weightings.write(time, output_list[4])\n usage_vectors = usage_vectors.write(time, output_list[1])\n\n # all variables have been updated should be return for next step reference\n return (\n time + 1, # 0\n new_memory_state, # 1\n outputs, # 2\n free_gates, allocation_gates, write_gates, # 3 4 5\n read_weightings, write_weightings, usage_vectors, # 6 7 8\n new_controller_state, # 9\n outputs_cache, # 10\n controller_hiddens, # 11\n encoder_write_weightings, #12\n encoder_controller_hiddens, #13\n )", "title": "" }, { "docid": "a093a6632fc382b3929cc0a506a141aa", "score": "0.5027724", "text": "def decoder(x):\n # Decoding layer 1\n with tf.name_scope('decoder1'):\n with tf.name_scope('weights'):\n weights1 = weight_variable([64, 512], stddev=0.1)\n variable_summaries(weights1)\n with tf.name_scope('biases'):\n biases1 = bias_variable([512], init_val=0.1)\n layer1 = fc_layer(x, weights1, biases1)\n\n # Decoding layer 2\n with tf.name_scope('decoder2'):\n with tf.name_scope('weights'):\n weights2 = weight_variable([512, 2048], stddev=0.01)\n variable_summaries(weights1)\n with tf.name_scope('biases'):\n biases2 = bias_variable([2048], init_val=0.01)\n layer2 = fc_layer(layer1, weights2, biases2)\n\n # Decoding layer 3\n with tf.name_scope('decoder3'):\n with tf.name_scope('weights'):\n weights3 = weight_variable(\n [2048, INPUT_WIDTH * INPUT_HEIGHT * NUM_CHANNELS], stddev=0.01)\n variable_summaries(weights2)\n with tf.name_scope('biases'):\n biases3 = bias_variable(\n [INPUT_WIDTH * INPUT_HEIGHT * NUM_CHANNELS], init_val=0.01)\n layer3 = fc_layer(layer2, weights3, biases3)\n return layer3", "title": "" }, { "docid": "36f2d0d5db5116672c711d680021054a", "score": "0.5014367", "text": "def max_pool_backward_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n\n pass\n #############################################################################\n # END OF YOUR CODE #\n 
#############################################################################\n return dx", "title": "" }, { "docid": "7472491b42ea034803bb03717b821f68", "score": "0.50054073", "text": "def max_pool_backward_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx", "title": "" }, { "docid": "cb9c6fa669139000bccc059dbc84b66c", "score": "0.49974525", "text": "def __init__(self, embed_size, hidden_size, vocab_size, num_layers,\n n_max_seq=100,\n n_head=8, d_k=64, d_v=64, d_inner_hid=1024,\n dropout=0.1):\n super(DecoderRNN, self).__init__()\n self.tgt_word_emb = nn.Embedding(vocab_size, embed_size, Constants.PAD)\n\n n_position = n_max_seq + 1\n self.n_max_seq = n_max_seq\n\n d_model = embed_size\n d_word_vec = embed_size\n\n self.d_model = d_model\n\n self.position_enc = nn.Embedding(\n n_position, d_word_vec, padding_idx=Constants.PAD)\n self.position_enc.weight.data = position_encoding_init(n_position, d_word_vec)\n self.dropout = nn.Dropout(dropout)\n\n self.layer_stack = nn.ModuleList([\n DecoderLayer(d_model, d_inner_hid, n_head, d_k, d_v, dropout=dropout)\n for _ in range(num_layers)])\n\n self.linear = nn.Linear(embed_size, vocab_size)\n self.linear.weight.data.uniform_(-0.1, 0.1)\n self.linear.bias.data.fill_(0)", "title": "" }, { "docid": "224346c0dccef7d3054103376a2684da", "score": "0.49945682", "text": "def rnn_decoder_training(cell,\n embeddings,\n output_layer,\n batch_size,\n sequence_length,\n swap_memory=True):\n logger.debug('Building dynamic decode subgraph V4 for training.')\n \n # Initialise `AttentionWrapperState` with provided RNN state\n # batch_size = tf.shape(embeddings)[1]\n state_init = cell.zero_state(batch_size, tf.float32)\n _dprint('rnn_decoder_training: Initial state: {}'.format(state_init))\n _dprint('rnn_decoder_training: Cell state size: {}'.format(cell.state_size))\n \n helper = tf.contrib.seq2seq.TrainingHelper(\n inputs=embeddings,\n sequence_length=sequence_length,\n time_major=True)\n decoder = tf.contrib.seq2seq.BasicDecoder(\n cell=cell,\n helper=helper,\n initial_state=state_init,\n output_layer=output_layer)\n dec_outputs, dec_states, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder=decoder,\n output_time_major=True,\n impute_finished=True,\n maximum_iterations=None,\n parallel_iterations=1,\n swap_memory=swap_memory)\n \n # `dec_outputs` will be a `BasicDecoderOutput` object\n # `dec_states` may be a `AttentionWrapperState` object\n rnn_out = dec_outputs.rnn_output\n output_ids = dec_outputs.sample_id\n \n # Perform padding by copying elements from the last time step.\n # This is required if `impute_finished` is True.\n # This is skipped in inference mode.\n pad_time = tf.shape(embeddings)[0] - tf.shape(rnn_out)[0]\n pad = tf.tile(rnn_out[-1:, :, :], [pad_time, 1, 1])\n rnn_out = tf.concat([rnn_out, pad], axis=0) # (max_time, batch_size, rnn_size)\n pad_ids = tf.tile(output_ids[-1:, :], [pad_time, 1])\n output_ids = tf.concat([output_ids, pad_ids], axis=0) # (max_time, batch_size)\n \n return output_ids, rnn_out, dec_states", "title": "" }, { "docid": "fb2da1556e685d1c7a115029b4928286", "score": "0.49849042", "text": "def progressive_deepening(state, 
heuristic_fn=always_zero, depth_limit=INF,\n maximize=True) :\n\n\n anytime = AnytimeValue()\n\n for d in range(depth_limit):\n\n new_best_option = minimax_search_alphabeta(state, alpha=-INF, beta=INF, depth_limit=d+1, maximize=maximize, \\\n heuristic_fn=heuristic_fn)\n\n anytime.set_value(val=new_best_option)\n\n return anytime.copy()\n\n raise NotImplementedError", "title": "" }, { "docid": "80f8af413b553c3d52ebaf79db029e69", "score": "0.49845824", "text": "def build_graph(x_placeholder, vocab_size, embedding_size, dropout_placeholder, sequence_length, filter_sizes,\n num_filters, initW, pretrained=False, multichannel=False):\n\n # Keeping track of l2 regularization loss\n l2_loss = tf.constant(0.0)\n\n # Embedding layer\n with tf.name_scope(\"embedding\"):\n if(pretrained):\n W = initW\n else:\n W = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0), name=\"W\")\n if(multichannel):\n #Lookup word-ids in the embedding matrix\n embedded_chars = tf.nn.embedding_lookup(W, x_placeholder)\n #Transpose to get correct format\n embedded_chars_expanded = tf.transpose(embedded_chars, [0,1,3,2])\n else:\n #Lookup word-ids in the embedding matrix\n embedded_chars = tf.nn.embedding_lookup(W, x_placeholder)\n #CNN expects 3D input, expand to be 1 channel so it fits\n embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)\n\n # Create a convolution + maxpool layer for each filter size\n pooled_outputs = []\n for i, filter_size in enumerate(filter_sizes):\n with tf.name_scope(\"conv-maxpool-%s\" % filter_size):\n # Convolution Layer\n if(multichannel):\n filter_shape = [filter_size, embedding_size, NUM_CHANNELS, num_filters]\n else:\n filter_shape = [filter_size, embedding_size, 1, num_filters]\n #Initialize weights randomly\n W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name=\"W\")\n #Initialize bias\n b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name=\"b\")\n #Convolution operation, 2D convolution (patch strides over 2d surface for all input channels one at a time) on 4D input\n #VALID padding => No padding, means output width = (width-filter-width +1)/stride\n #strides = [1,1,1,1], one stride for each dimension\n conv = tf.nn.conv2d(embedded_chars_expanded, W, strides=[1, 1, 1, 1], padding=\"VALID\", name=\"conv\")\n # Apply RELU nonlinearity to the output of conv operation added with the bias\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name=\"relu\")\n # Maxpooling over the outputs of RELU\n # ksize is the dimensions of patch\n # the patch is slided over the input and outputs the max element of each region\n # (intuitively sub-sample the input by focusing on keywords and dropping noise)\n pooled = tf.nn.max_pool(h, ksize=[1, sequence_length - filter_size + 1, 1, 1], strides=[1, 1, 1, 1],\n padding='VALID', name=\"pool\")\n # Since we have one pooling for each conv channel we store all outputs (multi dimensional) in an array\n pooled_outputs.append(pooled)\n\n # Combine all the pooled features\n num_filters_total = num_filters * len(filter_sizes)\n # append pooled features on last axis\n h_pool = tf.concat(pooled_outputs, 3)\n # flatten output\n h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])\n\n # Add dropout\n with tf.name_scope(\"dropout\"):\n h_drop = tf.nn.dropout(h_pool_flat, dropout_placeholder)\n\n # Final (unnormalized) scores and predictions\n with tf.name_scope(\"output\"):\n # Weights between pooled features and output, uses \"Xavier\" initialization from paper \"Understanding the difficulty of training deep feedforward neural 
networks\"\n W = tf.get_variable(\n \"W\",\n shape=[num_filters_total, NUM_CLASSES],\n initializer=tf.contrib.layers.xavier_initializer())\n # initialize bias\n b = tf.Variable(tf.constant(0.1, shape=[NUM_CLASSES]), name=\"b\")\n # l2 loss\n l2_loss += tf.nn.l2_loss(W)\n l2_loss += tf.nn.l2_loss(b)\n # h_drop x weights + b\n logits = tf.nn.xw_plus_b(h_drop, W, b, name=\"scores\")\n # cast logits to binary predictions\n predictions = tf.where(logits > 0.5, tf.ones_like(logits), tf.zeros_like(logits), name=\"predictions\")\n return logits, predictions, l2_loss", "title": "" }, { "docid": "b745bb2d5c4e36280934d800d485f579", "score": "0.4980384", "text": "def dcgan_generator(inputs,\n depth=64,\n final_size=64,\n num_outputs=3,\n is_training=True,\n reuse=None,\n scope='DCGANGenerator',\n fused_batch_norm=False):\n normalizer_fn = tf.contrib.layers.batch_norm\n normalizer_fn_args = {\n 'is_training': is_training,\n 'zero_debias_moving_mean': True,\n 'fused': fused_batch_norm,\n }\n # inputs.get_shape().assert_has_rank(2)\n if log(final_size) != int(log(final_size)):\n raise ValueError('`final_size` (%i) must be a power of 2.' % final_size)\n if final_size < 8:\n raise ValueError(\n '`final_size` (%i) must be greater than 8.' % final_size)\n\n end_points = {}\n with tf.variable_scope(scope, values=[inputs], reuse=reuse) as scope:\n with slim.arg_scope([normalizer_fn], **normalizer_fn_args):\n with slim.arg_scope([slim.conv2d_transpose],\n normalizer_fn=normalizer_fn,\n stride=2,\n kernel_size=4):\n if len(inputs.get_shape()) == 2:\n # Init stage.\n num_layers = int(log(final_size)) - 1\n net = tf.expand_dims(tf.expand_dims(inputs, 1), 1)\n else:\n # Next stage.\n num_layers = int(log(final_size)) - (\n int(log(inputs.shape[1])) - 1) - 1\n net = inputs\n\n if num_layers > 1:\n # First upscaling is different because it takes the input vector.\n current_depth = depth * 2 ** (num_layers - 1)\n scope = 'deconv1'\n # ! 
Default activation for slim.conv2d_transpose is relu.\n net = slim.conv2d_transpose(\n net, current_depth, stride=1, padding='VALID',\n scope=scope)\n end_points[scope] = net\n\n for i in range(2, num_layers):\n scope = 'deconv%i' % (i)\n current_depth = depth * 2 ** (num_layers - i)\n net = slim.conv2d_transpose(net, current_depth, scope=scope)\n end_points[scope] = net\n\n # Last layer has different normalizer and activation.\n scope = 'deconv%i' % (num_layers)\n net = slim.conv2d_transpose(\n net, depth, normalizer_fn=None, activation_fn=None,\n scope=scope)\n end_points[scope] = net\n\n # Convert to proper channels.\n scope = 'logits'\n logits = slim.conv2d(\n net,\n num_outputs,\n normalizer_fn=None,\n activation_fn=None,\n kernel_size=1,\n stride=1,\n padding='VALID',\n scope=scope)\n end_points[scope] = logits\n\n logits.get_shape().assert_has_rank(4)\n logits.get_shape().assert_is_compatible_with(\n [None, final_size, final_size, num_outputs])\n\n return logits, end_points", "title": "" }, { "docid": "80ca0d09dd12ab11fee1684fcd856b0b", "score": "0.49802408", "text": "def detect_and_replace_cycle(G, path_d, weight_d, mermap, max_node_index, kmer_size, debug=False):\n for seqid, path in path_d.iteritems():\n has_cycle = True\n while has_cycle:\n has_cycle = False\n for n in path:\n if path.count(n) > 1:\n # add in sanity check\n oldseq = stitch_string_from_path(path, mermap)\n has_cycle = True\n if debug:\n pdb.set_trace()\n # find first and last occurrence of n\n # NOTE: need to handle special case when i=0 and j=last\n i = path.index(n)\n j = len(path) - path[::-1].index(n) # is actually one beyond the last occurrence\n newnode = max_node_index + 1\n max_node_index += 1\n newmer = mermap[path[i]]\n for p in path[i+1:j]: newmer += mermap[p][kmer_size-1:]\n mermap[newnode] = newmer\n # update G before update path\n if i > 0: # if i==0, then nothing to connect with prev\n G.add_edge(path[i-1], newnode, weight=weight_d[seqid])\n if j < len(path): # if j=last, then nothing to connect with after\n G.add_edge(newnode, path[j], weight=weight_d[seqid])\n for k in xrange(max(0,i-1), min(j, len(path)-1)):\n s, t = path[k], path[k+1]\n G[s][t]['weight'] -= weight_d[seqid]\n # now we can update the path\n path_d[seqid] = path[:i] + [newnode] + path[j:]\n path = path_d[seqid]\n\n\n assert stitch_string_from_path(path, mermap) == oldseq\n break\n\n # clean up zero-weight edges and zero degree nodes\n # TODO here\n for s,t,d in G.edges(data=True):\n if d['weight']==0:\n G.remove_edge(s, t)\n bad = []\n for n in G.nodes_iter():\n if G.degree(n) == 0: bad.append(n)\n G.remove_nodes_from(bad)", "title": "" }, { "docid": "757d1cc114dd1f13854a8ede910a4cf8", "score": "0.4973797", "text": "def training_decoding_layer(self, dec_embed_input, summary_length, dec_cell, initial_state, output_layer,\n max_summary_length):\n\n training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,\n sequence_length=summary_length,\n time_major=False)\n\n training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,\n training_helper,\n initial_state,\n output_layer)\n\n training_logits, _, _ = tf.contrib.seq2seq.dynamic_decode(training_decoder,\n output_time_major=False,\n impute_finished=True,\n maximum_iterations=max_summary_length)\n return training_logits", "title": "" }, { "docid": "f138daa85e024eeaa40b7f20bf8f1ace", "score": "0.49732172", "text": "def symbolic_translate(self, inp, greedy=False, max_len = None,\n unroll_scan=False, eps = 1e-30, **flags):\n if unroll_scan:\n assert 
isinstance(max_len,int), \"if scan is unrolled, max_len must be a constant integer\"\n\n max_len = max_len if max_len is not None else 2 * inp.shape[1]\n\n # initial output tokens (BOS)\n bos = T.zeros_like(inp[:, 0]) + self.out_voc.bos_ix\n l_start = InputLayer((None,),bos)\n\n # Defining custom recurrent layer out of decoder\n rec = Recurrence(\n state_variables=merge_dicts(self.decoder.state_dict,\n {self.decoder.out: self.decoder.inp}),\n state_init=merge_dicts(self.decoder.init_dict, {self.decoder.out: l_start}),\n input_nonsequences=self.decoder.nonseq_dict,\n tracked_outputs=(self.decoder.out, self.decoder.probs, self.decoder.logprobs),\n n_steps=max_len,\n unroll_scan=unroll_scan\n )\n\n translations, logprobs = get_output(rec[self.decoder.out, self.decoder.logprobs],\n {self.encoder.inp:inp,\n l_start:bos},\n recurrence_flags=dict(flags,greedy=greedy),\n **flags)\n\n self.auto_updates = rec.get_automatic_updates()\n if len(self.auto_updates) != 0:\n print(\"symbolic_translate: Please collect auto_updates of random states \"\n \"after you called symbolic_translate (available at model.auto_updates)!\")\n\n # add first step (bos)\n translations = T.concatenate([bos[:,None],translations],axis=1)\n first_logprobs = T.zeros_like(logprobs[:,:1])\n logprobs = T.concatenate([first_logprobs,logprobs],axis=1)\n\n return translations,logprobs", "title": "" }, { "docid": "f4713fb20a5743a6853d737bf3b87285", "score": "0.4965183", "text": "def solve(self) -> dict:\n # Start time of solve phase\n self.comm_time.barrier()\n self.log_info(\"Start solve\")\n\n runtime_solve_start = time.time()\n for iteration in range(self.iter_max):\n\n self.solve_iter = iteration + 1\n time_it_start = time.time()\n self.iteration(lvl=0, cycle_type=self.cycle_type, iteration=iteration, first_f=True)\n self.comm_time.barrier()\n time_it_stop = time.time()\n self.convergence_criterion(iteration=iteration + 1)\n\n if iteration == 0:\n self.log_info('{0: <7}'.format(f\"iter {iteration + 1}\") +\n '{0: <32}'.format(f\" | conv: {self.conv[iteration + 1]}\") +\n '{0: <37}'.format(f\" | conv factor: -\") +\n '{0: <35}'.format(f\" | runtime: {time_it_stop - time_it_start} s\"))\n else:\n self.log_info('{0: <7}'.format(f\"iter {iteration + 1}\") +\n '{0: <32}'.format(f\" | conv: {self.conv[iteration + 1]}\") +\n '{0: <37}'.format(f\" | conv factor: {self.conv[iteration + 1] / self.conv[iteration]}\") +\n '{0: <35}'.format(f\" | runtime: {time_it_stop - time_it_start} s\"))\n\n if self.output_fcn is not None and self.output_lvl == 2:\n self.output_fcn(self)\n\n if self.conv[iteration + 1] < self.tol:\n break\n\n # Stop timer of solve phase\n self.comm_time.barrier()\n self.runtime_solve = time.time() - runtime_solve_start\n self.log_info(f\"Solve took {self.runtime_solve} s\")\n\n if self.output_fcn is not None and self.output_lvl == 1:\n self.output_fcn(self)\n\n self.ouput_run_information()\n return {'conv': self.conv[np.where(self.conv != 0)], 'time_setup': self.runtime_setup,\n 'time_solve': self.runtime_solve}", "title": "" }, { "docid": "f62cea01ebb1fe181992376f523efc8b", "score": "0.495691", "text": "def build_pipeline_graph(input: DataType, output: DataType, registry, max_list_depth=3, max_pipeline_width=3) -> \"PipelineBuilder\":\n \n # First we will unpack the input and output type and\n # store them in actual lists for easier use\n\n if isinstance(input, Tuple):\n input_type = list(input.inner)\n else:\n input_type = [input]\n\n if isinstance(output, Tuple):\n output_type = list(output.inner)\n else:\n output_type = 
[output]\n\n logger.info(f\"input_type={input_type}\")\n logger.info(f\"output_type={output_type}\")\n\n # Before starting, let's create all the List[T] wrappers up to \n # `max_list_depth` and add them to `registry`, so that they are available later\n for algorithm in list(registry):\n for _ in range(max_list_depth):\n algorithm = make_list_wrapper(algorithm)\n registry.append(algorithm)\n\n # We will also need an index to quickly find out which algorithms\n # accept each input type\n index = defaultdict(set)\n\n for algorithm in registry:\n types = _get_annotations(algorithm).input\n types = list(types.inner) if isinstance(types, Tuple) else [types]\n\n for t in types:\n index[t].add(algorithm)\n\n logger.info(f\"Built algorithm index with {len(index)} entries and {len(registry)} total algorithms.\")\n\n # The graph contains all the algorithms, each algorithm is connected\n # to all those nodes that it can process, which are nodes whose output\n # type is a superset of what the algorithm requires.\n G = Graph()\n\n # For each node stored in the graph, we will store also the full list\n # of all inputs and outputs that we can guarantee are available at this point.\n # Initially we add the `Start` node, which produces all of the inputs,\n # and the `End` node which consumes all the outputs.\n start_node = PipelineStart(input_type)\n end_node = PipelineEnd(output_type)\n G.add_edge(GraphSpace.Start, start_node)\n G.add_edge(end_node, GraphSpace.End)\n\n # We will apply a BFS algorithm at this point. We will make sure\n # that once a node is processed, all the algorithms to which it could\n # potentially connect are stored in the graph.\n # Initially the `Start` node is the only one open.\n open_nodes = [start_node]\n closed_nodes = set()\n\n while open_nodes:\n # This is the next node we will need to connect.\n node = open_nodes.pop(0)\n\n if node in closed_nodes:\n continue\n\n # When leaving this node we can guarantee that we have the types in this list.\n types = node.output\n logger.info(f\"Processing node={node}\")\n\n # We will need this method to check if all of the input types of and algorithm are\n # guaranteed at this point, i.e., if they are available in `types`,\n # or at least a conforming type is.\n def type_is_guaranteed(input_type):\n for other_type in types:\n if conforms(other_type, input_type):\n return True\n\n return False\n\n # In this point we have to identify all the algorithms that could continue\n # from this point on. 
These are all the algorithms whose input expects a subset\n # of the types that we already have.\n potential_algorithms = set()\n\n for t in types:\n potential_algorithms |= index[t]\n\n for algorithm in potential_algorithms:\n annotations = _get_annotations(algorithm)\n algorithm_input_types = list(annotations.input.inner) if isinstance(annotations.input, Tuple) else [annotations.input]\n algorithm_output_types = list(annotations.output.inner) if isinstance(annotations.output, Tuple) else [annotations.output]\n logger.info(f\"Analyzing algorithm={algorithm.__name__} with inputs={algorithm_input_types} and outputs={algorithm_output_types}\")\n\n if any(not type_is_guaranteed(input_type) for input_type in algorithm_input_types):\n logger.info(f\"Skipping algorithm={algorithm.__name__}\")\n continue\n \n # At this point we can add the current algorithm to the graph.\n # First, we make the current algorithm \"consume\" the input types,\n # hence, the output types produced at this point are the output types\n # this algorithm provides plus any input type not consumed so far.\n output_types = sorted(set([t for t in types if t not in algorithm_input_types] + algorithm_output_types), key=str)\n\n if len(output_types) > max_pipeline_width:\n continue\n \n # We add this node to the graph and we mark that it consumes the inputs,\n # so that later when sampling we can correctly align all the types.\n # When building the node, we can get a `ValueError` if the internal\n # grammar cannot be built; in that case, we simply skip it\n try:\n new_node = PipelineNode(algorithm=algorithm, input=types, output=output_types)\n G.add_node(new_node)\n G.add_edge(node, new_node)\n open_nodes.append(new_node)\n logger.info(f\"Adding node={algorithm.__name__} producing types={output_types}\")\n except ValueError as e:\n logger.warning(f\"Node={algorithm.__name__} cannot be built. 
Error={e}.\") \n\n # Let's check if we can add the `End` node.\n if all(type_is_guaranteed(t) for t in output_type):\n G.add_edge(node, end_node)\n logger.info(\"Connecting to end node\")\n \n closed_nodes.add(node)\n\n # Once done we have to check if the `End` node was at some point included in the graph.\n # Otherwise that means there is no possible path.\n if GraphSpace.End not in G:\n raise TypeError(\n \"No pipelines can be constructed from input:%r to output:%r.\"\n % (input, output)\n )\n\n # Now we remove all nodes that don't participate in any path\n # leaving to `End`\n reachable_from_end = set(nx.dfs_preorder_nodes(G.reverse(False), GraphSpace.End))\n unreachable_nodes = set(G.nodes) - reachable_from_end\n G.remove_nodes_from(unreachable_nodes)\n\n # If the node `Start` was removed, that means the graph is disconnected.\n if not GraphSpace.Start in G:\n raise TypeError(\n \"No pipelines can be constructed from input:%r to output:%r.\"\n % (input, output)\n )\n\n return PipelineBuilder(G, registry)", "title": "" }, { "docid": "34d034ac088d652a5977a5835df783fc", "score": "0.49559993", "text": "def dec_train_loop(self, init_state, mem, mem_emb, mem_mask, z_sample_enc, \n z_sample_ids, dec_inputs, dec_targets, dec_g):\n inspect = {}\n\n device = dec_inputs.device\n state_size = self.state_size\n batch_size = dec_inputs.size(0)\n max_len = dec_inputs.size(1)\n\n dec_cell = self.p_decoder\n state = init_state\n dec_inputs = dec_inputs.transpose(1, 0) # [max_len, batch, state]\n dec_targets = dec_targets.transpose(1, 0)\n dec_g = dec_g.transpose(1, 0)\n log_prob = []\n dec_outputs = []\n switch_g_nll = []\n switch_g_prob = []\n latent_state_vocab = torch.zeros(\n self.latent_vocab_size, self.vocab_size).to(device)\n z_sample_ids = z_sample_ids.transpose(1, 0)\n\n for i in range(max_len): \n # word loss \n dec_out, state = dec_cell(\n dec_inputs[i] + z_sample_enc, state, mem_emb, mem_mask)\n dec_out = dec_out[0]\n lm_logits = dec_cell.output_proj(dec_out)\n lm_prob = F.softmax(lm_logits, dim=-1)\n if(self.use_copy):\n _, copy_dist = self.p_copy_attn(dec_out, mem_emb, mem_mask)\n copy_prob = tmu.batch_index_put(copy_dist, mem, self.vocab_size)\n copy_g = F.sigmoid(self.p_copy_g(dec_out))\n out_prob = (1 - copy_g) * lm_prob + copy_g * copy_prob\n logits = out_prob.log()\n # TODO: memory-efficient loss calculation for pointers\n # log_prob_i = ... \n log_prob_i = -F.cross_entropy(logits, dec_targets[i], reduction='none')\n else: \n logits = lm_logits\n out_prob = lm_prob\n log_prob_i = -F.cross_entropy(logits, dec_targets[i], reduction='none')\n \n log_prob.append(log_prob_i) \n dec_outputs.append(logits.argmax(dim=-1))\n\n # loss_g\n out_x_prob, out_x = out_prob.max(dim=-1) # [batch]\n out_x_prob = out_x_prob.unsqueeze(1)\n out_emb = self.embeddings(out_x)\n weighted_out_emb =\\\n (1-out_x_prob).detach() * out_emb + out_x_prob * out_emb # ST trick\n switch_g_logits = self.p_switch_g(dec_out + weighted_out_emb).squeeze(1)\n switch_g_prob_ = F.sigmoid(switch_g_logits)\n switch_g_nll_ = -dec_g[i] * torch.log(switch_g_prob_ + 1e-10)\\\n -(1 - dec_g[i]) * torch.log(1 - switch_g_prob_ + 1e-10)\n switch_g_nll.append(switch_g_nll_)\n switch_g_prob.append(switch_g_prob_)\n\n # inspection\n latent_state_vocab[z_sample_ids[i]] += out_prob.detach()\n\n log_prob = torch.stack(log_prob) # [max_len, batch]\n log_prob_ = log_prob.clone()\n mask = dec_targets != self.pad_id # [max_len, batch]\n log_prob.masked_fill_(mask == 0, 0.) 
\n log_prob = log_prob.sum() / mask.sum()\n ppl = (-log_prob).detach().exp()\n\n switch_g_nll = torch.stack(switch_g_nll) # [max_len, batch]\n switch_g_nll.masked_fill_(mask == 0, 0.)\n switch_g_nll = switch_g_nll.sum() / mask.sum()\n\n switch_g_prob = torch.stack(switch_g_prob)\n switch_g_prob = switch_g_prob.transpose(1, 0)\n inspect['switch_g_prob'] = tmu.to_np(switch_g_prob)\n\n dec_outputs = torch.stack(dec_outputs).transpose(0, 1)\n inspect['train_predictions'] = tmu.to_np(dec_outputs)\n latent_state_vocab_ent =\\\n -latent_state_vocab * torch.log(latent_state_vocab + 1e-10)\n latent_state_vocab_ent = latent_state_vocab_ent.sum(dim=-1)\n inspect['latent_state_vocab_ent'] =\\\n latent_state_vocab_ent.detach().cpu().numpy()\n return log_prob, ppl, switch_g_nll, inspect", "title": "" }, { "docid": "d75ccfe21ea7927e7d714c5a3de2a3c3", "score": "0.49533895", "text": "def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \n target_sequence_length, max_summary_length, \n output_layer, keep_prob):\n dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, \n output_keep_prob=keep_prob)\n \n # for only input layer\n helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, \n target_sequence_length)\n \n decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, \n helper, \n encoder_state, \n output_layer)\n\n # unrolling the decoder layer\n outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, \n impute_finished=True, \n maximum_iterations=max_summary_length)\n return outputs", "title": "" }, { "docid": "3f7391a5d1aa8f61e8ae0d5349a044cc", "score": "0.4944701", "text": "def max_pool_backward_naive(dout, cache):\n dx = None\n x, pool_param = cache\n pool_height, pool_width, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']\n\n # ================================================================ #\n # YOUR CODE HERE:\n # Implement the max pooling backward pass.\n # ================================================================ #\n \n pool_width, pool_height, stride = pool_param['pool_width'], pool_param['pool_height'], pool_param['stride']\n N, C, H, W = x.shape\n N, C, H_dout, W_dout = dout.shape\n dx = x\n dout_temp = dout\n \n for n in range(N):\n for c in range(C):\n for i in range(H_dout):\n for j in range(W_dout):\n local_max = np.max(dx[n, c, i*stride:i*stride+pool_height, j*stride:j*stride+pool_width])\n dx_local = dx[n, c, i*stride:i*stride+pool_height, j*stride:j*stride+pool_width]\n dx_local[dx_local<local_max] = 0\n dx_local[dx_local != 0] = 1\n dx_local = dx_local*dout_temp[n, c, i, j]\n dx[n, c, i*stride:i*stride+pool_height, j*stride:j*stride+pool_width] = dx_local\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ # \n\n return dx", "title": "" }, { "docid": "f6c907300513572ded07308782db0461", "score": "0.49309647", "text": "def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \n target_sequence_length, max_summary_length, \n output_layer, keep_prob):\n dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, \n output_keep_prob=keep_prob)\n \n # for only input layer\n helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, \n target_sequence_length)\n \n decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,\n helper,\n encoder_state,\n output_layer)\n\n # unrolling the decoder layer\n outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, \n impute_finished=True, \n maximum_iterations=max_summary_length)\n 
return outputs, max_summary_length", "title": "" }, { "docid": "32d05b608d6e410d8f5f25f9c8b165b0", "score": "0.49146715", "text": "def max_pool_backward_naive(dout, cache):\n dx = None\n ###########################################################################\n # TODO: Implement the max pooling backward pass #\n ###########################################################################\n x, pool_param = cache\n\n N, depth, input_h, input_w = x.shape\n pool_h, pool_w, stride = \\\n pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']\n\n out_h = (input_h - pool_h) // stride + 1\n out_w = (input_w - pool_w) // stride + 1\n\n dx = np.zeros_like(x)\n\n for i in range(N):\n for oh in range(out_h):\n h_start = oh * stride\n h_end = oh * stride + pool_h\n\n for ow in range(out_w):\n w_start = ow * stride\n w_end = ow * stride + pool_w\n\n # Find neurons with maximum value within the selected area\n x_pool = x[i, :, h_start:h_end, w_start:w_end]\n max_x = np.max(x_pool, axis=(1, 2), keepdims=True)\n\n # Gradient with respect to input will only include the value of neuron,\n # which was activated during forward pass, else is zero.\n # No need to propagate on the non-activated values and they bring\n # no influence on the cost function\n neuron_activation_mask = x_pool == max_x\n dx[i, :, h_start:h_end, w_start:w_end] += \\\n neuron_activation_mask * dout[i, :, oh, ow][:, None, None]\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n return dx", "title": "" }, { "docid": "c012bed22cc15ea69b194de389e13779", "score": "0.4908056", "text": "def vdcnn_model(embedding_size=97, seq_maxlen=512, n_quantized_characters=97):\n \n input_text = Input(shape=(seq_maxlen,))\n x = Embedding(input_dim=n_quantized_characters, output_dim=embedding_size,\n input_length=seq_maxlen)(input_text)\n #x = _convolutional_block(64)(x)\n x = Conv1D(filters=64, kernel_size=3, strides=2, padding=\"same\")(x)\n\n #filters = [64, 128, 256, 512]\n filters = [64, 128, 256, 512, 1024]\n\n for n_filt in filters:\n ### convolutional block\n x = Conv1D(n_filt, kernel_size=3, strides=1, padding='same', \n activation='linear',\n kernel_initializer=RandomNormal(mean=0.0, stddev=0.001)) (x)\n \n x = BatchNormalization() (x)\n x = PReLU() (x) #PReLU\n \n x = Conv1D(n_filt, kernel_size=3, strides=1, padding='same', \n activation='linear',\n kernel_initializer=RandomNormal(mean=0.0, stddev=0.001)) (x)\n \n x = BatchNormalization() (x)\n x = PReLU() (x)\n ###\n #if n_filt != filters[-1]:\n # print(\"adding\", n_filt, filters[-1])\n x = MaxPooling1D(pool_size=3, strides=2, padding='same') (x)\n\n # k-max pooling (Finds values and indices of the k largest entries for the last dimension)\n top_k = 8\n def _top_k(x):\n x = tf.transpose(x, [0, 2, 1])\n k_max = tf.nn.top_k(x, k=top_k)\n return tf.reshape(k_max[0], (-1, filters[-1] * top_k))\n \n k_max = Lambda(_top_k, output_shape=(filters[-1] * top_k,))(x)\n\n\n #x = GlobalMaxPool1D()(x)\n \n x = Dropout(0.2)(Dense(128)(k_max))\n x = PReLU() (x)\n x = Dropout(0.2)(Dense(128)(x))\n x = PReLU() (x)\n \n\n y_pred = Dense(6, activation='sigmoid')(x)\n model = Model(inputs=input_text, outputs=y_pred)\n model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', auc_roc])\n \n return model", "title": "" }, { "docid": "9b0de0dd60d2a1b082658b5963f60665", "score": "0.49080053", "text": "def greedy_decode(model, src, src_mask, 
src_lengths, max_len=100, sos_index=1, eos_index=None):\n\n with torch.no_grad():\n encoder_hidden, encoder_final = model.encode(src, src_mask, src_lengths)\n # initialize y0 to sos\n prev_y = torch.ones(1, 1).fill_(sos_index).type_as(src)\n trg_mask = torch.ones_like(prev_y)\n\n # list of predicted words\n output = []\n # list to store the attention energies alphai\n attention_scores = []\n hidden = None\n\n for i in range(max_len):\n with torch.no_grad():\n out, hidden, pre_output = model.decode(\n encoder_hidden, encoder_final, src_mask,\n prev_y, trg_mask, hidden)\n\n # we predict from the pre-output layer, which is\n # a combination of Decoder state, prev emb, and context\n # pre_output needs to be passed to the generator which applies the mlp to project the vector to the target\n # space, then , applies a softmax\n prob = model.generator(pre_output[:, -1])\n\n # torch.max returns the index, argmax and for our case we need only the argmax which is the index of the word with max prob\n _, next_word = torch.max(prob, dim=1)\n next_word = next_word.data.item()\n output.append(next_word)\n prev_y = torch.ones(1, 1).type_as(src).fill_(next_word)\n attention_scores.append(model.decoder.attention.alphas.cpu().numpy())\n\n output = np.array(output)\n\n # cut off everything starting from </s>\n # (only when eos_index provided)\n if eos_index is not None:\n first_eos = np.where(output == eos_index)[0]\n if len(first_eos) > 0:\n output = output[:first_eos[0]]\n\n return output, np.concatenate(attention_scores, axis=1)", "title": "" }, { "docid": "466df4e443214c8c4e5f92733e67fdab", "score": "0.48991504", "text": "def progressive_deepening(state, heuristic_fn=always_zero, depth_limit=INF,\n maximize=True) :\n anytime_value=AnytimeValue()\n for i in range (1,depth_limit+1):\n res = minimax_search_alphabeta(state,-INF,INF,heuristic_fn,i,maximize)\n anytime_value.set_value(res)\n return anytime_value", "title": "" }, { "docid": "f1485e7a4fd4b79073d57f720f027e46", "score": "0.48958927", "text": "def pie_encdec(self):\r\n\r\n # Generate input data. 
the shapes is (sequence_lenght,length of flattened features)\r\n _encoder_input = Input(shape=(self._observe_length, self._encoder_feature_size),\r\n name='encoder_input')\r\n\r\n # Temporal attention module\r\n _attention_net = self.attention_temporal(_encoder_input, self._observe_length)\r\n\r\n # Generate Encoder LSTM Unit\r\n encoder_model = self.create_lstm_model(name='encoder_network')\r\n _encoder_outputs_states = encoder_model(_attention_net)\r\n _encoder_states = _encoder_outputs_states[1:]\r\n\r\n # Generate Decoder LSTM unit\r\n decoder_model = self.create_lstm_model(name='decoder_network', r_state=False)\r\n _hidden_input = RepeatVector(self._predict_length)(_encoder_states[0])\r\n _decoder_input = Input(shape=(self._predict_length, self._decoder_feature_size),\r\n name='pred_decoder_input')\r\n\r\n # Embedding unit on the output of Encoder\r\n _embedded_hidden_input = Dense(self._embed_size, activation='relu')(_hidden_input)\r\n _embedded_hidden_input = Dropout(self._embed_dropout,\r\n name='dropout_dec_input')(_embedded_hidden_input)\r\n\r\n decoder_concat_inputs = Concatenate(axis=2)([_embedded_hidden_input, _decoder_input])\r\n\r\n # Self attention unit\r\n att_input_dim = self._embed_size + self._decoder_feature_size\r\n decoder_concat_inputs = self.attention_element(decoder_concat_inputs, att_input_dim)\r\n\r\n # Initialize the decoder with encoder states\r\n decoder_output = decoder_model(decoder_concat_inputs,\r\n initial_state=_encoder_states)\r\n decoder_output = Dense(self._prediction_size,\r\n activation='linear',\r\n name='decoder_dense')(decoder_output)\r\n\r\n net_model = Model(inputs=[_encoder_input, _decoder_input],\r\n outputs=decoder_output)\r\n net_model.summary()\r\n\r\n return net_model", "title": "" }, { "docid": "936e5c4337a29c8db5ac8b4eefbf3974", "score": "0.48958263", "text": "def greedy_search(self, encoder_output, use_cache, reuse):\n batch_size = tf.shape(encoder_output)[0]\n\n preds = tf.ones([batch_size, 1], dtype=tf.int32) * 2\n scores = tf.zeros([batch_size], dtype=tf.float32)\n finished = tf.zeros([batch_size], dtype=tf.bool)\n cache = tf.zeros([batch_size, 0, self._config.num_blocks, self._config.hidden_units])\n\n def step(i, finished, preds, scores, cache):\n # Where are we.\n i += 1\n\n # Call decoder and get predictions.\n decoder_output, cache = self.decoder_with_caching(preds, cache, encoder_output, is_training=False, reuse=reuse)\n _, next_preds, next_scores = self.test_output(decoder_output, reuse=reuse)\n next_preds = next_preds[:, None, 0]\n next_scores = next_scores[:, 0]\n\n # Update.\n scores = scores + next_scores\n preds = tf.concat([preds, next_preds], axis=1)\n\n # Whether sequences finished.\n has_eos = tf.equal(next_preds[:, 0], 3)\n finished = tf.logical_or(finished, has_eos)\n\n return i, finished, preds, scores, cache\n\n def not_finished(i, finished, preds, scores, cache):\n return tf.logical_and(\n tf.reduce_any(tf.logical_not(finished)),\n tf.less_equal(\n i,\n tf.reduce_min([tf.shape(encoder_output)[1] + 50, self._config.test.max_target_length])\n )\n )\n\n i, finished, preds, scores, cache = \\\n tf.while_loop(cond=not_finished,\n body=step,\n loop_vars=[0, finished, preds, scores, cache],\n shape_invariants=[\n tf.TensorShape([]),\n tf.TensorShape([None]),\n tf.TensorShape([None, None]),\n tf.TensorShape([None]),\n tf.TensorShape([None, None, None, None])],\n back_prop=False)\n\n preds = preds[:, 1:] # remove <S> flag\n return preds", "title": "" }, { "docid": "7cc4f3ef71b9850d3def86b9bf305723", "score": 
"0.48915005", "text": "def train(\n max_int: int = 128,\n batch_size: int = 16,\n training_steps: int = 500,\n learning_rate: float = 0.001,\n print_output_every_n_steps: int = 10,\n):\n input_length = int(math.log(max_int, 2))\n\n # Models\n generator = Generator(input_length)\n discriminator = Discriminator(input_length)\n\n # Optimizers\n generator_optimizer = torch.optim.Adam(\n generator.parameters(), lr=0.001\n )\n discriminator_optimizer = torch.optim.Adam(\n discriminator.parameters(), lr=0.001\n )\n\n # loss\n loss = nn.BCELoss()\n gen_loss = []\n dis_loss = []\n\n for i in range(training_steps):\n # zero the gradients on each iteration\n generator_optimizer.zero_grad()\n\n # Create noisy input for generator\n # Need float type instead of int\n noise = torch.randint(0, 2, size=(batch_size, input_length)).float()\n generated_data = generator(noise)\n\n # Generate examples of even real data\n # true labels: [1,1,1,1,1,1,....] i.e all ones\n # true data: [[0,0,0,0,1,0,0],....] i.e binary code for even numbers\n true_labels, true_data = generate_even_data(\n max_int, batch_size=batch_size\n )\n true_labels = torch.tensor(true_labels).float()\n true_data = torch.tensor(true_data).float()\n\n # Train the generator\n # We invert the labels here and don't train the discriminator because we want the generator\n # to make things the discriminator classifies as true.\n # true labels: [1,1,1,1,....]\n discriminator_out_gen_data = discriminator(generated_data)\n generator_loss = loss(\n discriminator_out_gen_data.squeeze(), true_labels\n )\n gen_loss.append(generator_loss.item())\n generator_loss.backward()\n generator_optimizer.step()\n\n # Train the discriminator \n # Teach Discriminator to distinguish true data with true label i.e [1,1,1,1,....]\n discriminator_optimizer.zero_grad()\n discriminator_out_true_data = discriminator(true_data)\n discriminator_loss_true_data = loss(\n discriminator_out_true_data.squeeze(), true_labels\n )\n\n # add .detach() here think about this\n discriminator_out_fake_data = discriminator(generated_data.detach())\n fake_labels = torch.zeros(batch_size) # [0,0,0,.....]\n discriminator_loss_fake_data = loss(\n discriminator_out_fake_data.squeeze(), fake_labels \n )\n # total discriminator loss\n discriminator_loss = (\n discriminator_loss_true_data + discriminator_loss_fake_data\n ) / 2\n \n dis_loss.append(discriminator_loss.item())\n \n discriminator_loss.backward()\n discriminator_optimizer.step()\n if i % print_output_every_n_steps == 0:\n output = convert_float_matrix_to_int_list(generated_data)\n even_count = len(list(filter(lambda x: (x % 2 == 0), output)))\n print(f\"steps: {i}, output: {output}, even count: {even_count}/16, Gen Loss: {np.round(generator_loss.item(),4)}, Dis Loss: {np.round(discriminator_loss.item(),4)}\")\n\n history = {}\n history['dis_loss'] = dis_loss\n history['gen_loss'] = gen_loss\n\n return generator, discriminator, history", "title": "" }, { "docid": "b1688a055f7d7773bc02d6100a6b0478", "score": "0.4889757", "text": "def solve2(self):\n i = 0\n j = 0\n max_iter = 100\n itr = 0\n solved = False\n\n while not solved:\n # while not solved and itr < max_iter:\n while (i, j) in self.fixed:\n i, j = self.next_element(i, j)\n\n while not solved:\n # while not solved and itr < max_iter:\n # itr += 1\n if i > 8 or j > 8:\n # Then we solved it!\n solved = True\n break\n\n self.available = self.get_available(i, j)\n # print i, j, self.available\n\n if len(self.available) == 0:\n # Backtrack\n self.grid[i][j] = 0\n i, j = 
self.prev_element(i, j)\n while (i, j) in self.fixed:\n i, j = self.prev_element(i, j)\n continue\n\n # Get next available\n self.grid[i][j] = self.available.pop(0)\n # print self.__str__()\n\n if self.verify2():\n # Move forward\n i, j = self.next_element(i, j)\n while (i, j) in self.fixed:\n i, j = self.next_element(i, j)", "title": "" }, { "docid": "28c701fa9801cee5d1953a40344bccba", "score": "0.4889034", "text": "def building_2dense_model_task2(max_len, dict_size, number_neurons, n_class, drop_per, drop_hid, final_act, folder, \n optimizer=Adam()):\n input_seq = Input(shape=(max_len, dict_size), dtype='float32')\n dropout_seq = Dropout(drop_per)(input_seq)\n \n #Denses\n dense_seq1 = Dense(number_neurons[0], activation='relu')(dropout_seq)\n dropout_seq1 = Dropout(drop_hid)(dense_seq1)\n dense_seq2 = Dense(number_neurons[1], activation='relu')(dropout_seq1)\n dropout_seq2 = Dropout(drop_hid)(dense_seq2)\n flatten_seq = Flatten()(dropout_seq2)\n main_dense = Dense(n_class, activation=final_act)(flatten_seq)\n model = Model(inputs=[input_seq], outputs=[main_dense])\n print(model.summary())\n \n #adamm = Adam()\n adamm = optimizer\n model.compile(loss='categorical_crossentropy', optimizer = adamm, metrics=['accuracy'])\n \n # saving the model\n file_model = os.path.join(absPath, 'data/', folder, 'model.h5')\n\n model.save(file_model)\n return model", "title": "" }, { "docid": "e61dd8137350efc08f3b552f4b8926f0", "score": "0.4884382", "text": "def max_pool_backward(dout, cache):\n ##############################################################################\n # IMPLEMENT YOUR CODE #\n ##############################################################################\n (x, pool_param) = cache\n N, H, W, C = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n SH, SW = stride[1], stride[2]\n # padding == 'valid'\n PH = 0\n PW = 0\n # output size\n OH = int((H - pool_height + 2*0)/SH + 1)\n OW = int((W - pool_width + 2*0)/SW + 1)\n # compute dx\n dx = np.zeros(x.shape)\n for i in range(N):\n for k in range(OH):\n for l in range(OW):\n for c in range(C):\n temp = x[i, k*SH:k*SH+pool_height, l*SW:l*SW+pool_width, c]\n max_idx = np.unravel_index(np.argmax(temp, axis=None), temp.shape)\n temp = np.zeros(temp.shape)\n temp[max_idx] = 1\n dx[i, k*SH:k*SH+pool_height, l*SW:l*SW+pool_width, c] += temp * dout[i, k, l, c]\n #pass\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dx", "title": "" }, { "docid": "7826b621c6d2303f0e86eeff0075b295", "score": "0.4884062", "text": "def build_dilatedconv_model_gradual(is_training, inputs, params):\n seis = inputs['Seis']\n assert seis.get_shape().as_list() == [None, params.input_size_z, params.input_size_x, params.input_size_y, params.input_size_c]\n out = seis\n\n bn_momentum = params.bn_momentum\n num_channels=params.num_channels#Number of channels kept constant in each layer\n\n # Network divided into blocks for easily splitting over the two GPUs being used\n # Note some empirical experimentation required to decide optimal memory splitting over the two GPUs\n\n num_conv_layers_block1=2\n dilate_factors_block1=[1,1] # No dilation of conv filters for first two layers\n\n num_conv_layers_block2=2\n dilate_factors_block2=[2,4] # Increase dilation exponentially\n\n num_conv_layers_block3=0\n dilate_factors_block3=[8]\n\n num_conv_layers_block4=3\n 
dilate_factors_block4=[8,16,32]\n\n\n # Graph for first gpu\n with tf.device('/gpu:0'):\n\n # conv3d layers in first block\n for j in range(num_conv_layers_block1):\n d=dilate_factors_block1[j]\n out = tf.layers.conv3d(out, num_channels, 3, 1, padding='same',dilation_rate=(d,d,d))\n if params.use_batch_norm: # Flag for using batch-norm\n out = tf.layers.batch_normalization(out, momentum=bn_momentum, training=is_training)\n out = tf.nn.relu(out)\n if params.use_epistemic: # Flag for using Monte-Carlo dropout\n out=tf.nn.dropout(out,keep_prob=params.dropout_keep_prob)\n\n # first conv3d_transpose layer to go upsample time dimension of seismic to depth dimension of facies volumes\n out=tf.layers.conv3d_transpose(out, num_channels, (33,1,1), 1, padding='valid')#30 for nt=66, 26 for nt=75\n if params.use_batch_norm:\n out = tf.layers.batch_normalization(out, momentum=bn_momentum, training=is_training)\n out = tf.nn.relu(out)\n if params.use_epistemic: # Flag for using Monte-Carlo dropout\n out=tf.nn.dropout(out,keep_prob=params.dropout_keep_prob)\n\n # conv3d layers in second block\n for j in range(num_conv_layers_block2):\n d=dilate_factors_block2[j]\n out = tf.layers.conv3d(out, num_channels, 3, 1, padding='same',dilation_rate=(d,d,d))\n if params.use_batch_norm: # Flag for using batch-norm\n out = tf.layers.batch_normalization(out, momentum=bn_momentum, training=is_training)\n out = tf.nn.relu(out)\n if params.use_epistemic: # Flag for using Monte-Carlo dropout\n out=tf.nn.dropout(out,keep_prob=params.dropout_keep_prob)\n\n # Graph for second gpu\n with tf.device('/gpu:1'):\n\n # conv3d layers in third block\n for j in range(num_conv_layers_block3):\n d=dilate_factors_block3[j]\n out = tf.layers.conv3d(out, num_channels, 3, 1, padding='same',dilation_rate=(d,d,d))\n if params.use_batch_norm:\n out = tf.layers.batch_normalization(out, momentum=bn_momentum, training=is_training)\n out = tf.nn.relu(out)\n if params.use_epistemic:\n out=tf.nn.dropout(out,keep_prob=params.dropout_keep_prob)\n\n # second conv3d_transpose layer to go upsample time dimension of seismic to depth dimension of facies volumes\n out=tf.layers.conv3d_transpose(out, num_channels, (68,1,1), (2,1,1), padding='valid')#62 for nt=66, 52 for nt=71\n if params.use_batch_norm:\n out = tf.layers.batch_normalization(out, momentum=bn_momentum, training=is_training)\n out = tf.nn.relu(out)\n if params.use_epistemic:\n out=tf.nn.dropout(out,keep_prob=params.dropout_keep_prob)\n\n # conv3d layers in fourth block\n for j in range(num_conv_layers_block4):\n d=dilate_factors_block4[j]\n out = tf.layers.conv3d(out, num_channels, 3, 1, padding='same',dilation_rate=(d,d,d))\n if params.use_batch_norm:\n out = tf.layers.batch_normalization(out, momentum=bn_momentum, training=is_training)\n out = tf.nn.relu(out)\n if params.use_epistemic:\n out=tf.nn.dropout(out,keep_prob=params.dropout_keep_prob)\n\n # Final conv3D layer to predict logts for each facies class\n out = tf.layers.conv3d(out, params.num_labels, 3, 1, padding='same')\n if params.use_batch_norm:\n out = tf.layers.batch_normalization(out, momentum=bn_momentum, training=is_training)\n\n logits=out\n\n return logits", "title": "" }, { "docid": "1fbbf974c0a3efcce0886aa24c623938", "score": "0.48769832", "text": "def __init__(self, sess, input_dim_list=[784,400]):\n assert len(input_dim_list) >= 2\n self.W_list = []\n self.encoding_b_list = []\n self.decoding_b_list = []\n self.dim_list = input_dim_list\n self.learning_rate = tf.placeholder(tf.float32)\n ## Encoders parameters\n for i in 
range(len(input_dim_list)-1):\n init_max_value = np.sqrt(6. / (self.dim_list[i] + self.dim_list[i+1]))\n self.W_list.append(tf.Variable(tf.random_uniform([self.dim_list[i],self.dim_list[i+1]],\n np.negative(init_max_value),init_max_value)))\n self.encoding_b_list.append(tf.Variable(tf.random_uniform([self.dim_list[i+1]],-0.1,0.1)))\n ## Decoders parameters\n for i in range(len(input_dim_list)-2,-1,-1):\n self.decoding_b_list.append(tf.Variable(tf.random_uniform([self.dim_list[i]],-0.1,0.1)))\n ## Placeholder for input\n self.input_x = tf.placeholder(tf.float32,[None,self.dim_list[0]])\n ## coding graph :\n last_layer = self.input_x\n for weight,bias in zip(self.W_list,self.encoding_b_list):\n hidden = tf.sigmoid(tf.matmul(last_layer,weight) + bias)\n last_layer = hidden\n self.hidden = hidden \n ## decode graph:\n for weight,bias in zip(reversed(self.W_list),self.decoding_b_list):\n hidden = tf.sigmoid(tf.matmul(last_layer,tf.transpose(weight)) + bias)\n last_layer = hidden\n self.recon = last_layer\n \n self.cost = 200 * tf.reduce_mean(tf.square(self.input_x - self.recon))\n# self.cost = 200*tf.losses.log_loss(self.recon, self.input_x)\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cost)\n sess.run(tf.global_variables_initializer())", "title": "" }, { "docid": "6f6641641b99e918def11b22bb5173b3", "score": "0.48722062", "text": "def _generate_beam_search(\n self,\n input_ids,\n cur_len,\n max_length,\n min_length,\n sampling,\n temperature,\n top_k,\n top_p,\n repetition_penalty,\n batch_size,\n num_return_sequences,\n length_penalty,\n num_beams,\n vocab_size,\n encoder_outputs,\n graph_embeddings=None,\n attention_mask=None,\n ):\n\n incremental_state = {} # starts empty but is changed inside decoder forward\n\n # generated hypotheses\n generated_hyps = [\n BeamHypotheses(num_beams, max_length, length_penalty)\n for _ in range(batch_size)\n ]\n\n # scores for each sentence in the beam\n beam_scores = torch.zeros(\n (batch_size, num_beams), dtype=torch.float, device=input_ids.device\n )\n\n # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times\n if sampling is False:\n beam_scores[:, 1:] = -1e9\n beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)\n\n # done sentences\n done = [False for _ in range(batch_size)]\n\n while cur_len < max_length:\n decoder_out = self.decoder.forward(\n input_ids,\n encoder_out=encoder_outputs,\n incremental_state=incremental_state,\n graph_embeddings=graph_embeddings,\n ) # (batch_size * num_beams, cur_len, vocab_size)\n next_token_logits = decoder_out[0][\n :, -1, :\n ] # (batch_size * num_beams, vocab_size)\n\n scores = F.log_softmax(\n next_token_logits, dim=-1\n ) # (batch_size * num_beams, vocab_size)\n\n scores = self.postprocess_next_token_scores(\n scores=scores,\n input_ids=input_ids,\n cur_len=cur_len,\n min_length=min_length,\n max_length=max_length,\n repetition_penalty=repetition_penalty,\n batch_size=batch_size,\n num_beams=num_beams,\n )\n\n assert scores.shape == (\n batch_size * num_beams,\n vocab_size,\n ), \"Shapes of scores: {} != {}\".format(\n scores.shape, (batch_size * num_beams, vocab_size)\n )\n\n if sampling:\n _scores = scores + beam_scores[:, None].expand_as(\n scores\n ) # (batch_size * num_beams, vocab_size)\n # Temperature\n if temperature != 1.0:\n _scores = _scores / temperature\n # Top-p/top-k filtering\n _scores = top_k_top_p_filtering(\n _scores, top_k=top_k, top_p=top_p, 
min_tokens_to_keep=2\n ) # (batch_size * num_beams, vocab_size)\n # re-organize to group the beam together to sample from all beam_idxs\n _scores = _scores.contiguous().view(\n batch_size, num_beams * vocab_size\n ) # (batch_size, num_beams * vocab_size)\n\n # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)\n probs = F.softmax(_scores, dim=-1)\n next_tokens = torch.multinomial(\n probs, num_samples=2 * num_beams\n ) # (batch_size, num_beams * 2)\n # Compute next scores\n next_scores = torch.gather(\n _scores, -1, next_tokens\n ) # (batch_size, num_beams * 2)\n # sort the sampled vector to make sure that the first num_beams samples are the best\n next_scores, next_scores_indices = torch.sort(\n next_scores, descending=True, dim=1\n )\n next_tokens = torch.gather(\n next_tokens, -1, next_scores_indices\n ) # (batch_size, num_beams * 2)\n\n else:\n next_scores = scores + beam_scores[:, None].expand_as(\n scores\n ) # (batch_size * num_beams, vocab_size)\n\n # re-organize to group the beam together (we are keeping top hypothesis accross beams)\n next_scores = next_scores.view(\n batch_size, num_beams * vocab_size\n ) # (batch_size, num_beams * vocab_size)\n\n next_scores, next_tokens = torch.topk(\n next_scores, 2 * num_beams, dim=1, largest=True, sorted=True\n )\n\n assert (\n next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)\n )\n\n # next batch beam content\n next_batch_beam = []\n\n # for each sentence\n for batch_idx in range(batch_size):\n\n # if we are done with this sentence, add a pad token\n if done[batch_idx]:\n assert (\n len(generated_hyps[batch_idx]) >= num_beams\n ), \"Batch can only be done if at least {} beams have been generated\".format(\n num_beams\n )\n next_batch_beam.extend(\n [(0, self.pad_token_id, 0)] * num_beams\n ) # pad the batch\n continue\n\n # next sentence beam content, this will get added to next_batch_beam\n next_sent_beam = []\n\n # next tokens for this sentence\n for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(\n zip(next_tokens[batch_idx], next_scores[batch_idx])\n ):\n # get beam and token IDs\n beam_id = beam_token_id // vocab_size\n token_id = beam_token_id % vocab_size\n\n effective_beam_id = batch_idx * num_beams + beam_id\n # add to generated hypotheses if end of sentence\n if (self.eos_token_id is not None) and (\n token_id.item() == self.eos_token_id\n ):\n # if beam_token does not belong to top num_beams tokens, it should not be added\n is_beam_token_worse_than_top_num_beams = (\n beam_token_rank >= num_beams\n )\n if is_beam_token_worse_than_top_num_beams:\n continue\n generated_hyps[batch_idx].add(\n input_ids[effective_beam_id].clone(),\n beam_token_score.item(),\n )\n else:\n # add next predicted token since it is not eos_token\n next_sent_beam.append(\n (beam_token_score, token_id, effective_beam_id)\n )\n\n # once the beam for next step is full, don't add more tokens to it.\n if len(next_sent_beam) == num_beams:\n break\n\n # Check if we are done so that we can save a pad step if all(done)\n done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(\n next_scores[batch_idx].max().item(), cur_len\n )\n\n # update next beam content\n assert len(next_sent_beam) == num_beams, \"Beam should always be full\"\n next_batch_beam.extend(next_sent_beam)\n assert len(next_batch_beam) == num_beams * (\n batch_idx + 1\n ), \"We should have added num_beams each step\"\n\n # stop when we are done with each sentence\n if all(done):\n break\n\n # 
sanity check / prepare next batch\n assert len(next_batch_beam) == batch_size * num_beams\n beam_scores = beam_scores.new([x[0] for x in next_batch_beam])\n beam_tokens = input_ids.new([x[1] for x in next_batch_beam])\n beam_idx = input_ids.new([x[2] for x in next_batch_beam])\n\n # re-order batch and update current length\n input_ids = input_ids[beam_idx, :]\n input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)\n cur_len = cur_len + 1\n\n # finalize all open beam hypotheses and add to generated hypotheses\n for batch_idx in range(batch_size):\n if done[batch_idx]:\n continue\n\n # test that beam scores match previously calculated scores if not eos and batch_idx not done\n if self.eos_token_id is not None and all(\n (token_id % vocab_size).item() != self.eos_token_id\n for token_id in next_tokens[batch_idx]\n ):\n assert torch.all(\n next_scores[batch_idx, :num_beams]\n == beam_scores.view(batch_size, num_beams)[batch_idx]\n ), \"If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}\".format(\n next_scores[:, :num_beams][batch_idx],\n beam_scores.view(batch_size, num_beams)[batch_idx],\n )\n\n # need to add best num_beams hypotheses to generated hyps\n for beam_id in range(num_beams):\n effective_beam_id = batch_idx * num_beams + beam_id\n final_score = beam_scores[effective_beam_id].item()\n final_tokens = input_ids[effective_beam_id]\n generated_hyps[batch_idx].add(final_tokens, final_score)\n\n # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch\n output_batch_size = (\n batch_size if sampling else batch_size * num_return_sequences\n )\n output_num_return_sequences_per_batch = 1 if sampling else num_return_sequences\n\n # select the best hypotheses\n sent_lengths = input_ids.new(output_batch_size)\n best = []\n\n # retrieve best hypotheses\n for i, hypotheses in enumerate(generated_hyps):\n sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])\n for j in range(output_num_return_sequences_per_batch):\n effective_batch_idx = output_num_return_sequences_per_batch * i + j\n best_hyp = sorted_hyps.pop()[1]\n sent_lengths[effective_batch_idx] = len(best_hyp)\n best.append(best_hyp)\n\n # shorter batches are padded\n if sent_lengths.min().item() != sent_lengths.max().item():\n sent_max_len = min(sent_lengths.max().item() + 1, max_length)\n decoded = input_ids.new(output_batch_size, sent_max_len).fill_(\n self.pad_token_id\n )\n\n # fill with hypothesis and eos_token_id if necessary\n for i, hypo in enumerate(best):\n decoded[i, : sent_lengths[i]] = hypo\n if sent_lengths[i] < max_length:\n decoded[i, sent_lengths[i]] = self.eos_token_id\n else:\n # none of the hypotheses have an eos_token\n assert (len(hypo) == max_length for hypo in best)\n decoded = (\n torch.stack(best).type(torch.long).to(next(self.parameters()).device)\n )\n\n return decoded", "title": "" }, { "docid": "d491db74f293d3d116030608d9db59e1", "score": "0.48684165", "text": "def evaluate(opt):\n MIN_DEPTH = 1e-3\n MAX_DEPTH = 80\n\n opt.load_weights_folder = os.path.expanduser(opt.load_weights_folder)\n print(\"-> Loading weights from {}\".format(opt.load_weights_folder))\n\n # Load Encoder and Decoder\n encoder_path = os.path.join(opt.load_weights_folder, \"encoder.pth\")\n decoder_path = os.path.join(opt.load_weights_folder, \"depth.pth\")\n encoder_dict = torch.load(encoder_path)\n\n encoder = networks.ResnetEncoder(opt.num_layers, False)\n depth_decoder = 
networks.DepthDecoder(encoder.num_ch_enc, num_output_channels=3)\n\n model_dict = encoder.state_dict()\n encoder.load_state_dict({k: v for k, v in encoder_dict.items() if k in model_dict})\n depth_decoder.load_state_dict(torch.load(decoder_path))\n\n encoder.cuda()\n encoder.eval()\n depth_decoder.cuda()\n depth_decoder.eval()\n\n\n encoder_path = os.path.join('/home/shengjie/Documents/Project_SemanticDepth/tmp/patchmatch_bs/weights_13', \"encoder.pth\")\n decoder_path = os.path.join('/home/shengjie/Documents/Project_SemanticDepth/tmp/patchmatch_bs/weights_13', \"depth.pth\")\n encoder_dict = torch.load(encoder_path)\n\n encoder_bs = networks.ResnetEncoder(opt.num_layers, False)\n depth_decoder_bs = networks.DepthDecoder(encoder.num_ch_enc, num_output_channels=3)\n\n model_dict = encoder.state_dict()\n encoder_bs.load_state_dict({k: v for k, v in encoder_dict.items() if k in model_dict})\n depth_decoder_bs.load_state_dict(torch.load(decoder_path))\n\n encoder_bs.cuda()\n encoder_bs.eval()\n depth_decoder_bs.cuda()\n depth_decoder_bs.eval()\n\n\n filenames = readlines('/home/shengjie/Documents/Project_SemanticDepth/splits/eigen/test_files.txt')\n\n\n opt.frame_ids.append(\"s\")\n dataset = datasets.KITTIRAWDataset(opt.data_path, filenames,\n encoder_dict['height'], encoder_dict['width'],\n [0], 4, is_train=False)\n dataloader = DataLoader(dataset, 16, shuffle=False, num_workers=opt.num_workers,\n pin_memory=True, drop_last=False)\n\n count = 0\n with torch.no_grad():\n for idx, inputs in enumerate(dataloader):\n for key, ipt in inputs.items():\n if not (key == 'entry_tag' or key == 'syn_tag'):\n inputs[key] = ipt.to(torch.device(\"cuda\"))\n input_color = inputs[(\"color\", 0, 0)].cuda()\n outputs = depth_decoder(encoder(input_color))\n outputs_bs = depth_decoder_bs(encoder_bs(input_color))\n for i in range(input_color.shape[0]):\n figbs = tensor2disp(outputs_bs[('disp', 0)][:,2:3,:,:], vmax = 0.1, ind = i)\n fig2 = tensor2disp(outputs[('disp', 0)][:, 2:3, :, :], vmax=0.1, ind=i)\n figrgb = tensor2rgb(inputs[(\"color\", 0, 0)], ind = i)\n combined = np.concatenate([np.array(figrgb), np.array(figbs), np.array(fig2)])\n pil.fromarray(combined).save(os.path.join('/media/shengjie/c9c81c9f-511c-41c6-bfe0-2fc19666fb32/Visualizations/Project_SemanDepth/vls_patchmatch_test_visualization', str(count) + '.png'))\n count = count + 1", "title": "" }, { "docid": "023ecdccfb30b43fd18ee9c1ad20655a", "score": "0.48672494", "text": "def __init__(self, hidden_size, output_size, max_length, max_predict_len, teacher_forcing_ratio):\n super(Decoder, self).__init__()\n\n self.hidden_size = hidden_size\n self.output_size = output_size\n self.max_length = max_length\n self.max_predict_len = max_predict_len\n self.teacher_forcing_ratio = teacher_forcing_ratio\n\n self.gru = nn.GRU(hidden_size + word_vec_d, hidden_size, batch_first=True)\n self.out = nn.Linear(hidden_size * 2 + word_vec_d, output_size)\n self.softmax = nn.Softmax(dim=1)\n self.log_softmax = nn.LogSoftmax(dim=1) # work with NLLLoss = CrossEntropyLoss\n self.tanh = nn.Tanh()\n\n self.attn = nn.Linear(hidden_size * 2, hidden_size)\n self.energy_combine = nn.Linear(hidden_size, 1, bias=False)\n self.attn_combine = nn.Linear(hidden_size + word_vec_d, hidden_size)", "title": "" }, { "docid": "60a470074c6203a11f62858deac61b44", "score": "0.48646644", "text": "def build_decoder(opt,tar_dict):\n\n max_len = 128\n tar_embedding = build_embedding(opt,tar_dict,max_len,for_encoder=False,dtype=opt.decode_pos)\n return transformer.Decoder(\n 
opt.dec_layer,opt.num_head,\n opt.model_dim,opt.nin_dim_de,len(tar_dict),max_len,\n opt.self_attn_type,opt.dropout,tar_embedding\n )", "title": "" }, { "docid": "c4aaeb0684b1b66f77b095752003b751", "score": "0.4864325", "text": "def gen_algorithm(self, limit):\n gen_counter = 1\n solution_found = False\n while gen_counter < limit and not solution_found:\n self.comp_fitnesses()\n new_population = self.elite(1)\n new_psize = 1\n while new_psize < self.population_size:\n ind1 = self.nat_selection()\n ind2 = self.nat_selection()\n offspring = ind1.recombine(ind2)\n offspring.mutate()\n new_population.append(offspring)\n new_psize += 1\n self.population = new_population\n sol, fit = self.best_solution()\n if gen_counter % 50 == 0:\n print('current best solution at the ' + str(gen_counter) + 'th iteration is',\n str(sol), '| fitness:', str(fit))\n solution_found = sol.attack_number() == 0\n gen_counter += 1\n print('\\n', 'program ended at the ' + str(gen_counter) + 'th iteration', '\\n')\n sol, fit = self.best_solution()\n print('overall best solution is',\n str(sol), '| fitness:', str(fit))", "title": "" }, { "docid": "fd3fddcde618dcf518197f3ca09095c2", "score": "0.48596293", "text": "def max_pool_backward_naive(dout, cache):\n # Get data back from cache\n x, pool_param = cache\n\n # Get input tensor and parameter\n N, C, H, W = x.shape\n S = pool_param[\"stride\"]\n H_P = pool_param[\"pool_height\"]\n W_P = pool_param[\"pool_width\"]\n N,C,HH,WW = dout.shape\n\n # Inititalize dx\n dx = None\n dx = np.zeros(x.shape)\n\n # Calculate dx (mask * dout)\n for n in xrange(N): # For each element on batch\n for depth in xrange(C): # For each input depth\n for r in xrange(HH): # Slide vertically (use stride on the fly)\n for c in xrange(WW): # Slide horizontally (use stride on the fly)\n # Get window and calculate the mask\n x_pool = x[n,depth,r*S:r*S+H_P,c*S:c*S+W_P]\n mask = (x_pool == np.max(x_pool))\n # Calculate mask*dout\n dx[n,depth,r*S:r*S+H_P,c*S:c*S+W_P] = mask*dout[n,depth,r,c]\n\n # Return dx\n return dx", "title": "" }, { "docid": "046de35a12787880acc9cc4ca12e122f", "score": "0.48569465", "text": "def build_decoder(self, encoder_outputs, encoder_final_state,\n decoder_inputs, decoder_targets,\n decoder_lengths, encoder_input_lengths):\n\n with tf.variable_scope(\"decoder\"):\n\n # Embeddings for ARPA phonetic characters\n arpa_embeddings = tf.Variable(tf.random_uniform((self.n_arpa, self.embed_dims), -1.0, 1.0),\n name=\"arpa_embeddings\")\n decoder_input_embeddings = tf.nn.embedding_lookup(arpa_embeddings, decoder_inputs)\n\n # Dense layer that each timestep output is sent to\n with tf.variable_scope(\"projection\"):\n projection_layer = tf.layers.Dense(\n self.n_arpa, use_bias=False)\n\n # Cell definition with dropout for training\n decoder_dims = self.hidden_dims\n if self.bidir:\n decoder_dims *= 2\n decoder_cell = self.cell_class_fn(decoder_dims)\n if self.mode == \"training\":\n decoder_cell = DropoutWrapper(decoder_cell,\n input_keep_prob=1.0-self.dropout,\n output_keep_prob=1.0-self.dropout,\n state_keep_prob=1.0-self.dropout)\n\n # Attention wrapper\n if self.attention_fn is not None:\n attention_states = tf.transpose(encoder_outputs, [1, 0, 2])\n attention_mechanism = self.attention_fn(\n decoder_dims, attention_states,\n memory_sequence_length=encoder_input_lengths)\n\n decoder_cell = tf.contrib.seq2seq.AttentionWrapper(\n decoder_cell, attention_mechanism,\n attention_layer_size=decoder_dims)\n\n # Define decoder initial state\n if self.attention_fn is not None:\n 
decoder_initial_state = decoder_cell.zero_state(self.batch_size, tf.float32).clone(\n cell_state=encoder_final_state)\n else:\n decoder_initial_state = encoder_final_state\n\n\n # Define helper\n # Input at each timestep is label ARPA phonetic sequence\n if self.mode == \"train\":\n helper = tf.contrib.seq2seq.TrainingHelper(\n inputs=decoder_input_embeddings, \n sequence_length=decoder_lengths,\n time_major=True)\n # Inference argmax predictions are inputs to the next timestep\n elif self.mode == \"inference\":\n start_tokens = tf.fill([self.batch_size], START_CODE)\n helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n arpa_embeddings,\n start_tokens,\n END_CODE)\n\n my_decoder = tf.contrib.seq2seq.BasicDecoder(\n decoder_cell,\n helper,\n decoder_initial_state,\n output_layer=projection_layer)\n\n # Inference predictions are limited to 2 times the input sequence length\n maximum_iterations = tf.round(tf.reduce_max(encoder_input_lengths) * 2)\n\n # Decoding loop output\n outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(\n my_decoder,\n output_time_major=True,\n impute_finished=True,\n maximum_iterations=maximum_iterations)\n logits = outputs.rnn_output\n\n # Transposed so that not time major\n predictions_arpa = tf.transpose(tf.argmax(logits, 2))\n\n return logits, predictions_arpa", "title": "" }, { "docid": "21a5c6cd88cdc41be3dadd064a597beb", "score": "0.48562294", "text": "def _init_optimizer(self):\n with tf.name_scope(\"optimization\"):\n with tf.name_scope(\"encoder_training\"):\n self.global_step_encoder = tf.Variable(0, trainable=False, name=\"global_step\")\n self.encoder_loss = tf.reduce_mean(tf.square(self.encoder_targets - self.encoder_outputs))\n encoder_optimizer = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate)\n encoder_gradients = encoder_optimizer.compute_gradients(self.encoder_loss,\n var_list=tf.trainable_variables(\"encoder\"))\n encoder_capped_gradients = [(tf.clip_by_value(grad,\n -self.config.gradient_clip,\n self.config.gradient_clip), var)\n for grad, var in encoder_gradients if grad is not None]\n self.encoder_train_op = encoder_optimizer.apply_gradients(encoder_capped_gradients,\n global_step=self.global_step_encoder)\n\n with tf.name_scope(\"decoder_training\"):\n self.global_step_decoder = tf.Variable(0, trainable=False, name=\"global_step\")\n self.decoder_loss_tr = tf.reduce_mean(tf.square(self.decoder_targets - self.training_decoder_outputs))\n self.decoder_loss_inf = tf.reduce_mean(tf.square(self.decoder_targets - self.inference_decoder_outputs))\n decoder_optimizer = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate)\n decoder_gradients = decoder_optimizer.compute_gradients(self.decoder_loss_tr,\n var_list=tf.trainable_variables(\"decoder\"))\n decoder_capped_gradients = [(tf.clip_by_value(grad,\n -self.config.gradient_clip,\n self.config.gradient_clip), var)\n for grad, var in decoder_gradients if grad is not None]\n self.decoder_train_op = decoder_optimizer.apply_gradients(decoder_capped_gradients,\n global_step=self.global_step_decoder)\n # ALTERNATIVE without gradient clipping\n # self.decoder_train_op = decoder_optimizer.minimize(self.decoder_loss_tr,\n # var_list=self.decoder_variable_list)\n\n with tf.name_scope(\"evaluation\"):\n r_one = tf.constant(1, dtype=tf.float32)\n\n ## encoder\n encoder_ssr = tf.reduce_sum(tf.square(tf.subtract(self.encoder_targets, self.encoder_outputs)))\n encoder_tss = tf.reduce_sum(tf.square(tf.subtract(self.encoder_targets,\n tf.reduce_mean(self.encoder_targets))))\n 
self.encoder_r_squared = tf.subtract(r_one, tf.divide(encoder_ssr, encoder_tss), name=\"encoder_r_squared\")\n\n # self.backscaled_encoder_targets = self.inv_transform(self.encoder_targets)\n # self.backscaled_encoder_outputs = self.inv_transform(self.encoder_outputs)\n #\n # backscaled_encoder_loss = tf.reduce_mean(tf.square(\n # self.backscaled_encoder_outputs - self.backscaled_encoder_targets))\n # self.encoder_rmse = tf.sqrt(backscaled_encoder_loss, name=\"encoder_rmse\")\n\n ## decoder\n decoder_ssr_tr = tf.reduce_sum(tf.square(tf.subtract(self.decoder_targets, self.training_decoder_outputs)))\n decoder_ssr_inf = tf.reduce_sum(tf.square(tf.subtract(self.decoder_targets, self.inference_decoder_outputs)))\n decoder_tss = tf.reduce_sum(tf.square(tf.subtract(self.decoder_targets,\n tf.reduce_mean(self.decoder_targets))))\n self.decoder_r_squared_tr = tf.subtract(r_one, tf.divide(decoder_ssr_tr, decoder_tss),\n name=\"decoder_r_squared_training\")\n self.decoder_r_squared_inf = tf.subtract(r_one, tf.divide(decoder_ssr_inf, decoder_tss),\n name=\"decoder_r_squared_inference\")\n\n # self.backscaled_decoder_targets = self.inv_transform(self.decoder_targets)\n # self.backscaled_decoder_outputs_tr = self.inv_transform(self.training_decoder_outputs)\n # self.backscaled_decoder_outputs_inf = self.inv_transform(self.inference_decoder_outputs)\n #\n # backscaled_decoder_loss_tr = tf.reduce_mean(tf.square(\n # self.backscaled_decoder_targets - self.backscaled_decoder_outputs_tr))\n # backscaled_decoder_loss_inf = tf.reduce_mean(tf.square(\n # self.backscaled_decoder_targets - self.backscaled_decoder_outputs_inf))\n # self.decoder_rmse_tr = tf.sqrt(backscaled_decoder_loss_tr, name=\"decoder_rmse_tr\")\n # self.decoder_rmse_inf = tf.sqrt(backscaled_decoder_loss_inf, name=\"decoder_rmse_inf\")", "title": "" }, { "docid": "979cfee7b350777cfa2ffc03011f3082", "score": "0.48473752", "text": "def get_loop(softmax_w, softmax_b):\n\n def train_loop(time, cell_output, cell_state, loop_state):\n if cell_output is None: \n next_cell_state = cell.zero_state(tf.shape(inputs)[1], tf.float32)\n emit_output = tf.zeros([self.voc.vocab_size], dtype=tf.float32)\n else:\n next_cell_state = cell_state\n emit_output = tf.nn.softmax(tf.matmul(cell_output, softmax_w) + softmax_b) \n elements_finished = (time >= sequence_lengths)\n finished = tf.reduce_all(elements_finished)\n next_input = tf.cond(\n finished,\n lambda: tf.zeros([self.BATCH_SIZE, self.voc.vocab_size], dtype=tf.float32),\n lambda: inputs_ta.read(time))\n next_loop_state = None\n return (elements_finished, next_input, next_cell_state,\n emit_output, next_loop_state)\n\n def sample_loop(time, cell_output, cell_state, loop_state):\n if cell_output is None: # time == 0\n if init_cell_state==None:\n next_cell_state = cell.zero_state(self.BATCH_SIZE, tf.float32)\n else:\n next_cell_state = tuple(tf.unstack(init_cell_state, axis=0))\n emit_output = tf.zeros([self.voc.vocab_size], dtype=tf.float32) \n cell_output = inputs_ta.read(time) \n elements_finished = tf.equal(tf.argmax(cell_output, axis=1), self.voc.vocab['EOS'])\n else:\n next_cell_state = cell_state\n cell_output = tf.matmul(cell_output, softmax_w) + softmax_b \n output_sampling = tf.squeeze(tf.multinomial(cell_output, 1))\n cell_output_softmax = tf.nn.softmax(cell_output)\n elements_finished = tf.equal(output_sampling, self.voc.vocab['EOS'])\n cell_output = tf.one_hot(output_sampling, depth=self.voc.vocab_size)\n emit_output = tf.where(tf.equal(cell_output, 0), \\\n tf.zeros_like(cell_output), 
cell_output_softmax)\n elements_finished = tf.logical_or(elements_finished, (time>=sequence_lengths))\n finished = tf.reduce_all(elements_finished)\n next_input = tf.cond(\n finished,\n lambda: tf.zeros([cell_output.get_shape()[0], self.voc.vocab_size], dtype=tf.float32),\n lambda: cell_output)\n next_loop_state = None\n return (elements_finished, next_input, next_cell_state,\n emit_output, next_loop_state)\n\n if sample:\n return sample_loop\n else:\n return train_loop", "title": "" }, { "docid": "82334f0446c9185cd523201f40135c43", "score": "0.4847052", "text": "def __init__(self, g: Graph):\n super().__init__()\n self.G = nx.DiGraph(nx.line_graph(g.G))\n self.input_n = g.input_n\n self.n = g.n + 1 # length of node representation\n self.k = self.n + 1 # length of edge representation\n self.N = self.G.number_of_nodes()\n self.E = self.G.number_of_edges()\n self.quadruples = []\n self._find_quadruples() # 1. find all quadruples.\n self.decompositions = []\n self._find_decompositions() # 2. find all decompositions.\n \"\"\"\n decompositions_with_cycles is a list of tuples:\n (current_decomposition_with_cycles, cycles_in_each_length_in_decomposition).\n current_decomposition_with_cycles is a list of all the cycles in the decomposition.\n cycles_in_each_length_in_decomposition is a list in size |E|, and the [i] place has the number of the \n cycles with the length of i+1 (the first place is [0] - has the number of cycles with length 1)\n \"\"\"\n self.decompositions_with_cycles = []\n \"\"\"\n unique_cycles_in_each_length is a list in size |E|, and the [i] place is a Set that has all the \n cycles (a cycle is a list) with the length of i+1 (the first place is [0] - has all the cycles with length 1)\n ***but the cycles in each set are sorted by sort function, so they are not in the order of the cycle!!!!!***\n \"\"\"\n self.unique_cycles_in_each_length = [[] for _ in range(int(self.E / 2))]\n self._find_decompositions_cycles() # 3. find all cycles in decompositions.", "title": "" }, { "docid": "e36b88b96fdc8cbdef2fb9c6c3184c16", "score": "0.48450765", "text": "def valueIterationAlgorithm(debug=False):\n total_states = 12\n gamma = 0.999 # Discount factor\n iteration = 0 # Iteration counter\n epsilon = 0.01 # Stopping criteria small value\n\n graph_list = list() # List containing the data for each iteration\n\n # Transition Matrix is 12 Starting States x 12 Next States x 4 Actions\n T = np.load(\"T.npy\")\n\n # Reward Vector\n r = np.array([-0.04, -0.04, -0.04, +1.0,\n -0.04, 0.0, -0.04, -1.0,\n -0.04, -0.04, -0.04, -0.04])\n\n # Utility Vectors (Arbitrary Utilities for each state. 
Usually zero)\n u_1 = np.array([0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0])\n\n # Once we reach equilibrium, we have hte utility values we were looking for to be used to estimate which\n # is the best move for each state\n\n while True:\n delta = 0\n u = u_1.copy()\n iteration += 1\n graph_list.append(u)\n\n for s in range(total_states):\n reward = r[s]\n v = np.zeros((1, total_states))\n v[0,s] = 1.0\n u_1[s] = getStateUtility(v, T, u, reward, gamma)\n delta = max(delta, np.abs(u_1[s] - u[s]))\n if debug:\n print \"Iterations: \" + str(iteration)\n print \"Delta: \" + str(delta)\n print \"Gamma: \" + str(gamma)\n print \"Epsilon: \" + str(epsilon)\n print \"=============================\"\n print u[0:4]\n print u[4:8]\n print u[8:12]\n print \"=============================\"\n if delta < epsilon * (1 - gamma) / gamma:\n print \"======= FINAL RESULT ========\"\n print \"Iterations: \" + str(iteration)\n print \"Delta: \" + str(delta)\n print \"Gamma: \" + str(gamma)\n print \"Epsilon: \" + str(epsilon)\n print \"=============================\"\n print u[0:4]\n print u[4:8]\n print u[8:12]\n print \"=============================\"\n break", "title": "" }, { "docid": "3319fe11a91db2c29934f32730b520d8", "score": "0.48438245", "text": "def viterbi():\n\n K = len(evidence)-1 # number of evidence given\n N = 2 # number of states\n T_1 = np.zeros((N, K)) # stores the probabilities of most likely paths so far (not normalized except for first element)\n T_2 = np.zeros((N, K)) # stores the most likely path so far to the state\n\n T_1[:, 0] = forward()[1] # first element is just the forward for t=1\n\n # go through evidence\n for j in range(1, len(evidence)-1):\n p = T_1[:, j-1]*(O[evidence[j+1]] @ T.transpose()) # implement equation 15.11 without maximization\n T_1[:,j] = np.max(p, 1) # find the max probability\n T_2[:,j] = np.argmax(p, 1) # find the state that maximizes this probability\n\n sequence = np.zeros(K) # used to store the sequence of states\n sequence[K-1] = np.argmax(T_1[:, K-1]) # find the most like state at the end\n\n # go backward from the most likely state at the end to find the path\n for j in range(K-1, 0, -1):\n sequence[j-1] = T_2[int(sequence[j]), j] # find the path using T_2 which has the most likely path to that state\n\n print(\"T_1: \", T_1)\n print(\"T_2: \", T_2)\n return sequence", "title": "" }, { "docid": "aa257229c6b33c840bc566266b6562ca", "score": "0.4829854", "text": "def test_infinite_decomposition_loop(self):\n\n class InfiniteOp(qml.operation.Operation):\n num_wires = 1\n\n def decomposition(self):\n return [InfiniteOp(self.wires)]\n\n qs = qml.tape.QuantumScript([InfiniteOp(0)])\n with pytest.raises(DeviceError, match=r\"Reached recursion limit trying to decompose\"):\n expand_fn(qs)", "title": "" }, { "docid": "560d2a659bdfd9e0b633b63c9ecad48a", "score": "0.48270342", "text": "def max_pool_backward_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n (x, pool_param) = cache\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n pool_stride = pool_param['stride']\n dx = np.zeros(x.shape)\n for i in range(dout.shape[2]):\n for j in range(dout.shape[3]):\n begin_h = i * pool_stride\n begin_w = j * pool_stride\n maxs = np.max(x[:, :, begin_h:begin_h+pool_height, begin_w:begin_w+pool_width], axis=(2, 3))\n indicies = 
np.zeros(x[:, :, begin_h:begin_h+pool_height, begin_w:begin_w+pool_width].shape)\n for row in range(maxs.shape[0]):\n for col in range(maxs.shape[1]):\n tupl = np.where(x[:, :, begin_h:begin_h+pool_height, begin_w:begin_w+pool_width]==maxs[row][col])\n indicies[tupl[0],tupl[1],tupl[2],tupl[3]] = 1\n \n dx[:, :, begin_h:begin_h+pool_height, begin_w:begin_w+pool_width] = dout[:, :, i:i+1, j:j+1] * indicies\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx", "title": "" }, { "docid": "03307046800dbc628e41824aad6f4ee9", "score": "0.48257664", "text": "def compute_bias(M, G, X, max_iter=1000, tol=1e-6, n_orient=1, verbose=None):\n n_sources = X.shape[0]\n\n lipschitz_constant = 1.1 * power_iteration_kron(G, X)\n\n # initializations\n D = np.ones(n_sources)\n Y = np.ones(n_sources)\n t = 1.0\n\n for i in range(max_iter):\n D0 = D\n\n # gradient step\n R = M - np.dot(G * Y, X)\n D = Y + np.sum(np.dot(G.T, R) * X, axis=1) / lipschitz_constant\n # Equivalent but faster than:\n # D = Y + np.diag(np.dot(np.dot(G.T, R), X.T)) / lipschitz_constant\n\n # prox ie projection on constraint\n if n_orient != 1: # take care of orientations\n # The scaling has to be the same for all orientations\n D = np.mean(D.reshape(-1, n_orient), axis=1)\n D = np.tile(D, [n_orient, 1]).T.ravel()\n D = np.maximum(D, 1.0)\n\n t0 = t\n t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t**2))\n Y.fill(0.0)\n dt = (t0 - 1.0) / t\n Y = D + dt * (D - D0)\n\n Ddiff = np.linalg.norm(D - D0, np.inf)\n\n if Ddiff < tol:\n logger.info(\n \"Debiasing converged after %d iterations \"\n \"max(|D - D0| = %e < %e)\" % (i, Ddiff, tol)\n )\n break\n else:\n Ddiff = np.linalg.norm(D - D0, np.inf)\n logger.info(\n \"Debiasing did not converge after %d iterations! 
\"\n \"max(|D - D0| = %e >= %e)\" % (max_iter, Ddiff, tol)\n )\n return D", "title": "" }, { "docid": "ad994efa3c846419e2b3dfb442080429", "score": "0.48234397", "text": "def decode(encoder_output):\n batch_size = tf.shape(src[\"length\"])[0]\n start_tokens = tf.fill([batch_size], constants.START_OF_SENTENCE_ID)\n end_token = constants.END_OF_SENTENCE_ID\n\n with tf.variable_scope(\"decoder\"):\n sampled_ids, _, sampled_length, _ = decoder.dynamic_decode_and_search(\n tgt_emb,\n start_tokens,\n end_token,\n vocab_size=tgt_vocab_size,\n initial_state=encoder_output[1],\n beam_width=5,\n maximum_iterations=200,\n output_layer=tgt_gen,\n mode=tf.estimator.ModeKeys.PREDICT,\n memory=encoder_output[0],\n memory_sequence_length=encoder_output[2])\n return sampled_ids, sampled_length", "title": "" }, { "docid": "59775cc276df8b6bb2bd1ee483265209", "score": "0.48222092", "text": "def construct_G(G_guess,out_graph,r_graph):\n\tout={}\n\tremaining={}\n\tremainingout={}\n\tmax_solution=[]\n\tG_constructed=nx.Graph()\n\ti=0\n\tfor connected_component2 in nx.connected_component_subgraphs(r_graph):\n\t\ti+=1\n\t\tj=0\n\t\t\n\t\tcc2_nodes=[]\n\t\tfor nodes_2 in connected_component2.nodes():\n\t\t\t#print(out_graph.nodes(),nodes_2)\n\t\t\tif not edge_between_out_node(G_guess,out_graph.nodes(),nodes_2):\n\t\t\t\tcc2_nodes.append(nodes_2)\n\t\tremaining[\"remaining_\"+str(i)]=(sorted(diff(connected_component2.nodes(),cc2_nodes)))\n\t\tif len(cc2_nodes)>0:\n\t\t\tG_constructed.add_edge(\"remainingout_\"+str(i),\"remaining_\"+str(i),weight=len(cc2_nodes))\n\t\t\tremainingout[\"remainingout_\"+str(i)]=(cc2_nodes)\n\t\tfor connected_component1 in nx.connected_component_subgraphs(out_graph):\n\t\t\tj+=1\n\t\t\tnumber_edges=edge_between(G_guess,connected_component1,connected_component2)\n\t\t\tif number_edges>0:\n\t\t\t\tG_constructed.add_edge(\"out_\"+str(j),\"remaining_\"+str(i),weight=(number_edges/len(connected_component1.nodes())))\n\t\t\tcc1_list=sorted(connected_component1.nodes())\t\t\t\n\t\t\tout[\"out_\"+str(j)]=(cc1_list)\n\t\t\t\n\t#print(out,remaining,remainingout)\n\tmax_matching=nx.max_weight_matching(G_constructed, maxcardinality=False)\n\t#print(max_matching)\n\tmax_matching_edges=[]\n\tfor items in max_matching:\n\t\tedge1=(items,max_matching[items])\n\t\tedge2=(max_matching[items],items)\n\t\tmax_matching_edges.append(edge1)\n\t\tmax_matching_edges.append(edge2)\n\t#print(max_matching_edges)\n\tnon_max_matching=(set(G_constructed.edges())-set(max_matching_edges))\n\t#print(non_max_matching)\n\tfor items in non_max_matching:\n\t\titems=list(items)\n\t\tif items[1] in out or items[1] in remainingout:\n\t\t\titems[0],items[1]=items[1],items[0]\n\t\tif items[0] in out:\n\t\t\tfor node1 in out[items[0]]:\n\t\t\t\tfor node2 in remaining[items[1]]:\n\t\t\t\t\tif G_guess.has_edge(node1,node2):\n\t\t\t\t\t\tmax_solution.append(node2) \n\t\tif items[0] in remainingout:\n\t\t\tfor node1 in remainingout[items[0]]:\n\t\t\t\tfor node2 in remaining[items[1]]:\n\t\t\t\t\tif G_guess.has_edge(node1,node2):\n\t\t\t\t\t\tmax_solution.append(node1)\t\n\t#print_graph_edge(G_constructed)\n\t#plt.show()\n\treturn list(set(max_solution))", "title": "" }, { "docid": "c9d4561132e439595af7a8eacf373566", "score": "0.48208776", "text": "def great_deluge(a, step_factor=500, max_iter=100, max_total_iters=1000):\n water_level = curr_cost = a.cost() # can't be worse than initial guess\n step_size = abs(water_level)/step_factor\n iter_count = 0\n total_iters = 0\n while iter_count < max_iter and total_iters < max_total_iters:\n 
new = a.perturb()\n new_cost = new.cost()\n if new_cost < water_level:\n if new_cost < curr_cost:\n water_level = max(curr_cost, water_level - step_size)\n iter_count = 0 # WARNING: iter_count is reset here!\n curr_cost = new_cost\n a = new\n else:\n iter_count += 1\n yield ((iter_count, total_iters), a)\n total_iters += 1", "title": "" }, { "docid": "85a729e0b06e00a8264b98bb95cb28dc", "score": "0.48183596", "text": "def generator(inputs):\n\n with tf.variable_scope(\"g_\") as scope:\n net = fc(inputs, 28672, 'fc1')\n net = tf.reshape(net, [-1, 4, 7, 1024])\n net = tf.nn.relu(net)\n net = deconv(net, [FLAGS.kernel_size,FLAGS.kernel_size,1024, 1024], 'deconv1')\n net = batch_norm(True, net, 'bn2')\n net = tf.nn.relu(net)\n net = deconv(net, [FLAGS.kernel_size, FLAGS.kernel_size, 512, 1024], 'deconv2')\n net = batch_norm(True, net, 'bn3')\n net = tf.nn.relu(net)\n net = deconv(net, [FLAGS.kernel_size, FLAGS.kernel_size, 256, 512], 'deconv3')\n net = batch_norm(True, net, 'bn4')\n net = tf.nn.relu(net)\n net = deconv(net, [FLAGS.kernel_size, FLAGS.kernel_size, 128, 256], 'deconv4')\n net = batch_norm(True, net, 'bn5')\n net = tf.nn.relu(net)\n net = deconv(net, [FLAGS.kernel_size, FLAGS.kernel_size, 2, 128], 'deconv5')\n net = net[:, :98, :201, :]\n\n # Map logmag and phase to [-1,1]\n logmag, phase = tf.unstack(net, axis=3)\n phase = tf.nn.tanh(phase)\n logmag = tf.nn.tanh(logmag)\n net = tf.stack([logmag, phase], axis=3)\n\n return net", "title": "" }, { "docid": "e11cac262c0e62bb1b77d41942b216b3", "score": "0.48152477", "text": "def generate_step(\n self,\n inputs,\n end_token_id=None,\n ):\n (\n encoder_token_ids,\n encoder_padding_mask,\n decoder_token_ids,\n decoder_padding_mask,\n ) = (\n inputs[\"encoder_token_ids\"],\n inputs[\"encoder_padding_mask\"],\n inputs[\"decoder_token_ids\"],\n inputs[\"decoder_padding_mask\"],\n )\n\n batch_size = ops.shape(encoder_token_ids)[0]\n\n # Create and seed cache with a single forward pass.\n (\n hidden_states,\n encoder_hidden_states,\n self_attention_cache,\n cross_attention_cache,\n ) = self._build_cache(\n encoder_token_ids, encoder_padding_mask, decoder_token_ids\n )\n # Compute the lengths of all user inputted tokens ids.\n row_lengths = ops.sum(ops.cast(decoder_padding_mask, \"int32\"), axis=-1)\n # Start at the first index that has no user inputted id.\n index = ops.min(row_lengths)\n\n def next(prompt, cache, index):\n # The cache index is the index of our previous token.\n cache_index = index - 1\n num_samples = ops.shape(prompt)[0]\n prompt = ops.slice(prompt, [0, cache_index], [num_samples, 1])\n\n def repeat_tensor(x):\n \"\"\"Repeats tensors along batch axis to match dim for beam search.\"\"\"\n if ops.shape(x)[0] == num_samples:\n return x\n return ops.repeat(x, repeats=num_samples // batch_size, axis=0)\n\n logits, hidden_states, cache, _ = self.call_decoder_with_cache(\n encoder_hidden_states=repeat_tensor(encoder_hidden_states),\n encoder_padding_mask=repeat_tensor(encoder_padding_mask),\n decoder_token_ids=prompt,\n self_attention_cache=cache,\n self_attention_cache_update_index=cache_index,\n cross_attention_cache=repeat_tensor(cross_attention_cache),\n cross_attention_cache_update_index=None,\n )\n return (\n ops.squeeze(logits, axis=1),\n ops.squeeze(hidden_states, axis=1),\n cache,\n )\n\n decoder_token_ids = self._sampler(\n next=next,\n prompt=decoder_token_ids,\n cache=self_attention_cache,\n index=index,\n mask=decoder_padding_mask,\n end_token_id=end_token_id,\n hidden_states=hidden_states,\n )\n\n # Compute an output 
padding mask with the token ids we updated.\n if end_token_id is not None:\n # Build a mask of `end_token_id` locations not in the original\n # prompt (not in locations where `decoder_padding_mask` is True).\n end_locations = ops.logical_and(\n ops.equal(decoder_token_ids, end_token_id),\n ops.logical_not(decoder_padding_mask),\n )\n end_locations = ops.cast(end_locations, \"int32\")\n # Use cumsum to get ones in all locations after `end_locations`.\n cumsum = ops.cast(ops.cumsum(end_locations, axis=-1), \"int32\")\n overflow = cumsum - end_locations\n # Our padding mask is the inverse of these overflow locations.\n decoder_padding_mask = ops.logical_not(ops.cast(overflow, \"bool\"))\n else:\n # Without early stopping, all locations will have been updated.\n decoder_padding_mask = ops.ones_like(\n decoder_token_ids, dtype=\"bool\"\n )\n\n return {\n \"decoder_token_ids\": decoder_token_ids,\n \"decoder_padding_mask\": decoder_padding_mask,\n }", "title": "" }, { "docid": "b409731185d1d8916ad5604d3b2491f0", "score": "0.4806887", "text": "def max_pool_backward_naive(dout, cache):\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n \n # https://leonardoaraujosantos.gitbooks.io/artificial-inteligence/content/pooling_layer.html\n \n x, maxIdx, pool_param = cache\n \n N, C, H, W = x.shape\n pool_height = pool_param['pool_height']\n pool_width = pool_param['pool_width']\n stride = pool_param['stride']\n \n _, _, out_h, out_w = dout.shape\n \n dx = np.zeros_like(x)\n \n for n in range(N):\n for c in range(C):\n for h in range(out_h):\n for w in range(out_w):\n x_pool = x[n, c, h*stride:h*stride + pool_height, w*stride:w*stride + pool_width]\n mask = (x_pool == np.max(x_pool))\n dx[n, c, h*stride:h*stride + pool_height, w*stride:w*stride + pool_width] = mask*dout[n, c, h, w]\n \n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx", "title": "" }, { "docid": "9a27365189467a9e6006df1a3867bb36", "score": "0.480069", "text": "def decoder_retraining(self):\n model = self.model\n data = self.data\n args = self.args\n x_decoder_retrain, y_decoder_retrain = self.x_decoder_retrain,self.y_decoder_retrain\n \n decoder = eval_model.get_layer('decoder')\n decoder_in = layers.Input(shape=(16*47,))\n decoder_out = decoder(decoder_in)\n retrained_decoder = models.Model(decoder_in,decoder_out)\n if (args.verbose):\n retrained_decoder.summary()\n retrained_decoder.compile(optimizer=optimizers.Adam(lr=args.lr),loss='mse',loss_weights=[1.0])\n if not os.path.exists(args.save_dir+\"/retrained_decoder.h5\"):\n retrained_decoder.fit(x_decoder_retrain, y_decoder_retrain, batch_size=args.batch_size, epochs=20)\n retrained_decoder.save_weights(args.save_dir + '/retrained_decoder.h5')\n else:\n retrained_decoder.load_weights(args.save_dir + '/retrained_decoder.h5')\n \n retrained_reconstructions = retrained_decoder.predict(x_decoder_retrain, batch_size=args.batch_size)\n self.save_output_image(retrained_reconstructions[:100],\"retrained reconstructions\")\n return retrained_decoder", "title": "" }, { "docid": "6af3465ba2459550abb780a72e1fc217", "score": "0.47957367", "text": "def decode(self, enc_hiddens: torch.Tensor, enc_masks: torch.Tensor,\n dec_init_state: Tuple[torch.Tensor, torch.Tensor], target_padded: 
torch.Tensor) -> torch.Tensor:\n\n # Chop of the <END> token for max length sentences.\n target_padded = target_padded[:-1]\n \n # Initialize the decoder state (hidden and cell)\n dec_state = dec_init_state\n \n enc_hiddens_att_linear = self.att_src_linear(enc_hiddens)\n \n batch_size = enc_hiddens.size(0)\n \n # Initialize previous combined output vector o_{t-1} as zero\n o_prev = torch.zeros(batch_size, self.hidden_size, device=self.device)\n \n tgt_embeds = self.tgt_embed(target_padded)\n\n # Initialize a list we will use to collect the combined output o_t on each step\n combined_outputs = []\n \n ### YOUR CODE HERE\n \n ### END YOUR CODE\n \n return combined_outputs", "title": "" }, { "docid": "8d7a80287253a1d5e59c89972cef39e0", "score": "0.47865495", "text": "def generate_cycles(self, max_length):\n\n vtx_used = [False] * len(self.vs) # vtx_used[i]==True iff vertex i is in current path\n\n def cycle(current_path):\n last_vtx = current_path[-1]\n if self.edge_exists(last_vtx, current_path[0]):\n yield current_path[:]\n if len(current_path) < max_length:\n for e in last_vtx.edges: \n v = e.tgt\n if (len(current_path) + shortest_paths_to_low_vtx[v.id] <= max_length\n and not vtx_used[v.id]):\n current_path.append(v)\n vtx_used[v.id] = True\n for c in cycle(current_path):\n yield c\n vtx_used[v.id] = False\n del current_path[-1]\n\n # Adjacency lists for transpose graph\n transp_adj_lists = [[] for v in self.vs]\n for edge in self.es:\n transp_adj_lists[edge.tgt.id].append(edge.src)\n\n for v in self.vs:\n shortest_paths_to_low_vtx = self.calculate_shortest_path_lengths(\n v, max_length - 1,\n lambda u: (w for w in transp_adj_lists[u.id] if w.id > v.id))\n vtx_used[v.id] = True\n for c in cycle([v]):\n yield c\n vtx_used[v.id] = False", "title": "" }, { "docid": "6f304e5755671808b01dbbf4c5a6b15d", "score": "0.47848698", "text": "def problem(gon=5, max_digit=16):\r\n\r\n extern_nodes = permutations(range(1, 2 * gon + 1), gon)\r\n\r\n arr = []\r\n for extern in extern_nodes:\r\n interns = [val for val in range(1, 2 * gon + 1) if val not in extern]\r\n for intern in permutations(interns):\r\n if len(set(extern + intern)) == len(extern + intern):\r\n _evaluate(\r\n _convert_triple(extern),\r\n _convert_triple(intern),\r\n arr\r\n )\r\n\r\n arr = [int(val) for val in arr if len(val) <= max_digit]\r\n return max(arr)", "title": "" }, { "docid": "3faf91f62856a3775930266d51d960da", "score": "0.47838292", "text": "def compute(self, max_iter=None):\n pass", "title": "" }, { "docid": "4b35370406049d094ee08e299c3029db", "score": "0.47830287", "text": "def build_decoder(shape_before_flattening, conv_filters, conv_kernel_size, conv_strides):\n\n # Number of Conv layers\n n_layers = len(conv_filters)\n\n # Define model input\n decoder_input = Input(shape=(Z_dim,), name='decoder_input')\n\n # To get an exact mirror image of the encoder\n new_shape = np.prod(shape_before_flattening) # flatten (highet * width * channels)\n x = Dense(new_shape)(decoder_input)\n x = Reshape(shape_before_flattening)(x)\n\n # Add convolutional layers\n for i in range(n_layers):\n x = Conv2DTranspose(filters=conv_filters[i],\n kernel_size=conv_kernel_size[i],\n strides=conv_strides[i],\n padding='same',\n name='decoder_conv_' + str(i)\n )(x)\n\n\n if i < n_layers - 1:\n x = LeakyReLU()(x)\n else:\n x = Activation('sigmoid')(x) # image pixels are between 0 and 1 so reconstructed image is the same\n\n # Define model output\n decoder_output = x\n decoder_model = Model(decoder_input, decoder_output)\n 
print(decoder_model.summary())\n return decoder_input, decoder_output, decoder_model", "title": "" }, { "docid": "4ba9e2651cd87545ab0800955a951d93", "score": "0.47830197", "text": "def run_beam_search_2nd(self, sess, vocab, encoder_input_batch,\n encoder_inputs, query_input_batch, query_inputs,\n tokens_predicted, encoder_outputs, encoder_state, query_outputs, query_state, decoder_inputs, prev_coverage_vec_different):\n # Run the encoder to get the encoder hidden states and decoder initial state\n\n self.model.dropout_param = encoder_input_batch[\"dropout\"]\n max_steps = int(self.config[\"Decoder\"][\"max_decoder_steps\"]) - 1 \n beam_size = int(self.config[\"BeamSearch\"][\"beam_size\"])\n\n initial_states,prev_decoder_states, prev_decoder_seq_length, masked_weights_props = sess.run([self.initial_states_2nd,self.prev_decoder_states, self.prev_decoder_seq_length, self.masked_weights_props],\n feed_dict={encoder_input_batch[\"dropout\"]: 1.0,self.post_first_decoder_pl:tokens_predicted,self.model.encoder_state:encoder_state,\n self.model.query_state:query_state})\n\n #print (\"Shape for prev_decoder_states {}, prev_decoder_attn_state {}, attns_state_different {}\".format(prev_decoder_states.shape, prev_decoder_attn_state, attns_state_different))\n dec_in_state_h = initial_states['decoder_state_h']\n dec_in_state_c = initial_states['decoder_state_c']\n attns_state_different = initial_states[\"attns_state_different\"]\n \n self.prev_time_step_data[\"prev_decoder_states\"] = self.prev_decoder_states\n self.prev_time_step_data[\"prev_decoder_attn_state\"] = initial_states[\"prev_decoder_attn_state\"]\n\n print (\"prev_decoder_states_after_run\", prev_decoder_states.shape)\n initial_states_place = self.model.initial_states\n\n new_prev_coverage_diff = []\n for p, cov in zip(prev_decoder_seq_length, prev_coverage_vec_different):\n new_prev_coverage_diff.append(np.sum(np.stack(cov[:p], axis=1), axis=1))\n\n prev_coverage_vec_different = new_prev_coverage_diff\n\n batch_size = int(self.config[\"BeamSearch\"][\"batch_size\"])\n max_length = int(self.config[\"Encoder\"][\"max_sequence_length\"])\n\n print (len(dec_in_state_c), len(initial_states[\"prev_coverage\"]), len(prev_decoder_states), len(encoder_inputs[\"content_weights\"]), len(prev_coverage_vec_different))\n # property_states = np.mean(encoder_outputs[:, max_length:,:], axis=1)\n hyps_batch = [ [ Hypothesis_2nd(tokens=[vocab.encode_word(\"<s>\")],\n probs=[1.0],\n state_h=dec_in_state_h[i],\n state_c=dec_in_state_c[i],\n state_temp_h = dec_in_state_h[i],\n state_temp_c = dec_in_state_c[i],\n prev_coverage_vec = initial_states[\"prev_coverage\"][i],\n prev_coverage_between_decoders = initial_states[\"prev_coverage_between_decoders\"][i],\n prev_coverage_vec_different = prev_coverage_vec_different[i],\n prev_decoder_states = prev_decoder_states[i],\n attns_state_different = attns_state_different[i],\n output_switch = [],\n attn_state = initial_states['attns_state'][i],\n combined_attn_state = initial_states['combined_attns_state'][i],\n content_weights = encoder_inputs[\"content_weights\"][beam_size*i], # as encoder inputs is already repeated beam size times\n words = encoder_inputs[\"word\"][beam_size*i],\n attn_values = [],\n ) for _ in xrange(int(self.config[\"BeamSearch\"][\"beam_size\"]))] for i in xrange(batch_size) ] \n\n print(\"Preksh1\", encoder_state.shape)\n results = [list([]) for _ in range(batch_size)] # this will contain finished hypotheses (those that have emitted the [STOP] token)\n \n steps = 0\n\n dec_hidden_size = 2* 
int(self.config[\"Encoder\"][\"hidden_size\"])\n\n #for key in encoder_inputs:\n # print(\"Size\", encoder_inputs[key].shape)\n # encoder_inputs[key] = np.repeat(encoder_inputs[key],beam_size,axis=0)\n\n prev_time_step_data_val = {}\n \n content_weights = [h.content_weights for hyps in hyps_batch for h in hyps]\n words = [h.words for hyps in hyps_batch for h in hyps]\n\n #print (\"MM QQ\",len(encoder_outputs), query_state.shape, len(h.content_weights))\n memory_states = np.repeat(encoder_outputs,beam_size,axis=0)\n query_states = np.repeat(query_state,beam_size,axis=0)\n\n decoder_states_prev_extend = np.repeat(decoder_inputs[\"property_word\"][:,1:], beam_size, axis=0)\n prev_decoder_states = np.repeat(prev_decoder_states, beam_size, axis=0)\n masked_weights_props = np.repeat(masked_weights_props, beam_size, axis=0)\n # prev_coverage_vec_different = np.repeat(prev_decoder_states, beam_size, axis=0)\n\n # print (\"After stacking\", len(prev_coverage_vec_different), len(prev_decoder_states))\n \n #print (\"DECODER STATE\", decoder_states_prev_extend.shape)\n # start of the decoder loop\n while steps < int(self.config[\"Decoder\"][\"max_decoder_steps\"]):\n latest_tokens = [h.latest_token for hyps in hyps_batch for h in hyps ] \n states_h = [h.state_h for hyps in hyps_batch for h in hyps ] # list of current decoder states of the hypotheses\n states_c = [h.state_c for hyps in hyps_batch for h in hyps] # list of current decoder states of the hypotheses\n states_temp_h = [h.state_temp_h for hyps in hyps_batch for h in hyps ] # list of current decoder states of the hypotheses\n states_temp_c = [h.state_temp_c for hyps in hyps_batch for h in hyps] # list of current decoder states of the hypotheses\n attns_state = [h.attn_state for hyps in hyps_batch for h in hyps]\n combined_attns_state = [h.combined_attn_state for hyps in hyps_batch for h in hyps]\n prev_coverage = [h.prev_coverage_vec for hyps in hyps_batch for h in hyps]\n prev_coverage_between_decoders = [h.prev_coverage_between_decoders for hyps in hyps_batch for h in hyps]\n prev_coverage_vec_different = [h.prev_coverage_vec_different for hyps in hyps_batch for h in hyps]\n attns_state_different = [h.attns_state_different for hyps in hyps_batch for h in hyps]\n\n\n # values that will go in the placeholder\n prev_time_step_data_val[\"token\"] = np.asarray(latest_tokens)#.reshape(batch_size*beam_size,-1)\n prev_time_step_data_val[\"attns_state\"] = np.asarray(attns_state)#.reshape(batch_size*beam_size,-1)\n prev_time_step_data_val[\"combined_attns_state\"] = np.asarray(combined_attns_state)#.reshape(batch_size*beam_size,-1)\n prev_time_step_data_val[\"content_weights\"] = np.asarray(content_weights)#.reshape(batch_size*beam_size,-1)\n prev_time_step_data_val[\"words\"] = np.asarray(words)#.reshape(batch_size*beam_size,-1)\n prev_time_step_data_val[\"state_h\"] = np.asarray(states_h)#.reshape(batch_size*beam_size,-1)\n prev_time_step_data_val[\"state_temp_h\"] = np.asarray(states_temp_h)#.reshape(batch_size*beam_size,-1)\n prev_time_step_data_val[\"state_c\"] = np.asarray(states_c)#.reshape(batch_size*beam_size,-1)\n prev_time_step_data_val[\"state_temp_c\"] = np.asarray(states_temp_c)#.reshape(batch_size*beam_size,-1)\n prev_time_step_data_val[\"prev_coverage_vec\"] = np.asarray(prev_coverage)\n prev_time_step_data_val[\"prev_coverage_vec_different\"] = np.asarray(prev_coverage_vec_different)\n prev_time_step_data_val[\"prev_coverage_between_decoders\"] = np.reshape(np.asarray(prev_coverage_between_decoders), (-1, 
int(self.config[\"Decoder\"][\"max_prop_steps\"])-1))\n prev_time_step_data_val[\"attns_state_different\"] = np.asarray(attns_state_different)\n prev_time_step_data_val[\"prev_decoder_states\"] = np.asarray(prev_decoder_states)\n\n # placeholders \n print (\"PREV COVERAGE BETWEEN DECODERS\", prev_time_step_data_val[\"prev_coverage_between_decoders\"].shape)\n\n prev_coverage_bool = self.config[\"Decoder\"][\"use_coverage\"] == \"True\"\n include_prop_weights = self.config[\"Decoder\"][\"include_prop_weights\"] == \"True\"\n\n\n feed_dict = {\n self.prev_time_step_data_second[\"token\"] : prev_time_step_data_val[\"token\"],\n self.prev_time_step_data_second[\"state_h\"] : prev_time_step_data_val[\"state_h\"],\n self.prev_time_step_data_second[\"state_c\"] : prev_time_step_data_val[\"state_c\"],\n self.prev_time_step_data_second[\"state_temp_h\"] : prev_time_step_data_val[\"state_temp_h\"],\n self.prev_time_step_data_second[\"state_temp_c\"] : prev_time_step_data_val[\"state_temp_c\"],\n self.prev_time_step_data_second[\"words\"] : prev_time_step_data_val[\"words\"],\n self.prev_time_step_data_second[\"attns_state\"] : prev_time_step_data_val[\"attns_state\"],\n self.prev_time_step_data_second[\"combined_attns_state\"] : prev_time_step_data_val[\"combined_attns_state\"],\n self.prev_time_step_data_second[\"content_weights\"] : prev_time_step_data_val['content_weights'],\n self.prev_time_step_data_second[\"prev_coverage\"] :prev_time_step_data_val[\"prev_coverage_vec\"],\n self.prev_time_step_data_second[\"prev_coverage_between_decoders\"]: prev_time_step_data_val[\"prev_coverage_between_decoders\"],\n self.prev_time_step_data_second[\"prev_coverage_vec_different\"] : prev_time_step_data_val[\"prev_coverage_vec_different\"],\n self.prev_time_step_data_second[\"prev_decoder_states\"]: prev_decoder_states,\n self.prev_time_step_data_second[\"attns_state_different\"] : prev_time_step_data_val[\"attns_state_different\"],\n\n self.memory_states : memory_states,\n self.masked_weights : encoder_inputs[\"seq_length\"],\n #self.model.model.masked_weights_props : masked_weights_props,\n self.prev_decoder_label_placeholder: decoder_states_prev_extend,\n self.feed_previous_placeholder: True,\n encoder_input_batch[\"dropout\"]: 1.0,\n encoder_input_batch[\"seq_length\"]: encoder_inputs[\"seq_length\"],\n self.content_weights: encoder_inputs[\"content_weights\"],\n\n }\n\n if self.config[\"Query\"][\"use_query\"] == \"True\":\n feed_dict[self.query_state] = query_states \n\n \"\"\"\n feed_dict_order = OrderedDict([\n\n (prev_tokens , prev_time_step_data_val[\"token\"]),\n (prev_state_h , prev_time_step_data_val[\"state_h\"]),\n (prev_state_c , prev_time_step_data_val[\"state_c\"]),\n (prev_state_temp_h , prev_time_step_data_val[\"state_temp_h\"]),\n (prev_state_temp_c , prev_time_step_data_val[\"state_temp_c\"]),\n (prev_words , prev_time_step_data_val[\"words\"]),\n (prev_attns_state , prev_time_step_data_val[\"attns_state\"]),\n (prev_content_weights , prev_time_step_data_val['content_weights']),\n # ( prev_prop_state , prev_time_step_data_val[\"property_state\"]),\n (prev_coverage_vec ,prev_time_step_data_val[\"prev_coverage_vec\"]),\n (prev_decoder_states_pl, prev_decoder_states),\n (prev_decoder_attn_state_pl,prev_time_step_data_val[\"prev_decoder_attn_state\"]),\n (self.memory_states , memory_states),\n (self.masked_weights , encoder_inputs[\"seq_length\"]),\n (self.prev_decoder_label_placeholder, decoder_states_prev_extend),\n (self.query_state , query_states ),\n (self.feed_previous_placeholder, 
True),\n (encoder_input_batch[\"dropout\"], 1.0),\n (encoder_input_batch[\"seq_length\"], encoder_inputs[\"seq_length\"]),\n (self.content_weights, encoder_inputs[\"content_weights\"]),\n ])\n\n for k in feed_dict_order:\n if isinstance(feed_dict_order[k],bool) or isinstance(feed_dict_order[k],float):\n a = None\n else:\n a = feed_dict[k].shape\n print (k, a)\n print (self.new_state_values_second)\n\n print (\"XXXXXXXXXX \", prev_time_step_data_val[\"token\"].shape, decoder_states_prev_extend.shape)\n \"\"\"\n for i in prev_time_step_data_val:\n print (i, prev_time_step_data_val[i].shape)\n print (self.new_state_values_second)\n new_time_step_data = sess.run(self.new_state_values_second, feed_dict=feed_dict)\n\n topk_ids = np.flip(new_time_step_data[\"comb_projection\"].argsort(axis=1)[:,-2*beam_size:], 1)\n probs_temp = copy.deepcopy(new_time_step_data[\"comb_projection\"])\n probs_temp.sort(axis=1)\n probs = np.flip(probs_temp[:, -2*beam_size:], 1)\n\n states_h_new = new_time_step_data[\"state\"].h\n states_c_new = new_time_step_data[\"state\"].c\n states_temp_h_new = new_time_step_data[\"state_temp\"].h\n states_temp_c_new = new_time_step_data[\"state_temp\"].c\n attns_states_new = new_time_step_data[\"attns_state_different\"]\n combined_attns_state_new = new_time_step_data[\"combined_attns_state\"]\n attns_values = new_time_step_data[\"attention_values\"][0]\n output_switches = new_time_step_data[\"output_switch\"]\n prev_coverages = new_time_step_data[\"prev_coverage\"]\n prev_coverages_between_decoders = new_time_step_data[\"prev_coverage_between_decoders\"]\n prev_coverage_vec_different_new = new_time_step_data[\"prev_coverage_vec_different\"]\n attns_state_different_new = new_time_step_data.get(\"attns_state_different\")\n\n # property_states = new_time_step_data[\"property_state\"]\n\n all_hyps = []\n #print (\"states_h_new\", states_h_new.shape)\n #print (\"states_c_new\", states_c_new.shape)\n #print(\"distract_states_h\", distract_states_h.shape)\n #print(\"distract_states_c\", distract_states_c.shape)\n #print(\"output_switches\", output_switches.shape)\n # On the first step, we only had one original hypothesis (the initial hypothesis). 
On subsequent steps, all original hypotheses are distinct.\n for batch in xrange(batch_size):\n num_orig_hyps = 1 if steps == 0 else len(hyps_batch[batch])\n all_hyps = []\n for i in xrange(num_orig_hyps):\n new_state_h, new_state_c, output_switch = states_h_new[batch*beam_size + i],states_c_new[batch*beam_size + i], output_switches[batch*beam_size + i], \n new_state_temp_h, new_state_temp_c, new_prev_coverage = states_temp_h_new[batch*beam_size + i], states_temp_c_new[batch*beam_size + i], prev_coverages[batch*beam_size + i]\n h = hyps_batch[batch][i]\n for j in xrange(int(self.config[\"BeamSearch\"][\"beam_size\"]) * 2): # for each of the top 2*beam_size hyps:\n # Extend the ith hypothesis with the jth option\n new_hyp = h.extend(token=topk_ids[batch*beam_size + i, j],\n prob=probs[batch*beam_size + i, j],\n state_h=new_state_h,\n state_c = new_state_c,\n state_temp_h=new_state_temp_h, \n state_temp_c=new_state_temp_c,\n prev_coverage_vec = new_prev_coverage,\n prev_coverage_between_decoders = prev_coverages_between_decoders[batch*beam_size + i],\n prev_coverage_vec_different = prev_coverage_vec_different_new[batch*beam_size + i],\n attn_values=attns_values[batch*beam_size + i],\n output_switch = output_switch,\n content_weights = encoder_inputs[\"content_weights\"][batch*beam_size + i],\n attns_state_different = attns_state_different_new[batch*beam_size + i],\n prev_decoder_states = prev_decoder_states[batch*beam_size + i],\n attn_state = attns_states_new[batch*beam_size + i], \n combined_attn_state = combined_attns_state_new[batch*beam_size + i],\n words = encoder_inputs[\"word\"][batch*beam_size + i])\n\n all_hyps.append(new_hyp)\n #assert(np.all(new_state != states[i]))\n\n # Filter and collect any hypotheses that have produced the end token.\n hyps = [] # will contain hypotheses for the next step\n for h in sort_hyps(all_hyps): # in order of most likely h\n if h.latest_token == vocab.word_to_index[\"<eos>\"]: # if stop token is reached...\n # If this hypothesis is sufficiently long, put in results. 
Otherwise discard.\n if steps >= int(self.config[\"Decoder\"][\"min_dec_steps\"]):\n results[batch].append(h)\n else: # hasn't reached stop token, so continue to extend this hypothesis\n hyps.append(h)\n if len(hyps) == int(self.config[\"BeamSearch\"][\"beam_size\"]) : #or len(results[batch]) == int(self.config[\"BeamSearch\"][\"beam_size\"]):\n # Once we've collected beam_size-many hypotheses for the next step, or beam_size-many complete hypotheses, stop.\n break\n #hyps_batch.append(hyps)\n hyps_batch[batch]= hyps\n\n steps += 1\n\n # At this point, either we've got beam_size results, or we've reached maximum decoder steps\n for b in range(batch_size):\n if len(results[b])==0: # if we don't have any complete results, add all current hypotheses (incomplete summaries) to results\n results[b] = hyps_batch[b]\n\n # Sort hypotheses by average log probability\n top_sentences = []\n attention_dists = []\n for batch in xrange(batch_size):\n print (\"Batch Number\", batch)\n hyps_sorted = sort_hyps(results[batch])\n for k in hyps_sorted:\n print (self.dataset.decode_to_sentence(k.tokens),k.probs, k.avg_log_prob)\n\n top_sentences.append(hyps_sorted[0])\n decoded_sentences = []\n for i in top_sentences:\n decoded_sentences.append(self.dataset.decode_to_sentence(i.tokens[1:]))\n attn_dists = i.attn_dists\n correct_tokens = [np.argmax(k) for k in attn_dists]\n attention_dists.append(correct_tokens)\n # Return the hypothesis with highest average log prob\n return decoded_sentences, attention_dists", "title": "" }, { "docid": "22636376b5b680ed196e56b5b5d2355c", "score": "0.47822812", "text": "def building_1convdense_model_task2(max_len, dict_size, number_neurons, n_class, drop_per, drop_hid, n_filt, kernel_size,\n final_act, folder, optimizer=Adam()):\n input_seq = Input(shape=(max_len, dict_size), dtype='float32')\n dropout_seq = Dropout(drop_per)(input_seq)\n \n c1 = Conv1D(filters=n_filt, kernel_size=kernel_size, padding='same', strides=1, activation='relu')(dropout_seq)\n #Denses\n dense_seq1 = Dense(number_neurons[0], activation='relu')(c1)\n dropout_seq1 = Dropout(drop_hid)(dense_seq1)\n dense_seq2 = Dense(number_neurons[1], activation='relu')(dropout_seq1)\n dropout_seq2 = Dropout(drop_hid)(dense_seq2)\n flattenn = Flatten()(dropout_seq2)\n main_dense = Dense(n_class, activation=final_act)(flattenn)\n model = Model(inputs=[input_seq], outputs=[main_dense])\n\n print(model.summary())\n \n #adamm = Adam()\n adamm = optimizer\n model.compile(loss='categorical_crossentropy', optimizer = adamm, metrics=['accuracy'])\n \n # saving the model\n file_model = os.path.join(absPath, 'data/', folder, 'model.h5')\n\n model.save(file_model)\n return model", "title": "" }, { "docid": "1136330f64953b7242dabf1d3877add6", "score": "0.4782281", "text": "def viterbi_decoder(self,x,metric_type='soft',quant_level=3):\n if metric_type == 'hard':\n # If hard decision must have 0/1 integers for input else float\n if np.issubdtype(x.dtype, np.integer):\n if x.max() > 1 or x.min() < 0:\n raise ValueError('Integer bit values must be 0 or 1')\n else:\n raise ValueError('Decoder inputs must be integers on [0,1] for hard decisions')\n elif metric_type not in ['hard', 'soft', 'unquant']:\n print('Invalid metric type specified')\n raise ValueError('Invalid metric type specified. 
Use soft, hard, or unquant')\n\n\t\t# Format G into integer for Rust\n G = np.array([float(int(x, base=2)) for x in self.G_polys])\n\t\t\n # Call Rust Function\n y, paths_cum_metrics, paths_traceback_states, paths_traceback_bits = \\\n\t\t rs_fec_conv.viterbi_decoder(x.astype(float), metric_type, \n\t\t quant_level, G, self.decision_depth)\n\t\n # Convert lists to numpy arrays\n y = np.array(y)\n self.paths.cumulative_metric = np.array(paths_cum_metrics)\n self.paths.traceback_states = np.array(paths_traceback_states)\n self.paths.traceback_bits = np.array(paths_traceback_bits)\n\t\t\n # Update object fields\n\t\t\n return y", "title": "" }, { "docid": "1cdff0780a5116289b40858b0801143d", "score": "0.47785822", "text": "def decomposition_generator(poly_id: int = 0):\n # pylint: disable=invalid-name\n if poly_id == 0:\n P: List[List] = [[(0.0, 0.0), (10.0, 0.0), (10.0, 1.0), (0.0, 1.0)], []]\n\n dec = Decomposition(P)\n dec.add_cell([[(0.0, 0.0), (10.0, 0.0), (10.0, 0.5)], []])\n dec.add_cell([[(0.0, 0.0), (10.0, 0.5), (10.0, 1.0), (5.0, 0.5)], []])\n dec.add_cell([[(5.0, 0.5), (10.0, 1.0), (0.0, 1.0)], []])\n dec.add_cell([[(0.0, 0.0), (5.0, 0.5), (0.0, 1.0)], []])\n dec.add_robot_site(0, (0., 0.))\n dec.add_robot_site(1, (0., 0.))\n dec.add_robot_site(2, (0., 0.))\n dec.add_robot_site(3, (0., 0.))\n\n elif poly_id == 1:\n P = [[(0.0, 0.0), (10.0, 0.0), (10.0, 1.0), (0.0, 1.0)], []]\n\n dec = Decomposition(P)\n\n dec.add_cell([[(0.0, 0.0), (2.5, 0.0), (2.5, 1.0), (0.0, 1.0)], []])\n dec.add_cell([[(2.5, 0.0), (5.0, 0.0), (5.0, 1.0), (2.5, 1.0)], []])\n dec.add_cell([[(5.0, 0.0), (7.5, 0.0), (7.5, 1.0), (5.0, 1.0)], []])\n dec.add_cell([[(7.5, 0.0), (10.0, 0.0), (10.0, 1.0), (7.5, 1.0)], []])\n\n dec.add_robot_site(0, (0., 0.))\n dec.add_robot_site(1, (0., 1.))\n dec.add_robot_site(2, (10., 1.))\n dec.add_robot_site(3, (10., 0.))\n\n elif poly_id == 2:\n P = [[(1.0, 0.0), (2.0, 0.0), (3.0, 1.0), (3.0, 2.0), (2.0, 3.0), (1.0, 3.0),\n (0.0, 2.0), (0.0, 1.0)], []]\n\n dec = Decomposition(P)\n\n dec.add_cell([[(0.8333333333333334, 0.16666666666666666),\n (0.9285714285714286, 1.7857142857142854),\n (1.0, 3.0), (0.0, 2.0),\n (0.0, 1.0)], []])\n dec.add_cell([[(0.9285714285714286, 1.7857142857142854),\n (1.6818181818181817, 1.8636363636363633),\n (3.0, 2.0),\n (2.0, 3.0),\n (1.0, 3.0)], []])\n dec.add_cell([[(1.6818181818181817, 1.8636363636363633),\n (0.9285714285714286, 1.7857142857142854),\n (0.8333333333333334, 0.16666666666666666),\n (1.0, 0.0),\n (2.0, 0.0)], []])\n dec.add_cell([[(1.6818181818181817, 1.8636363636363633),\n (2.0, 0.0),\n (3.0, 1.0),\n (3.0, 2.0)], []])\n\n dec.add_robot_site(0, (0., -1.))\n dec.add_robot_site(1, (0., 3.))\n dec.add_robot_site(2, (3., -1.))\n dec.add_robot_site(3, (2., 3.))\n\n elif poly_id == 3:\n P = [[(0.0, 0.0), (4.0, 0.0), (4.0, 4.0), (6.0, 4.0), (6.0, 0.0), (10.0, 0.0), (10.0, 6.0),\n (8.0, 7.0), (7.5, 8.0), (10.0, 7.5), (10.0, 10.0), (0.0, 10.0), (0.0, 5.0),\n (5.0, 6.0), (5.0, 5.0), (0.0, 4.0)], []]\n\n dec = Decomposition(P)\n\n dec.add_cell([[(0.0, 0.0), (4.0, 0.0), (4.0, 4.0), (6.0, 4.0), (5.0, 5.0), (0.0, 4.0)],\n []])\n dec.add_cell([[(6.0, 4.0), (6.0, 0.0), (10.0, 0.0), (10.0, 6.0), (8.0, 7.0), (5.0, 5.0)],\n []])\n dec.add_cell([[(7.5, 8.0), (10.0, 7.5), (10.0, 10.0), (0.0, 10.0), (0.0, 5.0), (5.0, 6.0)],\n []])\n dec.add_cell([[(5.0, 5.0), (8.0, 7.0), (7.5, 8.0), (5.0, 6.0)], []])\n\n dec.add_robot_site(0, (10, 0))\n dec.add_robot_site(1, (10, 10))\n dec.add_robot_site(2, (0, 10))\n dec.add_robot_site(3, (0, 0))\n\n elif poly_id == 4:\n # A 
more complex shape with one hole\n P = [[(0.0, 0.0), (6.0, 0.0), (6.0, 5.0), (4.0, 5.0), (4.0, 3.0), (5.0, 3.0), (5.0, 2.0),\n (3.0, 2.0), (3.0, 6.0), (7.0, 6.0), (7.0, 0.0), (10.0, 0.0), (10.0, 10.0),\n (0.0, 10.0)],\n [[(4.0, 7.0), (3.5, 8.0), (4.5, 9.0), (6.0, 8.0)]]]\n\n dec = Decomposition(P)\n\n dec.add_cell([[(6.0, 0.0), (6.0, 5.0), (4.0, 5.0), (4.0, 3.0), (5.0, 3.0),\n (5.0, 2.0)], []])\n dec.add_cell([[(7.0, 6.0), (7.0, 0.0), (10.0, 0.0), (10.0, 10.0), (6.0, 8.0),\n (4.0, 7.0)], []])\n dec.add_cell([[(6.0, 8.0), (10.0, 10.0), (0.0, 10.0), (3.5, 8.0), (4.5, 9.0)], []])\n dec.add_cell([[(0.0, 0.0), (6.0, 0.0), (5.0, 2.0), (3.0, 2.0), (3.0, 6.0), (7.0, 6.0),\n (4.0, 7.0), (3.5, 8.0), (0.0, 10.0)], []])\n\n dec.add_robot_site(0, (10, 0))\n dec.add_robot_site(1, (10, 10))\n dec.add_robot_site(2, (0, 10))\n dec.add_robot_site(3, (0, 0))\n\n elif poly_id == 5:\n # A rectangle\n P = [[(0.0, 0.0), (9.0, 0.0), (9.0, 1.0), (0.0, 1.0)], []]\n\n dec = Decomposition(P)\n\n dec.add_cell([[(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)], []])\n dec.add_cell([[(1.0, 0.0), (6.0, 0.0), (6.0, 1.0), (1.0, 1.0)], []])\n dec.add_cell([[(6.0, 0.0), (9.0, 0.0), (9.0, 1.0), (6.0, 1.0)], []])\n\n dec.add_robot_site(0, (0, 0))\n dec.add_robot_site(1, (0, 0))\n dec.add_robot_site(2, (0, 0))\n\n elif poly_id == 6:\n P = [[(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0)],\n [[(7.0, 1.0), (7.0, 3.0), (8.0, 3.0), (8.0, 1.0)],\n [(1.0, 4.0), (1.5, 4.5), (3.0, 5.0), (3.5, 4.5), (3.0, 3.0), (2.0, 3.0)],\n [(4.0, 7.0), (6.0, 9.0), (7.0, 8.0), (6.0, 7.0), (8.0, 6.0), (8.5, 6.5),\n (9.0, 6.0), (8.0, 5.0)]]]\n\n dec = Decomposition(P)\n\n dec.add_cell([[(0.0, 0.0), (1.0, 4.0), (1.5, 4.5), (3.0, 5.0), (4.0, 7.0), (6.0, 9.0),\n (10.0, 10.0), (0.0, 10.0)], []])\n dec.add_cell([[(0.0, 0.0), (7.0, 1.0), (7.0, 3.0), (8.0, 5.0), (4.0, 7.0), (3.0, 5.0),\n (3.5, 4.5), (3.0, 3.0), (2.0, 3.0), (1.0, 4.0)], []])\n dec.add_cell([[(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (9.0, 6.0), (8.0, 5.0), (7.0, 3.0),\n (8.0, 3.0), (8.0, 1.0), (7.0, 1.0)], []])\n dec.add_cell([[(10.0, 10.0), (6.0, 9.0), (7.0, 8.0), (6.0, 7.0), (8.0, 6.0), (8.5, 6.5),\n (9.0, 6.0)], []])\n\n dec.add_robot_site(0, (10, 0))\n dec.add_robot_site(1, (10, 10))\n dec.add_robot_site(2, (0, 10))\n dec.add_robot_site(3, (0, 0))\n\n elif poly_id == 7:\n P = [[(0.0, 0.0), (4.0, 0.0), (4.0, 5.0), (2.0, 5.0), (2.0, 6.0), (5.0, 6.0), (5.0, 0.0),\n (10.0, 0.0), (10.0, 4.0), (7.0, 4.0), (7.0, 5.0), (10.0, 5.0), (10.0, 10.0),\n (0.0, 10.0)],\n [[(7.0, 7.0), (7.0, 9.0), (8.0, 9.0), (8.0, 7.0)],\n [(3.0, 7.0), (2.0, 8.0), (3.0, 9.0), (6.0, 8.0)]]]\n\n dec = Decomposition(P)\n\n dec.add_cell([[(0.0, 0.0), (4.0, 0.0), (4.0, 5.0), (2.0, 5.0), (0.0, 5.0)], []])\n dec.add_cell([[(0.0, 10.0), (0.0, 5.0), (2.0, 5.0), (2.0, 6.0), (3.0, 6.0), (3.0, 7.0),\n (2.0, 8.0), (3.0, 9.0), (3.0, 10.0)], []])\n dec.add_cell([[(10.0, 7.0), (10.0, 10.0), (3.0, 10.0), (3.0, 9.0), (6.0, 8.0), (7.0, 9.0),\n (8.0, 9.0), (8.0, 7.0),], []])\n dec.add_cell([[(10.0, 0.0), (10.0, 4.0), (7.0, 4.0), (7.0, 5.0), (10.0, 5.0), (10.0, 7.0),\n (8.0, 7.0), (7.0, 7.0), (7.0, 9.0), (6.0, 8.0), (3.0, 7.0), (3.0, 6.0),\n (5.0, 6.0), (5.0, 0.0)], []])\n\n dec.add_robot_site(0, (10, 0))\n dec.add_robot_site(1, (10, 10))\n dec.add_robot_site(2, (0, 10))\n dec.add_robot_site(3, (0, 0))\n\n # New combined holes\n elif poly_id == 8:\n # A more complex shape with one hole\n P = [[(0.0, 0.0), (6.0, 0.0), (6.0, 5.0), (4.0, 5.0), (4.0, 3.0), (5.0, 3.0), (5.0, 2.0),\n (3.0, 2.0), (3.0, 6.0), (3.9, 6.0), (3.9, 7.0), (3.5, 8.0), (4.5, 
9.0), (6.0, 8.0),\n (4.1, 7.0), (4.1, 6.0), (7.0, 6.0), (7.0, 0.0), (10.0, 0.0), (10.0, 10.0),\n (0.0, 10.0)], []]\n\n dec = Decomposition(P)\n\n dec.add_cell([[(6.0, 0.0), (6.0, 5.0), (4.0, 5.0), (4.0, 3.0), (5.0, 3.0),\n (5.0, 2.0)], []])\n dec.add_cell([[(7.0, 6.0), (7.0, 0.0), (10.0, 0.0), (10.0, 10.0), (6.0, 8.0), (4.1, 7.0),\n (4.1, 6.0)], []])\n dec.add_cell([[(6.0, 8.0), (10.0, 10.0), (0.0, 10.0), (3.5, 8.0), (4.5, 9.0)], []])\n dec.add_cell([[(0.0, 0.0), (6.0, 0.0), (5.0, 2.0), (3.0, 2.0), (3.0, 6.0), (3.9, 6.0),\n (3.9, 7.0), (3.5, 8.0), (0.0, 10.0)], []])\n\n dec.add_robot_site(0, (10, 0))\n dec.add_robot_site(1, (10, 10))\n dec.add_robot_site(2, (0, 10))\n dec.add_robot_site(3, (0, 0))\n\n elif poly_id == 9:\n P = [[(0.0, 0.0), (7.0, 0.0), (7.0, 3.0), (7.4, 3.0), (7.9, 5.0), (4.1, 7.0), (3.1, 5.0),\n (3.5, 4.5), (3.0, 3.0), (2.0, 3.0), (1.0, 4.0), (1.5, 4.5), (2.9, 5.0), (3.9, 7.0),\n (6.0, 9.0), (7.0, 8.0), (6.0, 7.0), (8.0, 6.0), (8.5, 6.5), (9.0, 6.0), (8.1, 5.0),\n (7.6, 3.0), (8.0, 3.0), (8.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0)], []]\n\n dec = Decomposition(P)\n\n dec.add_cell([[(0.0, 0.0), (1.0, 4.0), (1.5, 4.5), (2.9, 5.0), (3.9, 7.0), (6.0, 9.0),\n (10.0, 10.0), (0.0, 10.0)], []])\n dec.add_cell([[(0.0, 0.0), (7.0, 0.0), (7.0, 3.0), (7.4, 3.0), (7.9, 5.0), (4.1, 7.0),\n (3.1, 5.0), (3.5, 4.5), (3.0, 3.0), (2.0, 3.0), (1.0, 4.0)], []])\n dec.add_cell([[(8.0, 0.0), (10.0, 0.0), (10.0, 6.0), (9.0, 6.0), (8.1, 5.0), (7.6, 3.0),\n (8.0, 3.0)], []])\n dec.add_cell([[(10.0, 10.0), (6.0, 9.0), (7.0, 8.0), (6.0, 7.0), (8.0, 6.0), (8.5, 6.5),\n (9.0, 6.0), (10.0, 6.0)], []])\n\n dec.add_robot_site(0, (10, 0))\n dec.add_robot_site(1, (10, 10))\n dec.add_robot_site(2, (0, 10))\n dec.add_robot_site(3, (0, 0))\n\n elif poly_id == 10:\n P = [[(0.0, 0.0), (4.0, 0.0), (4.0, 5.0), (2.0, 5.0), (2.0, 6.0), (5.0, 6.0), (5.0, 0.0),\n (10.0, 0.0), (10.0, 4.0), (7.0, 4.0), (7.0, 9.0), (8.0, 9.0), (8.0, 5.0),\n (10.0, 5.0), (10.0, 10.0), (3.1, 10.0), (3.1, 9.0), (6.0, 8.0), (3.0, 7.0),\n (2.0, 8.0), (2.9, 9.0), (2.9, 10.0), (0.0, 10.0)], []]\n\n dec = Decomposition(P)\n\n dec.add_cell([[(0.0, 0.0), (4.0, 0.0), (4.0, 5.0), (2.0, 5.0), (0.0, 5.0)], []])\n dec.add_cell([[(0.0, 10.0), (0.0, 5.0), (2.0, 5.0), (2.0, 6.0), (3.0, 6.0), (3.0, 7.0),\n (2.0, 8.0), (2.9, 9.0), (2.9, 10.0)], []])\n dec.add_cell([[(10.0, 5.0), (10.0, 10.0), (3.1, 10.0), (3.1, 9.0), (6.0, 8.0), (7.0, 9.0),\n (8.0, 9.0), (8.0, 5.0)], []])\n dec.add_cell([[(10.0, 0.0), (10.0, 4.0), (7.0, 4.0), (7.0, 9.0), (6.0, 8.0), (3.0, 7.0),\n (3.0, 6.0), (5.0, 6.0), (5.0, 0.0)], []])\n\n dec.add_robot_site(0, (10, 0))\n dec.add_robot_site(1, (10, 10))\n dec.add_robot_site(2, (0, 10))\n dec.add_robot_site(3, (0, 0))\n\n return dec", "title": "" }, { "docid": "73edf9476ab8fa06cb8672626a9aff15", "score": "0.47725406", "text": "def maxiter() -> int:\n return 100", "title": "" }, { "docid": "75b893dfabd89f6960e1a77cfe875b9a", "score": "0.47717544", "text": "def predict(self, graph, inductive_data, list_connected_node_types, maxid, inductive_index):\n inductive_dict = self.__create_inductive_dict(inductive_data, list_connected_node_types)\n \n degrees = nx.degree(graph)\n train_degrees = dict(degrees)\n train_degrees = collections.OrderedDict(sorted(train_degrees.items()))\n \n v = self.__get_vector(inductive_dict, train_degrees, maxid)\n\n S = np.random.randn(maxid+1, self.intermediate_dimension) / np.sqrt(self.intermediate_dimension)\n \n inductive_degrees = []\n\n for l in inductive_dict.values():\n x = 0\n for i in l:\n if i is not 
None:\n x+=1\n inductive_degrees.append(x)\n \n\n sqrt_d_inv = np.array([1/np.sqrt(degree) if degree > 0 else 0 for degree in inductive_degrees])\n sqrt_d_inv = scipy.sparse.spdiags(sqrt_d_inv,0, sqrt_d_inv.size, sqrt_d_inv.size)\n\n p = v.dot(S)\n U =(p.dot(self.V)).dot(np.linalg.inv(self.sigma))\n U = sqrt_d_inv.dot(U)\n \n figrl_inductive_emb = pd.DataFrame(U, index = inductive_index)\n \n return figrl_inductive_emb", "title": "" }, { "docid": "adbe886e4e889a14e87cd3228ff13118", "score": "0.4770201", "text": "def get_loop_fn(decoder_inputs, sequence_length, feat, lstm, initial_state,\n attn_fn, init_token, embeddings, feed_prev_output=True, hparams={}):\n input_shape = array_ops.shape(decoder_inputs)\n time_steps = input_shape[0]\n decoder_inputs_ta = tf.TensorArray(dtype=decoder_inputs.dtype, size=time_steps)\n decoder_inputs_ta = decoder_inputs_ta.unpack(decoder_inputs)\n vocab_size = embeddings.get_shape()[0]\n\n def loop_fn(time, cell_output, cell_state, prev_token_embedding):\n \"\"\"\n The loop_state is the embedding for the previous token.\n \"\"\"\n with tf.variable_scope(\"decoder\"): #, reuse=reuse_vars):\n # If it's the initial iteration, there is some special setup to do.\n if cell_output is None:\n # TODO(kjchavez): It would be cleaner to instantiate all model\n # variables outside the loop function and just use them here.\n elements_finished = (time >= sequence_length)\n next_cell_state = initial_state\n selected_token_embedding = tf.nn.embedding_lookup(embeddings,\n init_token)\n attention, context = attn_fn(feat, initial_state[1], hdim=hparams['hdim'],\n vdim=hparams['vdim'], adim=hparams['adim'],\n batch_size=hparams['batch_size'])\n\n # Ignored on first iteration, but used to setup graph.\n logits, probs = token_prob(initial_state[1], context,\n selected_token_embedding, vocab_size,\n hparams=hparams)\n\n next_input = tf.concat(1, [selected_token_embedding, context])\n return (elements_finished, next_input, next_cell_state,\n None, selected_token_embedding)\n\n # All subsequent iterations\n tf.get_variable_scope().reuse_variables()\n next_cell_state = cell_state\n\n # Given the cell output, the emit output is a distribution over tokens.\n attention, context = attn_fn(feat, cell_output, hdim=hparams['hdim'],\n vdim=hparams['vdim'], adim=hparams['adim'],\n batch_size=hparams['batch_size'])\n logits, probs = token_prob(cell_output, context, prev_token_embedding, vocab_size,\n hparams=hparams)\n emit_output = logits\n print \"Emit output shape:\", emit_output.get_shape()\n\n # To produce the next input from the current output, we must do a\n # couple of things. 
First, we choose a token from the token\n # distribution (or use the 'true' token).\n if feed_prev_output:\n selected_token = tf.argmax(logits, 1, name=\"argmax_token\")\n else:\n # We should be careful not to read past the last token.\n # Since we are using time - 1, this implies that the\n # decoder_inputs should NOT contain the GO token.\n selected_token = decoder_inputs_ta.read(time-1)\n\n selected_token_embedding = tf.nn.embedding_lookup(embeddings, selected_token)\n print \"selected token embedding:\", selected_token_embedding\n\n elements_finished = tf.logical_or(time >= sequence_length,\n selected_token == STOP_ID)\n finished = tf.reduce_all(elements_finished)\n\n next_input = tf.cond(\n finished,\n lambda: tf.zeros([hparams['batch_size'],\n hparams['embedding_dim'] +hparams['vdim']], dtype=tf.float32),\n lambda: tf.concat(1, [selected_token_embedding, context]))\n\n return (elements_finished, next_input, next_cell_state,\n emit_output, selected_token_embedding)\n\n\n return loop_fn", "title": "" }, { "docid": "33b64f0d001ba15c6aff904baed24481", "score": "0.47688603", "text": "def __init__(\r\n self,\r\n num_layers: int = 4,\r\n num_heads: int = 4,\r\n hidden_dim: int = 256,\r\n vocab_size: int = 16000,\r\n dropout: float = 0.1,\r\n ) -> None:\r\n super(Decoder, self).__init__()\r\n self.num_layers = num_layers\r\n self.num_heads = num_heads\r\n self.hidden_dim = hidden_dim\r\n\r\n self.layer_stack = nn.ModuleList(\r\n [\r\n DecoderLayer(hidden_dim, num_heads, dropout=dropout)\r\n for _ in range(num_layers)\r\n ]\r\n )\r\n self.output_normalization = nn.LayerNorm(hidden_dim, eps=1e-6)\r\n self.generator = Generator(hidden_dim, vocab_size)", "title": "" } ]
bb93233fea2728dbe7a710491dbd20c3
Override. Filter the orders
[ { "docid": "64f1bc9be7d931e4af09562d6f9d9f9e", "score": "0.5655497", "text": "def get_queryset(self):\n qs = super(OrderListView, self).get_queryset()\n if self.request.user.profile.is_designer:\n qs = qs.filter(preferred_designer=self.request.user)\n\n params = self.get_params_from_request()\n\n # designer\n designer = params['designer']\n designer_Q = Q(preferred_designer__id=designer) if designer != 'all' else Q()\n\n # status\n status = params['status']\n status_Q = Q(status=status) if status != 'all' else Q()\n\n # style\n style = params['style']\n style_Q = Q(style=style) if style != 'all' else Q()\n\n # age group\n age_group = params['age_group']\n age_group_Q = Q(age_group=age_group) if age_group != 'all' else Q()\n\n # created time\n from_dt = params['created_from']\n to_dt = params['created_to']\n created_time_Q = Q()\n if from_dt and to_dt:\n created_time_Q = Q(created_at__range=(from_dt, to_dt))\n elif from_dt:\n created_time_Q = Q(created_at__gte=from_dt)\n elif to_dt:\n created_time_Q = Q(created_at__lte=to_dt)\n\n qs = qs.filter(designer_Q, status_Q, style_Q, age_group_Q, created_time_Q)\n\n return qs.order_by('-created_at')", "title": "" } ]
[ { "docid": "a4e89f8f2340058f683b47dde9083f5d", "score": "0.7153317", "text": "def filter(self):\n\n keys = ('ord-af', 'clt-af', 'qty-af', 'dat-af', 'tes-af', 'sta-af')\n new_dict = {}\n if 'orders' in self.data_dict:\n self.data_dict.pop('orders')\n for key in keys:\n if key in self.data_dict:\n new_dict[key] = self.data_dict.pop(key)\n\n for key, value in new_dict.items():\n if key == 'ord-af':\n for order in self.order_list[:]:\n if not order.name in new_dict['ord-af']:\n self.order_list.remove(order)\n elif key == 'clt-af':\n for order in self.order_list[:]:\n if not order.client in new_dict['clt-af']:\n self.order_list.remove(order)\n elif key == 'qty-af':\n for order in self.order_list[:]:\n if not str(order.count()) in new_dict['qty-af']:\n self.order_list.remove(order)\n elif key == 'dat-af':\n for order in self.order_list[:]:\n if not str(order.date) in new_dict['dat-af']:\n self.order_list.remove(order)\n elif key == 'tes-af':\n print(f\"Keys: {new_dict['tes-af']}\")\n for order in self.order_list[:]:\n print(f'Order testers: {order.testers}')\n if not all(x in order.testers for x in new_dict['tes-af']):\n print('Removing')\n self.order_list.remove(order)\n elif key == 'sta-af':\n for order in self.order_list[:]:\n if not order.get_status() in new_dict['sta-af']:\n self.order_list.remove(order)", "title": "" }, { "docid": "57abf8b75892ef7806b0155dbc90c0fa", "score": "0.70765233", "text": "def filter_orders(self, orders):\n orders = [ # Filter resends\n order for order in orders if float(order.total_gross_gbp) > 0\n ]\n return orders", "title": "" }, { "docid": "8d37c7008b43379d3da712f3a3bc4e9f", "score": "0.6498844", "text": "def __init__(self):\n self.orders = Orders.objects.exclude(is_sent=1).values_list(\"order_name\", flat=True)", "title": "" }, { "docid": "5577aacae8d2bc79a00a095ce07b90b2", "score": "0.64597684", "text": "def get_queryset(self):\n return Order.objects.filter(pub_date__lte=timezone.now())", "title": "" }, { "docid": "2ab8f0c118db42310b552c2063c10e29", "score": "0.64432186", "text": "def filter_from_request(self, request):\n if request.customer.is_visitor():\n msg = _(\"Only signed in customers can view their orders\")\n raise PermissionDenied(msg)\n return self.get_queryset().filter(customer=request.customer).order_by('-updated_at',)", "title": "" }, { "docid": "4cc39e246ca0921e38da272c4fca7c3b", "score": "0.6222382", "text": "def get_filtered_orders(**kwargs):\n \n\n\n response = []\n try:\n engine = create_db_engine()\n db_conn = engine.connect()\n with db_conn as connection:\n rows = connection.execute(\"select * from orders\")\n response_list = []\n for row in rows:\n res = {}\n loc = dict(row)\n res['id'] = loc.get('id')\n res['order_id'] = loc.get('order_id')\n res['product_id'] = loc.get('product_id')\n res['customer_id'] = loc.get('customer_id')\n res['order_date'] = loc.get('order_date')\n res['quantity'] = loc.get('quantity')\n res['units'] = loc.get('units')\n res['rate'] = loc.get('rate')\n res['discount'] = loc.get('discount')\n res['tax'] = loc.get('tax')\n res['total_amount'] = loc.get('total_amount')\n res['payment_status'] = loc.get('payment_status')\n res['order_status'] = loc.get('order_status')\n res['description'] = loc.get('description')\n res['delivery_date'] = loc.get('delivery_date')\n res['payment_type'] = loc.get('payment_type')\n \n response_list.append(res)\n return response_list\n \n #response = generate_api_response(rows)\n db_conn.close() \n except Exception as e:\n print(\"get products data engne 
exce---*******************\"+str(e))\n raise ValueError", "title": "" }, { "docid": "5f885a8066827d9c08aaa44db0fae8b9", "score": "0.6216156", "text": "def _filter(self):", "title": "" }, { "docid": "5c146d77c785cad90d7561065a5253d2", "score": "0.6133366", "text": "def read_orders(self):\n raise NotImplementedError(\"Abstractmethod\")", "title": "" }, { "docid": "1472c86be63b6c89ac49776aa3ac0045", "score": "0.6107942", "text": "def _filter(self, filter_condition):", "title": "" }, { "docid": "d991ed6b83537ae67e9a6b0ae5ff5537", "score": "0.6063203", "text": "def loadOrders(self, filter):\n if filter == \"completed\":\n ids = self.__getAllOrderIDs()\n else:\n ids = self.__loadMappedOrders(filter)\n self.__loadItems(ids)\n return True", "title": "" }, { "docid": "53fb58def1d4cfb011170c174c1c3dee", "score": "0.6008538", "text": "def get_queryset(self):\n return WillOrder.objects.filter(user=self.request.user)", "title": "" }, { "docid": "b89a3bfffb2f6b1035ccb7e451461283", "score": "0.59666854", "text": "def filter_queryset(self, queryset):\n filtered_queryset = super().filter_queryset(queryset)\n\n if not filtered_queryset.ordered:\n return filtered_queryset.order_by(*self._default_ordering)\n\n return filtered_queryset", "title": "" }, { "docid": "d5c84634803492e8da19d5bf03916f8f", "score": "0.5929355", "text": "def test_list_orders(self):\n pass", "title": "" }, { "docid": "9a4aa76c96482a1ba8b1dbc1a46dc4ca", "score": "0.58830315", "text": "def return_to_orders_page(self):\n raise NotImplementedError", "title": "" }, { "docid": "cfc0cffe35e119587e014e8d2a1fb512", "score": "0.5860064", "text": "def on_order(self, order: OrderData):\n pass", "title": "" }, { "docid": "cfc0cffe35e119587e014e8d2a1fb512", "score": "0.5860064", "text": "def on_order(self, order: OrderData):\n pass", "title": "" }, { "docid": "261cb353dfc3da1c24dc823f27ace7cb", "score": "0.58378935", "text": "def get_orders():\n\treturn Order.query", "title": "" }, { "docid": "f2aee494fff07b9f07500d6a740def70", "score": "0.58258307", "text": "def get_all_orders(self):\n return list(self.orders.values())", "title": "" }, { "docid": "430fa85eb78c8e4792cc8e2fbb3e5382", "score": "0.579573", "text": "def raiseOrder(self):\n self.stub.RaiseOrder(filter_pb2.FilterRaiseOrderRequest(filter=self.data),\n timeout=Cuebot.Timeout)", "title": "" }, { "docid": "e1e80cf2378b2d4c75a4e09c67ac5e95", "score": "0.5783071", "text": "def get_queryset(self):\n event = get_event_object_by_uuid(self.kwargs['event_uuid'])\n\n order_list = Order.objects.filter(event=event)\n\n return OrderedItem.objects.filter(order__in=order_list).filter(\n transaction_type=dict(ORDER_ITEM_TRANSACTION_TYPE_CHOICES).get(RECEIPT))", "title": "" }, { "docid": "15af6cfcd8f8a3b28de95a8ded402e21", "score": "0.5754223", "text": "def map_to_orders(self, raw_orders: HitbtcRawOrders) -> HitbtcOrders:", "title": "" }, { "docid": "57a5c54cff139e3c70077db9162cf598", "score": "0.5676042", "text": "def filter(self, field):\n filter = self._filter(field) & self.base_queryset\n order = getattr(self, 'order_%s' % field, None)\n if order:\n return order(filter)\n return filter", "title": "" }, { "docid": "840318aabbc39da3923db3b2388c6cf7", "score": "0.56417793", "text": "def setOrder(self, order):\n self.stub.SetOrder(filter_pb2.FilterSetOrderRequest(filter=self.data, order=order),\n timeout=Cuebot.Timeout)", "title": "" }, { "docid": "750f1a602d0080664fc74bad6176c7ae", "score": "0.56415254", "text": "def get_orders(self, symbol: str = None) -> list:\n response = 
self.TradeAPI.makeRequest(\"GET\", \"orders\")\n response = response.json()\n # Check if order must be filtered:\n if symbol:\n filteredOrders = []\n for order in response[\"results\"]:\n if order[\"symbol\"] == symbol:\n filteredOrders.append(order)\n return filteredOrders\n else:\n return response", "title": "" }, { "docid": "d786e1038f697f2ac1810370527d6a08", "score": "0.56229514", "text": "def filter(self, *args, **kwargs):\n return self._filter_or_exclude(False, *args, **kwargs)", "title": "" }, { "docid": "6a5ecf02652f8370b225dedcfb76ed05", "score": "0.5620342", "text": "def get_order_form(self):\n return Order.objects.filter(researcher=self)", "title": "" }, { "docid": "53fb7c8a626b4ee00e31d7c935432c7f", "score": "0.5607982", "text": "def process_order(self, data, order):\n raise NotImplementedError('process_order')", "title": "" }, { "docid": "2b8293172a6800fe28262b02374e527a", "score": "0.5599593", "text": "def cancnelAllOrder(self):\n return self._private('/cancel_all_orders/')", "title": "" }, { "docid": "b960802eadf326cbef8b1cea65bc243c", "score": "0.55860716", "text": "def __loadMappedOrders(self, filter):\n matchedOrders = []\n\n for order in self.__getAllOrderIDs():\n if(self.__confirmLastStage(order['orderID'], self.__mappings[filter])):\n matchedOrders.append(order)\n\n return matchedOrders", "title": "" }, { "docid": "8e4fd9b8d93ba151dc4b91e894d64f4c", "score": "0.55854964", "text": "def order_filter_generators(self, filter):\n return self.order_ideal_generators(filter, direction='up')", "title": "" }, { "docid": "776baa09c467eb70d112cb7bb3388e59", "score": "0.5577749", "text": "def filter(self):\n keys = ('name-af', 'qty-af')\n new_dict = {}\n for key in keys:\n if key in self.data_dict:\n new_dict[key] = self.data_dict.pop(key)\n for key, value in new_dict.items():\n if key == 'name-af':\n self.received_batches = self.received_batches.filter(received_batch_name__in=new_dict[key])\n elif key == 'qty-af':\n self.received_batches = self.received_batches.annotate(Count(\"computers__id_computer\")).filter(\n computers__id_computer__count__in=new_dict[key])", "title": "" }, { "docid": "433de9344c007e0c991618a1d46c23e9", "score": "0.55657244", "text": "def get(self):\n return get_all_order()", "title": "" }, { "docid": "c5833c0169089135693b89f5bc887453", "score": "0.5560096", "text": "def filter():", "title": "" }, { "docid": "03e816c03d3a92a3e3cb01ce2798eac1", "score": "0.5545883", "text": "def retrieve_orders():\n return [\n {\n \"id\": \"1\",\n \"status\": Status.Queued.value,\n \"created_at\": \"2020-10-16T10:31:10.969696\",\n \"created_by\": \"USER14\",\n \"equipment\": [\n \"KEYBOARD\", \"MOUSE\"\n ]\n },\n {\n \"id\": \"2\",\n \"status\": Status.Queued.value,\n \"created_at\": \"2020-10-16T10:29:10.969696\",\n \"created_by\": \"USER15\",\n \"equipment\": [\n \"KEYBOARD\", \"WEBCAM\"\n ]\n }\n ]", "title": "" }, { "docid": "b65caf855c0d1db9baf4482f7e8cbb4e", "score": "0.5529329", "text": "def apply_filter(self):\n pass", "title": "" }, { "docid": "58bf7562c05fa6f7a95f7b277a2a8499", "score": "0.55139744", "text": "def _m_get_items_filter():", "title": "" }, { "docid": "bfe95c69f8ac845211da06815e54fe25", "score": "0.5498487", "text": "def get_all_order_detail(self):\r\n return self.order_detail", "title": "" }, { "docid": "8fb5b5310e46e57467fc7cc1e91e9fcb", "score": "0.54778135", "text": "def _filter_entries(self, entries):\n filtered_entries = super()._filter_entries(entries)\n return filtered_entries", "title": "" }, { "docid": "02680c21e963660de01164a1a46f4d79", 
"score": "0.54756624", "text": "def filter_queryset(self, request, queryset, view):\n raise NotImplementedError(\".filter_queryset() must be overridden.\")", "title": "" }, { "docid": "23c2f11d88bb64b8c593578dcc9fdda9", "score": "0.54423493", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or 'other')\n # to decide how to filter the queryset.\n if self.value() == '-1':\n return queryset.filter(Group__id__exact=297)\n\n if self.value() == '1':\n return queryset.exclude(Group__id__exact=297)", "title": "" }, { "docid": "88ef94929d306b076476746481c382d2", "score": "0.54301804", "text": "def _filter_search(self, filter_func, order, follow_attrs):\n order_opts = {\n \"preorder\": _preorder_traverse,\n \"postorder\": _postorder_traverse,\n \"level\": _level_traverse,\n }\n try:\n order_func = order_opts[order]\n except KeyError:\n raise ValueError(\n \"Invalid order '%s'; must be one of: %s\" % (order, tuple(order_opts))\n )\n\n if follow_attrs:\n get_children = _sorted_attrs\n root = self\n else:\n get_children = lambda elem: elem.clades # noqa: E731\n root = self.root\n return filter(filter_func, order_func(root, get_children))", "title": "" }, { "docid": "d6fe5d04852a69edebd49dedbb102992", "score": "0.5424646", "text": "def futures_liquidation_orders(self, **params):\n return self._request_futures_api('get', 'forceOrders', signed=True, data=params)", "title": "" }, { "docid": "a73ce67c26398a7164c2e3d01e509f94", "score": "0.5422153", "text": "def __Where(self):\n if self.__Accept('WHERE'):\n return self.__FilterList()\n return self.__OrderBy()", "title": "" }, { "docid": "db062e07d15d339d06b0c4822c2be144", "score": "0.5417288", "text": "def _data_filtering(self):\n self._filter_nan_user_or_item()\n self._remove_duplication()\n self._filter_by_field_value()\n self._filter_inter_by_user_or_item()\n self._filter_by_inter_num()\n self._reset_index()", "title": "" }, { "docid": "b0b4f5eab041e3251e741f91745a6a96", "score": "0.54001623", "text": "def import_orders(self):\n if self.source != 'amazon_mws':\n return super(SaleChannel, self).import_orders()\n\n Date = Pool().get('ir.date')\n\n order_api = self.get_amazon_order_api()\n with Transaction().set_context(include_past_orders=True):\n # Import past orders by default in case of Amazon\n # to include FBA orders also.\n order_states = self.get_order_states_to_import()\n\n order_states_to_import_in = set([])\n for order_state in order_states:\n order_states_to_import_in.add(order_state.code)\n if order_state.code in ('Unshipped', 'PartiallyShipped'):\n # Amazon need `Unshipped` and `PartiallyShipped` orderstatus\n # together.\n order_states_to_import_in.update(\n ('Unshipped', 'PartiallyShipped'))\n\n lastupdatedafter = (\n Date.today() - relativedelta(days=10)\n ).strftime('%Y-%m-%dT00:00:01Z')\n response = order_api.list_orders(\n marketplaceids=[self.amazon_marketplace_id],\n lastupdatedafter=lastupdatedafter,\n # Unshipped and PartiallyShipped must be used together in\n # this version of the Orders API section. 
Using one and not\n # the other returns an error.\n orderstatus=order_states_to_import_in\n ).parsed\n\n if not response.get('Orders'):\n return []\n\n # Orders are returned as dictionary for single order and as\n # list for multiple orders.\n # Convert to list if dictionary is returned\n if not isinstance(response['Orders']['Order'], list):\n orders = [response['Orders']['Order']]\n else:\n orders = response['Orders']['Order']\n\n while response.get('NextToken'):\n # Pull data from pagination\n # TRY to fetch more orders, if api call limit is reached then\n # do not continue.\n try:\n response = order_api.list_orders_by_next_token(\n response['NextToken']['value']\n ).parsed\n except mws.MWSError:\n break\n\n if not isinstance(response['Orders']['Order'], list):\n new_orders = [response['Orders']['Order']]\n else:\n new_orders = response['Orders']['Order']\n\n orders.extend(new_orders)\n\n # Update last order import time for channel\n self.write([self], {'last_order_import_time': datetime.utcnow()})\n\n return self.import_mws_order_bulk(orders)", "title": "" }, { "docid": "7553109cb686f6b34d6d2fef7b173449", "score": "0.53991795", "text": "def create_my_orders(self):", "title": "" }, { "docid": "73d6b5f2ba50803f3cb5cb303291b5bd", "score": "0.53907865", "text": "def order_callback(self, order):\n raise NotImplementedError", "title": "" }, { "docid": "a272b92afd4d172df1552586df9a2762", "score": "0.53901154", "text": "def preferred_order(self, *args, **kwargs):\r\n orders = self.get_queryset().filter(*args, **kwargs)\r\n orders = orders.annotate( custom_order=\r\n models.Case( \r\n models.When(status='Submitted', then=models.Value(0)),\r\n models.When(status='In Progress', then=models.Value(1)),\r\n models.When(status='Complete', then=models.Value(2)),\r\n models.When(status='Billed', then=models.Value(3)),\r\n models.When(status='Problem', then=models.Value(4)),\r\n models.When(status='Canceled', then=models.Value(5)),\r\n models.When(status='Auto', then=models.Value(6)),\r\n default=models.Value(7),\r\n output_field=models.IntegerField(), )\r\n ).order_by('custom_order', 'date_recurring_stop')\r\n return orders", "title": "" }, { "docid": "79e67b1a0942f0c859dd1ff62945222a", "score": "0.5387244", "text": "def list(self, request): \n orders = Order.objects.filter(customer_id=request.auth.user.customer.id, payment_type_id=None)\n \n order = self.request.query_params.get('order', None)\n \n if order is not None:\n orders = Order.filter(pk=request.auth.user)\n\n serializer = OrderSerializer(orders, many=True, context={'request': request})\n\n return Response(serializer.data)\n \n ################# SHOPPING CART ###############################", "title": "" }, { "docid": "4dd8b2c9818d007dc40a39cde8f7c990", "score": "0.5375231", "text": "def getOrders(self):\n data = []\n for item in self.__objects:\n data.append(item.getOrderInfo())\n return data", "title": "" }, { "docid": "7d1e3026d292c70540be9041ac361a30", "score": "0.5365056", "text": "def queryset(self, request, queryset):\n # Compare the requested value (either '80s' or 'other')\n # to decide how to filter the queryset.\n if self.value() == '-1':\n #return queryset.filter(owner__AdAccountOwner__id__exact=208)\n return queryset.filter(owner__isnull=True)\n\n\n if self.value() == '1':\n #return queryset.exclude(owner__AdAccountOwner__id__exact=208)\n return queryset.exclude(owner__isnull=True)", "title": "" }, { "docid": "8e2082cbf1531244e66f188bc0b384a6", "score": "0.5361164", "text": "def _filter_by_price(self):\n self.recomm = 
self.recomm[self.recomm['attributes.RestaurantsPriceRange2'].isin(self.price)]", "title": "" }, { "docid": "2aafcad1b037ed231816933d701d8b3b", "score": "0.53600216", "text": "def _clear_orders(self):\n all_orders = Order.all().values()\n my_orders = [o for o in all_orders if o.mine and o.is_pending]\n \n self._order_count = len(my_orders)\n\n if self._order_count != 0:\n for order in my_orders:\n if not self._waiting_for_order:\n cancel_order = self._make_cancel_order(order)\n self.send_order(cancel_order)\n self._waiting_for_order = True", "title": "" }, { "docid": "0a6df6cd12717191f0eacfd7e248043f", "score": "0.53581065", "text": "def list(self, request):\n orders = Order.objects.all()\n customer = Customer.objects.get(user=request.auth.user)\n\n # Either send back all closed orders for the order history view, or the single open order to display in cart view\n cart = self.request.query_params.get('orderlist', None)\n orders = orders.filter(customer=customer)\n print(\"orders\", orders)\n if cart is not None:\n orders = orders.filter(payment_type=None).get()\n serializer = OrderSerializer(\n orders, many=False, context={'request': request}\n )\n else:\n serializer = OrderSerializer(\n orders, many=True, context={'request': request}\n )\n return Response(serializer.data)", "title": "" }, { "docid": "477b2d29248c091024889328892f58a8", "score": "0.5353776", "text": "def orderings(self):\n return self.__orderings", "title": "" }, { "docid": "cae3de9e0cad7666ac1536364361de4e", "score": "0.53525656", "text": "def process_orders(self, orders):\n processed_orders = []\n for i, order in enumerate(orders):\n order = Order(self, order)\n processed_orders.append(order)\n print(\n f\"Processing order {order.order_id} ({i + 1} of {len(orders)})\",\n file=sys.stderr,\n )\n return processed_orders", "title": "" }, { "docid": "5d56d1dedfad563e5e209e32f11b3c6a", "score": "0.5348823", "text": "def filter(self, **kwargs):\r\n return self.values().filter(**kwargs)", "title": "" }, { "docid": "c340406c80ff8706ca7b1f632358e9b9", "score": "0.5344465", "text": "def process_orders(self, file_name: str) -> None:\n op = OrderProcessor()\n for an_order in op.process_data(file_name):\n\n product_id = an_order.product_id\n if product_id not in self.item_dic:\n self.item_dic[product_id] = []\n\n # if the order contains more than current inventory place\n # 100 more in inventory.\n if an_order.is_valid and \\\n len(self.item_dic[product_id]) < an_order.quantity:\n if an_order.item.lower() == 'candy':\n for i in range(0, 100):\n self.item_dic[product_id].append(\n an_order.factory.create_candy(\n **an_order.item_details))\n elif an_order.item.lower() == 'stuffedanimal':\n for i in range(0, 100):\n self.item_dic[product_id].append(\n an_order.factory.create_stuffed_animal(\n **an_order.item_details))\n elif an_order.item.lower() == 'toy':\n for i in range(0, 100):\n self.item_dic[product_id].append(\n an_order.factory.create_toy(\n **an_order.item_details))\n\n # subtract the order amount from inventory\n self.item_dic[product_id] = self.item_dic[product_id][\n :-an_order.quantity]\n self.orders.append(an_order)", "title": "" }, { "docid": "9c1675c9192519b61acb0a54b6d17a46", "score": "0.5331928", "text": "def get_orders(self):\n return CCAPI.get_orders_for_dispatch(\n order_type=1, number_of_days=self.number_of_days\n )", "title": "" }, { "docid": "cdada4d1f572825033003f1892d4a4e4", "score": "0.53221035", "text": "def test_chaining_filter_to_existing_queryset(self):\n self.assertEqual(Transaction.objects.count(), 5)\n\n 
self.assertEqual(\n Transaction.objects.filter_by_related_objects(\n [self.order_1]).count(), 4)\n\n transactions_restricted_by_ledger = (\n Transaction.objects.filter(ledgers__in=[self.ledger])\n )\n\n self.assertEqual(\n transactions_restricted_by_ledger.filter_by_related_objects(\n [self.order_1]).distinct().count(), 4)", "title": "" }, { "docid": "ae3b93bfe304d3a5acabc0f720ffbb14", "score": "0.5293811", "text": "async def watch_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):\n await self.load_markets()\n name = 'orders'\n subscribeObject = {\n 'name': name,\n }\n messageHash = name\n if symbol is not None:\n symbol = self.symbol(symbol)\n marketId = self.market_id(symbol)\n subscribeObject['markets'] = [marketId]\n messageHash = name + ':' + marketId\n orders = await self.subscribe_private(subscribeObject, messageHash)\n if self.newUpdates:\n limit = orders.getLimit(symbol, limit)\n return self.filter_by_since_limit(orders, since, limit, 'timestamp', True)", "title": "" }, { "docid": "3fc715d490d0d4b5023281428d41b3b3", "score": "0.5284219", "text": "def enableFilter(self, items=None):", "title": "" }, { "docid": "05b4455706fa7397733d355f8b3cd69b", "score": "0.52799875", "text": "def testGetOrdersByStatement(self):\n if not self.__class__.order1:\n self.testCreateOrders()\n filter_statement = {'query': 'WHERE advertiserId = \\'%s\\' LIMIT 500'\n % self.__class__.advertiser_id}\n self.assert_(isinstance(\n self.__class__.service.GetOrdersByStatement(filter_statement),\n tuple))", "title": "" }, { "docid": "05b4455706fa7397733d355f8b3cd69b", "score": "0.52799875", "text": "def testGetOrdersByStatement(self):\n if not self.__class__.order1:\n self.testCreateOrders()\n filter_statement = {'query': 'WHERE advertiserId = \\'%s\\' LIMIT 500'\n % self.__class__.advertiser_id}\n self.assert_(isinstance(\n self.__class__.service.GetOrdersByStatement(filter_statement),\n tuple))", "title": "" }, { "docid": "4cb53c9befcaa39c114179983356f045", "score": "0.52766275", "text": "def cancel_orders(self) -> None:\n for order in list(self.orders):\n order.cancel()", "title": "" }, { "docid": "249c4aa7ad12a7ba9eaf9539663a60c8", "score": "0.52722186", "text": "def filter_queryset(self, queryset):\n return queryset.order_by('-created')", "title": "" }, { "docid": "9dac4718cd7d590cf060f1443ffb83dd", "score": "0.5271709", "text": "def get_queryset(self, request):\n qs = self.model.objects.unfiltered()\n ordering = self.get_ordering(request)\n if ordering:\n qs = qs.order_by(*ordering)\n return qs", "title": "" }, { "docid": "44cacf83b9ab4502d6004b9e25397ea9", "score": "0.52582204", "text": "def get_context_data(self, **kwargs):\n order = self.request.GET.get('order', None)\n context = super(ListDetailView, self).get_context_data(**kwargs)\n context['materials'] = self.object.materials\n from models import ORDER_STATES\n if order and order in dict(ORDER_STATES):\n context['materials'] = context['materials'].order_by(order)\n context['order_form'] = ListOrderForm()\n return context", "title": "" }, { "docid": "946b79ea0591bbf1b0115b5304772ab7", "score": "0.5258021", "text": "def items(self):\n # These are all the variations of this main product\n try:\n product_ids = self.product.configurableproduct.productvariation_set.all().values_list('product__id', flat=True)\n except ConfigurableProduct.DoesNotExist:\n product_ids = [self.product.id, ]\n\n exclude_status_ids = [\n Adjustment.DELETED,\n Adjustment.REFUNDED,\n Adjustment.VOIDED,\n 
Adjustment.VOUCHER_STANDBY_CREATED,\n Adjustment.VOUCHER_OK_TO_BOOK,\n Adjustment.REFUND_REQUESTED,\n ]\n\n # MOVED = 4\n # VOUCHER_STANDBY_USED = 7\n # REFUND_REQUEST_REJECTED = 9\n\n return OrderItem.objects.filter(\n product__id__in=product_ids).exclude(\n adjustment__status__in=exclude_status_ids).order_by('order__contact__last_name')", "title": "" }, { "docid": "3a9c3f28fba613be583802bb76acc33a", "score": "0.5255522", "text": "def test_retrieve_order(self):\n pass", "title": "" }, { "docid": "83a2386f01b8a167bb67858cbd82a90e", "score": "0.5254814", "text": "def test_order_list(self):\n pass", "title": "" }, { "docid": "173a36db42d038049df8d422af7a89b8", "score": "0.52540237", "text": "def Filter(self, context, args):\n pass", "title": "" }, { "docid": "8a765622ec9101535ab8a94b36471eb7", "score": "0.5253311", "text": "def __init__(self) -> None:\n self.orders = []\n # TODO: use a lock (for when threading is implemented)", "title": "" }, { "docid": "aab648c69e29ecf42b121ac212c0bebc", "score": "0.5252336", "text": "def get_all_orders(self, **params):\n return self._get('allOrders', True, data=params)", "title": "" }, { "docid": "aab648c69e29ecf42b121ac212c0bebc", "score": "0.5252336", "text": "def get_all_orders(self, **params):\n return self._get('allOrders', True, data=params)", "title": "" }, { "docid": "e68a475f5531d5cafd31d7472bf1e2bc", "score": "0.52504164", "text": "def futures_liquidation_orders(self, **params):\n return self._request_futures_api('get', 'ticker/allForceOrders', data=params)", "title": "" }, { "docid": "69672974a31f5ff69a2bae0f3daaf91e", "score": "0.5244003", "text": "def get_queryset(self):\n return self.filter_queryset(Product.objects.all())", "title": "" }, { "docid": "69672974a31f5ff69a2bae0f3daaf91e", "score": "0.5244003", "text": "def get_queryset(self):\n return self.filter_queryset(Product.objects.all())", "title": "" }, { "docid": "f14dceb491285443951125a9b9ce4719", "score": "0.524297", "text": "def onOrderUpdated(self, order):\n pass", "title": "" }, { "docid": "a895958aaaaf645cf3666eec9a87fd26", "score": "0.52407736", "text": "def map_to_order(self, raw_order: HitbtcRawOrderModel) -> HitbtcOrderModel:", "title": "" }, { "docid": "a03bef2286e56ac68a650f97e7c585fa", "score": "0.5228238", "text": "def removeSortCriterion():", "title": "" }, { "docid": "36b4b7f995ea85bfe2c5ef21b5ad9cab", "score": "0.52276313", "text": "def refine_orders(self, portfolio, sized_order):\n order_event = OrderEvent(\n sized_order.ticker,\n sized_order.action,\n sized_order.quantity\n )\n return [order_event]", "title": "" }, { "docid": "76cd7d32b6a9d9cfbfba50b5eaf5827e", "score": "0.52182406", "text": "def filter(self):\n return self.__filter.filter()", "title": "" }, { "docid": "76cd7d32b6a9d9cfbfba50b5eaf5827e", "score": "0.52182406", "text": "def filter(self):\n return self.__filter.filter()", "title": "" }, { "docid": "76cd7d32b6a9d9cfbfba50b5eaf5827e", "score": "0.52182406", "text": "def filter(self):\n return self.__filter.filter()", "title": "" }, { "docid": "e916132e20fa220c0c1e57ea549b1f4c", "score": "0.5214306", "text": "def get_json(self, *, orders):\n return {self.ORDERS: orders}", "title": "" }, { "docid": "e90f6a91fbf12a9bfa205a9f7e82b51e", "score": "0.5209395", "text": "def queryset(self, request, queryset):\n if not self.value() or self.separator not in self.value():\n return queryset\n (low, high) = self.value().split(self.separator)\n if low:\n filter_expr = \"{}__gte\".format(self.filter_on)\n queryset = queryset.filter(**{filter_expr: low})\n if high:\n 
filter_expr = \"{}__lte\".format(self.filter_on)\n queryset = queryset.filter(**{filter_expr: high})\n return queryset", "title": "" }, { "docid": "bcb1d35bb48cd682fbd62b4964cddabb", "score": "0.5209303", "text": "def place_orders(self, **kwargs):\n if not settings.PLACE_ORDER:\n logger.info(\"You are NOT in PLACE_ORDER mode, skip placing order...\")\n order = {}\n order['price'] = self.last_price\n return order\n return self.exchange.place_order(**kwargs)", "title": "" }, { "docid": "2f649e935c3fffaddea41816de0eeec0", "score": "0.520385", "text": "def filters(self):\n return self.filter_sort", "title": "" }, { "docid": "01eb42adcf3e06c172f11e3532efd0c3", "score": "0.5196894", "text": "def __eq__(self, other):\n if not isinstance(other, OrderFilterCriteria):\n return False\n\n return self.__dict__ == other.__dict__", "title": "" }, { "docid": "269b9121034fbf913ab7cdc894797e9a", "score": "0.5194272", "text": "def __iter__(self):\r\n return iter(self.order)", "title": "" }, { "docid": "be49e4476eb5ddc1c18f5176b2d02593", "score": "0.51908284", "text": "def get_queryset(self):\n queryset = FinancialAccountsByAwards.objects.all()\n queryset = self.serializer_class.setup_eager_loading(queryset)\n filtered_queryset = self.filter_records(self.request, queryset=queryset)\n ordered_queryset = self.order_records(self.request, queryset=filtered_queryset)\n return ordered_queryset", "title": "" }, { "docid": "5d0b3569b3782f673a6bd8c3202089a1", "score": "0.5184092", "text": "def test_orders_queryset(self):\n req = RequestFactory().get(\n \"/a/url\", {\"sortfield\": \"name\", \"sortdirection\": \"asc\"})\n qs = Mock()\n self.on_template_response({\"ctx_name\": qs}, request=req)\n\n qs.order_by.assert_called_with(\"name\")", "title": "" }, { "docid": "5f426414b9016c7f3c11724a1d90e769", "score": "0.51838696", "text": "def filter(self, queryset):\r\n for boundfilter in self.boundfilters:\r\n queryset = boundfilter.filter(queryset)\r\n return queryset", "title": "" }, { "docid": "184bd57b24743f6c9cdaa2f82046befd", "score": "0.5181896", "text": "def filter_untracked(orders):\n \n if 'OrderArray' in orders and 'Order' in orders['OrderArray']:\n orders = orders['OrderArray']['Order']\n else:\n orders = []\n \n need_tracking = []\n \n for order in orders:\n \n if 'OrderLineItemID' not in order:\n continue\n try:\n tracking = order['ShippingDetails']['ShipmentTrackingDetails']['ShipmentTrackingNumber']\n except:\n need_tracking.append(order['OrderLineItemID'])\n\n return need_tracking", "title": "" }, { "docid": "69dcd8dfed1123ce4efae0c408a976e5", "score": "0.5176512", "text": "def cancel_orders(self):\n must_cancel = []\n for order in self.gox.orderbook.owns:\n if is_own(order.price):\n must_cancel.append(order)\n \n for order in must_cancel:\n self.gox.cancel(order.oid)", "title": "" }, { "docid": "61550d49eebf7ee9c3986dd60a8815e0", "score": "0.51740074", "text": "def lowerOrder(self):\n self.stub.LowerOrder(filter_pb2.FilterLowerOrderRequest(filter=self.data),\n timeout=Cuebot.Timeout)", "title": "" }, { "docid": "986bea11f0adde17f43e0808580c973c", "score": "0.5172586", "text": "def _filter(self, user_site, queryset):\n\t\traise NotImplementedError", "title": "" }, { "docid": "231061ae52403c05cfad784aef42a77c", "score": "0.51661247", "text": "def _prepare_order_picking(self, cr, uid, order, context=None):\n \n res = super(exchange_order, self)._prepare_order_picking(cr, uid, order, context)\n new_dict={\n 'custody':order.custody,\n 'office':order.office.id,\n 'custody_type':order.custody_type,\n 
'employee_to':order.employee_to.id }\n res.update(new_dict)\n return res", "title": "" }, { "docid": "ec5c508cb171ccf30190ebfd27520a3c", "score": "0.5161118", "text": "def get_open_orders(self):\n from_date = datetime.now() - timedelta(days=2)\n order_hist = self.get_order_history(from_date, datetime.now())\n created_items = [x for x in order_hist if x[\"status\"] == \"CONFIRMED\" and\n x[\"type\"] == \"CREATE\"]\n filtered_list = []\n for created_item in created_items:\n item_to_add = False\n for item in order_hist:\n item_to_add = item\n if item[\"orderId\"] == created_item[\"orderId\"] and item[\"type\"] == \"DELETE\":\n item_to_add = None\n break\n if item_to_add:\n filtered_list.append(item_to_add)\n return filtered_list", "title": "" } ]
02f315b9135f4b22ee9ffddc20ae764a
TABLE(viterbi_combined_cs self) -> pmt_vector_cfloat
[ { "docid": "8edf48582ac73e74d4876415aca92560", "score": "0.54545975", "text": "def TABLE(self):\n return _trellis_swig.viterbi_combined_cs_TABLE(self)", "title": "" } ]
[ { "docid": "9ca7f83d9e04bbe56e55a5794b3300c5", "score": "0.5984601", "text": "def get_bvec():\n return np.array([0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.0000000,\n 0.0000000, 0.0000000, 0.0000000, 0.000007,\n 0.000024, 0.000059, 0.000112, 0.000199,\n 0.00034, 0.000562, 0.00089, 0.001353,\n 0.001992, 0.002857, 0.003971, 0.005378,\n 0.007133, 0.009261, 0.011806, 0.014816,\n 0.018318, 0.022355, 0.026964, 0.032176,\n 0.038026, 0.044548, 0.051773, 0.059728,\n 0.068448, 0.077958, 0.088286, 0.099462,\n 0.111505, 0.124448, 0.138313, 0.153125,\n 0.16891, 0.185689, 0.203491, 0.222333,\n 0.242244, 0.263242, 0.285354, 0.308598,\n 0.332939, 0.358254, 0.384363, 0.411125,\n 0.438391, 0.466003, 0.4938, 0.521619,\n 0.549301, 0.576692, 0.603648, 0.630036,\n 0.655736, 0.680643, 0.704669, 0.727739,\n 0.749797, 0.770798, 0.790717, 0.809536,\n 0.827256, 0.843881, 0.859432, 0.873929,\n 0.887408, 0.8999, 0.911448, 0.922096,\n 0.931881, 0.94086, 0.949064, 0.95655,\n 0.963352, 0.969513, 0.975078, 0.980072,\n 0.984542, 0.9885, 0.991984, 0.995003,\n 0.99763, 1.0000000], dtype=np.float64)", "title": "" }, { "docid": "3c1483e760ed81e2d2aeb2bc7235fc75", "score": "0.5931255", "text": "def vector_field(self, state):\r\n A = state[: (p := len(self.config.network.populations))]\r\n R = state[p : 2*p]\r\n S = 1 - A - R\r\n CAA = _internals._unflat_scalar_triangle(\r\n state[2*p : 2*p + round(p*(p+1)/2)])\r\n CRR = _internals._unflat_scalar_triangle(\r\n state[2*p + round(p*(p+1)/2) : 2*p + p*(p+1)])\r\n CAR = (state[2*p + p*(p+1) :]).reshape((p,p))\r\n CAS = - CAA - CAR\r\n CRS = - CRR - CAR.transpose()\r\n B = self.config.Q.copy()\r\n VarB = np.zeros(p)\r\n CAB = np.zeros((p,p))\r\n CRB = np.zeros((p,p))\r\n CSB = np.zeros((p,p))\r\n for J, K in np.ndindex((p,p)):\r\n B[J] += self.config.network.c[J,K] * A[K]\r\n for L in range(p):\r\n VarB[J] += (self.config.network.c[J,K] \r\n * self.config.network.c[J,L] * CAA[K,L])\r\n CAB[J,K] += self.config.network.c[K,L] * CAA[J,L]\r\n CRB[J,K] += self.config.network.c[K,L] * CAR[L,J]\r\n CSB[J,K] += self.config.network.c[K,L] * CAS[L,J]\r\n f = np.zeros(self.dim)\r\n dCAA = np.zeros((p,p))\r\n dCRR = np.zeros((p,p))\r\n dCAR = np.zeros((p,p))\r\n for J, popJ in enumerate(self.config.network.populations):\r\n SG = np.where(S[J] == 0, 0,\r\n S[J] * popJ.G(B[J] + CSB[J,J]/S[J], VarB[J]))\r\n f[J] = (- popJ.beta * A[J] + popJ.alpha * SG)\r\n f[J+p] = - popJ.gamma * R[J] + popJ.beta * A[J]\r\n for K, popK in enumerate(self.config.network.populations):\r\n if J <= K:\r\n dCAA[J,K] = (- (popJ.beta + popK.beta) * CAA[J,K]\r\n + popJ.alpha * popJ.H(A[K], S[J], B[J],\r\n CAS[K,J], CAB[K,J], CSB[J,J], VarB[J])\r\n + popK.alpha * popK.H(A[J], S[K], B[K],\r\n CAS[J,K], CAB[J,K], CSB[K,K], VarB[K]))\r\n dCRR[J,K] = (- (popJ.gamma + popK.gamma) * CRR[J,K]\r\n + popJ.beta * CAR[J,K] + popK.beta * CAR[K,J])\r\n dCAR[J,K] = (- (popJ.beta + popK.gamma) * CAR[J,K]\r\n + popK.beta * CAA[J,K]\r\n + popJ.alpha * popJ.H(R[K], S[J], B[J],\r\n CRS[K,J], CRB[K,J], 
CSB[J,J], VarB[J]))\r\n f[2*p : 2*p + round(p*(p+1)/2)] = dCAA[np.triu_indices(p)]\r\n f[2*p + round(p*(p+1)/2) : 2*p + p*(p+1)] = dCRR[np.triu_indices(p)]\r\n f[2*p + p*(p+1) :] = dCAR.flatten()\r\n return np.array(f, float)", "title": "" }, { "docid": "951fefbb09c4fe751e6fed95243deb4c", "score": "0.5882273", "text": "def compra(vl):\n return vl*0.1", "title": "" }, { "docid": "7ecec6225c7448e3cf58d7ffff5624e6", "score": "0.58143485", "text": "def vector_field(self, state):\r\n A = state[: (p := len(self.config.network.populations))]\r\n R = state[p : 2*p]\r\n S = 1 - A - R\r\n CAA = _internals._unflat_scalar_triangle(\r\n state[2*p : 2*p + round(p*(p+1)/2)])\r\n CRR = _internals._unflat_scalar_triangle(\r\n state[2*p + round(p*(p+1)/2) : 2*p+p*(p+1)])\r\n CAR = (state[2*p + p*(p+1) :]).reshape((p,p))\r\n CAS = - CAA - CAR\r\n CSR = - CRR - CAR\r\n B = self.config.Q.copy()\r\n VarB = np.zeros(p)\r\n CAB = np.zeros((p,p))\r\n CRB = np.zeros((p,p))\r\n for J, K in np.ndindex((p,p)):\r\n B[J] += self.config.network.c[J,K] * A[K]\r\n for L in range(p):\r\n VarB[J] += (self.config.network.c[J,K] \r\n * self.config.network.c[J,L] * CAA[K,L])\r\n CAB[J,K] += self.config.network.c[K,L] * CAA[J,L]\r\n CRB[J,K] += self.config.network.c[K,L] * CAR[L,J]\r\n f = np.zeros(self.dim)\r\n dCAA = np.zeros((p,p))\r\n dCRR = np.zeros((p,p))\r\n dCAR = np.zeros((p,p))\r\n for J, popJ in enumerate(self.config.network.populations):\r\n f[J] = (- popJ.beta * A[J] + popJ.alpha*popJ.F(B[J]) * S[J]\r\n - popJ.alpha*popJ.dF(B[J]) * (CAB[J,J] + CRB[J,J])\r\n + popJ.alpha/2*popJ.ddF(B[J]) * S[J] * VarB[J])\r\n f[J+p] = - popJ.gamma * R[J] + popJ.beta * A[J]\r\n for K, popK in enumerate(self.config.network.populations):\r\n dCAA[J,K] = (- (popJ.beta + popK.beta) * CAA[J,K]\r\n + popJ.alpha*popJ.F(B[J]) * CAS[K,J]\r\n + popK.alpha*popK.F(B[K]) * CAS[J,K]\r\n + popJ.alpha*popJ.dF(B[J]) * S[J] * CAB[K,J]\r\n + popK.alpha*popK.dF(B[K]) * S[K] * CAB[J,K])\r\n dCRR[J,K] = (- (popJ.gamma + popK.gamma) * CRR[J,K]\r\n + popJ.beta * CAR[J,K] + popK.beta * CAR[K,J])\r\n dCAR[J,K] = (- (popJ.beta + popK.gamma) * CAR[J,K]\r\n + popK.beta * CAA[J,K]\r\n + popJ.alpha*popJ.F(B[J]) * CSR[J,K]\r\n + popJ.alpha*popJ.dF(B[J]) * S[J] * CRB[K,J])\r\n f[2*p : 2*p + round(p*(p+1)/2)] = dCAA[np.triu_indices(p)]\r\n f[2*p + round(p*(p+1)/2) : 2*p + p*(p+1)] = dCRR[np.triu_indices(p)]\r\n f[2*p + p*(p+1) :] = dCAR.flatten()\r\n return np.array(f, float)", "title": "" }, { "docid": "799452951c92718544a09cf3cff926bd", "score": "0.5790976", "text": "def fv(c, n, r, g=0):\n return comp(pv(c, n, r, g), n, r)", "title": "" }, { "docid": "2c95954f9bc27e7c187c56e4e66e6859", "score": "0.576676", "text": "def get_11(D):\n return np.array([\n D[\"comp\"].values,\n D[\"rational\"].values,\n D[\"mem\"].values,\n D[\"localmem\"].values,\n D[\"coalesced\"].values,\n D[\"transfer\"].values,\n D[\"wgsize\"].values,\n (D[\"transfer\"].values / (D[\"comp\"].values + D[\"mem\"].values)), # F1\n (D[\"coalesced\"].values / D[\"mem\"].values), # F2\n ((D[\"localmem\"].values / D[\"mem\"].values) * D[\"wgsize\"].values), # F3\n (D[\"comp\"].values / D[\"mem\"].values), # F4\n ]).T", "title": "" }, { "docid": "9000bcbe09292463664d83c3fff1cc10", "score": "0.56270635", "text": "def comparator(t, vf):\n s = np.zeros(len(t))\n v_ref = p_r_ref * (v_zk / p_r_ref_vals[1])\n if isinstance(v_ref, float):\n v_ref = np.full(len(t), v_ref)\n for i in range(len(t)):\n v_e = v_ref[i] - vf[i]\n if v_e > 0:\n s[i] = v_cc\n else:\n s[i] = v_ss\n return s, vf, v_ref", "title": "" }, { 
"docid": "3fab7f426822055439578a68807b1896", "score": "0.55985355", "text": "def vector_field(self, state):\r\n A = state[: (p := len(self.config.network.populations))]\r\n R = state[p :]\r\n S = 1 - A - R\r\n B = self.config.Q.copy()\r\n for J, K in np.ndindex((p,p)):\r\n B[J] += self.config.network.c[J,K] * A[K]\r\n f = np.zeros(2*p)\r\n for J, popJ in enumerate(self.config.network.populations):\r\n f[J] = - popJ.beta * A[J] + popJ.alpha*popJ.F(B[J]) * S[J]\r\n f[J+p] = 1/self.epsilon * (- popJ.gamma * R[J] + popJ.beta * A[J])\r\n return np.array(f, float)", "title": "" }, { "docid": "091b52d7e28a3dc5e0d8c10b0485ad28", "score": "0.5597578", "text": "def vnl_c_vectorCF_std(p: 'stdcomplexF', n: 'unsigned int') -> \"stdcomplexF\":\n return _vnl_c_vectorPython.vnl_c_vectorCF_std(p, n)", "title": "" }, { "docid": "e5083a91c2adcdec9361f1f23cc81c8b", "score": "0.5587688", "text": "def npv(cf, r):\n return reduce((lambda pv,(n,c): pv+disc(c,n,r)), enumerate(cf), 0.0)", "title": "" }, { "docid": "cfcc9d2332ef489c9fea1fee0e330b19", "score": "0.55780405", "text": "def vector_field(self, state):\r\n A = state[: (p := len(self.config.network.populations))]\r\n R = state[p :]\r\n S = 1 - A - R\r\n B = self.config.Q.copy()\r\n for J, K in np.ndindex((p,p)):\r\n B[J] += self.config.network.c[J,K] * A[K]\r\n f = np.zeros(2*p)\r\n for J, popJ in enumerate(self.config.network.populations):\r\n f[J] = - popJ.beta * A[J] + popJ.alpha*popJ.F(B[J]) * S[J]\r\n f[J+p] = - popJ.gamma * R[J] + popJ.beta * A[J]\r\n return np.array(f, float)", "title": "" }, { "docid": "d792cbff24832d5e491874dd87aeba87", "score": "0.55584365", "text": "def vector_field(self, state):\r\n p = len(self.config.network.populations)\r\n A = state\r\n B = self.config.Q.copy()\r\n for J, K in np.ndindex((p,p)):\r\n B[J] += self.config.network.c[J,K] * A[K]\r\n f = np.zeros(p)\r\n for J, popJ in enumerate(self.config.network.populations):\r\n SJ = 1 - (1 + popJ.beta / popJ.gamma) * A[J]\r\n f[J] = - popJ.beta * A[J] + popJ.alpha*popJ.F(B[J]) * SJ\r\n return np.array(f, float)", "title": "" }, { "docid": "85e95d3e130a5ee9cfdfa577d0704e86", "score": "0.55555737", "text": "def vectorfield(y_,t,p_):\n s = 1.0000000000000001e-05;\n a = 3.9026399999999999e+00;\n b = 2.8697200000000000e+00;\n CaM_Ca1 = y_[0]\n CaM_Ca2 = y_[1]\n CaM_Ca3 = y_[2]\n CaM_Ca4 = y_[3]\n PP2B_CaM = y_[4]\n PP2B_CaM_Ca1 = y_[5]\n PP2B_CaM_Ca2 = y_[6]\n PP2B_CaM_Ca3 = y_[7]\n PP2Bc = y_[8]\n CaMKII_CaM = y_[9]\n CaMKII_CaM_Ca1 = y_[10]\n CaMKII_CaM_Ca2 = y_[11]\n CaMKII_CaM_Ca3 = y_[12]\n CaMKIIc = y_[13]\n pCaMKII = y_[14]\n pCaMKIIaut = y_[15]\n pCaMKII_Ca3 = y_[16]\n pCaMKII_Ca2 = y_[17]\n pCaMKII_Ca1 = y_[18]\n pCaMKII_Ca0 = y_[19]\n PP1__pCaMKIIaut = y_[20]\n\n kf__CaM__Ca = p_[0]\n kf__CaM_Ca1__Ca = p_[1]\n kf__CaM_Ca2__Ca = p_[2]\n kf__CaM_Ca3__Ca = p_[3]\n kf__CaM__PP2B = p_[4]\n kf__CaM_Ca1__PP2B = p_[5]\n kf__CaM_Ca2__PP2B = p_[6]\n kf__CaM_Ca3__PP2B = p_[7]\n kf__CaM_Ca4__PP2B = p_[8]\n kf__PP2B_CaM__Ca = p_[9]\n kf__PP2B_CaM_Ca1__Ca = p_[10]\n kf__PP2B_CaM_Ca2__Ca = p_[11]\n kf__PP2B_CaM_Ca3__Ca = p_[12]\n KD__CaM_Ca3__Ca = p_[13]\n KD__CaM_Ca2__Ca = p_[14]\n KD__CaM_Ca1__Ca = p_[15]\n KD__CaM__Ca = p_[16]\n KD__CaM_Ca4__PP2B = p_[17]\n KD__PP2B_CaM_Ca3__Ca = p_[18]\n KD__PP2B_CaM_Ca2__Ca = p_[19]\n KD__PP2B_CaM_Ca1__Ca = p_[20]\n KD__PP2B_CaM__Ca = p_[21]\n kf__CaM__CaMKII = p_[22]\n kf__CaMKII_CaM_Ca3__Ca = p_[23]\n kf__CaMKII_CaM_Ca2__Ca = p_[24]\n kf__CaMKII_CaM_Ca1__Ca = p_[25]\n kf__CaMKII_CaM__Ca = p_[26]\n kf__CaM_Ca1__CaMKII = p_[27]\n 
kf__CaM_Ca2__CaMKII = p_[28]\n kf__CaM_Ca3__CaMKII = p_[29]\n kf__CaM_Ca4__CaMKII = p_[30]\n KD__CaM_Ca4__CaMKII = p_[31]\n KD__CaMKII_CaM_Ca3__Ca = p_[32]\n KD__CaMKII_CaM_Ca2__Ca = p_[33]\n KD__CaMKII_CaM_Ca1__Ca = p_[34]\n KD__CaMKII_CaM__Ca = p_[35]\n kf__pCaMKII_Ca3__Ca = p_[36]\n kf__CaM__pCaMKIIaut = p_[37]\n kf__CaM_Ca1__pCaMKIIaut = p_[38]\n kf__CaM_Ca2__pCaMKIIaut = p_[39]\n kf__CaM_Ca3__pCaMKIIaut = p_[40]\n kf__pCaMKII_Ca2__Ca = p_[41]\n kf__pCaMKII_Ca1__Ca = p_[42]\n kf__CaM_Ca4__pCaMKIIaut = p_[43]\n kf__pCaMKII_Ca0__Ca = p_[44]\n KD__pCaMKII_Ca3__Ca = p_[45]\n KD__pCaMKII_Ca2__Ca = p_[46]\n KD__pCaMKII_Ca1__Ca = p_[47]\n KD__pCaMKII_Ca0__Ca = p_[48]\n KD__CaM_Ca4__pCaMKIIaut = p_[49]\n kp__pairedCaMKIIc__CaMKIIc = p_[50]\n kf__PP1__pCaMKIIaut = p_[51]\n kr__PP1__pCaMKIIaut = p_[52]\n kcat__PP1__pCaMKIIaut = p_[53]\n Ca_set = p_[54]\n PP1_0 = p_[55]\n CaMKII_0 = p_[56]\n CaM_0 = p_[57]\n PP2B_0 = p_[58]\n\n KD__CaM_Ca3__PP2B = KD__CaM_Ca4__PP2B/KD__PP2B_CaM_Ca3__Ca*KD__CaM_Ca3__Ca\n KD__CaM_Ca2__PP2B = KD__CaM_Ca2__Ca*KD__CaM_Ca3__PP2B/KD__PP2B_CaM_Ca2__Ca\n KD__CaM_Ca1__PP2B = 1.0/KD__PP2B_CaM_Ca1__Ca*KD__CaM_Ca1__Ca*KD__CaM_Ca2__PP2B\n KD__CaM__PP2B = 1.0/KD__PP2B_CaM__Ca*KD__CaM__Ca*KD__CaM_Ca1__PP2B\n KD__CaM_Ca3__CaMKII = KD__CaM_Ca4__CaMKII*KD__CaM_Ca3__Ca/KD__CaMKII_CaM_Ca3__Ca\n KD__CaM_Ca2__CaMKII = KD__CaM_Ca2__Ca/KD__CaMKII_CaM_Ca2__Ca*KD__CaM_Ca3__CaMKII\n KD__CaM_Ca1__CaMKII = KD__CaM_Ca1__Ca/KD__CaMKII_CaM_Ca1__Ca*KD__CaM_Ca2__CaMKII\n KD__CaM__CaMKII = KD__CaM_Ca1__CaMKII*KD__CaM__Ca/KD__CaMKII_CaM__Ca\n KD__CaM_Ca3__pCaMKIIaut = 1.0/KD__pCaMKII_Ca3__Ca*KD__CaM_Ca4__pCaMKIIaut*KD__CaM_Ca3__Ca\n KD__CaM_Ca2__pCaMKIIaut = KD__CaM_Ca2__Ca/KD__pCaMKII_Ca2__Ca*KD__CaM_Ca3__pCaMKIIaut\n KD__CaM_Ca1__pCaMKIIaut = KD__CaM_Ca2__pCaMKIIaut*KD__CaM_Ca1__Ca/KD__pCaMKII_Ca1__Ca\n KD__CaM__pCaMKIIaut = KD__CaM_Ca1__pCaMKIIaut*KD__CaM__Ca/KD__pCaMKII_Ca0__Ca\n kr__CaM_Ca3__Ca = kf__CaM_Ca3__Ca*KD__CaM_Ca3__Ca\n kr__CaM_Ca2__Ca = KD__CaM_Ca2__Ca*kf__CaM_Ca2__Ca\n kr__CaM_Ca1__Ca = kf__CaM_Ca1__Ca*KD__CaM_Ca1__Ca\n kr__CaM__Ca = KD__CaM__Ca*kf__CaM__Ca\n kr__CaM_Ca4__PP2B = KD__CaM_Ca4__PP2B*kf__CaM_Ca4__PP2B\n kr__CaM_Ca3__PP2B = kf__CaM_Ca3__PP2B*KD__CaM_Ca3__PP2B\n kr__CaM_Ca2__PP2B = KD__CaM_Ca2__PP2B*kf__CaM_Ca2__PP2B\n kr__CaM_Ca1__PP2B = kf__CaM_Ca1__PP2B*KD__CaM_Ca1__PP2B\n kr__CaM__PP2B = kf__CaM__PP2B*KD__CaM__PP2B\n kr__PP2B_CaM_Ca3__Ca = KD__PP2B_CaM_Ca3__Ca*kf__PP2B_CaM_Ca3__Ca\n kr__PP2B_CaM_Ca2__Ca = kf__PP2B_CaM_Ca2__Ca*KD__PP2B_CaM_Ca2__Ca\n kr__PP2B_CaM_Ca1__Ca = KD__PP2B_CaM_Ca1__Ca*kf__PP2B_CaM_Ca1__Ca\n kr__PP2B_CaM__Ca = KD__PP2B_CaM__Ca*kf__PP2B_CaM__Ca\n kr__CaM_Ca4__CaMKII = kf__CaM_Ca4__CaMKII*KD__CaM_Ca4__CaMKII\n kr__CaM_Ca3__CaMKII = kf__CaM_Ca3__CaMKII*KD__CaM_Ca3__CaMKII\n kr__CaM_Ca2__CaMKII = kf__CaM_Ca2__CaMKII*KD__CaM_Ca2__CaMKII\n kr__CaM_Ca1__CaMKII = KD__CaM_Ca1__CaMKII*kf__CaM_Ca1__CaMKII\n kr__CaM__CaMKII = KD__CaM__CaMKII*kf__CaM__CaMKII\n kr__CaMKII_CaM_Ca3__Ca = kf__CaMKII_CaM_Ca3__Ca*KD__CaMKII_CaM_Ca3__Ca\n kr__CaMKII_CaM_Ca2__Ca = kf__CaMKII_CaM_Ca2__Ca*KD__CaMKII_CaM_Ca2__Ca\n kr__CaMKII_CaM_Ca1__Ca = kf__CaMKII_CaM_Ca1__Ca*KD__CaMKII_CaM_Ca1__Ca\n kr__CaMKII_CaM__Ca = kf__CaMKII_CaM__Ca*KD__CaMKII_CaM__Ca\n kr__pCaMKII_Ca3__Ca = KD__pCaMKII_Ca3__Ca*kf__pCaMKII_Ca3__Ca\n kr__pCaMKII_Ca2__Ca = KD__pCaMKII_Ca2__Ca*kf__pCaMKII_Ca2__Ca\n kr__pCaMKII_Ca1__Ca = kf__pCaMKII_Ca1__Ca*KD__pCaMKII_Ca1__Ca\n kr__pCaMKII_Ca0__Ca = kf__pCaMKII_Ca0__Ca*KD__pCaMKII_Ca0__Ca\n kr__CaM_Ca4__pCaMKIIaut = 
KD__CaM_Ca4__pCaMKIIaut*kf__CaM_Ca4__pCaMKIIaut\n kr__CaM_Ca3__pCaMKIIaut = kf__CaM_Ca3__pCaMKIIaut*KD__CaM_Ca3__pCaMKIIaut\n kr__CaM_Ca2__pCaMKIIaut = KD__CaM_Ca2__pCaMKIIaut*kf__CaM_Ca2__pCaMKIIaut\n kr__CaM_Ca1__pCaMKIIaut = KD__CaM_Ca1__pCaMKIIaut*kf__CaM_Ca1__pCaMKIIaut\n kr__CaM__pCaMKIIaut = kf__CaM__pCaMKIIaut*KD__CaM__pCaMKIIaut\n logistic = 1.0/( exp(-10.0*t)+1.0)\n Ca = logistic*Ca_set\n PP1 = -PP1__pCaMKIIaut+PP1_0\n CaMKII = -PP1_0-pCaMKII_Ca2-pCaMKII-CaMKII_CaM_Ca2+CaMKII_0-pCaMKII_Ca1-pCaMKIIaut-CaMKII_CaM_Ca3-CaMKII_CaM-pCaMKII_Ca0-pCaMKII_Ca3+PP1-CaMKIIc-CaMKII_CaM_Ca1\n CaM = -PP2B_CaM_Ca3+PP1_0-PP2B_CaM+CaMKII-CaM_Ca2-PP2Bc-CaMKII_0-PP2B_CaM_Ca1-CaM_Ca3+pCaMKIIaut+CaM_0-PP2B_CaM_Ca2-CaM_Ca4-PP1-CaM_Ca1\n PP2B = PP2B_0-PP2B_CaM_Ca3-PP2B_CaM-PP2Bc-PP2B_CaM_Ca1-PP2B_CaM_Ca2\n totalCaMKII = pCaMKII_Ca2+CaMKII+pCaMKII+CaMKII_CaM_Ca2+pCaMKII_Ca1+pCaMKIIaut+CaMKII_CaM_Ca3+CaMKII_CaM+pCaMKII_Ca0+pCaMKII_Ca3+CaMKIIc+CaMKII_CaM_Ca1\n ActiveCaMKII = pCaMKII_Ca2+pCaMKII+pCaMKII_Ca1+pCaMKIIaut+pCaMKII_Ca0+pCaMKII_Ca3+CaMKIIc\n r = 1.0/( totalCaMKII+s)*( pCaMKII+CaMKIIc)\n pairedCaMKIIc = 1.0/( b*r+1.0)*a*(r*r)\n ReactionFlux1 = -kr__CaM__Ca*CaM_Ca1+CaM*Ca*kf__CaM__Ca\n ReactionFlux2 = -CaM_Ca2*kr__CaM_Ca1__Ca+kf__CaM_Ca1__Ca*Ca*CaM_Ca1\n ReactionFlux3 = CaM_Ca2*kf__CaM_Ca2__Ca*Ca-CaM_Ca3*kr__CaM_Ca2__Ca\n ReactionFlux4 = -kr__CaM_Ca3__Ca*CaM_Ca4+CaM_Ca3*Ca*kf__CaM_Ca3__Ca\n ReactionFlux5 = kf__CaM__PP2B*CaM*PP2B-PP2B_CaM*kr__CaM__PP2B\n ReactionFlux6 = -PP2B_CaM_Ca1*kr__CaM_Ca1__PP2B+kf__CaM_Ca1__PP2B*PP2B*CaM_Ca1\n ReactionFlux7 = CaM_Ca2*kf__CaM_Ca2__PP2B*PP2B-kr__CaM_Ca2__PP2B*PP2B_CaM_Ca2\n ReactionFlux8 = -PP2B_CaM_Ca3*kr__CaM_Ca3__PP2B+kf__CaM_Ca3__PP2B*CaM_Ca3*PP2B\n ReactionFlux9 = -PP2Bc*kr__CaM_Ca4__PP2B+kf__CaM_Ca4__PP2B*PP2B*CaM_Ca4\n ReactionFlux10 = PP2B_CaM*Ca*kf__PP2B_CaM__Ca-PP2B_CaM_Ca1*kr__PP2B_CaM__Ca\n ReactionFlux11 = PP2B_CaM_Ca1*Ca*kf__PP2B_CaM_Ca1__Ca-kr__PP2B_CaM_Ca1__Ca*PP2B_CaM_Ca2\n ReactionFlux12 = -PP2B_CaM_Ca3*kr__PP2B_CaM_Ca2__Ca+kf__PP2B_CaM_Ca2__Ca*Ca*PP2B_CaM_Ca2\n ReactionFlux13 = PP2B_CaM_Ca3*Ca*kf__PP2B_CaM_Ca3__Ca-kr__PP2B_CaM_Ca3__Ca*PP2Bc\n ReactionFlux14 = -kr__CaM__CaMKII*CaMKII_CaM+CaMKII*CaM*kf__CaM__CaMKII\n ReactionFlux15 = -kr__CaM_Ca1__CaMKII*CaMKII_CaM_Ca1+CaMKII*kf__CaM_Ca1__CaMKII*CaM_Ca1\n ReactionFlux16 = CaMKII*CaM_Ca2*kf__CaM_Ca2__CaMKII-CaMKII_CaM_Ca2*kr__CaM_Ca2__CaMKII\n ReactionFlux17 = -CaMKII_CaM_Ca3*kr__CaM_Ca3__CaMKII+kf__CaM_Ca3__CaMKII*CaMKII*CaM_Ca3\n ReactionFlux18 = CaMKII*kf__CaM_Ca4__CaMKII*CaM_Ca4-kr__CaM_Ca4__CaMKII*CaMKIIc\n ReactionFlux19 = Ca*CaMKII_CaM*kf__CaMKII_CaM__Ca-kr__CaMKII_CaM__Ca*CaMKII_CaM_Ca1\n ReactionFlux20 = Ca*kf__CaMKII_CaM_Ca1__Ca*CaMKII_CaM_Ca1-CaMKII_CaM_Ca2*kr__CaMKII_CaM_Ca1__Ca\n ReactionFlux21 = -kr__CaMKII_CaM_Ca2__Ca*CaMKII_CaM_Ca3+CaMKII_CaM_Ca2*kf__CaMKII_CaM_Ca2__Ca*Ca\n ReactionFlux22 = Ca*CaMKII_CaM_Ca3*kf__CaMKII_CaM_Ca3__Ca-kr__CaMKII_CaM_Ca3__Ca*CaMKIIc\n ReactionFlux23 = kf__CaM_Ca4__pCaMKIIaut*pCaMKIIaut*CaM_Ca4-pCaMKII*kr__CaM_Ca4__pCaMKIIaut\n ReactionFlux24 = kf__CaM_Ca3__pCaMKIIaut*CaM_Ca3*pCaMKIIaut-kr__CaM_Ca3__pCaMKIIaut*pCaMKII_Ca3\n ReactionFlux25 = kf__CaM_Ca2__pCaMKIIaut*CaM_Ca2*pCaMKIIaut-pCaMKII_Ca2*kr__CaM_Ca2__pCaMKIIaut\n ReactionFlux26 = -kr__CaM_Ca1__pCaMKIIaut*pCaMKII_Ca1+pCaMKIIaut*kf__CaM_Ca1__pCaMKIIaut*CaM_Ca1\n ReactionFlux27 = -pCaMKII_Ca0*kr__CaM__pCaMKIIaut+CaM*pCaMKIIaut*kf__CaM__pCaMKIIaut\n ReactionFlux28 = Ca*kf__pCaMKII_Ca0__Ca*pCaMKII_Ca0-kr__pCaMKII_Ca0__Ca*pCaMKII_Ca1\n ReactionFlux29 = 
-pCaMKII_Ca2*kr__pCaMKII_Ca1__Ca+kf__pCaMKII_Ca1__Ca*pCaMKII_Ca1*Ca\n ReactionFlux30 = -kr__pCaMKII_Ca2__Ca*pCaMKII_Ca3+pCaMKII_Ca2*Ca*kf__pCaMKII_Ca2__Ca\n ReactionFlux31 = -kr__pCaMKII_Ca3__Ca*pCaMKII+kf__pCaMKII_Ca3__Ca*Ca*pCaMKII_Ca3\n ReactionFlux32 = kp__pairedCaMKIIc__CaMKIIc*CaMKIIc*pairedCaMKIIc\n ReactionFlux33 = pCaMKIIaut*PP1*kf__PP1__pCaMKIIaut-PP1__pCaMKIIaut*kr__PP1__pCaMKIIaut\n ReactionFlux34 = PP1__pCaMKIIaut*kcat__PP1__pCaMKIIaut\n\n f_ = numpy.zeros((21,))\n f_[0] = -ReactionFlux26+ReactionFlux1-ReactionFlux2-ReactionFlux15-ReactionFlux6\n f_[1] = -ReactionFlux7+ReactionFlux2-ReactionFlux3-ReactionFlux16-ReactionFlux25\n f_[2] = -ReactionFlux4-ReactionFlux17-ReactionFlux8-ReactionFlux24+ReactionFlux3\n f_[3] = ReactionFlux4-ReactionFlux23-ReactionFlux18-ReactionFlux9\n f_[4] = -ReactionFlux10+ReactionFlux5\n f_[5] = ReactionFlux10-ReactionFlux11+ReactionFlux6\n f_[6] = ReactionFlux7+ReactionFlux11-ReactionFlux12\n f_[7] = ReactionFlux8+ReactionFlux12-ReactionFlux13\n f_[8] = ReactionFlux9+ReactionFlux13\n f_[9] = ReactionFlux14-ReactionFlux19\n f_[10] = -ReactionFlux20+ReactionFlux15+ReactionFlux19\n f_[11] = ReactionFlux20-ReactionFlux21+ReactionFlux16\n f_[12] = ReactionFlux17+ReactionFlux21-ReactionFlux22\n f_[13] = -ReactionFlux32+ReactionFlux18+ReactionFlux22\n f_[14] = ReactionFlux32+ReactionFlux23+ReactionFlux31\n f_[15] = -ReactionFlux26-ReactionFlux23-ReactionFlux33-ReactionFlux27-ReactionFlux24-ReactionFlux25\n f_[16] = ReactionFlux30+ReactionFlux24-ReactionFlux31\n f_[17] = ReactionFlux29-ReactionFlux30+ReactionFlux25\n f_[18] = -ReactionFlux29+ReactionFlux26+ReactionFlux28\n f_[19] = ReactionFlux27-ReactionFlux28\n f_[20] = ReactionFlux33-ReactionFlux34\n\n return f_", "title": "" }, { "docid": "3a694e96a6c0c8b0bf932caa761288a6", "score": "0.55490404", "text": "def objCBV(p,f,ferr,bv):\n nres = ( f - modelCBV(p,bv) ) /ferr\n return np.sum( abs(nres) )", "title": "" }, { "docid": "0e2a0f2845dfe451ee3abcaead7ce8ce", "score": "0.55135787", "text": "def vector_field(self, state):\r\n A, R, CAA, CRR, CAR = state[0], state[1], state[2], state[3], state[4]\r\n S = 1 - A - R\r\n c = self.config.network.c[0,0]\r\n B = c * A + self.config.Q[0]\r\n F = self._pop.F(B)\r\n dF = self._pop.dF(B)\r\n ddF = self._pop.ddF(B)\r\n f = [0, 0, 0, 0, 0]\r\n f[0] = (- self._pop.beta*A + self._pop.alpha*F * S \r\n - self._pop.alpha*dF * c * (CAA + CAR)\r\n + self._pop.alpha/2*ddF * c**2 * S * CAA )\r\n f[1] = - self._pop.gamma*R + self._pop.beta*A\r\n f[2] = (- 2*(self._pop.beta + self._pop.alpha*F) * CAA \r\n - 2*self._pop.alpha*F*CAR \r\n + 2*self._pop.alpha*dF * c* S * CAA )\r\n f[3] = - 2*self._pop.gamma*CRR + 2*self._pop.beta*CAR\r\n f[4] = (- (self._pop.beta + self._pop.gamma \r\n + self._pop.alpha*F) * CAR + self._pop.beta*CAA \r\n - self._pop.alpha*F*CRR \r\n + self._pop.alpha*dF * c * S * CAR )\r\n return np.array(f, float)", "title": "" }, { "docid": "b7649682985e6ee1342ca8227a1811a3", "score": "0.55111176", "text": "def getComponents(self):\n oo = self\n skpdITER = oo.wts.shape[0]\n N = oo.smpx.shape[1] - 2\n _rts = _N.empty((skpdITER, oo.TR, N+1, oo.R, 1)) # real components N = ddN\n _zts = _N.empty((skpdITER, oo.TR, N+1, oo.C, 1)) # imag components \n\n for it in range(skpdITER):\n if len(_N.where(_N.abs(oo.allalfas[it*oo.BsmpxSkp]) == 0)[0]) == 0:\n b, c = dcmpcff(alfa=oo.allalfas[it*oo.BsmpxSkp])\n for tr in range(oo.TR):\n for r in range(oo.R):\n _rts[it, tr, :, r] = b[r] * oo.uts[it, tr, r]\n\n for z in range(oo.C):\n #print \"z %d\" % z\n cf1 = 2*c[2*z].real\n gam 
= oo.allalfas[it, oo.R+2*z]\n cf2 = 2*(c[2*z].real*gam.real + c[2*z].imag*gam.imag)\n _zts[it, tr, 0:N+1, z] = cf1*oo.wts[it, tr, z, 1:N+2] - cf2*oo.wts[it, tr, z, 0:N+1]\n\n oo.rts = _N.array(_rts[:, :, 1:, :, 0])\n oo.zts = _N.array(_zts[:, :, 1:, :, 0])\n\n zts_stds = _N.mean(_N.std(oo.zts, axis=2), axis=1) # iter x C\n\n srtd = _N.argsort(zts_stds, axis=1) # ITER x C\n for it in range(skpdITER):\n oo.fs[it] = oo.fs[it, srtd[it, ::-1]]\n oo.amps[it] = oo.amps[it, srtd[it, ::-1]]\n for tr in range(oo.TR):\n oo.zts[it, tr] = oo.zts[it, tr, :, srtd[it, ::-1]].T", "title": "" }, { "docid": "02919a572ee52279f0888868ef64a2d4", "score": "0.54837215", "text": "def vnl_c_vectorCF_sum(v: 'stdcomplexF', n: 'unsigned int') -> \"stdcomplexF\":\n return _vnl_c_vectorPython.vnl_c_vectorCF_sum(v, n)", "title": "" }, { "docid": "11ee431652366f541eb9db6858a066f8", "score": "0.5468267", "text": "def D(self):\n return _trellis_swig.viterbi_combined_cs_D(self)", "title": "" }, { "docid": "156cc45c7ff5a6c09d3a8bf22d79da88", "score": "0.5464944", "text": "def vector_representation(self) -> List[float]:\r\n raise NotImplementedError", "title": "" }, { "docid": "3db69230ac40104e1d6dfd7a4f414221", "score": "0.54582417", "text": "def _find_mic(cell, vectors, pbc=[True, True, True]):\n vecs = np.asarray(vectors).reshape(-1, 3)\n if any(pbc):\n vecs = np.einsum(\"ji,nj->ni\", np.linalg.inv(cell), vecs)\n vecs[:, pbc] -= np.rint(vecs)[:, pbc]\n vecs = np.einsum(\"ji,nj->ni\", cell, vecs)\n return vecs.reshape(np.asarray(vectors).shape)", "title": "" }, { "docid": "5d273681a5b31f23ac4bc113119b8519", "score": "0.545079", "text": "def PB(v):\n m = (v[0] + v[1])/2.0\n return np.array([m, m])", "title": "" }, { "docid": "49698fe12decf78f8502efce5989d017", "score": "0.5444081", "text": "def compute_Vth(self):\n b1 = -np.tile(self.Gc * self.Ec, self.N)\n # Interactome rounded to 4, so we followed suit.\n s_eq = round(self.ar / (self.ar + 2 * self.ad), 4)\n b3 = -s_eq * (self.Gs @ self.E)\n\n m1 = -self.Gc * np.identity(self.N)\n # N x 1, where each item is a row sum\n Gg_row_sums = self.Gg.sum(axis = 1)\n # m2 is a diagonal matrix with the negative row sums as the values\n m2 = - np.diag(Gg_row_sums)\n Gs_row_sums = self.Gs.sum(axis = 1)\n m3 = - s_eq * np.diag(Gs_row_sums)\n # I think paper is missing m4. It shouldn't be the case that A is a completely diagonal matrix.\n # However, interactome github code seems to have done this correctly.\n # Our implementation is mathematically equivalent to the github code.\n m4 = self.Gg\n\n A = m1 + m2 + m3 + m4\n # b = b1 + b3\n b = b1 + b3 - self.I_ext\n self.A = A\n self.Vth = np.reshape(linalg.solve(A, b), self.N)", "title": "" }, { "docid": "154ab4374f157b57c8547e106b1f9213", "score": "0.54439616", "text": "def TABLE(self):\n return _trellis_swig.viterbi_combined_cs_sptr_TABLE(self)", "title": "" }, { "docid": "3f8fa8078afd4df17335661f2cd4ff0a", "score": "0.54413015", "text": "def vnl_c_vectorF_std(p: 'float const *', n: 'unsigned int') -> \"double\":\n return _vnl_c_vectorPython.vnl_c_vectorF_std(p, n)", "title": "" }, { "docid": "a06f4bdc01ea881858ebd6a614e104a2", "score": "0.5440043", "text": "def test02c(self):\n N = int(1e1)\n t1 = blz.fromiter(((i, i*2.) for i in xrange(N+1)),\n dtype='i4,f8', count=N+1, rootdir=self.rootdir)\n t2 = blz.fromiter(((i, i*2.) for i in xrange(N+1, N*2)),\n dtype='i4,f8', count=N-1, rootdir=self.rootdir)\n t3 = blz.fromiter(((i, i*2.) 
for i in xrange(N*2, N*3)),\n dtype='i4,f8', count=N, rootdir=self.rootdir)\n vt = blz.vtable((t1, t2, t3), rootdir=self.rootdir)\n ra = np.fromiter(((i, i*2.) for i in xrange(N*3)), dtype='i4,f8')\n assert_array_equal(vt[3:-4], ra[3:-4],\n \"vtable values are not correct\")", "title": "" }, { "docid": "0d3f58ca325bb4354a3164542562154f", "score": "0.54378843", "text": "def mtxvg_vector(in31, in21):\n return _cspyce0.mtxvg_vector(in31, in21)", "title": "" }, { "docid": "fe1841d41941f4fa138b2ae9133d30a4", "score": "0.54287887", "text": "def intermediate_vector_set(tr, q, bc, n_cores=None):\n amps = intermediate_amplitudes(tr, q, bc, n_cores=n_cores).transpose()\n w = np.arange(1, 1 + len(tr))\n w = 1.0 / np.concatenate((w, w[::-1][1:]))\n sf = [w * np.correlate(a, a, 'full') for a in amps]\n return np.asarray(sf)", "title": "" }, { "docid": "82fddb2c7ae429b021197621a745b07b", "score": "0.54283834", "text": "def vchpot(q_array, c_cheb, c_comb):\n cheb_tk = np.array(np.split(c_cheb, c_cheb.shape[0] / CHEBDIM))\n cheb_tk_mk = np.array(np.split(cheb_tk, CCHEB_SLICE)[0:-1])\n v_matrices = []\n for kdof, m_kp in enumerate(np.delete(DTEN_DIM, CONTR_DOF)):\n v_kp = np.zeros((q_array[kdof].shape[0], m_kp))\n for i_kp, val in enumerate(q_array[kdof]):\n for j_kp in np.arange(m_kp):\n v_kp[i_kp, j_kp] = cheby.chebval(\n val, cheb_tk_mk[kdof][j_kp])\n v_matrices.append(v_kp)\n v_matrices = np.array(v_matrices)\n\n prod = c_comb.reshape(DTEN_DIM, order='F')\n for idx, elem in enumerate(v_matrices):\n prod = tl.tenalg.mode_dot(prod, elem, JOTAS[idx])\n\n return prod", "title": "" }, { "docid": "92c1917bd98a4995f9969cbeb675eb37", "score": "0.5426283", "text": "def bruteForceLCF_p(pos,vol,rMax=20,bNum=20):\n\n dd = N.zeros(bNum)\n dv = N.zeros(bNum)\n vv = N.zeros(bNum)\n\n np = len(vol)\n for i in range(np):\n for j in range(i+1,np):\n r = N.sqrt(N.sum((pos[i]-pos[j])**2))\n b = int(N.floor(r*bNum/rMax))\n if b < bNum:\n dd[b] = dd[b]+1.0\n vv[b] = vv[b]+vol[i]*vol[j]\n dv[b] = dv[b]+vol[i]+vol[j] # dv+vd\n# print i\n\n return dd,dv,vv,dd/vv-1.0, (dd-dv)/vv+1.0", "title": "" }, { "docid": "723483f4fd0f35c84ae996c527ec7efc", "score": "0.54186666", "text": "def bloch_components(self):\n return bcomp(self.data)", "title": "" }, { "docid": "a942bf65fb1c5be99923f46d11a49e25", "score": "0.5405841", "text": "def find_vector_m(A,B):\n\tm = Z.Zeidel_met(A,B,B, 0.0001)\n\tm.insert(0, 0)\n\tm.append(0)\n\treturn m", "title": "" }, { "docid": "8c0e7e49be414b6c86964d6e2e6f5464", "score": "0.54036075", "text": "def vnl_c_vectorCF_allocate_T(n: 'unsigned long long const') -> \"stdcomplexF *\":\n return _vnl_c_vectorPython.vnl_c_vectorCF_allocate_T(n)", "title": "" }, { "docid": "aa5328a601009d153e69eed9cfafff78", "score": "0.5401937", "text": "def D(self):\n return _trellis_swig.viterbi_combined_cs_sptr_D(self)", "title": "" }, { "docid": "4c518033f235e611d0b3e98f82bed128", "score": "0.5395937", "text": "def volpym_b(self, a, b, c, d, p, ab, bb, cb, db, pb):\n fourth = 1.0/4.0\n volpymb = 1.0\n tempb = ((a[1]-c[1])*(b[2]-d[2])-(a[2]-c[2])*(b[1]-d[1]))*volpymb\n tempb0 = -(fourth*tempb)\n tempb1 = (p[0]-fourth*(a[0]+b[0]+c[0]+d[0]))*volpymb\n tempb2 = (b[2]-d[2])*tempb1\n tempb3 = (a[1]-c[1])*tempb1\n tempb4 = -((b[1]-d[1])*tempb1)\n tempb5 = -((a[2]-c[2])*tempb1)\n tempb6 = ((a[2]-c[2])*(b[0]-d[0])-(a[0]-c[0])*(b[2]-d[2]))*volpymb\n tempb7 = -(fourth*tempb6)\n tempb8 = (p[1]-fourth*(a[1]+b[1]+c[1]+d[1]))*volpymb\n tempb9 = (b[0]-d[0])*tempb8\n tempb10 = (a[2]-c[2])*tempb8\n tempb11 = -((b[2]-d[2])*tempb8)\n tempb12 = 
-((a[0]-c[0])*tempb8)\n tempb13 = ((a[0]-c[0])*(b[1]-d[1])-(a[1]-c[1])*(b[0]-d[0]))*volpymb\n tempb14 = -(fourth*tempb13)\n tempb15 = (p[2]-fourth*(a[2]+b[2]+c[2]+d[2]))*volpymb\n tempb16 = (b[1]-d[1])*tempb15\n tempb17 = (a[0]-c[0])*tempb15\n tempb18 = -((b[0]-d[0])*tempb15)\n tempb19 = -((a[1]-c[1])*tempb15)\n pb[0] = pb[0] + tempb\n ab[0] = ab[0] + tempb16 + tempb11 + tempb0\n bb[0] = bb[0] + tempb19 + tempb10 + tempb0\n cb[0] = cb[0] + tempb0 - tempb11 - tempb16\n db[0] = db[0] + tempb0 - tempb10 - tempb19\n ab[1] = ab[1] + tempb18 + tempb7 + tempb2\n cb[1] = cb[1] + tempb7 - tempb18 - tempb2\n bb[2] = bb[2] + tempb14 + tempb12 + tempb3\n db[2] = db[2] + tempb14 - tempb12 - tempb3\n ab[2] = ab[2] + tempb14 + tempb9 + tempb4\n cb[2] = cb[2] + tempb14 - tempb9 - tempb4\n bb[1] = bb[1] + tempb17 + tempb7 + tempb5\n db[1] = db[1] + tempb7 - tempb17 - tempb5\n pb[1] = pb[1] + tempb6\n pb[2] = pb[2] + tempb13", "title": "" }, { "docid": "253d061dd4303fb76764bcfb455b0c22", "score": "0.5391092", "text": "def evaluate_field_V0(vec, x, p, Nbase, T, bc):\n \n if bc == True:\n N = bsp.collocation_matrix(T, p, x, bc)\n \n elif bc == False:\n N = bsp.collocation_matrix(T, p, x, bc)[:, 1:-1]\n \n else: \n N = bsp.collocation_matrix(T, p, x, False)\n \n eva = np.dot(N, vec) \n \n return eva", "title": "" }, { "docid": "43a9ac61b37c967b5ba4bf12d2984220", "score": "0.53870124", "text": "def V2_cubic_bm(band1, band2, band3, q1, q2, q3, \\\n Ubov1, Ubov2, Ubov3, Ubovm1, Ubovm2, Ubovm3):\n \n V2 = 0.0 \n \n# =============================================================================\n# tmp, Ubov1 = GLSW.eigensystem(q1)\n# tmp, Ubov2 = GLSW.eigensystem(q2)\n# tmp, Ubov3 = GLSW.eigensystem(q3)\n# tmp, Ubovm1 = GLSW.eigensystem(-q1)\n# tmp, Ubovm2 = GLSW.eigensystem(-q2)\n# tmp, Ubovm3 = GLSW.eigensystem(-q3)\n# =============================================================================\n \n for N in range(2):\n for Np in range(2):\n \n for bond in range(num_bond):\n # bond = 10\n bond_vec = delta_ij[:, bond]\n sub1 = sub_idx[bond, 0]\n sub2 = sub_idx[bond, 1]\n \n In = num_sub*N + sub1\n Jn = num_sub*N + sub2\n Inp = num_sub*Np + sub1\n Jnp = num_sub*Np + sub2\n \n tFc1 = Fc_symm(Jn, Jn, Jnp, \\\n band1, band2, band3, Ubovm1, Ubovm2, Ubov3, \\\n q1, q2, q3, bond_vec, 0)\n \n tFd1 = Fd_symm(Jn, Jn, Jnp, \\\n band3, band2, band1, Ubov3, Ubovm2, Ubovm1, \\\n -q3, -q2, -q1, bond_vec, 0)\n \n tFc2 = Fc_symm(In, In, Inp, \\\n band1, band2, band3, Ubovm1, Ubovm2, Ubov3, \\\n q1, q2, q3, bond_vec, 0)\n \n tFd2 = Fd_symm(In, In, Inp, \\\n band3, band2, band1, Ubov3, Ubovm2, Ubovm1, \\\n -q3, -q2, -q1, bond_vec, 0)\n \n\n V2 += JJJ[bond, N, Np]*tFc1 + (JJJ[bond, N, Np]*tFd1).conj() \\\n + III[bond, N, Np]*tFc2 + (III[bond, N, Np]*tFd2).conj() \n \n for M in range(2):\n \n Im = num_sub*M + sub1\n Jm = num_sub*M + sub2\n \n tFc1 = Fc_symm(Jn, Jnp, Im, \\\n band1, band2, band3, Ubovm1, Ubovm2, Ubov3, \\\n q1, q2, q3, bond_vec, 2)\n \n tFd1 = Fd_symm(Jn, Jnp, Im, \\\n band3, band2, band1, Ubov3, Ubovm2, Ubovm1, \\\n -q3, -q2, -q1, bond_vec, 2)\n \n tFc2 = Fc_symm(In, Inp, Jm, \\\n band1, band2, band3, Ubovm1, Ubovm2, Ubov3, \\\n q1, q2, q3, bond_vec, 1)\n \n tFd2 = Fd_symm(In, Inp, Jm, \\\n band3, band2, band1, Ubov3, Ubovm2, Ubovm1, \\\n -q3, -q2, -q1, bond_vec, 1)\n\n\n V2 += 2.0*(JJI[bond, N, Np, M]*tFc1 \\\n + (JJI[bond, N, Np, M]*tFd1).conj() \\\n + IIJ[bond, N, Np, M]*tFc2 \\\n + (IIJ[bond, N, Np, M]*tFd2).conj()) \n\n \n for sublat in range(num_sub):\n \n In = num_sub*N + sublat\n Inp = num_sub*Np + sublat\n \n 
factor1 = thi[sublat, :] @ f3[:, N, Np] \n factor2 = thi[sublat, :] @ f3[:, N, Np].conj()\n \n tFc = Fc_symm(In, In, Inp, \\\n band1, band2, band3, Ubovm1, Ubovm2, Ubov3, \\\n q1, q2, q3, 0.0, 0)\n tFd = Fd_symm(In, In, Inp, \\\n band3, band2, band1, Ubov3, Ubovm2, Ubovm1, \\\n -q3, -q2, -q1, 0.0, 0)\n\n V2 += factor1*tFc + factor2*tFd.conj()\n\n return V2", "title": "" }, { "docid": "c4cf7c2f22f1a1fc1f0813b842c4826f", "score": "0.5382787", "text": "def vnl_c_vectorUI_std(p: 'unsigned int const *', n: 'unsigned int') -> \"double\":\n return _vnl_c_vectorPython.vnl_c_vectorUI_std(p, n)", "title": "" }, { "docid": "a017db87e385830f40f7c65294abb77a", "score": "0.5380621", "text": "def unit_vector(vector):", "title": "" }, { "docid": "7f8fe0925041c7252b1e7addf3fa389e", "score": "0.53794664", "text": "def V_B(self):\n return self.b_B * self.l + self.c_B * self.k", "title": "" }, { "docid": "c1b81490e3cb728d714bca802126fdfc", "score": "0.5369362", "text": "def bp_comm_to_v(C):\n return crypto.point_mulinv8(C)", "title": "" }, { "docid": "a0c4591ffa70bc5d0ff85da0b0edba64", "score": "0.536516", "text": "def __idiv__(self, value: 'stdcomplexF') -> \"vnl_matrixCF &\":\n return _vnl_matrixPython.vnl_matrixCF___idiv__(self, value)", "title": "" }, { "docid": "11dd40955d0eb6b2a461a00d90e8c9c9", "score": "0.5364965", "text": "def e() :\n return x('b')+0.01*sum(x('prec','flex','resl','team','pmat'))", "title": "" }, { "docid": "3dbb1e88743614d82aecc236936bd4be", "score": "0.5342576", "text": "def vnl_c_vectorUC_std(p: 'unsigned char const *', n: 'unsigned int') -> \"double\":\n return _vnl_c_vectorPython.vnl_c_vectorUC_std(p, n)", "title": "" }, { "docid": "a2700838a51e1b24883bdaa3cf4af3cf", "score": "0.5339565", "text": "def test01c(self):\n N = int(1e1)\n t1 = blz.fromiter(((i, i*2.) for i in xrange(N+1)),\n dtype='i4,f8', count=N+1, rootdir=self.rootdir)\n t2 = blz.fromiter(((i, i*2.) for i in xrange(N+1, N*2)),\n dtype='i4,f8', count=N-1, rootdir=self.rootdir)\n t3 = blz.fromiter(((i, i*2.) for i in xrange(N*2, N*3)),\n dtype='i4,f8', count=N, rootdir=self.rootdir)\n vt = blz.vtable((t1, t2, t3), rootdir=self.rootdir)\n ra = np.fromiter(((i, i*2.) 
for i in xrange(N*3)), dtype='i4,f8')\n assert_array_equal(vt[:], ra, \"vtable values are not correct\")", "title": "" }, { "docid": "f9ba6e26c8648f38577b828ae2797cd1", "score": "0.5337941", "text": "def D(self):\n return _trellis_swig.viterbi_combined_fi_sptr_D(self)", "title": "" }, { "docid": "d7ffe7128aab2c552c9e9400eaea072e", "score": "0.533129", "text": "def CTEDs(self):\n\t\tdiag = dia_matrix((self.lplus.diagonal(),0),shape = lplus.shape)\n\t\toneD = matrix(ones(lplus.shape))*diag\n\t\treturn self.volume*self.ReffDists()", "title": "" }, { "docid": "48de665563c71a367a4fe6c3e3336011", "score": "0.53308004", "text": "def vec_b(t, u, M_bypass=set(), mass=Decimal(1), sign=None):\n\n B = vec_B(t, u, M_bypass=M_bypass, mass=mass, sign=sign)\n return B[0] / (1 + mass ** 2) ** ((t + u) / 2), B[1] / (1 + mass ** 2) ** ((t + u) / 2)", "title": "" }, { "docid": "f13ba965951ba2e60bc4f975cbde75f0", "score": "0.5315599", "text": "def is_collinear_with_sage(vector, basis):\n bitlen = max(v.bit_length() for v in itertools.chain(basis, [vector]))\n sage_basis = [encode_bigint_bitvec(v, bitlen) for v in basis]\n sage_basis.append(encode_bigint_bitvec(vector, bitlen))\n mat_rank = sage.all.matrix(GF2, sage_basis).rank()\n return mat_rank != len(basis) + 1", "title": "" }, { "docid": "0ccd39922bf18ae6250c2f7b1176480a", "score": "0.52839506", "text": "def covComponents ( self ) :\n r = []\n w = self.combiner.weights()\n for c in self.covs :\n a = c.sim ( w )\n r.append ( a )\n \n return tuple ( r )", "title": "" }, { "docid": "2baf6259996e381bb235d6f40fb4cd63", "score": "0.5283562", "text": "def vnl_c_vectorCF_two_nrm2(p: 'stdcomplexF', n: 'unsigned int') -> \"float\":\n return _vnl_c_vectorPython.vnl_c_vectorCF_two_nrm2(p, n)", "title": "" }, { "docid": "979e079c7c3a39422d8f5bbe6997b933", "score": "0.52828497", "text": "def evaluate_field_V1(vec, x, p, Nbase, T, bc):\n \n t = T[1:-1]\n \n if bc == True:\n D = bsp.collocation_matrix(t, p - 1, x, bc, normalize=True)\n \n else:\n D = bsp.collocation_matrix(t, p - 1, x, False, normalize=True)\n \n eva = D.dot(vec)\n \n return eva", "title": "" }, { "docid": "62a34e89fd03fa0cd4704e01e641cdab", "score": "0.5277215", "text": "def TABLE(self):\n return _trellis_swig.viterbi_combined_fs_sptr_TABLE(self)", "title": "" }, { "docid": "92e8c1e1e6810e597f949edd8af6e128", "score": "0.52757776", "text": "def utest_efficient_vector():\n v = EFFICIENT_VECTOR()\n v.add(1,10)\n v.add(2,15)\n for e in v:\n print e\n pass", "title": "" }, { "docid": "c4ce5c6cc08898481189f8adfd578b4f", "score": "0.52702385", "text": "def bruteForceLCF(pos,vol,rMax=20,bNum=20):\n\n dd = N.zeros(bNum)\n dv = N.zeros(bNum)\n vv = N.zeros(bNum)\n\n i = 0\n for p in pos:\n rs = N.sqrt(N.sum((pos[i+1:]-p)**2,axis=1))\n bs = (N.floor(rs*bNum/rMax)).astype(int)\n# 2x slower than python loop\n# for b in range(bNum):\n# dd[b] = N.sum(N.where(bs==b,1,0))\n# dv[b] = N.sum(N.where(bs==b,vol[i+1:]+vol[i],0))\n# vv[b] = N.sum(N.where(bs==b,vol[i+1:]*vol[i],0))\n j = 0\n for b in bs:\n if b < bNum:\n dd[b] = dd[b]+1.0\n vv[b] = vv[b]+vol[i]*vol[i+1+j]\n dv[b] = dv[b]+vol[i]+vol[i+1+j] # dv+vd\n j = j+1\n i = i+1\n print i\n\n return dd,dv,vv,dd/vv-1.0, (dd-dv)/vv+1.0", "title": "" }, { "docid": "8dfdf3fced4868cff1a30afbff49c04f", "score": "0.52571326", "text": "def getVEFP(bfs1,bfs2,q,r):\n nbf1 = len(bfs1)\n nbf2 = len(bfs2)\n V = zeros((nbf1,nbf2),'d')\n for i in xrange(nbf1):\n bfi = bfs1[i]\n for j in xrange(nbf2):\n bfj = bfs2[j]\n v = 0\n for a in xrange(len(q)):\n v += q[a] * 
bfi.nuclear(bfj,tuple(r[a]))\n V[i,j] = v\n return V", "title": "" }, { "docid": "81b359b4c7f8a2487a7791ec139906a3", "score": "0.5248966", "text": "def BC(self, res=-1.0):\n return _pygamma.TTable1D_BC(self, res)", "title": "" }, { "docid": "c3b8ab4cba3473fdb85a6c5715bbcd1e", "score": "0.5238635", "text": "def CN(atoms, h):\n # image_vector = np.array([p for p in itertools.product([-1, 0, 1], repeat=3)])\n # lower and higher limit of bond length in Millerite unitcell to find bond types of ions\n MilBonds = [2.255, 2.3, 2.55]\n\n for i in range(len(atoms)):\n atoms[i][2][1] = 0\n atoms[i][2][2] = 0\n atoms[i][2][3] = 0\n atoms[i][2][4] = 0\n\n for i in range(len(atoms)):\n si = np.asarray(atoms[i][1])\n for j in range(i + 1, len(atoms)):\n sj = np.asarray(atoms[j][1])\n # xsd files contain fractional coordinates. This will calculate shortest distance considering PBC\n sij = si - sj - np.rint(si - sj)\n rij = np.matmul(sij, h) # converting to cartesian coordinates\n rij_norm = np.linalg.norm(rij) # length of distance vector\n if rij_norm < MilBonds[2]: # if bonded atoms are same type, it is b4!\n set1 = set(atoms[i][4])\n set2 = set(atoms[j][4])\n set1.add(j)\n set2.add(i)\n atoms[i][4] = list(set1)\n atoms[j][4] = list(set2)\n if atoms[i][2][0] == atoms[j][2][0]:\n atoms[i][2][4] += 1\n atoms[j][2][4] += 1\n else:\n if rij_norm < MilBonds[0]:\n atoms[i][2][1] += 1\n atoms[j][2][1] += 1\n elif rij_norm > MilBonds[1]:\n atoms[i][2][3] += 1\n atoms[j][2][3] += 1\n else:\n atoms[i][2][2] += 1\n atoms[j][2][2] += 1\n atoms[i][3] = fromdigits(atoms[i][2], 3) # get bcode from b\n return atoms", "title": "" }, { "docid": "c721b5ea6f30f24860115cc21b645110", "score": "0.5232334", "text": "def get_gsfc_delta_v_mc(aperture, tvs_ref, row, verbose=True):\n\n n_mc = 1000\n\n results = {}\n for flavour in ['calibrated_', 'corrected_']:\n results[flavour] = {}\n results[flavour]['attributes'] = []\n results[flavour]['alignment_parameters'] = {}\n results[flavour]['sigma_alignment_parameters'] = {}\n\n # 1/0\n for key, value in alignment_parameter_mapping['hst_fgs'].items():\n # attribute = '{}{}'.format(flavour, value)\n attribute = '{}{}_arcsec'.format(flavour, key)\n # alignment_parameters[key] = getattr(aperture, attribute)\n results[flavour]['alignment_parameters'][key] = row[attribute]\n results[flavour]['sigma_alignment_parameters'][key] = row['sigma_{}'.format(attribute)]\n results[flavour]['attributes'].append(attribute)\n\n results[flavour]['recomputed_tvs'] = aperture.compute_tvs_matrix(v2_arcsec=results[flavour]['alignment_parameters']['v2_position'],\n v3_arcsec=results[flavour]['alignment_parameters']['v3_position'],\n pa_deg=results[flavour]['alignment_parameters']['v3_angle']/3600.)\n\n tvs_mc = np.zeros((3, 3, n_mc))\n delta_v_mc = np.zeros((3, n_mc))\n\n seed = 1234567\n np.random.seed(seed)\n v2_arcsec_mc = results[flavour]['alignment_parameters']['v2_position'] \\\n + np.random.normal(0., results[flavour]['sigma_alignment_parameters']['v2_position'], n_mc)\n np.random.seed(seed+1)\n v3_arcsec_mc = results[flavour]['alignment_parameters']['v3_position'] \\\n + np.random.normal(0., results[flavour]['sigma_alignment_parameters']['v3_position'], n_mc)\n np.random.seed(seed+2)\n v3_angle_mc = results[flavour]['alignment_parameters']['v3_angle'] \\\n + np.random.normal(0., results[flavour]['sigma_alignment_parameters']['v3_angle'], n_mc)\n\n for j in range(n_mc):\n tvs_mc[:,:,j] = aperture.compute_tvs_matrix(v2_arcsec=v2_arcsec_mc[j],\n v3_arcsec=v3_arcsec_mc[j],\n 
pa_deg=v3_angle_mc[j]/3600.)\n\n delta_v_mc[:, j] = get_gsfc_delta_v(tvs_mc[:,:,j], tvs_ref, aperture, verbose=False)\n if verbose:\n print('{} TVS offsets ({} Monte Carlo sets)'.format(flavour, n_mc))\n for j in range(3):\n results[flavour]['delta_v{}_mas'.format(j+1)] = np.mean(delta_v_mc[j, :])\n results[flavour]['sigma_delta_v{}_mas'.format(j+1)] = np.std(delta_v_mc[j, :])\n if verbose:\n print('MC: delta V{} {:>6.0f} +/- {:>3.0f} (rms) mas'.format(j+1, results[flavour]['delta_v{}_mas'.format(j+1)], results[flavour]['sigma_delta_v{}_mas'.format(j+1)])\n )\n results[flavour]['mean_tvs'] = np.mean(tvs_mc, axis=2)\n results[flavour]['sigma_mean_tvs'] = np.std(tvs_mc, axis=2)\n\n results['DATE-OBS'] = row['DATE-OBS']\n return results", "title": "" }, { "docid": "25b04c96ac3ad1809583902772cc3e04", "score": "0.5231384", "text": "def test_CotrendingBasisVectors_nonretrieval():\n\n #***\n # Constructor\n # Create some generic CotrendingBasisVectors objects\n\n # Generic CotrendingBasisVectors object\n dataTbl = Table([[1, 2, 3], [False, True, False], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]], \n names=('CADENCENO', 'GAP', 'VECTOR_1', 'VECTOR_3'))\n cbvTime = Time([443.51090033, 443.53133457, 443.55176891], format='bkjd')\n cbvs = CotrendingBasisVectors(data=dataTbl, time=cbvTime)\n assert isinstance(cbvs, CotrendingBasisVectors)\n assert cbvs.cbv_indices == [1, 3]\n assert np.all(cbvs.time.value == [443.51090033, 443.53133457, 443.55176891])\n \n # Auto-initiate 'GAP' and 'CADENCENO'\n dataTbl = Table([[2.0, 3.0, 4.0], [3.0, 4.0, 5.0]], \n names=('VECTOR_3', 'VECTOR_12'))\n cbvTime = Time([443.51090033, 443.53133457, 443.55176891], format='bkjd')\n cbvs = CotrendingBasisVectors(data=dataTbl, time=cbvTime)\n assert isinstance(cbvs, CotrendingBasisVectors)\n assert cbvs.cbv_indices == [3, 12]\n assert np.all(cbvs.gap_indicators == [False, False, False])\n assert np.all(cbvs.cadenceno == [0, 1, 2])\n\n \n #***\n # _to_designmatrix\n # Make sure CBVs are the columns in the returned 2-dim array\n dataTbl = Table([[1, 2, 3], [False, True, False], \n [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], \n names=('CADENCENO', 'GAP', 'VECTOR_1', 'VECTOR_2', 'VECTOR_3'))\n cbvTime = Time([1569.44053967, 1569.44192856, 1569.44331746], format='btjd')\n cbvs = CotrendingBasisVectors(dataTbl, cbvTime)\n cbv_dm_name = 'test cbv set'\n # CBV index 5 does not exists and should be ingored\n cbv_designmatrix = cbvs.to_designmatrix(cbv_indices=[1,3,5], name=cbv_dm_name)\n assert cbv_designmatrix.shape == (3,2)\n assert np.all(cbv_designmatrix['VECTOR_1'] == np.array([1.0, 2.0, 3.0]))\n assert np.all(cbv_designmatrix['VECTOR_3'] == np.array([7.0, 8.0, 9.0]))\n assert cbv_designmatrix.name == cbv_dm_name\n # CBV #2 was not requested, so make sure it is not present\n with pytest.raises(KeyError):\n cbv_designmatrix['VECTOR_2']\n \n #***\n # plot\n ax = cbvs.plot(cbv_indices=[1,2], ax=None)\n assert isinstance(ax, matplotlib.axes.Axes)\n \n # There is no CBV # 5 so the third cbv_indices entry will be ignored\n ax = cbvs.plot(cbv_indices=[1,2,5], ax=ax)\n assert isinstance(ax, matplotlib.axes.Axes)\n \n # CBVs use 1-based indexing. 
Throw error if requesting CBV index 0\n with pytest.raises(ValueError):\n ax = cbvs.plot(cbv_indices=[0,1,2], ax=ax)\n\n # Only 'all' or specific CBV indices can be requested\n with pytest.raises(ValueError):\n ax = cbvs.plot('Doh!')\n\n \n #***\n # align\n # Set up some cadenceno such that both CBV is trimmed and NaNs inserted\n sample_lc = TessLightCurve(time=[1,2,3,4,6,7], flux=[1,2,3,4,6,7],\n flux_err=[0.1,0.1, 0.1, 0.1, 0.1, 0.1], cadenceno=[1,2,3,4,6,7])\n dataTbl = Table([[1, 2, 3, 5, 6], [False, True, False, False, False], \n [1.0, 2.0, 3.0, 5.0, 6.0 ]], \n names=('CADENCENO', 'GAP', 'VECTOR_1'))\n cbvTime = Time([1569.43915078, 1569.44053967, 1569.44192856,\n 1569.44470635, 1569.44609524], format='btjd')\n cbvs = CotrendingBasisVectors(dataTbl, cbvTime)\n cbvs = cbvs.align(sample_lc)\n assert np.all(sample_lc.cadenceno == cbvs.cadenceno)\n assert len(cbvs.cadenceno) == 6\n assert len(sample_lc.flux) == 6\n assert np.all(cbvs.gap_indicators.value[[1,3,5]])\n # Ignore the warning in to_designmatric due to a low rank matrix\n with warnings.catch_warnings():\n # Instantiating light curves with NaN times will yield a warning\n warnings.simplefilter(\"ignore\", LightkurveWarning)\n cbv_designmatrix = cbvs.to_designmatrix(cbv_indices=[1])\n assert np.all(cbv_designmatrix['VECTOR_1'][[0,1,2,4]] == [1.0, 2.0, 3.0, 6.0])\n assert np.all(np.isnan(cbv_designmatrix['VECTOR_1'][[3,5]]))\n\n #***\n # interpolate\n nLcCadences = 20\n xLc = np.linspace(0.0, 2*np.pi, num=nLcCadences)\n sample_lc = TessLightCurve(time=xLc, flux=np.sin(xLc), flux_err=np.full(nLcCadences, 0.1),\n cadenceno=np.arange(nLcCadences))\n nCbvCadences = 10\n xCbv = np.linspace(0.0, 2*np.pi, num=nCbvCadences)\n dataTbl = Table([np.arange(nCbvCadences), np.full(nCbvCadences, False), \n np.cos(xCbv), np.sin(xCbv+np.pi*0.125)], \n names=('CADENCENO', 'GAP', 'VECTOR_1', 'VECTOR_2'))\n cbvTime = Time(xCbv, format='btjd')\n cbvs = CotrendingBasisVectors(dataTbl, cbvTime)\n cbv_interpolated = cbvs.interpolate(sample_lc, extrapolate=False)\n assert np.all(cbv_interpolated.time.value == sample_lc.time.value)\n # Extrapolation test\n xCbv = np.linspace(0.0, 1.5*np.pi, num=nCbvCadences)\n dataTbl = Table([np.arange(nCbvCadences), np.full(nCbvCadences, False), \n np.cos(xCbv), np.sin(xCbv+np.pi*0.125)], \n names=('CADENCENO', 'GAP', 'VECTOR_1', 'VECTOR_2'))\n cbvTime = Time(xCbv, format='btjd')\n cbvs = CotrendingBasisVectors(dataTbl, cbvTime)\n cbv_interpolated = cbvs.interpolate(sample_lc, extrapolate=False)\n assert np.all(np.isnan(\n cbv_interpolated['VECTOR_1'].value[\n np.nonzero(cbv_interpolated.time.value > 1.5*np.pi)[0]]))\n cbv_interpolated = cbvs.interpolate(sample_lc, extrapolate=True)\n assert np.all(np.logical_not(np.isnan(\n cbv_interpolated['VECTOR_1'].value[\n np.nonzero(cbv_interpolated.time.value > 1.5*np.pi)[0]])))", "title": "" }, { "docid": "41bffd0949185d7aaaf4d0f3feade0f5", "score": "0.52286494", "text": "def calcSimpleLCF(pos,vol,rMax=20,bNum=20):\n kdt = scipy.spatial.KDTree(pos)\n pairs = kdt.query_pairs(rMax)\n print len(pairs)\n# work around kdtree crashes on nangaku\n# n = len(vol)\n# pairs = [(i,j) for i in range(n) for j in range(n)]\n\n\n dd = N.zeros(bNum)\n dv = N.zeros(bNum)\n vv = N.zeros(bNum)\n dvi = N.zeros(bNum)\n vivi = N.zeros(bNum)\n\n for (i,j) in pairs:\n r = N.sqrt(N.sum((pos[i]-pos[j])**2))\n b = int(N.floor(r*bNum/rMax))\n if b < bNum:\n dd[b] = dd[b]+1.0\n vv[b] = vv[b]+vol[i]*vol[j]\n dv[b] = dv[b]+vol[i]+vol[j] # dv+vd\n vivi[b] = vivi[b]+1/(vol[i]*vol[j])\n dvi[b] = 
dvi[b]+1/vol[i]+1/vol[j] # dv+vd\n \n return dd,dv,vv,dd/vv-1.0, (dd-dv)/vv+1.0,dvi,vivi,vivi/dd-1,(vivi-dvi)/dd+1.0", "title": "" }, { "docid": "0c5fb4fd41d2dd4451a6208f32886cc5", "score": "0.5225711", "text": "def vnl_c_vectorSC_std(p: 'signed char const *', n: 'unsigned int') -> \"double\":\n return _vnl_c_vectorPython.vnl_c_vectorSC_std(p, n)", "title": "" }, { "docid": "0432a213adbcf41269f55c4fb4c9a291", "score": "0.5219411", "text": "def v(self,pp,p,l): \n \n # first overall prefact of 1pi exchange part (cancel 2pi factors!)\n prefact=self.A\n \n mat=prefact*self._g(pp,p,l)\n\n if (l==0): # add s-wave counter term \n mat+=self.C0*np.exp(-(pp**2+p**2)/self.cutoff**2) # 4pi is take into account by spherical harmonics for l=0\n \n return mat", "title": "" }, { "docid": "0e8400f55b24074097f3825e7e273cae", "score": "0.52157426", "text": "def TABLE(self):\n return _trellis_swig.viterbi_combined_ci_sptr_TABLE(self)", "title": "" }, { "docid": "4676ff7a02607e3cd5ca2a92479c2874", "score": "0.52145976", "text": "def get_vector(self, lbda=0.5):\n return (1-lbda)*self.little_m + lbda*self.big_m", "title": "" }, { "docid": "7f13b60a690d6d46ed33e131c44b0555", "score": "0.5211862", "text": "def test_magnetic_volume_contribution():\n\n TDB = \"\"\"\n ELEMENT FE BCC_A2 55.847 4489.0 27.2797 ! \n TYPE_DEFINITION % SEQ * !\n TYPE_DEFINITION A GES AMEND_PHASE_DESCRIPTION @ MAGNETIC -1 0.4 !\n PHASE BCC_A2 %A 1 1 !\n CONSTITUENT BCC_A2 :FE: !\n PARAMETER G(BCC_A2,FE;0) 0.01 T; 6000.00 N !\n PARAMETER TC(BCC_A2,FE;0) 298.15 +1043 -1e-7*P; 6000 N !\n PARAMETER BMAGN(BCC_A2,FE;0) 298.15 +2.22; 6000 N !\n \n PARAMETER V0(BCC_A2,FE;0) 0.01 1E-6; 6000 N !\n PARAMETER VA(BCC_A2,FE;0) 0.01 1E-6*T; 6000 N !\n \"\"\"\n\n db = Database(TDB)\n res1 = calculate(db, ['FE'], 'BCC_A2', T=500, P=101325, N=1, output='molar_volume')\n res2 = calculate(db, ['FE'], 'BCC_A2', T=500, P=10e9, N=1, output='molar_volume')\n\n assert np.allclose(np.squeeze(res1['molar_volume']).values, 1.857e-6)\n assert np.allclose(np.squeeze(res2['molar_volume']).values, 1.0e-6)", "title": "" }, { "docid": "2bcb16e791284fbefe827266273e40d6", "score": "0.5198625", "text": "def vnl_c_vectorLD_std(p: 'long double const *', n: 'unsigned int') -> \"long double\":\n return _vnl_c_vectorPython.vnl_c_vectorLD_std(p, n)", "title": "" }, { "docid": "03c8903e353faec8661f7357728e575c", "score": "0.51967573", "text": "def vectorfield(t , y):\r\n M0 = y[0]*n_d\r\n M1 = y[1]*n_d*v1\r\n M2 = y[2]*n_d*v1*v1\r\n S = y[3]\r\n \r\n #pdb.set_trace()\r\n \r\n if y[0] > 1e-99 and y[1] > 1e-99 and y[2] > 1e-99:\r\n vg = M1*M1 / ( M0 ** (1.5) * M2 ** (0.5))\r\n if vg > 0:\r\n rg = (3.0*vg/(4.0*PI))**(1.0/3)\r\n linsig = 1.0/9 * ma.log(M0*M2/(M1*M1))\r\n if linsig > 0:\r\n sd = ma.exp(linsig**0.5)\r\n else:\r\n sd = 1.0\r\n else:\r\n rg = 0.0\r\n linsig = 0.0\r\n sd = 1.0\r\n else:\r\n vg = 0.0\r\n rg = 0.0\r\n linsig = 0.0\r\n sd = 1.0\r\n \r\n \r\n # Call all the functions that are required here and store the values in appropriate variables\r\n # unpack\r\n com0 , com2 = coag(sd,rg,COAG)\r\n coefm1 , coefsx , coefm2 = cond(sd,vg,COND)\r\n kstar , x11 = nucl(S,NUCL)\r\n R = reac()\r\n\r\n \r\n ######################################## NUCLEATION ############################################ \r\n dxNUC_S = x11 / ns * kstar * tau\r\n dxNUC_N = x11 / n_d * tau\r\n dxNUC_V = x11 / n_d * kstar * tau\r\n dxNUC_W = x11 / n_d * kstar * kstar * tau\r\n \r\n \r\n ######################################## REACTION ############################################ \r\n dxREA_S = R / (ns / 
tau)\r\n dxREA_N = 0.0\r\n dxREA_V = 0.0\r\n dxREA_W = 0.0\r\n \r\n \r\n ######################################## CONDENSATION ############################################ \r\n if y[0] > 1e-50 and y[1] > 1e-50:\r\n dxCON_S = coefsx * y[0] * ( S - 1.0 ) * tau /v1 \r\n dxCON_V = coefm1 * y[0] * ( S - 1.0 ) * tau /v1 * ns/n_d\r\n dxCON_W = coefm2 * y[1] * ( S - 1.0 ) * tau /v1 * ns/n_d\r\n else:\r\n dxCON_S = 0.0 \r\n dxCON_V = 0.0\r\n dxCON_W = 0.0\r\n \r\n \r\n ######################################## COAGULATION ################################################## \r\n if y[0] > 1e-50 and y[1] > 1e-50 and y[2] > 1e-50:\r\n dxCOA_N = com0 * y[0] ** 2.0 * n_d * tau\r\n dxCOA_W = com2 * y[1] ** 2.0 * n_d * tau\r\n else:\r\n dxCOA_N = 0.0\r\n dxCOA_W = 0.0\r\n \r\n \r\n \r\n\r\n ############ Gathering all the phenomenon together into f(t,y) #########################################\r\n f = [dxNUC_N - dxCOA_N,\r\n dxNUC_V + dxCON_V,\r\n dxNUC_W + dxCON_W + dxCOA_W,\r\n dxREA_S - dxNUC_S - dxCON_S]\r\n return f", "title": "" }, { "docid": "571669c5800aa6a512e05e771941041b", "score": "0.51929146", "text": "def comp(c, n, r):\n return float(c) * ((1+r)**n)", "title": "" }, { "docid": "a9e1b8bd407aab978c7aed0517af25c9", "score": "0.5188679", "text": "def matrixAssembly_V0(p, Nbase, T, bc):\n \n \n el_b = bsp.breakpoints(T, p)\n ne = len(el_b) - 1\n\n pts_loc, wts_loc = np.polynomial.legendre.leggauss(p + 1)\n pts, wts = bsp.quadrature_grid(el_b, pts_loc, wts_loc)\n\n d = 1\n basis = bsp.basis_ders_on_quad_grid(T, p, pts, d)\n\n M = np.zeros((Nbase, Nbase))\n C = np.zeros((Nbase, Nbase))\n\n for ie in range(ne):\n for il in range(p + 1):\n for jl in range(p + 1):\n i = ie + il\n j = ie + jl\n\n value_m = 0.\n value_c = 0.\n\n for g in range(p + 1):\n value_m += wts[ie, g]*basis[ie, il, 0, g]*basis[ie, jl, 0, g]\n value_c += wts[ie, g]*basis[ie, il, 0, g]*basis[ie, jl, 1, g]\n\n M[i, j] += value_m\n C[i, j] += value_c\n \n if bc == True:\n M[:p, :] += M[-p:, :]\n M[:, :p] += M[:, -p:]\n M = M[:M.shape[0] - p, :M.shape[1] - p]\n \n C[:p, :] += C[-p:, :]\n C[:, :p] += C[:, -p:]\n C = C[:C.shape[0] - p, :C.shape[1] - p]\n \n elif bc == False:\n M = M[1:-1, 1:-1]\n C = C[1:-1, 1:-1]\n \n \n return M, C", "title": "" }, { "docid": "7273fdc457fd6bd039d0bb7fac8201d4", "score": "0.5179404", "text": "def vnl_c_vectorD_std(p: 'double const *', n: 'unsigned int') -> \"double\":\n return _vnl_c_vectorPython.vnl_c_vectorD_std(p, n)", "title": "" }, { "docid": "fcfd5c8a76cf9b7d7e422a3aef626019", "score": "0.51753265", "text": "def get_4(D):\n return np.array([\n (D[\"transfer\"].values / (D[\"comp\"].values + D[\"mem\"].values)), # F1\n (D[\"coalesced\"].values / D[\"mem\"].values), # F2\n ((D[\"localmem\"].values / D[\"mem\"].values) * D[\"wgsize\"].values), # F3\n (D[\"comp\"].values / D[\"mem\"].values), # F4\n ]).T", "title": "" }, { "docid": "3126336692801388c6b73aef0024c24d", "score": "0.51752067", "text": "def _fc_vc_from_f(self, k, f, v) :\n fc = []\n vc = []\n for ix, (f0, v0, d) in enumerate(zip(f,v, self.ixd)):\n fc0 = [f0/d]*d\n vc0 = [v0/d]*d\n if self.use_conditional :\n fc0=[(f0*self.erf[k,ix]/self.vf[k,ix])/d]*d\n\n # take vc0 as a ratio of it's conditional variance over unconditional variance\n # and apply to bar ix's unconditional variance\n ratio0 = (self.vr[k,ix]-self.erf[k,ix]*self.erf[k,ix]/self.vf[k,ix])/self.vr[k,ix]\n\n #print ('ratio0', ratio0)\n i0 = self.ixf[ix]+1-d\n vc0 = ratio0*self.vr[(k+i0+np.arange(d).astype(int))%self.n, 0]\n\n fc = np.r_[fc, fc0]\n vc = np.r_[vc, vc0]\n\n return 
fc,vc", "title": "" }, { "docid": "96d5c17248d2e6b4bc79a57d9220954b", "score": "0.51707196", "text": "def get_comp_pm(self):\n vals = self.get_pm()\n v = (vals[0]**2+vals[2]**2)**0.5\n e = ((vals[0]*vals[1])**2+(vals[2]*vals[3])**2)**0.5/v\n return v,e", "title": "" }, { "docid": "c3fb150050f2270f53400af6923d70a6", "score": "0.5169451", "text": "def __nonzero__(self):\n return _runtime_swig.gr_vector_vector_complexf___nonzero__(self)", "title": "" }, { "docid": "e87932ade0d4378c0df35e6a4313c11a", "score": "0.51666456", "text": "def vnl_c_vectorF_sum(v: 'float const *', n: 'unsigned int') -> \"float\":\n return _vnl_c_vectorPython.vnl_c_vectorF_sum(v, n)", "title": "" }, { "docid": "47e25b9628b1e6daa35e886e3eab2a27", "score": "0.5163802", "text": "def calc_gf_mp(ty_1: float, ty_2: float, b_strip: float, i_5: float) -> np.ndarray:\n gf_mp = np.zeros((4, 4))\n\n # assemble the matrix of gf_mp (symmetric flexural stability matrix)\n gf_mp[0, 0] = (10*ty_1 + 3*ty_2) * b_strip * i_5 / 35\n gf_mp[0, 1] = (15*ty_1 + 7*ty_2) * b_strip**2 * i_5 / 210 / 2\n gf_mp[1, 0] = gf_mp[0, 1]\n gf_mp[0, 2] = 9 * (ty_1+ty_2) * b_strip * i_5 / 140\n gf_mp[2, 0] = gf_mp[0, 2]\n gf_mp[0, 3] = -(7*ty_1 + 6*ty_2) * b_strip**2 * i_5 / 420\n gf_mp[3, 0] = gf_mp[0, 3]\n gf_mp[1, 1] = (5*ty_1 + 3*ty_2) * b_strip**3 * i_5 / 2 / 420\n gf_mp[1, 2] = (6*ty_1 + 7*ty_2) * b_strip**2 * i_5 / 420\n gf_mp[2, 1] = gf_mp[1, 2]\n gf_mp[1, 3] = -(ty_1 + ty_2) * b_strip**3 * i_5 / 140 / 2\n gf_mp[3, 1] = gf_mp[1, 3]\n gf_mp[2, 2] = (3*ty_1 + 10*ty_2) * b_strip * i_5 / 35\n gf_mp[2, 3] = -(7*ty_1 + 15*ty_2) * b_strip**2 * i_5 / 420\n gf_mp[3, 2] = gf_mp[2, 3]\n gf_mp[3, 3] = (3*ty_1 + 5*ty_2) * b_strip**3 * i_5 / 420 / 2\n\n return gf_mp", "title": "" }, { "docid": "ce07e12adf336f53af5337c392066bb2", "score": "0.51588213", "text": "def _compute_C_(self):\n # TODO: implement C\n num_qubits = self.n\n C_column_vecs = list()\n num_dim = 2 ** num_qubits\n for z in range(num_dim):\n # Calculate Count(z) * |z>\n Count = self.max2sat.Count(z)\n z_vec = np.zeros(shape=(num_dim, 1))\n z_vec[z, 0] = 1\n C_column_vecs.append(Count * z_vec)\n C = np.concatenate(C_column_vecs, axis=1)\n return C # np.eye(num_dim)", "title": "" }, { "docid": "26dfbc6ff81d4a97620b056c04591724", "score": "0.5158231", "text": "def V2_cubic(band1, band2, band3, q1, q2, q3, \n Ubov1, Ubov2, Ubov3, Ubovm1, Ubovm2, Ubovm3):\n \n V2 = 0.0\n \n# =============================================================================\n# tmp, Ubov1 = GLSW.eigensystem(q1)\n# tmp, Ubov2 = GLSW.eigensystem(q2)\n# tmp, Ubov3 = GLSW.eigensystem(q3)\n# \n# tmp, Ubovm1 = GLSW.eigensystem(-q1)\n# tmp, Ubovm2 = GLSW.eigensystem(-q2)\n# tmp, Ubovm3 = GLSW.eigensystem(-q3)\n# =============================================================================\n \n for N in range(2):\n for Np in range(2):\n \n for bond in range(12):\n \n bond_vec = delta_ij[:, bond]\n sub1 = sub_idx[bond, 0]\n sub2 = sub_idx[bond, 1]\n \n In = num_sub*N + sub1\n Jn = num_sub*N + sub2\n Inp = num_sub*Np + sub1\n Jnp = num_sub*Np + sub2\n \n tFc1 = Fc_symm(Jn, Jn, Jnp, \\\n band1, band2, band3, Ubovm1, Ubovm2, Ubov3, \\\n q1, q2, q3, bond_vec, 0)\n \n tFd1 = Fd_symm(Jn, Jn, Jnp, \\\n band3, band2, band1, Ubov3, Ubovm2, Ubovm1, \\\n -q3, -q2, -q1, bond_vec, 0)\n \n tFc2 = Fc_symm(In, In, Inp, \\\n band1, band2, band3, Ubovm1, Ubovm2, Ubov3, \\\n q1, q2, q3, bond_vec, 0)\n \n tFd2 = Fd_symm(In, In, Inp, \\\n band3, band2, band1, Ubov3, Ubovm2, Ubovm1, \\\n -q3, -q2, -q1, bond_vec, 0)\n \n V2 += JJJ[bond, N, Np]*tFc1 + 
(JJJ[bond, N, Np]*tFd1).conj() \\\n + III[bond, N, Np]*tFc2 + (III[bond, N, Np]*tFd2).conj()\n \n for M in range(2):\n \n Im = num_sub*M + sub1\n Jm = num_sub*M + sub2\n \n tFc1 = Fc_symm(Jn, Jnp, Im, \\\n band1, band2, band3, Ubovm1, Ubovm2, Ubov3, \\\n q1, q2, q3, bond_vec, 2)\n \n tFd1 = Fd_symm(Jn, Jnp, Im, \\\n band3, band2, band1, Ubov3, Ubovm2, Ubovm1, \\\n -q3, -q2, -q1, bond_vec, 2)\n \n tFc2 = Fc_symm(In, Inp, Jm, \\\n band1, band2, band3, Ubovm1, Ubovm2, Ubov3, \\\n q1, q2, q3, bond_vec, 1)\n \n tFd2 = Fd_symm(In, Inp, Jm, \\\n band3, band2, band1, Ubov3, Ubovm2, Ubovm1, \\\n -q3, -q2, -q1, bond_vec, 1)\n \n V2 += 2.0*(JJI[bond, N, Np, M]*tFc1 \\\n + (JJI[bond, N, Np, M]*tFd1).conj() \\\n + IIJ[bond, N, Np, M]*tFc2 \\\n + (IIJ[bond, N, Np, M]*tFd2).conj())\n \n# =============================================================================\n# for sublat in range(num_sub):\n# \n# In = num_sub*N + sublat\n# Inp = num_sub*Np + sublat\n# \n# factor1 = thi[sublat] @ f3[:, N, Np]\n# factor2 = thi[sublat] @ f3[:, N, Np].conj()\n# \n# tFc = Fc_symm(In, In, Inp, \\\n# band1, band2, band3, Ubovm1, Ubovm2, Ubov3, \\\n# q1, q2, q3, 0.0, 0)\n# tFd = Fd_symm(In, In, Inp, \\\n# band3, band2, band1, Ubov3, Ubovm2, Ubovm1, \\\n# -q3, -q2, -q1, 0.0, 0)\n# \n# V2 += factor1*tFc + factor2*tFd.conj()\n# =============================================================================\n \n return V2", "title": "" }, { "docid": "8e991d9adc9fc5aa8c3a353b14759123", "score": "0.5156122", "text": "def feature_vector(self):\n # energy is no good! depends on time of first contact\n \n v = list()\n v += self.max_finger_tip_forces # good\n #v.append(self.finger_tip_forces_energy)\n #v.append(self.max_forces_derivative)\n #v.append(self.distance_traveled)\n #v.append(self.max_joint_velocity)\n v.append(self.max_joint_effort) # good\n #v.append(self.joint_effort_energy)\n #v.append(self.texture_similarity)\n return np.array(v)", "title": "" }, { "docid": "77555bfba70c92239a807e02ff37fe44", "score": "0.51550967", "text": "def compute_comp_ref(self,PS):\n T2 = self.get('T2',PS)\n QR = self.nc.variables['QRAIN'][PS['t'],:,PS['la'],PS['lo']]\n PSFC = self.get('PSFC',PS)\n try:\n QS = self.nc.variables['QRAIN'][PS['t'],:,PS['la'],PS['lo']]\n except:\n QS = N.zeros(N.shape(QR))\n rhor = 1000.0\n rhos = 100.0\n rhog = 400.0\n rhoi = 917.0\n\n no_rain = 8.0E6\n # How do I access this time?\n no_snow = 2.0E6 * N.exp(-0.12*(T2-273.15))\n no_grau = 4.0E6\n\n density = N.divide(PSFC,(287.0 * T2))\n Qra_all = QR\n Qsn_all = QS\n\n for j in range(len(Qra_all[1,:,1])):\n curcol_r = []\n curcol_s = []\n for i in range(len(Qra_all[1,1,:])):\n maxrval = N.max(Qra_all[:,j,i])\n maxsval = N.max(Qsn_all[:,j,i])\n curcol_r.append(maxrval)\n curcol_s.append(maxsval)\n N_curcol_r = N.array(curcol_r)\n N_curcol_s = N.array(curcol_s)\n if j == 0:\n Qra = N_curcol_r\n Qsn = N_curcol_s\n else:\n Qra = N.row_stack((Qra, N_curcol_r))\n Qsn = N.row_stack((Qsn, N_curcol_s))\n\n # Calculate slope factor lambda\n lambr = (N.divide((3.14159 * no_rain * rhor), N.multiply(density, Qra))) ** 0.25\n lambs = N.exp(-0.0536 * (T2 - 273.15))\n\n # Calculate equivalent reflectivity factor\n Zer = (720.0 * no_rain * (lambr ** -7.0)) * 1E18\n Zes = (0.224 * 720.0 * no_snow * (lambr ** -7.0) * (rhos/rhoi) ** 2) * 1E18\n Zes_int = N.divide((lambs * Qsn * density), no_snow)\n Zes = ((0.224 * 720 * 1E18) / (3.14159 * rhor) ** 2) * Zes_int ** 2\n\n Ze = N.add(Zer, Zes)\n dBZ = N.nan_to_num(10*N.log10(Ze))\n return dBZ", "title": "" }, { "docid": 
"858c4786a70eacb9d402adf73c827864", "score": "0.5153525", "text": "def _extract_capacity_dual_variable_results(m):\r\n\r\n results = {'x_c': {i: {g: m.dual[m.FIXED_CAPACITY_CONT[(g, i)]] for g in\r\n m.G_C_STORAGE.union(m.G_C_WIND).union(m.G_C_SOLAR)} for i in m.I},\r\n 'd': {key: m.dual[m.FIXED_CAPACITY_DISC[key]] for key in\r\n m.G_C_THERM.cross(m.I).cross(m.G_C_THERM_SIZE_OPTIONS)}\r\n }\r\n\r\n return results", "title": "" }, { "docid": "82c78f6e09de7d2d6751a896e3061fbe", "score": "0.51512504", "text": "def vnl_c_vectorUL_std(p: 'unsigned long const *', n: 'unsigned int') -> \"double\":\n return _vnl_c_vectorPython.vnl_c_vectorUL_std(p, n)", "title": "" }, { "docid": "4d143a90f7dc37e67c62536d11a13354", "score": "0.5149455", "text": "def allocate_T(n: 'unsigned long long const') -> \"stdcomplexF *\":\n return _vnl_c_vectorPython.vnl_c_vectorCF_allocate_T(n)", "title": "" }, { "docid": "2e860f3cb05e5d34a713ee3fa5422525", "score": "0.51484835", "text": "def dV(self):\n return np.product(self.cell)", "title": "" }, { "docid": "ef39c05a4f4ff07067b7617f1cc15daf", "score": "0.5146538", "text": "def calc_C(self):\n pass", "title": "" }, { "docid": "7b720eddca5e555e98234a721bb43ffc", "score": "0.5146172", "text": "def value(self):\r\n u = gpu.cp.sum(self._bvalues * self.parameters[\"bcoeff\"], axis=(2, 1))\r\n u += gpu.cp.einsum(\"ijkl,jkl->i\", self._avalues, self.parameters[\"acoeff\"])\r\n return (np.ones(len(u)), gpu.asnumpy(u))", "title": "" }, { "docid": "f5384ebbd25b3de3c1871269e262bcf6", "score": "0.5143353", "text": "def frmchg_vector(k1, k2, in11):\n return _cspyce0.frmchg_vector(k1, k2, in11)", "title": "" }, { "docid": "0af4c57473d153ec7b175d28e0319898", "score": "0.5142138", "text": "def D(self):\n return _trellis_swig.viterbi_combined_ci_sptr_D(self)", "title": "" }, { "docid": "f9977fe80f4f4af8acc8312d01bd77f1", "score": "0.5137456", "text": "def test_spvc_vc():\n kvz = KernelVectorizor(['a', 'b', 'c'],\n unit=timedelta(seconds=3600),\n epoch=datetime(2010, 12, 13),\n eschatos=datetime(2010, 12, 14),\n accumulated=False)\n data = ['a 2010-12-13 04:07:12',\n 'b 2010-12-13 04:20:39',\n 'b 2010-12-13 06:09:51']\n checkin_set = [{'poi': x,\n 'tick': datetime.strptime(y, '%Y-%m-%d %H:%M:%S')}\n for x, y in [s.split(' ', 1) for s in data]]\n spa = kvz.sp_process(checkin_set)\n a = spa.densify()\n b = kvz.process(checkin_set)\n assert_equal(a, b)", "title": "" }, { "docid": "c326681331cf7a93f88000e28c2e6506", "score": "0.5137199", "text": "def TABLE(self):\n return _trellis_swig.viterbi_combined_fb_sptr_TABLE(self)", "title": "" }, { "docid": "8d943eacaa37d304e054012ce2e2f44c", "score": "0.51367486", "text": "def calc_bv(mag_pars):\n\n _, _, _, bands = mist.interp_mag([*mag_pars], [\"B\", \"V\"])\n B, V = bands\n return B-V", "title": "" }, { "docid": "3932a58d651905acfa2cc4ac799ff9fc", "score": "0.5135823", "text": "def compute_iv(table):\n\n\t\tdef calculate_iv(x):\n\t\t\tif x['p_goods'] != x['p_bads']:\n\t\t\t\treturn (x['p_goods'] - x['p_bads']) * x['woe']\n\t\t\treturn 0 \n\n\t\t# Computes informaiton value \n\t\tivs = table.apply(lambda x: calculate_iv(x), axis=1)\n\t\treturn ivs.sum()", "title": "" }, { "docid": "d9ec39ea13c902d565911d9f591b6524", "score": "0.5135708", "text": "def vectorize(self, roughVector):\n vector = Counter()\n for feature in self.features:\n if feature in roughVector:\n vector[feature] = roughVector[feature]\n else:\n vector[feature] = 0\n #print(self.features)\n #print(vector)\n return vector", "title": "" }, { "docid": "c72e8747ee93751440f623a26a958f5d", 
"score": "0.5131366", "text": "def D(self):\n return _trellis_swig.viterbi_combined_fi_D(self)", "title": "" }, { "docid": "1a3fc105b873a3f3147612c1b098937f", "score": "0.51284796", "text": "def create_flux_vector_pms_gr(self):\n soma_prod = 0\n soma_inj = 0\n lim4 = 1e-4\n store_velocity = {}\n store_flux = {}\n for primal in self.primals:\n #1\n primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n primal_id = self.ident_primal[primal_id]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n for volume in fine_elems_in_primal:\n #2\n flux = {}\n velocity = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #3\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n if adj not in fine_elems_in_primal:\n #4\n pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n #3\n else:\n #4\n pvol = self.mb.tag_get_data(self.pcorr_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pcorr_tag, adj, flat=True)[0]\n #3\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)/(self.mi)\n keq2 = keq\n keq = keq*(np.dot(self.A, uni))\n pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))\n q = (grad_p)*keq - grad_z*keq*self.gama\n # print((grad_p)*keq)\n # print(- grad_z*keq*self.gama)\n # print(q)\n # print(self.store_flux_pf_gr[volume][tuple(unit)])\n # print('\\n')\n # import pdb; pdb.set_trace()\n\n if gid_adj > gid_vol:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n else:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n\n flux[tuple(unit)] = q\n velocity[tuple(unit)] = v\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n\n #2\n # print(gid_vol)\n # print(velocity)\n # print('\\n')\n # import pdb; pdb.set_trace()\n store_flux[volume] = flux\n self.mb.tag_set_data(self.flux_fine_pms_tag, volume, sum(flux.values()))\n # flt = sum(flux.values())\n # if volume not in self.wells_inj and volume not in self.wells_prod:\n # lim4 = 1e-7\n # if abs(flt) > lim4:\n # print(gid_vol)\n # print(flt)\n # import pdb; pdb.set_trace()\n # flt = sum(flux.values())\n store_velocity[volume] = velocity\n\n for volume in set(self.all_fine_vols) - set(self.wells):\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n values = store_flux[volume].values()\n if abs(sum(values)) > lim4:\n print('fluxo multiescala com gravidade nao esta dando conservativo')\n print('gid:{0}'.format(gid))\n print(sum(values))\n import pdb; pdb.set_trace()\n\n with open('fluxo_multiescala_gr.txt', 
'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat= True)[0]\n values = store_flux[volume].values()\n if volume in self.wells_inj:\n soma_inj += sum(values)\n else:\n soma_prod += sum(values)\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(soma_inj))\n arq.write('soma_prod:{0}\\n'.format(soma_prod))\n\n return store_flux", "title": "" }, { "docid": "553a10d4de72c454c2eea7cf6f265671", "score": "0.512762", "text": "def mxvg_vector(in31, in21):\n return _cspyce0.mxvg_vector(in31, in21)", "title": "" }, { "docid": "d1d2aa776ae682ea4fbcc8baefdb57e7", "score": "0.5122566", "text": "def vnl_c_vectorCF_inner_product(arg0: 'stdcomplexF', arg1: 'stdcomplexF', arg2: 'unsigned int') -> \"stdcomplexF\":\n return _vnl_c_vectorPython.vnl_c_vectorCF_inner_product(arg0, arg1, arg2)", "title": "" }, { "docid": "57c84697b26199dbfd8d09a1e91c50b0", "score": "0.5119531", "text": "def GetMp(self) -> Vector:\n ...", "title": "" }, { "docid": "c8f79fd86f91c286971345cab3f5dc17", "score": "0.5114595", "text": "def get_vbm(EIG, OCC, OCC_E=0.001):\n return np.max(EIG[OCC > OCC_E])", "title": "" }, { "docid": "1953f48e5914e0ac0b3625fbfeacc866", "score": "0.51112324", "text": "def V1_cubic(band1, band2, band3, q1, q2, q3, \\\n Ubov1, Ubov2, Ubov3, Ubovm1, Ubovm2, Ubovm3):\n\n V1 = 0.0\n \n# =============================================================================\n# tmp, Ubov1 = GLSW.eigensystem(q1)\n# tmp, Ubov2 = GLSW.eigensystem(q2)\n# tmp, Ubov3 = GLSW.eigensystem(q3)\n# \n# tmp, Ubovm1 = GLSW.eigensystem(-q1)\n# tmp, Ubovm2 = GLSW.eigensystem(-q2)\n# tmp, Ubovm3 = GLSW.eigensystem(-q3)\n# \n# =============================================================================\n for N in range(2):\n for Np in range(2):\n \n \n for bond in range(12):\n \n bond_vec = delta_ij[:, bond]\n sub1 = sub_idx[bond, 0]\n sub2 = sub_idx[bond, 1]\n \n # N, Np, and M are flavor indices defined in the note\n In = num_sub*N + sub1\n Jn = num_sub*N + sub2\n Inp = num_sub*Np + sub1\n Jnp = num_sub*Np + sub2 \n \n tFa1 = Fa_symm(Jn, Jn, Jnp, \\\n band1, band2, band3, Ubov1, Ubov2, Ubov3, \\\n q1, q2, q3, bond_vec, 0)\n \n tFb1 = Fb_symm(Jn, Jn, Jnp, \\\n band3, band2, band1, Ubov3, Ubov2, Ubov1, \\\n -q3, -q2, -q1, bond_vec, 0)\n \n tFa2 = Fa_symm(In, In, Inp, \\\n band1, band2, band3, Ubov1, Ubov2, Ubov3, \\\n q1, q2, q3, bond_vec, 0)\n \n tFb2 = Fb_symm(In, In, Inp, \\\n band3, band2, band1, Ubov3, Ubov2, Ubov1, \\\n -q3, -q2, -q1, bond_vec, 0)\n\n V1 += JJJ[bond, N, Np]*tFa1 + (JJJ[bond, N, Np]*tFb1).conj() \\\n +III[bond, N, Np]*tFa2 + (III[bond, N, Np]*tFb2).conj()\n \n for M in range(2):\n \n Im = num_sub*M + sub1\n Jm = num_sub*M + sub2\n \n tFa1 = Fa_symm(Jn, Jnp, Im, \\\n band1, band2, band3, Ubov1, Ubov2, Ubov3, \\\n q1, q2, q3, bond_vec, 2)\n \n tFb1 = Fb_symm(Jn, Jnp, Im, \\\n band3, band2, band1, Ubov3, Ubov2, Ubov1, \\\n -q3, -q2, -q1, bond_vec, 2)\n \n tFa2 = Fa_symm(In, Inp, Jm, \\\n band1, band2, band3, Ubov1, Ubov2, Ubov3, \\\n q1, q2, q3, bond_vec, 1)\n \n tFb2 = Fb_symm(In, Inp, Jm, \\\n band3, band2, band1, Ubov3, Ubov2, Ubov1, \\\n -q3, -q2, -q1, bond_vec, 1)\n \n \n V1 += 2.0*(JJI[bond, N, Np, M]*tFa1 \\\n + (JJI[bond, N, Np, M]*tFb1).conj() \\\n + IIJ[bond, N, Np, M]*tFa2 \\\n + (IIJ[bond, N, Np, M]*tFb2).conj())\n \n for sublat in range(num_sub):\n \n In = num_sub*N + sublat\n Inp = num_sub*Np + sublat\n factor1 = thi[sublat] @ f3[:, N, Np]\n factor2 = thi[sublat] @ f3[:, N, Np].conj()\n tFa 
= Fa_symm(In, In, Inp, \\\n band1, band2, band3, Ubov1, Ubov2, Ubov3, \\\n q1, q2, q3, 0.0, 0)\n tFb = Fb_symm(In, In, Inp, \\\n band3, band2, band1, Ubov3, Ubov2, Ubov1, \\\n -q3, -q2, -q1, 0.0, 0)\n \n V1 += factor1*tFa + factor2*tFb.conj()\n \n\n return V1", "title": "" } ]
346d3ed1deeac006fe47ca51b0c1dd9d
No hint returned. Readonly routing to the SEGMENTS_EXEC_CONNECTION is handled manually in the Segment model. This is 'less surprising' than routing all Segment reads through the SEGMENTS_EXEC_CONNECTION.
[ { "docid": "8bd1a3437ac212071979be4f57181f99", "score": "0.0", "text": "def db_for_read(self, model, **hints):\n return None", "title": "" } ]
[ { "docid": "c4f28fca8b39034d3677e853378cef94", "score": "0.5777611", "text": "def test_get_segment_bind(self):\n pass", "title": "" }, { "docid": "a45cab6f8492d86060ab03fadce251a7", "score": "0.5474118", "text": "def _get_segment_routing(self):\n return self.__segment_routing", "title": "" }, { "docid": "a45cab6f8492d86060ab03fadce251a7", "score": "0.5474118", "text": "def _get_segment_routing(self):\n return self.__segment_routing", "title": "" }, { "docid": "a45cab6f8492d86060ab03fadce251a7", "score": "0.5474118", "text": "def _get_segment_routing(self):\n return self.__segment_routing", "title": "" }, { "docid": "a45cab6f8492d86060ab03fadce251a7", "score": "0.5474118", "text": "def _get_segment_routing(self):\n return self.__segment_routing", "title": "" }, { "docid": "e8850bb2f3f91ee5a3263a1e17c6df32", "score": "0.50652015", "text": "def allow_relation(self, obj1, obj2, **hints):\n is_segments = obj1._meta.app_label == 'segments'\n is_conn = obj2._state.db == SEGMENTS_EXEC_CONNECTION\n return True if is_segments and is_conn else None", "title": "" }, { "docid": "c0478bc33e447f5ae526ebdb53540638", "score": "0.4970428", "text": "def test_update_segment_bind(self):\n pass", "title": "" }, { "docid": "2dd2fcb8f24ab4414a8cb477a6c6d49b", "score": "0.47965425", "text": "def test_add_segment_bind(self):\n pass", "title": "" }, { "docid": "8b2f2aca85129cf564e724387046ca06", "score": "0.47502515", "text": "def segmentReceived(self):", "title": "" }, { "docid": "0ad249b63875ee151f028cacff71108b", "score": "0.46872997", "text": "def get_running_route(self):\n raise NotImplemented('Method is not implemented!')", "title": "" }, { "docid": "a5103ea4298889b4e19cdb45455cd69b", "score": "0.46809044", "text": "def test_delete_segment_bind(self):\n pass", "title": "" }, { "docid": "531b57c50b6cb775a5b3478e4407dab0", "score": "0.46603185", "text": "def __getitem__(self, segment_name):\n return self._segments[segment_name]", "title": "" }, { "docid": "594f9536e5bfd01b15319b104b28c9c2", "score": "0.46046227", "text": "def handle_starbound_packets(self, p):\n return self.call_mapping[p.id](p)", "title": "" }, { "docid": "8ea4675bf85f5a842ec4733a20639f7d", "score": "0.45776394", "text": "def _get_segmentation_id(self, context, id):\n session = context.session\n binding = n1kv_db_v2.get_network_binding(session, id)\n return binding.segmentation_id", "title": "" }, { "docid": "8ea4675bf85f5a842ec4733a20639f7d", "score": "0.45776394", "text": "def _get_segmentation_id(self, context, id):\n session = context.session\n binding = n1kv_db_v2.get_network_binding(session, id)\n return binding.segmentation_id", "title": "" }, { "docid": "45eb7f44571615092e229bb0a17537e0", "score": "0.4570584", "text": "def enable_route_for_command(self, celery_app, name):\n if name not in self._envelopes:\n # envelope self-registers with Celery\n celery_app.conf.task_routes = self._generate_celery_routes(name) # update routes in celery\n self._envelopes[name] = Envelope()\n if '.query.' 
in name.lower():\n self._envelopes[name].patch(celery_app, name, False)\n else:\n self._envelopes[name].patch(celery_app, name, True)", "title": "" }, { "docid": "d7eeb93f70976cf34c9a5b106a3d5395", "score": "0.4522573", "text": "def _get_nonstop_routing(self):\n return self.__nonstop_routing", "title": "" }, { "docid": "23f93e02cc396ea2d8d0740ed1bd5274", "score": "0.4501136", "text": "def get_external_segment_status(self, context):\n pass", "title": "" }, { "docid": "59ae75abf22327273fc9a9da6e565d5a", "score": "0.44961303", "text": "def set_segment_remap(self, remap):\n self.__send_command([0xa0 | remap])", "title": "" }, { "docid": "b9eafa3c1396004f128712ec64f530f8", "score": "0.4448387", "text": "def __call__(self, callback, route):\n raise NotImplementedError", "title": "" }, { "docid": "126f555a7ea5b2fb859aad703d04ca2b", "score": "0.44444585", "text": "def stmt_method_constant_op_asgn_command_call(self, p):\r\n raise NotImplementedError(p)", "title": "" }, { "docid": "019eb1a55aa786e8970c2eb0c6786052", "score": "0.44339278", "text": "def stmt_constant_op_asgn_command_call(self, p):\r\n raise NotImplementedError(p)", "title": "" }, { "docid": "3a748440185c459dee30b1ebd739fbee", "score": "0.44239548", "text": "def process_create_external_segment(self, session, data, result):\n pass", "title": "" }, { "docid": "4dcc0c7f171fc3c7709556fd96158457", "score": "0.44142088", "text": "def synRcvd(self):", "title": "" }, { "docid": "28f033c1cdbfad4f1c537fcfd649c60d", "score": "0.44040105", "text": "def fetch_routing_info(self, address):\n from .api import CypherError\n from .bolt import BoltSession\n try:\n connection = self.acquire(address)\n with BoltSession(connection) as session:\n return list(session.run(\"CALL %s\" % self.routing_info_procedure))\n except CypherError as error:\n if error.code == \"Neo.ClientError.Procedure.ProcedureNotFound\":\n raise ServiceUnavailable(\"Server %r does not support routing\" % (address,))\n else:\n raise ServiceUnavailable(\"Routing support broken on server %r\" % (address,))\n except ServiceUnavailable as error:\n if error.code == \"Neo.ClientError.Security.Unauthorized\":\n from neo4j.v1.security import Unauthorized\n raise Unauthorized(error.args[0])\n self.remove(address)\n return None", "title": "" }, { "docid": "d9c3ef2c2b14c0ae11886b65fb8b7730", "score": "0.43477014", "text": "def access_page(cmd):\n if len(cmd) == ParamsNumber.ACCESS.value:\n try:\n virtual_direction = int(cmd[1])\n dirty_bit = int(cmd[3])\n process_id = cmd[2]\n if process_id not in stats or stats[process_id][\"active_bit\"] == 0:\n print(f'Error: Segementation Fault. 
El proceso {process_id} no está en memoria.')\n elif virtual_direction >= stats[process_id][\"size\"][-1]:\n print(f'Error: La dirección {virtual_direction} no existe en el proceso {process_id}.')\n else:\n global timer, algorithm\n print(f'Obtener la dirección real correspondiente a la dirección virtual {virtual_direction} del proceso {process_id}.')\n logic_page = int(virtual_direction / PAGE_SIZE)\n access_page_key = f'{process_id}_{logic_page}'\n page_table[access_page_key][\"dirty_bit\"] = dirty_bit\n timer += 0.1\n if dirty_bit == 1:\n print(f'Página {logic_page} del proceso {process_id} modificada.')\n timer += 0.1\n if page_table[access_page_key][\"presence_bit\"] == 0:\n # Mark the page fault.\n stats[process_id][\"page_faults\"][-1] += 1\n # Get disk frame where the direction to be accessed resides.\n disk_frame = page_table[access_page_key][\"disk_frame\"]\n # Get page to be swapped out of ram.\n page_key_swapped_out = relocation_queue.pop(0)\n # Get the frame in ram that is going to receive the page stored in disk.\n ram_frame_swap_in = page_table[page_key_swapped_out][\"ram_frame\"]\n # Execute swap out and update stats.\n swap_page_out_of_ram(page_key_swapped_out, disk_frame)\n # Store in ram the frame previously stored in disk (swap in) and update stats.\n swap_page_in_ram(access_page_key, ram_frame_swap_in, disk_frame)\n else:\n if algorithm == \"lru\":\n move_page_to_front(access_page_key)\n real_direction = page_table[access_page_key][\"ram_frame\"] * PAGE_SIZE + virtual_direction % PAGE_SIZE\n print(f'Dirección virtual = {virtual_direction}. Dirección real = {real_direction}.')\n except ValueError:\n print(\"Error: Los parámetros deben ser enteros.\")\n else:\n print(f\"Error: Se esperan {ParamsNumber.ACCESS.value} parámetros.\")", "title": "" }, { "docid": "0f6dd7e339eec36e166ae154db59bdc7", "score": "0.43352094", "text": "def stmt_method_op_asgn_command_call(self, p):\r\n raise NotImplementedError(p)", "title": "" }, { "docid": "f3aab97b79a99aedcbb71b3b04ee3ffa", "score": "0.43308467", "text": "def failover_segment_get_by_id(context, segment_id):\n return IMPL.failover_segment_get_by_id(context, segment_id)", "title": "" }, { "docid": "feb1a16e5598acb7aa478678df3b0a95", "score": "0.43262705", "text": "def segments(self, params):\n pass", "title": "" }, { "docid": "1fb310c5eb7025bdadb276160fd1f8d4", "score": "0.4295063", "text": "def test_get_space_broadcast(self):\n pass", "title": "" }, { "docid": "b9d20f37c521de5ec5acff7fbdc0f175", "score": "0.4291128", "text": "def lspcli(obj: SkyNetCtxt) -> None:", "title": "" }, { "docid": "3e4f2f052dd058fd1ca7d4858e51b326", "score": "0.42815474", "text": "def access(self, mode = 0):\r\n raise NotImplementedError()", "title": "" }, { "docid": "1dd44d47fbe211dacedf86924073bc22", "score": "0.4278798", "text": "def __call__(is_inbound = True):", "title": "" }, { "docid": "99935814c9831f1d2a0ceb0558af7b15", "score": "0.42635256", "text": "def getSegment(self, int: int) -> AdCycleSegment:\n ...", "title": "" }, { "docid": "78f46c3d1c37ed67b60dcf20a1d07647", "score": "0.42483184", "text": "def access(self, mode):\n \n pass", "title": "" }, { "docid": "e333fa2a04231f872aa641b0e71de782", "score": "0.42336184", "text": "def _operation(self):\n raise NotImplementedError()", "title": "" }, { "docid": "24ef796d75165820d8fb761fe2cdbbf6", "score": "0.42185858", "text": "def test_scan_id_default(self):\n scan = ''\n try:\n self.res=self.run_task(infile=self.rawfile,scan=scan,mode=self.mode,outfile=self.outfile,outform='ASAP')\n self.fail('The 
task must throw exception')\n except Exception, e:\n pos=str(e).find('Trying to flag whole scantable.')\n self.assertNotEqual(pos,-1,\n msg='Unexpected exception was thrown: %s'%(str(e)))", "title": "" }, { "docid": "1bdd5308f5dd6165f2ce0627b9b2898a", "score": "0.42161515", "text": "def visit_exec(self, node):\n self.add_message(\"exec-used\", node=node)", "title": "" }, { "docid": "e753c26dea462ff52e0df3f088fbb9aa", "score": "0.4215119", "text": "def default_router(obj, function_path):\n if function_path.startswith('_'):\n raise AttributeError\n \n return getattr(obj, function_path)", "title": "" }, { "docid": "1373100164924738005838ab2635b127", "score": "0.42145908", "text": "def _zdo_node_desc_req_srsp_handler(self, rx_data):\r\n if rx_data['id'] == ZpiCommand.ZDO_NODE_DESC_REQ_SRSP:\r\n return struct.unpack('<B', rx_data['status'])[0]\r\n else:\r\n raise ValueError('Invalid Rx frame! Expected: %s, Received: %s' % (\r\n ZpiCommand.ZDO_NODE_DESC_REQ_SRSP, rx_data['id']))", "title": "" }, { "docid": "f1467893c2160481603c27032dffd120", "score": "0.4212026", "text": "def get_routing_command(routing_operation, address, block_action):\n global config\n iptables = config[\"ip4tables_cmd\"] if ip_version(address) == 4 else config[\"ip6tables_cmd\"]\n source_address = ([\"-s\", config[\"blocked_local_client_address_v4\"]] if ip_version(address) == 4 and config[\"blocked_local_client_address_v4\"] \n else [\"-s\", config[\"blocked_local_client_address_v6\"]] if ip_version(address) == 6 and config[\"blocked_local_client_address_v6\"] \n else [])\n routing_action = [\"-j\", block_action]\n\n if routing_operation == 'insert':\n return ([iptables, \"-I\", config[\"iptables_chain\"], \"1\", \"-d\", address] \n + source_address\n + routing_action)\n elif routing_operation == 'check':\n return ([iptables, \"-C\", config[\"iptables_chain\"], \"-d\", address] \n + source_address\n + routing_action)\n elif routing_operation == 'delete':\n return ([iptables, \"-D\", config[\"iptables_chain\"], \"-d\", address] \n + source_address\n + routing_action)\n else:\n logging.error(\"Unknown routing_operation provided for get_routing_command.\")\n return \"\"", "title": "" }, { "docid": "7afa8066157af9ab5cad6b688916ee06", "score": "0.42116517", "text": "def enableReadOnlyProcessing(self, enabled: bool) -> None:\n ...", "title": "" }, { "docid": "f0badfb140ac34db258b5a4e7a07da3e", "score": "0.42055824", "text": "def test_start_space_broadcast(self):\n pass", "title": "" }, { "docid": "8bcb73e131b28c81c150490cdc690d17", "score": "0.4205271", "text": "def subcommand_not_found(self, command, string):\n ...", "title": "" }, { "docid": "b5181ae7bc1fe581ba0794c5170ac718", "score": "0.42049864", "text": "def process_update_external_segment(self, session, data, result):\n pass", "title": "" }, { "docid": "250a46789872202bb497484a9e47b85b", "score": "0.41896978", "text": "def load_segments(self):\n logging.info(\"try to load the segments ... 
\")\n last = None\n s_tm = time.time()\n\n lines = self.src_handle.read().splitlines()\n for line in lines:\n logging.info(\"load segment: `{}`\".format(line))\n ps = line.split(\"|\", maxsplit=2)\n if len(ps) != 3:\n raise Exception(\"invalid ip segment line `{}`\".format(line))\n\n sip = util.check_ip(ps[0])\n if sip == -1:\n raise Exception(\n \"invalid ip address `{}` in line `{}`\".format(ps[0], line)\n )\n eip = util.check_ip(ps[1])\n if eip == -1:\n raise Exception(\n \"invalid ip address `{}` in line `{}`\".format(ps[1], line)\n )\n\n if sip > eip:\n raise Exception(\n \"start ip({}) should not be greater than end ip({})\".format(\n ps[0], ps[1]\n )\n )\n if len(ps[2]) < 1:\n raise Exception(\"empty region info in segment line `{}`\".format(line))\n\n segment = seg.Segment(sip=sip, eip=eip, reg=ps[2])\n # Check the continuity of data segment\n if last is not None:\n if last.end_ip + 1 != segment.start_ip:\n raise Exception(\n \"discontinuous data segment: last.eip+1({})!=seg.sip({}, {})\".format(\n sip, eip, ps[0]\n )\n )\n self.segments.append(segment)\n last = segment\n logging.info(\n \"all segments loaded, length: {}, elapsed: {}\".format(\n len(self.segments), time.time() - s_tm\n )\n )", "title": "" }, { "docid": "bbe6c06b6057cd16785b98275c77e241", "score": "0.41716272", "text": "def stmt_subscript_op_asgn_command_call(self, p):\r\n raise NotImplementedError(p)", "title": "" }, { "docid": "f76cd533f58b0534fbb6840b138bd5bb", "score": "0.41677192", "text": "def access():", "title": "" }, { "docid": "f76cd533f58b0534fbb6840b138bd5bb", "score": "0.41677192", "text": "def access():", "title": "" }, { "docid": "5927391732a01b84949283ea73cdc045", "score": "0.41623557", "text": "def permission(self):\r\n return \"execution.execute\"", "title": "" }, { "docid": "c816168135c0e547fe9d92fd4a11a18f", "score": "0.416229", "text": "def command_not_found(self, string):\n ...", "title": "" }, { "docid": "be6eece73016ce10dc44ddb712a0208e", "score": "0.41612554", "text": "def _zb_allow_bind_srsp_handler(self, rx_data):\r\n if rx_data['id'] == ZpiCommand.ZB_ALLOW_BIND_SRSP:\r\n return\r\n else:\r\n raise ValueError('Invalid Rx frame! 
Expected: %s, Received: %s' % (\r\n ZpiCommand.ZB_ALLOW_BIND_SRSP, rx_data['id']))", "title": "" }, { "docid": "500156f4bc7aa9d2a2e2a83b6cfc872a", "score": "0.4156523", "text": "def get_procedure_segments(\n self, ems_system_id, procedure_id, release_id=None, custom_headers=None, raw=False, **operation_config):\n # Construct URL\n url = self.get_procedure_segments.metadata['url']\n path_format_arguments = {\n 'emsSystemId': self._serialize.url(\"ems_system_id\", ems_system_id, 'int'),\n 'procedureId': self._serialize.url(\"procedure_id\", procedure_id, 'int')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n if release_id is not None:\n query_parameters['releaseId'] = self._serialize.query(\"release_id\", release_id, 'int')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Accept'] = 'application/json'\n if custom_headers:\n header_parameters.update(custom_headers)\n\n # Construct and send request\n request = self._client.get(url, query_parameters, header_parameters)\n response = self._client.send(request, stream=False, **operation_config)\n\n if response.status_code not in [200, 401, 503]:\n raise HttpOperationError(self._deserialize, response)\n\n deserialized = None\n\n if response.status_code == 200:\n deserialized = self._deserialize('[AdiEmsWebApiV2DtoNavigationNavigationProcedureSegment]', response)\n if response.status_code == 401:\n deserialized = self._deserialize('AdiEmsWebApiModelError', response)\n if response.status_code == 503:\n deserialized = self._deserialize('AdiEmsWebApiModelError', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return client_raw_response\n\n return deserialized", "title": "" }, { "docid": "a83909f7ec4851b8cab0afd1fa84cabf", "score": "0.414945", "text": "def service_routes(self) -> bool:", "title": "" }, { "docid": "87b5980d2ec4b31fcd4d6239e26d993b", "score": "0.41485733", "text": "def show_route(self):\r\n cmd = \"route\"\r\n output = self._access.exec_command(cmd)\r\n\r\n return self._parse_route(output)", "title": "" }, { "docid": "4c5188bd1f59ab554a0a3a0cdc6174e6", "score": "0.41472617", "text": "def failover_segment_get_by_name(context, name):\n return IMPL.failover_segment_get_by_name(context, name)", "title": "" }, { "docid": "f843fb730823578bad6f45525e42f2df", "score": "0.414336", "text": "def operation_command(self):\n pass", "title": "" }, { "docid": "0ead6149e64569b9b0ad9c1bf0591df6", "score": "0.41420582", "text": "def execute_command_no_results(self, sock_info, generator):\n ...", "title": "" }, { "docid": "df0fcf176cd8c5d0fea89a43326983de", "score": "0.4135441", "text": "def bound_to(self, path, params):\n _logger.info('Bound to ' + path)\n return True", "title": "" }, { "docid": "5f60521cc612e8e1543b29eec1658a63", "score": "0.4121963", "text": "def read_segments(path):\n with open(path, \"r\") as irisao_file:\n raw_line = irisao_file.readline()\n clean_line = clean_string(raw_line)\n\n # Skip to the segment section:\n while clean_line[1:3].upper() != \"ZV\":\n raw_line = irisao_file.readline()\n clean_line = clean_string(raw_line)\n\n segment_commands = {}\n while clean_line[1:3].upper() == \"ZV\":\n\n # Parse into dictionary {segment: (piston, tip, tilt)}.\n segment_string_list = clean_line.lstrip(\"[ZV:\").rstrip(\"]\").split(\",\")\n segment_num = int(segment_string_list[0])\n segment_tuple = convert_to_float(segment_string_list[1]), \\\n convert_to_float(segment_string_list[2]), \\\n 
convert_to_float(segment_string_list[3])\n\n segment_commands[segment_num] = segment_tuple\n\n raw_line = irisao_file.readline()\n clean_line = clean_string(raw_line)\n\n if segment_commands:\n # Prepare command for segments.\n return segment_commands\n else:\n return None", "title": "" }, { "docid": "1e5a50327463ad4c20268090f44d9db3", "score": "0.41217893", "text": "def iter_executable_segments(self):\n for seg in self.elf.executable_segments:\n yield seg", "title": "" }, { "docid": "1cccbc6525043d7fbc2256a818f09da5", "score": "0.41124442", "text": "def _handler_direct_access_stop_direct(self):\n return self._handler_unknown_discover()", "title": "" }, { "docid": "861c66b9b43f8e920f220cf1452cceea", "score": "0.4089788", "text": "def segment(self):\n return self._pci_address['segment']", "title": "" }, { "docid": "35b39492cb499e5531224a616710f7d6", "score": "0.40779626", "text": "def test_request_SlaveInstance_without_enough_slots(self):\n raise NotImplementedError", "title": "" }, { "docid": "119d4f8df2be69ea9a3a5ddc91f9c907", "score": "0.4070143", "text": "def execute_op_msg_no_results(self, sock_info, generator):\n ...", "title": "" }, { "docid": "0f2ccfe1868725eed431b3f4feb3b74e", "score": "0.40602612", "text": "def getSegmentsCount(self) -> int:\n ...", "title": "" }, { "docid": "4a7898709043e120aa8af50ff9e431eb", "score": "0.40598643", "text": "def handle_server_read(self):\n raise NotImplementedError", "title": "" }, { "docid": "cc3d24d78f9f3c05b2c385b67b9ed01c", "score": "0.40580812", "text": "def _zdo_mgmt_bind_req_srsp_handler(self, rx_data):\r\n if rx_data['id'] == ZpiCommand.ZDO_MGMT_BIND_REQ_SRSP:\r\n return struct.unpack('<B', rx_data['status'])[0]\r\n else:\r\n raise ValueError('Invalid Rx frame! Expected: %s, Received: %s' % (\r\n ZpiCommand.ZDO_MGMT_BIND_REQ_SRSP, rx_data['id']))", "title": "" }, { "docid": "df410fdcb9711104694140ad4181d783", "score": "0.4054606", "text": "def valid_read_endpoint_address(request):\n return request.param", "title": "" }, { "docid": "19c5acb72167ee197aec94d62751b359", "score": "0.4053427", "text": "def handle_regular_task(self):\n pass", "title": "" }, { "docid": "729d06ecbca9835eacae569f08d74db9", "score": "0.40510806", "text": "def num_segments(self):\n return self._num_segments", "title": "" }, { "docid": "a4bd0251810795dddfec510edbdd4956", "score": "0.40453106", "text": "def __call__(self, req, res):\n raise NotImplemented", "title": "" }, { "docid": "e01871442734ec39c8e2933a96cfd23f", "score": "0.4044506", "text": "def _execute(self): \r\n raise NotImplementedError", "title": "" }, { "docid": "f6b5042f363c1b4464002a72adca12ee", "score": "0.40421557", "text": "def recovery_routine(self):\n logger.warning(\"recovery_routine not implemented\")\n raise errors.NotSupportedError(\"N/A\")", "title": "" }, { "docid": "c58b45495888ac5c1a806fea63dd70bc", "score": "0.4038893", "text": "def _zdo_simple_desc_req_srsp_handler(self, rx_data):\r\n if rx_data['id'] == ZpiCommand.ZDO_SIMPLE_DESC_REQ_SRSP:\r\n return struct.unpack('<B', rx_data['status'])[0]\r\n else:\r\n raise ValueError('Invalid Rx frame! 
Expected: %s, Received: %s' % (\r\n ZpiCommand.ZDO_SIMPLE_DESC_REQ_SRSP, rx_data['id']))", "title": "" }, { "docid": "1acca977d2072f758790375b6b84774d", "score": "0.40318757", "text": "def pre_discover(self, s, terminal, a, sn, terminaln):\n\n return 0", "title": "" }, { "docid": "7947ac5155b925d57250fd06c631cb21", "score": "0.40309232", "text": "def in_subsegment(self, name=None, **kwargs) -> ContextManager:", "title": "" }, { "docid": "315d017f174f82b3620acf5ab3ca5817", "score": "0.4024149", "text": "def sscan(self, *args):\n if self._cluster:\n return self.execute(u'SSCAN', *args, shard_key=args[0])\n return self.execute(u'SSCAN', *args)", "title": "" }, { "docid": "b784e072b931c1ff69038f7f91c6e06e", "score": "0.4016509", "text": "def getIsSegmented(self):\n return self._IsSegmented", "title": "" }, { "docid": "c20e06d7bd3f328729e3bd696432f6e9", "score": "0.4014882", "text": "def read_segment(st, segment, cfile, vys_timeout):\n\n logger.info(\"Reading datasetId {0}, segment {1} locally.\"\n .format(st.metadata.scanId, segment))\n\n with distributed.worker_client() as cl_loc:\n fut = cl_loc.submit(source.read_segment, st, segment, cfile,\n vys_timeout)\n data = fut.result()\n\n logger.info(\"Finished reading datasetId {0}, segment {1} locally.\"\n .format(st.metadata.scanId, segment))\n\n return data", "title": "" }, { "docid": "37c162f2efb09d01e0a714af45228c27", "score": "0.4014161", "text": "def _get_route(self):\n return self.__route", "title": "" }, { "docid": "82a77d90d1109accc9f8a14c77c06913", "score": "0.4010773", "text": "def getResourceId():\r\n return 'Access Ports'", "title": "" }, { "docid": "2b3b8aae9222f676e381c41e76a0cc65", "score": "0.40102398", "text": "def use_internal_routing(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_internal_routing\")", "title": "" }, { "docid": "c5c9bda0e819451142444b9f786232e3", "score": "0.40062597", "text": "def initialize_segments(self, runner):", "title": "" }, { "docid": "28a3037b69c71d3f584acbb8d90fc20b", "score": "0.3989013", "text": "def _NoOperation(self, **kwarg):\n logger.debug('Default handler: {0!s}'.format(kwarg))", "title": "" }, { "docid": "6976b160f729db1271e1b22dd2ca14f3", "score": "0.3983882", "text": "def reduce_route(task):\n from noc.ip.models import VRF, Prefix\n from noc.lib.ip import IP\n\n vrf = VRF.get_global()\n r = {} # prefix -> (description, objects)\n\n for mt in task.maptask_set.filter(status=\"C\"):\n for instance in mt.script_result:\n if instance[\"type\"] == \"ip\" and instance[\n \"forwarding_instance\"] == \"default\":\n for iface in instance[\"interfaces\"]:\n if not iface[\"admin_status\"]:\n continue\n for subiface in iface[\"subinterfaces\"]:\n if not iface[\"admin_status\"]:\n continue\n if \"is_ipv4\" in subiface and subiface[\"is_ipv4\"]:\n # Get description\n if (\"description\" in subiface and\n subiface[\"description\"]):\n description = subiface[\"description\"]\n else:\n description = iface.get(\"description\")\n # Check prefixes in database\n for ipv4 in subiface[\"ipv4_addresses\"]:\n prefix = IP.prefix(ipv4).normalized.prefix\n if not Prefix.objects.filter(vrf=vrf,\n afi=\"4\", prefix=prefix).exists():\n # No prefix in database\n if prefix not in r:\n r[prefix] = [description if description else prefix,\n [mt.managed_object.name]]\n else:\n r[prefix][1] += [mt.managed_object.name]\n # Render template\n return r", "title": "" }, { "docid": "7833ea89d19b920341ddcbe5dbc7d20d", "score": "0.3983344", "text": "def _get_explicit_route_object(self):\n return 
self.__explicit_route_object", "title": "" }, { "docid": "7833ea89d19b920341ddcbe5dbc7d20d", "score": "0.3983062", "text": "def _get_explicit_route_object(self):\n return self.__explicit_route_object", "title": "" }, { "docid": "7833ea89d19b920341ddcbe5dbc7d20d", "score": "0.3983062", "text": "def _get_explicit_route_object(self):\n return self.__explicit_route_object", "title": "" }, { "docid": "7833ea89d19b920341ddcbe5dbc7d20d", "score": "0.3983062", "text": "def _get_explicit_route_object(self):\n return self.__explicit_route_object", "title": "" }, { "docid": "7c3ff40803e59e30312a67ec029a193e", "score": "0.39760056", "text": "def failover_segment_get_by_uuid(context, segment_uuid):\n return IMPL.failover_segment_get_by_uuid(context, segment_uuid)", "title": "" }, { "docid": "9aa93fe5a105bcff2d5888ba990a4bdf", "score": "0.3971211", "text": "def test_process_created_on_ipsec_connection_create(self):\n pass", "title": "" }, { "docid": "f1d70e70f99362674cf763014b817e49", "score": "0.39669302", "text": "def dynamic_caller(self):\n dynamic_rel_path = self.rel_path\n # print self.obstacle_list\n # if not self.target_changed:\n dynamic_rel_path = dri.dynamic_rrt(start = self.curr_pos, end =self.curr_target, path = self.rel_path, obstacle_list = self.obstacle_list)\n if(self.rel_path != dynamic_rel_path):\n self.rel_path = dynamic_rel_path\n self.gp_counter=0\n else:\n self.rel_path = dynamic_rel_path\n print self.rel_path\n self.target_reached()\n dat = Float32MultiArray()\n dat.data = self.rel_path[self.gp_counter] \n self.path_pub.publish(dat)", "title": "" }, { "docid": "274b1f48b231936dd2b6293fd3fcb996", "score": "0.39638856", "text": "def _zdo_mgmt_direct_join_req_srsp_handler(self, rx_data):\r\n if rx_data['id'] == ZpiCommand.ZDO_MGMT_DIRECT_JOIN_REQ_SRSP:\r\n return struct.unpack('<B', rx_data['status'])[0]\r\n else:\r\n raise ValueError('Invalid Rx frame! Expected: %s, Received: %s' % (\r\n ZpiCommand.ZDO_MGMT_DIRECT_JOIN_REQ_SRSP, rx_data['id']))", "title": "" }, { "docid": "1355d4844edcce8503eb1a4b16ef4487", "score": "0.39602247", "text": "def execute_no_results(self, sock_info, generator):\n ...", "title": "" }, { "docid": "ead204315252f57c1859a32b33abd2ee", "score": "0.39593175", "text": "def add_segment(self, symbols=[]):\n # Skipping maxprot and beyond for now. 
Need to figure out how to account for vm_prot_t data types\n #uint32_t cmd; /* LC_SEGMENT */\n #uint32_t cmdsize; /* includes sizeof section structs */\n #char segname[16]; /* segment name */\n #uint32_t vmaddr; /* memory address of this segment */\n #uint32_t vmsize; /* memory size of this segment */\n #uint32_t fileoff; /* file offset of this segment */\n #uint32_t filesize; /* amount to map from the file */\n #vm_prot_t maxprot; /* maximum VM protection */\n #vm_prot_t initprot; /* initial VM protection */\n #uint32_t nsects; /* number of sections in segment */\n #uint32_t flags; /* flags */\n for header in self.__mm.headers:\n for cmd in header.commands:\n load_cmd = cmd[0]\n cmd_info = cmd[1]\n try:\n if load_cmd.get_cmd_name() == 'LC_SEGMENT':\n segment = []\n segment.append(\"???????????????????????????????\")\n for i in range(5):\n segment.append(\"????????\")\n\n cmd_bytes = struct.pack(self.__byte_order(header) + 'I', load_cmd.cmd).encode('hex')\n cmd_size = struct.pack(self.__byte_order(header) + 'I', load_cmd.cmdsize).encode('hex')\n if 'vmaddr' in symbols: segment[0] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['vmaddr']).encode('hex')\n if 'vmsize' in symbols: segment[1] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['vmsize']).encode('hex')\n if 'fileoff' in symbols: segment[2] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['fileoff']).encode('hex')\n if 'filesize' in symbols: segment[3] = struct.pack(self.__byte_order(header) + 'I', cmd_info.describe()['filesize']).encode('hex')\n if len(symbols) > 0:\n for i in range(len(segment)):\n if '??' in segment[-1]:\n segment.pop(-1)\n else:\n break\n self.__sig.add_named_hex(load_cmd.get_cmd_name() + \"_\" + str(self.__dyld_count), cmd_bytes + cmd_size + ''.join(segment))\n else:\n self.__sig.add_named_hex(load_cmd.get_cmd_name() + \"_\" + str(self.__dyld_count), cmd_bytes + cmd_size)\n self.__dyld_count += 1\n except Exception as e:\n print \"EXCEPTION: %s\" % str(e)", "title": "" }, { "docid": "fa8e11bbdff1652a365e14377cce561a", "score": "0.39590803", "text": "def replication_exec(self, func):\n module = self.modules[Module.REPLICATION_MODULE]\n if func == Function.GET_PEND_REQS:\n return module.get_pend_reqs()\n elif func == Function.REP_REQUEST_RESET:\n return module.rep_request_reset()\n elif func == Function.REPLICA_FLUSH:\n return module.replica_flush()\n elif func == Function.NEED_FLUSH:\n return module.rep_need_flush()\n else:\n raise ValueError(\"Bad function parameter\")", "title": "" }, { "docid": "ed3000ecf1aec9f8def4fe0c2ee0f5bb", "score": "0.39570194", "text": "def command(self):\n raise Exception(\"command is a write-only property!\")", "title": "" }, { "docid": "9b9e7f87b021212bc72fb8deca4632ef", "score": "0.39530808", "text": "def someTask(self, path):\r\n pass", "title": "" }, { "docid": "a234b16946df738f57b156ea46be4132", "score": "0.3952303", "text": "def test_seg(self):\n k, v = self.sch.parse_seg('6 5')\n self.assertEqual(k, 'segment')\n self.assertEqual(v, (6, 5))", "title": "" }, { "docid": "246d3b567d876ecf5c87bf4828d21305", "score": "0.3951941", "text": "def getType(self) -> 'SegmentType':\n ...", "title": "" } ]
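For illustration only, alongside this row rather than as part of it: a minimal Django sketch of the read-only routing pattern described by this row's query ("No hint returned. Readonly routing to the SEGMENTS_EXEC_CONNECTION is handled manually in the Segment model...") and its positive passage. The database alias value, the 'segments' app label, and the Segment model below are assumptions made for the sketch, not values taken from the dataset.

# Sketch only. Assumes a Django project whose settings.DATABASES defines a
# second alias (here "segments_exec") and whose DATABASE_ROUTERS includes
# SegmentsRouter; the 'segments' app and its Segment model are hypothetical.
SEGMENTS_EXEC_CONNECTION = "segments_exec"

class SegmentsRouter:
    def db_for_read(self, model, **hints):
        # No hint returned: the router does not force Segment reads onto the
        # exec connection; that routing is done manually in the model.
        return None

    def allow_relation(self, obj1, obj2, **hints):
        # Same shape as the allow_relation passage in this row: permit the
        # relation when one object belongs to the 'segments' app and the
        # other was loaded from the exec connection.
        is_segments = obj1._meta.app_label == "segments"
        is_conn = obj2._state.db == SEGMENTS_EXEC_CONNECTION
        return True if is_segments and is_conn else None

# Read-only routing is then handled explicitly where the Segment model needs
# it, e.g. Segment.objects.using(SEGMENTS_EXEC_CONNECTION).filter(...),
# rather than routing every Segment read through the exec connection.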
767b313f27de008c06873b5506f74731
Concatenate several GenomicArrays, keeping this array's metadata. This array's data table is not implicitly included in the result.
[ { "docid": "fef4b50b2b7e631f7ecc7e10a12b1c97", "score": "0.6265276", "text": "def concat(self, others):\n result = self.as_dataframe(pd.concat([otr.data for otr in others]))\n result.sort()\n return result", "title": "" } ]
[ { "docid": "15a61d1834cdf3ed63f0ebfd38933e33", "score": "0.6837463", "text": "def concatenate(arrays, axis=0):\n if not isinstance(arrays, tuple):\n raise ValueError(\"data type not understood\")\n arrays = tuple([asarray(a) for a in arrays])\n from numpy import concatenate\n return BoltArrayLocal(concatenate(arrays, axis))", "title": "" }, { "docid": "021dca021a9c51cecb723478ac8a7911", "score": "0.66644454", "text": "def cat(*arrays):\n return np.concatenate([a[np.newaxis] for a in arrays])", "title": "" }, { "docid": "c1cc129dcebea596873b8d400e127fae", "score": "0.65845215", "text": "def ecat(*arrays):\n return np.concatenate([a[..., np.newaxis] for a in arrays], axis=-1)", "title": "" }, { "docid": "71d48673ff4ec9bcfb935b6974493396", "score": "0.6557432", "text": "def combineSamples(datasets):\n return np.concatenate(datasets)", "title": "" }, { "docid": "079458ad94206b4f7c51452145a84f4c", "score": "0.6449312", "text": "def _concatenate_virtual_arrays(arrs):\n n = len(arrs)\n if n == 1:\n return arrs[0]\n return ConcatenatedArrays(arrs)", "title": "" }, { "docid": "2354e14133690bb4658fc376a772a048", "score": "0.6339455", "text": "def combine_data(arr_1, arr_2):\r\n return np.concatenate((arr_1, arr_2), axis=0)[:, 0]", "title": "" }, { "docid": "32de2d9460735ff0c80f5506e95d38f2", "score": "0.63105655", "text": "def _merge_data(self):\n self.data_concated = np.concatenate(self.data_buf, axis=1)\n return", "title": "" }, { "docid": "360ef8fa2941f4ed36dc03c8c8fd9958", "score": "0.62901515", "text": "def _concatenate(arrays, axis):\n arrays = list(a for a in arrays if 0 not in a.shape)\n if len(arrays) == 0:\n return numpy.array([])\n maxd = max(max(a.ndim for a in arrays), 2)\n for a in arrays:\n _atleast_nd(a, maxd)\n return numpy.concatenate(arrays, axis=axis)", "title": "" }, { "docid": "7ff1baf532f00f8c06845d7db05edbc6", "score": "0.62083703", "text": "def ConcatenateNdArrays(arrays:list, axis:int = 0):\n try:\n return np.concatenate(arrays, axis=axis)\n except Exception as ex:\n template = \"An exception of type {0} occurred in [ContentSupport.ConcatenateNdArrays]. 
Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)", "title": "" }, { "docid": "1fb2dd718fde504213d0e29e95a17b87", "score": "0.61884797", "text": "def concatenate(tensors, axis=-1):\n return av.ops.concatenate(tensors, axis)", "title": "" }, { "docid": "c66c1ed9f8e31653747b07ddf061b0aa", "score": "0.61760396", "text": "def _concat(self, first_data, second_data):\n if (not first_data) or (not second_data):\n return first_data if first_data else second_data\n assert len(first_data) == len(\n second_data), 'data source should contain the same size'\n return [\n concat(\n first_data[i],\n second_data[i],\n dim=0\n ) for i in range(len(first_data))\n ]", "title": "" }, { "docid": "454027f5dc63d5ccb93eda10038d7deb", "score": "0.5978839", "text": "def concatenate_data(self):\n keys = self.get_exist_data_names()\n time_unrolled = False\n for k in keys:\n data = self.data[k]\n if type(data) is list:\n if not time_unrolled and self.t is not None:\n self.data[k], self.t = concat_time_series_matrices(self.data[k], self.t)\n time_unrolled = True\n else:\n self.data[k] = concat_time_series_matrices(self.data[k])", "title": "" }, { "docid": "8b375b31f2a319e4d9f5efc276dccfe6", "score": "0.5966005", "text": "def _concat(self, X, y, X_art, y_art):\n X_concat = np.vstack((X, X_art))\n y_concat = np.hstack((y, y_art))\n return X_concat, y_concat", "title": "" }, { "docid": "ef65ba38d9fd766824a8dc3625bc84ee", "score": "0.59248024", "text": "def concat(xs: Sequence[Any], axis: int = 0) -> Any:\n return multimap(lambda *xs: np.concatenate(xs, axis=axis), *xs)", "title": "" }, { "docid": "cc982a737dfd60beb4bc1331d6c540a1", "score": "0.5914997", "text": "def default_concat(data: typing.List[np.ndarray]) -> np.ndarray:\n return np.stack(data, axis=-1)", "title": "" }, { "docid": "05c70ca36df71a8d11a8c0a3d574bc6c", "score": "0.5883039", "text": "def flatten_arrays(arrays):\n return numpy.concatenate([a.flatten() for a in arrays])", "title": "" }, { "docid": "15239c252b031389d38fba3107dc3e64", "score": "0.5855274", "text": "def _merge_arrays(arrays):\n names = arrays[0].keys()\n result = {}\n for name in names:\n for array in arrays:\n result.setdefault(name, []).append(array[name])\n result = {k: np.concatenate(v, axis=0) for k, v in result.items()}\n return result", "title": "" }, { "docid": "76a8674547b0354cdf53be87a7513f16", "score": "0.58280665", "text": "def batch_concat(\n values: types.NestedArray,\n num_batch_dims: int = 1,\n) -> jnp.ndarray:\n flatten_fn = lambda x: _flatten(x, num_batch_dims)\n flat_leaves = tree.map_structure(flatten_fn, values)\n return jnp.concatenate(tree.flatten(flat_leaves), axis=-1)", "title": "" }, { "docid": "0c2039656695bb14b1924cfaaa2e241c", "score": "0.57595724", "text": "def combine(self, array1, array2):\n return ClientSide.combine(array1, array2)", "title": "" }, { "docid": "3b3d9d095a6c5d36b59a112f1f784e82", "score": "0.575201", "text": "def mconcat(cls, *monoids):\n if len(monoids) < 2:\n raise TypeError(\"Need at least two values for mconcat\")\n return reduce(cls.mappend, monoids)", "title": "" }, { "docid": "23a3b5538b4076d8aef917a272c9be68", "score": "0.5662059", "text": "def _gather_arrays(self, this_cpu_array, axis=0):\n if self._num_cpus == 1:\n return this_cpu_array\n\n gathered_arrays = self._comm.allgather(this_cpu_array)\n\n return np.concatenate(gathered_arrays, axis=axis)", "title": "" }, { "docid": "58f6e7892cff3b74a78feb5dcd94e746", "score": "0.56226766", "text": "def append_all(self, samples: tuple) -> None:\n 
batch_size = samples[0][0].size(0)\n for i in range(batch_size):\n for o, a, r, o_, a_ in samples:\n self._data.append((o[i], a[i], r[i], o_[i], a_[i]))", "title": "" }, { "docid": "fa6227afbcfaa58efa505f810ed1522b", "score": "0.5611987", "text": "def cat(self, *plots):\n return concat(self, *plots)", "title": "" }, { "docid": "fa6227afbcfaa58efa505f810ed1522b", "score": "0.5611987", "text": "def cat(self, *plots):\n return concat(self, *plots)", "title": "" }, { "docid": "d8be425480a7553456fa29ccc6aeba98", "score": "0.56075835", "text": "def test_concatenate(self) -> None:\n sample1 = np.arange(0, 10)\n sample2 = np.arange(10, 20)\n fd1 = FDataGrid([sample1]).to_basis(FourierBasis(n_basis=5))\n fd2 = FDataGrid([sample2]).to_basis(FourierBasis(n_basis=5))\n\n fd = concatenate([fd1, fd2])\n\n np.testing.assert_equal(fd.n_samples, 2)\n np.testing.assert_equal(fd.dim_codomain, 1)\n np.testing.assert_equal(fd.dim_domain, 1)\n np.testing.assert_array_equal(\n fd.coefficients,\n np.concatenate([fd1.coefficients, fd2.coefficients]),\n )", "title": "" }, { "docid": "dc5a4b9bf44b0647596143e240bf899b", "score": "0.5575618", "text": "def _concatenate_arrays(arrays_by_rank, nprocs):\n\n if nprocs <= 0:\n return 0\n\n array_names = arrays_by_rank[0].keys()\n first_processors_arrays = arrays_by_rank[0]\n\n if nprocs > 1:\n ret = {}\n for array_name in array_names:\n first_array = first_processors_arrays[array_name]\n for rank in range(1, nprocs):\n other_processors_arrays = arrays_by_rank[rank]\n other_array = other_processors_arrays[array_name]\n\n # append the other array to the first array\n first_array.append_parray(other_array)\n\n # remove the non local particles\n first_array.remove_tagged_particles(1)\n\n ret[array_name] = first_array\n\n else:\n ret = arrays_by_rank[0]\n\n return ret", "title": "" }, { "docid": "0fae3332ed021d48d530549f7fbf3156", "score": "0.5565273", "text": "def merge_data():\n input_set, target_set = dataset_reader(name='1PmitT')\n for i in range(2, 8):\n name = f\"{i}PmitT\"\n input_append, target_append = dataset_reader(name=name)\n input_set = np.concatenate((input_set, input_append), axis=0)\n target_set = np.concatenate((target_set, target_append), axis=0)\n\n return input_set, target_set", "title": "" }, { "docid": "f50e02d3fdd0a1ff90a73f99199c6adf", "score": "0.5542635", "text": "def np_cat(mat1, mat2, axis=0):\n result = np.concatenate((mat1, mat2), axis=axis)\n return result", "title": "" }, { "docid": "757afb446da7ed9093d90f0350831a8e", "score": "0.55274945", "text": "def np_cat(mat1, mat2, axis=0):\n return(np.concatenate((mat1, mat2), axis=axis))", "title": "" }, { "docid": "3ade3a2a4747abee65223caf6bde5eea", "score": "0.5501633", "text": "def append_all(arr1: bytearray, arr2: bytearray):\n for b in arr2:\n arr1.append(b)", "title": "" }, { "docid": "bdd2b4f68f31ad3f9e584b1cdb238bc6", "score": "0.55002123", "text": "def concat_metadatas(texts, metadatas):\n concatenated = []\n for i in range(len(texts)):\n concatenated.append(concat_sequence_metadata(texts[i], metadatas[i]))\n return torch.stack(concatenated)", "title": "" }, { "docid": "6cb40129c0e89a0eddd3dee145c140a8", "score": "0.54881823", "text": "def concatenate(alignments, file_name):\n otus = []\n sequences = []\n for alignment in alignments:\n records = SeqIO.parse(alignment, \"fasta\")\n for record in records:\n # sample record.description:\n # 10-JEPS-98429\n otu = record.description\n if otu not in otus:\n otus.append(otu)\n sequences.append(\"\")\n\n # now concatenate the sequences\n total_length = 
0\n for alignment in alignments:\n records = SeqIO.parse(alignment, \"fasta\")\n # make sure to only add 1 sequence per gene region for each otu\n already_added = []\n for record in records:\n otu = record.description\n if otu not in already_added:\n sequences[otus.index(otu)] = sequences[otus.index(otu)] + record.seq\n already_added.append(otu)\n loci_length = len(record.seq)\n total_length += loci_length\n # add gaps for any OTU that didn't have a sequence\n for otu in otus:\n if otu not in already_added:\n sequences[otus.index(otu)] = sequences[otus.index(otu)] + make_gaps(loci_length)\n\n # write to FASTA file\n f = open(file_name, \"w\")\n for otu in otus:\n # >otu\n # otus[otu]\n f.write(\"> \" + otu + \"\\n\")\n sequence = str(sequences[otus.index(otu)])\n i = 0\n while i < len(sequence):\n f.write(sequence[i:i+80] + \"\\n\")\n i += 80\n f.close()\n\n # write to nexus file\n alignment = AlignIO.read(open(file_name), \"fasta\", alphabet=Gapped(IUPAC.ambiguous_dna))\n g = open(file_name + \".nex\", \"w\")\n g.write(alignment.format(\"nexus\"))", "title": "" }, { "docid": "aaf3370d3395141ef5c15fc95a4f68ea", "score": "0.54863805", "text": "def concat2(list_of_two_arrays):\n if len(list_of_two_arrays[0]) == 0:\n return list_of_two_arrays[1]\n elif len(list_of_two_arrays[1]) == 0:\n return list_of_two_arrays[0]\n else:\n return np.concatenate(list_of_two_arrays, axis=1)", "title": "" }, { "docid": "0c470939268d373a0af2b93efa2dd650", "score": "0.5442266", "text": "def merge_arrays(args):\n arrays_paths = args.arrays\n save_path = args.save_path\n save_name = args.save_name\n\n arrays = []\n for array_path in arrays_paths:\n arrays.append({})\n with np.load(array_path) as data:\n for name in data.files:\n arrays[-1][name] = data[name]\n merged_arrays = _merge_arrays(arrays)\n\n if save_path is not None:\n file_name = save_name if save_name is not None else 'training_data'\n save_file = os.path.join(save_path, file_name)\n np.savez_compressed(save_file, **merged_arrays)\n print('Merged arrays successfully written to %s' % (save_file + '.npz'))", "title": "" }, { "docid": "680b77b5b18b5fdd5502670cecc6c28d", "score": "0.54215723", "text": "def merge(self, genome_array):\r\n assert type(genome_array) == GenomeArray, genome_array\r\n\r\n if genome_array.empty():\r\n # second array is empty\r\n # return self, with 0 mutations\r\n return self, 0\r\n else:\r\n # strings should be of equal size to compare mutations\r\n assert len(self) == len(genome_array)\r\n\r\n mutation_count = 0\r\n merged_array = GenomeArray()\r\n for pos_orig, pos_new in zip(self, genome_array):\r\n if pos_orig == pos_new:\r\n # the strings are identical at this coordinate, add this to the merged string.\r\n merged_array.append(pos_new)\r\n else:\r\n # the strings are not identical at this coordinate\r\n # take the intersection if possible\r\n intersection = pos_orig & pos_new\r\n if len(intersection) > 0:\r\n merged_array.append(intersection)\r\n else:\r\n # there is no intersection, append the array union\r\n merged_array.append(pos_orig | pos_new)\r\n\r\n # don't count gaps as mutations\r\n if {'-'} != pos_orig and {'-'} != pos_new:\r\n mutation_count += 1\r\n\r\n return merged_array, mutation_count", "title": "" }, { "docid": "ac73e4567d5911f281c9dabac2740373", "score": "0.5406732", "text": "def np_cat(mat1, mat2, axis=0):\n return np.concatenate((mat1, mat2), axis)", "title": "" }, { "docid": "ac73e4567d5911f281c9dabac2740373", "score": "0.5406732", "text": "def np_cat(mat1, mat2, axis=0):\n return np.concatenate((mat1, 
mat2), axis)", "title": "" }, { "docid": "8b1e1bf50468e9dab220ddb90b6b2728", "score": "0.53860223", "text": "def concatenate_and_rechunk(\n zarrs: Sequence[zarr.Array],\n chunks: Optional[Tuple[int, ...]] = None,\n dtype: DType = None,\n) -> da.Array:\n\n if len(set([z.shape[1:] for z in zarrs])) > 1:\n shapes = [z.shape for z in zarrs]\n raise ValueError(\n f\"Zarr arrays must have matching shapes (except in the first dimension): {shapes}\"\n )\n\n lengths = np.array([z.shape[0] for z in zarrs])\n lengths0 = np.insert(lengths, 0, 0, axis=0)\n offsets = np.cumsum(lengths0)\n total_length = offsets[-1]\n\n shape = (total_length, *zarrs[0].shape[1:])\n chunks = chunks or zarrs[0].chunks\n dtype = dtype or zarrs[0].dtype\n\n ar = da.empty(shape, chunks=chunks)\n\n def load_chunk(\n x: ArrayLike,\n zarrs: Sequence[zarr.Array],\n offsets: ArrayLike,\n block_info: Dict[Any, Any],\n ) -> ArrayLike:\n return _slice_zarrs(zarrs, offsets, block_info[0][\"array-location\"])\n\n return ar.map_blocks(load_chunk, zarrs=zarrs, offsets=offsets, dtype=dtype)", "title": "" }, { "docid": "7e5bb5b2076d8e8584e1b2aa687ce120", "score": "0.5372834", "text": "def concat(self, outputs):\n\n return [\". \".join([str(y) for y in x if y]) for x in self.hstack(outputs)]", "title": "" }, { "docid": "83a878334b7003421fc061f2502457a1", "score": "0.5367566", "text": "def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,\n autoconvert=False):\n if isinstance(arrays, ndarray):\n return arrays\n elif len(arrays) == 1:\n return arrays[0]\n seqarrays = [np.asanyarray(a).ravel() for a in arrays]\n nrecords = [len(a) for a in seqarrays]\n ndtype = [a.dtype for a in seqarrays]\n fldnames = [d.names for d in ndtype]\n #\n dtype_l = ndtype[0]\n newdescr = _get_fieldspec(dtype_l)\n names = [n for n, d in newdescr]\n for dtype_n in ndtype[1:]:\n for fname, fdtype in _get_fieldspec(dtype_n):\n if fname not in names:\n newdescr.append((fname, fdtype))\n names.append(fname)\n else:\n nameidx = names.index(fname)\n _, cdtype = newdescr[nameidx]\n if autoconvert:\n newdescr[nameidx] = (fname, max(fdtype, cdtype))\n elif fdtype != cdtype:\n raise TypeError(\"Incompatible type '%s' <> '%s'\" %\n (cdtype, fdtype))\n # Only one field: use concatenate\n if len(newdescr) == 1:\n output = ma.concatenate(seqarrays)\n else:\n #\n output = ma.masked_all((np.sum(nrecords),), newdescr)\n offset = np.cumsum(np.r_[0, nrecords])\n seen = []\n for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):\n names = a.dtype.names\n if names is None:\n output['f%i' % len(seen)][i:j] = a\n else:\n for name in n:\n output[name][i:j] = a[name]\n if name not in seen:\n seen.append(name)\n #\n return _fix_output(_fix_defaults(output, defaults),\n usemask=usemask, asrecarray=asrecarray)", "title": "" }, { "docid": "b808b3fe183cdb5f17c57432436b309b", "score": "0.5360926", "text": "def concat_new_first(arrs):\n arrs = map(lambda x: x[np.newaxis, ...], arrs)\n return np.concatenate(arrs, axis=0)", "title": "" }, { "docid": "27c2e255f33a7f30618509cbc7887cdd", "score": "0.5354984", "text": "def cati(*args):\n\n arrays = []\n\n for i, item in enumerate(args):\n\n if isinstance(item, np.ndarray):\n if item.ndim == 3:\n item = np.expand_dims(item, 0)\n\n if item.ndim != 4:\n raise ValueError(f'Shape of element {i} ({item.shape}) is not supported!')\n\n else:\n item = np.concatenate([x if x.ndim == 4 else np.expand_dims(x, axis=0) for x in item])\n if item.ndim != 4:\n item = item.squeeze()\n if item.ndim != 4:\n raise ValueError(f'Shape of 
element {i} ({item.shape}) is not supported!')\n\n arrays.append(item)\n\n out = np.concatenate(arrays, axis=0)\n return out if out.ndim == 4 else out.squeeze()", "title": "" }, { "docid": "ba9398f7567de54ca76d1abb841c3d29", "score": "0.5338124", "text": "def consolidated_array(self):\n arr = np.empty(shape=(len(self.names)))\n for i, name in enumerate(self.names):\n arr[i] = self.ipms[self.ipm_mapper[name]][name]\n return arr", "title": "" }, { "docid": "3909174de7202c425935d40682014a47", "score": "0.53241664", "text": "def concatenate_inner_features(self):\n new_data_inputs = [di[0] for di in self.data_inputs]\n\n for i, data_input in enumerate(self.data_inputs):\n for di in data_input[1:]:\n new_data_inputs[i] = _inner_concatenate_np_array(new_data_inputs[i], di)\n\n self.set_data_inputs(new_data_inputs)", "title": "" }, { "docid": "130ca2285f58dce258e0d9418db4bdac", "score": "0.5303287", "text": "def concat(data, axis):\n \n data_size = len(data)\n if data_size < min_size:\n raise RuntimeError(\"The size of data must be greater equal 1\")\n\n dtype = data[0].dtype\n vc_util.ops_dtype_check(dtype, vc_util.DtypeForDavinci.ALL_TYPES)\n\n shape_0 = data[0].shape\n vc_util.check_shape(shape_0)\n if axis < 0:\n axis += len(shape_0)\n\n for i in range(1, data_size):\n shape_i = data[i].shape\n vc_util.check_shape(shape_i)\n if len(shape_i) != len(shape_0):\n raise ValueError(\"Input tensors must have same dimensions.\")\n\n res = akg.lang.cce.concat(data, axis)\n return res", "title": "" }, { "docid": "fa97edc635f2539326be9305742b01b9", "score": "0.53031164", "text": "def merge_arrays(seqarrays, fill_value=-1, flatten=False,\n usemask=False, asrecarray=False):\n # Only one item in the input sequence ?\n if (len(seqarrays) == 1):\n seqarrays = np.asanyarray(seqarrays[0])\n # Do we have a single ndarray as input ?\n if isinstance(seqarrays, (ndarray, np.void)):\n seqdtype = seqarrays.dtype\n # Make sure we have named fields\n if seqdtype.names is None:\n seqdtype = np.dtype([('', seqdtype)])\n if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:\n # Minimal processing needed: just make sure everything's a-ok\n seqarrays = seqarrays.ravel()\n # Find what type of array we must return\n if usemask:\n if asrecarray:\n seqtype = MaskedRecords\n else:\n seqtype = MaskedArray\n elif asrecarray:\n seqtype = recarray\n else:\n seqtype = ndarray\n return seqarrays.view(dtype=seqdtype, type=seqtype)\n else:\n seqarrays = (seqarrays,)\n else:\n # Make sure we have arrays in the input sequence\n seqarrays = [np.asanyarray(_m) for _m in seqarrays]\n # Find the sizes of the inputs and their maximum\n sizes = tuple(a.size for a in seqarrays)\n maxlength = max(sizes)\n # Get the dtype of the output (flattening if needed)\n newdtype = _zip_dtype(seqarrays, flatten=flatten)\n # Initialize the sequences for data and mask\n seqdata = []\n seqmask = []\n # If we expect some kind of MaskedArray, make a special loop.\n if usemask:\n for (a, n) in zip(seqarrays, sizes):\n nbmissing = (maxlength - n)\n # Get the data and mask\n data = a.ravel().__array__()\n mask = ma.getmaskarray(a).ravel()\n # Get the filling value (if needed)\n if nbmissing:\n fval = _check_fill_value(fill_value, a.dtype)\n if isinstance(fval, (ndarray, np.void)):\n if len(fval.dtype) == 1:\n fval = fval.item()[0]\n fmsk = True\n else:\n fval = np.array(fval, dtype=a.dtype, ndmin=1)\n fmsk = np.ones((1,), dtype=mask.dtype)\n else:\n fval = None\n fmsk = True\n # Store an iterator padding the input to the expected length\n 
seqdata.append(itertools.chain(data, [fval] * nbmissing))\n seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))\n # Create an iterator for the data\n data = tuple(_izip_records(seqdata, flatten=flatten))\n output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),\n mask=list(_izip_records(seqmask, flatten=flatten)))\n if asrecarray:\n output = output.view(MaskedRecords)\n else:\n # Same as before, without the mask we don't need...\n for (a, n) in zip(seqarrays, sizes):\n nbmissing = (maxlength - n)\n data = a.ravel().__array__()\n if nbmissing:\n fval = _check_fill_value(fill_value, a.dtype)\n if isinstance(fval, (ndarray, np.void)):\n if len(fval.dtype) == 1:\n fval = fval.item()[0]\n else:\n fval = np.array(fval, dtype=a.dtype, ndmin=1)\n else:\n fval = None\n seqdata.append(itertools.chain(data, [fval] * nbmissing))\n output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),\n dtype=newdtype, count=maxlength)\n if asrecarray:\n output = output.view(recarray)\n # And we're done...\n return output", "title": "" }, { "docid": "2a88506e8a22bbd8cb7386f905fa3c77", "score": "0.52946436", "text": "def combineArray(arrayList, method, prefix):\n # Check Array List\n for array in arrayList:\n if not cmds.objExists(array):\n raise Exception('Array attribute \"' + array + '\" does not exist!')\n\n # Check Method\n methodDict = {'sum': 0, 'subtract': 1, 'multiply': 2, 'divide': 3, 'average': 4, 'min': 5, 'max': 6}\n if not methodDict.has_key(method):\n raise Exception('Invalid method - \"' + method + '\"!')\n methodVal = methodDict[method]\n\n # Create combineArray node and connect\n combineArrayNode = cmds.createNode('combineArray', n=prefix + '_combineArray')\n cmds.setAttr(combineArrayNode + '.method', methodVal)\n for i in range(len(arrayList)):\n cmds.connectAttr(arrayList[i], combineArrayNode + '.inputArray[' + str(i) + ']', f=True)\n\n # Return Result\n return combineArrayNode + '.outputArray'", "title": "" }, { "docid": "037ac3e484dc206f3c6ae4b6e8fe15a3", "score": "0.5270676", "text": "def _concat(bundles, cat=(0, 0, 0, 0), concat_continuous=True):\n chan = sorted(set([x['chan'] for x in bundles]))\n cycle = sorted(set([x['cycle'] for x in bundles]))\n stage = sorted(set([x['stage'] for x in bundles]))\n evt_type = sorted(set([x['name'] for x in bundles]))\n\n all_cycle = None\n all_stage = None\n all_evt_type = None\n\n if cycle[0] is not None:\n all_cycle = ', '.join([str(c) for c in cycle])\n if stage[0] is not None:\n all_stage = ', '.join(stage)\n if evt_type[0] is not None:\n all_evt_type = ', '.join(evt_type)\n\n if cat[0]:\n cycle = [all_cycle]\n\n if cat[1]:\n stage = [all_stage]\n\n if cat[3]:\n evt_type = [all_evt_type]\n\n to_concat = []\n for ch in chan:\n\n for cyc in cycle:\n\n for st in stage:\n\n for et in evt_type:\n new_times = []\n\n for bund in bundles:\n chan_cond = ch == bund['chan']\n cyc_cond = cyc in (bund['cycle'], all_cycle)\n st_cond = st in (bund['stage'], all_stage)\n et_cond = et in (bund['name'], all_evt_type)\n\n if chan_cond and cyc_cond and st_cond and et_cond:\n new_times.extend(bund['times'])\n\n new_times = sorted(new_times, key=lambda x: x[0])\n new_bund = {'times': new_times,\n 'chan': ch,\n 'cycle': cyc,\n 'stage': st,\n 'name': et\n }\n to_concat.append(new_bund)\n\n if not cat[2]:\n to_concat_new = []\n\n for bund in to_concat:\n last = None\n bund['times'].append((inf,inf))\n start = 0\n\n for i, j in enumerate(bund['times']):\n\n if last is not None:\n if not isclose(j[0], last, abs_tol=0.01) \\\n or not 
concat_continuous:\n new_times = bund['times'][start:i]\n new_bund = bund.copy()\n new_bund['times'] = new_times\n to_concat_new.append(new_bund)\n start = i\n last = j[1]\n\n to_concat = to_concat_new\n\n to_concat = [x for x in to_concat if x['times']]\n\n return to_concat", "title": "" }, { "docid": "0d8bc550c45780573288a67b51ca19d5", "score": "0.52532935", "text": "def concatenate(list_a, list_b):\n m_f = np.asarray(list_a)\n m_i = np.asarray(list_b)\n f_and_i = np.append(m_f, m_i)\n f_and_i = np.asmatrix(f_and_i)\n\n return f_and_i", "title": "" }, { "docid": "79c617620a7a425161a2e6aa2efba5fd", "score": "0.52418464", "text": "def test_concatenate_matrices(self):\n def input_fn(dtype):\n return tuple(np.random.random((4, 4)) for i in range(5))\n\n self._np_tf_compare(\n input_fn,\n ttf.concatenate_matrices,\n t.concatenate_matrices)", "title": "" }, { "docid": "6cb70e85d315c466062cd3dafb536659", "score": "0.52359486", "text": "def _merge_by_append(self, tables):\n columns = uniq(column for table in tables for column in table.columns)\n\n merged = Table(columns=columns)\n for table in tables:\n merged.append_rows(table)\n\n return merged", "title": "" }, { "docid": "fbe79731426a4167ec631376c3e282c5", "score": "0.5235281", "text": "def concatenate_fastq(prefix_dict):\n for prefix, prefix_list in prefix_dict.items():\n logger.debug(\"Concatenating %s pairs of files.\" % len(prefix_list))\n r1, r2 = DemultiplexWriter.paired_end_filenames(prefix)\n pair_list = [DemultiplexWriter.paired_end_filenames(p) for p in prefix_list]\n\n cmd = \"cat %s > %s\" % (\" \".join(p[0] for p in pair_list), r1)\n os.system(cmd)\n logger.debug(r1)\n cmd = \"cat %s > %s\" % (\" \".join(p[1] for p in pair_list), r2)\n os.system(cmd)\n logger.debug(r2)", "title": "" }, { "docid": "f3733afe7daa39e634a7d97c5163f290", "score": "0.5222671", "text": "def combine_covs(*covs):\n def to_array(cov):\n if isinstance(cov,tuple): \n return cov\n elif isinstance(cov,str):\n with open(cov) as f:\n return re.sub(\"#\",\"\",f.readline()).split(), loadtxt(f)\n elif isinstance(cov,Chain):\n return cov.params(), cov.cov()\n elif isinstance(cov,dict):\n return [k for k in cov], diag([v**2 for v in cov.values()])\n else:\n raise ValueError(\"Unrecognized covariance data type.\")\n \n covs = [to_array(cov) for cov in covs]\n allnames = list(chain(*[n for n,_ in covs]))\n \n allcov = zeros((len(allnames),len(allnames)))\n for (names,cov) in covs:\n idxs = [allnames.index(n) for n in names]\n for i in idxs: allcov[i,:] = allcov[:,i] = 0\n allcov[ix_(idxs,idxs)] = cov\n \n return allnames, allcov", "title": "" }, { "docid": "cc43d4abf6af6609125f3028b9b6b249", "score": "0.52198577", "text": "def combine_and_vectorize(data_batches):\n data = np.concatenate(data_batches, axis=0)\n s = data.shape\n if len(s) > 2:\n data = data.reshape((s[0], -1))\n\n return data", "title": "" }, { "docid": "6ff9b0756b8404a22d60ab0267810185", "score": "0.52002597", "text": "def concatenate(arg1, arg2):\n for item in arg2[1][0]:\n arg1[1][0] += [item]\n for item in arg2[1][1]:\n arg1[1][1] += [item]\n for item in arg2[1][2]:\n arg1[1][2] += [item]\n for item in arg2[1][3]:\n arg1[1][3] += [item]\n return arg1", "title": "" }, { "docid": "d275f242fab296e8225d949f7075e17b", "score": "0.5190023", "text": "def _combine_arrays(\n self, compute_market_results: Callable, market_ids: Array, fixed_args: Sequence = (),\n market_args: Sequence = ()) -> Array:", "title": "" }, { "docid": "e489d4cca8501e06a680c3d6e239c8a5", "score": "0.51898175", "text": "def append(self, 
raws, preload=None):\r\n if not isinstance(raws, list):\r\n raws = [raws]\r\n\r\n # make sure the raws are compatible\r\n all_raws = [self]\r\n all_raws += raws\r\n _check_raw_compatibility(all_raws)\r\n\r\n # deal with preloading data first (while files are separate)\r\n all_preloaded = self._preloaded and all(r._preloaded for r in raws)\r\n if preload is None:\r\n if all_preloaded:\r\n preload = True\r\n else:\r\n preload = False\r\n\r\n if preload is False:\r\n if self._preloaded:\r\n self._data = None\r\n self._times = None\r\n self._preloaded = False\r\n else:\r\n # do the concatenation ourselves since preload might be a string\r\n nchan = self.info['nchan']\r\n c_ns = np.cumsum([rr.n_times for rr in ([self] + raws)])\r\n nsamp = c_ns[-1]\r\n\r\n if not self._preloaded:\r\n this_data = self._read_segment()[0]\r\n else:\r\n this_data = self._data\r\n\r\n # allocate the buffer\r\n if isinstance(preload, string_types):\r\n _data = np.memmap(preload, mode='w+', dtype=this_data.dtype,\r\n shape=(nchan, nsamp))\r\n else:\r\n _data = np.empty((nchan, nsamp), dtype=this_data.dtype)\r\n\r\n _data[:, 0:c_ns[0]] = this_data\r\n\r\n for ri in range(len(raws)):\r\n if not raws[ri]._preloaded:\r\n # read the data directly into the buffer\r\n data_buffer = _data[:, c_ns[ri]:c_ns[ri + 1]]\r\n raws[ri]._read_segment(data_buffer=data_buffer)\r\n else:\r\n _data[:, c_ns[ri]:c_ns[ri + 1]] = raws[ri]._data\r\n self._data = _data\r\n self._preloaded = True\r\n\r\n # now combine information from each raw file to construct new self\r\n for r in raws:\r\n self._first_samps = np.r_[self._first_samps, r._first_samps]\r\n self._last_samps = np.r_[self._last_samps, r._last_samps]\r\n self._raw_lengths = np.r_[self._raw_lengths, r._raw_lengths]\r\n self.rawdirs += r.rawdirs\r\n self._filenames += r._filenames\r\n self.last_samp = self.first_samp + sum(self._raw_lengths) - 1\r\n\r\n # this has to be done after first and last sample are set appropriately\r\n if self._preloaded:\r\n self._times = np.arange(self.n_times) / self.info['sfreq']", "title": "" }, { "docid": "ea85cb4b719ad17acaa1109227506385", "score": "0.5187995", "text": "def concatenate_matrices(*matrices):\n M = numpy.identity(4)\n for i in matrices:\n M = numpy.dot(M, i)\n return M", "title": "" }, { "docid": "0dd4b435654e8ba76b93177af5a5c59b", "score": "0.51767373", "text": "def _inner_concatenate_np_array(np_array, np_array_to_zip):\n while len(np_array_to_zip.shape) < len(np_array.shape):\n np_array_to_zip = np.expand_dims(np_array_to_zip, axis=-1)\n\n target_shape = tuple(list(np_array.shape[:-1]) + [np_array_to_zip.shape[-1]])\n np_array_to_zip = np.broadcast_to(np_array_to_zip, target_shape)\n\n return np.concatenate((np_array, np_array_to_zip), axis=-1)", "title": "" }, { "docid": "bf641f0b51bd96106db8065c513405cf", "score": "0.5170484", "text": "def combine(self, process_size=None):\n self._combine_time_index()\n self._combine_meta()\n self._combine_coordinates()\n\n for dset_name, dset_attrs in self._dset_attrs.items():\n self._combine_dataset(dset_name, dset_attrs,\n process_size=process_size)", "title": "" }, { "docid": "b870d534b2db1d7adb74544425211ba1", "score": "0.5169373", "text": "def cat(batches : List[\"Batch\"], axis : int = 0) -> \"Batch\":\n if isinstance(list(batches[0].values())[0], np.ndarray):\n cat_func = np.concatenate\n else:\n cat_func = torch.cat\n batch = Batch()\n for k in batches[0].keys():\n batch[k] = cat_func([b[k] for b in batches], axis=axis)\n return batch", "title": "" }, { "docid": 
"2f595e5a7de7bc579211e6adb1f0208c", "score": "0.5168196", "text": "def concat(data, files):\n \n params = get_header(data)\n versions = get_versions(params)\n to_merge = []\n for v in versions:\n [to_merge.append(fname) for fname in files if v in fname]\n with open(expanduser(params['outfile']), 'w') as outfile:\n for key, val in params.iteritems():\n outfile.write(\"# %s -> %s\\n\" % (key, val))\n outfile.write(\"Project;Version;SHA;Configuration;Test;RawVal\\n\")\n for fname in to_merge:\n with open(fname, 'r') as fin:\n fin.next() # skip header\n [outfile.write(line) for line in fin]", "title": "" }, { "docid": "7a3ec61a58a3d95f8ea1777fc911795c", "score": "0.516331", "text": "def combine_batch_lists(processed_batch_queue: multiprocessing.Queue) -> np.array:\r\n batches = list()\r\n while not processed_batch_queue.empty():\r\n batches.append(processed_batch_queue.get())\r\n return batches", "title": "" }, { "docid": "f5d97822b5e4b1b58ab57138d68b8415", "score": "0.5163119", "text": "def nod_concat(nod_arr):\n ts_list = [nod[1] for nod in nod_arr]\n data_list = [nod[2] for nod in nod_arr]\n ts = np.concatenate(ts_list)\n data = np.concatenate(data_list, axis=2)\n return (ts, data)", "title": "" }, { "docid": "19b52229fecee14423633d25604cf60e", "score": "0.5152555", "text": "def concat_tensor_list(tensor_list):\n return np.concatenate(tensor_list, axis=0)", "title": "" }, { "docid": "8f42f6c919d2812534088f7280d27f54", "score": "0.5146262", "text": "def aggregate_csv(data_files):\n data = np.array([])\n\n for fname in data_files:\n fdata = np.loadtxt(fname, delimiter=',', dtype=np.float32)\n data = np.vstack([data, fdata]) if data.size else fdata\n\n np.random.shuffle(data)\n return data", "title": "" }, { "docid": "dd8fb7c5db15808f1316e9bc1b911026", "score": "0.51433617", "text": "def cartesianproduct(lists):\r\n return reduce(appendEs2Sequences,lists,[])", "title": "" }, { "docid": "dd8fb7c5db15808f1316e9bc1b911026", "score": "0.51433617", "text": "def cartesianproduct(lists):\r\n return reduce(appendEs2Sequences,lists,[])", "title": "" }, { "docid": "96fbeb1af1bf21c3fe3c19c58355b262", "score": "0.5142972", "text": "def merge(*args):\n return np.hstack(tuple(arg.reshape(-1, 1) for arg in args))", "title": "" }, { "docid": "68f21edcc9e67c36fe731c4efdba86e1", "score": "0.5125708", "text": "def concat_annotations(infile, outdir, est_fragments):\n df = pd.read_csv(infile)\n logger(f'Finished loading the annotations')\n barcodes = df.Barcode.unique()\n logger(f'{len(barcodes)} read clouds')\n fw, rv = fragment_annot(df, barcodes) if est_fragments else species_annot(df, barcodes)\n pd.DataFrame(fw).to_csv(join(outdir, 'R1', basename(infile)), index = False, header = False) \n pd.DataFrame(rv).to_csv(join(outdir, 'R2', basename(infile)), index = False, header = False)", "title": "" }, { "docid": "345a067641b1fc9822b374dcf5ed7eb7", "score": "0.51248586", "text": "def concatenate_events(events, first_samps, last_samps):\r\n if not isinstance(events, list):\r\n raise ValueError('events must be a list of arrays')\r\n if not (len(events) == len(last_samps) and\r\n len(events) == len(first_samps)):\r\n raise ValueError('events, first_samps, and last_samps must all have '\r\n 'the same lengths')\r\n first_samps = np.array(first_samps)\r\n last_samps = np.array(last_samps)\r\n n_samps = np.cumsum(last_samps - first_samps + 1)\r\n events_out = events[0]\r\n for e, f, n in zip(events[1:], first_samps[1:], n_samps[:-1]):\r\n # remove any skip since it doesn't exist in concatenated files\r\n e2 = 
e.copy()\r\n e2[:, 0] -= f\r\n # add offset due to previous files, plus original file offset\r\n e2[:, 0] += n + first_samps[0]\r\n events_out = np.concatenate((events_out, e2), axis=0)\r\n\r\n return events_out", "title": "" }, { "docid": "dee36aefa18072416faa56cc79f72912", "score": "0.5116867", "text": "def concatenate_genes(out_path, cosmic_dir):\n # concatenate gene tab delim files\n with open(out_path, 'wb') as mywriter:\n # iterate through 'A'...'Z' directories\n FIRST_FLAG = True # flag for writing header\n for letter in string.ascii_uppercase:\n for file_name in os.listdir(cosmic_dir + letter):\n if file_name.endswith('.tsv') and '_ENST' not in file_name:\n # only use tab delim files that are\n # not alternative isoforms\n with open(cosmic_dir + letter + \"/\" + file_name) as handle:\n if FIRST_FLAG:\n # headers match the `Nucleotide` table in COSMIC_nuc\n header = ['Gene', 'SampleName', 'COSMICSampleID',\n 'AminoAcid', 'Nucleotide', 'PrimaryTissue',\n 'Tissuesubtype1', 'Tissuesubtype2', 'Histology',\n 'Histologysubtype1', 'Histologysubtype2', 'PubmedID',\n 'studies', 'MutationID', 'SomaticStatus',\n 'SampleSource', 'Zygosity', 'hg18chrom',\n 'hg18start', 'hg18end', 'hg19chrom',\n 'hg19start', 'hg19end']\n mywriter.write('\\t'.join(header) + '\\n') # write header\n LineTuple = namedtuple('LineTuple', header[1:]) # create namedtuple type\n FIRST_FLAG = False\n\n gene_name = file_name[:-4] # file name before \".tsv\"\n handle = skip_header(handle) # skip beginning lines\n # add data\n for line in handle:\n #split_line = line.split('\\t')\n split_tuple = LineTuple(*line.split('\\t')) # split\n if split_tuple.AminoAcid and split_tuple.Nucleotide:\n # if line designates a mutation\n if split_tuple.AminoAcid != 'p.?' or split_tuple.Nucleotide != 'c.?':\n # not unknown effect for AA and amino acid\n if 'unknown' not in split_tuple.SomaticStatus.lower():\n # do not include unknown somatic status mutations\n mywriter.write(gene_name + \"\\t\" + line) # write with gene name\n\n # iterate through genes in the numeric directory\n numeric_dir = cosmic_dir + '0-9/'\n for file_name in os.listdir(numeric_dir):\n if file_name.endswith('.tsv') and '_ENST' not in file_name:\n # only use tab delim files that are\n # not alternative isoforms\n with open(numeric_dir + file_name) as handle:\n gene_name = file_name[:-4] # file name before \".tsv\"\n handle = skip_header(handle) # skip beginning lines\n for line in handle:\n #split_line = line.split('\\t')\n split_tuple = LineTuple(*line.split('\\t')) # split line\n if split_tuple.AminoAcid and split_tuple.Nucleotide:\n # if line designates a mutation\n if split_tuple.AminoAcid != 'p.?' 
or split_tuple.Nucleotide != 'c.?':\n # not uknown effect for both AA and nucleotide\n if 'unknown' not in split_tuple.SomaticStatus.lower():\n # do not include unknown somatic status mutations\n mywriter.write(gene_name + \"\\t\" + line) # write with gene name", "title": "" }, { "docid": "f8f5ade510e7e8b6bdb06909f0c77d43", "score": "0.5109734", "text": "def concat_time_series_matrices(mats, t=None):\n ret_mat = np.concatenate(mats, axis=1)\n if t is not None:\n ret_t = np.concatenate([[t[i]]*mats[i].shape[1] for i in range(len(t))])\n return ret_mat, ret_t\n else:\n return ret_mat", "title": "" }, { "docid": "4e46eb78f4c6eae2c5de81c7bc9f62e0", "score": "0.510929", "text": "def __combined_generator(sequence1, sequence2, sequence3):\n if len(sequence1) != len(sequence2) or len(sequence1) != len(sequence3):\n raise ValueError('All arrays have to be the same length!')\n length = len(sequence1)\n return [__frac(sequence1[i] + sequence2[i] + sequence3[i]) for i in range(0, length)]", "title": "" }, { "docid": "18bb38b39dacd413139fe438d54041a0", "score": "0.5104767", "text": "def pack_o(d1, d2, axis=1):\n return np.concatenate((d1, d2), axis=axis)", "title": "" }, { "docid": "db3d52993f2f99316653298bed277311", "score": "0.51022285", "text": "def _append_array(self, biodesy_array):\n\n assert (\n biodesy_array.Nrows == self.Nrows\n ), \"Data does not have equal number of rows\"\n assert (\n biodesy_array.Ncolumns == self.Ncolumns\n ), \"Data does not have equal number of columns\"\n\n # update data shape\n self.Nreads += biodesy_array.Nreads\n self.shape = (self.Nreads, self.Nrows, self.Ncolumns, self.Nchannels)\n\n # find matching channel names\n matching_channels = list(\n set(self.channel_names) & set(biodesy_array.channel_names)\n )\n # and append input BiodesyArray for each matching channel\n for ch in matching_channels:\n self.__setattr__(\n ch,\n np.concatenate(\n [self.__dict__[ch], biodesy_array.__dict__[ch]], axis=0\n ),\n )", "title": "" }, { "docid": "7b8ed32d00b7294bf12a43f917586a48", "score": "0.5082424", "text": "def add_arrays(arr1, arr2):\n if (len(arr1) == len(arr2)):\n return [x+y for x, y in zip(arr1, arr2)]\n else:\n return None", "title": "" }, { "docid": "f200baff6afb24da1a03abd458cc654c", "score": "0.507967", "text": "def concatenate(functions: Iterable[T], as_coordinates: bool = False) -> T:\n functions = iter(functions)\n first = next(functions, None)\n\n if first is None:\n raise ValueError(\n \"At least one FData object must be provided to concatenate.\",\n )\n\n return first.concatenate(*functions, as_coordinates=as_coordinates)", "title": "" }, { "docid": "ccc429aaaf02c822b48da4bda8273f22", "score": "0.50748837", "text": "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n else:\n return [sum(x) for x in zip(arr1, arr2)]", "title": "" }, { "docid": "a3de47bd9dd541f3ae0025b8cb18889c", "score": "0.50738615", "text": "def concat_data_across_binning_dim(self, data):\n data['start_speeds'] = [np.concatenate(data['start_speeds'], axis=0)]\n data['start_configs'] = \\\n [SystemConfig.concat_across_batch_dim(data['start_configs'])]\n data['waypt_configs'] = \\\n [SystemConfig.concat_across_batch_dim(data['waypt_configs'])]\n data['spline_trajectories'] = \\\n [Trajectory.concat_across_batch_dim(data['spline_trajectories'])]\n data['horizons'] = [np.concatenate(data['horizons'], axis=0)]\n data['lqr_trajectories'] = \\\n [Trajectory.concat_across_batch_dim(data['lqr_trajectories'])]\n data['K_nkfd'] = [np.concatenate(data['K_nkfd'], axis=0)]\n 
data['k_nkf1'] = [np.concatenate(data['k_nkf1'], axis=0)]\n return data", "title": "" }, { "docid": "9af1e551d961ca828b2fb39094b26cea", "score": "0.5071711", "text": "def load_each_data_cube_and_catenate_each_class(dataCubeFiles, dataCubePath, annotationsPositionsSet):\n \n \n spectraAllClass = []\n spectraAllClassFileNumbers = []\n spectraAllClassPositions = []\n i = 0\n while i < len(annotationsPositionsSet[0]):\n spectraAllClass.append(0)\n \n spectraAllClassFileNumbers.append(0)\n spectraAllClassPositions.append(0)\n \n i += 1\n \n \n waveNumbers = []\n spectraTrainingSet = []\n i = 0\n while i < len(annotationsPositionsSet):\n fileName = dataCubeFiles[i]\n dataCubeStruct = scipy.io.loadmat(os.path.join(dataCubePath, fileName))\n dataCubeImage = dataCubeStruct[\"image\"]\n \n if i == 0:\n waveNumbers = dataCubeStruct[\"wn\"]\n \n \n spectraEachColor = []\n j = 0\n while j < len(annotationsPositionsSet[0]):\n rowIndexes = [element[0] for element in annotationsPositionsSet[i][j]]\n columnIndexes = [element[1] for element in annotationsPositionsSet[i][j]]\n spectraSelected = dataCubeImage[rowIndexes, columnIndexes, :]\n \n spectraEachColor.append(spectraSelected)\n \n \n if not isinstance(spectraAllClass[j], numpy.ndarray):\n spectraAllClass[j] = spectraSelected\n \n spectraAllClassFileNumbers[j] = numpy.full((spectraSelected.shape[0],), i)\n spectraAllClassPositions[j] = copy.deepcopy(annotationsPositionsSet[i][j])\n \n \n else:\n spectraAllClass[j] = numpy.vstack((spectraAllClass[j], spectraSelected))\n \n spectraAllClassFileNumbers[j] = numpy.concatenate((spectraAllClassFileNumbers[j], numpy.full((spectraSelected.shape[0],), i)))\n spectraAllClassPositions[j] = numpy.vstack((spectraAllClassPositions[j], copy.deepcopy(annotationsPositionsSet[i][j])))\n \n \n j += 1\n \n \n spectraTrainingSet.append(spectraEachColor)\n \n i += 1\n \n \n spectraAllClassConcatenated = spectraAllClass[0]\n spectraCorrespondingClass = numpy.full((spectraAllClass[0].shape[0],), 0)\n \n spectraCorrespondingFileNumber = spectraAllClassFileNumbers[0]\n spectraCorrespondingPosition = spectraAllClassPositions[0]\n \n i = 1\n while i < len(annotationsPositionsSet[0]):\n spectraAllClassConcatenated = numpy.vstack((spectraAllClassConcatenated, spectraAllClass[i]))\n \n spectraCorrespondingClass = numpy.concatenate((spectraCorrespondingClass, numpy.full((spectraAllClass[i].shape[0],), i)))\n \n spectraCorrespondingFileNumber = numpy.concatenate((spectraCorrespondingFileNumber, spectraAllClassFileNumbers[i]))\n spectraCorrespondingPosition = numpy.vstack((spectraCorrespondingPosition, spectraAllClassPositions[i]))\n \n i += 1\n \n \n return spectraTrainingSet, waveNumbers, spectraAllClassConcatenated, spectraCorrespondingClass, spectraCorrespondingFileNumber, spectraCorrespondingPosition", "title": "" }, { "docid": "ba889afecbdce56916161a09a0e5a541", "score": "0.50682575", "text": "def pack_thetas(self, t1, t2):\n return np.concatenate((t1.reshape(-1), t2.reshape(-1)))", "title": "" }, { "docid": "9ff1de5c59986523eeac21a6da21b77f", "score": "0.50673115", "text": "def concatenate_colums(left_arr, right_arr):\n return np.concatenate((left_arr, right_arr), axis=0)", "title": "" }, { "docid": "f5780f469ea45eb2b3f29816d2c85f80", "score": "0.5063113", "text": "def vstack(self, outputs):\n\n # If all outputs are numpy arrays, use native method\n if all(isinstance(output, np.ndarray) for output in outputs):\n return np.concatenate(np.stack(outputs, axis=1))\n\n # If all outputs are torch tensors, use native method\n # pylint: 
disable=E1101\n if all(torch.is_tensor(output) for output in outputs):\n return torch.cat(tuple(torch.stack(outputs, axis=1)))\n\n # Flatten into lists of outputs per input row. Wrap as one to many transformation.\n merge = []\n for x in zip(*outputs):\n combine = []\n for y in x:\n if isinstance(y, list):\n combine.extend(y)\n else:\n combine.append(y)\n\n merge.append(OneToMany(combine))\n\n return merge", "title": "" }, { "docid": "181c6a915397fee4525dab8f9ab71646", "score": "0.5059971", "text": "def combine_data(data, metadata):\n charge_mass = metadata[1]\n shot_mass = metadata[2]\n angle = metadata[3]\n\n combined = []\n for row in data:\n time = row[0]\n velocity = row[1]\n x = row[4]\n y = row[5]\n\n combined.append([time, angle, charge_mass, velocity, x, y])\n return combined", "title": "" }, { "docid": "288cc7257ccc10972994bbedbb580198", "score": "0.50559753", "text": "def _pad_concat(arrs: list) -> np.ndarray:\n n_cols = max([arr.shape[1] for arr in arrs])\n\n for i in range(len(arrs)):\n arrs[i] = np.pad(arrs[i], ((0, 0), (0, n_cols - arrs[i].shape[1])))\n\n return np.vstack(arrs)", "title": "" }, { "docid": "ba5e3f0bebf0699e1caa1566f8d2225a", "score": "0.5055735", "text": "def concat(inputs, concat_axis=-1, output_shape=None, name=None):\n if len(inputs) == 1:\n # Degenerate case. TODO: handle output_shape and name.\n return inputs[0]\n else:\n return merge(inputs, mode='concat', concat_axis=concat_axis,\n output_shape=output_shape, name=name)", "title": "" }, { "docid": "70315e1f685ab33b1510914e3b69fd36", "score": "0.504228", "text": "def concat_all_gathered(tensor):\n gathered = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())]\n torch.distributed.all_gather(gathered, tensor)\n output = torch.cat(gathered, dim=0)\n return output", "title": "" }, { "docid": "39e7c82597c9f0ecd6f5848af209bd92", "score": "0.503738", "text": "def combine_all(self, ds_list):\n for ds in ds_list:\n combine(ds)\n return self", "title": "" }, { "docid": "d6ab899f220c251c76f18f4f908c7086", "score": "0.50332403", "text": "def concatenate(self, name, keys=None):\n if (keys is not None) and not isinstance(keys, list):\n raise ValueError('keys must be None or a list of alignment names')\n # Check if alignments are compatible\n self._check_raise()\n\n samples = []\n markers = []\n block_list = []\n start = 0\n for k in (name for name in self.alignment_names):\n sample = self._alignments[k].samples\n marker = self._alignments[k].markers\n samples.append(sample)\n markers.append(marker)\n\n block_list.append(Block(str(k), start, start + sample.nsites))\n start += sample.nsites\n\n sample_alignment = concat_basealignments(samples)\n if markers[0]:\n marker_alignment = concat_basealignments(markers)\n else:\n marker_alignment = None\n\n subspaces = OrderedDict({\n str(k): v._linspace for k, v in self._alignments.items()\n })\n return CatAlignment(\n name, sample_alignment, marker_alignment,\n linspace=blocks_to_linspace(block_list),\n subspaces=subspaces,\n )", "title": "" }, { "docid": "acc3cf6d0a913153cf290a68f714c4f8", "score": "0.50292474", "text": "def expand_arrays(arrays: Iterable[dask.array.core.Array]) -> List[dask.array.core.Array]:\n\n arrays = list(arrays)\n max_dimns = max(len(array.shape) for array in arrays)\n\n const_arrays = []\n for array in arrays:\n current_dimns = len(array.shape)\n if current_dimns < max_dimns:\n array = array.reshape(*[1] * (max_dimns - current_dimns), *array.shape)\n const_arrays.append(array)\n\n return const_arrays", "title": "" }, { 
"docid": "08586f935678717c0337a61c83cd1255", "score": "0.50247556", "text": "def _concat_singles(dici):\n\n unlucky_guys = dici['all_male'][dici['all_male'].index.isin(dici['lucky_guys'].index) == False]\n\n unhappy_girls = dici['all_female'][dici['all_female'].index.isin(dici['happy_girls'].index) == False]\n\n girls = pd.concat((dici['happy_girls'], unhappy_girls), axis = 0)\n guys = pd.concat((dici['lucky_guys'], unlucky_guys), axis = 0)\n\n assert(\n len(unlucky_guys) + len(dici['lucky_guys']) == len(dici['all_male'])\n ), \"Error in concating guys\"\n\n assert(\n len(unhappy_girls) + len(dici['happy_girls']) == len(dici['all_female'])\n ), \"Error in concating girls\"\n\n out_dict = {'girls' : girls,\n 'guys': guys}\n return out_dict", "title": "" }, { "docid": "7a905f84d74e0b7d8c3d5ee01fabb0a1", "score": "0.5008827", "text": "def mergeDataframes(datasets):\n return pd.concat(datasets)", "title": "" }, { "docid": "7a905f84d74e0b7d8c3d5ee01fabb0a1", "score": "0.5008827", "text": "def mergeDataframes(datasets):\n return pd.concat(datasets)", "title": "" }, { "docid": "abd1b46252867f83ed33442eb8cb4b86", "score": "0.50055593", "text": "def horzcat(self, df):\n if self.shape[0] != df.shape[0]:\n raise ValueError(\"Can't append differently sized design matrices! \"\n \"Mat 1 has %s rows and Mat 2 has %s rows.\"\n % (self.shape[0]), df.shape[0])\n out = pd.concat([self, df], axis=1)\n out.sampling_rate = self.sampling_rate\n out.convolved = self.convolved\n out.hasIntercept = self.hasIntercept\n return out", "title": "" }, { "docid": "f67157e038328d22d7c578bd7c3f0a71", "score": "0.4999727", "text": "def load_and_concat(paths):\n n_subjects, n_runs = paths.shape\n concat = []\n for i in range(n_subjects):\n X_i = np.concatenate(\n [np.load(paths[i, j]) for j in range(n_runs)], axis=1\n )\n concat.append(X_i)\n return np.array(concat)", "title": "" }, { "docid": "e09354cd4660dff203cec5549dc9ae22", "score": "0.49988702", "text": "def concatenate(self: T, *others: T, as_coordinates: bool = False) -> T:\n pass", "title": "" }, { "docid": "fd7723706661f7b2feb990a97d6879cb", "score": "0.49901664", "text": "def concat_all_gather(tensor):\n tensors_gather = [torch.ones_like(tensor)\n for _ in range(torch.distributed.get_world_size())]\n dist.all_gather(tensors_gather, tensor, async_op=False)\n\n output = torch.cat(tensors_gather, dim=0)\n return output", "title": "" }, { "docid": "b9c949ee62582bd33e4fd3784c33a3ca", "score": "0.4988827", "text": "def _np_stack(arrays, axis=0):\n arrays = [asanyarray(arr) for arr in arrays]\n if not arrays:\n raise ValueError('need at least one array to stack')\n\n shapes = set(arr.shape for arr in arrays)\n if len(shapes) != 1:\n raise ValueError('all input arrays must have the same shape')\n\n result_ndim = arrays[0].ndim + 1\n if not -result_ndim <= axis < result_ndim:\n msg = 'axis {0} out of bounds [-{1}, {1})'.format(axis, result_ndim)\n raise IndexError(msg)\n if axis < 0:\n axis += result_ndim\n\n sl = (slice(None),) * axis + (_nx.newaxis,)\n expanded_arrays = [arr[sl] for arr in arrays]\n return _nx.concatenate(expanded_arrays, axis=axis)", "title": "" }, { "docid": "1ff8a9f1f16bcc08a50678f014909426", "score": "0.49873406", "text": "def concat_all_gather(tensor):\n tensors_gather = [\n torch.ones_like(tensor)\n for _ in range(torch.distributed.get_world_size())\n ]\n torch.distributed.all_gather(tensors_gather, tensor, async_op=False)\n\n output = torch.cat(tensors_gather, dim=0)\n return output", "title": "" } ]
7612886b5c1c75952937468ef4de0f84
Start a busy loop checking for file changes every interval seconds. If blocking is False make one loop then return.
[ { "docid": "34b86190c20808af2caaeaef05906339", "score": "0.7577745", "text": "def loop(self, interval=0.1, blocking=True):\n # May be overridden in order to use pyinotify lib and block\n # until the directory being watched is updated.\n # Note that directly calling readlines() as we do is faster\n # than first checking file's last modification times.\n while True:\n self.update_files()\n for fid, file in list(self._files_map.items()):\n self.readlines(file)\n if not blocking:\n return\n time.sleep(interval)", "title": "" } ]
[ { "docid": "6d5e0b4b914132edacb2fa8bbd1018de", "score": "0.64747286", "text": "def loop_forever(self):\r\n while self.running:\r\n time.sleep(0.1)", "title": "" }, { "docid": "dc72b9d7b7d88b6fc569acd8b1bd22a0", "score": "0.625984", "text": "def _loop(self):\n ok = True\n last_time = 0\n while ok:\n dir_state = os.stat(self.dir_name)\n curr_time = dir_state.st_mtime\n if curr_time > last_time:\n self.update_data_set()\n last_time = curr_time\n time.sleep(self.monitor_interval)\n if self.must_stop:\n break", "title": "" }, { "docid": "927b7ea513166f838a87b188db82cf06", "score": "0.61609703", "text": "def run(self):\n\n while True:\n self.scan_dir()\n time.sleep(self.poll_interval)", "title": "" }, { "docid": "9560e099e0fe863ebc60379ae9145556", "score": "0.60629416", "text": "def test_continous_file_monitoring(self):\n\n self.watcher.add_files(self.fixture(\"a.txt\"), self.fixture(\"b.txt\"), self.fixture(\"c.txt\"))\n self.watcher.monitor()\n\n self.touch(self.fixture(\"a.txt\"))\n time.sleep(2)\n self.assertEqual(1, self.watcher.num_runs)\n \n self.watcher.stop_monitor()", "title": "" }, { "docid": "b1818ed2e588ad9cfef42219d5f684c7", "score": "0.60192305", "text": "def loop():\n while True:\n schedule.run_pending()\n if sched_stop_event.is_set():\n break\n time.sleep(1)", "title": "" }, { "docid": "900ef69f7a531f4e6aa135d2f2e56502", "score": "0.5993254", "text": "def wait_forever():\n while True:\n time.sleep(60)", "title": "" }, { "docid": "95178651c7f8320d73c3c3025efb0cbc", "score": "0.59653974", "text": "def watch(path: Union[Path, str], *,\n watcher_cls: Type[AllWatcher]=DefaultWatcher,\n debounce=400,\n min_sleep=100):\n w = watcher_cls(path)\n try:\n while True:\n start = unix_ms()\n changes = w.check()\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug('time=%0.0fms files=%d changes=%d', unix_ms() - start, len(w.files), len(changes))\n\n if changes:\n yield changes\n\n sleep_time = max(debounce - (unix_ms() - start), min_sleep)\n sleep(sleep_time / 1000)\n except KeyboardInterrupt:\n logger.debug('KeyboardInterrupt, exiting')", "title": "" }, { "docid": "8c69810be3d8fc419502be23ef0e3d86", "score": "0.5954457", "text": "def status_check_loop(self):\n self.update()\n self.timer = threading.Timer(TIMER_SECONDS, self.status_check_loop)\n self.timer.start()", "title": "" }, { "docid": "94cb8c90bbcadc95870085c949d8aff9", "score": "0.5945685", "text": "def run(self):\n while not self.stopped.wait(self.wait_time):\n self.update_heartbeat()", "title": "" }, { "docid": "c8b03c3e989970db4241d0f232244400", "score": "0.5921999", "text": "def run(self):\n self.running = True\n self.logger.info('Running')\n while self.running:\n self.check_file_contents()\n sleep(self.config['user_input']['sleep_interval'])", "title": "" }, { "docid": "0facdd583ee4505b008ad3a3c8450981", "score": "0.5838294", "text": "def config_monitor(self):\n while True:\n self.config_sem.acquire()\n if not self.config_changed:\n latest_stat = os.stat(self.config_file)\n if latest_stat.st_mtime != self.config_stat.st_mtime:\n self.config_changed = True\n self.config_sem.release()\n time.sleep(10)", "title": "" }, { "docid": "b8622518492cec3db121c039f240fb4d", "score": "0.58136696", "text": "def loop(self):\n try:\n while True:\n abs_path = self._queue.get(timeout=60)\n try:\n self._scan_dir(abs_path)\n except Exception as e:\n self._db.record_failure(abs_path, e)\n except queue.Empty:\n print(\"Thread finished after 60s of empty queue.\")", "title": "" }, { "docid": "30137edf970aed1c69f6e5f474a64e24", "score": "0.5813515", 
"text": "def _poll(self):\n while True:\n self.poll_count += 1\n self._poll_action()\n if self.poll_complete:\n break\n time.sleep(self.poll_interval_s)\n self._poll_update_interval()", "title": "" }, { "docid": "2af06f5a9882abc2646f200a80bfaf53", "score": "0.5789726", "text": "def loop(self, filename, handler):\n tailer = FileTailer(filename)\n while True:\n entry, comment = tailer.poll()\n if not entry:\n time.sleep(1.0)\n continue\n kos, not_kos, error = self.koscheck_logentry(entry)\n handler(comment, kos, not_kos, error)", "title": "" }, { "docid": "d0dce44486e6d7269a6af48d5b943323", "score": "0.5782501", "text": "async def loop(self):\r\n pass", "title": "" }, { "docid": "a0bb2df261292193563d86547ebd3840", "score": "0.5775545", "text": "def watch(self):\n\n rev = self.client.rev().rev\n\n def watchjob(rev):\n change = None\n\n while True:\n try:\n change = self.client.wait(\"%s/**\" % self._folder, rev)\n except Timeout:\n rev = self.client.rev().rev\n change = None\n\n if change:\n self._handle_change(change)\n rev = change.rev+1\n #print '.....', rev\n\n self.watchjob = gevent.spawn(watchjob, rev)", "title": "" }, { "docid": "269c3d6abe3ecb7a340b69eb1c488f96", "score": "0.57254463", "text": "def _async_watch_directory(self):\n self.logger.debug(\"Starting watch thread.\")\n while True:\n if self._stop:\n self.logger.debug(\"Stopping watch thread.\")\n break\n # Loop over filenames and add them to the queue\n # Duplicates are taken care of later on\n for filename in self._get_filenames_to_archive():\n self._archive_queue.put([current_time(), filename])\n # Sleep before checking again\n time.sleep(self.sleep_interval.to_value(u.second))", "title": "" }, { "docid": "df893c0a16bb65a7fb70ef69a74dc481", "score": "0.57229763", "text": "def watch_logfile():\n from time import sleep\n while not cancelled:\n log.read_file()\n sleep(2.5)", "title": "" }, { "docid": "482434500577efa198bc8c8f4c81c46a", "score": "0.56843245", "text": "def monitor(frequency: int = 600):\n\n # Kick off timer\n starttime = time.time()\n th = threading.Thread(target=check_sources)\n th.start()\n while True:\n if not th.is_alive():\n time.sleep(frequency - ((time.time() - starttime) % frequency))\n th = threading.Thread(target=check_sources)\n th.start()", "title": "" }, { "docid": "4e4c8fd2139dd61932183a183f572bcd", "score": "0.5670978", "text": "def auto_thread_function():\r\n\r\n global needs_autoreload\r\n\r\n while True:\r\n\r\n with auto_lock:\r\n\r\n auto_lock.wait(1.5)\r\n\r\n if auto_quit_flag:\r\n return\r\n\r\n items = auto_mtimes.items()\r\n\r\n for fn, mtime in items:\r\n\r\n if mtime is auto_blacklisted:\r\n continue\r\n\r\n if auto_mtime(fn) != mtime:\r\n needs_autoreload = True", "title": "" }, { "docid": "3ebb60caf6b6a40f030852b34b7cde46", "score": "0.558985", "text": "def _poll(self):\n # Check if the file has changed\n log.trace(\"Checking for file change\")\n # if the parser keeps track of unprocessed data, that can be tried again if it is not empty\n if not self._in_process_state and self._next_driver_state != None:\n self._got_file()", "title": "" }, { "docid": "b076f103ca892ad9f84d83d01edaf0f3", "score": "0.55843043", "text": "async def status_loop(component: Component) -> None:\n component.logger.info(\"Starting status loop\")\n while not check_drain_semaphore(component):\n # PATCH /status/{component}\n await patch_status_heartbeat(component)\n # sleep until we PATCH the next heartbeat\n await asyncio.sleep(component.heartbeat_sleep_duration_seconds)\n component.logger.info(\"Ending status 
heartbeats; drain semaphore detected.\")", "title": "" }, { "docid": "773f5d7f89cd72dc5041e8c319aa0c4c", "score": "0.5570944", "text": "async def periodic_wakeup():\n while True:\n await asyncio.sleep(1)", "title": "" }, { "docid": "0bc0e48de3c74ebbc11aae9efef47e75", "score": "0.5550591", "text": "def loop(self, quiet=False, notify=True):\n\n while True:\n self._check(quiet=quiet, notify=notify)\n\n if self.debug: print(\"sleep %d\" % self.poll_interval)\n time.sleep(self.poll_interval)\n # Did not work ok\n # idle_loop(self.unread_stuff)\n\n self.logout()", "title": "" }, { "docid": "6cd3eb7dc6f213232d7a6a3acea31ab1", "score": "0.5549749", "text": "def __do_processing(self):\n while self.stay_alive:\n path = self.__new_files_q.get()\n\n if not path: # probably an exit request\n continue\n\n file_is_closed = False\n while not file_is_closed and self.stay_alive:\n\n # try to open the file for reading, specifying exclusive access\n # this will fail if the file is already open in an another process\n\n try:\n handle = win32file.CreateFile(\n path,\n win32file.GENERIC_READ,\n 0, # share mode - 0 == no sharing\n None,\n win32file.OPEN_ALWAYS,\n win32file.FILE_ATTRIBUTE_NORMAL,\n None)\n handle.close()\n file_is_closed = True\n except pywintypes.error as e:\n time.sleep(0.01)\n # if e[0] == winerror.ERROR_SHARING_VIOLATION: # This wasn't working so I have simplified\n # time.sleep(0.01)\n # else:\n # raise\n\n if not self.stay_alive:\n return\n\n self._on_new_file(path, self.__created_files[path])", "title": "" }, { "docid": "a99cdf0d324e4097913f7102e2edf2a5", "score": "0.5547866", "text": "def __run(self):\n assert(self.__is_running)\n self.__is_running = False\n logger.debug(\"Polling...\")\n if self.poll():\n # If there are any more background processes running,\n # start a new timer\n self.start()", "title": "" }, { "docid": "3e7655f1ebaf897e3ce722f47c5805e6", "score": "0.55362254", "text": "def run(self):\n self.initialize()\n watch_manager = pyinotify.WatchManager()\n watch_manager.add_watch(watch_file, self.mask, rec=True, auto_add=True)\n notifier = pyinotify.Notifier(watch_manager, default_proc_fun=self.toggle_leds)\n notifier.loop()", "title": "" }, { "docid": "d716f2a85ba81d72e23f1dde725fdc03", "score": "0.5526162", "text": "def _loop_checking_for_updates(self):\n while not self._quit_event.is_set():\n self._check_for_updates()\n self._quit_event.wait(timeout=1)", "title": "" }, { "docid": "c2c2f116c6cbb49770fc8377d5fa3383", "score": "0.55250037", "text": "def _run(self):\n while not self._stop:\n while not self.connected:\n self.connect()\n time.sleep(0.1)\n\n self.read()\n\n if (time.time() - self.last_heartbeat) > 1:\n self.heartbeat()\n self.last_heartbeat = time.time()", "title": "" }, { "docid": "42ed693653799a9f19e92385d925588d", "score": "0.5500294", "text": "async def check_job_status_thread(con):\n while True:\n await check_all_jobs(con)\n\n await sleep(60)", "title": "" }, { "docid": "5adf14daa9af32a3729cdebae03a6bef", "score": "0.54936546", "text": "def run():\n while True:\n try:\n clock(time.localtime())\n time.sleep(0.5)\n except ZeroDivisionError:\n pass", "title": "" }, { "docid": "dc56ab3620e0a637e3959f9269c3d726", "score": "0.5486423", "text": "def poll(plugin,interval):\n while (True):\n try:\n # check the current contents against self.OrigSeqFiles\n plugin.getLogger().info(\"MailFilterPlugin Thread triggering\" + str(interval))\n evt = Event('TriggerRefresh',{})\n Correlator.sendTo(\"mail_events\", evt)\n except:\n plugin.getLogger().error(\"Poll Thread Exception: 
%s\", sys.exc_info()[1])\n\n time.sleep(interval)", "title": "" }, { "docid": "4c43182e374366c157cb14eeb5c54c44", "score": "0.5480603", "text": "def loop(self, now=None):\n raise NotImplementedError", "title": "" }, { "docid": "21269e69a2a3604354bd017503738d98", "score": "0.5448773", "text": "def loop(self):\n if self.running:\n self._loop()", "title": "" }, { "docid": "d085baf64476018027c1da7b3846f789", "score": "0.5439919", "text": "def run(self):\n try:\n while self.running:\n t0 = self.clock.time()\n self.fn(*self.args, **self.kw)\n t1 = self.clock.time()\n dt = self.interval - (t1 - t0)\n if dt > 0:\n self.clock.sleep(dt)\n except Exception:\n log.exception('caught exception in looping call')\n finally:\n self.running = 0", "title": "" }, { "docid": "9d11154c8425556dc3494d0b3921a416", "score": "0.5439139", "text": "def run(self):\n while self.run_flag:\n # Do something\n self.api.recvAndProcess()\n time.sleep(self.interval)", "title": "" }, { "docid": "4845444e330aeb12dad419b330980bc0", "score": "0.5435545", "text": "async def updater_loop(self) -> None:", "title": "" }, { "docid": "6fc6366d9bb7b8cdb2b1d24de368b8c5", "score": "0.54300225", "text": "def file_spinlock(file_name, timeout, step=0.01):\n elapsed = 0.0\n\n while elapsed < timeout:\n if path_exists(file_name):\n return True\n\n elapsed += step\n sleep(step)\n\n return False", "title": "" }, { "docid": "f759899b69e807d1d9b949a210829e11", "score": "0.5425507", "text": "def check_sync(self):\n lf = LF()\n if lf.lock_acquire(\"/var/lock/bm_sync_check.lock\", to=60):\n try:\n self.check_sync_locked()\n except Exception as err:\n self.logger.error(\"Error check_sync(): {}\".format(err))\n finally:\n lf.lock_release(\"/var/lock/bm_sync_check.lock\")", "title": "" }, { "docid": "91a378bc08e2e64d096e542d73dcd717", "score": "0.54234487", "text": "def _simple_lock(f):\n lock_file = f + \".lock\"\n timeout = 20\n curtime = 0\n interval = 2\n while os.path.exists(lock_file):\n time.sleep(interval)\n curtime += interval\n if curtime > timeout:\n os.remove(lock_file)\n with open(lock_file, \"w\") as out_handle:\n out_handle.write(\"locked\")\n yield\n if os.path.exists(lock_file):\n os.remove(lock_file)", "title": "" }, { "docid": "a0fe043039325e985409cafba6912550", "score": "0.54224867", "text": "def run_forever(self):\n while True:\n\n # for each module loaded...\n for modulename in self._loadedmodules.keys():\n self.update_module(modulename)\n\n # Pause this loop\n self.logger.debug(\"Pause for [%s] seconds\" % self.refresh)\n sleep(self.refresh)", "title": "" }, { "docid": "7e15b79bbb7c2765c4c92f6066772b9b", "score": "0.54137707", "text": "def _config_file_stat(self):\n while True:\n if self.config_watcher.files_changed():\n if self.stat_reload:\n self.send_event('Gauge', EventReconfigure())\n self._thread_jitter(3)", "title": "" }, { "docid": "29bf410383708abd59dddc9aed068e54", "score": "0.54090196", "text": "async def check_flights_periodically(self):\n while True:\n tasks = [self.check_flight(flight) for flight in self.flights.values()]\n await asyncio.gather(*tasks)\n\n logger.info('Checked number of %d flights', len(tasks))\n await self.write_to_file()\n await asyncio.sleep(configs.CHECK_FLIGHT_PERIOD)", "title": "" }, { "docid": "2accc5f6d7bf0d7db9ed357546f7926a", "score": "0.5376154", "text": "def lock():\n loopcount = 0\n while os.path.isfile(lockPath):\n if loopcount > 15:\n sys.stdout.write(\"lockfile_timeout\")\n exit()\n time.sleep(1)\n loopcount += 1\n\n open(lockPath,\"w\").close()", "title": "" }, { "docid": 
"976d5315bf022d74976860c65dbf36fe", "score": "0.53483576", "text": "def run(self):\n self._capture_signals()\n self._start_monitor()\n try:\n while True:\n if not self._run_worker():\n self._wait_for_changes()\n time.sleep(self.reload_interval)\n except KeyboardInterrupt:\n pass\n finally:\n self._stop_monitor()\n self._restore_signals()\n sys.exit(1)", "title": "" }, { "docid": "6db98c666fcd35ca852f09153807370b", "score": "0.53338957", "text": "def _serial_poller(self):\n while True:\n _next = dict(self._poller.poll(POLLING_FREQUENCY_MS))\n if self._halt_read_file.fileno() in _next:\n log.debug(\"Poller [{}]: halt\".format(hash(self)))\n self._halt_read_file.read()\n # Note: this is discarded because we send a set message to halt\n # the thread--don't currently need to parse it\n break\n\n elif self._connection.fileno() in _next:\n # Lid-open interrupt\n log.debug(\"Poller [{}]: interrupt\".format(hash(self)))\n res = self._connection.read_until(SERIAL_ACK)\n self._interrupt_callback(res)\n\n elif self._send_read_file.fileno() in _next:\n self._send_read_file.read(1)\n command, callback = self._command_queue.get()\n log.debug(\"Poller [{}]: send {}\".format(hash(self), command))\n res = self._send_command(command)\n callback(res)\n else:\n # Nothing else to do--update device status\n log.debug(\"Poller [{}]: updating temp\".format(hash(self)))\n res = self._send_command(GCODES['GET_PLATE_TEMP'])\n self._temp_status_callback(res)\n res = self._send_command(GCODES['GET_LID_STATUS'])\n self._lid_status_callback(res)\n res = self._send_command(GCODES['GET_LID_TEMP'])\n self._lid_temp_status_callback(res)\n log.info(\"Exiting TC poller loop [{}]\".format(hash(self)))", "title": "" }, { "docid": "35686209b5cc4da4b55bd5d1a610fa4f", "score": "0.53259045", "text": "def run(self):\r\n self.running = 1\r\n self.loop()", "title": "" }, { "docid": "bbe3db833c8e0014763d2cfc8e0f91ed", "score": "0.5323164", "text": "def run(self):\n self.watch()\n self.wait()", "title": "" }, { "docid": "ecf8401da0b9a95248f8444754fc2754", "score": "0.5321146", "text": "def wav_check():\n while True:\n modify_wav(INBOUND_QUEUE.get())", "title": "" }, { "docid": "5ab54f6b3a073e917df5276ecddd019a", "score": "0.53106844", "text": "def wait_for_files(filepaths):\n wait_time = 5\n for filepath in filepaths:\n # If the file doesn't exist, wait wait_time seconds and try again\n # until it's found.\n while not os.path.exists(filepath):\n print(\"%s hasn't arrived. Waiting %s seconds.\" % \\\n (filepath, wait_time))\n time.sleep(wait_time)\n # If the file exists but locked, wait wait_time seconds and check\n # again until it's no longer locked by another process.\n while is_locked(filepath):\n print(\"%s is currently in use. 
Waiting %s seconds.\" % \\\n (filepath, wait_time))\n time.sleep(wait_time)", "title": "" }, { "docid": "3a3913ee675f019ad08ce35aeeff25fa", "score": "0.5305055", "text": "def run(self):\n # go into infinity loop (we could do anything here)\n while True:\n time.sleep(1)", "title": "" }, { "docid": "6e4a9b7a25f356540757163c8603aa50", "score": "0.5302872", "text": "def wait_for_lock_file(self):\n nsec = 0\n while not os.path.isfile(self.lock_file):\n time.sleep(self.sleep_seconds)\n nsec += self.sleep_seconds\n if nsec > self.max_sleep_seconds:\n raise QboxLockfileError\n time.sleep(self.sleep_seconds)", "title": "" }, { "docid": "e793cebf9cf7f8f6cc799a482c3b4df6", "score": "0.529689", "text": "def scan_forever(queue, *args, **kwargs):\n process_once_now = kwargs.get('process_once_now', True)\n if process_once_now:\n for work in scan(queue, *args, **kwargs):\n yield work\n while True:\n with open(fsq_path.trigger(queue), 'rb') as t:\n t.read(1)\n for work in scan(queue, *args, **kwargs):\n yield work", "title": "" }, { "docid": "b2421cd95886fa9e673b22510a70461a", "score": "0.5288254", "text": "def updateloop(self):\n while self.doupdateloop:\n with self.updatelock:\n for wlan in self.updates:\n # Check if wlan is from a previously closed session. Because of the\n # rate limiting scheme employed here, this may happen if a new session\n # is started soon after closing a previous session.\n # TODO: if these are WlanNodes, this will never throw an exception\n try:\n wlan.session\n except:\n # Just mark as updated to remove from self.updates.\n self.updated(wlan)\n continue\n\n if self.lastupdate(wlan) > self.rate:\n self.buildcmds(wlan)\n self.ebcommit(wlan)\n self.updated(wlan)\n\n time.sleep(self.rate)", "title": "" }, { "docid": "9c67b44f4e3606f6135642369d1ccfc0", "score": "0.52862865", "text": "def enter_loop(self, pollingIntervalSeconds = 0.1):\n while True:\n state = self.read()\n if state != self.last_reported_state:\n self.last_reported_state = state\n timeNow = time.time()\n if timeNow > self.lastEventTime + self.debounceSeconds:\n self.onChange()\n self.lastEventTime = timeNow\n time.sleep(pollingIntervalSeconds)", "title": "" }, { "docid": "ae04a3a5c2adb9d8940d1fdccbf342c2", "score": "0.527665", "text": "def watch(self):\n while True:\n # /etc/fstab watcher\n self.handle_fstab()\n\n # IMDS probe (only sporadically, inside the function)\n if self._imds_logger:\n try:\n self._imds_logger.log_imds_data_if_right_time()\n except Exception as e:\n self._hutil_error('ImdsLogger exception: {0}\\nStacktrace: {1}'.format(e, traceback.format_exc()))\n\n # Sleep 5 minutes\n time.sleep(60 * 5)\n pass", "title": "" }, { "docid": "0a6edabbf1c150ef4d09436d8e32afea", "score": "0.52740747", "text": "def block_one(self, progress_file=sys.stdout):\n\t\t# type: (Optional[TextIO], ) -> Tuple[str, str]\n\n\t\twhile True:\n\t\t\t\"\"\" fixme: This loop has a (not very serious) race condition.\n\n\t\t\t\tFor example a download might change its status from waiting to active\n\t\t\t\tduring the two queries. 
Then it would not be found.\n\t\t\t\tThis cannot be fixed by changing the query order as the status change graph\n\t\t\t\t(waiting<->active->stopped) cannot be sorted topologically.\n\t\t\t\tAnd even if you could, returning stopped downloads has priority over\n\t\t\t\twaiting/active ones, as otherwise we would block for too long.\n\n\t\t\t\tDoes using a multi/batch-call fix this?\n\t\t\t\"\"\"\n\n\t\t\tentries = self._entries(self.query(\"tell_stopped\", 0, self.max_num_results)) # complete or error\n\t\t\tif entries:\n\t\t\t\tgid, entry = entries.popitem()\n\n\t\t\t\ttry:\n\t\t\t\t\tif entry[\"status\"] == \"complete\":\n\t\t\t\t\t\tassert len(entry[\"files\"]) == 1\n\t\t\t\t\t\treturn gid, entry[\"files\"][0][\"path\"]\n\t\t\t\t\telif entry[\"status\"] == \"error\":\n\t\t\t\t\t\traise DownloadFailed(gid, entry[\"errorCode\"], entry[\"errorMessage\"])\n\t\t\t\t\telse:\n\t\t\t\t\t\traise RuntimeError(\"Unexpected status: {}\".format(entry[\"status\"]))\n\n\t\t\t\tfinally:\n\t\t\t\t\tself.remove_stopped(gid)\n\n\t\t\tentries = self._entries(self.query(\"tell_active\")) # active\n\t\t\tif entries:\n\t\t\t\tif progress_file:\n\t\t\t\t\tcompleted = sum(int(entry[\"completedLength\"]) for entry in entries.values())\n\t\t\t\t\ttotal = sum(int(entry[\"totalLength\"]) for entry in entries.values())\n\t\t\t\t\tspeed = sum(int(entry[\"downloadSpeed\"]) for entry in entries.values())\n\t\t\t\t\tprint(\"{} downloads: {}/{} bytes {} bytes/sec\".format(len(entries), completed, total, speed), file=progress_file, end=\"\\r\")\n\n\t\t\t\tsleep(self.poll)\n\t\t\t\tcontinue\n\n\t\t\tentries = self._entries(self.query(\"tell_waiting\", 0, self.max_num_results)) # waiting or paused\n\t\t\tif entries:\n\t\t\t\tprint(\"{} downloads waiting or paused\".format(len(entries)), end=\"\\r\")\n\t\t\t\tsleep(self.poll)\n\t\t\t\tcontinue\n\n\t\t\tif self.gids:\n\t\t\t\t\"\"\" Actually only this check is sensitive to race condition, as the looping logic\n\t\t\t\t\twould care of retrying otherwise.\n\t\t\t\t\tHowever this is the only way to check for external modifications.\n\t\t\t\t\"\"\"\n\t\t\t\traise InconsistentState(\"Some downloads got lost. 
We either encoutered a race condition \\\n\t\t\t\t\tor some external actor removed the download\")\n\n\t\t\traise WouldBlockForever(\"No downloads active or waiting\")", "title": "" }, { "docid": "afb4dc5070d47c2a878c41d3638e1dde", "score": "0.5269101", "text": "def run(self):\n while True:\n logger.info('Start repository scan ...')\n try:\n self.connect()\n repos, has_change = self.synchronize()\n with ThreadPoolExecutor(max_workers=self.concurrency) as executor:\n futures = []\n for i in range(len(repos)):\n futures.append(executor.submit(self.update_repo, **repos[i]))\n for future in as_completed(futures):\n try:\n if not has_change and future.result():\n has_change = True\n except Exception as e:\n logger.warning('Unexpected exception occurred during processing: %s', e)\n # Notify UI backend about a change in repos\n if has_change:\n key = '{}:repos_updated'.format(NAMESPACE)\n redis_client.set(key, value=str(int(time.time())))\n except Exception as e:\n logger.warning('Repository scan failed: %s', str(e))\n else:\n logger.info('Repository scan completed\\n')\n finally:\n time.sleep(self.interval)", "title": "" }, { "docid": "8d6a8ca2657c1c58ee1f2f04fb73b0e7", "score": "0.52643573", "text": "def keep_updated(self):\n while True:\n try:\n self.update()\n except RuntimeError:\n break\n from time import sleep\n sleep(1)", "title": "" }, { "docid": "8d6a8ca2657c1c58ee1f2f04fb73b0e7", "score": "0.52643573", "text": "def keep_updated(self):\n while True:\n try:\n self.update()\n except RuntimeError:\n break\n from time import sleep\n sleep(1)", "title": "" }, { "docid": "d54a1537922daa5805303899930ebe77", "score": "0.5255857", "text": "def start(self):\n if not self.is_running:\n self.is_running = True\n self._handle = self._loop.call_later(self.interval, self._run)", "title": "" }, { "docid": "20fd406f4ae8de04245df9c4aa012261", "score": "0.5243536", "text": "def do_poll():\n _engine.poll(0)\n\n if _engine._deferreds:\n timer.setInterval(min(1000 * (_engine._deferreds[0].end - _engine.latest_poll_time), _timeout))\n else:\n timer.setInterval(_timeout)", "title": "" }, { "docid": "198872a16996da904ca26150283e4c0f", "score": "0.5240747", "text": "def run(self):\n while True:\n # Do something\n if self.queue.empty():\n print('[{}] Doing something imporant in the background'.format(self.num))\n else:\n val = self.queue.get()\n print('[{}] Getting {} to do something imporant in the background'.format(self.num, val))\n\n time.sleep(self.interval)", "title": "" }, { "docid": "3bf6190b210e0ca85368b06187c7db0d", "score": "0.5237674", "text": "def _config_file_stat(self):\n while True:\n if self._config_files_changed():\n if self.stat_reload:\n self.send_event(self.__class__.__name__, EventReconfigure())\n self._thread_jitter(3)", "title": "" }, { "docid": "c070c2cf5d60a16fbd23a8c30e95f332", "score": "0.52365845", "text": "def _monitor(self):\n self._call = None\n if self.index is not None:\n name, callback, mtime = self.files[self.index][1:]\n try:\n now = os.path.getmtime(name)\n except BaseException:\n now = 0\n if now > mtime:\n log.msg(\"{} changed, notifying listener\".format(name))\n self.files[self.index][3] = now\n callback(name)\n self._setupMonitor()", "title": "" }, { "docid": "2f0601db49aaaf4f2e098d00f86116f5", "score": "0.522968", "text": "def check_sync_locked(self):\n bm_status = {}\n messages_processed_diff = 0\n lf = LF()\n if lf.lock_acquire(config.LOCKFILE_API, to=config.API_LOCK_TIMEOUT):\n try:\n bm_status = api.clientStatus()\n except Exception as err:\n 
self.logger.error(\"Error check_sync_locked(): {}\".format(err))\n finally:\n time.sleep(config.API_PAUSE)\n lf.lock_release(config.LOCKFILE_API)\n\n if \"networkStatus\" in bm_status:\n if bm_status[\"networkStatus\"] != \"notConnected\":\n if not self.bm_connected_timer:\n # upon becoming connected, wait 90 sec until checking if synced\n self.bm_connected_timer = time.time() + 90\n self.bm_connected = True\n else:\n self.bm_connected = False\n self.bm_connected_timer = None\n self.bm_pending_download_timer = None\n\n if \"numberOfMessagesProcessed\" in bm_status:\n if bm_status[\"numberOfMessagesProcessed\"] > self.bm_number_messages_processed_last:\n messages_processed_diff = (bm_status[\"numberOfMessagesProcessed\"] -\n self.bm_number_messages_processed_last)\n self.bm_number_messages_processed_last = bm_status[\"numberOfMessagesProcessed\"]\n\n if \"pendingDownload\" in bm_status:\n if bm_status[\"pendingDownload\"] == 0:\n self.bm_pending_download = False\n else:\n self.bm_pending_download = True\n self.bm_sync_complete = False\n\n if self.bm_connected:\n if \"pendingDownload\" in bm_status and bm_status[\"pendingDownload\"] < 50:\n if not self.bm_pending_download_timer:\n self.bm_pending_download_timer = time.time() + 60\n else:\n self.bm_pending_download_timer = None\n self.bm_sync_complete = False\n\n # indicate sync is complete if:\n # 1) connected and no more than 10 new messages processed\n # and no pending downloads for past 60 seconds.\n # or\n # 2) connected and no more than 10 new messages processed and\n # only a few pending downloads remain and have not increased over 50 in the past 60 seconds.\n if (self.bm_connected and\n messages_processed_diff < 20 and\n (self.bm_connected_timer and time.time() > self.bm_connected_timer) and\n (\n not self.bm_pending_download or\n (self.bm_pending_download_timer and\n time.time() > self.bm_pending_download_timer)\n )):\n self.bm_connected_timer = None\n self.bm_pending_download_timer = None\n self.bm_sync_complete = True\n\n # self.logger.debug(\"con {}, pend {} {}, synced {}\".format(\n # self.bm_connected,\n # bm_status[\"pendingDownload\"],\n # self.bm_pending_download,\n # self.bm_sync_complete))", "title": "" }, { "docid": "3df0cc8c8a93ed654c4433eaebffe95a", "score": "0.5211778", "text": "def run_loop(config, location, extended=False):\n\n while True:\n data = load_data(config, location)\n os.system(\"clear\")\n print_hourly(data, extended)\n sleep(60)", "title": "" }, { "docid": "16bd8d84fb16f0754a1335298ae4e044", "score": "0.52105796", "text": "def polling(self):\n self.__stop_polling = False\n self.polling_thread = threading.Thread(target=self.__polling, args=())\n self.polling_thread.daemon = True\n self.polling_thread.start()", "title": "" }, { "docid": "ce33c09dc6e7c05209ac135b2bacdb6a", "score": "0.52088785", "text": "async def sleep_forever() -> None:\n await sleep(math.inf)", "title": "" }, { "docid": "738e8678ce17ba6c83fb5db56cc95962", "score": "0.52057785", "text": "def loop_main():\n logging.info(\"loop main_thread\")\n while(True):\n logging.debug(\"Tick\")\n yield from asyncio.sleep(10)", "title": "" }, { "docid": "e7486292a8e6ea5f1df7c988e57e2ce3", "score": "0.5201966", "text": "def monitor(self):\r\n self.started_event.set()\r\n while True:\r\n self.timer.wait(Monitor._refresh_interval)\r\n if self.stopped:\r\n break\r\n self.timer.clear()\r\n\r\n try:\r\n try:\r\n self.rsc.refresh()\r\n finally:\r\n self.refreshed.set()\r\n except AutoReconnect:\r\n pass\r\n\r\n # RSC has been collected or there\r\n # was an 
unexpected error.\r\n except:\r\n break", "title": "" }, { "docid": "ed2e51386c53139b551baff5f799bee4", "score": "0.51957446", "text": "def loop(self):\n pass", "title": "" }, { "docid": "0fb93a07fda8cdf5f8c0c70cc030d1ac", "score": "0.51769716", "text": "def run(self):\n\t\twhile True:\n\t\t\tevent = self.q.get()\n\t\t\tnow = time.time()\n\t\t\tif now < event.ts:\n\t\t\t\ttime.sleep(event.ts - now)\n\t\t\tevent.f()", "title": "" }, { "docid": "4781c901c95418bed917499ef9805a21", "score": "0.5166543", "text": "def _run(self):\r\n self.running = True\r\n while self.running:\r\n self._run_once()\r\n self.running = False", "title": "" }, { "docid": "9d02fec49cc3fcfd8d576a3cbc7eeb11", "score": "0.51648057", "text": "def bitmessage_monitor(self):\n while True:\n if self.timer_check_bm_alive < time.time():\n if not self.is_restarting_bitmessage:\n lf = LF()\n if lf.lock_acquire(config.LOCKFILE_API, to=60):\n try:\n self.logger.debug(\"Beginning BM API check\")\n socket.setdefaulttimeout(5)\n api.add(2, 3)\n self.logger.debug(\"Finished BM API check\")\n except socket.timeout:\n self.logger.error(\"Timeout during BM monitor API query. Restarting bitmessage.\")\n self.restart_bitmessage()\n except Exception as err:\n self.logger.error(\"Exception during BM monitor API query: {}\".format(err))\n finally:\n socket.setdefaulttimeout(config.API_TIMEOUT)\n time.sleep(config.API_PAUSE)\n lf.lock_release(config.LOCKFILE_API)\n self.timer_check_bm_alive = time.time() + config.API_CHECK_FREQ\n time.sleep(1)", "title": "" }, { "docid": "7a2d435231e63c006ef388dfab30717c", "score": "0.5160895", "text": "def should_poll(self):\n\t\treturn True", "title": "" }, { "docid": "4ad222cf963c190eb74b2908add095ff", "score": "0.51591104", "text": "def execute_loop():\n trans_count = 0\n\n # Begin real-time control loop\n real_time = 0.0\n real_time_start = time.time()\n while True:\n if vehicle.parameters['TIME_DEFAULT'] not in ctrl_queue:\n ctrl_queue[vehicle.parameters['TIME_DEFAULT']] = [vehicle.parameters['GRAPH_DEFAULT'],\n vehicle.parameters['CTRL_DEFAULT']]\n real_time_prev = real_time\n real_time = time.time() - real_time_start\n update_time = trans_count * TRANSITION_DURATION\n ctrl_loop(ctrl_queue, update_time, real_time)\n time.sleep(0.05)\n if real_time_prev <= (trans_count + 1) * TRANSITION_DURATION <= real_time and real_time_prev != real_time:\n t_div = divmod(real_time, TRANSITION_DURATION)\n trans_count = int(round(t_div[0]))\n print('Updated the perceived control index in the queue')\n\n # Check to see if listener received go-ahead to shut down the channel and return...\n if vehicle.parameters['PARAMETER_DKM'] == 10:\n close_all()\n break", "title": "" }, { "docid": "6c606cdf6f038b44000b00d4e3622daa", "score": "0.51464766", "text": "async def _loop_interrupter(self):\n while True:\n await asyncio.sleep(0.01)", "title": "" }, { "docid": "0963641f97c2bb680fbd0198127bb981", "score": "0.5136763", "text": "def wait_for_file(file_path, wait_time):\n logging.info(\"Waiting up to %s seconds for file %s\", wait_time, file_path)\n loop_time = 0.1\n loops = wait_time / loop_time\n\n loop = 0\n while loop < loops:\n time.sleep(0.1)\n if os.path.isfile(file_path):\n logging.info(\"File appeared!\")\n return True\n loop += 1\n if loop % 10 == 0 and loop > 0:\n logging.info(\"%s seconds\", loop / 10)\n\n logging.info(\"File didn't appear within given time.\")\n return False", "title": "" }, { "docid": "75fd409a0b129ec0bc3676e517c6e81d", "score": "0.5136743", "text": "def main_loop():\n lastly_seen_file = True\n\n while 
True:\n file_name = QueueHelperRedisAPI.get_file_name()\n if not file_name:\n if lastly_seen_file:\n lastly_seen_file = False\n print('Reader waiting for new file')\n\n time.sleep(SLEEP_INTERVAL)\n continue\n\n lastly_seen_file = True\n\n article_title, words = seperate_article_words(file_name)\n\n for word in words:\n\n while KeywordFunnelRedisAPI.get_articles_count(word) > MAX_ARTICLES_COUNT_IN_MEM:\n time.sleep(SLEEP_INTERVAL)\n\n KeywordFunnelRedisAPI.add_article_by_word(word, article_title)\n QueueHelperRedisAPI.add_word_to_flush(word)", "title": "" }, { "docid": "88c03f3c89c160a96ba950ce67f73017", "score": "0.513459", "text": "def run(self):\n # Make sure everything we need is setup, both locally & on AWS.\n self.ensure_local_setup()\n self.ensure_aws_setup()\n \n # Run forever, or until the user says stop.\n while True:\n print(\"Checking for new files.\")\n files_found = self.check_unconverted()\n \n if files_found:\n print(\"Found {0} new file(s).\".format(len(files_found)))\n self.start_converting(files_found)\n \n # Here we check the queue, which will long-poll\n # for up to ``self.poll_interval`` seconds.\n self.process_completed()", "title": "" }, { "docid": "5c91f7f855940a764f8f17f46fac23cc", "score": "0.51088744", "text": "def loop_start(self):\n self.loop_running = True\n t = threading.Thread(target=self.loop_thread, daemon=True)\n t.start()", "title": "" }, { "docid": "3762bd187f74a60f177fc7ba10d59c04", "score": "0.5103863", "text": "def watch_file(fpath):\n # verificar que es un archivo valido\n if os.path.isfile(fpath):\n fname = os.path.basename(fpath)\n mngfile = os.path.join(MANAGED_FILES_DIR, fname)\n # verificar si ya existe copia para monitorizar cambios\n if not os.path.exists(mngfile):\n shutil.copy(fpath, mngfile)\n\n change_time = None\n while 1:\n try:\n # verificar si son diferentes\n if not compare_files(fpath, mngfile):\n \"\"\"Si cambio el archivo definamos el tiempo y hagamos una copia temporal\n por si se detectan cambios seguidos\"\"\"\n if not change_time:\n shutil.copy(fpath, \"/tmp/\" + fname)\n change_time = time.time()\n #web.do_post(url=\"/file_change\", data={'file': fpath, 'ip': IP, 'pos': POS,\n # 'node': HOSTNAME, 'hora': time.strftime(\"%a, %d-%m-%Y %H:%M \")})\n\n \"\"\"Si cambio el archivo definamos el tiempo y actulizamos la copia temporal\"\"\"\n if not compare_files(fpath, \"/tmp/\" + fname):\n shutil.copy(fpath, \"/tmp/\" + fname)\n change_time = time.time()\n #web.do_post(url=\"/file_change\", data={'file': fname, 'ip': IP, 'pos': POS,\n # 'node': HOSTNAME, 'hora': time.strftime(\"%a, %d-%m-%Y %H:%M \")})\n\n\n if change_time and (time.time() - change_time) > FILE_CHANGE_THRESHOLD:\n \"\"\" -Resetear tiempo de ultimo cambio\n - Remover copia temp, a su vez\n - Notificar el cambio al servidor web\n - Actualixar la copia monitorizada en managed_files\"\"\"\n change_time = None\n shutil.copy(fpath, mngfile)\n os.remove(\"/tmp/\" + fname)\n\n web.do_post(url=\"/file_change\", data={'file': fpath, 'ip': IP, 'pos': POS,\n 'node': HOSTNAME, 'hora': datetime.now()})\n\n notification = \"El archivo \" + fpath + \" fue modificado el \"+ time.strftime(\"%d-%m-%Y a las %H:%M\")+\" hora del host\"\n\n dweb.do_post(url=\"/api/network/notification/\", data={'description': notification, 'node': DB_ID})\n\n\n except OSError, oe:\n logger.error(\"Problema con: \" + oe.filename, exc_info=True)\n except Exception, e:\n logger.error(\"Problema con archivo: \", exc_info=True)\n time.sleep(2)\n else:\n logger.warning(fpath + \" no es un archivo valido, 
terminado hilo...\")\n return\n logger.info('Deteniendo Thread [watch_file] finished...')", "title": "" }, { "docid": "a808031a97454b326d525bfbf7633e80", "score": "0.51031315", "text": "def bw_filecc_loop_test(\n config_filepath: str,\n snd_quantity: int,\n snd_mode: str,\n collect_stats: bool,\n run_tshark: bool,\n results_dir: str\n):\n interval = 10\n rcv_mode = 'remotely'\n\n try:\n logger.info('Starting bandwidth loop test')\n perform_test.main_function(\n perform_test.TestName.bw_loop_test.value,\n config_filepath,\n rcv_mode,\n snd_quantity,\n snd_mode,\n collect_stats,\n run_tshark,\n results_dir + '/bw_loop_test'\n )\n except Exception as error:\n logger.info(\n f'During bandwidth loop test an exception occured ({error.__class__.__name__}): {error}. '\n f'File CC loop test can not be done.'\n )\n return\n\n logger.info(f'Waiting for {interval} s ...')\n time.sleep(interval)\n\n logger.info('Starting file cc loop test')\n perform_test.main_function(\n perform_test.TestName.filecc_loop_test.value,\n config_filepath,\n rcv_mode,\n snd_quantity,\n snd_mode,\n collect_stats,\n run_tshark,\n results_dir + '/filecc_loop_test'\n )\n\n logger.info('Done')", "title": "" }, { "docid": "0e17cefc0d18a1a13620217e4b252747", "score": "0.50998366", "text": "async def periodic_sync_unprocessed(app):\n server_address = app[\"socket_file\"]\n\n while True:\n print(\"Starting background sync unprocessed\")\n reset_db_connections()\n try:\n conn = aiohttp.UnixConnector(path=server_address)\n timeout = aiohttp.ClientTimeout(total=600)\n async with aiohttp.ClientSession(connector=conn, raise_for_status=True) as session:\n # localhost substitutes for the socket file\n async with session.get(f\"http://localhost/sync_unprocessed\", timeout=timeout) as resp:\n pass\n await asyncio.sleep(60 * 30)\n except asyncio.CancelledError:\n break\n except asyncio.TimeoutError:\n print(\"Timeout 1hr syncing data: \", timeout)\n except Exception as e:\n traceback.print_exc()\n raise e from None", "title": "" }, { "docid": "80f037892eddf1413741e97c8b06ab65", "score": "0.5097829", "text": "def run_cycle_loop(self):\n\n self.clear_finished_jobs()\n self.wait_until_slot_is_open()\n job = self.get_next_enqueued_job()\n if (job is None):\n time.sleep(0.1)\n return\n if (self.try_set_lock_on_job(job)):\n self.run_job(job)", "title": "" }, { "docid": "6d55067dcdb993c7ef62e07086fc27fa", "score": "0.5097686", "text": "def run(self):\n while True:\n self.run_once()", "title": "" }, { "docid": "831f09e1e013b0e583b1a4f07a253ff9", "score": "0.5096968", "text": "def periodic(self):\r\n\r\n # Update the channel volume.\r\n vol = self.chan_volume * renpy.game.preferences.volumes[self.mixer]\r\n\r\n if vol != self.actual_volume:\r\n pss.set_volume(self.number, vol)\r\n self.actual_volume = vol\r\n\r\n\r\n # This should be set from something that checks to see if our\r\n # mixer is muted.\r\n force_stop = self.context.force_stop or (renpy.game.preferences.mute[self.mixer] and self.stop_on_mute)\r\n\r\n if self.playing and force_stop:\r\n pss.stop(self.number)\r\n self.playing = False\r\n self.wait_stop = False\r\n\r\n if force_stop:\r\n if self.loop:\r\n self.queue = self.queue[-len(self.loop):]\r\n else:\r\n self.queue = [ ]\r\n return\r\n\r\n # Should we do the callback?\r\n do_callback = False\r\n\r\n # This has been modified so we only queue a single sound file\r\n # per call, to prevent memory leaks with really short sound\r\n # files. 
So this loop will only execute once, in practice.\r\n while True:\r\n\r\n depth = pss.queue_depth(self.number)\r\n\r\n if depth == 0:\r\n self.wait_stop = False\r\n self.playing = False\r\n\r\n # Need to check this, so we don't do pointless work.\r\n if not self.queue:\r\n break\r\n\r\n # If the pcm_queue is full, then we can't queue\r\n # anything, regardless of if it is midi or pcm.\r\n if depth >= 2:\r\n break\r\n\r\n # If we can't buffer things, and we're playing something\r\n # give up here.\r\n if not self.buffer_queue and depth >= 1:\r\n break\r\n\r\n # We can't queue anything if the depth is > 0 and we're\r\n # waiting for a synchro_start.\r\n if self.synchro_start and depth:\r\n break\r\n\r\n # If the queue is full, return.\r\n if pss.queue_depth(self.number) >= 2:\r\n break\r\n\r\n # Otherwise, we might be able to enqueue something.\r\n topq = self.queue.pop(0)\r\n\r\n # Blacklist of old file formats we used to support, but we now\r\n # ignore.\r\n lfn = topq.filename.lower() + self.file_suffix.lower()\r\n for i in (\".mod\", \".xm\", \".mid\", \".midi\"):\r\n if lfn.endswith(i):\r\n topq = None\r\n\r\n if not topq:\r\n continue\r\n\r\n try:\r\n topf = load(self.file_prefix + topq.filename + self.file_suffix)\r\n\r\n if depth == 0:\r\n pss.play(self.number, topf, topq.filename, paused=self.synchro_start, fadein=topq.fadein, tight=topq.tight)\r\n else:\r\n pss.queue(self.number, topf, topq.filename, fadein=topq.fadein, tight=topq.tight)\r\n\r\n self.playing = True\r\n\r\n except:\r\n\r\n # If playing failed, remove topq.filename from self.loop\r\n # so we don't keep trying.\r\n while topq.filename in self.loop:\r\n self.loop.remove(topq.filename)\r\n\r\n if renpy.config.debug_sound and not renpy.game.after_rollback:\r\n raise\r\n else:\r\n return\r\n\r\n break\r\n\r\n if self.loop and not self.queue:\r\n for i in self.loop:\r\n newq = QueueEntry(i, 0, topq.tight)\r\n self.queue.append(newq)\r\n else:\r\n do_callback = True\r\n\r\n # Queue empty callback.\r\n if do_callback and self.callback:\r\n self.callback() # E1102\r", "title": "" }, { "docid": "c9fddab77d3709c8e2d7dc1ca057229e", "score": "0.50933516", "text": "async def check_dir():\n try:\n last_check = None\n while True:\n check = {\n f.name\n for f in self._direct.iterdir()\n if f.is_file() }\n\n if last_check != check:\n last_check = check\n self._dir_update.set()\n\n await aio.sleep(5) # TODO: make interval param\n\n except aio.CancelledError:\n pass", "title": "" }, { "docid": "94445dedfdd27a9694f96a224a2dfe53", "score": "0.5092626", "text": "def manage_forever(self):\n\n while True:\n self.load_config()\n\n # now make a config for a run\n config = dict(self.config)\n config[\"run\"] = self.find_run_number()\n\n log.info(\"Preparing to run playback run %d\", config[\"run\"])\n r = SimulatorRun(self, config, self.kwargs)\n self.current_run = r\n r.run()\n\n if r.state == \"error\":\n return 1", "title": "" }, { "docid": "c010d19ae82ccb66f5b59fe8c91eb91b", "score": "0.5086614", "text": "def should_poll(self):\r\n return True", "title": "" }, { "docid": "c010d19ae82ccb66f5b59fe8c91eb91b", "score": "0.5086614", "text": "def should_poll(self):\r\n return True", "title": "" }, { "docid": "4b385d19011ed226a81a9925401dd891", "score": "0.5083186", "text": "def _start_loop_for_hook(self):\n if not self._hook:\n raise NoHookFound\n\n self._hook_loop = True\n while self._hook_loop:\n time.sleep(25)\n new_notifications = self.check_new_notifications()\n if not new_notifications:\n continue\n\n self._hook(new_notifications)", 
"title": "" }, { "docid": "f045138b068a3222a890ec6a6e80f885", "score": "0.5065017", "text": "async def _update_dir(self, conn: IndexerConnection):\n async def check_dir():\n \"\"\"\n Polls on directory file name changes, and raises event updates\n on change found\n \"\"\"\n try:\n last_check = None\n while True:\n check = {\n f.name\n for f in self._direct.iterdir()\n if f.is_file() }\n\n if last_check != check:\n last_check = check\n self._dir_update.set()\n\n await aio.sleep(5) # TODO: make interval param\n\n except aio.CancelledError:\n pass\n\n checker = aio.create_task(check_dir())\n try:\n while True:\n await self._dir_update.wait()\n\n async with conn.access:\n files = self.get_files()\n await conn.stream.request(Request.UPDATE, files=files)\n # should be only place where dir_update is cleared\n self._dir_update.clear()\n\n except aio.CancelledError:\n checker.cancel()", "title": "" }, { "docid": "8fba12c8043c82671c4ca8909a1a3283", "score": "0.50632805", "text": "def loop(self):\n\t\ttry:\n\t\t\twhile self.connection.Process(1):\n\t\t\t\tpass\n\t\texcept KeyboardInterrupt:\n\t\t\tpass", "title": "" }, { "docid": "cf5bb819f8bd353151a2aeec0cb85b2f", "score": "0.5059709", "text": "def sync_async():\n return False", "title": "" }, { "docid": "8cbb97e5b27abc6e1d408b878fb9bb3e", "score": "0.5054679", "text": "def wait_for_start(self,dt=.1):\n while not os.path.isfile('%s/%s'%(DATADR,'start')):\n time.sleep(dt)", "title": "" }, { "docid": "e83684e2b06710a76ad5fc7f81559005", "score": "0.5054015", "text": "def watch():", "title": "" }, { "docid": "3820b629009db38802a081791f78298b", "score": "0.5053639", "text": "async def wakeloop_async(self):\n curtime = time.time()\n while True:\n await asyncio.sleep(5.0)\n elapsed = time.time() - curtime\n if elapsed > 30.0:\n async def reconnect_if_connected(team):\n if team.rtm_connected():\n await team.rtm_connect_async()\n \n if self.teams:\n (done, pending) = await asyncio.wait([ self.client.evloop.create_task(reconnect_if_connected(team)) for team in self.teams.values() ])\n for res in done:\n self.print_exception(res.exception(), 'Could not reconnect team')\n \n # Server pings. We do this right after the time check because\n # that is a better way to avoid timeout errors. Now we've got\n # all the sockets restabilized, but timeout errors are still\n # possible; the pings will root them out.\n for team in self.teams.values():\n if team.rtm_connected():\n await team.rtm_send_async({ 'type':'ping', 'id':None })\n\n # Note the time for next go-around. (Should be exactly five\n # seconds, but if the machine sleeps, it'll be more.)\n curtime = time.time()", "title": "" } ]
20616c335706dead03dbd0b355751bb7
Returns a dictionary containing the Steiner nodes
[ { "docid": "2d825c430d9e0d9e277a6470bf367ad8", "score": "0.0", "text": "def heuristique_ACPM(self,draw=False):\n\n def getVertricesOfPath(edges):\n set_node = set()\n for e in edges:\n id1,id2 = self.getIdVerticesOfEdge(e)\n set_node.add(id1)\n set_node.add(id2)\n return set_node\n\n\n if draw:\n try:\n os.makedirs(self.dirname+\"/H_ACPM\")\n except:\n pass\n\n\n #Graphe de depart contenant tout les noeuds\n G = Graphe_Individu(self,self.wholeGraphDict)\n if draw:\n self.drawGraph(\"/H_ACPM/G0\",G.get_graphe().get_edges())\n\n\n nb_vertices = 1\n nb_tmp_vertices = 0\n i=0\n\n #Tant que le nombre de noeuds du graphe decroit, appliquer kruskal\n while nb_vertices > nb_tmp_vertices :\n g_acpm = G.get_MST()\n\n\n nb_vertices = len(G.get_dictSteinerNodes())\n set_Node = set(G.get_dictSteinerNodes().keys())\n dictSteinerNodez = self.eliminationFeuilles(g_acpm.get_edges(),set_Node)\n G = Graphe_Individu(self,dictSteinerNodez)\n nb_tmp_vertices = len(dictSteinerNodez)\n\n if draw:\n i+= 1\n self.drawGraph(\"/H_ACPM/G%d\"%i+\"_%d\"%g_acpm.get_total_weight(),g_acpm.get_edges())\n\n\n if draw:\n self.drawGraph(\"/H_ACPM/GFinal_%d\"%G.get_MST().get_total_weight(),G.get_MST().get_edges())\n\n for i in self.steinerNodes:\n if i not in dictSteinerNodez.keys():\n dictSteinerNodez[i] = 0\n\n # return Graphe_Individu(self,dictSteinerNodez)\n return dictSteinerNodez", "title": "" } ]
[ { "docid": "54c14ee0b7392691fb50c0ffb9f8ea26", "score": "0.6221054", "text": "def creer_dictionnaire_vide():\n dico = {}\n return dico", "title": "" }, { "docid": "82e0cf18839180689903b7d5cd2d1912", "score": "0.59172213", "text": "def get_dictionary(self):\n dct = super(ReportMalnutrition, self).get_dictionary()\n dct.update({\"oedema\":\"no\", \"diarrhea\":\"no\"})\n if self.observed.filter(name=\"Oedema\"):\n dct[\"oedema\"] = \"yes\"\n if self.observed.filter(name=\"Diarrhea\"):\n dct[\"diarrhea\"] = \"yes\"\n # message back as cm\n dct['muac'] = \"%.1f cm\" % (self.muac / 10.0)\n return dct", "title": "" }, { "docid": "4acf72a32450c0e2a112037e6c988beb", "score": "0.5856794", "text": "def dictionary():\r\n kluisDict = dict.fromkeys(range(1, 21))\r\n\r\n with open('FietsStalling.txt', 'r+') as readFile:\r\n for line in readFile: # kluizen uit bestand lezen\r\n splitLine = line.split(' ') # regels opdelen\r\n kluisNummer = int(splitLine[0].strip(';')) # eerste getal is kluisnummer\r\n OVNummer = int(splitLine[3].strip('\\n')) # laatste getal is OV nummer\r\n dateTime = splitLine[1] + ' ' + splitLine[2].strip(',') # middelste deel is datum en tijd\r\n kluisDict[kluisNummer] = (dateTime, OVNummer) # keys zijn kluisnummers, values zijn OV nummer en datetime\r\n return kluisDict", "title": "" }, { "docid": "36c897f2b9636340e1509f1256da9054", "score": "0.5808893", "text": "def convertirDic(botones):\n dic_aux = {}\n for clave,valor in botones.items():\n dic_aux[tuple(map(int,clave.split(\",\")))] = valor\n return dic_aux", "title": "" }, { "docid": "83af111aee848b81013b04e7570e5056", "score": "0.57970524", "text": "def kluisInfo():\r\n kluisDict = dictionary()\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n for kluis in kluisDict:\r\n try:\r\n if kluisDict[kluis] is not None and int(beginSchermEntry.get()) in kluisDict[kluis]: # kluis zoeken in\r\n # dictionary\r\n beginSchermTopTitel['text'] = fietsStalTijd(kluisDict[kluis][1]) # functie fietsStalTijd op kluis\r\n # aanroepen\r\n beginSchermTitel['text'] = 'De huidige kosten zijn €' + str(prijs(kluisDict[kluis][0]))\r\n beginSchermEntry.delete(0, END)\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n beginSchermTitel['text'] = 'Dit OV nummer is onbekend'\r\n return", "title": "" }, { "docid": "cb7ed0f90a1b4209c3bde4f986ff3740", "score": "0.5759881", "text": "def create_ner_tags_dict():\r\n global ne_tags_set, ner_to_id, ne_tags, id_to_ner\r\n\r\n ne_tags = list(ne_tags_set) + ['[CLS]', '[SEP]']\r\n ne_tags.sort()\r\n id_to_ner = {idx: tag for idx, tag in enumerate(ne_tags)}\r\n ner_to_id = {tag: idx for idx, tag in enumerate(ne_tags)}\r\n print(f'Total NER tag size: {len(ne_tags)}; Tags: {ne_tags}')", "title": "" }, { "docid": "01f14828cdeb9c24a91633a9a6f22881", "score": "0.572829", "text": "def informacoes_ultima_nfce(self):\r\n info = {} \r\n for x in range(1,8):\r\n value = self.informacao_ultimo_nfce(x)\r\n info[value[0]] = value[1]\r\n return info", "title": "" }, { "docid": "0df665950c8f6dec5ad69444053febf2", "score": "0.56703657", "text": "def __init__(self, proteins_dict):\n \n self.proteins_dict = proteins_dict\n self.proteins = list(proteins_dict.keys())", "title": "" }, { "docid": "07804a908010412be24cb2577e7cc6ae", "score": "0.56652915", "text": "def get_uniprots_for_hint():\n initial_dict = {}\n for node in DatabaseGraph.get_all('UNIPROT'):\n initial_dict[node['legacyID']] = node.id\n\n for key in list(initial_dict.keys()):\n 
initial_dict[key.split('_')[0]] = initial_dict.pop(key)\n return initial_dict", "title": "" }, { "docid": "2ddbf0687ba2bf3d03112944759d3785", "score": "0.564972", "text": "def get_dictionar_cuvinte(continut_fisier: str) -> dict:\n cuvinte = list(get_cuvinte_unice(continut_fisier))\n dictionar = {i: cuvinte[i] for i in range(len(cuvinte))}\n return dictionar", "title": "" }, { "docid": "85242d33eefb6490dbf30541ec5512d0", "score": "0.562634", "text": "def __init__(self, nombre, socios):\n self.__nombre = nombre\n self.__socios = socios\n self.__resultados = {'p1': '', 'p2': '', 'p3': '', 'p4': '', 'p5': '', 'p6': '', 'p7': ''}", "title": "" }, { "docid": "1fdd5a571b2ae96994dca6db64951552", "score": "0.56240404", "text": "def dict_ski() -> dict:\r\n ski = {\"Carver\": [], \"Tourenski\": [], \"Allmountain\": [], \"Freestyle-Ski\": []}\r\n return ski", "title": "" }, { "docid": "69c62131b0025293951372bf8243bb69", "score": "0.56221503", "text": "def codonComposition(self):#works\n return {codon: self.countDicNuc.get(codon) for codon in self.rnaCodonTable.keys()}", "title": "" }, { "docid": "d44953956a9004e0f914945b368a1e5b", "score": "0.56216323", "text": "def kluisOpenen():\r\n kluisDict = dictionary()\r\n beginSchermTopTitel['text'] = ''\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n for kluis in kluisDict:\r\n try:\r\n if kluisDict[kluis] is not None and int(beginSchermEntry.get()) in kluisDict[kluis]: # kluis zoeken in\r\n # dictionary\r\n beginSchermTitel['text'] = 'Kluis nummer ' + str(kluis) + ' is geopend'\r\n beginSchermEntry.delete(0, END)\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n beginSchermTitel['text'] = 'Dit OV nummer is onbekend'\r\n return", "title": "" }, { "docid": "cd8ee4753fa401f08b71bca605a93621", "score": "0.5609806", "text": "def makeGerund(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split()\r\n for x in LoW: \r\n if 'ing' in x and x not in self.gerund: \r\n self.gerund[x] = 1\r\n elif 'ing' in x and x in self.gerund: \r\n self.gerund[x] += 1\r\n return self.gerund", "title": "" }, { "docid": "6fdadcfa32e78eb8f181184ad7a31487", "score": "0.55826163", "text": "def get_mapu_kanala_ID_OPIS(self):\n out = {}\n for kanal in self.sviKanali:\n out[kanal] = self.get_datastore(kanal).koncentracija.opis\n return out", "title": "" }, { "docid": "acfcd529f889c07a69b41393317d1e34", "score": "0.5527054", "text": "def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n for tree in self.train_trees:\n for t_lemma, formeme in tree.nodes:\n if t_lemma not in self.dict_t_lemma:\n self.dict_t_lemma[t_lemma] = dict_ord\n dict_ord += 1\n if formeme not in self.dict_formeme:\n self.dict_formeme[formeme] = dict_ord\n dict_ord += 1\n\n self.dict_size = dict_ord", "title": "" }, { "docid": "1ca7887db37d328da4216bebd458ad8e", "score": "0.55207396", "text": "def get_mapu_kanala_ID_JEDINICA(self):\n out = {}\n for kanal in self.sviKanali:\n out[kanal] = self.get_datastore(kanal).koncentracija.jedinica\n return out", "title": "" }, { "docid": "dc9e70215ea5b4aee93eaaa741cf0bc9", "score": "0.55091435", "text": "def tri_si_rencontre(self, joueurs_tries, liste_rencontres, nb_joueurs):\n # We recover the possibilities\n for x in joueurs_tries:\n liste_dict = []\n for y 
in joueurs_tries:\n if x == y:\n continue\n if (x, y) in liste_rencontres or (y, x) in liste_rencontres:\n continue\n else:\n liste_dict.append(y)\n self.dict_possiblity[x] = liste_dict\n copy_joueurs = list(joueurs_tries)\n liste_finale = []\n nb_tour = 0\n error = False\n while joueurs_tries:\n x = joueurs_tries[0]\n for y in joueurs_tries:\n if nb_tour > nb_joueurs**2:\n print(\"Il y a une erreur dans l'algorithme.\")\n error = True\n break\n if x == y:\n continue\n if (x, y) in liste_rencontres or (y, x) in liste_rencontres:\n nb_tour += 1\n continue\n else:\n i = 0\n # we are looking for a unique possibility\n for key in list(self.dict_possiblity):\n if len(self.dict_possiblity[key]) == 1:\n valeur = self.dict_possiblity[key][0]\n liste_finale.append((key, valeur))\n liste_rencontres.append((key, valeur))\n joueurs_tries.remove(key)\n joueurs_tries.remove(valeur)\n self.sup_dicti(valeur, key)\n i += 1\n break\n if i > 0:\n break\n # we remove both of the possibilities\n self.sup_dicti(x, y)\n liste_finale.append((x, y))\n liste_rencontres.append((x, y))\n joueurs_tries.remove(y)\n joueurs_tries.remove(x)\n break\n if error:\n liste_finale = Vue().demander_binomes(copy_joueurs,\n nb_joueurs)\n return liste_finale\n return liste_finale", "title": "" }, { "docid": "b574d538c80820c5eccc2ee840469fcd", "score": "0.55068797", "text": "def _get_rekey_ddi_data(ddi_data):\n for enum, item in enumerate(ddi_data):\n ddi_data[enum] = dict((d['network'],\n dict(d, index=index))\n for (index, d) in enumerate(item))\n return ddi_data", "title": "" }, { "docid": "f20485b68b31e985b773bc1d0e45bb50", "score": "0.5481762", "text": "def kluisInfoTg(ovnummer):\r\n kluisDict = dictionary()\r\n\r\n for kluis in kluisDict:\r\n try:\r\n if kluisDict[kluis] is not None and ovnummer in kluisDict[kluis]: # kluis zoeken in dictionary\r\n huidigeKosten = 'De huidige kosten zijn €' + str(prijs(kluisDict[kluis][0]))\r\n return huidigeKosten\r\n except ValueError:\r\n huidigeKosten = 'Geen geldige invoer'\r\n return huidigeKosten", "title": "" }, { "docid": "df9da8ad14ad9e81be1b87d507431967", "score": "0.54767394", "text": "def kluisVrijgeven():\r\n kluisDict = dictionary()\r\n beginSchermTopTitel['text'] = ''\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n with open('FietsStalling.txt', 'r+') as readFile:\r\n for kluis in kluisDict:\r\n try:\r\n if kluisDict[kluis] is not None and int(beginSchermEntry.get()) in kluisDict[kluis]: # kluis zoeken\r\n # in dictionary\r\n kluisDict[kluis] = None\r\n beginSchermTitel['text'] = 'Kluis nummer ' + str(kluis) + ' is vrijgegeven'\r\n readFile.truncate(0)\r\n readFile.seek(0)\r\n for item in kluisDict: # bestand updaten (vrijgegeven kluis verwijderen)\r\n if kluisDict[item] is not None:\r\n readFile.write(str(item) + '; ' + ''.join(str(kluisDict[item])).strip('{}()\\'\\'')\r\n .replace('\\'', '') + '\\n') # bezette kluizen naar bestand schrijven\r\n beginSchermEntry.delete(0, END)\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n beginSchermTitel['text'] = 'Dit OV nummer is onbekend'\r\n return", "title": "" }, { "docid": "f7aca06a0bbf502473226117e4a5ed02", "score": "0.5473606", "text": "def raw_data() -> Dict:\n return {\"neighbourhood\":\"Buttes-Montmartre\",\"room_type\":\"Entire home/apt\",\"minimum_nights\":1.555,\"mois\":2,\"voyageurs\":2.5,\"chambres\":1,\"lits\":1,\"salle_de_bains\":1}", "title": "" }, { "docid": "4a18d74da2fb333bd1751caa4724bee3", "score": "0.5451817", "text": "def 
strategia_pocitaca(pole):\n dict_pole = dict(enumerate(list_pole))\n cislo_policka = False\n\n for k, v in dict_pole.items():\n print(type(k), v)\n k1_idx = int(k)\n k2_idx = False\n k3_idx = False\n k0_idx = False\n if v == \"O\":\n print(\"O found\")\n k1_idx = int(k)\n k2_idx = k + 1\n k3_idx = k + 2\n k0_idx = k - 1\n if k <=18:\n if dict_pole[k2_idx] == \"O\" and dict_pole[k3_idx] == \"-\":\n if k3_idx not in mozne_policka1:\n mozne_policka1.append(k3_idx)\n \n elif dict_pole[k3_idx] == \"O\" and dict_pole[k2_idx] == \"-\":\n if k2_idx not in mozne_policka1:\n mozne_policka1.append(k2_idx)\n\n elif dict_pole[k2_idx] == \"-\":\n if k2_idx not in mozne_policka2:\n mozne_policka2.append(k2_idx)\n\n elif dict_pole[k3_idx] == \"-\":\n if k3_idx not in mozne_policka2:\n mozne_policka2.append(k3_idx)\n\n elif dict_pole[k0_idx] == \"-\":\n if k0_idx not in mozne_policka2:\n mozne_policka2.append(k0_idx)\n\n else: \n cislo_policka = randint(0,19)\n else:\n cislo_policka = False\n print(cislo_policka, mozne_policka1, mozne_policka2)\n for x in range(0, 20):\n try:\n mozne_policka1.remove(20)\n mozne_policka1.remove(21)\n mozne_policka2.remove(20)\n mozne_policka2.remove(21)\n except:\n ValueError\n return cislo_policka, mozne_policka1, mozne_policka2", "title": "" }, { "docid": "deb62423d4636a70845f7a3171238b25", "score": "0.5446755", "text": "def _reducedProtToPeps(protToPeps, proteins):\n return {k: v for k, v in viewitems(protToPeps) if k not in proteins}", "title": "" }, { "docid": "4f6dabf34299175ab5f909dddcd0d81c", "score": "0.5446088", "text": "def get_puntos_articulacion(self):\n # Inicializo variables\n visitado = {}\n puntos_articulacion = set()\n for u in grafo.devolver_vertices():\n visitado[u] = False\n\n for v in grafo.devolver_vertices():\n if not visitado[v]:\n # Armo el arbol DFS desde v\n dfs = DFSIterativo(grafo, v, visitado)\n dfs.hacer_dfs()\n puntos_articulacion_v = dfs.get_puntos_articulacion()\n # Como v es raiz del arbol, lo saco para analizarlo por separado\n if v in puntos_articulacion_v: # Aunque deberia estar incluida siempre\n puntos_articulacion_v.remove(v)\n self.analizar_raiz(dfs.get_predecesor(), puntos_articulacion_v, v)\n puntos_articulacion.update(puntos_articulacion_v)\n return puntos_articulacion", "title": "" }, { "docid": "94f9032b997415e4ca85e7e0a35cf2c8", "score": "0.54070574", "text": "def nmer_dictionary(self,n,dic={}):\n if self.sequence == \"\":\n self.fetchSequence()\n self.sequence = self.sequence.upper()\n for i in range(0,len(self.sequence)-n):\n subseq = self.sequence[i:][:n]\n dic[subseq]=1+dic.get(subseq,0)\n del subseq\n return dic", "title": "" }, { "docid": "7cd43d0b12dc271236746066444b6e24", "score": "0.53893346", "text": "def leadersmorts_dpt(source):\n\n with open(source,encoding=\"utf-8\",mode=\"r\") as file:\n nbmpd= {}\n line = file.readline() # Lit la ligne d'entete\n for line in file:\n line = line.strip()\n if len(line) > 0:\n l = line.split(\";\")\n leader = netoyer_donnees(l)\n\n # tri departements\n if leader[\"Departamento\"] in nbmpd:\n nbmpd[leader[\"Departamento\"]] = nbmpd[leader[\"Departamento\"]]+1\n else:\n nbmpd[leader[\"Departamento\"]] = 1\n\n return nbmpd", "title": "" }, { "docid": "bbddc9dcfb3509010b199352939cf5cc", "score": "0.53661954", "text": "def __init__(self):\n self.aeropuertos = {}", "title": "" }, { "docid": "ae503c958b04e97e43aec3ad09da4954", "score": "0.5359835", "text": "def beheerderVrijgeven():\r\n kluisDict = dictionary()\r\n with open('FietsStalling.txt', 'r+') as readFile:\r\n try:\r\n if 
kluisDict[int(beheerderEntry.get())] is not None: # wanneer kluis bezet is\r\n kluisDict[int(beheerderEntry.get())] = None # kluis waarde verwijderen uit dictionary\r\n beheerderTitel['text'] = 'Kluis nummer ' + beheerderEntry.get() + ' is vrijgegeven'\r\n readFile.truncate(0)\r\n readFile.seek(0)\r\n for item in kluisDict: # bestand updaten (vrijgegeven kluis verwijderen)\r\n if kluisDict[item] is not None:\r\n readFile.write(str(item) + '; ' + ''.join(str(kluisDict[item])).strip('{}()\\'\\'')\r\n .replace('\\'', '') + '\\n') # bezette kluizen naar bestand schrijven\r\n return\r\n else:\r\n beheerderTitel['text'] = 'Deze kluis is niet bezet'\r\n except ValueError:\r\n beheerderTitel['text'] = 'Geen geldige invoer'\r\n return", "title": "" }, { "docid": "731d2af7e70214241b6ad05732a45c64", "score": "0.5344251", "text": "def recupererNiveau(niveau):\n global titres \n for cle, valeur in titres.items():\n if cle == niveau:\n return valeur", "title": "" }, { "docid": "2ddcab258a227e4cf529a04f88588d2f", "score": "0.5336905", "text": "def load_Bietenholz(path=data_path+\"Table1_complete_ascii.txt\"):\n\n res = {}\n num_of_SN = 0\n ex = \"\"\n\n with open(path, 'r') as f:\n for i in range(30):\n next(f)\n for line in f:\n words = line.split()\n current_SN_name = words[0]\n # determine if it's a new SN\n if current_SN_name != ex:\n if num_of_SN > 0:\n res[ex] = SN # save previous SN\n SN = SuperNova()\n num_of_SN += 1\n ex = words[0]\n\n SN.name = words[0]\n if ('L' in line[10]):\n SN.is_limit = np.append(SN.is_limit, True)\n else:\n SN.is_limit = np.append(SN.is_limit, False)\n SN.year = np.append(SN.year, int(line[12:16]))\n SN.month = np.append(SN.month, int(line[17:19]))\n SN.day = np.append(SN.day, float(line[20:25]))\n SN.telescope = np.append(SN.telescope, line[26:33])\n SN.freq = np.append(SN.freq, float(line[35:40]))\n SN.flux = np.append(SN.flux, float(line[41:49]))\n SN.dflux = np.append(SN.dflux, float(line[50:56]))\n SN.comment = np.append(SN.comment, line[57:63])\n res[words[0]] = SN\n return res", "title": "" }, { "docid": "ecc65ffc1395c99dac2a3ebe42726479", "score": "0.5328817", "text": "def __init__(self):\n self.seen = {}", "title": "" }, { "docid": "03b2ddc9559ebaea474fa750731881a4", "score": "0.53172606", "text": "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }", "title": "" }, { "docid": "03b2ddc9559ebaea474fa750731881a4", "score": "0.53172606", "text": "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n return {\n \"numero_prendas\": [\n self.from_entity(entity=\"number\"),\n\n ]\n }", "title": "" }, { "docid": "453309fc00f2d189644229f799da1d7d", "score": "0.5312169", "text": "def objets_uniques(self):\n objets = []\n for membre in self.membres:\n for objet in membre.equipe:\n if objet.unique:\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n if membre.tenu and membre.tenu.unique:\n objet = membre.tenu\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n\n return objets", "title": "" }, { "docid": "3943df34ab01ebfc8b93dc232dba89b1", "score": "0.5294528", "text": "def legendarios_sansanito():\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT nombre\n\t\t\t\tFROM sansanito\n\t\t\t\tWHERE legendary = 1\n\t\t\t\t\"\"\")\n\tprint_table([hdrs_sansanito[2],hdrs_sansanito[-4]])", "title": "" }, { "docid": 
"0db29cc87f1f3df592ef6cdcdda1c02c", "score": "0.5293456", "text": "def recherche(carnet: List[Dict]) -> None:\n nom = input(\"De qui cherchez-vous le téléphone ? \")\n trouve = False\n \n for i in range(len(carnet)):\n membre = carnet[i]\n if nom in membre.values():\n trouve = True\n for cle in membre.keys():\n print(\"{} : {}\".format(cle, membre[cle]), end=\"\\t\")\n print(\"\")\n if not trouve:\n print(\"{} non trouvée !\".format(nom))", "title": "" }, { "docid": "056a69bc3878a3b9c467436b0743d479", "score": "0.5289422", "text": "def secondary_keys_dicts(self):", "title": "" }, { "docid": "eb7ed2425ab9706d6d3ac3a6bebdc49d", "score": "0.5288512", "text": "def listVoyage(self):\n voyageObject_list = self.mainObject.getVoyagesIO()\n staffObject_list = self.mainObject.getStaffIO()\n voyage_dict = {}\n for voyage in voyageObject_list:\n if voyage.getStaff() == ['', '', '', '']: # If there is no staff assigned to the voyage,\n voyage_dict[voyage] = voyage.getStaff() # Then assign the staff to the voyage.\n else:\n for staffMember in staffObject_list:\n if staffMember.getSSN() in voyage.getStaff():\n if voyage in voyage_dict:\n voyage_dict[voyage].append(staffMember)\n else:\n voyage_dict[voyage] = [staffMember]\n return voyage_dict", "title": "" }, { "docid": "57508bd0a67b85f81e74c3eeb789319b", "score": "0.52860075", "text": "def frequentOneItem(self):\n\n candidate = {}\n # global finalPatterns, minSup, Database\n # self.minSup = self.minSup\n for i in range(len(self.Database)):\n for j in range(len(self.Database[i])):\n if self.Database[i][j] not in candidate:\n candidate[self.Database[i][j]] = [i]\n else:\n candidate[self.Database[i][j]] += [i]\n self.finalPatterns = {keys: value for keys, value in candidate.items() if len(value) >= self.minSup}\n #print(candidate)", "title": "" }, { "docid": "b5b418d64dd344b469b4a5ceea236c8c", "score": "0.52797186", "text": "def furanoses(self):\n return sorted(set([self[x.split(\"_\")[-1]][\"name\"] for x in self.furanose_fac.keys()]))", "title": "" }, { "docid": "4556ea8042cff2680da02ca13c675096", "score": "0.52763385", "text": "def handle_tuberculose_json() -> dict:\n\n result = {}\n\n tuberculose_data = get_json_data('tuberculose_processed.json')\n\n indices = list(tuberculose_data.keys())\n anos = indices[1: -2]\n\n for index,un_fed in tuberculose_data['UF de notificacão'].items():\n result[un_fed] = {}\n result[un_fed]['regiao'] = tuberculose_data['Região'][index]\n result[un_fed][\"total\"] = tuberculose_data['Total'][index]\n result[un_fed]['casos_anuais'] = {}\n for ano in anos:\n result[un_fed]['casos_anuais'][ano] = tuberculose_data[ano][index]\n \n return result", "title": "" }, { "docid": "54ad53104af3c8be7ba0692e5a21fcf2", "score": "0.5255011", "text": "def verif_site_catal(serine,d,x=15):\n\n w=dict()\n #dans un premier temp, on recherche tous les carbones alpha dans un rayon de x angstrom\n #a partir de la serine utilise comme reference\n cmd.select(\"selection_pour_site\",\"name ca within \"+str(x)+\" of (resi \"+str(serine[0])+\" and chain \"+serine[1]+\" and name ca)\")\n stored.list=list()\n cmd.iterate(\"selection_pour_site\",\"stored.list.append((resi,chain,resn))\")\n #print \"liste genere par pymol\"#debug\n #print stored.list #debug\n \n \n #on recherche dans un deuxieme temps s'il existe une histidine dans cette selection\n his,w[\"his\"]=site_utils.verif_histidine(stored.list,d)\n \n #dans un troisieme temps on recherche un aspartate ou un glutamate idealement place \n acide,w[\"acide\"]=site_utils.verif_acide(stored.list,d)\n \n 
w[\"dist\"]=x\n \n cmd.delete(\"selection_pour_site\")\n return [his,acide,w]", "title": "" }, { "docid": "a6488346b4c49ce720a3c562bb089492", "score": "0.5235363", "text": "def get_all_huns(self):\n for name1 in self.plist.keys():\n for name2 in self.plist.keys():\n if name1 < name2: \n nkey = name1+\":\"+name2\n self.result[nkey] = self.get_hun(name1, name2)", "title": "" }, { "docid": "a182f0732da7671826a129ceeaca8699", "score": "0.52344406", "text": "def ewriters():\n return dict(_ewriters)", "title": "" }, { "docid": "52e395eb930befe4a37d3220491e45db", "score": "0.52272594", "text": "def __init__(self):\n # self.organism = []\n self.weighting_dict = defaultdict(list)\n # self.codon_obj_dict = {}\n self.codon_dict = {\n 'UUU':'F','UUC':'F',\n 'UUA':'L','UUG':'L','CUU':'L','CUC':'L','CUA':'L','CUG':'L',\n 'AUU':'I','AUC':'I','AUA':'I',\n 'AUG':'M',\n 'GUU':'V', 'GUC':'V','GUA':'V','GUG':'V',\n 'UCU':'S','UCC':'S','UCA':'S','UCG':'S',\n 'CCU':'P','CCC':'P','CCA':'P','CCG':'P',\n 'ACU':'T','ACC':'T','ACA':'T','ACG':'T',\n 'GCU':'A','GCC':'A','GCA':'A','GCG':'A',\n 'UAU':'Y','UAC':'Y',\n 'UAA':'X','UAG':'X',\n 'CAU':'H','CAC':'H',\n 'CAA':'Q','CAG':'Q',\n 'AAU':'N','AAC':'N',\n 'AAA':'K','AAG':'K',\n 'GAU':'D','GAC':'D',\n 'GAA':'E','GAG':'E',\n 'UGU':'C','UGC':'C',\n 'UGA':'X',\n 'UGG':'W',\n 'CGU':'R','CGC':'R','CGA':'R','CGG':'R',\n 'AGU':'S','AGC':'S',\n 'AGA':'R','AGG':'R',\n 'GGU':'G','GGC':'G', 'GGA':'G','GGG':'G'\n }", "title": "" }, { "docid": "17631593e81e7927e4e8b3d0c8d49abf", "score": "0.5222315", "text": "def initializeCollection():\n return {SENSOR1:[], SENSOR2:[], SENSOR3:[],SENSOR4:[], DATE:[]}", "title": "" }, { "docid": "964f3be1b45df2170abdabf6caa9b6e7", "score": "0.5206081", "text": "def get_listu_uredjaja(self):\n lista = sorted(list(self.uredjaji.keys()))\n return lista", "title": "" }, { "docid": "5344ca0caa54e6fa815204d8a1af1862", "score": "0.5204758", "text": "def assign_no_to_node(self,list):\n list = sorted(list)\n d = {}\n for i,node in enumerate(list):\n #print i,node\n d[node] = i \n return d,len(d)", "title": "" }, { "docid": "1c0e247d337186a5626f031683e24e20", "score": "0.5188736", "text": "def get_suma_var(nombre):\n contador = dict(Counter([c for c in mydic[nombre][\"CODIGO_POSTAL\"]]))\n contador = {int(k): int(v) for k, v in contador.items()}\n return contador", "title": "" }, { "docid": "cc5177c07e111623557feb15b80699ba", "score": "0.51868904", "text": "def _create_dictionary_of_ned_d(\n self):\n self.log.debug(\n 'starting the ``_create_dictionary_of_ned_d`` method')\n\n count = 0\n with open(self.pathToDataFile, 'rb') as csvFile:\n csvReader = csv.reader(\n csvFile, dialect='excel', delimiter=',', quotechar='\"')\n totalRows = sum(1 for row in csvReader)\n csvFile.close()\n totalCount = totalRows\n\n with open(self.pathToDataFile, 'rb') as csvFile:\n csvReader = csv.reader(\n csvFile, dialect='excel', delimiter=',', quotechar='\"')\n theseKeys = []\n dictList = []\n for row in csvReader:\n if len(theseKeys) == 0:\n totalRows -= 1\n if \"Exclusion Code\" in row and \"Hubble const.\" in row:\n for i in row:\n if i == \"redshift (z)\":\n theseKeys.append(\"redshift\")\n elif i == \"Hubble const.\":\n theseKeys.append(\"hubble_const\")\n elif i == \"G\":\n theseKeys.append(\"galaxy_index_id\")\n elif i == \"err\":\n theseKeys.append(\"dist_mod_err\")\n elif i == \"D (Mpc)\":\n theseKeys.append(\"dist_mpc\")\n elif i == \"Date (Yr. 
- 1980)\":\n theseKeys.append(\"ref_date\")\n elif i == \"REFCODE\":\n theseKeys.append(\"ref\")\n elif i == \"Exclusion Code\":\n theseKeys.append(\"dist_in_ned_flag\")\n elif i == \"Adopted LMC modulus\":\n theseKeys.append(\"lmc_mod\")\n elif i == \"m-M\":\n theseKeys.append(\"dist_mod\")\n elif i == \"Notes\":\n theseKeys.append(\"notes\")\n elif i == \"SN ID\":\n theseKeys.append(\"dist_derived_from_sn\")\n elif i == \"method\":\n theseKeys.append(\"dist_method\")\n elif i == \"Galaxy ID\":\n theseKeys.append(\"primary_ned_id\")\n elif i == \"D\":\n theseKeys.append(\"dist_index_id\")\n else:\n theseKeys.append(i)\n continue\n\n if len(theseKeys):\n count += 1\n if count > 1:\n # Cursor up one line and clear line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n if count > totalCount:\n count = totalCount\n percent = (float(count) / float(totalCount)) * 100.\n print \"%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory\" % locals()\n rowDict = {}\n for t, r in zip(theseKeys, row):\n rowDict[t] = r\n if t == \"ref_date\":\n try:\n rowDict[t] = int(r) + 1980\n except:\n rowDict[t] = None\n\n if rowDict[\"dist_index_id\"] != \"999999\":\n dictList.append(rowDict)\n\n csvFile.close()\n\n self.log.debug(\n 'completed the ``_create_dictionary_of_ned_d`` method')\n return dictList", "title": "" }, { "docid": "62ccabca92f4efd694dc817675f1d835", "score": "0.5185662", "text": "def __init__(self,n):\n\t\tself._dictOut={}\n\t\tself._dictIn = {}\n\t\tfor i in range(n):\n\t\t\tself._dictOut[i]=[]\n\t\t\tself._dictIn[i] = []", "title": "" }, { "docid": "b7378d9b83f7fd17ac382b6a6f119314", "score": "0.517416", "text": "def unisciRegioni(dictEsoni, dictIntroni):\n\n\tidx_starts\t=\t0\n\tidx_ends\t=\t1\n\tidx_tipo\t=\t2\n\t\n\tdictEsIn = {}\n\n\t# Inizializzazione del tipo di regioni considerate\n\t# Servono per stampare il file in formato '.gtf'\n\t# (relativo ad introni ed esoni)\n\t#\n\tesone = True\n\tintrone = False\n\n\tfor transcriptID in dictEsoni:\n\t\tstarts_esoni \t= dictEsoni[transcriptID][idx_starts]\t\t\t\t\t# Si salvano le liste relative a..\n\t\tends_esoni\t\t= dictEsoni[transcriptID][idx_ends]\t\t\t\t\t\t# ..start e end degli esoni\n\n\t\tnrEsoni = len(starts_esoni)\n\t\ttipo_esoni\t\t= [esone] * nrEsoni\t\t\t\t\t\t\t\t\t\t# Inizializzazione lista di tipo 'esone' \n\n\t\tif transcriptID in dictIntroni:\t\t\t\t\t\t\t\t\t\t\t# Se il transcript_id ha regioni introniche..\n\t\t\tstarts_introni\t= dictIntroni[transcriptID][idx_starts]\t\t\t\t# ..si salvano le liste relative a start e end..\n\t\t\tends_introni\t= dictIntroni[transcriptID][idx_ends]\t\t\t\t# ..degli introni\n\t\t\t\n\t\t\tnrIntroni = len(starts_introni)\n\t\t\ttipo_introni\t= [introne] * nrIntroni\t\t\t\t\t\t\t\t# Inizializzazione lista di tipo 'introne'\n\t\t\t\n\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# Se il transcript non ha regioni introniche..\n\t\t\tstarts_introni \t= []\t\t\t\t\t\t\t\t\t\t\t\t# ..si cancellano le liste per non avere residui..\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tends_introni\t= []\t\t\t\t\t\t\t\t\t\t\t\t# ..dai transcript precedenti\n\t\t\ttipo_introni\t= []\n\n\t\tstarts_esoni.extend(starts_introni)\t\t\t\t\t\t\t\t\t\t# Concatenazione liste di start di esoni e introni\n\t\tends_esoni.extend(ends_introni)\t\t\t\t\t\t\t\t\t\t\t# Concatenazione liste di end di esoni e introni\n\t\ttipo_esoni.extend(tipo_introni)\t\t\t\t\t\t\t\t\t\t\t# Concatenazione liste di tipo di esoni e introni\n\n\n\t\t# Ordinamento su base numerica delle tre liste rispetto \n\t\t# alla prima (start) ordinata\n\t\t#\n\t\tregioni 
= sorted(zip(starts_esoni, ends_esoni, tipo_esoni), key=sortRegioni)\n\n\t\t# Conversione da tupla a tre liste come argomenti nel\n\t\t# dizionario dictEsIn\n\t\t#\n\t\tdictEsIn[transcriptID] = [[], [], []]\n\t\tdictEsIn[transcriptID][idx_starts] \t= [t[idx_starts] for t in regioni]\n\t\tdictEsIn[transcriptID][idx_ends] \t= [t[idx_ends] for t in regioni]\n\t\tdictEsIn[transcriptID][idx_tipo] \t= [t[idx_tipo] for t in regioni]\n\n\treturn dictEsIn", "title": "" }, { "docid": "7d49e759c0486f34cbda9e2a848fc7fc", "score": "0.51667905", "text": "def hierachy_nomenclature(a2_data):\n ret_dic = OrderedDict()\n ret_dic['X'] = OrderedDict()\n ret_dic['X']['name'] = a2_data['xs'].keys()\n ret_dic['X']['N'] = len(a2_data['xs'].keys())\n ret_dic['I'] = OrderedDict()\n ret_dic['I']['name'] = a2_data['xs']['1'].keys()\n ret_dic['I']['N'] = len(a2_data['xs']['1'].keys())\n ret_dic['R'] = OrderedDict()\n ret_dic['R']['name'] = a2_data['xs']['1']['U235'].keys()\n ret_dic['R']['N'] = len(a2_data['xs']['1']['U235'].keys())\n ret_dic['G'] = OrderedDict()\n ret_dic['G']['name'] = a2_data['xs']['1']['U235']['abso'].keys()\n ret_dic['G']['N'] = len(a2_data['xs']['1']['U235']['abso'].keys())\n return ret_dic", "title": "" }, { "docid": "99aa772e6e831900f83bc14aeefc451e", "score": "0.51660085", "text": "def counterdict(self):\n vas = []\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i != \"\":\n vas.append(s_i)\n for ele in enumerate(vas):\n print(ele)\n logging.debug(\"Starting with to\")", "title": "" }, { "docid": "dc65bda882589c02c08db8f5a5c4fdc9", "score": "0.5163177", "text": "def init_dilucijske_jedinice(self):\n dilucije = self.cfg.get_listu_dilucija()\n for dilucija in dilucije:\n unit = self.load_dilucijsku_jedinicu_iz_konfiga(dilucija)\n self.dilucijskeJedinice[dilucija] = unit", "title": "" }, { "docid": "439be2da1020b45bc237e69198e234fe", "score": "0.51554924", "text": "def __init__(self):\n self._main_dictionary = defaultdict(set)", "title": "" }, { "docid": "d5254e9fa285adbc1eb0ed5288d6042d", "score": "0.51479757", "text": "def get_listu_dilucijskih_jedinica(self):\n popis = sorted(list(self.dilucijskeJedinice.keys()))\n return popis", "title": "" }, { "docid": "fe204e2026c25cb05ee6bcf87d359f02", "score": "0.514559", "text": "def unicWords(self):\n words=self.buscaPalavras()\n return self.freqWords(words).keys()", "title": "" }, { "docid": "684434da23f3635a544dbd6b958a76b4", "score": "0.51440954", "text": "def __init__(self):\n self.cnt = {}", "title": "" }, { "docid": "4d368e2da4a5f0386a281321ed0ca3b3", "score": "0.5141006", "text": "def create_verbose_dico(review_strats_w_noreview, item_strategies):\n verbose_dico = {}\n for item_strat in item_strategies:\n verbose_dico[item_strat]={}\n for strat_name in review_strats_w_noreview.keys():\n verbose_dico[item_strat][strat_name] = {}\n for param_value in review_strats_w_noreview[strat_name]:\n verbose_dico[item_strat][strat_name][param_value]={}\n for period in [\"learning\",\"retention\"]:\n verbose_dico[item_strat][strat_name][param_value][period] = []\n return verbose_dico", "title": "" }, { "docid": "26e46bc1d2b24ec27df79288355813f9", "score": "0.5138401", "text": "def dodaj_uredjaj(self, naziv, ure):\n self.uredjaji[naziv] = ure", "title": "" }, { "docid": "4fc2c9b2d556b3de218be180b06dd9d8", "score": "0.51347554", "text": "def fjerner_ugyldige_karakterer():\r\n for i in list(karakterer):\r\n if i not in emne_liste:\r\n print('Ugyldige fag 
som fjernes:', i)\r\n karakterer.pop(i)", "title": "" }, { "docid": "32747facd797346e876d3c802526bde4", "score": "0.5134007", "text": "def UserNot(soup: str, nb: int):\n list_not = notation2(soup, nb)\n noteUser = []\n value = []\n list_total = [' 1', '2', '3', '4', '5']\n\n for z in range(0, len(list_not)):\n dico = {}\n # Delete of the first element because it's not a user\n del list_not[z][0]\n # We look if the next note is a value, if yes we take it because when we scrape, we recover the whole list of the barem \n # (ex: we note a 4, we recover 12345. Each noted category is separated by non-numeric characters.\n for i in range(0, len(list_not[z])-2):\n if len(str(list_not[z][i]).replace(' ', '')) > 1:\n if len(str(list_not[z][i+1]).replace(' ', '')) > 1:\n if list_not[z][i] not in value:\n dico[list_not[z][i]] = list_not[z][i+1]\n value.append(list_not[z][i+1])\n else:\n # The last value of the note list\n j = i\n while str(list_not[z][j+1]) in list_total:\n dico[list_not[z][i]] = list_not[z][j+1]\n j = j + 1\n noteUser.append(dico)\n\n counter_user = 0\n c_user_not_w_NA = 0\n p = notation(soup, nb)\n for k in noteUser:\n value = []\n # Counter of notes for all users\n t = 0\n for key, val in k.items():\n if val != 'N/A':\n if val == '5':\n # The category detected by the previous code takes as a note the value scrapped by the function \"notation\"\n noteUser[counter_user][key] = p[c_user_not_w_NA][t]\n t = t + 1\n if t == len(p[c_user_not_w_NA]):\n c_user_not_w_NA = c_user_not_w_NA + 1\n\n counter_user = counter_user + 1\n return noteUser", "title": "" }, { "docid": "04e69deb751135f0325fbc57536c6e3c", "score": "0.5130357", "text": "def kluisAanvragen():\r\n kluisDict = dictionary()\r\n beginSchermTopTitel['text'] = ''\r\n beginSchermTerug.grid(pady=3, padx=(10, 10), sticky='w', row=1)\r\n\r\n try:\r\n if len(beginSchermEntry.get()) == 16:\r\n for getal in kluisDict:\r\n if kluisDict[getal] is not None and kluisDict[getal][1] == int(beginSchermEntry.get()):\r\n beginSchermTitel['text'] = 'Je hebt al een kluis: nummer ' + str(getal)\r\n return\r\n\r\n with open('FietsStalling.txt', 'r+') as readFile:\r\n for kluis in kluisDict:\r\n if kluisDict[kluis] is None: # kluis toewijzen\r\n beginSchermTitel['text'] = 'Kluis nummer ' + str(kluis)\r\n kluisDict[kluis] = (time.strftime('%d-%m-%Y %H:%M'),\r\n int(beginSchermEntry.get())) # value wordt tijd en OV\r\n readFile.truncate(0)\r\n readFile.seek(0)\r\n for item in kluisDict: # bestand updaten (nieuwe kluis toevoegen)\r\n if kluisDict[item] is not None:\r\n readFile.write(str(item) + '; ' + ''.join(str(kluisDict[item])).strip('{}()\\'\\'')\r\n .replace('\\'', '') + '\\n')\r\n beginSchermEntry.delete(0, END)\r\n return\r\n beginSchermTitel['text'] = 'Geen kluizen vrij'\r\n return\r\n else:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return\r\n except ValueError:\r\n beginSchermTitel['text'] = 'Geen geldige invoer'\r\n return", "title": "" }, { "docid": "3e91b64ac69bed6968e45ac03f74835a", "score": "0.5130021", "text": "def dictagdurb(kind, fname):\n\n with open(fname, 'r') as g:\n g.next()\n g.next()\n m = g.next()\n startdict = agline(m)\n genold = startdict['gen']\n\n f = open(fname)\n f.next()\n f.next()\n d = {}\n y = '1'\n nb = []\n for l in f:\n adict = agline(l)\n kdur = kind + 'dur'\n gen = adict['gen']\n well = adict['well']\n\n if adict['gen'] not in d:\n d[gen] = []\n \n if gen != genold:\n if sum(nb) != 0:\n d[genold].append(sum(nb))\n nb = []\n elif sum(nb) == 0:\n nb = []\n else: \n if adict['well'] != y:\n if sum(nb) 
!= 0:\n d[gen].append(sum(nb))\n nb = []\n elif sum(nb) == 0:\n nb = []\n \n #if adict[kdur] == '':\n #nb.append(0)\n #elif int(adict[kdur]) >= 0:\n #nb.append(int(adict[kdur]))\n #elif adict[ks] == '-':\n #pass\n \n if adict[kdur] == '':\n continue\n elif int(adict[kdur]) > 0:\n nb.append(int(adict[kdur]))\n elif adict[ks] == '-':\n pass\n\n y = adict['well']\n genold = adict['gen']\n \n if sum(nb) != 0:\n d[gen].append(sum(nb))\n\n return(d)", "title": "" }, { "docid": "c830b9fef7a64c1f072cecdf121412c0", "score": "0.51297194", "text": "def create_dico(item_list):\n assert type(item_list) is list\n dico = {}\n for items in item_list:\n for item in items:\n if item not in dico:\n dico[item] = 1\n else:\n dico[item] += 1\n return dico", "title": "" }, { "docid": "c830b9fef7a64c1f072cecdf121412c0", "score": "0.51297194", "text": "def create_dico(item_list):\n assert type(item_list) is list\n dico = {}\n for items in item_list:\n for item in items:\n if item not in dico:\n dico[item] = 1\n else:\n dico[item] += 1\n return dico", "title": "" }, { "docid": "a7cf481c7adbc9a3f9e02bf315114cb6", "score": "0.5129512", "text": "def merge_nw_nnw(self):\n nw_nnw = {}\n nw_dict = self.ontology.heirs_network_dictionary\n nnw_dict = self.model.networks_w_namednw_dict\n for label, things in nw_dict.items():\n nw_nnw[label] = things\n if label in nnw_dict.keys():\n nw_nnw[label] = set(nnw_dict[label])\n return nw_nnw", "title": "" }, { "docid": "39a5163a9d78b568233a5c45ea2ca838", "score": "0.51276994", "text": "def __add_missing_entries_to_dict(self, member_dict: dict) -> dict:\n try:\n self.dict = member_dict\n self.dict['Voorletters'] = member_dict['Voornaam'][0].upper() + '.'\n self.dict['Dag'] = member_dict['GeboorteDatum'].split('-')[0]\n self.dict['Maand'] = member_dict['GeboorteDatum'].split('-')[1]\n self.dict['Jaar'] = member_dict['GeboorteDatum'].split('-')[2]\n self.dict['Postcode'] = member_dict['Postcode'].replace(' ', '')\n r = member_dict['Adres'].split(' ')\n self.dict['Huisnr'] = r[len(r) - 1]\n return self.dict\n\n except Exception as e:\n raise Exception(\"Toevoegen lid in NAS mislukt: \" + str(e))", "title": "" }, { "docid": "6c717f164825adeae953a551f939bdfb", "score": "0.512532", "text": "def game_info(tytuł, gatunek, **cechy):\r\n\tgra = {}\r\n\tgra['Tytuł'] = tytuł\r\n\tgra['Gatunek'] = gatunek\r\n\t\r\n\tfor k, v in cechy.items():\r\n\t\tgra[k] = v\r\n\t\r\n\treturn gra", "title": "" }, { "docid": "78097164e5b50e6c3fdd28ce80e69b90", "score": "0.51244015", "text": "def ludnosc(lista):\n # wynik - lista zawierajaca wynik koncowy dzialania funkcji(lata i wartosci dla poszczegolnych panstw)\n wynik = []\n for panstwo in lista:\n # rok - lista zawierajaca lata\n # wartosc - lista zawierajaca wartosci dla lat\n rok = []\n wartosc = []\n for element in panstwo:\n # sprawdzenie czy klucz posiada odpowiednia wartosc\n if element[1].get('key') == \"EN.POP.DNST\":\n # dodanie roku do listy\n rok.append(int(element[2].text))\n # rozpatrywanie przypadku w ktorym wartosc jest None\n if element[3].text is None:\n wartosc.append(element[3].text)\n else:\n wartosc.append(float(element[3].text))\n # dodawanie list dla poszczegolnych panstw do listy wynikowej\n wynik.append(rok)\n wynik.append(wartosc)\n\n return wynik", "title": "" }, { "docid": "ca6b5af683f60023d9f85029f614cea9", "score": "0.51200736", "text": "def dictogram_dictlist(self):\n for key, value in self.word_dict.items():\n self.word_dict[key] = dictogram.Dictogram(value)\n # print(\"self.word_dict\", self.word_dict)", "title": "" }, { 
"docid": "eb354471ec11fe75c340ca8bf963c801", "score": "0.5118899", "text": "def _get_name(self):\n res = {}\n for rate in self:\n if rate.nature:\n if rate.residence:\n name = 'Persona' + ' ' + 'Natural' + ' ' + 'Residente'\n else:\n name = 'Persona' + ' ' + 'Natural' + ' ' + 'No Residente'\n else:\n if rate.residence:\n name = 'Persona' + ' ' + 'Juridica' + ' ' + 'Domiciliada'\n else:\n name = 'Persona' + ' ' + 'Juridica' + ' ' + \\\n 'No Domiciliada'\n res[rate.id] = name\n return res", "title": "" }, { "docid": "493d044e17741c06dfa3d83e59408292", "score": "0.5108037", "text": "def dane_profilu(imie, nazwisko, **inne_informacje):\r\n inne_informacje['imie'] = imie\r\n inne_informacje['nazwisko'] = nazwisko\r\n return inne_informacje", "title": "" }, { "docid": "7eb53cfce18faedaabc90f2c9cb0590b", "score": "0.5095816", "text": "def tag_nes(dic, max_key_len, sent):\n dic_nes = []\n for begin in range(len(sent.morps)):\n right_bound = _find_right_bound(sent.morps, begin, max_key_len)\n # find pattern and key, longest first\n for end in range(right_bound, begin, -1): # end is exclusive\n text = make_text(sent, begin, end)\n categories = []\n ptn = make_dt_ti_ptn(text)\n if ptn in dic:\n categories = dic[ptn]\n else:\n key = re.sub(r'\\s+', '', text).lower()\n if key in dic:\n categories = dic[key]\n if categories:\n dic_ne_obj = {}\n dic_ne_obj['id'] = len(dic_nes)\n dic_ne_obj['text'] = text\n dic_ne_obj['type'] = categories\n dic_ne_obj['begin'] = begin\n dic_ne_obj['end'] = end-1 # NE's end is inclusive\n dic_nes.append(dic_ne_obj)\n break\n return dic_nes", "title": "" }, { "docid": "985a6f6656b0f9709470ab0edde59183", "score": "0.50840896", "text": "def list_inventory(self):\n\n print('Your inventory contains:')\n #i = 1\n #inv_dict = {}\n for item in self.bag_of_holding:\n if 'casted' not in item.name:\n try:\n print(item.name)\n except:\n pass\n\n #inv_dict[str(i)] = item\n #i += 1\n #return inv_dict", "title": "" }, { "docid": "3faf236c863ef736fe258588e019b101", "score": "0.5073515", "text": "def initDictionnary(self):\n partitions = self.vocabulary.getPartitions()\n for partition in partitions:\n for mod in partition.modalities:\n self.summaryDict[partition.getAttName() + \" : \" + mod] = 0.0\n self.summaryFilteredDict[partition.getAttName() + \" : \" + mod] = 0.0", "title": "" }, { "docid": "7ab55e639a829819955c9b34916f7f1b", "score": "0.5065942", "text": "def __init__(self, n):\n self._dictOut = {}\n self._dictIn = {}\n for i in range(n):\n self._dictOut[i] = []\n self._dictIn[i] = []", "title": "" }, { "docid": "a6ab402c415778f4fc85561284004573", "score": "0.5065442", "text": "def get_grupirane_kanale(self):\n # svi dostupni\n svi = set(self.sviKanali)\n out = list()\n while len(svi): # dokle god ima vrijednosti u setu svih\n grupa = set() # definiramo grupu\n kanal = svi.pop() #dodajemo prvi element iz seta svih (ujedino ga i brišemo iz seta svih) u grupu\n grupa.add(kanal) # dodavanje u grupu\n #pronalazak povezanih\n povezani = self.get_datastore(kanal).koncentracija.povezaniKanali\n for i in povezani:\n grupa.add(i) # kako dodajemo povezane\n svi.discard(i) # tako ih brisemo iz seta svih dostupnih\n out.append(grupa)\n return out", "title": "" }, { "docid": "5937bb6f01f5517eba6ca620980b3462", "score": "0.5063516", "text": "def dodaj_rezervacije(self, rezervacije):\n for r in rezervacije:\n for u in r.seznam_ucilnic:\n for d in r.dnevi_med(self.min_datum, self.max_datum):\n self.rezerviranost_ucilnic[u.pk, d].append(r)", "title": "" }, { "docid": 
"a90a1cdcb54b7fa16fa3ee355c782ea9", "score": "0.5062189", "text": "def loadAndClean(jfile):\n with open(jfile) as json_file:\n data = json.load(json_file)[\"G15\"]\n newDict = {}\n # Print the type of data variable\n \n for entry in data:\n\n if \"version\" in data[entry] and data[entry][\"assessed_by\"] not in filterList:\n\n if data[entry][\"assessed_by\"] in dictTries:\n dictTries[data[entry][\"assessed_by\"]].append(data[entry])\n\n \n\n if len(dictTries[data[entry][\"assessed_by\"]]) == 2:\n ml = dictTries[data[entry][\"assessed_by\"]]\n #vou querer calcular o maior \n # comparisson = \"accuracy\"\n comparisson = \"target_w_penalty\"\n \n if ml[0][comparisson] <= ml[1][comparisson]:\n if ml[0][\"accuracy\"] > 90:\n printSingle(ml[0])\n newDict[entry] = ml[0]\n\n else:\n if ml[1][\"accuracy\"] > 90:\n printSingle(ml[1])\n newDict[entry] = ml[1]\n dictTries[data[entry][\"assessed_by\"]] = []\n\n else:\n dictTries[data[entry][\"assessed_by\"]] = [data[entry]]\n\n\n\n # dictTries[data[\"assessed_by\"]]\n\n # newDict[entry] = data[entry]\n # print(data[entry])\n # print()\n # printDict(newDict)\n return newDict", "title": "" }, { "docid": "469af8a673d271966bdc21e9d26def5a", "score": "0.5062162", "text": "def Notas(*notas, sit = False):\n notasd = dict();\n notasd['Total de notas'] = len(notas);\n notasd['Maior nota'] = max(notas);\n notasd['Menor nota'] = min(notas);\n notasd['Média'] = sum(notas)/len(notas);\n if(sit):\n if(notasd['Média'] > 7):\n notasd['Situação'] = \"Boa\";\n elif(6 <= notasd['Média'] <= 7):\n notasd['Situação'] = \"Razoável\";\n else:\n notasd[\"Média\"] = \"Ruim\";\n for k, e in notasd.items():\n print(f\"{k}: {e}\");", "title": "" }, { "docid": "dcb73b13b85c08df22f41e997d393dd2", "score": "0.5059121", "text": "def estado2dic(self, estado):\n return {self.vertices[i]: (estado[2 * i], estado[2 * i + 1])\n for i in range(len(self.vertices))}", "title": "" }, { "docid": "4fa7197a942b3ece17b84a1086068128", "score": "0.50570524", "text": "def se_dicta(materia):\n return len(materia['cursos']) > 0", "title": "" }, { "docid": "46f0843984bfda889c30f6d7d4b2d8ea", "score": "0.50547785", "text": "def __init__(self):\n self.kids = dict()\n self.val = None\n self.isWord = False", "title": "" }, { "docid": "668945bfadb38b8772dac553a76c87b7", "score": "0.5053799", "text": "def __init__(self,n):\n\t\tself._dict={}\n\t\tfor i in range(n):\n\t\t\tself._dict[i]=[]", "title": "" }, { "docid": "8bb8f8afff0dad74816aa99d04d30e4d", "score": "0.5040588", "text": "def __len__(self):\n return sum(item['cantidad'] for item in self.carro.values())", "title": "" }, { "docid": "342fda8574edfa1458bd9dbd59b94a03", "score": "0.5040097", "text": "def get_nom_complet(self, ids, name):\n\tres = {}\n\tfor persona in self.browse(ids):\n if persona.cognom2:\n cognoms = '%s %s' % (persona.cognom1,\n persona.cognom2)\n else:\n cognoms = persona.cognom1\n res[persona.id] = '%s, %s' % (cognoms, persona.nom)\n\treturn res", "title": "" }, { "docid": "5f43fde7abd5b48418fe240b463aa40b", "score": "0.5033873", "text": "def display_present_ire_endings():\n ire_endings = {\"io\": \"-o\", \"tu\": \"-i\", \"lui\": \"-e\", \"lei\": \"-e\", \"noi\": \"-iamo\", \"voi\": \"-ite\", \"loro\": \"-ono\"}\n for keys in ire_endings:\n print(f'{keys}: {ire_endings[keys]}')", "title": "" }, { "docid": "191ede39195fb2ba522b116ad9c80a76", "score": "0.50336903", "text": "def __init__(self):\n self.kids = [{}]\n self.root = 0\n self.vocabular = set([])", "title": "" }, { "docid": "1739092bb5b6806b07b5d3e8f0b7c7de", "score": 
"0.5032031", "text": "def getNoeuds(self) -> list:\n return self._noeuds", "title": "" }, { "docid": "8ad4c5aed67daffdd4a35383ea65d06e", "score": "0.50314754", "text": "def read_legacy():\n serials = File.read_dic()\n final_dic = OrdDic()\n for name, dic in serials.items():\n inner_dic = OrdDic()\n for serial in dic:\n inner_dic.update({serial: dic[serial]['desc']})\n final_dic.update({name: inner_dic})\n return final_dic", "title": "" }, { "docid": "43e542c374d6660d0a93c0926e2e71f8", "score": "0.5027672", "text": "def to_dict(self):\n gi_dict = {}\n for gi in self.gradual_items:\n gi_dict.update({gi.as_string(): 0})\n return gi_dict", "title": "" }, { "docid": "abf20bf0f5d8750f5c462e61ca2ae8fd", "score": "0.50210166", "text": "def get_ingkey_ndbno_map(session):\n alias_ndbno = {}\n\n for alias, ndbno in session.query(LocalNutritionaliase.ingkey,\n LocalNutritionaliase.ndbno):\n alias_ndbno[alias]=ndbno\n return alias_ndbno", "title": "" }, { "docid": "631bf9bd354b7d2495519e1b9857e9fd", "score": "0.501487", "text": "def preencherJogadores():\n global jogadores\n for x in participantes:\n if x['porta'] != lider['porta']:\n jogadores.append(x)", "title": "" }, { "docid": "24b18031ea958fb6d880fbb4d9fe5471", "score": "0.50034684", "text": "def ordenado_sansanito(orden):\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT nombre, hpactual, hpmax, prioridad\n\t\t\t\tFROM sansanito\n\t\t\t\tORDER BY prioridad %s\"\"\" % (orden)\n\t\t\t\t)\n\tprint_table([hdrs_sansanito[2], hdrs_sansanito[5], hdrs_sansanito[6], hdrs_sansanito[-1]])", "title": "" }, { "docid": "2bcb006fee2d548bf13d22949ecc13e0", "score": "0.5001278", "text": "def provincias_distritos():\n\n rv = OrderedDict()\n q = \"\"\"\n SELECT da.id_distrito,\n da.desc_distrito,\n da.id_seccion,\n da.desc_seccion,\n count(e.id) AS estab_count,\n count(wm.id) AS matches_count\n FROM divisiones_administrativas da\n INNER JOIN establecimientos e\n USING (id_distrito, id_seccion)\n LEFT OUTER JOIN weighted_matches wm\n ON wm.establecimiento_id = e.id\n AND wm.score = 1 and wm.match_source = 1\n GROUP BY da.id_distrito,\n da.desc_distrito,\n da.id_seccion,\n da.desc_seccion\n ORDER BY id_distrito,\n id_seccion\n \"\"\"\n\n for d in db.query(q):\n k = (d['id_distrito'], d['desc_distrito'],)\n if k not in rv:\n rv[k] = []\n rv[k].append((\n d['id_distrito'],\n d['id_seccion'],\n d['desc_seccion'],\n d['estab_count'],\n d['matches_count']))\n return rv", "title": "" }, { "docid": "809662c680e3e264b2bf5861a1009d3c", "score": "0.4995601", "text": "def __init__(self):\n self.d = {}\n self.l = []", "title": "" }, { "docid": "52b4801413ca529ec6ccbf5814efecc6", "score": "0.49919942", "text": "def __init__(self):\r\n self.dic={}", "title": "" } ]
f93c6b5d2fe60a9cdcdbdfcf373aa071
Look through a list of buildrequests and return bug number from push comments
[ { "docid": "374a6dcfd50381706b98c26bda1809c1", "score": "0.7646013", "text": "def GetBugNumbers(self, buildrequests):\n bugs = []\n for value in buildrequests.values():\n br = value.to_dict()\n for comment in br['comments']:\n # we only want the bug specified in try syntax\n if len(comment.split('try: ')) > 1:\n comment = comment.split('try: ')[1]\n bugs = self.bz.bugs_from_comments(comment)\n return bugs", "title": "" } ]
[ { "docid": "900b60e5c518d7f522a1b0d26c2ac663", "score": "0.6280939", "text": "def ProcessPushType(self, revision, buildrequests, flag_check=True):\n push_type = None\n for value in buildrequests.values():\n br = value.to_dict()\n for comments in br['comments']:\n if 'try: ' in comments:\n if flag_check:\n if '--post-to-bugzilla' in comments:\n push_type = \"TRY\"\n else:\n push_type = \"TRY\"\n if 'autoland-' in comments:\n push_type = \"AUTO\"\n return push_type", "title": "" }, { "docid": "c718798911ab239dbb88740d4aa12216", "score": "0.61368936", "text": "def _get_ticket_numbers(c):\n branch = c.run('git rev-parse --abbrev-ref HEAD', hide=True).stdout\n ticket_number = _strip_proj(branch)\n commit_msg = c.run('git log --oneline -1 --pretty=%s', hide=True).stdout\n commit_ticket_number = _strip_proj(commit_msg)\n return commit_ticket_number, ticket_number", "title": "" }, { "docid": "88342ed712f739965ef9c36024c2ba3c", "score": "0.60067105", "text": "def ProcessPushType(self, revision, buildrequests, flag_check=True):\n push_type = None\n max_orange = MAX_ORANGE\n for value in buildrequests.values():\n br = value.to_dict()\n for comments in br['comments']:\n if 'try: ' in comments:\n if flag_check:\n if '--post-to-bugzilla' in comments:\n push_type = \"TRY\"\n else:\n push_type = \"TRY\"\n if '--retry-oranges' in comments:\n push_type = \"RETRY\"\n # eliminate any empty strings from the split\n max_orange = [x for x in\n comments.split('--retry-oranges') if x]\n if not len(max_orange) > 1:\n max_orange = MAX_ORANGE\n continue\n max_orange = max_orange[1].split()[0]\n try:\n max_orange = int(max_orange)\n except ValueError:\n max_orange = MAX_ORANGE\n return push_type, max_orange", "title": "" }, { "docid": "57f2b2f75a4e66df0acd75f67c5d2835", "score": "0.58973086", "text": "def get_summary(config, bugnumber):\n\n bugzilla_url_regex = re.compile(\n re.escape(\"https://bugzilla.mozilla.org/show_bug.cgi?id=\") + r\"(\\d+)$\"\n )\n\n # The user could have pasted in a bugzilla ID or a bugzilla URL\n if bugzilla_url_regex.search(bugnumber.split(\"#\")[0]):\n # that's easy then!\n (bugzilla_id,) = bugzilla_url_regex.search(bugnumber.split(\"#\")[0]).groups()\n bugzilla_id = int(bugzilla_id)\n summary, url = bugzilla.get_summary(config, bugzilla_id)\n return summary, bugzilla_id, url\n\n # The user could have pasted in a GitHub issue URL\n github_url_regex = re.compile(r\"https://github.com/([^/]+)/([^/]+)/issues/(\\d+)\")\n if github_url_regex.search(bugnumber.split(\"#\")[0]):\n # that's also easy\n (\n org,\n repo,\n id_,\n ) = github_url_regex.search(bugnumber.split(\"#\")[0]).groups()\n id_ = int(id_)\n title, url = github.get_title(config, org, repo, id_)\n if title:\n return title.strip(), id_, url\n else:\n return None, None, None\n\n # If it's a number it can be either a github issue or a bugzilla bug\n if bugnumber.isdigit():\n # try both and see if one of them turns up something interesting\n\n repo = config.repo\n state = read(config.configfile)\n fork_name = state.get(\"FORK_NAME\", getpass.getuser())\n if config.verbose:\n info_out(\"Using fork name: {}\".format(fork_name))\n candidates = []\n # Looping over the remotes, let's figure out which one\n # is the one that has issues. 
Let's try every one that isn't\n # your fork remote.\n for origin in repo.remotes:\n if origin.name == fork_name:\n continue\n url = origin.url\n org, repo = parse_remote_url(origin.url)\n github_title, github_url = github.get_title(\n config, org, repo, int(bugnumber)\n )\n if github_title:\n candidates.append((github_title, int(bugnumber), github_url))\n\n bugzilla_summary, bugzilla_url = bugzilla.get_summary(config, bugnumber)\n if bugzilla_summary:\n candidates.append((bugzilla_summary, int(bugnumber), bugzilla_url))\n\n if len(candidates) > 1:\n info_out(\n \"Input is ambiguous. Multiple possibilities found. \"\n \"Please re-run with the full URL:\"\n )\n for title, _, url in candidates:\n info_out(\"\\t{}\".format(url))\n info_out(\"\\t{}\\n\".format(title))\n error_out(\"Awaiting your choice\")\n elif len(candidates) == 1:\n return candidates[0]\n else:\n error_out(\"ID could not be found on GitHub or Bugzilla\")\n raise Exception(bugnumber)\n\n return bugnumber, None, None", "title": "" }, { "docid": "c3f3b010f5f247a547090150945dac3a", "score": "0.5895114", "text": "def scan_and_comment(self):\n\n builds = self.db.query_documents(\n 'build',\n where_clause=\"ifmissingornull(metadata.jira_comments, false)=false\"\n )\n\n for build in builds:\n # Exception: Don't comment on 0.0.0 builds\n if build.version == '0.0.0':\n logger.debug(f'Skipping master build {build.key}')\n\n else:\n for commit_key in build.commits:\n commit = self.db.get_commit(commit_key)\n\n # Exception: Don't commit on testrunner commits\n if commit.project == 'testrunner':\n logger.debug(\n f\"Skipping testrunner commit {commit.sha}\"\n )\n continue\n\n # We actually only need and care about the commit message\n # subject (first line)\n commit.summary = commit.summary.split('\\n', 1)[0]\n\n for ticket in self.get_tickets(commit):\n self.make_comment(ticket, commit, build)\n\n if not self.dryrun:\n build.set_metadata('jira_comments', True)", "title": "" }, { "docid": "a2768c4e271bb9f04bd2a9038c30cce4", "score": "0.5793007", "text": "def comment_on_pull_request(pr_number, slug, token, comment):\n url = '{api_url}/repos/{slug}/issues/{number}/comments'.format(\n api_url=GITHUB_API_URL, slug=slug, number=pr_number)\n response = requests.post(url, data=json.dumps({'body': comment}),\n headers={'Authorization': 'token ' + token})\n msg = 'build_bot published results at https://github.com/{slug}/pull/{number}'.format(\n \tslug=slug, number=pr_number)\n print(msg)\n return response.json()", "title": "" }, { "docid": "e5838a7c218e343f0f6a467caf2b3886", "score": "0.5718329", "text": "def get_latest_bugs(self, project, num=5):\n \n feed = feedparser.parse(\n 'http://feeds.launchpad.net/%s/latest-bugs.atom' % project)\n \n bugs = []\n for i in xrange(num):\n entry = feed.entries[i]\n bugUrl = entry.link\n title = entry.title\n numEnd = title.find(']')\n bugNum = title[1:numEnd]\n bugDesc = title[numEnd+2:].replace('&lt;', '<').replace(\n '&gt;', '>').replace('&amp;', '&')\n html = entry.content[0].value\n \n # Attempt to grab the package, status, and importance from\n # the HTML of the feed entry\n try:\n package, status, importance = [tag.contents[0] for tag \\\n in BeautifulSoup(html).div.table.findAll('tr')[1].findAll('td')[1:4]]\n except Exception:\n package = status = importance = '???'\n \n bugs.insert(0,\n (bugNum, bugDesc, bugUrl, package, status, importance))\n \n return bugs", "title": "" }, { "docid": "4fcd06181c1bb3efb269fe6125fbebf0", "score": "0.56047237", "text": "def match_bugs(s,t):\n status = '' if 
any([l.samebug(t) for l in s.last]) else NEWSTATUS\n bug = None\n bugid=00000\n bugurl = \"none\"\n bugcomment = '<font style=\"BACKGROUND-COLOR: yellow\">FIXME</font>'\n if s.MATCH_BUGS:\n if t.lerror:\n try:\n bug = s.bugs.match(urllib2.urlopen(t.lerror,timeout=s.URLTIMEOUT).read())\n except (urllib2.HTTPError,urllib2.URLError) as e :\n print 'WARNING: the following test log link leads to \"404 page not found\":'\n print ' ',t.lerror\n bug = None\n if not bug and t.lextract:\n try:\n bug = s.bugs.match(urllib2.urlopen(t.lextract,timeout=s.URLTIMEOUT).read())\n except (urllib2.HTTPError,urllib2.URLError) as e :\n print 'WARNING: the following test log link leads to \"404 page not found\":'\n print ' ',t.lextract\n bug = None\n if not bug and t.ltail:\n try:\n bug = s.bugs.match(urllib2.urlopen(t.ltail,timeout=s.URLTIMEOUT).read())\n except (urllib2.HTTPError,urllib2.URLError) as e :\n print 'ERROR: the following test log link leads to \"404 page not found\":'\n print ' ',t.ltail\n print ' ','THIS BUG CANNOT BE MATCHED'\n bug = None\n if bug:\n bugid=bug.id\n bugurl = bug.url()\n bugcomment = bug.fetch_comment()\n return status,bug,bugid,bugurl,bugcomment", "title": "" }, { "docid": "fb04fb60dd5dbaeb80f1c9f7912b25da", "score": "0.5546098", "text": "def get_open_bug_count(self):\n return self.open_bug_count", "title": "" }, { "docid": "ceb6666c196b64688741b468caa24401", "score": "0.5515954", "text": "def get_bugids(tags):\n bugids = []\n for data in tags:\n tds = data.findAll('td')\n bugid = tds[0]\n has_patch = tds[3]\n if len(has_patch.contents) > 2:\n datum = str(has_patch.contents[1])\n if datum and datum.find('has patch') > 0:\n bugids.append(bugid.string)\n return bugids", "title": "" }, { "docid": "bf88ac12c38c10e165d1890d84d5a2cf", "score": "0.55096066", "text": "def _pick_next_build(bldr, requests):\n now = datetime.now()\n if now.hour >= 20 or now.hour < 6:\n priority_category = 'integration-branch'\n else:\n priority_category = 'merge-request'\n for r in requests:\n for c in r.source.changes:\n if c.category == priority_category:\n return r\n return requests[0]", "title": "" }, { "docid": "de913c345d2244edd811bc6e26c7be06", "score": "0.5390712", "text": "def get_pull_requests(api_key=os.getenv('GH_APIKEY'),i=1):\n baseUrl=\"https://api.github.com\"\n endpoint=f'/repos/ironhack-datalabs/datamad0820/pulls?state=all&page={i}'\n url = f\"{baseUrl}{endpoint}\"\n\n\n headers = {\n \"Authorization\": f\"Bearer {api_key}\"\n }\n\n res = requests.get(url, headers=headers)\n #res = requests.get(url, params=query_params, headers=headers)\n print(f\"Request data to {res.url} status_code:{res.status_code}\")\n \n data = res.json()\n if res.status_code != 200:\n \n raise ValueError(f'Invalid Github API call: {data[\"message\"]}\\nSee more in {data[\"documentation_url\"]}')\n \n else:\n print(f\"Requested data to {baseUrl}; status_code:{res.status_code}\")\n \n return res", "title": "" }, { "docid": "ec983d6f82397ce97497e8c99d030be9", "score": "0.5366024", "text": "def bugs(self, irc, msg, args, optlist, project):\n self._trackers(irc, args, msg, optlist, project, 'bugs')", "title": "" }, { "docid": "f0b2565120e9db4bb7ba71fb3d780780", "score": "0.536493", "text": "def get(self, args):\r\n\t\tlog_info('Getting bug %s ..' 
% args.bugid)\r\n\t\ttry:\r\n\t\t\tresult = self.bzcall(self.bz.Bug.get, {'ids':[args.bugid]})\r\n\t\texcept xmlrpclib.Fault as fault:\r\n\t\t\traise BugzError(\"Can't get bug #\" + str(args.bugid) + \": \" \\\r\n\t\t\t\t\t+ fault.faultString)\r\n\r\n\t\tfor bug in result['bugs']:\r\n\t\t\tself.showbuginfo(bug, args.attachments, args.comments)", "title": "" }, { "docid": "24026c11c18946c66b98c8124bd45ece", "score": "0.5334474", "text": "def get_latest_patch(bugid):\n patchid = None\n data = rserver.display('issue%s' % bugid)\n if len(data['files']) > 0:\n patchid = data['files'][-1]\n patch = requests.get('http://bugs.python.org/file%s/' % patchid)\n patchfile = os.path.join('patches', 'file%s' % patchid)\n patch_text = patch.text\n with open(patchfile, 'w') as fobj:\n fobj.write(patch_text)\n print('Wrote %s' % patchfile)\n return patchfile", "title": "" }, { "docid": "91eb403f94e10e5e2e98daec9a0456f5", "score": "0.5308165", "text": "def get_bug_location(self, bugUrl):\n \n html = urllib2.urlopen(bugUrl).read()\n # clean up the HTML a little first... on second thought, don't bother.\n # Commenting out.\n # for char in ['\\n', '\\t']:\n # html = HTML.replace(char, '')\n \n component = re.findall(', uploaded to ([a-z]+) on', html) \n if component is None:\n component = '?'\n else:\n component = component[0]\n \n return component", "title": "" }, { "docid": "17dbb2811bad18f780f750e9f5e175b2", "score": "0.5290475", "text": "def get_builds(zuul_builds_url, since):\n builds = []\n pos = 0\n step = 50\n while not builds or is_build_in_range(builds[-1], since):\n url = \"%s?skip=%d&limit=%d\" % (zuul_builds_url, pos, step)\n print(\"Querying %s\" % url)\n builds += requests.get(url).json()\n pos += step\n return builds", "title": "" }, { "docid": "e7cf8bc44ec46198a38078324e6fa259", "score": "0.52835083", "text": "def load_value(self, review_request_details):\n return review_request_details.get_bug_list()", "title": "" }, { "docid": "aae88518645af7b189bafe689e4cd3b2", "score": "0.5274854", "text": "def identify_intent_pr(self):\n tagged_idx = []\n description_sent_idxs = self._is_description_pr()\n question_sent_idxs = self._is_question(self.bug_comments)\n code_sent_idxs = self._is_code(self.bug_comments)\n solution_sent_idxs = self._is_statement(self.bug_comments, self.args.h_solution, \"solution\")\n info_sent_idxs = self._is_statement(self.bug_comments, self.args.h_info, \"info\")\n for idx in description_sent_idxs:\n tokens = self.bug_comments[idx].split()\n tagged_tokens = ['[DES]'] + tokens\n self.bug_comments[idx] = ' '.join(tagged_tokens)\n tagged_idx.append(idx)\n for idx in question_sent_idxs:\n if idx in tagged_idx:\n pass\n else:\n tokens = self.bug_comments[idx].split()\n tagged_tokens = ['[QS]'] + tokens\n self.bug_comments[idx] = ' '.join(tagged_tokens)\n tagged_idx.append(idx)\n for idx in code_sent_idxs:\n if idx in tagged_idx:\n pass\n else:\n tokens = self.bug_comments[idx].split()\n tagged_tokens = ['[CODE]'] + tokens\n self.bug_comments[idx] = ' '.join(tagged_tokens)\n tagged_idx.append(idx)\n for idx in solution_sent_idxs:\n if idx in tagged_idx:\n pass\n else:\n tokens = self.bug_comments[idx].split()\n tagged_tokens = ['[SOLU]'] + tokens\n self.bug_comments[idx] = ' '.join(tagged_tokens)\n tagged_idx.append(idx)\n for idx in info_sent_idxs:\n if idx in tagged_idx:\n pass\n else:\n tokens = self.bug_comments[idx].split()\n tagged_tokens = ['[INFO]'] + tokens\n self.bug_comments[idx] = ' '.join(tagged_tokens)\n tagged_idx.append(idx)\n for idx in 
range(len(self.bug_comments)):\n if idx not in tagged_idx:\n tokens = self.bug_comments[idx].split()\n tagged_tokens = ['[NON]'] + tokens\n self.bug_comments[idx] = ' '.join(tagged_tokens)", "title": "" }, { "docid": "c316db0330ae5aa9dbe726026e1c6661", "score": "0.5249617", "text": "def bug_versions(self, bug):\n self._queries += 1\n self.fetch_bug_data()\n return self._bug_versions.get(bug, ['~'])", "title": "" }, { "docid": "e9375e1761b37c89b3b3bcc8b4e407fc", "score": "0.5229618", "text": "def get_flake8_comments(revision):\n comments = defaultdict(lambda: [])\n # flake8 needs to be on the path.\n flake8_env = os.environ.copy()\n flake8_env[\"PATH\"] = \"{0}:{1}\".format(VENV_BIN, flake8_env[\"PATH\"])\n\n base_revision = \"{0}^\".format(revision)\n flake8_diff_proc = Popen(\n [FLAKE8_DIFF_PATH, \"--standard-flake8-output\", \"--color\", \"off\", base_revision,\n revision],\n stdin=PIPE, stdout=PIPE, stderr=PIPE, env=flake8_env)\n stdout, stderr = flake8_diff_proc.communicate()\n # Ignore the return code since it will be non-zero if any violations are found. We want\n # to continue in that case. Instead check stderr for any errors.\n if stderr:\n raise Exception(\"Did not expect flake8-diff to write to stderr:\\n{0}\".format(stderr))\n\n # Match output lines like:\n # bin/jenkins/flake8-gerrit-review.py:25:1: F401 'json' imported but unused\n VIOLATION_RE = re.compile(r\"^([^:]*):([0-9]*):([0-9]*): (.*)$\")\n\n for line in stdout.splitlines():\n match = VIOLATION_RE.match(line)\n if not match:\n raise Exception(\"Pattern did not match line:\\n{0}\".format(line))\n file, line, col, details = match.groups()\n line = int(line)\n col = int(col)\n skip_file = False\n for pattern in EXCLUDE_FILE_PATTERNS:\n if pattern.match(file):\n skip_file = True\n break\n if skip_file:\n continue\n comments_for_file = comments[file]\n comment = {\"message\": \"flake8: {0}\".format(details)}\n # Heuristic: if the error is on the first column, assume it applies to the whole line.\n if col == 1:\n comment[\"line\"] = line\n else:\n comment[\"range\"] = {\"start_line\": line, \"end_line\": line,\n \"start_character\": col - 1, \"end_character\": col}\n comments_for_file.append(comment)\n return comments", "title": "" }, { "docid": "5e2316138b18fce9e32207f969ba54bc", "score": "0.519327", "text": "def populate_issues(request):\n project_qs = Project.objects.all()\n\n social = request.user.social_auth.get(provider='github')\n headers = {\n \"Accept\": \"application/vnd.github.v3+json\",\n \"Authorization\": f\"token {social.extra_data['access_token']}\", # Authentication\n }\n uri = APIS['api_contrihub']\n\n for project in project_qs:\n print(\"PROJECT: \", project.name)\n issues_dict = fetch_all_issues(uri, project.name, headers)\n issues = issues_dict['data']\n print(\"COUNT: \", len(issues))\n for issue in issues:\n # TODO: Can be given as ISSUE\n if issue['user']['login'] == DEPENDABOT_LOGIN: # Ignoring issues created by Dependabot\n continue\n if issue.get('pull_request') is not None: # this issue is actually a PR.\n # Source: https://docs.github.com/en/rest/reference/issues#list-repository-issues\n print(\"This issue is a actually a PR\")\n continue\n title, number = issue['title'], issue['number']\n mentor_name, level, points, is_restricted, bonus_value, bonus_description = parse_labels(labels=issue['labels'])\n\n if mentor_name and level: # If mentor name and level labels are present in issue\n api_url, html_url = issue['url'], issue['html_url']\n issue_qs = Issue.objects.filter(number=number, 
project=project)\n # print(\"I: \", number, title, mentor_name, level)\n if issue_qs: # Update if already present\n db_issue = issue_qs.first()\n db_issue.title = title\n db_issue.level = level\n db_issue.points = points\n db_issue.is_restricted = is_restricted\n db_issue.bonus_value = bonus_value\n db_issue.bonus_description = bonus_description\n else: # Else Create New\n db_issue = Issue(\n number=number,\n title=title,\n api_url=api_url,\n html_url=html_url,\n project=project,\n level=level,\n points=points,\n is_restricted=is_restricted,\n bonus_value=bonus_value,\n bonus_description=bonus_description\n )\n\n # print(db_issue)\n try:\n mentor = User.objects.get(username=mentor_name)\n db_issue.mentor = mentor\n except User.DoesNotExist:\n pass\n\n db_issue.save()\n\n return HttpResponseRedirect(reverse('home'))", "title": "" }, { "docid": "89cefcb36ac661ce82e0db82afa7280f", "score": "0.5173719", "text": "def update_bug_chunks(bugs, chunk_size=100):\n numbugs = 0\n for bchunk in chunked(bugs, chunk_size):\n numbugs += len(bugs)\n log.debug(\"Updating %d bugs\", len(bugs))\n update_bugs.delay([b.id for b in bchunk])\n log.debug(\"Total bugs updated: %d\", numbugs)", "title": "" }, { "docid": "92671c8cdfa4d08b374e57f1598fe483", "score": "0.5161539", "text": "def process_errors(s,err):\n total = 0\n res = []\n if len(err)==0:\n res.append(' None')\n return res,total\n else:\n ts,statuses,bugs,bugids,bugurls,bugcomments = [],[],[],[],[],[]\n for t in err:\n total += 1\n status,bug,bugid,bugurl,bugcomment = s.match_bugs(t)\n # separately keep track of \"new\" bug reports. Note that this functionality\n # depends on the user to separately add these bugs via bugs.add_new().\n if bug and bug.new==True and not bug in s.new_bugs:\n s.new_bugs.append(bug)\n ts.append(t)\n statuses.append(status)\n bugs.append(bug)\n bugids.append(bugid)\n bugurls.append(bugurl)\n bugcomments.append(bugcomment)\n # group by bug id\n uniquebugs = list(set(bugids))\n for uid in uniquebugs:\n matchedidx = [i for i,bugid in enumerate(bugids) if bugid==uid]\n # see if all or some tests in this bug group are NEW\n nnew_statuses = [statuses[i] for i in matchedidx if statuses[i]==NEWSTATUS]\n ntotal_statuses = [statuses[i] for i in matchedidx]\n status_summary = NEWSTATUS if len(nnew_statuses)==len(ntotal_statuses) else NEWSTATUS+'(some are old) '\n if len(nnew_statuses)==0: status_summary=''\n # loop over tests in this bug group\n for iorder,i in enumerate(matchedidx):\n t = ts[i]\n lextract = ts[i].lextract if ts[i].lextract else DUMMY_LINK\n lerror = ts[i].lerror if ts[i].lerror else DUMMY_LINK\n llog = ts[i].llog if ts[i].llog else DUMMY_LINK\n ltail = ts[i].ltail if ts[i].ltail else DUMMY_LINK\n # last test with this bug id: print bug summary\n if iorder==len(matchedidx)-1:\n # special handling for the case of one test only affected by this bug\n offset = ' ' if len(matchedidx)>1 else ' - '\n res.append('%s<a href=\"%s\">%s</a> (<a href=\"%s\">err</a>)(<a href=\"%s\">log</a>)(<a href=\"%s\">tail</a>):\\n [<a href=\"%s\">bug #%s</a>] %s%s'%(offset,lextract,ts[i].name,lerror,llog,ltail,bugurls[i],bugids[i],status_summary,bugcomments[i]))\n # for others, just list the bugs, one per line, with comma in the end of each line\n else:\n offset = ' - ' if iorder==0 else ' '\n res.append('%s<a href=\"%s\">%s</a> (<a href=\"%s\">err</a>)(<a href=\"%s\">log</a>)(<a href=\"%s\">tail</a>),'%(offset,lextract,ts[i].name,lerror,llog,ltail))\n return res,total", "title": "" }, { "docid": "6e692d68cb28e7dfed6cecf0385a099b", "score": 
"0.5142043", "text": "def list_build_ids(self):\n url = f'{self.baseurl}/listbuildids'\n\n headers = {'Content-Type':'application/json'}\n payload = {}\n myself = lambda: inspect.stack()[1][3]\n\n self.client_request(myself(), url, headers, payload)\n return", "title": "" }, { "docid": "e3473b1e68f1ec35a4c51c4978bd81a3", "score": "0.5128398", "text": "def get_latest_comment():\n github_init = Github(TOKEN)\n repo = github_init.get_repo(REPO_NAME)\n\n pull_request = repo.get_pull(number=PR_NUMBER)\n print(pull_request)\n comments = pull_request.get_issue_comments()\n comments_list = []\n for comment in comments:\n print(\"comment: \" + comment.body)\n comments_list.append(str(comment.body))\n print('')\n print(f\"latest comment in PR {PR_NUMBER} is: {comments_list[-1]}\")", "title": "" }, { "docid": "d539570b2125c3ccc74e05754112befe", "score": "0.51282597", "text": "def get_misc_comments(revision):\n comments = defaultdict(lambda: [])\n # Matches range information like:\n # @@ -128 +133,2 @@ if __name__ == \"__main__\":\n RANGE_RE = re.compile(r\"^@@ -[0-9,]* \\+([0-9]*).*$\")\n\n diff = check_output([\"git\", \"diff\", \"-U0\", \"{0}^..{0}\".format(revision)],\n universal_newlines=True)\n curr_file = None\n check_source_file = False\n curr_line_num = 0\n for diff_line in diff.splitlines():\n if diff_line.startswith(\"+++ \"):\n # Start of diff for a file. Strip off \"+++ b/\" to get the file path.\n curr_file = diff_line[6:]\n check_source_file = os.path.splitext(curr_file)[1] in SOURCE_EXTENSIONS\n if check_source_file:\n for pattern in EXCLUDE_FILE_PATTERNS:\n if pattern.match(curr_file):\n check_source_file = False\n break\n elif diff_line.startswith(\"@@ \"):\n # Figure out the starting line of the hunk. Format of unified diff is:\n # @@ -128 +133,2 @@ if __name__ == \"__main__\":\n # We want to extract the start line for the added lines\n match = RANGE_RE.match(diff_line)\n if not match:\n raise Exception(\"Pattern did not match diff line:\\n{0}\".format(diff_line))\n curr_line_num = int(match.group(1))\n elif diff_line.startswith(\"+\") and check_source_file:\n # An added or modified line - check it to see if we should generate warnings.\n add_misc_comments_for_line(comments, diff_line[1:], curr_file, curr_line_num)\n curr_line_num += 1\n return comments", "title": "" }, { "docid": "8d3fc680de8782b57912bf356cedc98e", "score": "0.5119313", "text": "def pushBuild(self, buildnum: int) -> None:\n ...", "title": "" }, { "docid": "283f3c7044175ddd9e86ab9a13d5837e", "score": "0.5113996", "text": "def getBuildNumber(fpga, board):\n fpga.select_device(board)\n buildNumber = fpga.build_number()\n return buildNumber", "title": "" }, { "docid": "3ed5e6d33cc49ed2ed50e1bd02f55e01", "score": "0.50984067", "text": "def _issue_recorded(testcase):\n return testcase.github_repo_id and testcase.github_issue_num", "title": "" }, { "docid": "b542c2c2bdd0314fb7f7fcc9d26d4511", "score": "0.50951564", "text": "def _get_version_number(self):\n pattern = re.compile(self.config['version_regex'])\n match = pattern.search(self.pull_request_title)\n\n if match:\n return match.group()\n\n return", "title": "" }, { "docid": "91641a1e4ea5231a0f57d5cadea944ca", "score": "0.5060297", "text": "def FindBugFile( bugnumber ):\n if not bugnumber: return\n\n bugnum_str = \"%04d\" % int( bugnumber )\n\n bugs_root = '/math/Admin/Bugs'; # -- bugs subdirs are given relative to this\n bugs_dirs = 
('OpenReports/Open.Bug','ResolvedReports/Resolved.Bug','ClosedReports/Closed.Bug','OpenReports/Open.Suggest','ResolvedReports/Resolved.Suggest','ClosedReports/Closed.Suggest','Hold')\n\n for sub_dir in bugs_dirs:\n bug_file = bugs_root + '/' + sub_dir + '/' + bugnum_str\n if os.path.exists( bug_file ):\n return bug_file\n\n return", "title": "" }, { "docid": "e8b3e3d6565e101e994b29bc3b60d7bf", "score": "0.5057987", "text": "def breaking_change(number, cli=False):\n import json\n import requests\n import os\n from github import Github\n\n comp_base = \"https://www.home-assistant.io/components/\"\n pull_base = \"https://github.com/home-assistant/home-assistant/pull/\"\n github = Github(os.environ[\"GHTOKEN\"])\n repo = github.get_repo(\"home-assistant/home-assistant.io\")\n posts = repo.get_dir_contents(\"source/_posts\", \"current\")\n this_post = None\n for post in posts:\n if \"release\" in post.path:\n name = post.path.split(\"/\")[-1].split(\".\")[0]\n name = name.split(\"-\")\n rel_number = name[-1]\n if rel_number == number:\n this_post = post.html_url\n if this_post is None:\n print(\"Release for\", number, \"not found\")\n return\n url = this_post\n url_data = requests.get(url).text.split(\"\\n\")\n raw_changes = []\n changes = {}\n changes[\"version\"] = \"0.{}.x\".format(url.split(\".markdown\")[0].split(\"-\")[-1])\n changes[\"data\"] = []\n control = []\n for line in url_data:\n if \"(breaking change)\" in line:\n raw_changes.append(line)\n for change in raw_changes:\n if change[0:3] == \"<p>\":\n pass\n else:\n this = {}\n try:\n pull = str(change)\n pull = pull.split(\"home-assistant/home-assistant/pull/\")[1]\n pull = pull.split('\"')[0]\n except:\n pull = None\n if pull not in control and pull is not None:\n prlink = \"{}{}\".format(pull_base, pull)\n try:\n split = '<a href=\"/home-assistant/home-assistant.io/blob/'\n split += \"current/components/\"\n component = str(change)\n component = component.split(split)[1]\n component = component.split('\">')[0]\n except:\n component = None\n doclink = \"{}{}\".format(comp_base, component)\n if len(change.split(\"<li>\")) == 1:\n desc = change.split(\"<li>\")[0]\n else:\n desc = change.split(\"<li>\")[1]\n desc = desc.split(\"(<a \")[0]\n desc = desc.replace(\"</code>\", \"\")\n desc = desc.replace('<code class=\"highlighter-rouge\">', \"\")\n desc = desc.replace(\"\\u2019\", \"`\")\n desc = desc.replace(\"\\u201c\", \"\")\n desc = desc.replace(\"\\u201d\", \"\")\n this[\"pull_request\"] = pull\n this[\"prlink\"] = prlink\n this[\"component\"] = component\n this[\"doclink\"] = doclink\n this[\"description\"] = desc\n changes[\"data\"].append(this)\n control.append(pull)\n if cli:\n data = json.dumps(changes, sort_keys=True, indent=4, ensure_ascii=True)\n print(data)\n return changes", "title": "" }, { "docid": "a424f1a5497277d052c1ae2330a8e064", "score": "0.5057061", "text": "def new_bugs(s):\n return [bug for bug in s.bugs if bug.new==True]", "title": "" }, { "docid": "7ef15412e004d1ddebbff840a110c8e8", "score": "0.50560343", "text": "def add_comments_to_issue(github_issue, gid):\n\n start_index = 1\n max_results = GOOGLE_MAX_RESULTS\n\n # Retrieve existing Github comments, to figure out which Google Code comments are new\n\n existing_comments = [ comment.body for comment in github_issue.get_comments() ]\n\n # Retain compatibility with earlier versions of migrateissues.py\n\n existing_comments = [ re.sub(r'^(.+):_\\n', r'\\1_\\n', body) for body in existing_comments ]\n\n # Retrieve comments in blocks of GOOGLE_MAX_RESULTS until 
there are none left\n\n while True:\n\n query = gdata.projecthosting.client.Query(start_index = start_index, max_results = max_results)\n comments_feed = gc.get_comments(google_project, gid, query = query)\n\n # Filter out empty and otherwise unnecessary comments, unless they contain the\n # 'migrated into' update for a duplicate issue; we'll generate a special Github\n # comment for those.\n\n comments = [ comment for comment in comments_feed.entry if should_migrate_comment(comment) and format_comment(comment) not in existing_comments ]\n\n # Add any remaining comments to the Github issue\n\n if not comments:\n break\n if start_index == 1:\n output(\", adding comments\")\n for comment in comments:\n add_comment_to_github(comment, github_issue)\n output(\".\")\n\n start_index += max_results", "title": "" }, { "docid": "b0169f0c46062d5bcad0e5de4786cd70", "score": "0.50455046", "text": "def get_blended_project_id(pull_request: PrDict) -> Optional[int]:\n m = re.search(r\"\\[\\s*BD\\s*-\\s*(\\d+)\\s*\\]\", pull_request[\"title\"])\n if m:\n return int(m[1])\n else:\n return None", "title": "" }, { "docid": "de6418dbb03dceb023713e2cdf2d158e", "score": "0.502591", "text": "def callback(self, url):\n for item in re.findall(r'[0-9]+', url):\n url = 'https://bugs.launchpad.net/bugs/%s' % item\n return(url)", "title": "" }, { "docid": "dcdd938ba428d13f8805bf19e8b4e7e7", "score": "0.5011033", "text": "def revset_bug(repo, subset, x):\n err = _('bug() requires an integer argument.')\n bugstring = revset.getstring(x, err)\n\n try:\n bug = int(bugstring)\n except Exception:\n raise ParseError(err)\n\n def fltr(x):\n # We do a simple string test first because avoiding regular expressions\n # is good for performance.\n desc = repo[x].description()\n return bugstring in desc and bug in parse_bugs(desc)\n\n return subset.filter(fltr)", "title": "" }, { "docid": "34c20e43bb62d08c3927643cd4c560e6", "score": "0.5000896", "text": "def make_comment(self, ticket, commit, build):\n\n # Exception: Don't bother with ASTERIXDB tickets since\n # they're on a different JIRA\n if ticket.startswith(\"ASTERIXDB\"):\n logger.debug(f'Skipping ticket {ticket}')\n return\n\n try:\n jticket = self.jira.issue(ticket)\n\n except JIRAError as e:\n if e.status_code == 404:\n logger.info(f\"commit references non-existent ticket {ticket}\")\n\n else:\n logger.warning(\n f\"error loading JIRA issue ticket {ticket}: {e.text}\"\n )\n\n return\n\n org = commit.remote.split('/')[3]\n url = f'https://github.com/{org}/{commit.project}/commit/{commit.sha}'\n message = (\n f\"Build {build.key} contains {commit.project} \"\n f\"commit [{commit.sha[0:7]}|{url}] with commit message:\\n\"\n f\"{commit.summary}\"\n )\n\n if self.dryrun:\n logger.info(f'(Not) posting Jira comment on {ticket}:\\n{message}')\n else:\n self.jira.add_comment(jticket, message)\n logger.info(f'Posting Jira comment on {ticket}:\\n{message}')", "title": "" }, { "docid": "07d709def82dfeb56ec1d6c9f0a329f7", "score": "0.49950415", "text": "def get_build_list():\n r = requests.get(fix_url(config.root_url + \"api/json\"))\n build_list = r.json()\n return build_list", "title": "" }, { "docid": "c465acbadd69a80fefa4d0015ce7d37c", "score": "0.498176", "text": "def rest_get_build_status(script, commit):\n j = script.rest(\"GET\", \"/rest/build-status/latest/commits/stats/%s\" % commit)\n if is_http_ok():\n return j", "title": "" }, { "docid": "6195bcfc8e1215d0411143913641b38d", "score": "0.49790078", "text": "def compute_selected_bug_fixes(commits, min_order=False, 
max_order=False,\n legacy_cutoff=0):\n if min_order or max_order:\n order_to_time = dict([[c['order'], c['date']]\n for c in commits.values() if 'order' in c])\n\n if min_order:\n if min_order in order_to_time:\n min_time = order_to_time[min_order]\n else:\n min_time = 0\n else:\n min_time = 0\n min_time = max(min_time, legacy_cutoff)\n\n if max_order:\n if max_order in order_to_time:\n max_time = order_to_time[max_order]\n else:\n max_time = max(order_to_time.values())\n else:\n max_time = max([c['date'] for c in commits.values()])\n\n return len([k for k, c in commits.items()\n if 'tagged_bug_fix' in c\n and c['date'] >= min_time\n and c['date'] <= max_time])", "title": "" }, { "docid": "11d52e08009879ff0421d952778f2800", "score": "0.49789813", "text": "def test_get_pullrequests_for_commit(self):\n pass", "title": "" }, { "docid": "69d7abd5de29ddfe31050821eff213a7", "score": "0.49526575", "text": "def get_latest_avail(test = False):\n build_num = 0\n result = open_url(WME_JENKINS + DAILY_JOB + \"/api/json\", args.token)\n builds = result[\"builds\"]\n for build in builds:\n findBuilds = 0\n buildNum = build[\"number\"]\n for sub in build[\"subBuilds\"]:\n if sub[\"jobName\"] in BUILD_JOBS and sub[\"result\"] == \"SUCCESS\":\n findBuilds += 1\n if findBuilds == len(BUILD_JOBS):\n build_num = get_downlouad_url(buildNum)\n if(build_num == 0):\n continue\n else:\n if(test):\n return buildNum\n print(\"daily build number is: %d \" % buildNum)\n break;\n return build_num", "title": "" }, { "docid": "919b44a5887d94e0f1d6271fc9748187", "score": "0.49518502", "text": "def jira():", "title": "" }, { "docid": "bda4b1603a80d1be3f8555925221be64", "score": "0.49354175", "text": "def get_all_comments_from_submission(submission,\n limit=32,\n print_updates=False):\n\n try:\n submission.replace_more_comments(limit=limit, threshold=1)\n return submission.comments\n except ValueError as e:\n print(\"ValueError:\", e)\n\n return submission.comments", "title": "" }, { "docid": "b88e0a718cbcf41194729496b9392d23", "score": "0.4926054", "text": "def reviews(ctx):\n root = get_root()\n requests = root.get_review_requests()\n for request in requests:\n print(\"%(id)s - %(summary)s Updated:%(last_updated)s\" % request)\n for bug in request['bugs_closed']:\n print(\"Bug: %s\" % bug)\n print(\"%(description)s\\n\" % request)", "title": "" }, { "docid": "6ea094e3b55a4bbe97e50be10fec23ce", "score": "0.49250218", "text": "def getChangelogNotes(version, auth):\n inform(\"Fetching changelog notes\")\n\n # REST url for issues specific for the release\n url = 'https://jira.esss.lu.se/rest/api/2/search?jql=project=CSSTUDIO AND fixVersion=\"ESS CS-Studio '+version+'\"'\n\n headers = {\"Content-Type\":\"application/json\"}\n response = requests.get(url, auth=auth, headers=headers)\n data = response.json()\n note_list = []\n pattern = re.compile(\"CSS-CE #[0-9]+\")\n\n # `CSS-CE #XXX` is a merge from the community version. Sort `note_list`\n # with CSS-CE merges first.\n for issue in data[\"issues\"]:\n summary = htmlEscape(issue[\"fields\"][\"summary\"])\n if list(pattern.findall(summary)):\n note_list.insert(0,\"<li>\"+summary+\"</li>\")\n else:\n note_list.append(\"<li>\"+summary+\"</li>\")\n\n # Now that the comment_list is sorted, put them into one string to fit the\n # format expected by the prepare-realease.sh script\n notes_str = \"\"\n for note in note_list:\n notes_str += note\n\n # The `prepare-release.sh` script will put <li></li> around the note\n # string. 
Since several notes may be used, <li></li> must be added around\n # each note as above. To satisfy the `prepare-release.sh` script however,\n # the first <li> and the last </li> of `notes_str` must be excluded. One\n # may be tempted to modify the `prepare-release.sh` script instead, though\n # as it is made by, and spread amongst, the community it is decided to be\n # kept intact for the time being.\n formatted_notes = notes_str[4:-5]\n\n return formatted_notes", "title": "" }, { "docid": "3011939074814d706cf39e5dc3abb2e9", "score": "0.49226734", "text": "def show_bug(bot, trigger, match=None):\n match = match or trigger\n domain = match.group(1)\n if domain not in bot.config.bugzilla.domains:\n return\n url = 'https://%s%sctype=xml&%s' % match.groups()\n data = web.get(url, dont_decode=True)\n bug = etree.fromstring(data).find('bug')\n\n message = ('[BUGZILLA] %s | Product: %s | Component: %s | Version: %s | ' +\n 'Importance: %s | Status: %s | Assigned to: %s | ' +\n 'Reported: %s | Modified: %s')\n\n resolution = bug.find('resolution')\n if resolution is not None and resolution.text:\n status = bug.find('bug_status').text + ' ' + resolution.text\n else:\n status = bug.find('bug_status').text\n\n message = message % (\n bug.find('short_desc').text, bug.find('product').text,\n bug.find('component').text, bug.find('version').text,\n (bug.find('priority').text + ' ' + bug.find('bug_severity').text),\n status, bug.find('assigned_to').text, bug.find('creation_ts').text,\n bug.find('delta_ts').text)\n bot.say(message)", "title": "" }, { "docid": "cca283990a7684ae3f8d05502ef19b9c", "score": "0.49222353", "text": "def grab(self):\n\n bug_ids = flatten([self.get_remote_bug_ids_to_read(),\n self.get_remote_bug_ids_already_stored()])\n\n for bug_id in bug_ids:\n print bug_id\n bug = self.create_bug_object_for_remote_bug_id(bug_id)\n\n # If there is already a bug with this canonical_bug_link in the DB, just delete it.\n bugs_this_one_replaces = mysite.search.models.Bug.all_bugs.filter(canonical_bug_link=\n bug.canonical_bug_link)\n for delete_me in bugs_this_one_replaces:\n delete_me.delete()\n\n print bug\n # With the coast clear, we save the bug we just extracted from the Miro tracker.\n bug.save()", "title": "" }, { "docid": "b2deb653b91469a7b469de32bb78e2da", "score": "0.49175602", "text": "def _get_pull_request_title_and_number(event_path):\n with open(event_path, 'r') as json_file:\n # This is just a webhook payload available to the Action\n data = json.load(json_file)\n title = data[\"pull_request\"]['title']\n number = data['number']\n\n return title, number", "title": "" }, { "docid": "849c4882adbb27a8e8fa7ec00200c000", "score": "0.4908123", "text": "def test(config, bugnumber):\n state = read(config.configfile)\n credentials = state.get(\"BUGZILLA\")\n if not credentials:\n error_out(\"No API Key saved. Run: gg bugzilla login\")\n if config.verbose:\n info_out(f\"Using: {credentials['bugzilla_url']}\")\n\n if bugnumber:\n summary, _ = get_summary(config, bugnumber)\n if summary:\n info_out(\"It worked!\")\n success_out(summary)\n else:\n error_out(\"Unable to fetch\")\n else:\n url = urllib.parse.urljoin(credentials[\"bugzilla_url\"], \"/rest/whoami\")\n assert url.startswith(\"https://\"), url\n\n response = requests.get(url, params={\"api_key\": credentials[\"api_key\"]})\n if response.status_code == 200:\n if response.json().get(\"error\"):\n error_out(f\"Failed! 
- {response.json()}\")\n else:\n success_out(json.dumps(response.json(), indent=2))\n else:\n error_out(f\"Failed to query - {response.status_code} ({response.json()})\")", "title": "" }, { "docid": "387b9c499c1995e14f5c8844b9e75461", "score": "0.49021327", "text": "def build_merged_pull_requests(self, result):\n content = ''\n\n sorted_results = self.get_repositories(result['items'])\n core_items = sorted_results.get('PrestaShop')\n\n if (core_items is None):\n return ''\n\n grouped_core_items = self.sort_core_repositories(core_items)\n\n sorted_core_items = self.custom_sort(grouped_core_items, CORE_BRANCHES)\n\n category_order = CATEGORIES.keys()\n\n for branch, category_items in sorted_core_items.items():\n content += \"\\n\\n\\n## Code changes in the '\" + branch + \"' branch\"\n\n sorted_category_items = self.custom_sort(category_items, category_order)\n\n for category, items in sorted_category_items.items():\n category_name = CATEGORIES[category] if category in CATEGORIES.keys() else category\n\n if category in CATEGORIES_REJECT_LIST:\n continue\n\n content += \"\\n\\n\\n### \" + category_name\n for item in items:\n line = '* [#{pull_request_number}]({pull_request_url}): {pull_request_title}{thanks}'.format(\n pull_request_number=item['number'],\n pull_request_url=item['html_url'],\n pull_request_title=item['title'],\n thanks=self.thanks(item['user'])\n )\n\n content += \"\\n\" + line\n\n del sorted_results['PrestaShop']\n\n content += \"\\n\\n\\n## Code changes in modules, themes & tools\"\n\n for repository, items in sorted_results.items():\n content += \"\\n\\n\\n### \" + repository\n for item in items:\n line = '* [#{pull_request_number}]({pull_request_url}): {pull_request_title}{thanks}'.format(\n pull_request_number=item['number'],\n pull_request_url=item['html_url'],\n pull_request_title=item['title'],\n thanks=self.thanks(item['user']),\n )\n\n content += \"\\n\" + line\n\n return content", "title": "" }, { "docid": "8afdc602bc635a64693ab3338a9cf14b", "score": "0.4900217", "text": "def GetInfo( bugnums, attributes='', db='bugstats', output='dict' ):\n if not bugnums: return {}\n\n if isinstance( bugnums, int ): bugnums = [ bugnums ]\n if isinstance( bugnums, basestring): bugnums = bugnums.replace(' ','').split(',')\n if isinstance( bugnums, (list,tuple) ):\n temp = map( str, bugnums );\n bugnums = map( int, filter(str.isdigit, temp) )\n\n if not bugnums: return {}\n\n if not attributes: attributes = 'summary, status, resolution, priority'\n\n attr_str = attributes.replace('_','').lower()\n attr_str = re.sub( r'(bugid|bugnumber)', '', attr_str ) # -- get rid of bug id/number (will add later anyway)\n\n attr_list = MapAttrNames( attr_str, 'list' )\n attr_get = SortAttrNames( attr_list )\n\n dbc, newQ = Databases.GetDBC( db )\n\n buginfo = _get_single_value_attr( dbc, attr_get, bugnums )\n\n if 'multi_general' in attr_get and attr_get['multi_general']: # -- multi valued general attributes\n for attr in attr_get['multi_general']:\n _attach_multi_value_attr( dbc, buginfo, attr, attr, bugnums )\n\n if 'multi_person' in attr_get and attr_get['multi_person']: # -- multi valued personal attributes\n for attr in attr_get['multi_person']:\n _attach_multi_value_attr( dbc, buginfo, attr, 'person', bugnums )\n\n if newQ: dbc.close()\n\n if output == 'list':\n buginfo_list = []\n for bug in bugnums:\n if bug in buginfo: buginfo_list.append( buginfo[bug] )\n return buginfo_list\n else:\n return buginfo", "title": "" }, { "docid": "c9689d9f10127393944dfe9ed29945e9", "score": "0.489903", 
"text": "def get_build(self):\n return int(self._request(\"build\")[\"id\"])", "title": "" }, { "docid": "266f2adc713391d46cc1112d3abe8855", "score": "0.4892412", "text": "def get_downlouad_url(parent_num):\n build_num = 0\n if parent_num <= 0: return 0\n\n result = open_url(\"%s%s/%d/api/json\" % (WME_JENKINS, DAILY_JOB, parent_num), args.token)\n actions = result[\"actions\"]\n params = None\n for action in actions:\n if(\"parameters\" in action):\n params = action[\"parameters\"]\n break\n #print(params)\n branch_matched = False\n for param in params:\n if(param[\"name\"] == \"wme_git_branch\" and param[\"value\"] == args.branch):\n branch_matched = True\n break\n if(not branch_matched):\n return 0\n \n subBuilds = result[\"subBuilds\"]\n for sub_build in subBuilds:\n if(sub_build[\"jobName\"] == job_name):\n #print(sub_build)\n build_num = sub_build[\"buildNumber\"]\n break\n return int(build_num)", "title": "" }, { "docid": "aac664f902de22189f20ea9ff4a0cf17", "score": "0.488885", "text": "def get_linked_issues(repo, issue):\n linked_issues_list = []\n for comment in issue.get_comments():\n split_list = comment.split('#')\n for linked_issue in split_list:\n temp_issue = linked_issue(repo, linked_issue[:6])\n if temp_issue is not None:\n linked_issues_list.append([linked_issue[:6], temp_issue.html_url])\n\n # TODO\n linked_issues = ''\n return linked_issues", "title": "" }, { "docid": "4b7aebf3297f6a17c0242cc91580a238", "score": "0.4888015", "text": "def search_comments_issues(self, key):\n result = {}\n url = os.path.join(self.url, os.path.join(ISSUE_URI, os.path.join(key, 'comment')))\n headers = {'content-type': CONTENT_TYPE}\n\n response = requests.get(url=url, headers=headers, auth=HTTPBasicAuth(self.user, self.password), verify=False)\n\n info = json.loads(response.text, strict=False)\n\n result['body'] = info['comments']\n result['key'] = key\n\n return result", "title": "" }, { "docid": "0181cf30c3290bb00e7569af83c7668f", "score": "0.48858", "text": "def set_open_bug_count(self, count):\n self.open_bug_count = count", "title": "" }, { "docid": "6c1ade8714399337c4b9fb22976db594", "score": "0.48853102", "text": "def _get_bugs_from_current_status_tab( dbc, constr_single, join_see_also=False, lower=None, upper=None, bugs_list=None ):\n if isinstance(bugs_list,(list,tuple)) and not bugs_list: return [] # -- empty bugs list in bugs constraint\n\n if join_see_also:\n table = '(current_status left join see_also_assign on current_status.bug_id=see_also_assign.bugnumber) left join see_also on see_also_assign.see_also_id=see_also.id'\n else:\n table = 'current_status'\n\n constr_elems = [ constr_single ]\n if lower: constr_elems.append( 'bug_id >= ' + str(lower) )\n if upper: constr_elems.append( 'bug_id <= ' + str(upper) )\n if bugs_list: constr_elems.append( 'bug_id in (' + ','.join(map(str,bugs_list)) + ')' )\n\n if not constr_elems: return None # -- not an empty list which is the same as no match\n\n constraint = ' and '.join( constr_elems )\n\n query = 'select distinct current_status.bug_id as bugnumber from ' + table + ' where ' + constraint\n cursor = dbc.cursor()\n cursor.execute( query )\n\n rows = cursor.fetchall()\n def ext(a): return a['bugnumber']\n bugs = map( ext, rows )\n\n cursor.close()\n\n return bugs", "title": "" }, { "docid": "26de131c350d00beb08879b77448aaf6", "score": "0.4876785", "text": "def comments(description):\n if not description:\n return -1\n\n pat = '(\\d+) comments'\n match = re.search(pat, description)\n if match:\n return match.group(1)\n else:\n return 
-1", "title": "" }, { "docid": "6bbfee81bcc3af0e66aa6ec31077bd96", "score": "0.48638305", "text": "def _get_bug_url(self, bug_id):\n review_request = self.review_request_details.get_review_request()\n repository = self.review_request_details.repository\n local_site_name = None\n bug_url = None\n\n if review_request.local_site:\n local_site_name = review_request.local_site.name\n\n try:\n if (repository and\n repository.bug_tracker and\n '%s' in repository.bug_tracker):\n bug_url = local_site_reverse(\n 'bug_url', local_site_name=local_site_name,\n args=[review_request.display_id, bug_id])\n except NoReverseMatch:\n pass\n\n return bug_url", "title": "" }, { "docid": "77819794906210b81eee1bd1bcd0015d", "score": "0.48577383", "text": "def get_specific_build(project_id, release_id, build_id):\n return JsonResponse(get_build(get_release(get_project(project_id), release_id), build_id))", "title": "" }, { "docid": "d7f41c17777bf8e5f832ad0d3a912e0e", "score": "0.48464587", "text": "def get_closed_bug_count(self):\n return self.closed_bug_count", "title": "" }, { "docid": "3b5284667d1306a371e68e36606bcd60", "score": "0.4826385", "text": "def recent_comments(context):\n\n global DISQUS_FORUM_ID\n settings = context[\"settings\"]\n disqus_key = settings.COMMENTS_DISQUS_KEY\n disqus_shortname = settings.COMMENTS_DISQUS_SHORTNAME\n latest = settings.COMMENTS_NUM_LATEST\n context[\"comments\"] = []\n post_from_comment = lambda comment: int(comment[\"thread\"][\"identifier\"][0])\n\n def disqus_api(method, **args):\n \"\"\"\n Make a call to the Disqus API, parsing the JSON data into Python.\n \"\"\"\n args.update({\"user_api_key\": disqus_key, \"api_version\": \"1.1\"})\n url = \"http://disqus.com/api/%s/?%s\" % (method, urlencode(args))\n response = loads(urlopen(url).read())\n return response[\"message\"] if response[\"succeeded\"] else []\n\n if disqus_shortname and disqus_key:\n if DISQUS_FORUM_ID is None:\n forums = disqus_api(\"get_forum_list\")\n for forum in forums:\n if forum[\"shortname\"] == disqus_shortname:\n DISQUS_FORUM_ID = forum[\"id\"]\n if DISQUS_FORUM_ID is not None:\n comments = disqus_api(\"get_forum_posts\", forum_id=DISQUS_FORUM_ID,\n limit=latest, exclude=\"spam,killed\")\n posts = Post.objects.in_bulk(map(post_from_comment, comments))\n for comment in comments:\n try:\n blog_post = posts[post_from_comment(comment)]\n except KeyError:\n blog_post = None\n context[\"comments\"].append({\n \"name\": comment[\"author\"][\"display_name\"],\n \"email\": comment[\"author\"][\"email\"],\n \"email_hash\": comment[\"author\"][\"email_hash\"],\n \"body\": comment[\"message\"],\n \"time_created\": datetime.strptime(comment[\"created_at\"],\n \"%Y-%m-%dT%H:%M\") - timedelta(seconds=timezone),\n \"post\": blog_post,\n })\n else:\n context[\"comments\"] = Comment.objects.all().select_related(\n ).order_by(\"-id\")[:latest]\n return context", "title": "" }, { "docid": "1ecbb987624e6993d72dc0b0fed8c6e1", "score": "0.48148477", "text": "def build_stats_pull_requests(self, opened, closed, merged):\n content = '''\nPull requests:\n\nOpened: {total_opened_pull_requests}\nPrestaShop:\n{opened_branches}\nOthers:\n{opened_repositories}\n\nClosed: {total_closed_pull_requests}\nPrestaShop:\n{closed_branches}\nOthers:\n{closed_repositories}\n\nMerged: {total_fixed_pull_requests}\nPrestaShop:\n{merged_branches}\nOthers:\n{merged_repositories}\n '''.format(\n total_opened_pull_requests=opened['total_count'],\n opened_branches=self.get_items_total_count(opened['core']['branches']),\n 
opened_repositories=self.get_items_total_count(opened['repositories']),\n total_closed_pull_requests=closed['total_count'],\n closed_branches=self.get_items_total_count(closed['core']['branches']),\n closed_repositories=self.get_items_total_count(closed['repositories']),\n total_fixed_pull_requests=merged['total_count'],\n merged_branches=self.get_items_total_count(merged['core']['branches']),\n merged_repositories=self.get_items_total_count(merged['repositories']),\n )\n\n return content", "title": "" }, { "docid": "b57dcd748a74a935dd37e7d5162c7f0d", "score": "0.48113206", "text": "def match(s,log):\n #print log\n for bug in s.bugs:\n nmatches = 0\n for pattern in bug.patterns:\n if re.search(pattern,log):\n nmatches += 1\n if nmatches == len(bug.patterns):\n return bug\n return None", "title": "" }, { "docid": "bab717887696dce834512cf12faf0c56", "score": "0.48066452", "text": "def get_my_issues():\n access_token = _get_access_token()\n return access_token.search_issues(f'author:{GITHUB_HANDLE}')", "title": "" }, { "docid": "b4bb9708e388673903b617ecdc1d2f15", "score": "0.48048759", "text": "def query_builds(\n op_sys: str,\n version: str,\n verbose: bool = False,\n) -> list[str]:\n version_url = f\"{OB_URL}/{version}\"\n version_page = requests.get(version_url)\n version_soup = BeautifulSoup(version_page.text, features=\"html.parser\")\n\n # TODO: old version of Schrödinger Suite use different numbering (e.g. build08)\n builds = natsorted(li.text[-3:] for li in version_soup.find_all(attrs={\"class\": \"build\"}))\n if verbose:\n print(f\"{version} Build:\\n{builds}\\n\")\n\n return builds", "title": "" }, { "docid": "e16116e683dcf91afc9f1097c5891d13", "score": "0.47990376", "text": "def get_comments(fname):\n\n for url in urls:\n try: \n c, cs, locs, auth, title = fte_fetch_comments(url)\n zi = print_coms(c, cs, locs, auth, title) \n write_coms(zi, url, fname)\n print len(c)\n print len(cs)\n\n except:\n continue", "title": "" }, { "docid": "bbd62a7142216883deaf617264d946fc", "score": "0.47964898", "text": "def list_current_builds_details(self):\n url = f'{self.baseurl}/listcurrentbuildsdetails'\n\n headers = {'Content-Type':'application/json'}\n payload = {}\n myself = lambda: inspect.stack()[1][3]\n\n self.client_request(myself(), url, headers, payload)\n return", "title": "" }, { "docid": "9fe8ea9192c7b6da7d65c6e89bcb2743", "score": "0.4794774", "text": "def process_gcode_issues(existing_issues):\n\n start_index = 1\n previous_gid = 0\n max_results = GOOGLE_MAX_RESULTS\n\n while True:\n\n query = gdata.projecthosting.client.Query(start_index = start_index, max_results = max_results)\n issues_feed = gc.get_issues(google_project, query = query)\n\n if not issues_feed.entry:\n break\n\n for issue in issues_feed.entry:\n\n gid = parse_gcode_id(issue.id.text)\n\n # If we're trying to do a complete migration to a fresh Github project, and\n # want to keep the issue numbers synced with Google Code's, then we need to\n # watch out for the fact that deleted issues on Google Code leave holes in the ID numbering.\n # We'll work around this by adding dummy issues until the numbers match again.\n\n if options.synchronize_ids:\n while previous_gid + 1 < gid:\n previous_gid += 1\n output(\"Using dummy entry for missing issue %d\\n\" % (previous_gid ))\n title = \"Google Code skipped issue %d\" % (previous_gid )\n if previous_gid not in existing_issues:\n body = \"_Skipping this issue number to maintain synchronization with Google Code issue IDs._\"\n link = GOOGLE_URL % (google_project, 
previous_gid)\n footer = GOOGLE_ISSUE_TEMPLATE % link\n body += '\\n\\n' + footer\n github_issue = github_repo.create_issue(title, body = body, labels = [github_label(\"imported\")])\n github_issue.edit(state = \"closed\")\n existing_issues[previous_gid]=github_issue\n\n\n # Add the issue and its comments to Github, if we haven't already\n\n if gid in existing_issues:\n github_issue = existing_issues[gid]\n output(\"Not adding issue %d (exists)\" % gid)\n # Skipping issue if not in GOOGLE_STATUS_VALUES_FILTERED\n elif issue.status and issue.status.text in GOOGLE_STATUS_VALUES_FILTERED:\n github_issue = None\n output(\"Skipping issue %d (issue status filtered by GOOGLE_STATUS_VALUES_FILTERED)\" % gid)\n else: github_issue = add_issue_to_github(issue)\n\n if github_issue:\n add_comments_to_issue(github_issue, gid)\n if github_issue.state != issue.state.text:\n github_issue.edit(state = issue.state.text)\n output(\"\\n\")\n\n previous_gid = gid\n\n start_index += max_results\n log_rate_info()", "title": "" }, { "docid": "c5fd3d806bf021ebaee65a76ac636465", "score": "0.47938448", "text": "def get_patch_info(specific_patch, log):\n try:\n # IIQ 4.1.0 and newer runs on Python 2.7, older versions use Python 2.6\n iiq_dir = glob.glob('/usr/share/isilon/lib/python2.*/site-packages')[0]\n except IndexError:\n log.debug('Unable to find InsightIQ install dir. Is it installed?')\n iiq_dir = ''\n patches_dir = iiq_dir + '/' + 'insightiq/patches'\n try:\n all_patches = tuple(os.listdir(patches_dir))\n except (OSError, IOError) as doh:\n log.debug('Unable to list %s', patches_dir)\n all_patches = []\n\n is_installed = specific_patch in all_patches\n try:\n with open(patches_dir + '/' + specific_patch + '/' + 'README.txt') as the_file:\n readme = the_file.read()\n except (OSError, IOError) as doh:\n if specific_patch:\n log.debug('%s : %s', doh.strerror, doh.filename)\n readme = ''\n\n return PatchInfo(iiq_dir, patches_dir, specific_patch, is_installed, readme, all_patches)", "title": "" }, { "docid": "54daf724ab15ca415376cb6cd9816d6c", "score": "0.47899368", "text": "def addRedditDataToSubmissions(submissions, tag, min_comments=10, max_submission_api_calls=50):\r\n\r\n print(\"Fetching data from reddit. Doing up to {:d} queries\".format(\r\n min(max_submission_api_calls, len(submissions))))\r\n\r\n # The reddit instance offers built-in rate limit checks. We therefore use the same instance for all calls.\r\n reddit_instance = getRedditInstance()\r\n comments = []\r\n\r\n StartTime = datetime.now()\r\n queries = 0\r\n printPointer = 0\r\n for post in submissions:\r\n\r\n if(queries % 5 == 0 and queries > printPointer):\r\n print(\"{:5d} posts fetched from reddit. 
Used {:d} seconds\".format(\r\n queries, (datetime.now() - StartTime).seconds))\r\n StartTime = datetime.now()\r\n printPointer = queries\r\n\r\n if(queries <= max_submission_api_calls and post[\"num_comments\"] >= min_comments):\r\n queries += 1\r\n permalink = post['permalink']\r\n reddit_submission_instance = getSubmission(permalink, reddit_instance)\r\n post['score'] = getSubmissionScore(reddit_submission_instance)\r\n comments += getSubmissionComments(reddit_submission_instance, tag)\r\n\r\n post[\"tag\"] = tag\r\n\r\n return comments", "title": "" }, { "docid": "c7e7cbd184ebbc3615629132a23f96aa", "score": "0.47777954", "text": "def count_pull_requests(self, repo_uri: str, details: dict) -> None:\n for pull_request in self.__api_bindings.get_values(self.__api_bindings.get_pull_requests_api_string(repo_uri)):\n if pull_request[\"state\"] == \"OPEN\":\n details[\"open_pull_requests\"] += 1", "title": "" }, { "docid": "f9439f50ac71d994fa44b281f0afca9a", "score": "0.47670597", "text": "def test_url_api_comment_GET_by_ISSUE(self):\n\t\t# add new issue\n\t\tissue_db = models.Issue.objects.get_or_create(**self.issue_default)\n\t\tissue_db = issue_db[0]\n\t\ttry:\n\t\t\tu = models.User.objects.get(email='[email protected]')\n\t\texcept:\n\t\t\tu = models.User(email='[email protected]', password='secr3t')\n\t\t\tu.save()\t\t\n\t\tcomment = models.Comment()\n\t\tcomment.body = 'Atualizado novamente'\n\t\tcomment.author = u\t\t\n\t\tcomment.issue_id = issue_db\n\t\tcomment.save()\n\t\tres = self.client.get('/api/v1/comments/%s/' % (issue_db.register))\n\t\tjson_data = json.loads(res.get_data())\n\t\tself.assertIsInstance(json_data['comments'], list)\n\t\t#self.assertEqual(len(json_data['comments']), 1)", "title": "" }, { "docid": "3cf3aec898a6d7f6ff6e7862cd9af413", "score": "0.47528115", "text": "def _construct_summary(relevant_commits):\n if not relevant_commits or len(relevant_commits) == 0:\n return 'Autogenerated pull request'\n return relevant_commits[0].summary", "title": "" }, { "docid": "c5087f19e322d5aa118135e6068ac6c2", "score": "0.47512582", "text": "def API_id_extract(id):\n\tid_str = str(id)\n\tget_bug_url = 'https://bugzilla.mozilla.org/rest/bug/' + id_str + '?include_fields=id,type,product,component,creation_time,status,priority,severity,version,summary,processed_summary'\n\ttry:\n\t\tresponse = requests.get(get_bug_url)\n\t\tresponse_json = response.json()\n\t\tdata = response_json[\"bugs\"]\n\t\tprint(data)\n\t\treturn pd.DataFrame(data)\n\texcept:\n\t\tdf = pd.DataFrame([])\n\t\treturn df", "title": "" }, { "docid": "455c8962b9420747c0cc69ad86aa9dc8", "score": "0.4750657", "text": "def import_all_bugs(all_bugs):\n global ALL_BUGS\n ALL_BUGS = all_bugs", "title": "" }, { "docid": "99aeef79bddd985c8ac93885812f3219", "score": "0.47467288", "text": "def PollByRevision(self, revision, flag_check=False):\n info = {\n 'message': None,\n 'posted_to_bug': False,\n 'status': None,\n 'is_complete': False,\n 'discard': False,\n }\n buildrequests = self.scheduler_db.GetBuildRequests(revision, self.branch)\n build_type = self.ProcessPushType(revision, buildrequests, flag_check)\n bugs = self.GetBugNumbers(buildrequests)\n info['status'], info['is_complete'] = self.CalculateBuildRequestStatus(buildrequests, revision)\n if self.verbose:\n log.debug(\"POLL_BY_REVISION: RESULTS: %s BUGS: %s TYPE: %s IS_COMPLETE: %s\" % (info['status'], bugs, type, info['is_complete']))\n if info['is_complete'] and len(bugs) > 0:\n results = self.CalculateResults(buildrequests)\n info['message'] = 
self.GenerateResultReportMessage(revision, results, self.GetSingleAuthor(buildrequests))\n if self.verbose:\n log.debug(\"POLL_BY_REVISION: MESSAGE: %s\" % info['message'])\n for bug in bugs:\n if info['message'] != None and self.dry_run == False:\n info['posted_to_bug'] = self.ProcessCompletedRevision(revision=revision, \n message=info['message'], bug=bug, \n status_str=info['status']['status_string'], \n type=type)\n elif self.dry_run:\n log.debug(\"DRY RUN: Would have posted %s to %s\" % (info['message'], bug))\n # No bug number(s) or no try syntax, but complete gets flagged for discard\n elif info['is_complete']:\n log.debug(\"Nothing to do here for %s\" % revision)\n info['discard'] = True\n else:\n if bugs != None and not self.dry_run:\n # Cache it\n log.debug(\"Writing %s to cache\" % revision)\n incomplete = {}\n incomplete[revision] = info['status']\n self.WriteToCache(incomplete)\n else:\n info['discard'] = True\n\n return info", "title": "" }, { "docid": "27fb3987fe7196bfbc6d82fbbec6607d", "score": "0.47371218", "text": "def find_bugs_blocker_cli(runtime: Runtime, include_status, exclude_status, output):\n runtime.initialize()\n find_bugs_obj = FindBugsBlocker()\n find_bugs_obj.include_status(include_status)\n find_bugs_obj.exclude_status(exclude_status)\n exit_code = 0\n for b in [runtime.get_bug_tracker('jira'), runtime.get_bug_tracker('bugzilla')]:\n try:\n find_bugs_blocker(runtime, output, find_bugs_obj, b)\n except Exception as e:\n runtime.logger.error(traceback.format_exc())\n runtime.logger.error(f'exception with {b.type} bug tracker: {e}')\n exit_code = 1\n sys.exit(exit_code)", "title": "" }, { "docid": "c07e999b19da35e0ac6e86f983d3c631", "score": "0.47367227", "text": "def opened_pull_requests(self, result):\n return 'Opened pull requests: {opened_pull_requests_count}, Incomplete results: {query_incomplete_results}'.format(\n opened_pull_requests_count=result['total_count'],\n query_incomplete_results=result['incomplete_results']\n )", "title": "" }, { "docid": "2654970f9205b7f97674ee03c53af9f8", "score": "0.47360668", "text": "def get_build_info(job_name, build_number):\n build_info = get_server().get_build_info(job_name, build_number)\n build_info['timestamp'] = datetime.datetime.fromtimestamp(\n build_info['timestamp']/1000)\n return build_info", "title": "" }, { "docid": "01427b3224a27144bc0b84b8951e112c", "score": "0.47354856", "text": "def get_linked_task_ids(pull_request: PullRequest) -> List[str]:\n body_lines = pull_request.body().splitlines()\n stripped_body_lines = (line.strip() for line in body_lines)\n task_url_line = None\n seen_asana_tasks_line = False\n for line in stripped_body_lines:\n if seen_asana_tasks_line:\n task_url_line = line\n break\n if line.startswith(\"Asana tasks:\"):\n seen_asana_tasks_line = True\n\n if task_url_line:\n task_urls = task_url_line.split()\n task_ids = []\n for url in task_urls:\n maybe_id = re.search(\"\\d+(?!.*\\d)\", url)\n if maybe_id is not None:\n task_ids.append(maybe_id.group())\n return task_ids\n else:\n return []", "title": "" }, { "docid": "f108bccb2f5e19243a68bd227e9f25ee", "score": "0.472921", "text": "def Sort( bugnums, sortby='bugnumber', db='bugstats' ):\n sortby = re.sub( r'(bugnumber|bugid)', 'bug_id', sortby ) # -- bug_id is the columns in current_status\n\n if sortby == 'bug_id':\n return sorted( bugnums )\n\n sortby_list = MapAttrNames( sortby, 'list')\n joined_table, sortby_str = _get_query_elems_for_sorting( sortby_list )\n\n dbc, newQ = Databases.GetDBC( db );\n\n cursor = dbc.cursor()\n 
cursor.execute(\"create temporary table bug_nums_temp( bug_id int unsigned not null default 0 )\")\n cursor.executemany(\"insert into bug_nums_temp (bug_id) values(%s)\", bugnums )\n\n query = \"select bug_nums_temp.bug_id from \"+joined_table+\" order by \" + sortby_str;\n cursor.execute( query )\n \n rows = cursor.fetchall()\n def ext(a): return a['bug_id']\n bugnums_sorted = map( ext, rows )\n\n if newQ: dbc.close()\n\n return bugnums_sorted", "title": "" }, { "docid": "6ae8d5a53cbd6eb7e8d849dd845c7d48", "score": "0.4728269", "text": "def get_current_build_id_for_project(self, project_name):\n url = f'{self.baseurl}/currentbuildid/{project_name}'\n\n headers = {'Content-Type':'application/json'}\n payload = {}\n myself = lambda: inspect.stack()[1][3]\n\n self.client_request(myself(), url, headers, payload)\n return", "title": "" }, { "docid": "6c61f1b3e5128467e1ba6a3c58aaf5a4", "score": "0.47234184", "text": "def collect_all_bug_fix_commits(commits, importance,\n legacy_cutoff, limit=-1):\n\n legacy_ignored = 0\n total_mainline = 0\n\n guilt_data = []\n\n all_sub_branches = get_all_branch_data(commits, importance)\n for k, c in commits.items():\n # Identifies commit to be included during feature extraction\n c['is_tracked_change'] = False\n\n for k, c in commits.items():\n if not c['on_master_branch'] or not c['on_mainline']:\n continue\n\n total_mainline += 1\n # skip legacy for now\n if c['date'] <= legacy_cutoff:\n legacy_ignored += 1\n continue\n\n limit -= 1\n if limit == 0:\n break\n\n # Special case for origin commit:\n if len(c['parents']) == 0:\n c['is_tracked_change'] = True\n continue\n\n # Handle Fast-Forward Commits\n if len(c['parents']) == 1: # Fast Forward Commit\n if is_bug_fix(c, importance):\n # Bug Fix commit\n guilt_data.append(process_ffc_bug_fix(c, importance))\n process_ffc_change(c)\n elif c['change_id']:\n process_ffc_change(c)\n else: # Down't exist for Glance\n print 'collect_all_bug_fix_commits: FFC with no changeid:', k\n raise Exception\n\n else:\n sub_branch_data = all_sub_branches[k]\n\n # Correctness checks\n if not sub_branch_data:\n print 'Merge Commit missing sub-branch_data:', k\n raise Exception\n\n if len(sub_branch_data['unique_changes']) == 0:\n print 'Warning: Merge commit ', k, 'has no changes'\n print ' Bugs:', len(sub_branch_data['unique_bugs'])\n print ' Time relative to legacy cut-off:',\n print int(c['date']) - legacy_cutoff\n if len(sub_branch_data['unique_bugs']) == 0:\n print ' Ignoring sub-branch'\n continue\n\n # Simple merge commit\n if len(sub_branch_data['unique_changes']) == 1:\n # check for reverts\n if ('Revert' in c['msg'].split('\\n')[0]\n and 'Revert' in\n commits[c['parents'][1]]['msg'].split('\\n')[0]\n and 'change_id' not in commits[c['parents'][1]]):\n print 'Ignoring revert:', c['cid']\n continue\n\n # change_id should not be on merge_commit\n if 'change_id' in c:\n print 'Unexpected change_id on simple merge, ignored',\n print c['cid']\n if len(c['files']) == 0:\n continue\n else:\n assert ('change_id' not in c)\n\n # change_id always attached to parent\n assert (commits[c['parents'][1]]['change_id'])\n\n process_simple_merge_change(c, commits)\n if len(sub_branch_data['unique_bugs']) > 0:\n entry = process_simple_merge_bug_fix(c, importance,\n commits,\n sub_branch_data)\n guilt_data.append(entry)\n pass\n else:\n process_complex_merge_change(c, commits)\n if len(sub_branch_data['unique_bugs']) > 0:\n entry = process_complex_merge_bug_fix(c, importance,\n commits,\n sub_branch_data)\n guilt_data += entry\n pass\n\n 
pass\n print ' Mainline Commits ignored due to legacy:',\n print legacy_ignored, ' out of:', total_mainline\n print ' Total commite requiring blame computation:', len(guilt_data)\n return guilt_data", "title": "" }, { "docid": "2b280027fa4a582f46631164c09f4ebc", "score": "0.47229707", "text": "def blog_comment_added(postname, number):", "title": "" }, { "docid": "6de27fb024f667695221acad98ef4f8b", "score": "0.47219998", "text": "def extract_build_info(self, build_args=None):", "title": "" }, { "docid": "c223a2950293ef072a79d70204cfb173", "score": "0.47208256", "text": "def CalculateBuildRequestStatus(self, buildrequests, revision=None):\n is_complete = False\n status = {\n 'total_builds': 0,\n 'pending': 0,\n 'running': 0,\n 'complete': 0,\n 'cancelled': 0,\n 'interrupted': 0,\n 'misc': 0,\n 'status_string': \"\",\n }\n for value in buildrequests.values():\n status['total_builds'] +=1\n br = value.to_dict()\n if br['status_str'].lower() in status.keys():\n status[br['status_str'].lower()] += 1\n\n total_complete = status['misc'] + status['interrupted'] + status['cancelled'] + status['complete']\n if status['total_builds'] == total_complete:\n is_complete = True\n timeout_complete = []\n for value in buildrequests.values():\n br = value.to_dict()\n if br['finish_time']:\n timeout_complete.append(time() - br['finish_time'] > COMPLETION_THRESHOLD)\n for passed_timeout in timeout_complete:\n if not passed_timeout:\n is_complete = False # we'll wait a bit and make sure no tests are coming\n break\n if is_complete:\n # one more check before it's _really complete_ - any oranges to retry?\n if self.verbose:\n log.debug(\"Check Orange Factor for rev: %s\" % revision)\n is_complete, status['status_string'] = self.OrangeFactorHandling(buildrequests)\n # check timeout, perhaps it's time to kick this out of the tracking queue\n if revision != None:\n if self.revisionTimedOut(revision):\n status['status_string'] = 'TIMED_OUT'\n is_complete = True\n\n return (status,is_complete)", "title": "" }, { "docid": "d73351ddcf945f1e6c046cfa7c8e792c", "score": "0.46960232", "text": "def WriteToBuglist(self, revision, bug):\n if self.dry_run:\n log.debug(\"DRY_RUN: WRITING TO %s: %s\" % (self.posted_bugs, revision))\n else:\n try:\n f = open(self.posted_bugs, 'a')\n f.write(\"%s|%s|%d|%s\\n\" % (bug, revision, time(),\n strftime(\"%a, %d %b %Y %H:%M:%S %Z\", localtime())))\n f.close()\n log.debug(\"WROTE TO %s: %s\" % (self.posted_bugs, revision))\n self.RemoveCache(revision)\n except:\n traceback.print_exc(file=sys.stdout)", "title": "" }, { "docid": "b9dcb8e9f428a4d9c50b0118e5129eb8", "score": "0.46815732", "text": "def list_build_ids_for_project(self, project_name):\n url = f'{self.baseurl}/listbuildids/{project_name}'\n\n headers = {'Content-Type':'application/json'}\n payload = {}\n myself = lambda: inspect.stack()[1][3]\n\n self.client_request(myself(), url, headers, payload)\n return", "title": "" }, { "docid": "77681213c23321e8155f5970c95d9d15", "score": "0.4679317", "text": "def getNumOpenIssue(self):\n return self.apiJSON['open_issues']", "title": "" }, { "docid": "d939206237437be14571322ff657cbef", "score": "0.4679088", "text": "def add_comments(submission, heroes_dict):\n submission.replace_more_comments(limit=None, threshold=0)\n\n for comment in praw.helpers.flatten_tree(submission.comments):\n COMMENTS_DB_CURSOR.execute(\"SELECT id FROM comments WHERE id=?\", [comment.id])\n if COMMENTS_DB_CURSOR.fetchone():\n continue\n\n response = prepare_response(comment.body)\n \n if response in 
properties.EXCLUDED_RESPONSES:\n save_comment_id(comment.id)\n continue\n \n if add_flair_specific_response_and_return(comment, heroes_dict, response):\n continue\n \n if response in SPECIFIC_RESPONSES_DICT:\n SPECIFIC_RESPONSES_DICT[response](comment, heroes_dict, response)\n continue\n \n add_regular_response(comment, heroes_dict, response)\n save_comment_id(comment.id)", "title": "" }, { "docid": "2b8bda5a32ef3756f3143f1170bcaac0", "score": "0.4676068", "text": "def get_build_fail_cause(build_info, job_name, build_number):\n if build_info['result'] == 'SUCCESS':\n cause_info = ''\n else:\n cause_info = search_for_cause(job_name, build_number)\n return cause_info", "title": "" }, { "docid": "18a55dffd0c8dc7c83f673a20093ac0a", "score": "0.46753675", "text": "def get_daily_comments(day, month, year, n=1000, MoreComments = 32):\n \n #Transform the input date to strings\n full_date = datetime.date(year, month, day)\n \n str_day = str(full_date.strftime('%d'))\n str_month = str(full_date.strftime('%B'))\n str_year = str(full_date.strftime('%Y'))\n \n print(str_day, str_month, str_year)\n \n #Load comments\n \n #Connect to subreddit\n subreddit = connect_subreddit() \n \n #Name of thread (Daily Discussion) to focus on\n query = 'Daily Discussion Thread for %s %s, %s'%(str_month, str_day, str_year)\n print('Name of the thread we want to reach : ', query)\n \n \n exist = 0 #Indactor to see whether Thread exists (=1) or not (=0)\n problem_text = \"The Thread you are looking for doesn't exists. Possible reasons :\\n\\\n - Weekend day\\n\\\n - Holiday\\n\\\n - Thread not yet created\\n\\\n - Thread is too heavy for the API\" #Text to display if no results for the query\n \n #load thread and load data comments\n \n daily_comm = []\n \n for submission in subreddit.search(query, sort='new'):\n \n \n #Case if thread exists \n if submission.title == query: \n exist = 1\n \n print(submission.title)\n submission.comment_sort = \"top\" #Sort top-level-comments by score\n submission.comments.replace_more(MoreComments) #Replace the \"More Comments\" instances (default 32 times) -> Still have margin if we need only 2000 top comments\n daily_comm = submission.comments.list()[:n]#Keep the nth first top comments \n nb_comm = len(daily_comm)\n \n \n #Case if Thread doesn't exist\n if exist == 0:\n nb_comm = 0\n print(problem_text)\n return False\n \n \n #Tell user about the number of comments he get VS the number of comments he wanted\n print(\"Asked for %d comments and get a list of %d comments\"%(n,nb_comm))\n\n \n #Creat the final dataframe with comments (id,body,score)\n data_list = []\n for c in daily_comm:\n temp_dic = {}\n temp_dic['id'] = c.id\n temp_dic['body'] = c.body\n temp_dic['score'] = c.score\n \n data_list.append(temp_dic)\n\n df = pd.DataFrame(data_list)\n \n #return Comments DataFrame\n return df", "title": "" }, { "docid": "2e397e43f72c158fd45d3d620d3bbf69", "score": "0.4673406", "text": "def API_data_extract(from_a_preiod_of_time_untill_now='2h'):\n\tcontroller = 'https://bugzilla.mozilla.org/rest/bug?include_fields=id,type,product,component,creation_time,status,priority,severity,version,summary,processed_summary,duplicates&chfield=%5BBug%20creation%5D&chfieldfrom=-'\n\tget_bug_url = controller + from_a_preiod_of_time_untill_now + '&chfieldto=Now'\n\ttry:\n\t\t# Extract data from Bugzilla REST API\n\t\tresponse = requests.get(get_bug_url)\n\t\tresponse_json = response.json()\n\t\tdata = response_json[\"bugs\"]\n\t\tprint(\"Number of bug reports:\", len(data))\n\n\t\t# Initiate 
mongoDB\n\t\tclient = pymongo.MongoClient(os.environ.get('MONGO_ADDRESS', \"mongodb://127.0.0.1:27017/\"))\n\t\tmydb = client[\"mydatabase\"]\n\t\tmycol = mydb[\"bug_report\"]\n\t\tmycol.create_index([('id', pymongo.ASCENDING)], unique=True)\n\n\t\tfor tup in data:\n\t\t\t# convert the date and time\n\t\t\ttime_convert = datetime.datetime.strptime(tup['creation_time'], \"%Y-%m-%dT%H:%M:%SZ\")\n\t\t\ttup[\"creation_time\"] = time_convert\n\t\t\t# add insertion time to the record\n\t\t\ttz_London = pytz.timezone('Europe/London')\n\t\t\ttup['insertion_time'] = datetime.datetime.now(tz_London)\n\t\t\t# Insert record to mongoDB\n\t\t\ttry:\n\t\t\t\tmycol.insert_one(tup)\n\t\t\texcept:\n\t\t\t\tprint(tup['id'], ' Duplicate Bug Report')\n\t\tdata = pd.DataFrame(data)\n\t\tdata.drop(columns=['_id'], inplace=True)\n\t\treturn data\n\texcept:\n\t\tprint(\"API Error\")", "title": "" }, { "docid": "ce57f752bd0af5e74aa04cca437d3a97", "score": "0.46517593", "text": "def test_get_repositories_by_username_by_repo_slug_pullrequests_by_pull_request_id_comments(self):\n pass", "title": "" }, { "docid": "bf3d63c3d879f5172622aaf285b2a57c", "score": "0.4646144", "text": "def test_get_repositories_by_username_by_repo_slug_pullrequests_by_pull_request_id_comments_by_comment_id(self):\n pass", "title": "" } ]
7b7f4594694f8cfb3398fee97cad1b09
Builds a map of all the available XSD files.
[ { "docid": "be58a6fc0c24417875c01449a15e6348", "score": "0.72942144", "text": "def get_xsd_files():\n all_files = []\n xsd_directory = get_xsd_directory()\n for root, unused_dirs, files in os.walk(xsd_directory):\n all_files.extend(\n os.path.join(root, filename)\n for filename in files\n if six.ensure_str(filename).endswith('.xsd'))\n\n schemas = collections.defaultdict(dict)\n for xsd_file in all_files:\n relative_path = os.path.relpath(xsd_file, xsd_directory)\n directory, unused_filename = os.path.split(relative_path)\n profile_name, profile_version = os.path.split(directory)\n schemas[profile_name][profile_version] = xsd_file\n\n return schemas", "title": "" } ]
[ { "docid": "a31e20d3a39b037126aee79ba974b98f", "score": "0.59901017", "text": "def build_file_map(buildset_files, minimal_files,\n full_files, context_files, docs_files):\n\n sys.stderr.write(\"making file map...\\n\")\n file_map = {}\n for f in buildset_files:\n file_map[f] = TargetPlist.BUILDSET\n for f in minimal_files:\n file_map[f] = TargetPlist.MINIMAL\n for f in full_files:\n file_map[f] = TargetPlist.FULL\n for f in context_files:\n file_map[f] = TargetPlist.CONTEXT\n for f in docs_files:\n file_map[f] = TargetPlist.DOCS\n return file_map", "title": "" }, { "docid": "a6dd30ea9644cb66dc47a1a9d8c02164", "score": "0.5531153", "text": "def _CreateFilesToPydepsMap(self):\n ret = {}\n for pydep_local_path in self._pydeps_files:\n for path in self._ComputeNormalizedPydepsEntries(pydep_local_path):\n ret.setdefault(path, []).append(pydep_local_path)\n return ret", "title": "" }, { "docid": "a864b1624b3badcac4b33377cbd2e84d", "score": "0.5519168", "text": "def mapping_closure(self, files):\n closure_files = set()\n for file_ in files:\n more_files = {file_}\n if config.is_mapping(file_):\n with self.error_on_exception(file_, \"Problem loading submappings of\", repr(file_)):\n mapping = crds.get_cached_mapping(file_, ignore_checksum=\"warn\")\n more_files = {config.locate_mapping(name) for name in mapping.mapping_names()}\n more_files = (more_files - {config.locate_mapping(mapping.basename)}) | {file_}\n closure_files |= more_files\n return sorted(closure_files)", "title": "" }, { "docid": "b4a3878af0deee7e48ac93743785f948", "score": "0.5481097", "text": "def get_definition_paths_iterator():\n definitions_root = os.path.join(get_repository_root_path(), 'definitions')\n for dirpath, dirnames, filenames in os.walk(definitions_root):\n for filename in filenames:\n if filename.endswith('.xml'):\n yield os.path.join(dirpath, filename)", "title": "" }, { "docid": "30b6169be2c98fa82eabaf29f875d943", "score": "0.54121786", "text": "def available_files(self):\n podlocs = self.cfg.get('pod_locations', [ DEF_MIDAS_POD_FILE ])\n datafiles = {}\n\n # check each of the possible locations; locations found later take\n # precedence\n for root in self._indirs:\n root = root.rstrip('/')\n for dir, subdirs, files in os.walk(root):\n reldir = dir[len(root)+1:]\n for f in files:\n # don't descend into subdirectories with ignorable names\n for d in range(len(subdirs)-1, -1, -1):\n if subdirs[d].startswith('.') or \\\n subdirs[d].startswith('_'):\n del subdirs[d]\n \n if f.startswith('.') or f.startswith('_') or \\\n os.path.join(reldir,f) in podlocs:\n # skip dot-files and pod files written by MIDAS\n continue\n\n datafiles[os.path.join(reldir, f)] = os.path.join(dir, f)\n\n return datafiles", "title": "" }, { "docid": "4d1a2478323d5142e878f7f446fae410", "score": "0.53916794", "text": "def __getAllShapes(self):\n shapeFiles = {}\n\n for root, dirs, files in os.walk(self.root):\n shapeFile = []\n\n for file in files:\n if file.endswith(self.extensions):\n shapeFile.append(file)\n\n if shapeFile!=[]:\n shapeFiles[root] = shapeFile\n\n return shapeFiles", "title": "" }, { "docid": "bcd229d945ab1d1d539ddb3fadbcc1de", "score": "0.53176004", "text": "def schema_files(self):\n return [\n sch_file\n for root, directory, files in os.walk(self.schemas_path)\n for sch_file in files\n if re.compile(r\"^\\d{1,4}_\\w*\\.json$\").match(sch_file)\n ]", "title": "" }, { "docid": "6cf0c047e5c4a4d072d728220d1728a8", "score": "0.52675897", "text": "def _gen_all_maps(self):\r\n self.map.full = list(set(self.map.full))\r\n 
self.map.full.sort()\r\n self.gen_simple_keys()", "title": "" }, { "docid": "eaa5f0472e07f2ccae2e1600733c9a9c", "score": "0.5252028", "text": "def _gen_all_maps(self):\r\n self.map.full = list(set(self.map.full))\r\n self.map.full.sort()\r\n self._gen_simple_keys()", "title": "" }, { "docid": "e0b2b4f53e15e5575bcb5c80dc71fee5", "score": "0.5220832", "text": "def map_inputs(self):\n if not self.inputs:\n return {}\n\n else:\n all_inputs = []\n if '*' in self.inputs:\n return {'*':[ o for p in self.parents for o in p.output_files ]}\n\n for name in self.inputs:\n for p in self.parents:\n all_inputs += filter(lambda x: x,[ p.get_output(name,error_if_missing=False) ]) #filter out Nones\n\n input_dict = {}\n for input_file in set(all_inputs):\n input_dict.setdefault(input_file.name,[]).append(input_file)\n\n for k,v in input_dict.items():\n if len(v) == 0:\n raise ToolValidationError, \"Could not find input '{0}' in {1}\".format(k,self)\n\n return input_dict", "title": "" }, { "docid": "91e8b813f45829c4bec685bc8e60ef94", "score": "0.5195962", "text": "def test_build_document_map(self):\n meta_dict = SCHEMA_DATA_01['tables']['meta_library']['cf']['cq']\n\n received = self._schema.build_document_map(meta_dict)\n expected = ['00000',\n '1024x1024',\n '19961217102630',\n '19971217102630',\n '5458',\n 'airfield',\n 'base',\n 'bf01',\n 'checks',\n 'data',\n 'fort',\n 'geocentric',\n 'huachuca',\n 'i_3001a',\n 'image',\n 'jitc',\n 'missing',\n 'mono',\n 'nitf02',\n 'uncompressed',\n 'unknown',\n 'with']\n msg = 'Metadata document map error'\n self.assertListEqual(sorted(list(received)), expected, msg)", "title": "" }, { "docid": "8111c79ad245a16b4afc9f2efcd658fb", "score": "0.5161212", "text": "def build_laws_mapping():\n laws = []\n frbr_work_uri_to_law = {}\n for law_path in glob.glob(f'akn/**/*.xml', recursive=True):\n law = Law(law_path)\n frbr_work_uri_to_law[law.frbr_work_uri] = law\n laws.append(law)\n return laws, frbr_work_uri_to_law", "title": "" }, { "docid": "5ab4af0690b63c72fd07a27b35ce23a9", "score": "0.51411164", "text": "def _generate_map(self):\n root = ET.Element('Project', attrib={'ID': str(self._id)})\n for subject in self.subjects:\n root.append(subject._generate_map())\n return root", "title": "" }, { "docid": "b74c547964addb24e9ceb6d23de97394", "score": "0.51118535", "text": "def index_librettists(self):\n\n # Glob .xml files.\n path = os.path.join(self.path, '*.xml')\n\n for path in glob.glob(path):\n\n person = Person(path)\n\n # Map librettists -> path.\n for lib in person.librettists:\n paths = self.paths.setdefault(lib, [])\n paths.append(path)", "title": "" }, { "docid": "cdea0968130769cff6e2ed4969d57ca0", "score": "0.5088067", "text": "def create_date_map(monthly_tar, date_):\n doc_date_map = {}\n doc_paths = [member.name\n for member in monthly_tar\n if member.isfile()]\n # sort paths by dates\n doc_paths = sorted(doc_paths)\n for doc_path in doc_paths:\n doc_date = date_.replace('/', '') + doc_path[3:5]\n if doc_date in doc_date_map:\n doc_date_map[doc_date].append(doc_path)\n else:\n doc_date_map[doc_date] = [doc_path]\n return doc_date_map", "title": "" }, { "docid": "1429262378ac8d72f79e4f7f4e4d1d23", "score": "0.5085957", "text": "def mapForRun(runDir):\n\n mapDir = runDir / 'output'\n mapPaths = tuple(mapDir.glob('*.xml'))\n if len(mapPaths) == 0:\n return None\n elif len(mapPaths) > 1:\n raise RuntimeError(\"more than one xml file found\")\n return mapPaths[0]", "title": "" }, { "docid": "d9286fa4205e6e44d6b53aa91854f3ff", "score": "0.5085948", "text": "def 
SchemasMappingInfo():\n list_of_mappings = []\n for m in MAPPINGS:\n for mapping in MAPPINGS[m]:\n list_of_mappings.append(mapping.dump()) \n return jsonify(list_of_mappings)", "title": "" }, { "docid": "5050d725d7b50f4a250359769b51bafa", "score": "0.5075842", "text": "def requires(self):\n for samp, fastq in self.fastq_dic.items():\n trim_dir = os.path.join(self.workdir, \"processes\", \"qc\", samp)\n map_dir = os.path.join(self.workdir, \"processes\", \"mapping\", samp)\n if os.path.isdir(map_dir) is False:\n os.makedirs(map_dir)\n yield map_star(fastqs=[trim_dir + \"/\" + samp + \".1.trimmed.fastq\",\n trim_dir + \"/\" + samp + \".2.trimmed.fastq\"],\n stardb_dir=self.stardb_dir, map_dir=map_dir,\n sample=samp, num_cpus=self.num_cpus)", "title": "" }, { "docid": "c50135fabf3b914030add11e7991210e", "score": "0.5055077", "text": "def getParameterValidationFiles():\n dataDir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'schemas'))\n schemaFile = os.path.join(dataDir, 'paramSchema.yaml')\n schemaFuncs = os.path.join(dataDir, 'schemaFuncs.py')\n return schemaFile, schemaFuncs", "title": "" }, { "docid": "c3afa0e345aa51cc38dfd37f0bc19fcd", "score": "0.50377506", "text": "def _get_data_files(self):\r\n self.analyze_manifest()\r\n data = []\r\n for package in self.packages or ():\r\n # Locate package source directory\r\n src_dir = self.get_package_dir(package)\r\n\r\n # Compute package build directory\r\n build_dir = os.path.join(*([self.build_lib] + package.split('.')))\r\n\r\n # Length of path to strip from found files\r\n plen = len(src_dir)+1\r\n\r\n # Strip directory from globbed filenames\r\n filenames = [\r\n file[plen:] for file in self.find_data_files(package, src_dir)\r\n ]\r\n data.append( (package, src_dir, build_dir, filenames) )\r\n return data", "title": "" }, { "docid": "28b1960f856d00a795535cf9e57dae00", "score": "0.5033546", "text": "def _get_sdict(self, env):\r\n sdict = {}\r\n for bld in self.get_src_builders(env):\r\n for suf in bld.src_suffixes(env):\r\n sdict[suf] = bld\r\n return sdict", "title": "" }, { "docid": "4b16bd10fd1c119ed436427833266afa", "score": "0.5031922", "text": "def fileLocales(self) -> Iterable[str]:\n for path in self.root.joinpath('common/main').glob('*.xml'):\n if path.stem != 'root':\n yield path.stem", "title": "" }, { "docid": "884ee9a8b756fb56ea70e6b47142b286", "score": "0.50173235", "text": "def _load_concepts(self):\n\n concepts = {}\n\n for filename in os.listdir(\n os.path.join(constants.SOLAR_TAXONOMY_DIR, \"data\")):\n # if 'def.' in filename:\n if 'pre.' in filename:\n concept_name = filename[filename.find(\"solar-\") + 6:filename.find(\"_2020\")]\n concepts[concept_name] = self._load_concepts_file(\n os.path.join(constants.SOLAR_TAXONOMY_DIR,\n \"data\", filename))\n for filename in os.listdir(\n os.path.join(constants.SOLAR_TAXONOMY_DIR, \"documents\")):\n # if 'def.' in filename:\n if 'pre.' in filename:\n concept_name = filename[filename.find(\"solar-\") + 6:filename.find(\"_2020\")]\n concepts[concept_name] = self._load_concepts_file(\n os.path.join(constants.SOLAR_TAXONOMY_DIR,\n \"documents\", filename))\n\n for filename in os.listdir(\n os.path.join(constants.SOLAR_TAXONOMY_DIR, \"process\")):\n # if 'def.' in filename:\n if 'pre.' 
in filename:\n concept_name = filename[filename.find(\"solar-\") + 6:filename.find(\"_2020\")]\n concepts[concept_name] = self._load_concepts_file(\n os.path.join(constants.SOLAR_TAXONOMY_DIR,\n \"process\", filename))\n\n # load from \"/core/\" for the \"All\" entrypoint:\n concepts[\"All\"] = self._load_concepts_file(\n os.path.join(constants.SOLAR_TAXONOMY_DIR, \"core\",\n constants.SOLAR_ALL_PRE_XML))\n return concepts", "title": "" }, { "docid": "f218806af387e0079fc562819fc67e59", "score": "0.50173235", "text": "def get_existing_kmb_files(self):\n kmb_files = {}\n self.find_files_from_pattern(\n 'http://kmb.raa.se/cocoon/bild/show-image.html?id=', kmb_files)\n self.find_files_from_pattern(\n 'http://kulturarvsdata.se/raa/kmb/', kmb_files)\n\n # convert sets to list (to allow for json storage)\n for k, v in kmb_files.items():\n kmb_files[k] = list(v)\n\n return kmb_files", "title": "" }, { "docid": "8d5e2db2779b4b54bff143dd3462c24b", "score": "0.50145406", "text": "def get_url_map():\n map = {}\n path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), # current working dir ../\n \"custom\", # ../custom/\n \"pypi\", # ../custom/pypi/\n \"map.txt\" # ../custom/pypi/map.txt\n )\n with open(path) as f:\n for line in f.readlines():\n package, url = line.strip().split(\": \")\n map[package] = url\n return map", "title": "" }, { "docid": "0911a6c51489ec53fa04366162b0daf8", "score": "0.50090116", "text": "def map_files():\n files = OrderedDict()\n zc_instance = zeroconf.Zeroconf()\n listener = utils.ServiceListener()\n\n zeroconf.ServiceBrowser(zc_instance, \"_http._tcp.local.\", listener)\n\n try:\n # Give listener some time to discover available services.\n time.sleep(0.5)\n if not listener.services:\n click.echo('No files available. Waiting ...')\n while not listener.services:\n time.sleep(0.5)\n click.echo('Peer(s) found.')\n for service in listener.services:\n address = utils.bytes_to_ip(service.address)\n port = service.port\n filename = service.properties[b'filename'].decode('utf-8')\n url = \"http://\" + address + \":\" + str(port) + \"/\" + \\\n urllib.pathname2url(filename)\n files[filename] = url\n except KeyboardInterrupt:\n sys.exit(0)\n return files", "title": "" }, { "docid": "de0173c7baf5a259dd4af1d9ee36484b", "score": "0.49828303", "text": "def build_inscription_filepaths( self ):\n log.debug( u'in utils.indexer.InscriptionFilenamesBuilder.build_filenames(); self.inscriptions_dir_path, `%s`' % self.inscriptions_dir_path )\n inscriptions = glob.glob( u'%s/*.xml' % self.inscriptions_dir_path )\n log.debug( u'in utils.indexer.InscriptionFilenamesBuilder.build_filenames(); inscriptions[0:3], `%s`' % pprint.pformat(inscriptions[0:3]) )\n return { u'inscriptions': inscriptions }", "title": "" }, { "docid": "465f7023fd3f4e93f31dddb7d6d8403a", "score": "0.49789116", "text": "def index_composers(self):\n\n # Glob .xml files.\n path = os.path.join(self.path, '*.xml')\n\n for path in glob.glob(path):\n\n person = Person(path)\n\n # Map librettists -> path.\n for lib in person.composers:\n paths = self.paths.setdefault(lib, [])\n paths.append(path)", "title": "" }, { "docid": "dfbf4da363a50d9beb70c93368aa7672", "score": "0.4974084", "text": "def build_dpi_map():\n dpi_map = {}\n for p in Parameter.query:\n if p.data_product_identifier:\n dpi_map.setdefault(p.data_product_identifier, set()).add(p)\n return dpi_map", "title": "" }, { "docid": "3b7a579a03de1863a8acaf257e31008b", "score": "0.49519324", "text": "def load_maps(maps_dir):\n maps_dir = os.path.abspath(maps_dir)\n maps 
= {}\n for root, dirnames, filenames in os.walk(maps_dir):\n for filename in filenames:\n if filename.endswith(\".xml\"):\n xml_file = os.path.join(root, filename)\n map = MapSource.from_xml(xml_file, maps_dir)\n if map.id in maps:\n raise MapSourceException(\"duplicate map id: {} in file {}\".format(map.id, xml_file))\n else:\n maps[map.id] = map\n return maps", "title": "" }, { "docid": "af71948b953a757a9f3ce94410c12d5a", "score": "0.49500164", "text": "def get_mappings():\n return _mappings", "title": "" }, { "docid": "640ddec79b4c0b5ac355295ed3656e3c", "score": "0.49404514", "text": "def fill_all_xml():\n # This loops populates the database using all the xml files\n for dir in os.listdir(\"/{}/xml_data\".format(BACKEND_DIR)):\n if os.path.isdir(\"/{}/xml_data/{}\".format(BACKEND_DIR, dir)):\n for filename in os.listdir(\"/{}/xml_data/{}\".format(BACKEND_DIR, dir)):\n if filename.endswith(\".xml\"):\n fl = \"/{}/xml_data/{}/{}\".format(BACKEND_DIR, dir, filename)\n try:\n xml_to_database(fl)\n except AttributeError:\n print(\"ERROR: AttributeError in file {}\".format(fl))\n except Exception as ex:\n print(\"ERROR: {} in file {} | Arguments: {}\".format(type(ex).__name__, fl, ex.args))", "title": "" }, { "docid": "cf5e8094139d83d0a6ea3004f04a7933", "score": "0.49392924", "text": "def collect_files(base_dir, pattern):\n name_mapping = {}\n for fname in match_files(base_dir, pattern):\n name = os.path.basename(fname)\n\n if name in name_mapping:\n print('Duplicate name detected: \"%s\" and \"%s\"' % (name, name_mapping[name]))\n continue\n\n name_mapping[name] = fname\n\n return name_mapping", "title": "" }, { "docid": "bcf99761e4c556908520bbcd881a6db4", "score": "0.49321163", "text": "def build_files(self):\r\n for i in self.__dict__:\r\n if len(re.findall('file_.*', i))==0:\r\n pass\r\n else:\r\n self.__getattribute__(i).build_file()\r\n self.__getattribute__(i).create()", "title": "" }, { "docid": "4647ce61679799cc8c86e054dfae067c", "score": "0.49312335", "text": "def walk_schema(cls, root_dir, symbol=None, preprocess=lambda s, n: s,\n sparse=True, xml_root=None):\n xml_root = xml_root or cls.root\n particular_namespace = '.'.join((cls.namespace, symbol or xml_root))\n result = {}\n tree_stack = [(root_dir, None, result)] # for bottom-up reconstruction\n for root, dirs, files in walk(root_dir):\n # multi-step upwards and (followed by)/or single step downwards\n while commonprefix((root, tree_stack[-1][0])) != tree_stack[-1][0]:\n cls._walk_schema_step_up(tree_stack)\n if root != tree_stack[-1][0]:\n current_tracking = cls._walk_schema_step_down(tree_stack, root)\n else:\n assert root == root_dir\n # at root, we do not traverse to any other dir than `xml_root`\n map(lambda d: d != xml_root and dirs.remove(d), dirs[:])\n current_tracking = tree_stack[-1][2]\n files = []\n\n for i in dirs + files:\n name, ext = splitext(i) # does not hurt even if it is a dir\n if name.startswith('_') or name.startswith('.') \\\n or i in files and ext != extsep + 'py':\n continue\n\n log.debug(\"Trying `{0}' at `{1}'\".format(name, root))\n mfile, mpath, mdesc = find_module(name, [root])\n # need to obfuscate the name due to, e.g., \"logging\" clash\n mname = '.'.join((particular_namespace, 'walk_' + name))\n # suppress problems with missing parent in module hierarchy\n modules.setdefault(particular_namespace, modules[__name__])\n if mname in modules:\n mod = modules[mname]\n if hasattr(mod, '__path__') and mod.__path__[0] != mpath:\n # XXX robust?\n raise FormatError(cls, \"`{0}' already present\"\n 
.format(mname))\n else:\n try:\n mod = load_module(mname, mfile, mpath, mdesc)\n except ImportError:\n log.warning(\"Cannot load `{0}'\".format(mpath))\n continue\n finally:\n if mfile:\n mfile.close()\n\n available = set(dir(mod)) - set(dir(type(mod)))\n swag = None\n if not symbol or symbol in available:\n swag = getattr(mod, symbol) if symbol else tuple(available)\n swag = preprocess(swag, name)\n if swag is None and (sparse or i in files):\n continue # files are terminals anyway\n current_tracking[name] = (swag, {})\n\n for i in xrange(cls.MAX_DEPTH):\n try:\n if cls._walk_schema_step_up(tree_stack) is result:\n return result\n except IndexError:\n raise FormatError(cls, \"Format tree structure inconsistency\"\n \" detected\")\n else:\n raise FormatError(cls, \"INFLOOP detected\")", "title": "" }, { "docid": "c5734b7f1158d39406a0c42bf19d0d72", "score": "0.49281266", "text": "def parse_metashare(directory, type=None):\n resources = {}\n\n for filename in os.listdir(directory):\n if not filename.endswith(\".xml\"):\n continue\n\n path = os.path.join(directory, filename)\n resource = {}\n name_sv = \"\"\n name_en = \"\"\n description_sv = \"\"\n description_en = \"\"\n lang = \"\"\n\n # Parse xml\n xml = etree.parse(path)\n ns = \"{http://www.ilsp.gr/META-XMLSchema}\"\n # prevent etree from printing namespaces in the resulting xml file\n etree.register_namespace(\"\", \"http://www.ilsp.gr/META-XMLSchema\")\n\n # Get idenfification info\n identificationInfo = xml.find(ns + \"identificationInfo\")\n\n # Get identifier\n # shortname = identificationInfo.find(ns + \"resourceShortName\")\n # resources[shortname.text] = resource\n # resources[shortname.text][\"id\"] = shortname.text\n # Use file ID for now because things break for parallel corpora otherwiese\n fileid = filename.split(\".\")[0]\n resources[fileid] = resource\n resources[fileid][\"id\"] = fileid\n\n resource[\"type\"] = type\n\n # Get language\n lang = xml.findall(\".//\" + ns + \"languageInfo\")\n resource[\"lang\"] = []\n for i in lang:\n lang_dict = {}\n lang_dict[\"code\"] = i.find(ns + \"languageId\").text\n lang_dict[\"name_en\"] = i.find(ns + \"languageName\").text\n lang_dict[\"name_sv\"] = translate(i.find(ns + \"languageName\").text)\n resource[\"lang\"].append(lang_dict)\n\n # Get name\n for i in identificationInfo.findall(ns + \"resourceName\"):\n if i.attrib[\"lang\"] == \"eng\" and i.text:\n name_en = i.text\n if i.attrib[\"lang\"] == \"swe\":\n name_sv = i.text\n resource[\"name_sv\"] = name_sv\n resource[\"name_en\"] = name_en\n\n # Get description\n for i in identificationInfo.findall(ns + \"description\"):\n if i.attrib[\"lang\"] == \"eng\" and i.text:\n description_en = i.text\n if i.attrib[\"lang\"] == \"swe\" and i.text:\n description_sv = i.text\n resource[\"description_sv\"] = description_sv\n resource[\"description_en\"] = description_en\n\n # Get distribution info\n distributionInfo = xml.find(ns + \"distributionInfo\")\n resource[\"downloads\"] = []\n resource[\"interface\"] = []\n for i in distributionInfo.findall(ns + \"licenceInfo\"):\n if i.find(ns + \"downloadLocation\") is not None:\n distro = {}\n resource[\"downloads\"].append(distro)\n distro[\"licence\"] = i.find(ns + \"licence\").text\n distro[\"restriction\"] = i.find(ns + \"restrictionsOfUse\").text\n distro[\"download\"] = i.find(ns + \"downloadLocation\").text\n if i.find(ns + \"attributionText\") is not None:\n distro[\"info\"] = i.find(ns + \"attributionText\").text\n if i.find(ns + \"downloadLocation\").text:\n download_type, format = 
get_download_type(i.find(ns + \"downloadLocation\").text)\n distro[\"type\"] = download_type\n distro[\"format\"] = format\n if i.find(ns + \"executionLocation\") is not None:\n distro = {}\n resource[\"interface\"].append(distro)\n distro[\"licence\"] = i.find(ns + \"licence\").text\n distro[\"restriction\"] = i.find(ns + \"restrictionsOfUse\").text\n distro[\"access\"] = i.find(ns + \"executionLocation\").text\n\n # Add location of meta data file\n metashare = {\n \"licence\": METASHARE_LICENCE,\n \"restriction\": METASHARE_RESTRICTION,\n \"download\": METASHAREURL + type + \"/\" + filename,\n \"type\": \"metadata\",\n \"format\": \"METASHARE\"\n }\n resource[\"downloads\"].append(metashare)\n\n # Get contact person\n contactPerson = xml.find(ns + \"contactPerson\")\n resource[\"contact_info\"] = {}\n resource[\"contact_info\"][\"surname\"] = contactPerson.find(ns + \"surname\").text\n resource[\"contact_info\"][\"givenName\"] = contactPerson.find(ns + \"givenName\").text\n resource[\"contact_info\"][\"email\"] = contactPerson.find(ns + \"communicationInfo\").find(ns + \"email\").text\n resource[\"contact_info\"][\"affiliation\"] = {}\n resource[\"contact_info\"][\"affiliation\"][\"organisation\"] = contactPerson.find(ns + \"affiliation\").find(ns + \"organizationName\").text\n resource[\"contact_info\"][\"affiliation\"][\"email\"] = contactPerson.find(ns + \"affiliation\").find(ns + \"communicationInfo\").find(ns + \"email\").text\n\n # Get size info\n sizes = xml.findall(\".//\" + ns + \"sizeInfo\")\n resource[\"size\"] = {}\n for i in sizes:\n unit = i.find(ns + \"sizeUnit\").text\n resource[\"size\"][unit] = i.find(ns + \"size\").text\n\n return resources", "title": "" }, { "docid": "ad5967011766a198b19a7ae66c98aed0", "score": "0.49186856", "text": "def _gen_map(self):\n pass", "title": "" }, { "docid": "e0712a9588fc0e943ccb1639af8fa1dc", "score": "0.49174058", "text": "def _generate_datafiles(self, base_path:str, species_name:str): \n species_datafile = {}\n for dirpath, dirnames, filenames in os.walk(base_path):\n for datafile in filenames:\n logger.debug(f\"found datafile: {datafile}\") \n source_dir = \"\".join(dirpath.split(f\"/{species_name}\")[1:]).split('/')[1:] \n self.update_datafile(species_datafile, source_dir, os.path.join(base_path, datafile))\n \n return {species_name: species_datafile}", "title": "" }, { "docid": "bbb34a1c514623bb1337303863a62cd8", "score": "0.4910517", "text": "def get_key2filename_dict():\r\n return SystemXML.__key2filename_dict__", "title": "" }, { "docid": "528ff4baea9726afd56cda2214e24c84", "score": "0.49061808", "text": "def _namespaces(self):\n return set( f.name.split('.', 1)[0] for f in self.dir.glob('*.*') if f.is_file() )", "title": "" }, { "docid": "52c653cb9bdbff810cee23748f3b1db8", "score": "0.49045143", "text": "def get_all_files(self):\n pathnames = self.get_pathnames()\n files = {}\n for pathname in pathnames:\n file = self.get_file_at_pathname(pathname)\n files[pathname]=file\n return files", "title": "" }, { "docid": "2bd454d31e6892c480aba7cdad1ebd0f", "score": "0.49039847", "text": "def create_feature_dict(files):\r\n dict = {}\r\n\r\n def add_to_dict( feature, file ):\r\n if not SCons.Util.is_List( feature ):\r\n feature = [ feature ]\r\n\r\n for f in feature:\r\n if f not in dict:\r\n dict[ f ] = [ file ]\r\n else:\r\n dict[ f ].append( file )\r\n\r\n for file in files:\r\n if hasattr( file, 'PACKAGING_X_MSI_FEATURE' ):\r\n add_to_dict(file.PACKAGING_X_MSI_FEATURE, file)\r\n elif hasattr( file, 'PACKAGING_DOC' ):\r\n add_to_dict( 
'PACKAGING_DOC', file )\r\n else:\r\n add_to_dict( 'default', file )\r\n\r\n return dict", "title": "" }, { "docid": "3a4bacb26c642f06c8f03e31e872839f", "score": "0.49018422", "text": "def test_find_maps_and_sizes(self):\n expected = [\n '%s/hazardmap-0.01-mean.xml' % self.output_path,\n '%s/hazardmap-0.01-quantile-0.25.xml' % self.output_path,\n '%s/hazardmap-0.01-quantile-0.50.xml' % self.output_path,\n '%s/hazardmap-0.1-mean.xml' % self.output_path,\n '%s/hazardmap-0.1-quantile-0.25.xml' % self.output_path,\n '%s/hazardmap-0.1-quantile-0.50.xml' % self.output_path,\n '%s/loss-map-0fcfdbc7.xml' % self.output_path]\n sizes = dict([(f, os.path.getsize(f)) for f in expected])\n found = find_maps(self.job)\n for output in found:\n self.assertEqual(sizes[output.path], output.size)", "title": "" }, { "docid": "16a37817aca14ded09b2f6e66b5223a4", "score": "0.4900282", "text": "def get_resolved_mappings():\n return _resolved_mappings", "title": "" }, { "docid": "58413dc442d7c1eec7bea69aed4d135f", "score": "0.4896146", "text": "def get_examples_map(input_dir):\n file_map = defaultdict(dict)\n for dir_name, subdir_names, file_names in os.walk(input_dir):\n for file_name in file_names:\n name, ext = os.path.splitext(file_name)\n if ext in ['.md', '.py', '.m']:\n language = os.path.basename(dir_name)\n file_map[name][language] = os.path.join(dir_name, file_name)\n\n return file_map", "title": "" }, { "docid": "b568c8d6182d7b3fc3ec0959eb809c0c", "score": "0.48888618", "text": "def _get_file_names():\n file_names = {}\n file_names['train'] = ['data_batch_%d' % i for i in xrange(1, 5)]\n file_names['validation'] = ['data_batch_5']\n file_names['eval'] = ['test_batch']\n return file_names", "title": "" }, { "docid": "2c92f1c405e694667b550fe216575d6e", "score": "0.48847812", "text": "def expected_static_files(self):\n expected_files = {}\n # samplesheet file with the run's paired samples\n expected_files['paired_samples'] = os.path.join(self.dir, 'samples.pairs.csv')\n # file with the original starting .fastq file paths & id's\n expected_files['samples_fastq_raw'] = os.path.join(self.dir, 'samples.fastq-raw.csv')\n # file with settings for the analysis\n expected_files['settings'] = os.path.join(self.dir, 'settings.txt')\n # summary table produced at the end of the WES pipeline\n expected_files['summary_combined_wes'] = os.path.join(self.dir, 'summary-combined.wes.csv')\n return(expected_files)", "title": "" }, { "docid": "55984eb5e12d47a423c48bad62bfc006", "score": "0.48843345", "text": "def config_files(self):\n return dict()", "title": "" }, { "docid": "a611905c3e3764e476e2dc04ab0f05a0", "score": "0.48806533", "text": "def get_distributions_with_entry_map(\n key: str\n) -> 'Iterator[tuple[Distribution, dict[str, EntryPoint]]]':\n for distribution in pkg_resources.working_set:\n if hasattr(distribution, 'get_entry_map'):\n entry_map = distribution.get_entry_map(key)\n\n if entry_map:\n yield distribution, entry_map", "title": "" }, { "docid": "2d33d562dd2a7942c9c362457d1bf246", "score": "0.48775664", "text": "def scan(self):\n scan = {} # {\"full/path/file.name\": <stat_info object>}\n for path, _, files in walk(self.base): # directories not needed\n path = abspath(path)\n for fname in files:\n full_path = join(path, fname)\n stat_info = stat(full_path)\n scan[full_path] = stat_info\n self.last_time = datetime.datetime.now()\n self.last_result = scan\n return scan", "title": "" }, { "docid": "c19ffb312aaee3dbe656022d025e515c", "score": "0.4866429", "text": "def collect_mdt_output_maps(input_dir, 
model_name, map_name):\n maps = {}\n for subject in sorted(os.listdir(input_dir)):\n full_path = os.path.join(input_dir, subject)\n if os.path.isdir(full_path):\n maps.update({subject: os.path.join(full_path, model_name, map_name + '.nii.gz')})\n return maps", "title": "" }, { "docid": "902393dad300096ddd18c3b7676cbbf7", "score": "0.48582286", "text": "def make_versionmap(self):\r\n versionmap = DictOfSets()\r\n for ( (server, shnum), (verinfo, timestamp) ) in self._known_shares.items():\r\n versionmap.add(verinfo, (shnum, server, timestamp))\r\n return versionmap", "title": "" }, { "docid": "b92e2d2bb1ef3a1826e7028d8a5f589a", "score": "0.48504123", "text": "def gather_entries(files):\n\tdata = {}\n\tfor f in files:\n\t\tdata.update(read_file(f))\n\treturn data", "title": "" }, { "docid": "6a5145cad46054b808e8b1ee3e3ff8ab", "score": "0.48466158", "text": "def __init__(self):\n self.sizes = os.listdir(\"data/maps\")\n self.maps = []\n \n for e in self.sizes:\n self.maps.append(os.listdir(\"data/maps/\" + e))", "title": "" }, { "docid": "13ce4c01739e9087c336664c7e2b9698", "score": "0.4845074", "text": "def find_images(self, docx_path):\n\t\tfilename = docx_path.split(\".\")[0]\n\t\tif not os.path.exists(self.temp_path):\n\t\t\tos.makedirs(self.temp_path)\n\t\tdocx_zip = zipfile.ZipFile(docx_path)\n\t\tdocx_zip.extractall(self.temp_path + filename)\n\t\tdocx_zip.close()\n\t\timage_paths = self.find_image_paths(self.temp_path + filename)\n\t\timage_names = self.find_image_names(self.temp_path + filename)\n\t\timages = {}\n\t\tif len(image_paths) == len(image_names):\n\t\t\tfor i in xrange(len(image_paths)):\n\t\t\t\timages[image_paths[i]] = image_names[i]\n\t\telse:\n\t\t\tfor path in image_paths:\n\t\t\t\timages[path] = \"\"\n\t\treturn images", "title": "" }, { "docid": "4b302793a0bc457454fccceb7f2be4b5", "score": "0.48420137", "text": "def load_files(self):\n \n fpath = '{0}/{1}/xml/{2}/{3}'.format(settings.RAW_DATA_PATH,\n self.ticker,\n self.ftype,\n self.date)\n print('dimitry, file path: '+str(fpath))\n\n files = os.listdir(fpath)\n sym_len = len(self.ticker) + 1\n date_len = len(self.date) + 1\n tot_len = sym_len + date_len\n required = ['pre', 'lab', 'cal', 'ins']\n found = []\n not_found = []\n print('dimitry, here are the files: ')\n print(files)\n for x in files:\n spli_t = os.path.splitext(x)[0]\n ext = spli_t[tot_len:]\n \n fname = '{0}/{1}'.format(fpath, x)\n ftype = self.validate_file(fname)\n \n if isinstance(ftype, list):\n continue\n\n if ftype == 'ins': #Load instance file\n with open(fname, 'r') as f:\n self.ins_sp = BS(f)\n found.append('ins')\n\n if ftype == 'schema':\n continue\n\n if ftype == 'cal': #Load calculations file\n with open(fname, 'r') as f:\n self.cal_sp = BS(f)\n found.append('cal')\n\n if ftype == 'def':\n continue\n\n if ftype == 'lab': #Load labels file\n with open(fname, 'r') as f:\n self.lab_sp = BS(f)\n found.append('lab')\n\n if ftype == 'pre': #Load presentation file\n with open(fname, 'r') as f:\n self.pre_sp = BS(f)\n found.append('pre')\n\n for i in required:\n if i not in found:\n not_found.append(i)\n if len(not_found) > 0:\n self.data['error'] = True\n return False", "title": "" }, { "docid": "800bd575b3c3dccc548e037b07547265", "score": "0.48233676", "text": "def test_find_maps_and_types(self):\n expected = [\n ('hazardmap-0.01-mean.xml', \"hazard_map\"),\n ('hazardmap-0.01-quantile-0.25.xml', \"hazard_map\"),\n ('hazardmap-0.01-quantile-0.50.xml', \"hazard_map\"),\n ('hazardmap-0.1-mean.xml', \"hazard_map\"),\n 
('hazardmap-0.1-quantile-0.25.xml', \"hazard_map\"),\n ('hazardmap-0.1-quantile-0.50.xml', \"hazard_map\"),\n ('loss-map-0fcfdbc7.xml', \"loss_map\")]\n found = find_maps(self.job)\n self.assertEqual(\n expected,\n list(sorted([(os.path.basename(o.path), o.output_type)\n for o in found])))", "title": "" }, { "docid": "c5d795c71de0ab3fe92a92003a8dc7f5", "score": "0.4821001", "text": "def validate(self):\n error_list = {}\n for document, schema in self.document_schema_pairs:\n LOG.info(\n 'Validating document %s using schema %s', document, schema)\n errors = self._validate_file(document, schema)\n if errors:\n for error in errors:\n LOG.error(error.message)\n error_list[document] = errors\n\n return error_list", "title": "" }, { "docid": "7d0da5a81a5133827d7c153625c58f87", "score": "0.4817335", "text": "def get_all(self) -> Dict[str, Any]:\n global REGISTRY\n result = {}\n if self.entry_points:\n result.update(self.get_entry_points())\n # Create a copy of the global registry in case it gets modified during iteration.\n for keys, value in REGISTRY.copy().items():\n if len(self.namespace) == len(keys) - 1 and keys[:-1] == self.namespace:\n result[keys[-1]] = value\n return result", "title": "" }, { "docid": "16e3e6f8faf38c27d70992a19fd40345", "score": "0.4817004", "text": "def get_gwms_ces(repo_dir, production: bool = True) -> Set:\n config_glob = '{0}/*.xml'.format(repo_dir)\n factory_configs = glob(config_glob)\n if production:\n # Non-production entries are stored in '*-itb.xml'\n factory_configs = [filename for filename in factory_configs if '-itb.xml' not in filename]\n\n gwms_ces = set()\n for config_file in factory_configs:\n config_et = ElementTree.parse(config_file)\n gwms_ces.update(_parse_gwms_config(config_et))\n\n return gwms_ces", "title": "" }, { "docid": "a4393dde8376ce0380124166e381b732", "score": "0.4815021", "text": "def get_key2filename_dict():\r\n return PCIXML.__key2filename_dict__", "title": "" }, { "docid": "18152d9685270b7708fd95e52c4c627e", "score": "0.481124", "text": "def read_map_files(self):\n for map_path in self.map_paths_list:\n with open(map_path, 'r') as f:\n self.word_dicts_list.append(json.load(f)[str(self.reduce_index)])", "title": "" }, { "docid": "35c3312f89abf90429290700bb7bae5e", "score": "0.4811081", "text": "def getMetadataStandards(self):\n schemas = {}\n XSI = 'http://www.w3.org/2001/XMLSchema-instance'\n\n try:\n requestHelper = RequestHelper(self.endpoint, self.logger)\n requestHelper.setAcceptType(AcceptTypes.default)\n neg_source, rss_response = requestHelper.content_negotiate('FsF-F2-01M')\n if requestHelper.response_content is not None:\n feed = feedparser.parse(requestHelper.response_content)\n #print(feed.namespaces)\n for namespace_pre, namespace_uri in feed.namespaces.items():\n if namespace_uri not in self.namespaces:\n self.namespaces.append(str(namespace_uri))\n schemas[str(namespace_pre)] = str(namespace_uri)\n except Exception as e:\n print('RSS Error ',e)\n self.logger.info('{0} : Could not parse response retrieved from RSS/Atom Feed endpoint -: {1}'.format(\n self.metric_id, str(e)))\n\n return schemas", "title": "" }, { "docid": "645d117aa7c3ee54895978236731a47f", "score": "0.4807647", "text": "def tree_libs(start_path, filt_func = None):\n lib_dict = {}\n for dirpath, dirnames, basenames in os.walk(start_path):\n for base in basenames:\n fname = pjoin(dirpath, base)\n if not filt_func is None and not filt_func(fname):\n continue\n for install_name in get_install_names(fname):\n if install_name in lib_dict:\n 
lib_dict[install_name].add(fname)\n else:\n lib_dict[install_name] = set([fname])\n return lib_dict", "title": "" }, { "docid": "8a4dac4fb67c2c71d6683489ee2e455d", "score": "0.4804388", "text": "def _resourceLocations():\n res = []\n for path in geom_env.resource_dirs:\n res.append((path, \"FileSystem\", getResourceGroup()))\n \n import srs_engine.environment\n res.append((geom_env.res_gui_dir, \"FileSystem\", srs_engine.environment.GUI_resource_group))\n \n return res", "title": "" }, { "docid": "ad2ad5a22a1386ed88ea9487eea6ef89", "score": "0.47893554", "text": "def _init_structure_symbols_according_to_topics_they_satisfice(self):\n topic_satisficers = {}\n for symbol in self.nonterminal_symbols:\n for topic_name in symbol.topics_addressed:\n if topic_name not in topic_satisficers:\n topic_satisficers[topic_name] = set()\n topic_satisficers[topic_name].add(symbol)\n return topic_satisficers", "title": "" }, { "docid": "e1f0445259f7a49cb4b300200eb164ef", "score": "0.47884196", "text": "def __all_files(self):\n for head, files in self._files_local.items():\n for file in files.keys():\n yield file", "title": "" }, { "docid": "e165c3fb096474314949b62875c51c31", "score": "0.4785727", "text": "def _generate_data_file_list(self) -> None:\n generated_list = []\n for path_dict in self.dataset_type_list:\n current_list = []\n for key, value in path_dict.items():\n # Skip key == 'id' or 'classes', they are not valid paths.\n if key in ['id', 'classes']: continue\n # Add all files to list.\n current_list.append(value)\n generated_list.append(current_list)\n self.data_file_list = generated_list", "title": "" }, { "docid": "ed0e0b6595554801c20e6679b519e930", "score": "0.47743374", "text": "def load_mappings(self, update_mappings):\n socken_file = os.path.join(MAPPINGS_DIR, 'socken.json')\n kommun_file = os.path.join(MAPPINGS_DIR, 'kommun.json')\n countries_file = os.path.join(MAPPINGS_DIR, 'countries_for_cats.json')\n tags_file = os.path.join(MAPPINGS_DIR, 'tags.json')\n primary_classes_file = os.path.join(\n MAPPINGS_DIR, 'primary_classes.json')\n photographer_file = os.path.join(MAPPINGS_DIR, 'photographers.json')\n kmb_files_file = os.path.join(MAPPINGS_DIR, 'kmb_files.json')\n commonscat_file = os.path.join(MAPPINGS_DIR, 'commonscat.json')\n church_file = os.path.join(MAPPINGS_DIR, 'churches.json')\n photographer_page = 'Institution:Riksantikvarieämbetet/KMB/creators'\n\n if update_mappings:\n query_props = {'P373': 'commonscat'}\n self.mappings['socken'] = KMBInfo.query_to_lookup(\n KMBInfo.build_query('P777', optional_props=query_props.keys()),\n props=query_props)\n self.mappings['kommun'] = KMBInfo.query_to_lookup(\n KMBInfo.build_query('P525', optional_props=query_props.keys()),\n props=query_props)\n self.mappings['photographers'] = self.get_photographer_mapping(\n photographer_page)\n self.mappings['kmb_files'] = self.get_existing_kmb_files()\n self.mappings['commonscat'] = {'bbr': {}, 'fmis': {}}\n KMBInfo.get_commonscat_from_heritage(\n 'se-bbr', limit=1000,\n data=self.mappings['commonscat']['bbr'])\n KMBInfo.get_commonscat_from_heritage(\n 'se-fornmin', limit=1000,\n data=self.mappings['commonscat']['fmis'])\n self.load_wikidata_bbr_fmis_commonscat()\n\n # dump to mappings\n common.open_and_write_file(\n socken_file, self.mappings['socken'], as_json=True)\n common.open_and_write_file(\n kommun_file, self.mappings['kommun'], as_json=True)\n common.open_and_write_file(\n photographer_file, self.mappings['photographers'],\n as_json=True)\n common.open_and_write_file(\n kmb_files_file, 
self.mappings['kmb_files'], as_json=True)\n common.open_and_write_file(\n commonscat_file, self.mappings['commonscat'], as_json=True)\n else:\n self.mappings['socken'] = common.open_and_read_file(\n socken_file, as_json=True)\n self.mappings['kommun'] = common.open_and_read_file(\n kommun_file, as_json=True)\n self.mappings['photographers'] = common.open_and_read_file(\n photographer_file, as_json=True)\n self.mappings['kmb_files'] = common.open_and_read_file(\n kmb_files_file, as_json=True)\n self.mappings['commonscat'] = common.open_and_read_file(\n commonscat_file, as_json=True)\n\n self.mappings['countries'] = common.open_and_read_file(\n countries_file, as_json=True)\n self.mappings['churches'] = common.open_and_read_file(\n church_file, as_json=True)\n self.mappings['tags'] = common.open_and_read_file(\n tags_file, as_json=True)\n self.mappings['primary_classes'] = common.open_and_read_file(\n primary_classes_file, as_json=True)\n\n pywikibot.output('Loaded all mappings')", "title": "" }, { "docid": "445fcd46d695ad4f4dcb8cc97b144a4f", "score": "0.47707215", "text": "def get_published_files(self):\n files = defaultdict(list)\n for file in self.files:\n if file.file_type.publishable:\n files[file.file_type].append(file)\n return dict(files)", "title": "" }, { "docid": "9370062007781475c75da86fdeea33e1", "score": "0.47625017", "text": "def xml_po_files(self):\n\t\tif self.xml_definition is None:\n\t\t\treturn\n\t\tdirpath = os.path.dirname(self.xml_definition)\n\t\tfor lang in LANGUAGES:\n\t\t\tpath = os.path.join(dirpath, '%s.po' % lang)\n\t\t\tyield (lang, path)", "title": "" }, { "docid": "2f2f0280d8d4713b131efdad671b80ce", "score": "0.47613555", "text": "def silkschema_to_minischema(xml):\n obj_from_xml = objectify.fromstring(xml)\n schemas = []\n for schema in obj_from_xml.iterchildren(tag=\"silk\"):\n container = Empty()\n container.type = str(schema.get(\"typename\"))\n base = schema.find(\"base\")\n if base:\n base = base[0]\n container.base = str(base)\n container.properties = OrderedDict()\n container.required = []\n container.init = []\n container.order = []\n\n def parse_enum(txt):\n options = stringparse(txt)\n ret = []\n for option in options:\n if len(option) >= 2 \\\n and option[0] == option[-1] \\\n and option[0] in (\"'\", '\"'):\n option = option[1:-1]\n ret.append(option)\n return ret\n # TODO: Delete, Include\n for member in schema.iterchildren(tag=\"member\"):\n name = str(member.name)\n if hasattr(member, \"enum\"):\n container.properties[name] = { \"Enum\": parse_enum(member.enum.text) }\n else:\n container.properties[name] = str(member.type)\n if not hasattr(member, \"init\") and not member.get(\"optional\", False):\n container.required.append(name)\n if hasattr(member, \"init\"):\n container.init.append(name)\n container.order.append(name)\n for attr in \"properties\", \"required\", \"init\", \"order\":\n if not len(getattr(container, attr)):\n delattr(container, attr)\n schemas.append(container._dic)\n return schemas", "title": "" }, { "docid": "f19640cf8174c727d31025eec9d0feb3", "score": "0.4759759", "text": "def map_samples_to_files(self):\n # Compare source name values against files\n sample2datafile = defaultdict(list)\n # Find source name and data file index\n sample_node = \"sourcename\"\n if self.submission_type == \"microarray\":\n data_node = \"arraydatafile\"\n else:\n data_node = \"scanname\"\n layout_comment = \"library_layout\"\n # The fist columns for each node (expecting only one raw data column)\n sample_index = 
next(iter(self.header_dict.get(sample_node, [])), None)\n data_index = next(iter(self.header_dict.get(data_node, [])), None)\n layout_index = next(iter([i for i, x in enumerate(self.sdrf_header)\n if re.search(layout_comment, x, flags=re.IGNORECASE)]), None)\n # Collect all data files for each sample\n if sample_index is not None and data_index is not None:\n for row in self.sdrf:\n if layout_index is not None:\n name = str(row[sample_index])+\"~\"+str(row[layout_index])\n sample2datafile[name].append(row[data_index])\n else:\n sample2datafile[row[sample_index]].append(row[data_index])\n\n return sample2datafile", "title": "" }, { "docid": "f5a6d0b3ac39972a83784a70d6bf9e2b", "score": "0.4756389", "text": "def get_static_dirs():\n package = pkg_resources.Requirement.parse (\"bqserver\")\n package_path = pkg_resources.resource_filename(package,'bq')\n return [(package_path, os.path.join(package_path, 'graph', 'public'))]", "title": "" }, { "docid": "93bfb84aefe9a7e7ced0b3edcd397a70", "score": "0.4755776", "text": "def test_create_image_stream_mapping_for_all_namespaces(self):\n pass", "title": "" }, { "docid": "886c903d39008c5035aaf8810f8ecf8c", "score": "0.4751005", "text": "def get_schemas(path):\n return listdir(path)", "title": "" }, { "docid": "60aee8e6b3dbfd28a0b399935d3ecc44", "score": "0.47488874", "text": "def _validate_imports_map(imports_map):\n errors = []\n for short_path, paths in imports_map.items():\n for path in paths:\n if not os.path.exists(path):\n errors.append((short_path, path))\n if errors:\n log.error(\"Invalid imports_map entries (checking from root dir: %s)\",\n os.path.abspath(\".\"))\n for short_path, path in errors:\n log.error(\" file does not exist: %r (mapped from %r)\", path, short_path)\n return errors", "title": "" }, { "docid": "52a89e85c918ba06427895f771f58cb8", "score": "0.47438", "text": "def build_file_map(self, file_dict, file_type_list, upload_files, submission):\n for file_type in file_type_list:\n # if file_type not included in request, skip it, checks for validity are done before calling this\n if not file_dict.get(file_type):\n continue\n file_reference = file_dict.get(file_type)\n try:\n file_name = file_reference.filename\n except Exception:\n return JsonResponse.error(Exception('{} parameter must be a file in binary form'.format(file_type)),\n StatusCode.CLIENT_ERROR)\n if file_name:\n split_file_name = os.path.splitext(file_name)\n fillin_vals = {\n 'submission_id': submission.submission_id,\n 'raw_filename': split_file_name[0],\n 'timestamp': get_timestamp(),\n 'ext': split_file_name[1]\n }\n if not submission.is_fabs:\n fillin_vals['FYP'] = filename_fyp_sub_format(submission)\n upload_filename = SUBMISSION_FILENAMES[FILE_TYPE_DICT_NAME_LETTER[file_type]].format(**fillin_vals)\n if not self.is_local:\n upload_name = '{}/{}'.format(submission.submission_id, upload_filename)\n else:\n upload_name = os.path.join(self.server_path, upload_filename)\n\n upload_files.append(FileHandler.UploadFile(\n file_type=file_type,\n upload_name=upload_name,\n file_name=file_name,\n file_letter=FILE_TYPE_DICT_LETTER[FILE_TYPE_DICT[file_type]]\n ))", "title": "" }, { "docid": "8281ca7522d40ad089bb0b25a1c94cbd", "score": "0.47432125", "text": "def get_element_paths_iterator():\n elements_root = os.path.join(get_repository_root_path())\n for dirpath, dirnames, filenames in os.walk(elements_root):\n for filename in filenames:\n if filename.endswith('.xml'):\n yield os.path.join(dirpath, filename)", "title": "" }, { "docid": "ef99ded0c406ff58de69f66f4f84563b", 
"score": "0.4742476", "text": "def _getContentTypesRegistry(filename=None):\n if filename is None:\n filename = _getContentTypesFile()\n\n suffixMap = {}\n regexMap = {}\n filenameMap = {}\n log.debug('load content types file: %r' % filename)\n try:\n fin = open(filename)\n except IOError:\n return\n while 1:\n line = fin.readline()\n if not line: break\n words = line.split()\n for i in range(len(words)):\n if words[i][0] == '#':\n del words[i:]\n break\n if not words: continue\n contentType, patterns = words[0], words[1:]\n if not patterns:\n if line[-1] == '\\n': line = line[:-1]\n raise PreprocessError(\"bogus content.types line, there must \"\\\n \"be one or more patterns: '%s'\" % line)\n for pattern in patterns:\n if pattern.startswith('.'):\n if sys.platform.startswith(\"win\"):\n # Suffix patterns are case-insensitive on Windows.\n pattern = pattern.lower()\n suffixMap[pattern] = contentType\n elif pattern.startswith('/') and pattern.endswith('/'):\n regexMap[re.compile(pattern[1:-1])] = contentType\n else:\n filenameMap[pattern] = contentType\n fin.close()\n return suffixMap, regexMap, filenameMap", "title": "" }, { "docid": "5851cc2212cdd3623e962d27ac235397", "score": "0.4740644", "text": "def get_xml_list(xml_path):\n for a, b, files in os.walk(xml_path):\n return files", "title": "" }, { "docid": "828a4bfda3ef4851ba48838684f26b84", "score": "0.47393087", "text": "def _available_services():\n available_services = dict()\n for launch_dir in _launchd_paths():\n for root, dirs, files in salt.utils.path.os_walk(launch_dir):\n for filename in files:\n file_path = os.path.join(root, filename)\n # Follow symbolic links of files in _launchd_paths\n true_path = os.path.realpath(file_path)\n # ignore broken symlinks\n if not os.path.exists(true_path):\n continue\n\n try:\n # This assumes most of the plist files\n # will be already in XML format\n with salt.utils.files.fopen(file_path):\n plist = plistlib.readPlist(salt.utils.data.decode(true_path))\n\n except Exception: # pylint: disable=broad-except\n # If plistlib is unable to read the file we'll need to use\n # the system provided plutil program to do the conversion\n cmd = '/usr/bin/plutil -convert xml1 -o - -- \"{}\"'.format(true_path)\n plist_xml = __salt__[\"cmd.run_all\"](cmd, python_shell=False)[\n \"stdout\"\n ]\n plist = plistlib.readPlistFromBytes(\n salt.utils.stringutils.to_bytes(plist_xml)\n )\n\n try:\n available_services[plist.Label.lower()] = {\n \"filename\": filename,\n \"file_path\": true_path,\n \"plist\": plist,\n }\n except AttributeError:\n # As of MacOS 10.12 there might be plist files without Label key\n # in the searched directories. 
As these files do not represent\n # services, thay are not added to the list.\n pass\n\n return available_services", "title": "" }, { "docid": "2cf27023b12c4905fe9fe03a35d8a7ce", "score": "0.47367263", "text": "def __mappingExistingFiles():\n database_files = [f for f in listdir(DATABASE_PATH) if path.isfile(path.join(DATABASE_PATH, f))] # Get the name of all files in database folder\n\n for f in database_files: # For each file in database, create a dict for it in cache\n if(f not in FILES.keys()):\n FILES[f] = {}", "title": "" }, { "docid": "622969f12ebe71e60973cbc036e73b35", "score": "0.47324058", "text": "def map_files(self):\n self.filepaths = [os.path.join(dp, f) for dp, dn, filenames in os.walk(self.folderpath) for f in filenames if (os.path.splitext(f)[1]).lower() == self.fileext]", "title": "" }, { "docid": "a2638e29d2aa7a28ef15741b1a111e4b", "score": "0.4728341", "text": "def _loadDeviceDefinitions(self, urlOfXMLDefinition, xml):\n\n # extract the base path of the given XML to make sure any relative URL later will be created correctly\n url = urlparse(urlOfXMLDefinition)\n baseURIPath = url.path.rpartition('/')[0] + \"/\"\n\n try:\n root = ET.fromstring(xml)\n except Exception as e:\n raise ValueError(\"Can not parse CPE definitions '\" + urlOfXMLDefinition + \"': \" + str(e))\n\n self.__deviceServiceDefinitions = {}\n self.__deviceSCPD = {}\n self.__deviceInformations = {'rootURL': urlOfXMLDefinition}\n self.__deviceUnknownKeys = {}\n self.__deviceXMLInitialized = False\n\n # iterate through all the informations\n self._iterateToFindSCPDElements(root, baseURIPath)\n self.__deviceXMLInitialized = True", "title": "" }, { "docid": "1374e8c6063ffd2e7bcaf5e13d66ed12", "score": "0.47260112", "text": "def __build_tbl_map(database):\n return [\n ( PERSON_TBL, database.person_map.db),\n ( FAMILY_TBL, database.family_map.db),\n ( PLACES_TBL, database.place_map.db),\n ( SOURCES_TBL, database.source_map.db),\n ( CITATIONS_TBL, database.citation_map.db),\n ( REPO_TBL, database.repository_map.db),\n ( NOTE_TBL, database.note_map.db),\n ( MEDIA_TBL, database.media_map.db),\n ( EVENTS_TBL, database.event_map.db),\n ( TAG_TBL, database.tag_map.db),\n ( META, database.metadata.db),\n ]", "title": "" }, { "docid": "cd14cf693aa1fdf14e657e4af9c51627", "score": "0.47243875", "text": "def _setup_loading_dictionary():\n LD = {} # fuel loading dictionary\n for t in zip(*LoadDefs)[1]: # lists internal tags\n LD[t] = []\n return LD", "title": "" }, { "docid": "447f09639e79420640e3824fd54e48b1", "score": "0.47228718", "text": "def _scrapeXmlFile(self, xmlFileName):\n tree = ET.parse(xmlFileName)\n root = tree.getroot()\n # Child here can be of type 'string' or 'string-array' or maybe more\n resourcesInfo = []\n for resource in root:\n listOfDicts = self._getInfoFromResourceTag(resource)\n for resourceDict in listOfDicts:\n resourcesInfo.append(resourceDict)\n return resourcesInfo", "title": "" }, { "docid": "732cd56437acfd4bd985be60c5666f10", "score": "0.4721027", "text": "def all_roots(cls):\n return dict(cls._TYPES_BY_ROOT)", "title": "" }, { "docid": "14e3ad688b32d3442fd345d4dd916f1a", "score": "0.47022498", "text": "def get_maps(self):\n\n\t\treturn self.__maps", "title": "" }, { "docid": "0d1956fbf78af790134c941756858d5d", "score": "0.47019643", "text": "def generate_dump():\n with open('_distro_map.yml') as distro_map_yml:\n distro_map = yaml.load(distro_map_yml)\n for distro in distro_map:\n site_folder = distro_map[distro]['site']\n data = defaultdict(list)\n if not distro_exists(site_folder):\n 
continue\n sitemap = find_and_parse_sitemap(site_folder)\n urls = sitemap['urlset']['url']\n site_name = urls[0]['loc']\n for url in urls:\n # If the url is equal to site_name or url has index.html\n # Then ignore it since they won't have any documentation\n if site_name == url['loc'] or 'index.html' in url['loc']:\n continue\n # HACK\n # Things act differently if the site url does not end with /\n # so appending / it if not present\n if site_name[-1] != '/':\n site_name += '/'\n topic_path = url['loc'].replace(site_name, \"\")\n doc_content = parse_html_doc('_package' + '/' + site_folder + '/' + topic_path)\n version = topic_path[:topic_path.index('/')]\n if doc_content:\n data[version].append({\n \"topic_url\": topic_path,\n \"title\": doc_content['title'],\n \"content\": doc_content['content'],\n \"site_name\": site_name\n })\n for version in data:\n dump_file = open('{}/data_{}.json'.format('_package/' + site_folder + '/',\n version), 'w+')\n json.dump(data[version], dump_file)\n dump_file.close()\n if verbose:\n print(\"File Created in: \" + os.path.realpath(dump_file.name))\n\n copy_static_assets('_package/{}/'.format(site_folder))\n versions_file = open('{}/versions.json'.format('_package/' + site_folder + '/'), 'w+')\n json.dump({\"versions\": list(data.keys())}, versions_file)\n versions_file.close()\n\n if verbose:\n print(\"File Created in: \" + os.path.realpath(versions_file.name))", "title": "" }, { "docid": "a14a1b3c48e0b5778f421a56ca5dcb28", "score": "0.4699618", "text": "def generate_mappings_standard_validators(self) -> List[ValidatorFieldMap]:\n\n items = self.field_validator_names.items()\n standard_validators = {field: validator\n for field, validator in items\n if field != \"*\"}\n to_ignore = set(self.field_validator_names.get(\"*\", []))\n\n mappings = []\n for field, validators in standard_validators.items():\n for name in validators:\n if name in to_ignore:\n continue\n\n _validator = NamedRef(\n name=name,\n ref=self.get_reference(name))\n _field = NamedRef(\n name=field,\n ref=self.get_reference(field)\n )\n mapping = ValidatorFieldMap(\n validator=_validator,\n field=_field,\n is_asterisk=False\n )\n mappings.append(mapping)\n\n return mappings", "title": "" }, { "docid": "5e4de6b1723a25298513d3414a9a0669", "score": "0.4696792", "text": "def build_file_list():\n p = Path(\"../source/official\")\n return sorted(p.glob(\"**/*.png\"))", "title": "" }, { "docid": "1c97871a966d752909912514d567b441", "score": "0.46920088", "text": "def collect_keys(self):\n\n if self.layer_type.value == \"spatial\":\n return [projected_extent_decoder(key) for key in self.srdd.collectKeys()]\n else:\n return [temporal_projected_extent_decoder(key) for key in self.srdd.collectKeys()]", "title": "" }, { "docid": "073b0feeb25c7cc27739f80cf3764cc4", "score": "0.46914393", "text": "def _GetTemplateMappings(self):\n format_definition = self._GetFormatDefinitions()\n\n template_mappings = {}\n\n authors = format_definition.metadata.get('authors', None)\n if authors:\n template_mappings['authors'] = ', '.join(authors)\n\n year = format_definition.metadata.get('year', None)\n if year:\n date = datetime.date.today()\n if year != date.year:\n copyright_years = '{0:d}-{1:d}'.format(year, date.year)\n else:\n copyright_years = '{0:d}'.format(year)\n\n template_mappings['copyright'] = copyright_years\n\n template_mappings['prefix'] = self._prefix\n template_mappings['prefix_upper_case'] = self._prefix.upper()\n\n return template_mappings", "title": "" }, { "docid": 
"9f37f922e82a6aeb4afb77bb00f39b8f", "score": "0.46895683", "text": "def _buildVtxMaps(self):\n print \"\\t ---> Vertex Maps ...\"\n self.vtxMaps = dict()\n for mapName in pCloth.getVtxMaps(self.shapeName):\n if pCloth.getVtxMapType(self.shapeName, '%sMapType' % mapName) == 1:\n self.vtxMaps['%sPerVertex' % mapName] = pCloth.getVtxMapData(self.shapeName, '%sPerVertex' % mapName)", "title": "" }, { "docid": "01f41e549edcf41edfaea4dec6e21156", "score": "0.46851927", "text": "def nsmap(self):\n\n NSMAP = dict()\n # solve 3) by using a set\n for k, v in set(self.namespaces):\n s_prefix = self.sb[k]\n s_uri = self.sb[v]\n # Solve 2) & 4) by not including\n if s_uri != \"\" and s_prefix != \"\":\n # solve 1) by using the last one in the list\n NSMAP[s_prefix] = s_uri.strip()\n\n return NSMAP", "title": "" }, { "docid": "63519694a37ee2104a89e8351cb75a78", "score": "0.4682707", "text": "def get_public_api_definitions() -> Set[Definition]:\n code_file_contents = FileContents.create_from_local_files(\n _default_code_absolute_paths()\n )\n\n code_parser = CodeParser(file_contents=code_file_contents)\n\n public_api_checker = PublicAPIChecker(code_parser=code_parser)\n\n return public_api_checker.get_all_public_api_definitions()", "title": "" }, { "docid": "702902022bfb5574140e775c51dca9e4", "score": "0.46802932", "text": "def get_map(filename):\n file = check_file(filename)\n content = file.read().splitlines()\n result = get_all_line_name(content)\n requirements = get_requirements(content)\n return result, requirements", "title": "" }, { "docid": "b72134b70259c3b936dac53b04d8ed96", "score": "0.46783093", "text": "def get_element_dictionary():\n # we keep a mapping from names to \n element_dictionary = dict()\n cvode_element = get_cvode_solver_element(element_dictionary)\n\n solver_choices = [ cvode_element ]\n solver_element = OneOfElement(\"Solver\", solver_choices)\n element_dictionary[solver_element.name] = solver_element\n\n setup_element = get_setup_element(element_dictionary)\n\n config_children = [ solver_element, setup_element ]\n config_element = ContainerElement(\"OFWConfig\", config_children)\n element_dictionary[config_element.name] = config_element\n return element_dictionary", "title": "" } ]
58b4c21a6b2aa69ec9a7f44cb57cdf77
Should erase itself from self.surface and return a list of dirty rectangles
[ { "docid": "0f01a94c876eb3204446440d5d404949", "score": "0.0", "text": "def erase(self):\r\n raise Exception(\"erase not implemented\")", "title": "" } ]
[ { "docid": "ba4440c20abcd1b1a8ef3e790b5735c9", "score": "0.740515", "text": "def erase(self):\r\n if self.is_focus:\r\n dirty_rect=pygame.draw.circle(self.surface, 0, (int(self.cx),int(self.cy)), self.radius, self.border_width)\r\n else:\r\n dirty_rect=pygame.draw.circle(self.surface, 0, (int(self.cx),int(self.cy)), int(self.radius/2.0), self.border_width)\r\n return [dirty_rect]", "title": "" }, { "docid": "04a49da908a0a1ecd1d2895c691a086d", "score": "0.63233846", "text": "def get_rectangles(self) -> List[Tuple[Tuple[int, int, int, int],\r\n Tuple[int, int, int]]]:\r\n if self.data_size == 0:\r\n return []\r\n if len(self._subtrees) == 0:\r\n return [(self.rect, self._colour)]\r\n else:\r\n lst = []\r\n for subtree in self._subtrees:\r\n if self._expanded:\r\n lst.extend(subtree.get_rectangles())\r\n else:\r\n lst.extend([(self.rect, self._colour)])\r\n return lst\r\n return lst", "title": "" }, { "docid": "becf431934ee40adf379753f599f5d67", "score": "0.623961", "text": "def draw(self, surface):\n offsets = [0,0]\n changes = []\n for sb in self.sprites:\n offsets[sb.axis] = sb.get_scrolled()[sb.axis]\n if sb.dirty:\n changes.extend(sb.draw(surface))\n if changes:\n # Comment out this blit to see just the scrollbars.\n # To date, don't add to changes since the sb includes it.\n surface.blit(self.world, self.viewRect.topleft,\n (offsets, self.viewRect.size))\n if self.pad and not self.pretty:\n pygame.draw.rect(\n self.pane, self.bgColor,\n self.viewRect.inflate(self.pad+1,self.pad+1), self.pad)\n\n return changes", "title": "" }, { "docid": "65eb1a172c1723ce5cb3b90056d0aee1", "score": "0.6121684", "text": "def draw(self, surface):\n if self.dirty and surface:\n self.dirty = False\n pygame.draw.rect(self.image, self.bgColor, self.rect, 0) \n if self.pretty:\n self.drawPretty()\n else:\n pygame.draw.rect(self.image, self.fgColor, self.knob, 0)\n if self.pad:\n pygame.draw.rect(self.image,\n self.bgColor, self.knob, self.pad)\n return [surface.blit(self.image, self.initTopleft)]\n else:\n return []", "title": "" }, { "docid": "007bc23442ce7073450214e1a8a80952", "score": "0.6065546", "text": "def _clear_rectangle(self):\n self._rectangles.pop()\n self._update_cross_section_plot()\n print(self._rectangles)", "title": "" }, { "docid": "6c8ba6a40ad7fd78c707aa99e07574f7", "score": "0.6023195", "text": "def removeChildrenRects(rects):\n cleaned_rects_vcenter=[]\n cleaned_rects_hcenter=[]\n only_external = []\n is_external = []\n for i in range(len(rects)):\n cx, cy, cw, ch = rects[i]\n has_parent = False\n for j in range(len(rects)):\n if (j != i):\n x,y,w,h = rects[j]\n if ( (cx >= x) and (cy >= y) and ((cx + cw) <= (x+w)) and ((cy + ch) <= (y + h))):\n has_parent = True\n break\n #else:\n # continue\n #else:\n # continue\n\n if(has_parent):\n is_external.append(False)\n else:\n is_external.append(True)\n\n for i in range(len(rects)):\n if(is_external[i]):\n only_external.append(rects[i])\n cleaned_rects_vcenter.append(rects[i][1] + rects[i][3]//2)\n cleaned_rects_hcenter.append(rects[i][0] + rects[i][2]//2)\n\n cleaned_rects_vcenter=np.array(cleaned_rects_vcenter).reshape(-1,1)\n cleaned_rects_hcenter=np.array(cleaned_rects_hcenter).reshape(-1,1)\n\n return only_external, cleaned_rects_vcenter, cleaned_rects_hcenter", "title": "" }, { "docid": "8af6192ebe0c24c4592e0cd5aba1ea53", "score": "0.60077006", "text": "def sub_rect(r1, r2):\n if not r1.colliderect(r2):\n return [r1]\n \n clip = r2.clip(r1)\n \n #first we have to see if there is a complete rect to each side of the removing rect\n 
need_com_left = False\n need_com_right = False\n need_com_top = False\n need_com_bottom = False\n \n if clip.left > r1.left:\n need_com_left = True\n if clip.right < r1.right:\n need_com_right = True\n if clip.top > r1.top:\n need_com_top = True\n if clip.bottom < r1.bottom:\n need_com_bottom = True\n \n left = None\n right = None\n top = None\n bottom = None\n \n if need_com_left: #ok, we also need to check top and bottom here...\n t = r1.top\n b = r1.bottom - r1.top\n l = r1.left\n r = clip.left - r1.left\n if need_com_top: #we need to cut a bit off the top of this\n t = clip.top\n if need_com_bottom:\n b = clip.bottom - t\n \n left = pygame.Rect(l, t, r, b)\n if need_com_right:\n t = r1.top\n b = r1.bottom - r1.top\n l = clip.right\n r = r1.right - clip.right\n if need_com_top:\n t = clip.top\n if need_com_bottom:\n b = clip.bottom - t\n \n right = pygame.Rect(l, t, r, b)\n if need_com_top:\n top = pygame.Rect(r1.left, r1.top, r1.width, clip.top-r1.top)\n if need_com_bottom:\n bottom = pygame.Rect(r1.left, clip.bottom, r1.width, r1.bottom-clip.bottom)\n \n ret = []\n for i in [left, right, top, bottom]:\n if i: ret.append(i)\n \n return ret", "title": "" }, { "docid": "44563bf42782d813db2ea566d88c7aeb", "score": "0.5814195", "text": "def get_bounding_boxes(self) -> []:\n return self.rectangles", "title": "" }, { "docid": "4067ad5ff215f257e20a7af6fcba9537", "score": "0.5788794", "text": "def vertical_rectangles_get(self) -> List[Square]:\n romi_base: RomiBase = self\n base_dxf: BaseDXF = romi_base.base_dxf\n upper_rectangle0: Square = base_dxf.rectangle_locate(\"RIGHT: UPPER: Rectangle 0\",\n -1.511965, 3.569984,\n -1.460783, 3.683232)\n upper_rectangle1: Square = base_dxf.rectangle_locate(\"RIGHT: UPPER: Rectangle 1\",\n -1.511965, 3.749122,\n -1.460783, 3.897803)\n upper_rectangle2: Square = base_dxf.rectangle_locate(\"RIGHT: UPPER: Rectangle 2\",\n -1.511965, 3.963677,\n -1.460783, 4.076929)\n upper_rectangle3: Square = base_dxf.rectangle_locate(\"RIGHT: UPPER: Rectangle 3\",\n -1.511965, 4.142815,\n -1.460783, 4.291496)\n upper_rectangles: List[Square] = [\n upper_rectangle0,\n upper_rectangle1,\n upper_rectangle2,\n upper_rectangle3\n ]\n upper_rectangle: Square\n lower_rectangles: List[Square] = []\n for upper_rectangle in upper_rectangles:\n lower_rectangle: SimplePolygon = upper_rectangle.x_mirror(\"UPPER:\", \"LOWER:\")\n assert isinstance(lower_rectangle, Square)\n lower_rectangles.append(lower_rectangle)\n vertical_rectangles: List[Square] = upper_rectangles + lower_rectangles\n return vertical_rectangles", "title": "" }, { "docid": "79ed94d2d3b5afc2d23be8ede2c69721", "score": "0.57748365", "text": "def refresh(self):\n self.rect_list = arcade.ShapeElementList()\n\n for i, row in enumerate(self._grid):\n for j, column in enumerate(row):\n x = SIDE_MARGIN + j * 24\n y = BOTTOM_MARGIN + i * 24\n if i < 21:\n if j == 0 or j == 11 or i == 0:\n self._grid[i][j] = 1\n # Color in left, bottom and right borders\n self.create_border_rect(x, y)\n elif self._grid[i][j] > 1:\n # Color in any game pieces on the board\n self.create_game_piece_rect(x, y, self._grid[i][j])\n self.create_grid_rect(x, y)\n if self.check_for_line_clear():\n self.refresh()\n self.refreshed = True", "title": "" }, { "docid": "fc4bb6815744a2b505eed57cd32c3899", "score": "0.57604927", "text": "def rect(self):\n if self.composite:\n return self.composite.get_rect()\n else:\n return pygame.Rect((0,0,1,1))", "title": "" }, { "docid": "61d9dbbebfb3a115c7fb577dd73c1a51", "score": "0.5755647", "text": "def 
update_screen(self):\n #~ if len(self.new_dead_cells) + len(self.new_alive_cells) < 200:\n dirty_rects = []\n for coords, team in self.alive_cells.items():\n i,j = coords\n rect = pygame.Rect((i-1)*self.tile_size, (j-1)*self.tile_size, self.tile_size, self.tile_size)\n self.screen.fill(self.shade(i,j,team), rect)\n dirty_rects.append(rect)\n self.new_alive_cells = {}\n for i,j in self.new_dead_cells:\n rect = pygame.Rect((i-1)*self.tile_size, (j-1)*self.tile_size, self.tile_size, self.tile_size)\n self.screen.fill(GRAY, rect)\n dirty_rects.append(rect)\n self.new_dead_cells = set()\n pygame.display.update(dirty_rects)\n #~ else:\n #~ self.new_alive_cells = []\n #~ self.new_dead_cells = []\n #~ self.screen.fill(self.gray)\n #~ for i,j in self.alive_cells:\n #~ rect = pygame.Rect((i-1)*self.tile_size, (j-1)*self.tile_size, self.tile_size, self.tile_size)\n #~ self.screen.blit(self.green_tile, rect)\n #~ pygame.display.flip()", "title": "" }, { "docid": "cce866ba7cb4faa7ebb7e18a63c0f5d9", "score": "0.5722426", "text": "def draw(self):\n\t\trects = []\n\t\trects.append( pygame.draw.circle( screen, ( 0, 0, 0 ), ( int(round(self.oldx)), int(round(self.oldy)) ), self.radius, 0 ) )\n\t\trects.append( pygame.draw.circle( screen, ( 51, 51, 51 ), ( int(round(self.x)), int(round(self.y)) ), self.radius, 0 ) )\n\t\trects.append( pygame.draw.circle( screen, ( 102, 102, 102 ), ( int(round(self.x-1)), int(round(self.y-1)) ), self.radius-2, 0 ) )\n\t\trects.append( pygame.draw.circle( screen, ( 153, 153, 153 ), ( int(round(self.x-2)), int(round(self.y-2)) ), self.radius-4, 0 ) )\n\t\trects.append( pygame.draw.circle( screen, ( 203, 204, 204 ), ( int(round(self.x-3)), int(round(self.y-3)) ), self.radius-6, 0 ) )\n\t\trects.append( pygame.draw.circle( screen, ( 255, 255, 255 ), ( int(round(self.x-4)), int(round(self.y-4)) ), self.radius-8, 0 ) )\n\t\tself.rect = rects[1]\n\t\treturn rects", "title": "" }, { "docid": "b162b9aa0699f60fa3234443ef19ea03", "score": "0.5712293", "text": "def repaint(self):\n if self.board in app.App.running_boards:\n self.board.tokens.clear(self.surface, self.image)\n repaint_rects = self.board.tokens.draw(self.surface)\n if self.board.container_top_left_x != 0 or self.board.container_top_left_y != 0:\n new_repaint_rects = []\n for rect in repaint_rects:\n rect.topleft = self.board.container_top_left_x + rect.topleft[0], self.board.container_top_left_y + rect.topleft[1]\n new_repaint_rects.append(rect)\n repaint_rects = new_repaint_rects\n self.board.app.window.repaint_areas.extend(repaint_rects)", "title": "" }, { "docid": "d6cabd9132580240134acae555f2a9f1", "score": "0.5702454", "text": "def findSquaresToRemove(self):\r\n\r\n squaresFound = []\r\n\r\n for j in range(self.height):\r\n for i in range(self.width):\r\n if self.squareAt(i, j):\r\n tuple = (i, j)\r\n squaresFound.append(tuple)\r\n\r\n return squaresFound", "title": "" }, { "docid": "7e9572810250b4422060c66bba1e7636", "score": "0.56922716", "text": "def rectSet(rectList):\n toReturn = []\n for rect in rectList:\n if rect not in toReturn:\n toReturn.append(rect)\n return toReturn", "title": "" }, { "docid": "139fa49195d6520c838f68a0a48b1f36", "score": "0.56911933", "text": "def GetBoxRects(dc, buffer, attr):", "title": "" }, { "docid": "39b5711375b6e5202ef5d335648ed955", "score": "0.5659274", "text": "def get_drawables(self):\n return [DrawableSurface(self.image, self.image.get_rect().move(self.pos_x, self.pos_y))]", "title": "" }, { "docid": "39b5711375b6e5202ef5d335648ed955", "score": "0.5659274", "text": "def 
get_drawables(self):\n return [DrawableSurface(self.image, self.image.get_rect().move(self.pos_x, self.pos_y))]", "title": "" }, { "docid": "b7be1edba6e0de4d6e9fa223f9f7821e", "score": "0.56569487", "text": "def glclear(self):\n self.pixels = [\n [color(self.r, self.g, self.b) for x in range(self.width)]\n for y in range(self.height)\n ]", "title": "" }, { "docid": "f2322a8682e579a17ba2370121280c4b", "score": "0.56554437", "text": "def draw_rectangles(area, grid, surface=WIN):\n rows = S['rows_n']\n cols = S['cols_n']\n\n col_size = area.w / cols\n row_size = area.h / rows\n for col in range(cols):\n for row in range(rows):\n square = pygame.Rect(area.x + col * col_size, area.y + row * row_size, col_size, row_size)\n\n # Color possible moves\n if not S['hide_moves'] and grid[col][row].considered:\n grid[col][row].color = COLORS['orange'] \n\n # Color current choosen field\n if not S['hide_track'] and grid[col][row].is_current:\n grid[col][row].color = COLORS['blue'] \n\n # Color final path \n if grid[col][row].is_path:\n grid[col][row].color = COLORS['yellow']\n\n # Color start field\n if grid[col][row].is_start:\n grid[col][row].color = COLORS['green'] \n \n # Color end field\n if grid[col][row].is_end:\n grid[col][row].color = COLORS['red'] \n \n pygame.draw.rect(surface, grid[col][row].color, square)\n\n # Add f score string to considered fields\n if S['show_fscore'] and grid[col][row].considered:\n f_score_string = str(round(grid[col][row].f))\n score_text = SCORE_FONT.render(f_score_string, True, COLORS['black'])\n WIN.blit(score_text, (area.x + col * col_size, area.y + row * row_size))", "title": "" }, { "docid": "db269894617432ce1c9205848820f2ba", "score": "0.56099856", "text": "def drawCards(self):\n if len(self.draw_pile) < self.rules.Draw_Size:\n note = self.refreshDrawPile()\n print(note)\n #todo: want to broadcast this note.\n result = []\n for _ in range(self.rules.Draw_Size):\n result.append(self.draw_pile.pop())\n return result", "title": "" }, { "docid": "150dcf8e8052cf254c1b8c8ac6bcdece", "score": "0.56090385", "text": "def get_full_rect(self):\n x, y = self.get_position()\n w, h = self.get_size(), self.get_size()\n if Constants.DEBUG_MODE:\n pygame.draw.rect(self.get_surface(), (255, 255, 255), pygame.Rect(x - w, y - h, w * 2, h * 2), 1)\n return pygame.Rect(x - w, y - h, w * 2, h * 2)", "title": "" }, { "docid": "066f96da49304805e39b191715439868", "score": "0.56020993", "text": "def update(self):\n img = self.mask\n nrects = []\n # Iterate over all the rects\n # If one is colliding with light rect, it gets cropped and then moved,\n # and added to nrects list\n if self.mode == 'dirty':\n obstructors_list = self.obstructors + self.auxiliar_obstructors\n elif self.mode == 'clean':\n obstructors_list = self.clean_obstructors + self.auxiliar_obstructors\n else:\n raise ValueError('Invalid mode')\n\n for r in obstructors_list:\n if self.light_rect.colliderect(r):\n nr = r.clip(self.light_rect) # Returns a new rectangle that is cropped to be completely inside the argument Rect.\n # Normalize the rectangle(move it near to 0,0 for following comparisons)\n # Imagine a new rectangle at top left of size light_size*light_size,\n # which is the mask, the rectangles are moved there.\n nr.top = nr.top - self.light_rect.top\n nr.left = nr.left - self.light_rect.left\n nrects.append(nr)\n \n img.fill(1) # black, which is set to transparent before\n # draws the light circle\n if self.gradient:\n def f(x):\n # return ((x*x))\n return math.sqrt(x) - 0.1\n # return math.exp(x)\n # return 
-math.cos(x/1.2)\n # return 0.49*math.cos(10*x)+0.5\n # return math.exp(-x/10.)*math.sin(x)\n # return math.ceil(x/10.)\n # return math.exp(x-10)+math.exp(-x-10)\n # return x**2-x**4\n # return 10*x+10\n\n def f2(x):\n return x\n return math.sqrt(x) - 0.1\n # return math.exp(x)\n\n start = (self.size, self.size)\n end = (self.size*2, self.size)\n start_color = self.color\n end_color = (0,0,0)\n mode = 1\n g_func = f\n r_func = f\n b_func = f\n a_func = f2\n draw_circle(img, start, end, start_color, end_color, mode = mode, Afunc=a_func)\n # Rfunc = r_func, Gfunc = g_func, Bfunc = b_func, Afunc = a_func)\n else:\n pygame.draw.circle(img, self.color, (self.size,self.size), self.size,0)\n \n # iterates over all the rects (which were found colliding, were cropped and moved)\n for r in nrects:\n # if r.collidepoint(self.x, self.y):\n # img.fill(1)\n # return\n p = self.getPolygon(self.size,self.size,r) \n if p:\n pygame.draw.polygon(img, 1, p, 0)\n \n # draws the center of the light - the light 'producer'\n # pygame.draw.circle(img, 3, (self.size,self.size), 2)", "title": "" }, { "docid": "57c1a98d6ce961e1597efcf39758dfb3", "score": "0.559342", "text": "def rect(self):\n r = self.surface.get_rect()\n r.x, r.y = self.pos\n return r", "title": "" }, { "docid": "4a46befac460957e3396a961364457ed", "score": "0.55825394", "text": "def draw(self, surface):\n sprites = self.sprites()\n surface_blit = surface.blit\n for spr in sprites:\n self.spritedict[spr] = surface_blit(spr.image, spr.rect)\n self.lostsprites = []", "title": "" }, { "docid": "61c090bfdb48f498b91b5f8cc2278c87", "score": "0.5572083", "text": "def _destroy_collided_plates(self):\n for index, object in enumerate(self.plates):\n if object.colliderect(self.ball.get_rect()):\n del self.plates[index]\n self.score += 10\n return True\n\n return False", "title": "" }, { "docid": "baa0c28a360947590334f161db107c35", "score": "0.5543157", "text": "def dirty(self):\n for viewport in self._viewports:\n if viewport.dirty:\n return True\n return self._dirty", "title": "" }, { "docid": "1a4e52b440508df7b58546caad28f1da", "score": "0.55389154", "text": "def clear(self, surface, bgd):\n try:\n bgd.__call__\n except AttributeError:\n pass\n else:\n for r in self.lostsprites:\n bgd(surface, r)\n for r in self.spritedict.values():\n if r is not 0: bgd(surface, r)\n return\n surface_blit = surface.blit\n for r in self.lostsprites:\n surface_blit(bgd, r, r)\n for r in self.spritedict.values():\n if r is not 0: surface_blit(bgd, r, r)", "title": "" }, { "docid": "b5d4f1d19b1a1eab681a50ae12768623", "score": "0.55215484", "text": "async def clear_draw(self):\n self.draw = []", "title": "" }, { "docid": "6decdf4e1ec36afb682021b8ad747afb", "score": "0.5515264", "text": "def tight_crop(self):\n rect = self.calc_rect()\n abs_inplane = abs(self._in_plane)\n abs_lr = abs(self._left_right_adjusted)\n\n if abs_inplane < 40:\n fo_rect = self.calc_orienation()\n\n if abs_lr > 7:\n alpha = min(abs_lr, 15) / 15\n one_minus = 1 - alpha\n\n if self._is_looking_to_their_right:\n x1, x2 = fo_rect[0] * alpha + one_minus * rect[0], rect[2] * alpha + one_minus * fo_rect[2]\n else:\n x1, x2 = rect[0] * alpha + one_minus * fo_rect[0], fo_rect[2] * alpha + one_minus * rect[2]\n else:\n x1, x2 = (fo_rect[0] + rect[0]) / 2, (fo_rect[2] + rect[2]) / 2\n\n y1, y2 = rect[1], rect[3] # we use the height from calc_rect() rectangle.\n rect = [x1, y1, x2, y2]\n elif abs_inplane > 75:\n pts = self.rot_crop()\n rect = mef.rect_hull(pts)\n\n return rect", "title": "" }, { "docid": 
"92850aeadf62cb0fd882ded0bae854ac", "score": "0.5511932", "text": "def getDestructibleObstacles(self) -> list:\n pass", "title": "" }, { "docid": "6fcefc315df7d81338798d73ad58b3a4", "score": "0.5470629", "text": "def CleanRectangles(rects, h_min=35, w_min=25, h_max = 75, w_max = 75):\n cleaned_rects=[]\n cleaned_rects_vcenter=[]\n cleaned_rects_hcenter=[]\n widths = []\n heights = []\n\n for j, rect in enumerate(rects):\n # Draw the rectangles\n if (( h_min <= rect[3] <= h_max) and ( w_min <= rect[2] <= w_max)):\n cleaned_rects.append(rect)\n cleaned_rects_vcenter.append(rect[1] + rect[3]//2)\n cleaned_rects_hcenter.append(rect[0] + rect[2]//2)\n widths.append(rect[2])\n heights.append(rect[3])\n #print(widths)\n mean_width = int(np.mean(widths)) + 3*int(np.std(widths))\n mean_heigth = int(np.mean(heights)) + 3*int(np.std(heights))\n cleaned_rects_vcenter=np.array(cleaned_rects_vcenter).reshape(-1,1)\n cleaned_rects_hcenter=np.array(cleaned_rects_hcenter).reshape(-1,1)\n return (cleaned_rects,cleaned_rects_vcenter,cleaned_rects_hcenter, mean_width, mean_heigth)", "title": "" }, { "docid": "4759d2e5e29bdf3bacaedf91d7bfcd76", "score": "0.545285", "text": "def deinit(self): # type: () -> None\n for pixel_index in range(len(self)):\n self[pixel_index] = BLACK", "title": "" }, { "docid": "8903fbdb24adc332e3ec5bfc7c06be15", "score": "0.5445114", "text": "def draw(self):\n return self.surf, self.rect.topleft", "title": "" }, { "docid": "c8833376a08ba99d7934884a21045fb4", "score": "0.54395336", "text": "def unmovable(self, wall_list, space_list):\n i = 0\n j = 0\n\n window = pygame.display.set_mode((480, 480), RESIZABLE)\n wall = pygame.image.load(\"ressource/mur.png\").convert()\n wall.get_rect()\n while i != len(wall_list):\n window.blit(wall, tuple(32*x for x in wall_list[i]))\n i = i + 1\n space = pygame.image.load(\"ressource/espace.png\").convert()\n space.get_rect()\n while j != len(space_list):\n window.blit(space, tuple(32*x for x in space_list[j]))\n j = j + 1\n pygame.display.flip()\n return window, space", "title": "" }, { "docid": "0a9a6959e83387b3da41829694f8d7e5", "score": "0.5436811", "text": "def refresh(self):\n dx = (self.xmax - self.xmin)/(self.xlen - 1)\n dy = (self.ymax - self.ymin)/(self.ylen - 1)\n listoflists = []\n for i in range(self.xlen):\n sublist = []\n for j in range(self.ylen):\n sublist.append((self.xmin + i*dx)+(self.ymin +j*dy)*1j)\n listoflists.append(sublist)\n self.plane = listoflists\n self.fs = []\n return", "title": "" }, { "docid": "f1c18aedeb679eba8698b476c62444a6", "score": "0.5413466", "text": "def draw(self):\n self.rect_list.draw()", "title": "" }, { "docid": "a62a8eed4293e561fb5fda09a594557e", "score": "0.53975105", "text": "def _clear(self):\n while self._patches:\n self._patches.pop().remove()", "title": "" }, { "docid": "f756dd9c4245164eb84879b2efb947dd", "score": "0.53970104", "text": "def clear(self):\n self.group.clear()\n self._texture_rectangles = {}\n self.texs = []\n self.size = [1, 1]", "title": "" }, { "docid": "3e3a4567f74b591d681c7b241c280c8b", "score": "0.5392908", "text": "def _rect_grid_to_shape_list(self):\n return list(self._rect_grid_to_shape_generator())", "title": "" }, { "docid": "4e0a5f85e7479eed620328fc3390d4fe", "score": "0.53648037", "text": "def getUndestructibleOstacles(self) -> list:\n pass", "title": "" }, { "docid": "fd8cbbf6eca18013edd904ddc2fb406c", "score": "0.5360282", "text": "def commitUpdate(self, rectangles=None):", "title": "" }, { "docid": "7c4e633ff14bdd5c2b5e939aa7e1c8d1", "score": "0.53482413", "text": 
"def surfaces(self):\n return self._surfaces", "title": "" }, { "docid": "65ec5ef1ab706a860fa23cde5e565c1e", "score": "0.5346353", "text": "def get_wall_mask(self):\n processed_walls = []\n all_lines = []\n for room_key in self.room_description_map:\n room_walls = self.room_description_map[room_key].walls\n for wall in room_walls:\n if wall in processed_walls:\n continue\n\n wall_length = math.sqrt((wall.p2.pos[0] - wall.p1.pos[0]) ** 2 + (wall.p2.pos[1] - wall.p1.pos[1]) ** 2)\n\n ls = LineString([(wall.p1.pos[0], wall.p1.pos[1]), (wall.p2.pos[0], wall.p2.pos[1])]).buffer(1)\n\n for hole in wall.holes:\n h_start_x = ((wall.p2.pos[0] - wall.p1.pos[0]) / wall_length * hole.min_x) + wall.p1.pos[0]\n h_end_x = ((wall.p2.pos[0] - wall.p1.pos[0]) / wall_length * hole.max_x) + wall.p1.pos[0]\n h_start_y = ((wall.p2.pos[1] - wall.p1.pos[1]) / wall_length * hole.min_x) + wall.p1.pos[1]\n h_end_y = ((wall.p2.pos[1] - wall.p1.pos[1]) / wall_length * hole.max_x) + wall.p1.pos[1]\n\n hs = LineString([(h_start_x, h_start_y), (h_end_x, h_end_y)]).buffer(1)\n ls = ls.difference(hs)\n\n all_lines.append(ls)\n\n from shapely.ops import cascaded_union\n u = cascaded_union(all_lines)\n return u", "title": "" }, { "docid": "0d9a13cfd350b2fd6371a2dd000ed82f", "score": "0.5321643", "text": "def erase(self):\r\n for s in self.shapes:\r\n if s.item is not None:\r\n s._app.canvas.delete(s.item)", "title": "" }, { "docid": "7a5a784bafa9176378bb3eb64927da4a", "score": "0.53182244", "text": "def __updatetodraw(self, screenrect):\r\n\r\n if screenrect.topleft != self.prevpos:\r\n #The position has changed, get list of what should be visible\r\n self.todraw = []\r\n topLeft = ( screenrect.left - (screenrect.left % self.twidth), screenrect.top - (screenrect.top % self.theight))\r\n numDir = ( int(ceil(screenrect.width / float(self.twidth)))+1, int(ceil(screenrect.height / float(self.theight)))+1 )\r\n for ty in range(topLeft[1], topLeft[1]+(numDir[1]*self.theight), self.theight):\r\n for tx in range(topLeft[0], topLeft[0]+(numDir[0]*self.twidth), self.twidth):\r\n if self.spritedict.has_key((tx,ty)):\r\n self.todraw.append(self.spritedict[(tx,ty)])\r\n \r\n #Set prevpos for next time\r\n self.prevpos = screenrect.topleft", "title": "" }, { "docid": "750c158d3c42583c9db95c51e6702e96", "score": "0.5315095", "text": "def fill_panel(self):\n self.canva = []\n if len(self.objects)<>0:\n for obj in self.objects:\n for p in obj.pixels:\n temp = utils.transform(p.position)\n if temp not in self.canva and (temp[1]<=self.cols and temp[0]<=self.rows-1 and temp[1] > 0 and temp[0] > 0):\n self.canva.append(temp)\n return self.canva", "title": "" }, { "docid": "aa3828f9510db32bf882ee89c715b6e2", "score": "0.5313356", "text": "def apply_erase_rectangles(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n if not (params['widths'].size() == params['heights'].size() == params['xs'].size() == params['ys'].size()):\n raise TypeError(\n f\"''rectangle params components must have same shape\"\n )\n\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n mask = torch.zeros(input.size()).type_as(input)\n values = torch.zeros(input.size()).type_as(input)\n\n widths = params['widths']\n heights = params['heights']\n xs = params['xs']\n ys = params['ys']\n vs = params['values']\n for i_elem in range(input.size()[0]):\n h = widths[i_elem].item()\n w = heights[i_elem].item()\n y = ys[i_elem].item()\n x = xs[i_elem].item()\n v = vs[i_elem].item()\n 
mask[i_elem, :, int(y):int(y + w), int(x):int(x + h)] = 1.\n values[i_elem, :, int(y):int(y + w), int(x):int(x + h)] = v\n transformed = torch.where(mask == 1., values, input)\n return transformed", "title": "" }, { "docid": "8d512454d1a934ccefbf20f77c1c2127", "score": "0.5288344", "text": "def remove_full_cur_blocks(self):\n for tag, cur_obj_dict in self._cur_blocks.items():\n to_del_block_index = [\n block_index for block_index, obj in cur_obj_dict.items()\n if obj.is_full()]\n for block_index in to_del_block_index:\n del cur_obj_dict[block_index]", "title": "" }, { "docid": "b658913f0efd0e07f87640b00d067a4f", "score": "0.5271799", "text": "def SafeThreadLoop(self, now):\n shapesToKill = []\n if self.debugRender:\n try:\n for shape in self.shapes:\n shape.Render()\n if shape.currentTime > shape.time:\n shapesToKill.append(shape)\n\n except:\n log.LogException('Error rendering shape in debugRenderClient.SafeThreadLoop')\n self.shapes = []\n\n else:\n self.ClearAllShapes()\n for shape in shapesToKill:\n self.shapes.remove(shape)", "title": "" }, { "docid": "a8469b8c60cd78e0d58a9f7a4ffd657c", "score": "0.5271158", "text": "def empty(self):\n for s in self.sprites():\n self.remove_internal(s)\n s.remove_internal(self)", "title": "" }, { "docid": "85a4afa12143ebbae72d8311c42af440", "score": "0.52668947", "text": "def __get_paintings(self, painting):\n\n paintings = []\n smaller_painting = self.__resize_painting(painting)\n\n for i in range(self.number_of_colors):\n new_painting = smaller_painting.copy()\n paintings.append(new_painting)\n return paintings", "title": "" }, { "docid": "8ba2e393d07480f25bcabd003b691278", "score": "0.526548", "text": "def Redraw( self,eraseBackground=False ):\n for window in self.ellipse_windows:\n window.redraw(eraseBackground=eraseBackground)", "title": "" }, { "docid": "0b0cd4f1bf348159cb6e6466ddee3ae9", "score": "0.52522117", "text": "def _cast_shadow(self):\n mouse = self.entities[\"mouse\"]\n nontransparent = [b for b in self.boundaries if not b.transparent]\n # find angles of endpoints of boundaries relative to mouse\n endpoint_angles = mouse.sort_obstacle_endpoint_angles(nontransparent)\n\n # initialize obstacles and closest_obstacle at first angle\n obstacles = [\n b\n for b in nontransparent\n if mouse.distance_to_obstacle_along_angle(b, endpoint_angles[0])\n < np.inf\n ]\n obstacles.sort(\n key=lambda o: (\n mouse.distance_to_obstacle_along_angle(o, endpoint_angles[0]),\n mouse.closest_distance_to_obstacle_clockwise_of_angle(\n o, endpoint_angles[0]\n ), # fix for obstacles starting at same point\n )\n )\n closest_obstacle = obstacles[0]\n # start first wall\n old_point = mouse.point_on_obstacle_along_angle(\n closest_obstacle, endpoint_angles[0]\n )\n\n # list to hold lit walls\n visible_walls = [mouse.position]\n\n # loop over endpoint angles\n for angle in endpoint_angles:\n\n # add any walls that begin at this angle to list\n for boundary in nontransparent:\n these_points = [\n p\n for p in boundary.points\n if fuzzy_equal(mouse.angle_to_point(p), angle)\n ]\n if these_points:\n other_points = [\n p\n for p in boundary.points\n if not fuzzy_equal(mouse.angle_to_point(p), angle)\n ]\n counterclockwise = [\n orientation(mouse.position, p, o) < 0\n for p in these_points\n for o in other_points\n ]\n if all(counterclockwise):\n obstacles.append(boundary)\n obstacles = list(set(obstacles))\n\n # remove any walls that end at this angle from list\n to_remove = []\n for boundary in obstacles:\n these_points = [\n p\n for p in boundary.points\n if 
fuzzy_equal(mouse.angle_to_point(p), angle)\n ]\n if these_points:\n other_points = [\n p\n for p in boundary.points\n if not fuzzy_equal(mouse.angle_to_point(p), angle)\n ]\n clockwise = [\n orientation(mouse.position, p, o) > 0\n for p in these_points\n for o in other_points\n ]\n if all(clockwise):\n to_remove.append(boundary)\n obstacles = [o for o in obstacles if o not in to_remove]\n\n # figure out which wall is now nearest\n obstacles.sort(\n key=lambda o: (\n mouse.distance_to_obstacle_along_angle(o, angle),\n mouse.closest_distance_to_obstacle_clockwise_of_angle(\n o, angle\n ), # fix for obstacles starting at same point\n )\n )\n\n # if nearest wall changed or this is the last iteration\n nearest_wall_changed = obstacles[0] is not closest_obstacle\n is_last_iter = angle == endpoint_angles[-1]\n if nearest_wall_changed or is_last_iter:\n # complete current wall\n new_point = mouse.point_on_obstacle_along_angle(\n closest_obstacle, angle\n )\n\n visible_walls.append(old_point)\n visible_walls.append(new_point)\n\n # and begin a new one\n closest_obstacle = obstacles[0]\n old_point = mouse.point_on_obstacle_along_angle(\n closest_obstacle, angle\n )\n\n return visible_walls", "title": "" }, { "docid": "ea1eed7b84dd730ffbfcc6e1c89bc52c", "score": "0.5250658", "text": "def draw_bars(self, surface: Any):\n for fro in Table.POSITIONS:\n for to in Table.POSITIONS:\n if not self.state.table.neighbours(fro, to):\n continue\n if fro[1] > to[1]:\n continue\n tb = self.state.table.state\n if tb[fro[0]][fro[1]] == S_INV:\n continue\n if tb[to[0]][to[1]] == S_INV:\n continue\n x_shift, y_shift = 0, 0\n # print(f\"Vecini: {fro}, {to}\")\n fromCord, toCord = self.cells[fro[0]][fro[1]], self.cells[to[0]][to[1]]\n lg = dist_2d(fromCord, toCord) ** 0.5\n # lg -= 2\n angle = 0\n if to[1] != fro[1]:\n if fro[0] < to[0]:\n angle = 90 * 3 + 45\n elif fro[0] > to[0]:\n angle = 45\n y_shift = toCord[1] - fromCord[1]\n else:\n if fro[0] < to[0]:\n angle = 90\n else:\n continue\n # Create rectangle\n Width = 4\n myBar: Any = pygame.Surface((lg, Width), pygame.SRCALPHA)\n myBar.fill(WALL_COLOR)\n myBar = pygame.transform.rotate(myBar, angle)\n x_shift += fromCord[0]\n y_shift += fromCord[1]\n surface.blit(myBar, (x_shift - Width // 2, y_shift - Width // 2))", "title": "" }, { "docid": "88fe441cc72581d27b495b0c45e992ba", "score": "0.5229404", "text": "def undraw(self):\n # Takes self.Vis list and undraws all components in that card\n for i in self.Vis:\n i.undraw()", "title": "" }, { "docid": "59a71f77e8949c30c9a1b54f98272d00", "score": "0.52261263", "text": "def cover_background(self, num_triangles):\n dimension = int(round(math.sqrt(num_triangles / 2.0)))\n while (dimension * dimension * 2) > num_triangles:\n dimension -= 1\n if dimension <= 0:\n return []\n\n triangles = []\n block_height = self.size.y // dimension\n block_width = self.size.x // dimension\n\n for x in range(dimension):\n for y in range(dimension):\n top = y * block_height\n bottom = (y+1) * block_height\n left = x * block_width\n right = (x+1) * block_width\n triangles += self.get_triangles_for_rectangle(left, right, top, bottom)\n return triangles", "title": "" }, { "docid": "0704ab64d8bc9d7697f7a53a347ae728", "score": "0.52253765", "text": "def render_colliders(self, surface, offset):\n dirty_rects = []\n\n # Draw player and floor colliders to surface\n collider = self.collider.move(offset)\n pygame.draw.rect(surface, (0, 255, 0), collider)\n dirty_rects.append(collider)\n\n floor_collider = self.floor_collider.move(offset)\n 
pygame.draw.rect(surface, (255, 255, 0), floor_collider)\n dirty_rects.append(floor_collider)\n\n # Test conditions for valid attacks\n if self.can_attack:\n if self.animation_name == \"attack0\" and self.frame_num >= 2:\n # Create modified copy of player collider to resize into attack \n attack_collider = collider.inflate(\n self.attack0_length, self.attack0_width)\n attack_collider.move_ip(\n (((not self.flipX)*2) - 1)*(self.attack0_length/2 + self.collider_size.x/2), 0)\n pygame.draw.rect(surface, (0, 0, 255), attack_collider)\n dirty_rects.append(attack_collider)\n\n if self.animation_name == \"attack1\" and self.frame_num >= 3 and self.frame_num <= 5:\n attack_collider = collider.inflate(\n self.attack1_width, self.attack1_length)\n attack_collider.move_ip(\n 0, -self.attack1_length/2 - self.collider_size.y/2)\n pygame.draw.rect(surface, (0, 0, 255), attack_collider)\n dirty_rects.append(attack_collider)\n\n if self.animation_name == \"attack2\" and self.frame_num < 5:\n attack_collider = collider.inflate(\n self.attack2_width, self.attack2_length)\n attack_collider.move_ip(\n 0, self.attack1_length/2 + self.collider_size.y/2)\n pygame.draw.rect(surface, (0, 0, 255), attack_collider)\n dirty_rects.append(attack_collider)\n return dirty_rects", "title": "" }, { "docid": "519162a2dee7c6b8edc937aad178bd06", "score": "0.5222242", "text": "def clean_area(screen,origin,width,height,color):\r\n ox,oy = origin\r\n points = [(ox,oy),(ox+width,oy),(ox+width,oy+height),(ox,oy+height),(ox,oy)]\r\n pygame.draw.polygon(screen, color, points, 0)", "title": "" }, { "docid": "557cd385cec77063feda1883db3fc921", "score": "0.5214095", "text": "def unsplit_boxes(self, top_bounds, bot_bounds, box, actual_left, actual_right):\n \n new_boxes_m = []\n if top_bounds:\n new_boxes = [{'t': c_top, 'b': c_bot,\n 'l': actual_left, 'r': actual_right,\n 'w': actual_right-actual_left, 'h': c_bot-c_top,\n 'color': 'seagreen', 'num_col': 'one'} \\\n for (c_top,c_bot) in zip(top_bounds, bot_bounds)]\n \n ## Makes sure that there is no smaller box in a larger box. 
If there is, merge them.\n new_boxes = sorted(new_boxes, key=lambda k: (\"t\" not in k, k.get('t', None)))\n new_boxes_m = [new_boxes[0]]\n for b_num in list(range(1,len(new_boxes))):\n if new_boxes[b_num]['t'] > new_boxes_m[-1]['t'] and \\\n new_boxes[b_num]['t'] < new_boxes[b_num-1]['b']+20:\n new_boxes_m[-1]['b'] = new_boxes[b_num]['b']\n else:\n new_boxes_m.append(new_boxes[b_num])\n return new_boxes_m", "title": "" }, { "docid": "62e52be06f66f5d412ae578f95ab8721", "score": "0.52083397", "text": "def clear_scene(self):\n for poly in self.polygons:\n poly.remove_points()\n self.m_scene.removeItem(poly)\n for text in self.texts:\n self.m_scene.removeItem(text)\n for poly in self.m_scene.added_polygons:\n poly.remove_points()\n self.m_scene.removeItem(poly)\n \n self.m_scene.added_polygons =[]\n self.texts = []\n self.polygons = []", "title": "" }, { "docid": "dc2ceb69c705dfc9bfc7f397e81059f4", "score": "0.5193337", "text": "def clean_area(screen,origin,width,height,color):\n ox,oy = origin\n points = [(ox,oy),(ox+width,oy),(ox+width,oy+height),(ox,oy+height),(ox,oy)]\n pygame.draw.polygon(screen, color, points, 0)", "title": "" }, { "docid": "8f4b26a57a0cb5ad30a352f7e53cc4ba", "score": "0.5178954", "text": "def remove_bound(self):\n bnd_dic = self.bnd_dic.copy()\n for i in bnd_dic:\n self._remove_bound(i)\n return bnd_dic", "title": "" }, { "docid": "8600f93f94528e5b9f8aafc2fee09366", "score": "0.51735514", "text": "def clean_area(self,screen,origin,width,height,color):\r\n ox,oy = origin\r\n points = [(ox,oy),(ox+width,oy),(ox+width,oy+height),(ox,oy+height),(ox,oy)]\r\n pygame.draw.polygon(screen, color, points, 0)", "title": "" }, { "docid": "a43f94bf679efffaa80c5fd86473e871", "score": "0.51695704", "text": "def removeAll(self):\n contents.removeAll()\n super(GCanvas, self).removeAll()\n repaint()", "title": "" }, { "docid": "5b8f7722fdca74036016b5b5933868a6", "score": "0.51694643", "text": "def erase_scrollmap(rect):\n global __game\n surf = __game.bscrollmap.get_image().subsurface(rect).copy()\n __game.bscrollmap.blit(surf, rect.topleft)", "title": "" }, { "docid": "ca8cb2b107f982a8e8e2dccf4aa0c45f", "score": "0.5167439", "text": "def create_rectangles(self):\n pl = self.position_list # position list\n for index in range(len(pl) - 1):\n # if we have x motion\n if pl[index][0] != pl[index + 1][0]:\n line_length = pl[index + 1][0] - pl[index][0]\n offset = [pl[index][0],\n pl[index][1] - 0.5 * self.layer_width]\n if line_length < 0:\n offset = [offset[0] + line_length,\n offset[1]]\n self.add_line(layer_name=self.layer_name,\n length=abs(line_length),\n offset=offset,\n orientation=\"horizontal\",\n layer_width=self.layer_width)\n # if we have y motion\n elif pl[index][1] != pl[index + 1][1]:\n line_length = pl[index + 1][1] - pl[index][1]\n offset = [pl[index][0] - 0.5 * self.layer_width,\n pl[index][1]]\n if line_length < 0:\n offset = [offset[0],\n offset[1] + line_length]\n self.add_line(layer_name=self.layer_name,\n length=abs(line_length),\n offset=offset,\n orientation=\"vertical\",\n layer_width=self.layer_width)", "title": "" }, { "docid": "108e4055e4c80b1817cdf5069b044811", "score": "0.51637465", "text": "def draw(self, surface):\r\n self.new_image = self.image.copy()\r\n return self.new_image", "title": "" }, { "docid": "1f6c517a1e20c57349e9fd2308361dcf", "score": "0.51635563", "text": "def reset_generation(self):\r\n # w pętli wypełnij listę kolumnami\r\n # które także w pętli zostają wypełnione wartością 0 (DEAD)\r\n return [[DEAD for y in range(self.height)] for x in 
range(self.width)]", "title": "" }, { "docid": "e2fdebde37ff18652c1f31afa0145535", "score": "0.51506054", "text": "def draw(self, surface):\n\n # draw.rect(where, colour, (x,y,width, height))\n # self.collumn * 16 converts pixels to squares on gird\n pygame.draw.rect(surface, self.color, (int(self.position * 16), self.position_bottom, self.size * 16, 16))", "title": "" }, { "docid": "60aff96b7a490425310bf379090b3d32", "score": "0.51279974", "text": "def blow_down_objects(self):\n if self.__collision_object_buffer:\n self.__planning_scene_interface.add_objects(self.__collision_object_buffer)\n self.__collision_object_buffer = []", "title": "" }, { "docid": "d25335f8075a3ee1341f08dfb7c7da0d", "score": "0.5120241", "text": "def removeFluxSurfaces(self):\n if self._fluxOverlayHandles is not None:\n for h in self._fluxOverlayHandles:\n h.remove()\n\n self._fluxOverlayHandles = []\n self.overlayFluxSurfaces = False", "title": "" }, { "docid": "36b19d0a1c569e822c239329a03c505a", "score": "0.5119798", "text": "def ClientRects(self):\n\n return [self.ClientRect(), ]", "title": "" }, { "docid": "c982dd8b6e27194d2d57264e14055a8a", "score": "0.51123387", "text": "def GetRect(self):", "title": "" }, { "docid": "c982dd8b6e27194d2d57264e14055a8a", "score": "0.51123387", "text": "def GetRect(self):", "title": "" }, { "docid": "862891342a4b14557ae2b03ea3c52469", "score": "0.51110953", "text": "def update_rect(points):\r\n\tmin_x, max_x = inf, -inf\r\n\tmin_y, max_y = inf, -inf\r\n\tfor point in points:\r\n\t\tmin_x = min(min_x, point.x)\r\n\t\tmax_x = max(max_x, point.x)\r\n\t\tmin_y = min(min_y, point.y)\r\n\t\tmax_y = max(max_y, point.y)\r\n\twidth = max_x - min_x\r\n\theight = max_y - min_y\r\n\treturn pg.Rect((min_x, min_y), (width, height))", "title": "" }, { "docid": "5fb4d3fad753edc9a282f1e929e71fcd", "score": "0.510917", "text": "def render(self, surface, offset=pygame.Vector2(0, 0), size=None, delta=None):\n # Update if delta given\n if not delta == None:\n self.update_animation(delta)\n \n # Render all sprites\n dirty_rects = super().render(surface, offset)\n\n dirty_rects += self.water_big_splash.render(surface, offset)\n dirty_rects += self.water_splash.render(surface, offset)\n\n dirty_rects += self.short_stop.render(surface, offset)\n dirty_rects += self.hard_stop.render(surface, offset)\n dirty_rects += self.hard_stop1.render(surface, offset)\n\n # Return rects to describe stale parts of screen\n return dirty_rects", "title": "" }, { "docid": "4dd791312c9b7be58e47d8c5fc583618", "score": "0.5108367", "text": "def GetUnscaledRect(self, rect):", "title": "" }, { "docid": "303ae69665c609cce772e455f65525a8", "score": "0.5107626", "text": "def get_drawables(self):\n drawables = self.background.get_drawables()+self.wc.get_drawables()+self.enemy1.get_drawables()+self.enemy2.get_drawables()+self.enemy3.get_drawables()+self.enemy4.get_drawables()\n for obstacle in self.obstacles:\n drawables += obstacle.get_drawables()\n return drawables", "title": "" }, { "docid": "b08d2c46f6b746fe56ff00e4610725c7", "score": "0.5107396", "text": "def draw(self, surface, screenRect):\r\n\r\n surfaceBlit = surface.blit\r\n shiftx, shifty = screenRect.topleft\r\n shiftx = shiftx * -1\r\n shifty = shifty * -1\r\n\r\n #Which sprites to draw and\r\n toDrawList = self.collideWith(screenRect, True)\r\n toDraw = {}\r\n #Sort todraw as lists of sprites on horiz pixel rows\r\n for objTuple in toDrawList:\r\n key = objTuple[0].rect.bottom\r\n if not toDraw.has_key(key):\r\n toDraw[key] = []\r\n 
toDraw[key].append(objTuple)\r\n\r\n #Draw horiz rows from low to high (top to bottom)\r\n toDrawRows = toDraw.keys()\r\n toDrawRows.sort()\r\n for rowKey in toDrawRows:\r\n thisRow = toDraw[rowKey]\r\n for objTuple in thisRow:\r\n shiftRect = objTuple[2].move(shiftx, shifty)\r\n surfaceBlit(objTuple[1], shiftRect)", "title": "" }, { "docid": "ee3195da60c6b5b279fa59a6e4021b1e", "score": "0.5105937", "text": "def dirty(self):\n\n self.__matrix, self.__gl_matrix = None, None\n\n for user in self.__used_by:\n user.dirty()", "title": "" }, { "docid": "4336c5cd91d90dc1b7fed543dee387c9", "score": "0.5097586", "text": "def CreateRevolvedSurface(self):\r\n pass", "title": "" }, { "docid": "505b0cace1b608c6df358929a3146f5a", "score": "0.50945026", "text": "def remove_background(p):\n new_p = []\n new_rect = []\n for i in xrange(len(p)):\n if len(p[i]) > 0:\n probs = p[i][:, 1:21]\n probs /= np.sum(probs, axis=1)[:, np.newaxis]\n rects = p[i][:, 21:]\n new_p.append(probs)\n new_rect.append(rects)\n else:\n new_p.append(np.zeros((0, 0)))\n new_rect.append(np.zeros((0, 0)))\n return new_p, new_rect", "title": "" }, { "docid": "07f1589a1ae415e3526771b66b754682", "score": "0.5093973", "text": "def relative_crop(self, rect):\n return [geom.translate(drow=-rect.top, dcol=-rect.left) for geom in self.crop(rect)]", "title": "" }, { "docid": "fac8462458fa62c0799c799990697baa", "score": "0.50938946", "text": "def rect( self ):\r\n return self._rect", "title": "" }, { "docid": "500e3416e1f67d5364c7579167e89787", "score": "0.5093712", "text": "def updateGameCanvasContents(self):\n self.photos = [] # Unload current photos\n\n pPos = self.world.userPos # Player position\n leftEdge = pPos[0] - VIEW_DISTANCE # Left edge of area\n rightEdge = pPos[0] + VIEW_DISTANCE + 1 # Right edge of area\n topEdge = pPos[1] - VIEW_DISTANCE # Top edge of area\n bottomEdge = pPos[1] + VIEW_DISTANCE + 1 # Bottom edge of area\n\n sqX = 0\n sqY = 0\n\n for locY in range(topEdge, bottomEdge):\n for locX in range(leftEdge, rightEdge):\n location = self.world.getLocation((locX, locY))\n if DEBUGMODE:\n print('Adding square', location.subType, 'to', locX, locY)\n self.addImage(sqX, sqY, location.imageName)\n sqX += 1\n sqX = 0\n sqY += 1", "title": "" }, { "docid": "2fe4b1642e57a1ff66286ef3a64fa91f", "score": "0.50933856", "text": "def collideWith(self, rect, bigReturn=False):\r\n crashed = []\r\n spritecollide = rect.colliderect\r\n for s in self.sprites():\r\n sdo = s.getDrawObjects()\r\n if spritecollide(sdo[1]):\r\n if bigReturn:\r\n crashed.append((s,sdo[0],sdo[1]))\r\n else:\r\n crashed.append(s)\r\n return crashed", "title": "" }, { "docid": "02a3d3b02fc91073df55a901d0f30d49", "score": "0.5093172", "text": "def _draw_back(self, outer_width, outer_height):\n rectangles = [\n [\n (0, 0),\n (outer_width, 0),\n (outer_width, outer_height),\n (0, outer_height)\n ],\n [\n (self.back, self.back),\n (outer_width - self.back, self.back),\n (outer_width - self.back, outer_height - self.back),\n (self.back, outer_height - self.back)\n ]\n ]\n for rect in rectangles:\n self.msp.add_lwpolyline(\n rect, dxfattribs={'layer': 'CUT', 'closed': True})\n\n radius = self.back / 4\n circles = [\n (self.back / 2, self.back / 2),\n (self.back / 2, outer_height / 2),\n (self.back / 2, outer_height - self.back / 2),\n (outer_width / 2, outer_height - self.back / 2),\n (outer_width / 2, self.back / 2),\n (outer_width - self.back / 2, self.back / 2),\n (outer_width - self.back / 2, outer_height / 2),\n (outer_width - self.back / 2, outer_height - self.back / 
2)\n ]\n for center in circles:\n self.msp.add_circle(center, radius, dxfattribs={'layer': 'CUT'})", "title": "" }, { "docid": "be27053553f1ab44a4609c3d196875e1", "score": "0.50906366", "text": "def crop_faces(image, rectangle_list, return_original=False):\n faces = []\n for rect in rectangle_list:\n coords = convert_coords_to_points(rect)\n faces.append(image[coords[0][1]:coords[1][1], coords[0][0]:coords[1][0]])\n if return_original and len(faces) == 0:\n return [image]\n return faces", "title": "" }, { "docid": "4289873482baaef0b74411a7499750db", "score": "0.50904745", "text": "def __fields(self):\n result = RectangleList()\n islands = list(self.islands(useexitstatus=True))\n onepixel = self.onepixel\n gxdict = collections.defaultdict(dict)\n gydict = collections.defaultdict(dict)\n primaryregionsx = {}\n primaryregionsy = {}\n\n shape = {tuple(r.shape) for r in self.rectangles}\n if len(shape) > 1:\n raise ValueError(\"Some rectangles have different shapes\")\n shape = shape.pop()\n\n for gc, island in enumerate(islands, start=1):\n rectangles = [self.rectangles[self.rectangledict[n]] for n in island]\n\n for i, (primaryregions, gdict) in enumerate(zip((primaryregionsx, primaryregionsy), (gxdict, gydict))):\n #find the gx and gy that correspond to cx and cy\n average = []\n cs = sorted({r.cxvec[i] for r in rectangles})\n for g, c in enumerate(cs, start=1):\n gdict[gc][c] = g\n theserectangles = [r for r in rectangles if r.cxvec[i] == c]\n average.append(np.mean(units.nominal_values([self.x(r)[i] for r in theserectangles])))\n\n #find mx1, my1, mx2, my2\n #the middle ones come from the average positions of the HPFs on either side\n primaryregions[gc] = [(x1+shape[i] + x2)/2 for x1, x2 in more_itertools.pairwise(average)]\n\n if len(primaryregions[gc]) >= 2:\n #the outer ones come from fitting a line to the middle ones\n m, b = units.np.polyfit(\n x=range(1, len(average)),\n y=primaryregions[gc],\n deg=1,\n )\n primaryregions[gc].insert(0, m*0+b)\n primaryregions[gc].append(m*len(average)+b)\n else:\n #can't fit a line because there are only at most 2 rows/columns, so do an approximation\n allcs = sorted({r.cxvec[i] for r in self.rectangles})\n mindiff = min(np.diff(allcs))\n divideby = 1\n while mindiff / divideby > shape[i]:\n divideby += 1\n mindiff /= divideby\n\n if len(primaryregions[gc]) == 1:\n primaryregions[gc].insert(0, primaryregions[gc][0] - mindiff)\n primaryregions[gc].append(primaryregions[gc][1] + mindiff)\n else: #len(primaryregions[gc]) == 0\n primaryregions[gc].append(average[0] + (shape[i] - mindiff) / 2)\n primaryregions[gc].append(average[0] + (shape[i] + mindiff) / 2)\n\n mx1 = {}\n mx2 = {}\n my1 = {}\n my2 = {}\n\n #set gx, gy, mx1, my1, mx2, my2 for the HPFs\n for i, island in enumerate(islands, start=1):\n for rid in island:\n r = self.rectangles[self.rectangledict[rid]]\n\n gx = gxdict[i][r.cx]\n gy = gydict[i][r.cy]\n\n mx1[rid] = primaryregionsx[i][gx-1]\n mx2[rid] = primaryregionsx[i][gx]\n my1[rid] = primaryregionsy[i][gy-1]\n my2[rid] = primaryregionsy[i][gy]\n\n #see if the primary regions of any HPFs in different islands overlap\n for (i1, island1), (i2, island2) in itertools.combinations(enumerate(islands, start=1), r=2):\n if len(island1) == 1 or len(island2) == 1: continue #orphans are excluded\n\n #first see if the islands overlap\n x11 = min(primaryregionsx[i1])\n x21 = max(primaryregionsx[i1])\n x12 = min(primaryregionsx[i2])\n x22 = max(primaryregionsx[i2])\n\n y11 = min(primaryregionsy[i1])\n y21 = max(primaryregionsy[i1])\n y12 = 
min(primaryregionsy[i2])\n y22 = max(primaryregionsy[i2])\n\n #if a box around the islands overlaps in both x and y\n if (\n max(x21, x22) - min(x11, x12) + 1e-5*x11 < (x21 - x11) + (x22 - x12)\n and max(y21, y22) - min(y11, y12) + 1e-5*x11 < (y21 - y11) + (y22 - y12)\n ):\n self.__logger.info(f\"Primary regions for islands {i1} and {i2} overlap in both x and y, seeing if any field primary regions overlap\")\n\n xoverlapstoadjust = collections.defaultdict(list)\n yoverlapstoadjust = collections.defaultdict(list)\n cornerstoadjust = collections.defaultdict(list)\n\n for rid1, rid2 in itertools.product(island1, island2):\n xx11 = mx1[rid1]\n xx21 = mx2[rid1]\n xx12 = mx1[rid2]\n xx22 = mx2[rid2]\n\n yy11 = my1[rid1]\n yy21 = my2[rid1]\n yy12 = my1[rid2]\n yy22 = my2[rid2]\n\n if (\n max(xx21, xx22) - min(xx11, xx12) + 1e-5*x11 < (xx21 - xx11) + (xx22 - xx12)\n and max(yy21, yy22) - min(yy11, yy12) + 1e-5*x11 < (yy21 - yy11) + (yy22 - yy12)\n ):\n self.__logger.warningglobal(f\"Primary regions for fields {rid1} and {rid2} overlap, adjusting them\")\n\n threshold = 100*onepixel\n xs = ys = None\n ridax = ridbx = riday = ridby = None\n if abs(xx21 - xx12) <= threshold:\n xs = xx12, xx21\n ridax, ridbx = rid2, rid1\n elif abs(xx11 - xx22) <= threshold:\n xs = xx11, xx22\n ridax, ridbx = rid1, rid2\n if abs(yy21 - yy12) <= threshold:\n ys = yy12, yy21\n riday, ridby = rid2, rid1\n elif abs(yy11 - yy22) <= threshold:\n ys = yy11, yy22\n riday, ridby = rid1, rid2\n if xs is ys is None:\n raise ValueError(f\"Primary regions for fields {rid1} and {rid2} have too big of an overlap\")\n\n if xs is not None and ys is not None:\n cornerstoadjust[xs, ys].append((ridax, ridbx, riday, ridby))\n elif xs is not None:\n xoverlapstoadjust[xs].append((ridax, ridbx))\n elif ys is not None:\n yoverlapstoadjust[ys].append((riday, ridby))\n\n cornerxscounter = collections.Counter(xs for xs, ys in cornerstoadjust)\n corneryscounter = collections.Counter(ys for xs, ys in cornerstoadjust)\n xswith2corners = [xs for xs, count in cornerxscounter.items() if count >= 2]\n yswith2corners = [ys for ys, count in corneryscounter.items() if count >= 2]\n\n for (xs, ys), rids in cornerstoadjust.items():\n if xs in xswith2corners:\n xoverlapstoadjust[xs] += [(ridax, ridbx) for ridax, ridbx, riday, ridby in rids]\n if ys in yswith2corners:\n yoverlapstoadjust[ys] += [(riday, ridby) for ridax, ridbx, riday, ridby in rids]\n for (xs, ys), rids in cornerstoadjust.items():\n if xs in xoverlapstoadjust:\n xoverlapstoadjust[xs] += [(ridax, ridbx) for ridax, ridbx, riday, ridby in rids]\n if ys in yoverlapstoadjust:\n yoverlapstoadjust[ys] += [(riday, ridby) for ridax, ridbx, riday, ridby in rids]\n\n for ((oldmx1, oldmx2), (oldmy1, oldmy2)), rids in cornerstoadjust.items():\n if xs in xoverlapstoadjust or ys in yoverlapstoadjust:\n pass\n elif oldmx1 - oldmx2 < oldmy1 - oldmy2:\n xoverlapstoadjust[oldmx1, oldmx2] += rids\n else:\n yoverlapstoadjust[oldmy1, oldmy2] += rids\n\n for (oldmx1, oldmx2), rids in xoverlapstoadjust.items():\n for rid1, rid2 in rids:\n newmx = (oldmx1 + oldmx2)/2\n assert (mx1[rid1] == oldmx1 or mx1[rid1] == newmx) and (mx2[rid2] == oldmx2 or mx2[rid2] == newmx), (mx1[rid1], oldmx1, mx2[rid2], oldmx2, newmx)\n mx1[rid1] = mx2[rid2] = newmx\n for (oldmy1, oldmy2), rids in yoverlapstoadjust.items():\n for rid1, rid2 in rids:\n newmy = (oldmy1 + oldmy2)/2\n assert (my1[rid1] == oldmy1 or my1[rid1] == newmy) and (my2[rid2] == oldmy2 or my2[rid2] == newmy), (my1[rid1], oldmy1, my2[rid2], oldmy2, newmy)\n my1[rid1] = 
my2[rid2] = newmy\n\n for rid1, rid2 in itertools.product(island1, island2):\n xx11 = mx1[rid1]\n xx21 = mx2[rid1]\n xx12 = mx1[rid2]\n xx22 = mx2[rid2]\n\n yy11 = my1[rid1]\n yy21 = my2[rid1]\n yy12 = my1[rid2]\n yy22 = my2[rid2]\n\n if (\n max(xx21, xx22) - min(xx11, xx12) + 1e-5*x11 < (xx21 - xx11) + (xx22 - xx12)\n and max(yy21, yy22) - min(yy11, yy12) + 1e-5*x11 < (yy21 - yy11) + (yy22 - yy12)\n ):\n raise ValueError(f\"Primary regions for fields {rid1} and {rid2} still overlap\")\n\n #if there are any HPFs that are in the wrong quadrant (negative px or py), adjust the whole slide\n minpxvec = [np.inf * onepixel, np.inf * onepixel]\n for rectangle in self.rectangles:\n for gc, island in enumerate(islands, start=1):\n if rectangle.n in island:\n break\n else:\n assert False\n gx = gxdict[gc][rectangle.cx]\n gy = gydict[gc][rectangle.cy]\n pxvec = self.x(rectangle) - self.origin\n minpxvec = np.min([minpxvec, units.nominal_values(pxvec)], axis=0)\n result.append(\n Field(\n rectangle=rectangle,\n ixvec=floattoint(np.round((rectangle.xvec / onepixel).astype(float))) * onepixel,\n gc=0 if len(island) == 1 else gc,\n pxvec=pxvec,\n gxvec=(gx, gy),\n primaryregionx=np.array([mx1[rectangle.n], mx2[rectangle.n]]) - self.origin[0],\n primaryregiony=np.array([my1[rectangle.n], my2[rectangle.n]]) - self.origin[1],\n readingfromfile=False,\n )\n )\n\n minx, miny = np.floor(minpxvec/(100*onepixel))*100*onepixel\n if minx > 0: minx = 0\n if miny > 0: miny = 0\n if minx or miny:\n self.__logger.warningglobal(f\"Some HPFs have (x, y) < (xposition, yposition), shifting the whole slide by {-minx, -miny}\")\n for f in result:\n f.pxvec -= (minx, miny)\n f.primaryregionx -= minx\n f.primaryregiony -= miny\n\n return result", "title": "" }, { "docid": "bca6f75a60851434c7609ca3fb42f58e", "score": "0.5081102", "text": "def get_rect_surf(cls, size, color, alpha):\n # Truncate alpha to int to limit number of surfaces saved\n alpha = int(alpha)\n key = (size, color, alpha)\n val = cls.fonts.get(key, None)\n if val is not None:\n return val\n surf = pygame.Surface(size)\n surf.set_alpha(alpha)\n surf.fill(color)\n cls.rect_surfs[key] = surf\n return surf", "title": "" }, { "docid": "a8dea764f39f4112fd2222f7066febda", "score": "0.5078422", "text": "def s_rect(self):\r\n return Rect(copy.copy(self.s_position), copy.copy(self.s_size))", "title": "" }, { "docid": "8f02ff1ca571cb8249b7788873964d33", "score": "0.50748736", "text": "def _draw_loot(self):\n \n for _loot in self._gameplay.loot:\n _rect = pygame.Rect(_loot.x * self._tile_size, _loot.y * self._tile_size, self._tile_size, self._tile_size)\n pygame.draw.rect(self._surface, self._colors.get(\"yellow\"), _rect)", "title": "" }, { "docid": "a86dbca447e5a8f968f03b53354cfab2", "score": "0.5072699", "text": "def update_life(rects):\n all_neighbors = {}\n sx, sy = CELL_SIZE[0], CELL_SIZE[1]\n for i in rects:\n all_neighbors.setdefault(i, 0)\n curr_neighbors = (\n (i[0] + sx, i[1]),\n (i[0], i[1] + sy),\n (i[0] - sx, i[1]),\n (i[0], i[1] - sy),\n (i[0] + sx, i[1] + sy),\n (i[0] + sx, i[1] - sy),\n (i[0] - sx, i[1] - sy),\n (i[0] - sx, i[1] + sy),\n )\n for j in curr_neighbors:\n all_neighbors.setdefault(j, 0)\n all_neighbors[j] += 1\n for i in rects.copy():\n if all_neighbors[i] > 3 or all_neighbors[i] < 2:\n rects.remove(i)\n for k in all_neighbors:\n if all_neighbors[k] == 3:\n rects.add(k)\n return rects", "title": "" }, { "docid": "e5397f8d1af87f207e77b1321ecba581", "score": "0.50703526", "text": "def mirror_removes(self, x0, y0,lengths):\n coords = lambda 
x,dx=0: x+dx\n l1, l2, l3, arc = lengths\n rs = Shapes(self.__cell)\n\n # Make Bragg mirror sections\n #\n bragg = [rs.rect(self.__width,l1, x0, y0)]\n\n x1,y1 = [coords(x0,self.__radius+self.__width), coords(y0,l1)]\n bragg += [rs.halfarc(self.__radius, self.__width, x1, y1, orientation='N', npoints=40)]\n\n x2,y2 = [coords(x1,self.__radius), coords(y1,-l2)]\n bragg += [rs.rect(self.__width,l2,x2,y2)]\n\n x3,y3 = [coords(x2,self.__radius+self.__width), coords(y2)]\n bragg += [rs.halfarc(self.__radius, self.__width,x3, y3, orientation='S', npoints=40)]\n\n x4,y4 = [coords(x3,self.__radius), coords(y3)] \n bragg += [rs.rect(self.__width,l3,x4,y4)]\n\n self.__xstrt = x1\n self.__xstop = x4\n self.__ystrt = y1\n self.__ystop = y4 + l3\n\n return bragg", "title": "" }, { "docid": "80c962b96edc7d076c0c736bcc931dad", "score": "0.5065412", "text": "def clean_board(self):\n i = self.HEIGHT - 1 # Start from the bottom...not that it really matters \n lines_to_delete = []\n while i >= 0:\n flagged_for_delete = True\n # Scan each line and see if is filled\n for square in self.board[i]:\n # Line not completely filled, stop checking\n if square != 2:\n flagged_for_delete = False\n break\n # Add line to list of lines to delete\n if flagged_for_delete:\n lines_to_delete.append(i)\n i-= 1\n # Delete lines that are filled\n lines_to_delete.sort(reverse=True) # Reverse list, otherwise deleting will cause indices to shift\n for line in lines_to_delete:\n del self.board[line]\n for _ in range(len(lines_to_delete)):\n # Replace deleted lines\n self.board.insert(0,[0 for x in range(self.WIDTH)])\n self.clean_board_flag = False", "title": "" }, { "docid": "ec151219944812a5943d5825478d79df", "score": "0.50638974", "text": "def calc_rect(self):\n\n if self._rect is None:\n (w, h), _ = self.calc_size()\n cx, cy = self._4pt_center\n\n if self._LR_fudge_ratio > 0:\n center_shift = min(w * 0.2, self._LR_fudge_ratio * w * 10)\n # cx, cy = mef.rotate_point((cx, cy), self._in_plane, self._4pt_center)\n if self._is_looking_to_their_right:\n cx += center_shift\n else:\n cx -= center_shift\n\n cx, cy = mef.rotate_point((cx, cy), -self._in_plane, self._4pt_center)\n\n rx, ry = cx - w / 2, cy - h / 2\n self._rect = [rx, ry, rx + w - 1, ry + h - 1]\n\n return self._rect", "title": "" }, { "docid": "4cf22cb3114d59183f4e26eca75535e8", "score": "0.506265", "text": "def clear():\n for x in range(_light_count * PIXELS_PER_LIGHT):\n pixels[x][0:3] = [0, 0, 0]", "title": "" }, { "docid": "3797510cd3c447b8bc83374444f8739b", "score": "0.50585115", "text": "def get_obstacles(self):\n\n return [self.blobs[blob_id] for blob_id in (self.obj_ids + self.obs_ids)]", "title": "" }, { "docid": "26458e6ad74f8c2dea4ec932a5964349", "score": "0.5058115", "text": "def redraw_board(self, board):\n self.canvas.delete(\"pawns\")\n self.canvas.delete(\"h_walls\")\n self.canvas.delete(\"v_walls\")\n self.scoreboard.delete(\"scoreboard_walls\")\n for i in range(board.rows):\n for j in range(board.cols):\n # ensure coherent unselected appearance\n self.mark_object((i, j, \"tile\"))\n for player in range(2):\n i, j = self.board.pawns[player]\n x, y = self.get_tile_xy(i, j)\n self.pawn_ids[player] = self.canvas.create_oval(\n x - self.w_pawn / 2, y - self.w_pawn / 2,\n x + self.w_pawn / 2, y + self.w_pawn / 2,\n fill=self.pawn_colors[player],\n outline=self.pawn_outlin[player],\n width=2, tags=[\"pawns\"])\n for wall_num in range(self.board.nb_walls[player]):\n x, y = self.get_wall_scoreboard_xy(player, wall_num)\n self.scoreboard_wall_ids[player][wall_num] = 
\\\n self.scoreboard.create_rectangle(\n x - self.w_wall / 2 + 1, y - self.l_wall / 2 + 1,\n x + self.w_wall / 2, y + self.l_wall / 2,\n fill=self.wall_color, outline=self.wall_outli[player],\n width=2,\n tags=[\"scoreboard_walls\"])\n for i in range(board.rows - 1):\n for j in range(board.cols - 1):\n self.mark_object((i, j, \"bg_h_wall\"))\n self.mark_object((i, j, \"bg_v_wall\"))\n for (i, j) in self.board.horiz_walls:\n x, y = self.get_wall_xy(i, j)\n self.canvas.create_rectangle(\n x - self.l_wall / 2, y - self.w_wall / 2 + 2,\n x + self.l_wall / 2, y + self.w_wall / 2 - 3,\n fill=self.wall_color, width=2,\n tags=[\"h_walls\"])\n for (i, j) in self.board.verti_walls:\n x, y = self.get_wall_xy(i, j)\n self.canvas.create_rectangle(\n x - self.w_wall / 2 + 2, y - self.l_wall / 2,\n x + self.w_wall / 2 - 3, y + self.l_wall / 2,\n fill=self.wall_color, width=2,\n tags=[\"v_walls\"])", "title": "" } ]
b4ee8b559c2f4b6f1cb5f7335cc4859b
Add placeholders and classes, remove autogenerated labels and set autofocus on first field
[ { "docid": "ba0e42ff03f5470d6030d92c8536c04d", "score": "0.5561623", "text": "def __init__(self, *args, **kwargs):\n\n # We call the default init method to set the form up\n # as it would be by default.\n super().__init__(*args, **kwargs)\n\n # Create dictionary of placeholders that will show up\n # in the form fields.\n placeholders = {\n 'full_name': 'Full Name',\n 'email': 'Email Address',\n 'phone_number': 'Phone Number',\n 'postcode': 'Postal Code',\n 'town_or_city': 'Town or City',\n 'street_address1': 'Street Address 1',\n 'street_address2': 'Street Address 2',\n 'county': 'County, State or Locality',\n }\n\n # Set the autofocus attribute on the full name field to true\n # so the cursor will start in the full name field when\n # the user loads the page.\n self.fields['full_name'].widget.attrs['autofocus'] = True\n for field in self.fields:\n # To prevent an error due to country not having a placeholder\n if field != 'country':\n # Iterate through the form's fields, adding a star if it's\n # a required field on the model\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n\n # Set all the placeholder attributes to their values in the\n # dictionary defined above\n self.fields[field].widget.attrs['placeholder'] = placeholder\n\n # Add a CSS class that we'll use later\n self.fields[field].widget.attrs['class'] = 'stripe-style-input'\n\n # Remove the form field labels as we no longer need them\n self.fields[field].label = False", "title": "" } ]
[ { "docid": "ee58ce594e73263af8c770f223304e5d", "score": "0.6516639", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'title': 'Title',\n 'body': 'Body',\n 'slug': 'Slug'\n }\n\n self.fields['title'].widget.attrs['autofocus'] = True\n for field in self.fields:\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].label = False", "title": "" }, { "docid": "addb1b4ae4076a58d7543aa9f18980b7", "score": "0.6465558", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'time': 'hh:mm',\n 'event': 'Event Details',\n }\n\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].widget.attrs['class'] = 'stripe-style-input'", "title": "" }, { "docid": "7314821896b2c512b048006de6ac183a", "score": "0.645658", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'name': 'Full Name',\n 'from_email': 'Email Address',\n 'subject': 'Subject',\n 'content': 'Message',\n }\n\n self.fields['name'].widget.attrs['autofocus'] = True\n for field in self.fields:\n if self.fields[field].required:\n placeholder = f'{placeholders[field]} *'\n else:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder\n self.fields[field].label = False", "title": "" }, { "docid": "37ef4ff8644469c4ce25115d98bd3d8f", "score": "0.64472383", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'default_phone_number': 'Phone Number',\n 'default_postcode': 'Postal Code',\n 'default_town_or_city': 'Town or City',\n 'default_street_address1': 'Street Address 1',\n 'default_street_address2': 'Street Address 2',\n }\n\n self.fields['default_phone_number'].widget.attrs['autofocus'] = True", "title": "" }, { "docid": "cd23914e925d3dd8470336feb7e7ba12", "score": "0.63115615", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['description'].widget.attrs['autofocus'] = False\n for field in self.fields:\n self.fields[field].widget.attrs['class'] = 'commentform'\n self.fields[field].label = False", "title": "" }, { "docid": "6728a69c72cbbbbee56d905757c97ed3", "score": "0.6048441", "text": "def text_ctrl_placeholder_on_gain_focus(self, event):\n if not self.ldraw_name_isvalid:\n self.ldraw_name_input.SetValue(\"\")\n\n event.Skip()", "title": "" }, { "docid": "bb880305aff5e0f00f77c3b0110330bb", "score": "0.5874018", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n 'route_name': 'The name of the route you belayed the climber on...',\n 'climber': 'The climber who you belayed on that route...',\n 'grade': 'The grade of the route climbed...',\n 'comment': 'Any comments...',\n }\n\n for field in self.fields:\n if field in placeholders.keys():\n placeholder = placeholders[field]\n self.fields[field].widget.attrs['placeholder'] = placeholder", "title": "" }, { "docid": "c7a1f55b13f2b5f6c3a42b6a6563f960", "score": "0.58391094", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n for field in self.fields:\n if self.fields[field].required:\n label = f'{field} *'\n self.fields[field].label = label\n else:\n label = f'{field}'\n 
self.fields[field].label = label", "title": "" }, { "docid": "bbdb6484a4f83be092d81ff5c8c9a196", "score": "0.58323646", "text": "def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n placeholders = {\r\n 'full_name': 'Prenume si Nume (Ex: Ioana Maria Popescu)',\r\n 'email': 'Adresa email',\r\n 'phone_number': 'Telefon international (Ex: +39 000 000 0000 sau +40 700 000 000)',\r\n 'country': 'Tara',\r\n 'town_or_city': 'Orasul',\r\n 'app': 'Selectati aplicatia',\r\n 'mac': 'Adresa MAC / TV ID',\r\n 'mac_pass': 'PIN MAC / Parola MAC',\r\n 'notes': 'Nota comanda (opțional)',\r\n }\r\n\r\n for field in self.fields:\r\n if self.fields[field].required:\r\n placeholder = f'{placeholders[field]} *'\r\n else:\r\n placeholder = placeholders[field]\r\n self.fields[field].widget.attrs['placeholder'] = placeholder\r\n self.fields[field].widget.attrs['class'] = 'stripe-style-input'\r\n self.fields[field].label = False", "title": "" }, { "docid": "2df40030d94e9f304722b0dcbe6ec378", "score": "0.58055294", "text": "def __init__(self, *args, **kwargs):\n super(BaseCreateCharacterForm, self).__init__(*args, **kwargs)\n for field in self.fields:\n self.fields[field].widget.attrs['class'] = 'form-control'", "title": "" }, { "docid": "a9d301da91f7cb7e49d3290fd6c4e315", "score": "0.5767531", "text": "def label_placeholder(self):\n raise NotImplementedError(\"has to be overwritten\")", "title": "" }, { "docid": "263a92a31667b326d48da98bffb2e614", "score": "0.5685015", "text": "def __init__(self, *args, **kwargs):\n # Call default __init__() method\n super().__init__(*args, **kwargs)\n # Dictionary of placeholders displayed in form fields\n placeholders = {\n 'full_name': 'Full Name',\n 'email': 'Email Address',\n 'phone_number': 'Phone Number',\n 'postcode': 'Postal Code',\n 'town_or_city': 'Town or City',\n 'street_address1': 'Street Address 1',\n 'street_address2': 'Street Address 2',\n 'county': 'County, State or Locality',\n }\n\n # full_name autofocus attribute set to true, so cursor starts here\n self.fields['full_name'].widget.attrs['autofocus'] = True\n for field in self.fields:\n # Ensure that code doesn't seek 'country' field\n if field != 'country':\n # If the form field is required\n if self.fields[field].required:\n # Place a star next to it to indicate this\n placeholder = f'{placeholders[field]} *'\n else:\n # Otherwise, there is no star\n placeholder = placeholders[field]\n # Set form placeholder attributes to respective\n # values in placeholder dict above\n self.fields[field].widget.attrs['placeholder'] = placeholder\n # Add stripe-style-input CSS class to each field\n self.fields[field].widget.attrs['class'] = 'stripe-style-input'\n # Remove label from each field\n self.fields[field].label = False", "title": "" }, { "docid": "3b2790b59ace5e4f884dbc4e8f2da14d", "score": "0.55438095", "text": "def add_input_text(name: str, default_value: str = \"\", hint: str = \"\", multiline: bool = False, no_spaces: bool = False, uppercase: bool = False, decimal: bool = False, hexadecimal: bool = False, \n\t\t\t\t readonly: bool = False, password: bool = False, callback: str = \"\", tip: str = \"\", parent: str = \"\", before: str = \"\", data_source: str = \"\", width: int = 0) -> None:\n\t...", "title": "" }, { "docid": "14b50d0a8d5154f11f62718f3e8a35f9", "score": "0.5532926", "text": "def default_placeholder(self):\n return self.field.label if get_bootstrap_setting(\"set_placeholder\") else \"\"", "title": "" }, { "docid": "862701459bd1bd51a70294c63dbd854d", "score": "0.5472716", 
"text": "def populate_fields(self):\n self.ids.add_label.text = f'Visualizando [b]{self.item_data[\"cls_nome\"]}[/b]'\n self.ids.cls_nome.text = str(self.item_data['cls_nome'])\n self.ids.cls_codigo.text = str(self.item_data['cls_codigo'])\n self.ids.cls_sub.text = str(self.item_data['cls_sub']) if self.item_data['cls_sub'] != '' else '[ Fundo ]'\n self.ids.reg_abertura.text = str(self.item_data['reg_abertura'])\n self.ids.reg_desativacao.text = str(self.item_data['reg_desativacao'])\n self.ids.reg_reativacao.text = str(self.item_data['reg_reativacao'])\n self.ids.reg_mudanca_nome.text = str(self.item_data['reg_mudanca_nome'])\n self.ids.reg_deslocamento.text = str(self.item_data['reg_deslocamento'])\n self.ids.reg_extincao.text = str(self.item_data['reg_extincao'])\n self.ids.ativa.state = 'down' if self.item_data['cls_indicador'] == 'Ativa' else 'normal'\n self.ids.inativa.state = 'normal' if self.item_data['cls_indicador'] == 'Ativa' else 'down'\n self.ids.fase_corrente.text = str(self.item_data['fase_corrente'])\n self.ids.evento_fase_corrente.text = str(self.item_data['evento_fase_corrente'])\n self.ids.fase_intermediaria.text = str(self.item_data['fase_intermediaria'])\n self.ids.evento_fase_inter.text = str(self.item_data['evento_fase_inter'])\n self.ids.preservacao.state = 'down' if self.item_data['dest_final'] == 'Preservação' else 'normal'\n self.ids.eliminacao.state = 'normal' if self.item_data['dest_final'] == 'Preservação' else 'down'\n self.ids.reg_alteracao.text = str(self.item_data['reg_alteracao'])\n self.ids.observacoes.text = str(self.item_data['observacoes'])", "title": "" }, { "docid": "f6535b4ff512cd7fd75667b12821d8da", "score": "0.5459139", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"username\"].label = \"Nom d'utilisateur\"\n self.fields[\"phone_number\"].label = \"Numéro de téléphone\"\n self.fields[\"password1\"].label = \"Mot de passe\"\n self.fields[\"password2\"].label = \"Confirmation du mot de passe\"", "title": "" }, { "docid": "cb96e5de8315a4441457fafab772e424", "score": "0.5409572", "text": "def setup_placeholder_label(self):\n self.update_placeholder_text()\n\n if not self.layout():\n layout = QVBoxLayout()\n self.setLayout(layout)\n\n self.layout().addWidget(self._placeholder_label)", "title": "" }, { "docid": "77dd63490c17752ef16eb9ad0b51d017", "score": "0.5324823", "text": "def show_placeholder_text(self):\n self._placeholder_label.show()", "title": "" }, { "docid": "767b33cd4ffb30b27667a2ffecfb144f", "score": "0.53181434", "text": "def __init__(self, *args, **kw):\n super(SignupFormExtra, self).__init__(*args, **kw)\n # Put the first and last name at the top\n new_order = self.fields.keyOrder[:-4]\n new_order.insert(0, 'first_name')\n new_order.insert(1, 'last_name')\n new_order.insert(2, 'Age')\n new_order.insert(3, 'school')\n self.fields.keyOrder = new_order", "title": "" }, { "docid": "3f35a32ff89ed663387857ff9ef3b1e8", "score": "0.53073275", "text": "def clear_add_member_inputs(self):\n self.first_name_text.delete(0, END)\n self.last_name_text.delete(0, END)\n self.email_text.delete(0, END)\n self.phone_text.delete(0, END)\n self.home_address_text.delete(0, END)", "title": "" }, { "docid": "d09e40772db2df8722e080d357499294", "score": "0.52865267", "text": "def set_placeholder(self, val):\r\n self.options['placeholder'] = val", "title": "" }, { "docid": "29d19f829a89abe581e15d51c2fc7669", "score": "0.52451897", "text": "def focus_inputs(self):\n self.frame.body.focus_position = 
self.focus_order('command_input')\n self.command_input.set_edit_pos(5) # set cursor behind 'find '", "title": "" }, { "docid": "b4225f41b92a7dab33e4ed2d4ecb57e2", "score": "0.5203788", "text": "def style_text_input(self):", "title": "" }, { "docid": "0aa2ce4731e321cb6192e7f24c49cb39", "score": "0.51859146", "text": "def __init__(self,*args,**kwargs):\n\n try:\n super().__init__(*args,**kwargs)\n self.fields['username'].widget.attrs['placeholder'] = 'Display Name'\n self.fields['email'].widget.attrs[\"placeholder\"] = \"[email protected]\"\n self.fields['password1'].widget.attrs[\"placeholder\"] = \"Password\"\n self.fields['password2'].widget.attrs[\"placeholder\"] = \"Password Confirmation\"\n self.fields['email'].widget.attrs[\"placeholder\"] = \"[email protected]\"\n self.fields['username'].label = \"\"\n self.fields['email'].label = \"\"\n self.fields['password1'].label = \"\"\n self.fields['password2'].label = \"\"\n except Exception as e:\n logger.error('The error in UserForm - __init__ method is :'+str(e))\n print(e)", "title": "" }, { "docid": "a4d0b6ad3f4eb226e73e424c4bb88784", "score": "0.51649386", "text": "def __init__(self, master, prefill=False, text=\"YYYY-MM-DD\", **kwargs):\n ttk.Entry.__init__(self, master, **kwargs)\n self.master = master\n self.text = text\n self.prefill = prefill\n self.bind('<FocusIn>', self._focus_in)\n self.bind('<FocusOut>', self._focus_out)\n self.insert(0, text)\n if prefill:\n self.configure(foreground='grey')", "title": "" }, { "docid": "9ce5ad44ac886cbfe702cadffbad5d18", "score": "0.51613367", "text": "def add_judge_fields_from_list(master, model):\n fields = model.return_data_fields()\n row_count = 0\n for field in fields:\n if row_count == 4:\n master.set_row_cursor(17)\n master.set_col_cursor (2)\n label = Label(master, text=field[0])\n label.grid(row=master.row_cursor, column=master.col_cursor, sticky=W, padx=10, pady=5)\n if field[0] == 'Institution':\n master.set_prison_field(field, model)\n else:\n entry = Entry(master, textvariable=field[1], width=20)\n if row_count >= 4:\n entry.grid(row=master.row_cursor, column=master.col_cursor, sticky=E, pady=5)\n else:\n entry.grid(row=master.row_cursor, column=master.col_cursor+1, pady=5)\n master.row_cursor += 1\n row_count +=1", "title": "" }, { "docid": "c754a8fc3c0b8a9469bea9c8260eb28d", "score": "0.51477605", "text": "def __init__(self, names, *args, **kwargs):\n super(DynamicForm, self).__init__(*args, **kwargs)\n self.names = names\n for name, type in names:\n self.fields[name] = type.as_field(label=name.replace('_', ' '),\n required=False)", "title": "" }, { "docid": "10a4e1e7be8bda27252aa9f9e7dd6f21", "score": "0.51097035", "text": "def focus_up():\r\n pass", "title": "" }, { "docid": "d86f1cc0649d569fec3bd473c295aa98", "score": "0.51087236", "text": "def set_values(self):\n self.txt_ip.set_text(self.format_entry(\"ip\"))\n self.txt_netmask.set_text(self.format_entry(\"netmask\"))\n self.txt_gateway.set_text(self.format_entry(\"gateway\"))\n self.txt_dns_1.set_text(self.format_entry(\"dns1\"))\n self.txt_dns_2.set_text(self.format_entry(\"dns2\"))\n self.txt_dns_3.set_text(self.format_entry(\"dns3\"))\n self.reset_static_checkboxes()", "title": "" }, { "docid": "45d781c170c8963d9a37875a00f307a0", "score": "0.5102946", "text": "def _set_up_fields(self, *args, **kwargs):\n queryset = self.fields['organization'].queryset.select_related().annotate(\n email_restricted=models.Sum('domain__id')\n )\n self.fields['email'].required = True\n self.fields['username'].help_text = ''\n 
self.fields['organization'].required = True\n self.fields['organization'].queryset = queryset\n self.fields['organization'].choices = self.compose_organization_choices(queryset)\n help_text = (\n '<a href=\"{}?next={}&subject={}\">I want to create my own group</a>'\n )\n self.fields['organization'].help_text = help_text.format(\n reverse('askup:feedback'),\n reverse('askup:sign_up'),\n 'I want to create my own group'\n )", "title": "" }, { "docid": "8cb6ea7b2a54d5a4a4bfb810bcd0634d", "score": "0.5065685", "text": "def load_gui(self):\n #build a Lable (tk widget) with fname, put it on self.master, and give it text \"First Name:\"\n self.lbl_fname = tk.Label(self.master,text='First Name:')\n self.lbl_fname.grid(row=0,column=0,padx=(27,0),pady=(10,0),sticky=N+W)\n self.lbl_lname = tk.Label(self.master,text='Last Name:')\n self.lbl_lname.grid(row=2,column=0,padx=(27,0),pady=(10,0),sticky=N+W)\n self.lbl_phone = tk.Label(self.master,text='Phone Number:')\n self.lbl_phone.grid(row=4,column=0,padx=(27,0),pady=(10,0),sticky=N+W)\n self.lbl_email = tk.Label(self.master,text='Email Address:')\n self.lbl_email.grid(row=6,column=0,padx=(27,0),pady=(10,0),sticky=N+W)\n self.lbl_info = tk.Label(self.master,text='Information:')\n self.lbl_info.grid(row=0,column=2,padx=(0,0),pady=(10,0),sticky=N+W)\n\n #built text boxes for data entry, that take up more than one column. Note: rowspan 1 is default and\n #doesn't need to be explicitly stated\n self.txt_fname = tk.Entry(self.master,text='')\n self.txt_fname.grid(row=1,column=0,rowspan=1,columnspan=2,padx=(30,40),pady=(0,0),sticky=N+E+W)\n self.txt_lname = tk.Entry(self.master,text='')\n self.txt_lname.grid(row=3,column=0,rowspan=1,columnspan=2,padx=(30,40),pady=(0,0),sticky=N+E+W)\n self.txt_phone = tk.Entry(self.master,text='')\n self.txt_phone.grid(row=5,column=0,rowspan=1,columnspan=2,padx=(30,40),pady=(0,0),sticky=N+E+W)\n self.txt_email = tk.Entry(self.master,text='')\n self.txt_email.grid(row=7,column=0,rowspan=1,columnspan=2,padx=(30,40),pady=(0,0),sticky=N+E+W)\n\n #Define the listbox with a scrollbar and grid them\n self.scrollbar1 = Scrollbar(self.master,orient=VERTICAL)\n self.lstList1 = Listbox(self.master,exportselection=0,yscrollcommand=self.scrollbar1.set)\n self.lstList1.bind('<<ListboxSelect>>',lambda event: phonebook_func.onSelect(self,event))\n self.scrollbar1.config(command=self.lstList1.yview)\n self.scrollbar1.grid(row=1,column=5,rowspan=7,columnspan=1,padx=(0,0),pady=(0,0),sticky=N+E+S)\n self.lstList1.grid(row=1,column=2,rowspan=7,columnspan=3,padx=(0,0),pady=(0,0),sticky=N+E+S+W)\n\n #define buttons\n #lambda = annonymous function\n self.btn_add = tk.Button(self.master,width=12,height=2,text='Add',command=lambda: phonebook_func.addToList(self))\n self.btn_add.grid(row=8,column=0,padx=(25,0),pady=(45,10),sticky=W)\n self.btn_update = tk.Button(self.master,width=12,height=2,text='Update',command=lambda: phonebook_func.onUpdate(self))\n self.btn_update.grid(row=8,column=1,padx=(15,0),pady=(45,10),sticky=W)\n self.btn_delete = tk.Button(self.master,width=12,height=2,text='Delete',command=lambda: phonebook_func.onDelete(self))\n self.btn_delete.grid(row=8,column=2,padx=(15,0),pady=(45,10),sticky=W)\n self.btn_close = tk.Button(self.master,width=12,height=2,text='Close',command=lambda: phonebook_func.ask_quit(self))\n self.btn_close.grid(row=8,column=4,columnspan=1,padx=(15,0),pady=(45,10),sticky=E)\n\n phonebook_func.create_db(self)\n phonebook_func.onRefresh(self)", "title": "" }, { "docid": "7a8a9f393b9d02a142dfb7ad15b04d13", "score": 
"0.50606596", "text": "def __init__(self, label, helptext=None, interactive=True,\n args_key=None, hideable=False, validator=None):\n GriddedInput.__init__(self, label=label, helptext=helptext,\n interactive=interactive,\n args_key=args_key, hideable=hideable,\n validator=validator)\n self.textfield = Text.TextField()\n self.textfield.textChanged.connect(self._text_changed)\n self.widgets[2] = self.textfield", "title": "" }, { "docid": "152c6436042c016b952778149be04c81", "score": "0.5051721", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['company'].empty_label = None", "title": "" }, { "docid": "152c6436042c016b952778149be04c81", "score": "0.5051721", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['company'].empty_label = None", "title": "" }, { "docid": "b1fd3000d899d6b0ad631fd8eeedeff0", "score": "0.50395644", "text": "def hide_placeholder_text(self):\n self._placeholder_label.hide()", "title": "" }, { "docid": "f1bb9717134acd581ca29b42c6bd82e8", "score": "0.5034833", "text": "def _placeholders(Slide):\n return _shapes(Slide, ['msoPlaceholder'])", "title": "" }, { "docid": "4590547b259dba18dab3e69fd542e228", "score": "0.50268036", "text": "def displayWidgets(self):\r\n # Create name label and line edit widdgets\r\n QLabel(\"Please enter your name below\", self).move(100, 10)\r\n name_label = QLabel(\"Name:\", self)\r\n name_label.move(70, 50)\r\n\r\n self.name_entry = QLineEdit(self)\r\n self.name_entry.setAlignment(Qt.AlignLeft) # default alignment is Alignleft\r\n self.name_entry.move(130, 50)\r\n self.name_entry.resize(200, 20) # change size of entry field\r\n\r\n self.clear_button = QPushButton('Clear', self)\r\n self.clear_button.clicked.connect(self.clearEntries)\r\n self.clear_button.move(160, 110)", "title": "" }, { "docid": "f63bb8eeb2394029108baa486abada7b", "score": "0.50204194", "text": "def on_focus(self):", "title": "" }, { "docid": "e7af8ab4c5ab66cd6730275b95f58639", "score": "0.50113934", "text": "def clear_fields(self):\r\n self.root.ids.new_item_name.text = \"\"\r\n self.root.ids.new_item_description.text = \"\"\r\n self.root.ids.new_item_price.text = \"\"", "title": "" }, { "docid": "dd27d891be84d6dffa1eb748dc19d966", "score": "0.5007725", "text": "def update_placeholder_text(self, text=None):\n if text:\n self._placeholder_text = text\n\n label = self._placeholder_label\n label.setText(self._placeholder_text)\n\n # Text\n label.setWordWrap(True)\n label.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n\n # transperant\n label.setAutoFillBackground(False)\n label.setAttribute(Qt.WA_TranslucentBackground)\n\n # color PlaceholderText\n palette = self.palette()\n # This enum value has been introduced in Qt 5.12\n if hasattr(palette, 'PlaceholderText'):\n placeholder_color = palette.PlaceholderText\n else:\n placeholder_color = palette.WindowText\n color = palette.color(placeholder_color)\n palette.setColor(palette.Text, color)\n palette.setColor(palette.Text, color)\n label.setPalette(palette)", "title": "" }, { "docid": "add24ca144c31f20371122830521b943", "score": "0.49937108", "text": "def add_textfield(self, merge_var):\n \n self.fields[merge_var['tag']] = forms.CharField(\n **self.get_default_args(merge_var))", "title": "" }, { "docid": "1b200e136f52b92498b3430e8fac185c", "score": "0.4984685", "text": "def make_expts_fields(self, frame, template, boxes, comments):\n for i, field in enumerate(template):\n if isinstance(template[field], collections.Mapping):\n 
self.make_label(frame, field, row=i + 1, sticky='ne')\n # Another nesting level!\n boxes[field] = {}\n subframe = ttk.Frame(frame)\n for j, subfield in enumerate(template[field]):\n self.make_label(subframe, subfield, row=j, sticky='e',\n tooltip_text=comments[field][subfield])\n boxes[field][subfield] = ttk.Entry(subframe, width=40)\n boxes[field][subfield].grid(row=j, column=1, sticky='w')\n subframe.grid(row=i + 1, column=1, sticky='we')\n else:\n tooltip = comments.get(field, '')\n self.make_label(frame, field, row=i + 1, sticky='ne',\n tooltip_text=tooltip)\n boxes[field] = ttk.Entry(frame, width=70)\n boxes[field].grid(row=i + 1, column=1, sticky='w')", "title": "" }, { "docid": "20233a656ff9d4bcaa1f80160ba0c660", "score": "0.49806383", "text": "def customise_widgets(self,fields):\n pass", "title": "" }, { "docid": "075fc403b86f86f87cfa112d708c8e68", "score": "0.49528688", "text": "def contribute_to_class(self, cls, name):\n super(PlaceholderField, self).contribute_to_class(cls, name)\n\n # overwrites what instance.<colname> returns; give direct access to the placeholder\n setattr(cls, name, PlaceholderFieldDescriptor(self.slot))\n\n # Make placeholder fields easy to find\n if not hasattr(cls._meta, 'placeholder_fields'):\n cls._meta.placeholder_fields = {}\n cls._meta.placeholder_fields[name] = self", "title": "" }, { "docid": "d4ec2ed58df682a108f715ee04badcc7", "score": "0.49423596", "text": "def __init__(self, *args, **kwargs):\n super(CreationUserForm, self).__init__(*args, **kwargs)\n self.fields['password1'].help_text = ''\n self.fields['password2'].help_text = ''", "title": "" }, { "docid": "b3209fdfa15c84577e5f1bcd8cdd7a07", "score": "0.49378434", "text": "def add_fields_from_list(master, model):\n fields = model.return_data_fields()\n row_count = 0\n for field in fields:\n if row_count == 9:\n master.set_row_cursor(3)\n master.set_col_cursor (2)\n label = Label(master, text=field[0])\n label.grid(row=master.row_cursor, column=master.col_cursor, sticky=W, padx=10, pady=5)\n if field[0] == 'Institution':\n master.set_prison_field(field, model)\n else:\n entry = Entry(master, textvariable=field[1], width=20)\n entry.grid(row=master.row_cursor, column=master.col_cursor+1, pady=5)\n master.row_cursor += 1\n row_count +=1", "title": "" }, { "docid": "9e31c6c0acc52d5e1b763a77ece1744a", "score": "0.49346593", "text": "def __init__(self,parent,fieldName,dataType,label,*args,**kargs):\r\n tk.Frame.__init__(\r\n self,parent,\r\n pady=2\r\n )\r\n self.font=tkFont.Font(family='tahoma',size=10)\r\n self.parent=parent\r\n self.fieldName=fieldName\r\n self.dataType=dataType\r\n self.label=label\r\n self.__value=None\r\n self.__variable=tk.StringVar()\r\n self.__variable.set(None)\r\n self.__variable.trace('w',\r\n self.__callback)\r\n self._label=tk.Label(\r\n self,text=label,\r\n font=self.font,\r\n )\r\n self._entry=tk.Entry(\r\n self,\r\n textvariable=self.__variable,\r\n font=self.font,\r\n )\r\n \r\n self._label.grid(\r\n row=0,column=0,\r\n sticky='nsew',\r\n )\r\n self._entry.grid(\r\n row=0,column=1,\r\n sticky='nsew'\r\n )\r\n self.grid_columnconfigure(\r\n 1,weight=1)", "title": "" }, { "docid": "da1d7dbe2babb7f33821e3fa90c65f72", "score": "0.49191117", "text": "def enable(self, max_characters):\n # Maybe turn this into a function which creates a text field\n # at given coordinates or something\n self.text = \"\"\n self.max_characters = max_characters\n if self.active:\n for instance in self.instances:\n instance.accepting_text = False\n instance.focused = False\n else:\n 
setattr(TextInput, \"active\", True)\n self.accepting_text = True\n self.focused = True", "title": "" }, { "docid": "9139c3c0d4a5b98c4b4215d338850d08", "score": "0.49097076", "text": "def _prepare_insert(self, tmpl, record_class, field_names, placeholder_for_id=False):", "title": "" }, { "docid": "97e9336dc92857e748016b8e7d68d5c5", "score": "0.4902299", "text": "def __lw_initial_value_entry__(self):\n x = 1000\n y = 180\n self.initial_value_entry = QLineEdit(self)\n self.initial_value_entry.setToolTip(\"Initial value\")\n self.initial_value_entry.setText('0')\n self.initial_value_entry.move(x, y)\n self.initial_value_entry.resize(40, 20)\n self.initial_value_entry.show()", "title": "" }, { "docid": "83a34941c1693f9a770a332e151990ae", "score": "0.4899289", "text": "def CreateLabeledTextField(self, panel, label):\n label = wx.StaticText(panel, wx.ID_ANY, label)\n text = wx.TextCtrl(panel, wx.ID_ANY)\n box = wx.BoxSizer(wx.HORIZONTAL)\n box.Add(label, 0, wx.RIGHT|wx.ALIGN_CENTER, 5)\n box.Add(text, 1, wx.ALIGN_CENTER)\n return (box, text)", "title": "" }, { "docid": "f92e9b0c73e8ded3ff48b23b8a585fbf", "score": "0.48791882", "text": "def label_input(field, value, **attributes):\n\n from gluon.sqlhtml import StringWidget\n\n default = {\"value\": (value is not None and str(value)) or \"\"}\n attr = StringWidget._attributes(field, default, **attributes)\n\n placeholder = current.T(\"Enter or scan ID\")\n attr[\"_placeholder\"] = placeholder\n\n postfix = ICON(\"fa fa-close\")\n\n widget = DIV(DIV(INPUT(**attr),\n _class=\"small-11 columns\",\n ),\n DIV(SPAN(postfix, _class=\"postfix clear-btn\"),\n _class=\"small-1 columns\",\n ),\n _class=\"row collapse\",\n )\n\n return widget", "title": "" }, { "docid": "a45e8165560fa00929b88a64510841b6", "score": "0.48743284", "text": "def add_placeholders(self):\n ### YOUR CODE HERE (~3-5 lines)\n self.input_placeholder = tf.placeholder(dtype=tf.int32, shape=(None, self.config.n_window_features))\n self.labels_placeholder = tf.placeholder(dtype=tf.int32, shape=(None,))\n self.dropout_placeholder = tf.placeholder(dtype=tf.float32)\n ### END YOUR CODE", "title": "" }, { "docid": "d367e1a8fc4fd4c11c00620ef039072e", "score": "0.48558968", "text": "def test_glossary_form_has_placeholder(self):\n form = GlossaryForm()\n self.assertIn('placeholder=\"New term\"', form.as_p())\n self.assertIn('placeholder=\"Term definition\"', form.as_p())\n self.assertIn('placeholder=\"Reference\"', form.as_p())", "title": "" }, { "docid": "1c2b085fdd59f88ae61365578d243b57", "score": "0.48429856", "text": "def on_entry_click(event):\r\n if name.get() == 'Your name':\r\n name.delete(0, \"end\") # delete all the text in the name\r\n name.insert(0, '') #Insert blank for user input\r\n name.config(fg = 'white')", "title": "" }, { "docid": "a95adf93a4b0a4ddd45ea79e10ad27c6", "score": "0.48351467", "text": "def setup_edit(self):\r\n\r\n self.edit = urwid.IntEdit()\r\n if not self.is_top:\r\n self.edit.set_caption( self.op + \" \" )\r\n self.edit.set_layout( None, None, CALC_LAYOUT )", "title": "" }, { "docid": "eb4f14bac30e69ef243d9ee654310f09", "score": "0.48302642", "text": "def fix_form(self):\n \n if self.is_uc():\n self.set_feature('form', self.get_feature('form').title())", "title": "" }, { "docid": "4205d171331f97a576e791abeaafecc4", "score": "0.48298773", "text": "def updateFields(self):\n settings = self._get_security_settings()\n use_email_as_login = settings.use_email_as_login\n\n # Finally, let autoform process the schema and any FormExtenders do\n # their thing\n 
super().updateFields()\n\n if use_email_as_login:\n self.fields[\"email\"].field.description = _(\n \"help_email_creation_for_login\",\n default=\"Enter an email \"\n \"address. This will be your login name. We respect your \"\n \"privacy, and will not give the address away to any third \"\n \"parties or expose it anywhere.\",\n )\n del self.fields[\"username\"]\n else:\n self.fields[\"email\"].field.description = _(\n \"help_email_creation\",\n default=\"Enter an email address. This is necessary in case \"\n \"the password is lost. We respect your privacy, and \"\n \"will not give the address away to any third parties \"\n \"or expose it anywhere.\",\n )\n\n # Change the password description based on PAS Plugin The user needs a\n # list of instructions on what kind of password is required. We'll\n # reuse password errors as instructions e.g. \"Must contain a letter and\n # a number\". Assume PASPlugin errors are already translated\n if self.fields.get(\"password\", None):\n registration = getToolByName(self.context, \"portal_registration\")\n err_str = registration.testPasswordValidity(\"\")\n if err_str:\n msg = _(\n \"help_password_creation_with_errors\",\n default=\"Enter your new password. ${errors}\",\n mapping=dict(errors=err_str),\n )\n self.fields[\"password\"].field.description = msg", "title": "" }, { "docid": "29d474bf6eb0f44aa0f16c42348b98ae", "score": "0.4825862", "text": "def fields_start(self):\n pass", "title": "" }, { "docid": "7a105cce2a887f56a2da2fedb243db59", "score": "0.4818756", "text": "def blank(self):\n self.root.ids.song_title.text = '' # Empty the song title input\n self.root.ids.song_artist.text = '' # Empty the song artist input\n self.root.ids.song_year.text = '' # Empty the song year input", "title": "" }, { "docid": "4c5b8e3755a9bb346f604675024b0aec", "score": "0.48167503", "text": "def add_placeholders(self):\n ### \n input_placeholder = tf.placeholder(tf.int32,(None, self.config.max_length))\n labels_placeholder = tf.placeholder(tf.int32,(None, self.config.max_length))\n mask_placeholder = tf.placeholder(tf.bool,(None,self.config.max_length))\n self.input_placeholder = input_placeholder\n self.labels_placeholder = labels_placeholder\n self.mask_placeholder = mask_placeholder\n ### ", "title": "" }, { "docid": "ff2e028864a78da8adef5c9f2eb30532", "score": "0.48157597", "text": "def __load_widgets__(self):\n self.__lw_kp_entry__()\n self.__lw_ki_entry__()\n self.__lw_kd_entry__()\n self.__lw_initial_value_entry__()\n self.__lw_target_value_entry__()\n self.__lw_error_entry__()\n self.__lw_noise_entry__()", "title": "" }, { "docid": "d8c764a6de078aacb966d1b0fcb09f44", "score": "0.48118734", "text": "def field(\n label='', \n field='',\n required=False, \n label_desc='', \n field_desc='',\n help='',\n error='',\n field_pre='',\n):\n if error:\n field = HTML.div(class_='error', c=error)+field\n if label:\n label = label + literal(':')\n if field_pre:\n field = HTML.div(c=field_pre) + field\n rows = []\n if required:\n required_html = HTML.span(class_=\"required\", c='*')\n else:\n required_html = HTML.span(style=\"visibility: hidden\", c='*')\n label_html = HTML.td(valign=\"top\", class_=\"label\", c=required_html+HTML.label(c=label))\n if help:\n field_html = HTML.td(valign=\"top\", class_=\"field\", c=field)\n help_html = HTML.td(valign=\"top\", class_=\"help\", c=help)+'\\n'\n else:\n field_html = HTML.td(valign=\"top\", class_=\"field\", colspan=\"2\", c=field)\n help_html = ''\n rows.append(HTML.tr(class_='field', 
c='\\n'+label_html+'\\n'+field_html+'\\n'+help_html))\n if label_desc or field_desc:\n label_desc_html = HTML.td(valign=\"top\", class_=\"label_desc\", c=HTML.span(class_=\"small\", c=label_desc))\n field_desc_html = HTML.td(valign=\"top\", class_=\"field_desc\", colspan=\"2\", c=HTML.span(class_=\"small\", c=field_desc)) \n rows.append(HTML.tr(class_=\"description\", c='\\n'+label_desc_html+'\\n'+field_desc_html+'\\n'))\n return literal('\\n').join(rows)", "title": "" }, { "docid": "44cdc2075f316c423921db846e4bcb38", "score": "0.4800646", "text": "def add_placeholders(self):\n raise NotImplementedError(\"Each Model must re-implement this method.\")", "title": "" }, { "docid": "c50d09104ee80d294636b1d0c5cc399b", "score": "0.47938144", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['domain'].empty_label = None", "title": "" }, { "docid": "2bf6feb844988db1dee38ac7028ddc9c", "score": "0.47707114", "text": "def add_labels(self, regex):\n global g_is_it_showing\n g_is_it_showing = False\n\n global last_index, hints\n\n last_index = 0\n hints = []\n\n self.views = self.views_to_label()\n self.region_type = self.get_region_type()\n self.changed_views = []\n self.breakpoints = []\n changed_buffers = []\n\n for view in self.views[:]:\n if view.buffer_id() in changed_buffers:\n break\n\n view.run_command(\"add_ace_jump_labels\", {\n \"regex\": regex,\n \"region_type\": self.region_type,\n \"labels\": self.labels,\n \"highlight\": self.highlight,\n \"case_sensitive\": self.case_sensitivity\n })\n self.breakpoints.append(last_index)\n self.changed_views.append(view)\n changed_buffers.append(view.buffer_id())\n\n if next_search:\n break\n\n self.views.remove(view)\n\n set_views_syntax(self.all_views, list(itertools.repeat(\n \"Packages/AceJump/AceJump.tmLanguage\",\n len(self.all_views)\n )))\n\n set_views_settings(\n self.all_views,\n self.view_settings,\n self.view_values\n )", "title": "" }, { "docid": "192963e387c9f917406f193c3e4cfb0d", "score": "0.4768051", "text": "def clear_field(self):\n self.root.ids.input_name.text = \"\"\n self.root.ids.output_label.text = \"\"", "title": "" }, { "docid": "b581deb4ab25921fe9668a9a2c81ba87", "score": "0.47650036", "text": "def labelStart(self):\n self.label = 1\n self.labelVar.set('1')", "title": "" }, { "docid": "54460457342e12b9df7c0bb3e923eba1", "score": "0.47634348", "text": "def insertFields(self):\n\n # add common survey fields\n fields = super(GradeSurveyRecordForm, self).insertFields()\n\n # add empty option to choices\n grade_choices = (('', \"Choose a Grade\"),) + tuple(DEF_GRADE_CHOICES)\n\n gradeField = forms.fields.CharField(widget=custom_widgets.PlainTextWidget,\n initial=self.data.get('grade'))\n # add the grade field at the form's bottom\n fields.insert(len(fields) + 1, 'grade', gradeField)\n\n return fields", "title": "" }, { "docid": "d57dd849ebcc4e1dbb4214d3a034a84d", "score": "0.47554097", "text": "def auto_regex_label(cls, a_head, a_file_name):\n\n # For every label regex\n for l_pattern in cls.s_patterns_list:\n\n # Check for regex match\n l_match = l_pattern[2].match(a_head)\n\n # Add a label if the regex matches\n if l_match:\n cls.add_label(l_pattern[0], l_pattern[1], Procedure.REGEX,\n a_file_name, \"True\", \"Head\", [l_match[1]])", "title": "" }, { "docid": "a6bf4434be3f627e39493799400a438a", "score": "0.47473997", "text": "def __init__(self,*args,**kwargs):\n\n try:\n super().__init__(*args, **kwargs)\n self.fields['username'].label = \"\"\n self.fields['password'].label = \"\"\n 
self.fields['username'].widget.attrs[\"placeholder\"] = \"Username\"\n self.fields['password'].widget.attrs[\"placeholder\"] = \"Password\"\n except Exception as e:\n logger.error('The error in UserLoginForm - __init__ method is :'+str(e))\n print(e)", "title": "" }, { "docid": "69ea16d7b30436247dc11589f53ae252", "score": "0.47250906", "text": "def body(self, master):\n Label(master, text=\"Time to wait between loops (seconds): \").grid(row=0, column=0)\n\n self.e1 = Entry(master)\n self.e1.grid(row=0, column=1)\n\n return self.e1#initial focus", "title": "" }, { "docid": "5944ce19f316452d42687b0ac5380312", "score": "0.47223747", "text": "def focus_down():\r\n pass", "title": "" }, { "docid": "57fc517bc1982916e319f799843fa70a", "score": "0.47181505", "text": "def reset_controls(self):\n self.submit_button.description = \"Add to Pipeline\"", "title": "" }, { "docid": "c2b42a2e6209ea907a671c92105c4d22", "score": "0.47085324", "text": "def add_placeholders(self):\n\t\t### YOUR CODE HERE (~4-6 lines)\n\t\tself.input_placeholder = tf.placeholder(tf.int32, shape=(None, self.max_length)) #int32 because integer tokens to index into word embeddings\n\t\tself.labels_placeholder = tf.placeholder(tf.float32, shape=(None, self.max_length))\n\t\tself.mask_placeholder = tf.placeholder(tf.bool, shape=(None, self.max_length)) \n\t\tself.dropout_placeholder = tf.placeholder(tf.float32)\n\t\t### END YOUR CODE", "title": "" }, { "docid": "b05f856cbdca4830506c49b2352928bf", "score": "0.47025907", "text": "def populate_defaults(self):\n self.set_tabs(3)\n self.tabDefaults.setTabText(0, \"ID Labels\")\n self.tabDefaults.setTabText(1, \"Properties\")\n self.tabDefaults.setTabText(2, \"Hydraulics\")\n\n self.set_tab_prefix()\n self.set_tab_properties()\n self.set_tab_hydraulics()\n #self.tab_changed(0)", "title": "" }, { "docid": "21feb8a62d4b7d7656cc430750f2a77b", "score": "0.4697297", "text": "def _createEditText(self):\n self.__viewer.widget = render_engine.Gui.createWidgetT(\"Edit\", \"Edit\",\n mygui.IntCoord(0, 0, 91, 91),\n mygui.Align(mygui.ALIGN_VSTRETCH),\n \"Main\")\n self.__viewer.widget.setVisible(False)\n self.__viewer.widget.setTextColour(mygui.Colour(0.0, 0.0, 0.0, 1.0))\n self.__viewer.widget.setEditMultiLine(True)", "title": "" }, { "docid": "caf33153dd0cccc46b8deaaf07955d61", "score": "0.46952784", "text": "def index_begin(self):\n item = self.field\n while item.show is False:\n item = item.repeatedlist[item.selection][3]\n if item.index is not None:\n item.index = 0\n item.display_on_screen()", "title": "" }, { "docid": "5c04c07121cbc2c876571243cd6d854e", "score": "0.46884638", "text": "def gain_focus_if_needed(self, widget):\n pass", "title": "" }, { "docid": "f6845d05f20987b0e5cf6a71991f2438", "score": "0.4676074", "text": "def focus(self):\n pass", "title": "" }, { "docid": "c6cdeca1ece38d74f3f1e51039057bae", "score": "0.466625", "text": "def set_fields(self):\n if self.view_only:\n ## populate fields with current database row\n self.populate_fields()\n else:\n ## if not view_only tests for new item \n if self.new_cls:\n ## if new item, changes label icon and adds 'None' to parent node\n self.ids.label_icon.icon = 'folder-plus-outline'\n self.ids.cls_sub.text = '[ Fundo ]'\n self.app.pcd_tree.disabled = True ## Locks treeview\n else:\n ## if not new item, adds current item code as item parent\n self.ids.cls_sub.text = self.item_data['cls_codigo']", "title": "" }, { "docid": "ac4a3cd8e34470bdd6ea20e96fb8c11f", "score": "0.4665814", "text": "def alterForm(self, selected_layer):\n for 
field_index, field in enumerate(selected_layer.pendingFields()):\n f_type = field.typeName()\n if selected_layer.editorWidgetV2(field_index) != 'TextEdit':\n pass\n elif f_type == \"text\":\n selected_layer.setEditorWidgetV2(field_index, 'TextEdit')\n selected_layer.setEditorWidgetV2Config(field_index, {\n 'IsMultiline': True, 'UseHtml': False})\n elif f_type == \"varchar\":\n selected_layer.setEditorWidgetV2(field_index, 'TextEdit')\n selected_layer.setEditorWidgetV2Config(field_index, {\n 'IsMultiline': (field.length() > 80), 'UseHtml': False})\n elif f_type == \"date\":\n selected_layer.setEditorWidgetV2(field_index, 'DateTime')\n selected_layer.setEditorWidgetV2Config(field_index, {\n 'display_format': 'yyyy-MM-dd',\n 'field_format': 'yyyy-MM-dd', 'calendar_popup': True})\n elif f_type == \"bool\":\n selected_layer.setEditorWidgetV2(field_index, 'CheckBox')\n selected_layer.setEditorWidgetV2Config(field_index, {\n 'CheckedState': 't', 'UncheckedState': 'f'})", "title": "" }, { "docid": "0c61af171e315150871c88a5254e6867", "score": "0.4664462", "text": "def CreateField(label, file, frame, row, col):\r\n\ttext = label + \": \"\r\n\ttk.Label(frame, text=text, anchor=\"w\").grid(row=row, column=col, sticky=\"w\")\r\n\ttk.Entry(frame, textvariable=file).grid(row=row, column=col+1, columnspan=2)\r\n\ttk.Button(frame, text=\"Browse\", command= lambda: AskOpenFile(file, frame)).grid(row=row, column=col+3)", "title": "" }, { "docid": "ee0153aa44c745aab8a9ec575cbc3c5b", "score": "0.46634993", "text": "def form_setup(self):\n pass", "title": "" }, { "docid": "be6a858f5cd776b4f202772441130af7", "score": "0.46628255", "text": "def __init__(self, *args, **kwargs):\n super(UserProfileForm, self).__init__(*args, **kwargs)\n self.fields[\"First Name\"] = forms.CharField(\n initial=self.instance.user.first_name)\n self.fields[\"Last Name\"] = forms.CharField(\n initial=self.instance.user.last_name)\n self.fields[\"Email\"] = forms.EmailField(\n initial=self.instance.user.email)\n del self.fields[\"user\"]", "title": "" }, { "docid": "4f7ecfd76b3a39af22d4b4a4e4b02224", "score": "0.4662619", "text": "def __init__(self, *args, **kwargs):\r\n super(PublicTicketForm, self).__init__(*args, **kwargs)\r\n for field in CustomField.objects.filter(staff_only=False):\r\n instanceargs = {\r\n 'label': field.label,\r\n 'help_text': field.help_text,\r\n 'required': field.required,\r\n }\r\n if field.data_type == 'varchar':\r\n fieldclass = forms.CharField\r\n instanceargs['max_length'] = field.max_length\r\n elif field.data_type == 'text':\r\n fieldclass = forms.CharField\r\n instanceargs['widget'] = forms.Textarea\r\n instanceargs['max_length'] = field.max_length\r\n elif field.data_type == 'integer':\r\n fieldclass = forms.IntegerField\r\n elif field.data_type == 'decimal':\r\n fieldclass = forms.DecimalField\r\n instanceargs['decimal_places'] = field.decimal_places\r\n instanceargs['max_digits'] = field.max_length\r\n elif field.data_type == 'list':\r\n fieldclass = forms.ChoiceField\r\n choices = field.choices_as_array\r\n if field.empty_selection_list:\r\n choices.insert(0, ('','---------' ) )\r\n instanceargs['choices'] = choices\r\n elif field.data_type == 'boolean':\r\n fieldclass = forms.BooleanField\r\n elif field.data_type == 'date':\r\n fieldclass = forms.DateField\r\n elif field.data_type == 'time':\r\n fieldclass = forms.TimeField\r\n elif field.data_type == 'datetime':\r\n fieldclass = forms.DateTimeField\r\n elif field.data_type == 'email':\r\n fieldclass = forms.EmailField\r\n elif field.data_type == 
'url':\r\n fieldclass = forms.URLField\r\n elif field.data_type == 'ipaddress':\r\n fieldclass = forms.IPAddressField\r\n elif field.data_type == 'slug':\r\n fieldclass = forms.SlugField\r\n \r\n self.fields['custom_%s' % field.name] = fieldclass(**instanceargs)", "title": "" }, { "docid": "55731fd3e6211b3afe33f3dac2f67510", "score": "0.46609715", "text": "def formfield(self, **kwargs):\r\n from mezzanine.generic.forms import KeywordsWidget\r\n kwargs[\"widget\"] = KeywordsWidget\r\n return super(KeywordsField, self).formfield(**kwargs)", "title": "" }, { "docid": "24586e464dd96ddd894ce8135ef9f882", "score": "0.46588576", "text": "def __init__(self, slot, plugins=None, **kwargs):\n super(PlaceholderField, self).__init__(**kwargs)\n\n self.slot = slot\n self.plugins = plugins\n\n # Overwrite some hardcoded defaults from the base class.\n self.editable = True\n self.blank = True # TODO: support blank: False to enforce adding at least one plugin.\n self.rel = PlaceholderRel(self.slot) # This support queries", "title": "" }, { "docid": "9ce1b411d78f0971c0410d476008bce9", "score": "0.46576175", "text": "def __init__(self, *args, **kwargs):\n super(ChangePasswordForm, self).__init__(*args, **kwargs)\n self.fields['old_password'].help_text = ''\n self.fields['new_password1'].help_text = ''\n self.fields['new_password2'].help_text = ''", "title": "" }, { "docid": "c90d658af9bf8fb7c94d0ecc746391fd", "score": "0.46573803", "text": "def fill_by_text(self, fields, scroll=NotPassed):\n for selector, text in fields.items():\n form, field_name, _ = self._find_form_and_field_by_css_selector(self.last_response, selector)\n self._fill_field_by_text(form, field_name, text)", "title": "" }, { "docid": "cea17e2457114ca9bb72ff951d556869", "score": "0.4653654", "text": "def build_fields(self):\n return [(\"text\", self.text), (\"label\", self.label)]", "title": "" }, { "docid": "790335fc223694cc616525f1b137ba23", "score": "0.46514022", "text": "def __init__(self, *args, **kwargs):\n super(LocationForm, self).__init__(*args, **kwargs)\n edits = self.instance.locationedit_set.filter(pending=True)\n bools = self.instance.get_boolean_fieldnames()\n\n # Loop through edits and apply classes to fields for highlighting\n for edit in edits.filter(edit_type='update'):\n if edit.fieldname in bools:\n edit.new_value = True if edit.new_value == 'True' else False\n self.initial[edit.fieldname] = edit.new_value\n field = self.fields[edit.fieldname]\n field.widget.attrs['class'] = edit.edit_type\n if edits.filter(edit_type='delete'):\n # If edit type is delete, all fields are red\n for field in self.fields.values():\n field.widget.attrs['class'] = 'delete'\n if edits.filter(edit_type='create'):\n # If edit type is create, all fields are green\n for field in self.fields.values():\n field.widget.attrs['class'] = 'create'", "title": "" }, { "docid": "29ba221b98eb7e2c9bc64ae8260609bb", "score": "0.4651339", "text": "def default_focus(self):\n return None", "title": "" }, { "docid": "47f1ad08487e1c3b1105de3a7113c8bb", "score": "0.4634376", "text": "def createNotepadWidget(self):\n\t\tself.text_field = QTextEdit()\n\t\tself.setCentralWidget(self.text_field)", "title": "" }, { "docid": "6bf59eb32936814f944a76b274054b71", "score": "0.46337304", "text": "def __init__(self, *args, **kwargs):\n super(AccountUserForm, self).__init__(*args, **kwargs)\n if self.instance.pk is not None:\n self.fields['first_name'].initial = self.instance.user.first_name\n self.fields['last_name'].initial = self.instance.user.last_name\n 
self.fields['email'].initial = self.instance.user.email", "title": "" }, { "docid": "21b39adbb7427b657720ede59798d66d", "score": "0.46305984", "text": "def buildCustomTitleOptions(gui: tk.Tk, parent: tk.Frame,\r\n waitFunc, startPlotFunc, ) -> None:\r\n\r\n # - - - - - - - - - -\r\n # Row 0 - Text\r\n gui.titleText = tk.StringVar()\r\n titlekwargs = {'width': 35, 'textvariable': gui.titleText, }\r\n gui.titleEntry = ttk.Entry(parent, **titlekwargs)\r\n gui.titleEntry.insert(0, '')\r\n\r\n # Adds a waiting period for the user to stop typing\r\n gui.titleEntry.bind('<Key>', waitFunc)\r\n gui.titleEntry.grid(row=0, column=0, sticky=tk.W, columnspan=5)\r\n\r\n # - - - - - - - - - -\r\n # Row 1 - Styling\r\n gui.titleSize = ttk.Spinbox(parent, from_=0, to=32, width=3,\r\n command=lambda: startPlotFunc(1))\r\n gui.titleSize.set('15')\r\n gui.titleSize.grid(row=1, column=0)\r\n\r\n gui.boldTitleOn, gui.itTitleOn = (0, 0)\r\n\r\n # Bold button\r\n boldFont = font.Font(size=10, weight=\"bold\")\r\n gui.boldTitleButton = tk.Button(\r\n parent, text=\"B\", width=3,\r\n relief=tk.FLAT,\r\n font=boldFont,\r\n command=lambda: cf.editTitleOptions(gui, 'b'))\r\n gui.boldTitleButton.grid(row=1, column=1,)\r\n\r\n # Italic button\r\n itFont = font.Font(size=10, slant=\"italic\")\r\n gui.itTitleButton = tk.Button(\r\n parent, text=\"I\", width=3,\r\n relief=tk.FLAT,\r\n font=itFont,\r\n command=lambda: cf.editTitleOptions(gui, 'i'))\r\n gui.itTitleButton.grid(row=1, column=2,)\r\n\r\n # Title Color Picker\r\n tc_kwargs = {'width': 8, 'textvariable': gui.titleColorHex, }\r\n gui.titleColorEntry = ttk.Entry(parent, **tc_kwargs)\r\n\r\n # Adds a waiting period for the user to stop typing\r\n gui.titleColorEntry.bind('<Key>', waitFunc)\r\n gui.titleColorEntry.grid(row=1, column=3, sticky=tk.W)\r\n\r\n # Setting up color wheel buttong\r\n gui.titleColorWheel = tk.PhotoImage(file='images/color_wheel.png')\r\n gui.titleColorButton = tk.Button(\r\n parent,\r\n image=gui.titleColorWheel,\r\n command=lambda: cf.pickTitleColor(gui))\r\n gui.titleColorButton.grid(row=1, column=4,)", "title": "" }, { "docid": "7eeb45c8d377fcb9ef69d93468d4b5a7", "score": "0.46284765", "text": "def clear_member_search_inputs(self):\n self.first_name_g_text.delete(0, END)\n self.email_g_text.delete(0, END)", "title": "" }, { "docid": "b76dddd5830a6f29fd3cf7e4f5cae5c3", "score": "0.46261188", "text": "def populate_widgets(self):\n pass", "title": "" }, { "docid": "3fcf11ee39c3d2c463107ffe0ddf6a15", "score": "0.46255594", "text": "def __init__(self, field, input_class, *args, **kwargs):\n super(MultilineField, self).__init__(field, *args, **kwargs)\n self.input_class = input_class", "title": "" }, { "docid": "985f5af11e4471e26fe373fed4da4221", "score": "0.46251556", "text": "def create_placeholder(self):\n if self.user is None and self.text == \"\":\n # Comment is already a placeholder, don't create a replacement.\n return\n\n Comment.objects.create(\n user=None,\n text=\"\",\n visible=True,\n target_type=self.target_type,\n target_id=self.target_id,\n created_on=self.created_on,\n )", "title": "" } ]
d1ab94935e1af99e184fbbc6b888b926
Encodes obj as bytes.
[ { "docid": "bbfcc47a776f32e4075a6592a8af4227", "score": "0.8698663", "text": "def encode(self, obj: Any) -> bytes:", "title": "" } ]
[ { "docid": "bc24fde68354c537e45fcf8c96b600d1", "score": "0.84944046", "text": "def encode(self, obj: Any) -> bytes:\n return self.encoder.encode(obj).encode(\"utf8\")", "title": "" }, { "docid": "9eabecd52ef02bc813bb5e953625cca1", "score": "0.778802", "text": "def encode(self, obj):\n if self.encoder == 'pickle':\n import cPickle as pickle\n return pickle.dumps(obj, -1)\n elif self.encoder == 'json':\n try:\n import simplejson as json\n except ImportError:\n import json\n return json.dumps(obj)", "title": "" }, { "docid": "84c42e46f9ff0d287691a8fb819d0c69", "score": "0.7658311", "text": "def encode(obj):\n # type specific casting:\n settings = get_settings()\n encode_functions = settings.get('serial_encode_functions', {})\n for obj_type in encode_functions:\n if isinstance(obj, obj_type):\n obj = encode_functions[obj_type](obj)\n\n return str(obj)", "title": "" }, { "docid": "ede5383d01276dcca99be74adaa3942b", "score": "0.75428253", "text": "def serialize(self, obj):\n self.object_ = obj\n self.serial_ = SealedObject.__serialize_(obj)\n self.encrypted_ = None\n return base64.b64encode(self.serial_)", "title": "" }, { "docid": "f37faa8f568e823f0dbfdf5128fbd7cf", "score": "0.7521224", "text": "def dumps(obj):\n return msgpack.packb(obj, default=_serializer, use_bin_type=True)", "title": "" }, { "docid": "aae049e3a769005deeffcc6bde5bdf3e", "score": "0.7407988", "text": "def dumps(obj, big_endian=True):\n return _dumps(obj, big_endian)", "title": "" }, { "docid": "f7a7e3181cdd8ecff5c9fc0673a75c8a", "score": "0.7248636", "text": "def __serialize_(obj):\n return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "f55a25174147de668bbf8fc67f00f5bc", "score": "0.72302896", "text": "def serialize_to_bytes(obj: typing.Any) -> bytes:\n\n bitbuffer = serialize(obj)\n\n return bitbuffer.buffer", "title": "" }, { "docid": "293a9cf01884a4e3885a50cd28b1ff81", "score": "0.71709406", "text": "def dump(self, obj: object, stream: Stream) -> int:\n self._encoder.encode(obj, stream)", "title": "" }, { "docid": "040445ce80d1fc7a67506152d5c28c32", "score": "0.7126788", "text": "def dumps(self, obj: object) -> AnyStr:\n buf = IO_MODE_MAP[self._encoder.mode]()\n self.dump(obj, buf)\n return buf.getvalue()", "title": "" }, { "docid": "31450a9f2ab24f9d4560412b37365ec4", "score": "0.7046709", "text": "def pickle(obj):\n return codecs.encode(pkl.dumps(obj), \"base64\").decode()", "title": "" }, { "docid": "d3691ade1e4a73771bcee1ebd26ff4bf", "score": "0.7017267", "text": "def dumps(self, obj):\n pass", "title": "" }, { "docid": "da396d82059db3d25afe0f396dd5537c", "score": "0.6995841", "text": "def send(self, obj):\n\n msg = msgpack.dumps(obj, use_bin_type=False)\n assert len(msg) < MAX_SIZE, \"Encoded message too big!\"\n self.sock.send(msg)", "title": "" }, { "docid": "46b0465ffdb92842bb7d48e40de64c3d", "score": "0.6911768", "text": "def dumps(obj):\n # type: (Pekelable) -> bytes\n f = BytesIO()\n pekeler = Pekeler(f)\n pekeler.dump(obj)\n return f.getvalue()", "title": "" }, { "docid": "aaf662e5a834adfe13152e4912013b33", "score": "0.68919545", "text": "def write_object(self, obj):\n resp = jsonpickle.encode(obj, unpicklable=False)\n self.write_json(resp)", "title": "" }, { "docid": "02b8c46b91b2e5c8593f94f103445aba", "score": "0.6868509", "text": "def _pack_object(cls, obj):\n\t\tdata = \"\"\n\t\t\n\t\tobject_type = type(obj)\n\t\t\n\t\tif\t object_type == NoneType:\n\t\t\tdata += \"\\x00\"\n\t\t\n\t\telif object_type == BooleanType:\n\t\t\tif not obj:\n\t\t\t\tdata += 
\"\\x08\"\n\t\t\telse:\n\t\t\t\tdata += \"\\x09\"\n\t\t\n\t\telif object_type == IntType:\n\t\t\tobject_marker = 0x10\n\t\t\tbuf = \"\"\n\t\t\t#XXX: need to actually catch unsupported packed sizes\n\t\t\tfor fmt in [\">B\", \">H\", \">I\", \">Q\"]:\n\t\t\t\ttry:\n\t\t\t\t\tbuf = struct.pack(fmt, obj)\n\t\t\t\texcept struct.error:\n\t\t\t\t\tlogging.debug(\"XXX: skipping {0}\".format(fmt))\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\tobject_marker += int(log(len(buf), 2))\n\t\t\t\n\t\t\tdata += chr(object_marker)\n\t\t\tdata += buf\n\t\t\n\t\telif object_type == FloatType:\n\t\t\tobject_marker = 0x20\n\t\t\tbuf = \"\"\n\t\t\t#XXX: need to actually catch unsupported packed sizes\n\t\t\tfor fmt in [\"!f\", \"!d\"]:\n\t\t\t\ttry:\n\t\t\t\t\tbuf = struct.pack(fmt, obj)\n\t\t\t\texcept struct.error:\n\t\t\t\t\tlogging.debug(\"XXX: skipping {0}\".format(fmt))\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\tobject_marker += int(log(len(buf), 2))\n\t\t\t\n\t\t\tdata += chr(object_marker)\n\t\t\tdata += buf\n\t\t\n\t\t#XXX: DateType?\n\t\t\n\t\telif object_type == StringType:\n\t\t\tobject_marker = 0x40\n\t\t\tdata_len = len(obj)\n\t\t\tif data_len < 0xF:\n\t\t\t\tobject_marker += data_len\n\t\t\t\tdata += chr(object_marker)\n\t\t\telse:\n\t\t\t\tobject_marker += 0xF\n\t\t\t\tdata += chr(object_marker)\n\t\t\t\tdata += cls._pack_object(data_len)\n\t\t\tdata += obj\n\t\t\n\t\telif object_type == UnicodeType:\n\t\t\tdata += \"\\x70\"\n\t\t\tdata += obj.encode(\"utf-8\")\n\t\t\tdata += \"\\x00\"\n\t\t\n\t\telif object_type == ListType:\n\t\t\tdata += \"\\xA0\"\n\t\t\tfor element in obj:\n\t\t\t\tdata += cls._pack_object(element)\n\t\t\tdata += \"\\x00\"\n\t\t\n\t\telif object_type in [DictType, OrderedDict]:\n\t\t\tdata += \"\\xD0\"\n\t\t\tfor k, v in obj.iteritems():\n\t\t\t\tdata += cls._pack_object(k)\n\t\t\t\tdata += cls._pack_object(v)\n\t\t\tdata += \"\\x00\"\n\t\t\n\t\telse:\n\t\t\traise CFLBinaryPListComposeError(\"unsupported Python built-in type: {0}\".format(type(obj)))\n\t\t\n\t\treturn data", "title": "" }, { "docid": "0dc51240edd13d52f763d1c1a9de6b8e", "score": "0.6842263", "text": "def encodePyObj(py_obj):\n pkl_obj = pickle.dumps(py_obj)\n return base64.b64encode(pkl_obj)", "title": "" }, { "docid": "9ff5fab6f1140d1cbff9358e33e74691", "score": "0.6823277", "text": "def encode(self, o):\n\t\tpass", "title": "" }, { "docid": "f4c3e8a1cc161c2e2fbf7535cb511ca4", "score": "0.6763752", "text": "def encode(self) -> bytes:", "title": "" }, { "docid": "7188eb963bb76156ce47590735c075c5", "score": "0.67352635", "text": "def encode(obj):\n if hasattr(obj, '__id__'):\n return msgpack.ExtType(ExtType.REF, msgpack.packb(obj.__id__))\n return obj", "title": "" }, { "docid": "9e1eeab505b87208abeb440efcd83894", "score": "0.67114764", "text": "def send_object(self, obj):\n data_b64 = base64.b64encode(pickle.dumps(obj))\n # Enncrypt if needed\n if self.crypto is not None:\n data_b64 = self.crypto.encrypt(data_b64)\n\n self.writer.sendto(data_b64, (self.mcast_ip, self.mcast_port))", "title": "" }, { "docid": "a362d2ed8ae78aa8e69ed8033021b86f", "score": "0.66672003", "text": "def serialize(obj) -> str:\n return jsons.dumps(obj)", "title": "" }, { "docid": "b26d4062fc5658af4e9c0201280a4dc3", "score": "0.6622929", "text": "def send(self, obj) -> None:\n self.socket.send(pickle.dumps(obj))", "title": "" }, { "docid": "16585e132a1c2360085b7377696da05f", "score": "0.6616933", "text": "def convertToJson(self, obj):\n return dumps(obj)", "title": "" }, { "docid": 
"26b740eda00727bcb4241160e77b2c41", "score": "0.6575763", "text": "def json_dumps(self, o: object) -> bytes:", "title": "" }, { "docid": "95b0eb939966f5e4a2293c72b92849b7", "score": "0.6568878", "text": "def Encode( obj ):\n return json.dumps( obj, cls = WidgetEncoder, indent = 2 )", "title": "" }, { "docid": "595199497b18221ff5826913386fcf5d", "score": "0.65595984", "text": "def encode(self, object,final):\n\t\tpass", "title": "" }, { "docid": "98dc3c928544d3cf01c9f88e073f5d4b", "score": "0.6543425", "text": "def serialize(cls, obj, many=False):\r\n return cls().dump(obj, many=many)", "title": "" }, { "docid": "6c232bbcdf94691570e8712f63263c2a", "score": "0.65401846", "text": "def to_bytes(self, **kwargs):\n ...", "title": "" }, { "docid": "9492b840f73f98bb283acae82c29a54b", "score": "0.65261537", "text": "def encode(obj):\n if hasattr(obj, 'json'):\n return obj.json\n if hasattr(obj, '__json__'):\n return obj.__json__()\n return dumps(obj)", "title": "" }, { "docid": "b4379243a9478fcc0304376cfba0c3c5", "score": "0.6519997", "text": "def serialize_bytes(self):\n return json.dumps(self._to_dict()).encode('utf-8')", "title": "" }, { "docid": "8727baf1fe3e09c537ed65da324ec62d", "score": "0.6509552", "text": "def serialize(obj, **kwargs):\n pass", "title": "" }, { "docid": "6307ce33cc5f68f9301e77fbe23984a6", "score": "0.6502792", "text": "def json_encode(obj, encoding=None):\n return _encoder.encode(obj)", "title": "" }, { "docid": "56542a4bf1890759d9be41372d261a2d", "score": "0.6500174", "text": "def serialize(cls, obj, many=False):\n return cls().dump(obj, many=many)", "title": "" }, { "docid": "236586ea8f7391fbfcfb9650ec9904d4", "score": "0.6449128", "text": "def to_bytes(self):\n ...", "title": "" }, { "docid": "15abc1773954a61ce2f58e378cc83581", "score": "0.6419116", "text": "def dumps(obj, **kwargs):\n return json.dumps(obj, cls=JSONEncoderRaw, **kwargs)", "title": "" }, { "docid": "ed53faf17c274b597ff92d1bab657d13", "score": "0.64170384", "text": "def to_bytes(self) -> bytes:\n return fobs.dumps(self)", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": 
"eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "eb1323807a0d33e870d6461f3eae3987", "score": "0.6383514", "text": "def to_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "146f7cc394c9d06e37a8e812ad93505f", "score": "0.6366757", "text": "def serialize(obj: typing.Any) -> BitBuffer:\n\n writer = BitStreamWriter()\n obj.write(writer)\n\n return BitBuffer(writer.byte_array, writer.bitposition)", "title": "" }, { "docid": 
"aa54904c3d4f6dbfaec5fc012f363dba", "score": "0.6348701", "text": "def __obj2json(self, obj):\n return json.dumps(obj, indent=self._indent, sort_keys=self._sort_keys)", "title": "" }, { "docid": "9488183b5e65bf2d56be01eeee5d3ebb", "score": "0.6296685", "text": "def __bytes__(self) -> bytes:\n return str(self).encode(\"utf-8\")", "title": "" }, { "docid": "925e654bb6cad4ba62489f82f9c4d5c6", "score": "0.6292989", "text": "def serialize(self, obj):\n\n try:\n serialization_func = self.serialization_functions[obj]\n except KeyError:\n raise SerializationError(\"Cannot serialize type {}\".format(type(obj)))\n\n return serialization_func(obj)", "title": "" }, { "docid": "d59ed0f76c638d5f6977c74f095f5acc", "score": "0.629043", "text": "def encrypt_pickle(obj):\n pickled_bytes = pickle.dumps(obj)\n encoded_str = base64.b64encode(pickled_bytes).decode()\n return _get_configured_encryptor().encrypt(encoded_str)", "title": "" }, { "docid": "9e095c7bebd363e9919c0b0508f8d063", "score": "0.62595373", "text": "def dumps(obj, **kwargs):\n return json.dumps(obj, cls=JSONEncoder, **kwargs)", "title": "" }, { "docid": "b6585f7c8dbd2a52cbd39b59da87bc74", "score": "0.6230227", "text": "def serialize(self) -> bytearray:\n return self.pack()", "title": "" }, { "docid": "d860deda97ccd7cdc2a463c6ca9ad38a", "score": "0.6221185", "text": "def dump(obj, fp):\n return msgpack.pack(obj, fp, default=_serializer, use_bin_type=True)", "title": "" }, { "docid": "7e4a40f0dba893c180c8d8cfa8b9b7a8", "score": "0.62153864", "text": "def _convert_object(self, obj, encoding):\n return safe_unicode(obj)", "title": "" }, { "docid": "8c266ab40b5f9d1798037be11397afcd", "score": "0.6208121", "text": "def dumps(self, obj):\n if isinstance(obj, list):\n return json.dumps(obj, default=lambda x: x.__dict__, cls=Serializer.CustomEncoder, sort_keys=True,\n indent=4)\n\n \"\"\"Converts to Json String\"\"\"\n if isinstance(obj, dict):\n return json.dumps(obj, cls=Serializer.CustomEncoder, sort_keys=True, indent=4)\n\n return json.dumps(obj.__dict__, cls=Serializer.CustomEncoder, sort_keys=True, indent=4).encode()", "title": "" }, { "docid": "1d7378ecd5981edc4d1be170167d9d7d", "score": "0.6206965", "text": "def to_raw(self, obj):\n return obj._get_data()", "title": "" }, { "docid": "159db80c808f710e8f9b42744c97ca14", "score": "0.62046653", "text": "def dump_object(self, value):\n t = type(value)\n if t in integer_types:\n return str(value).encode('ascii')\n return b'!' + pickle.dumps(value)", "title": "" }, { "docid": "159db80c808f710e8f9b42744c97ca14", "score": "0.62046653", "text": "def dump_object(self, value):\n t = type(value)\n if t in integer_types:\n return str(value).encode('ascii')\n return b'!' 
+ pickle.dumps(value)", "title": "" }, { "docid": "dc436934ed7e81d7c8cca70b15addfb2", "score": "0.62017596", "text": "def serialize(self, value: Any) -> bytes:\n raise NotImplementedError", "title": "" }, { "docid": "3729ef5f9e8fe3755733037a3d772b6b", "score": "0.6201379", "text": "def msgpack_encoder(sig, obj):\n #if isinstance(obj, (list, tuple)):\n # return sig, [encode(o) for o in obj]\n return sig, encode(obj)", "title": "" }, { "docid": "31335c82ffb1e1ae10608b0752b833ba", "score": "0.61954856", "text": "def json_encode(self, object):\n\t\treturn simplejson.dumps(object, cls=JSONPlus, ensure_ascii=False)", "title": "" }, { "docid": "f89bd1d781e89019ff67b289092011d8", "score": "0.61641955", "text": "def encode_raw(objs):\n encoded = io.BytesIO()\n for s in objs:\n wire_type = s['wire_type']\n encoded.write(_encode_header(wire_type, s['id']))\n current_encoder = _WIRE_TYPE_TO_ENCODER_MAP.get(wire_type)\n if not current_encoder:\n raise ValueError('Unknown type {}'.format(wire_type))\n encoded.write(current_encoder(s['data']))\n\n return encoded.getvalue()", "title": "" }, { "docid": "be225958d167320dd828582c8158f33b", "score": "0.6161361", "text": "def dump(obj):\n stream = []\n _dump(obj, stream)\n return \"\".join(stream)", "title": "" }, { "docid": "bd43c2ad7b78bca193fe8b4b17834206", "score": "0.61544627", "text": "def _encode_value(self, o):\n return self.object_hook(o)", "title": "" }, { "docid": "62037a44721a68d365ad5d4ad9b2a1cf", "score": "0.6152823", "text": "def to_base64_encoded_json(obj) -> str:\n json_string = json.dumps(obj)\n return base64.b64encode(json_string.encode('utf-8')).decode('utf-8')", "title": "" }, { "docid": "d9cdc319704840e2c61bd67acad3716c", "score": "0.615239", "text": "def __bytes__(self):\n if len(self._object_queue) != 1:\n raise Exception('Multiple sub-object exist. 
Did you forget to close a sub-object?')\n return bytes(self._object_queue[0])", "title": "" }, { "docid": "3e55bb606d6072f6ec8e23db48765578", "score": "0.6149482", "text": "def to_bytes(self):\n raise NotImplementedError", "title": "" }, { "docid": "cf8d752b94919bbc0f5ba96c8a2c35db", "score": "0.61428106", "text": "def encode(value: Any) -> bytes:\n return get_type(type(value)).encode(value)", "title": "" }, { "docid": "049a2f605b5bf693aa26fb0ae06e0948", "score": "0.60939264", "text": "def _encode_object_identifier(self, oid: str) -> bytes:\n if not self._re_oid.match(oid):\n raise Error(\"Illegal object identifier\")\n cmps = list(map(int, oid.split(\".\")))\n if cmps[0] > 39 or cmps[1] > 39:\n raise Error(\"Illegal object identifier\")\n cmps = [40 * cmps[0] + cmps[1]] + cmps[2:]\n cmps.reverse()\n result = []\n for cmp_data in cmps:\n result.append(cmp_data & 0x7F)\n while cmp_data > 0x7F:\n cmp_data >>= 7\n result.append(0x80 | (cmp_data & 0x7F))\n result.reverse()\n return bytes(result)", "title": "" }, { "docid": "d030126b9fb15bf3ed73cd920b321f62", "score": "0.60929865", "text": "def packObject(self, object):\n\t\tif type(object) is types.IntType:\n\t\t\treturn struct.pack(\"<i\", object)\n\t\telif type(object) is types.LongType:\n\t\t\treturn struct.pack(\"<Q\", object)\n\t\telif type(object) is types.StringType:\n\t\t\treturn struct.pack(\"<i\", len(object))+object\n\t\telif type(object) is ToBeEncoded:\n\t\t\t# The server seeems to cut off strings at \\x00 regardless of the length\n\t\t\treturn struct.pack(\"<i\", len(object.bytes))+object.bytes\n\t\telif type(object) is types.UnicodeType:\n\t\t\tlog.addwarning(_(\"Warning: networking thread has to convert unicode string %(object)s message %(type)s\") % {'object':object, 'type':self.__class__})\n\t\t\tencoded = object.encode(\"utf-8\",'replace')\n\t\t\treturn struct.pack(\"<i\", len(encoded))+encoded\n\t\telif type(object) is NetworkIntType:\n\t\t\treturn struct.pack(\"<I\", object.value)\n\t\telif type(object) is NetworkSignedIntType:\n\t\t\treturn struct.pack(\"<i\", object.value)\n\t\telif type(object) is NetworkLongLongType:\n\t\t\treturn struct.pack(\"<Q\", object.value)\n\t\tlog.addwarning(_(\"Warning: unknown object type %s\") % type(object) +\" \"+ (\"in message %(type)s\") % {'type':self.__class__})\n\t\treturn \"\"", "title": "" }, { "docid": "b4743082821cb29e71547d9682d8c768", "score": "0.6079765", "text": "def serialize(object):\n return vistrails.db.services.io.serialize(object)", "title": "" }, { "docid": "0ebdd3fde32767af05435932135a4b2d", "score": "0.60541093", "text": "def dumps(obj, **kwargs):\n kwargs[\"cls\"] = DjangoJSONEncoder\n return json.dumps(obj, **kwargs)", "title": "" }, { "docid": "dbd5fa82ab4bd4b14b4fd8979a4135ba", "score": "0.6049644", "text": "def __call__(self, obj):\n return self.deepstr(obj, {})", "title": "" }, { "docid": "b52244787bc24308b22d8ef977a2303b", "score": "0.6037295", "text": "def dumps(obj, encoding='utf8', indent=None, errors=\"strict\"):\n string = dumpu(obj, indent)\n if encoding is not None:\n return string.encode(encoding, errors=errors)\n else:\n return string", "title": "" }, { "docid": "e239e93c6ecd79919a74d28fb30030ee", "score": "0.60267466", "text": "def _Encode(self, attr, value):\n if hasattr(value, \"SerializeToString\"):\n return buffer(value.SerializeToString())\n else:\n # Types \"string\" and \"bytes\" are stored as strings here.\n return buffer(utils.SmartStr(value))", "title": "" }, { "docid": "0fbcfb81336d01615bfc4d6f12c4bfb8", "score": "0.6025893", "text": "def 
writeObject(self, value):\n self.encoder.writeElement(value)", "title": "" }, { "docid": "3816546007286f6d42b144836f3a7c5e", "score": "0.60013646", "text": "def dumps(obj, **kwargs):\n return __default.dumps(obj, **kwargs)", "title": "" }, { "docid": "e09b39b2e22da0f39da54d2709b1d6bc", "score": "0.5999749", "text": "def serialize(self, buff):\n try:\n length = len(self.objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.objects:\n _x = val1\n buff.write(_get_struct_2HB().pack(_x.object, _x.target, _x.decoy))\n _x = self.workstation_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "title": "" }, { "docid": "56117d48fa0e976466bdd991f2720f1d", "score": "0.59868", "text": "def serialize(self, value: Any) -> bytes:\n return json.dumps(value).encode()", "title": "" } ]
08b62f4fb374af1c8cfcc184505c65f9
Asserts that the cycle parameters are correctly updated.
[ { "docid": "ffdf0b4a006e974cc807d2374817e258", "score": "0.67932165", "text": "def test_cycle_updater_function_wrong_length(self):\n data = CellParameters(**CELL_DATA)\n new_cycle_values = {\n \"phase_0\": 20.0,\n \"phase_1\": 180.0,\n \"phase_2\": 240.0,\n }\n self.assertRaises(\n ValueError, updaters.update_cycle_values, data, new_values=new_cycle_values\n )", "title": "" } ]
[ { "docid": "3039942404e954fea5c6d5299e65357b", "score": "0.7294884", "text": "def test_cycle_updater_function(self):\n data = CellParameters(**CELL_DATA)\n new_cycle_values = {\n \"phase_0\": 20.0,\n \"phase_1\": 180.0,\n \"phase_2\": 240.0,\n \"phase_3\": 60.0,\n }\n updaters.update_cycle_values(cell_data=data, new_values=new_cycle_values)\n self.assertEqual(EXPECTED_CYCLE, data.cycle)", "title": "" }, { "docid": "179c699afeceaded99cc7f01f79376fa", "score": "0.6177725", "text": "def test_cycle(self):\n self.assert_enter_command_mode()\n\n self.assert_cycle()\n self.assert_cycle()\n self.assert_cycle()\n self.assert_cycle()", "title": "" }, { "docid": "88941638c4465f7f9b4a3a2180af5242", "score": "0.61768794", "text": "def test_update_connectivity_values(self):\n self.graph.update_connectivity_values()\n self.assertEqual(self.graph.vertices[0].connectivity1, 1)\n self.assertEqual(self.graph.vertices[0].connectivity2, 2)\n self.assertEqual(self.graph.vertices[0].connectivity3, 3)\n self.assertEqual(self.graph.vertices[0].sorting_label, -1)\n self.assertEqual(self.graph.vertices[1].connectivity1, 2)\n self.assertEqual(self.graph.vertices[1].connectivity2, 3)\n self.assertEqual(self.graph.vertices[1].connectivity3, 6)\n self.assertEqual(self.graph.vertices[1].sorting_label, -1)\n self.assertEqual(self.graph.vertices[2].connectivity1, 2)\n self.assertEqual(self.graph.vertices[2].connectivity2, 4)\n self.assertEqual(self.graph.vertices[2].connectivity3, 7)\n self.assertEqual(self.graph.vertices[2].sorting_label, -1)\n self.assertEqual(self.graph.vertices[3].connectivity1, 2)\n self.assertEqual(self.graph.vertices[3].connectivity2, 4)\n self.assertEqual(self.graph.vertices[3].connectivity3, 7)\n self.assertEqual(self.graph.vertices[3].sorting_label, -1)\n self.assertEqual(self.graph.vertices[4].connectivity1, 2)\n self.assertEqual(self.graph.vertices[4].connectivity2, 3)\n self.assertEqual(self.graph.vertices[4].connectivity3, 6)\n self.assertEqual(self.graph.vertices[4].sorting_label, -1)\n self.assertEqual(self.graph.vertices[5].connectivity1, 1)\n self.assertEqual(self.graph.vertices[5].connectivity2, 2)\n self.assertEqual(self.graph.vertices[5].connectivity3, 3)\n self.assertEqual(self.graph.vertices[5].sorting_label, -1)", "title": "" }, { "docid": "e0ee2f177694b47f3775263b0b83fb86", "score": "0.6169417", "text": "def test_update_tasks_from_2_cycles(self):\n with factories.single_commit():\n _, _, group, _ = self._create_cycle_structure()\n group_id = group.id\n all_models.Cycle.query.update({\n all_models.Cycle.is_verification_needed: False\n })\n db.session.commit()\n self.tasks = all_models.CycleTaskGroupObjectTask.query.order_by(\n all_models.CycleTaskGroupObjectTask.id\n ).all()\n # all tasks in assigned state\n self.assertEqual([self.ASSIGNED] * 6, [t.status for t in self.tasks])\n # all tasks in progress state\n self.assert_status_over_bulk_update([self.IN_PROGRESS] * 6,\n [self.IN_PROGRESS] * 6)\n group = all_models.CycleTaskGroup.query.get(group_id)\n self.assertEqual(self.IN_PROGRESS, self.group.status)\n self.assertEqual(self.IN_PROGRESS, group.status)\n self.assertEqual(self.IN_PROGRESS, self.cycle.status)\n self.assertEqual(self.IN_PROGRESS, group.cycle.status)\n self.assertEqual(all_models.Workflow.ACTIVE, self.workflow.status)\n self.assertEqual(all_models.Workflow.ACTIVE, group.cycle.workflow.status)\n # update 1 task to finished\n self.assert_status_over_bulk_update(\n [self.FINISHED],\n [self.FINISHED] + [self.IN_PROGRESS] * 5)\n group = 
all_models.CycleTaskGroup.query.get(group_id)\n self.assertEqual(self.IN_PROGRESS, self.group.status)\n self.assertEqual(self.IN_PROGRESS, group.status)\n self.assertEqual(self.IN_PROGRESS, self.cycle.status)\n self.assertEqual(self.IN_PROGRESS, group.cycle.status)\n self.assertEqual(all_models.Workflow.ACTIVE, self.workflow.status)\n self.assertEqual(all_models.Workflow.ACTIVE, group.cycle.workflow.status)\n self.assert_notifications_for_object(self.tasks[0])\n for task in self.tasks[1:]:\n self.assert_notifications_for_object(task,\n u'cycle_task_due_in',\n u'cycle_task_due_today',\n u'cycle_task_overdue')\n self.assert_notifications_for_object(self.group.cycle)\n self.assert_notifications_for_object(group.cycle)\n # all tasks moved to finished\n self.assert_status_over_bulk_update([self.FINISHED] * 6,\n [self.FINISHED] * 6)\n group = all_models.CycleTaskGroup.query.get(group_id)\n self.assertEqual(self.FINISHED, self.group.status)\n self.assertEqual(self.FINISHED, group.status)\n self.assertEqual(self.FINISHED, self.cycle.status)\n self.assertEqual(self.FINISHED, group.cycle.status)\n self.assertEqual(all_models.Workflow.INACTIVE, self.workflow.status)\n self.assertEqual(all_models.Workflow.INACTIVE, group.cycle.workflow.status)\n for task in self.tasks:\n self.assert_notifications_for_object(task)\n self.assert_notifications_for_object(self.cycle,\n \"all_cycle_tasks_completed\")\n self.assert_notifications_for_object(group.cycle,\n \"all_cycle_tasks_completed\")", "title": "" }, { "docid": "fad928c3ccf6e21bd21bcbff3a9f63b3", "score": "0.6114712", "text": "def test_multiple_cycles(self):\n test_graph = graph.Graph()\n test_graph.add_edge_if_new(self.KEY_0, self.KEY_1)\n test_graph.add_edge_if_new(self.KEY_1, self.KEY_2)\n test_graph.add_edge_if_new(self.KEY_2, self.KEY_0)\n test_graph.add_edge_if_new(self.KEY_2, self.KEY_1)\n test_graph.add_edge_if_new(self.KEY_2, self.KEY_3)\n\n res = count_cycles.find_cycles(test_graph, self.MAX_CYCLE_LENGTH)\n expected_cycles = {\n 2: 1,\n 3: 1,\n }\n for cycle_length, cycles in enumerate(res):\n self.assertEqual(len(cycles), expected_cycles.get(cycle_length, 0))", "title": "" }, { "docid": "343d3fb5b713e9873394058e1520cb54", "score": "0.6093562", "text": "def test_state_verified_cycle(self, state):\n self.setup_cycle_state(True)\n resp = self.api.put(self.cycle, data={\"status\": state})\n cycle = all_models.Cycle.query.get(resp.json[\"cycle\"][\"id\"])\n self.assertEqual(state, cycle.status)", "title": "" }, { "docid": "bd31173a060fadaae5cedfb080ff9fea", "score": "0.5938496", "text": "def test_change_is_verification(self, flag):\n self.setup_cycle_state(flag)\n resp = self.api.put(self.cycle, data={\"is_verification_needed\": flag})\n self.assert200(resp)\n cycle = all_models.Cycle.query.get(resp.json[\"cycle\"][\"id\"])\n self.assertEqual(flag, cycle.is_verification_needed)", "title": "" }, { "docid": "ee0d40b232fe373db3af754100513f3e", "score": "0.58968014", "text": "def test_cycle_list_order_relevant_cycles(self):\n # Create a cycle of length 5\n edge = Edge(self.graph.vertices[0], self.graph.vertices[4])\n self.graph.add_edge(edge)\n # Test RC\n rc = self.graph.get_relevant_cycles()\n self.assertEqual(len(rc), 1)\n self.assertEqual(len(rc[0]), 5)\n for i in range(5):\n self.assertTrue(self.graph.has_edge(rc[0][i], rc[0][i - 1]))", "title": "" }, { "docid": "e22dde5f2287e0a64300e436147a8bd1", "score": "0.585942", "text": "def test_propagation_status_short(self):\n all_models.Cycle.query.filter(\n all_models.Cycle.id == self.cycle.id\n 
).update({\n all_models.Cycle.is_verification_needed: False\n })\n db.session.commit()\n self.tasks = all_models.CycleTaskGroupObjectTask.query.order_by(\n all_models.CycleTaskGroupObjectTask.id\n ).all()\n # all tasks in assigned state\n self.assertEqual([self.ASSIGNED] * 3, [t.status for t in self.tasks])\n # all tasks in progress state\n self.assert_status_over_bulk_update([self.IN_PROGRESS] * 3,\n [self.IN_PROGRESS] * 3)\n self.assertEqual(self.IN_PROGRESS, self.group.status)\n self.assertEqual(self.IN_PROGRESS, self.cycle.status)\n self.assertEqual(all_models.Workflow.ACTIVE, self.workflow.status)\n # update 1 task to finished\n self.assert_status_over_bulk_update(\n [self.FINISHED],\n [self.FINISHED, self.IN_PROGRESS, self.IN_PROGRESS])\n self.assertEqual(self.IN_PROGRESS, self.group.status)\n self.assertEqual(self.IN_PROGRESS, self.cycle.status)\n self.assertEqual(all_models.Workflow.ACTIVE, self.workflow.status)\n self.assert_notifications_for_object(self.tasks[0])\n for task in self.tasks[1:]:\n self.assert_notifications_for_object(task,\n u'cycle_task_due_in',\n u'cycle_task_due_today',\n u'cycle_task_overdue')\n self.cycle = self.tasks[0].cycle\n self.assert_notifications_for_object(self.cycle)\n # all tasks moved to finished\n self.assert_status_over_bulk_update([self.FINISHED] * 3,\n [self.FINISHED] * 3)\n self.assertEqual(self.FINISHED, self.group.status)\n self.assertEqual(self.FINISHED, self.cycle.status)\n self.assertEqual(all_models.Workflow.INACTIVE, self.workflow.status)\n for task in self.tasks:\n self.assert_notifications_for_object(task)\n self.cycle = self.tasks[0].cycle\n self.assert_notifications_for_object(self.cycle,\n \"all_cycle_tasks_completed\")", "title": "" }, { "docid": "367e8bfea04caf047421eaec0bbec529", "score": "0.58319294", "text": "def test_change_is_vf_wrong(self, flag):\n self.setup_cycle_state(flag)\n resp = self.api.put(self.cycle, data={\"is_verification_needed\": not flag})\n self.assert200(resp)\n cycle = all_models.Cycle.query.get(resp.json[\"cycle\"][\"id\"])\n self.assertEqual(flag, cycle.is_verification_needed)", "title": "" }, { "docid": "1fd2184dea1df330b35d320c827606a9", "score": "0.5829845", "text": "def test_update_attributes(self):\n ops = [qml.PauliX(0), qml.PauliY(1)]\n coeffs = [1, 2]\n H = ParametrizedHamiltonian(coeffs, ops)\n ev = ParametrizedEvolution(H=H, mxstep=10)\n\n # pylint:disable = use-implicit-booleaness-not-comparison\n assert ev.parameters == []\n assert ev.num_params == 0\n assert ev.t is None\n assert ev.odeint_kwargs == {\"mxstep\": 10}\n params = []\n t = 6\n new_ev = ev(params, t, atol=1e-6, rtol=1e-4)\n\n assert new_ev is not ev\n assert qml.math.allequal(new_ev.parameters, params)\n assert new_ev.num_params == 0\n assert qml.math.allequal(new_ev.t, [0, 6])\n assert new_ev.odeint_kwargs == {\"mxstep\": 10, \"atol\": 1e-6, \"rtol\": 1e-4}", "title": "" }, { "docid": "821a0e583efce33e5586806d1838003b", "score": "0.5828177", "text": "def test_is_vertex_in_cycle(self):\n for vertex in self.graph.vertices:\n self.assertFalse(self.graph.is_vertex_in_cycle(vertex))\n edge = Edge(self.graph.vertices[0], self.graph.vertices[3])\n self.graph.add_edge(edge) # To create a cycle\n for vertex in self.graph.vertices[0:4]:\n self.assertTrue(self.graph.is_vertex_in_cycle(vertex))\n for vertex in self.graph.vertices[4:]:\n self.assertFalse(self.graph.is_vertex_in_cycle(vertex))", "title": "" }, { "docid": "4ff12de6daf5b58a075c603cc0d347ba", "score": "0.58281136", "text": "def test_graphs_update(self):\n pass", "title": "" }, { 
"docid": "a1d1ade8039c3b93b66ecd93ca5f202c", "score": "0.58181715", "text": "def test_get_all_cycles(self):\n cycle_list = self.graph.get_all_cycles(self.graph.vertices[0])\n self.assertEqual(len(cycle_list), 0)\n edge = Edge(self.graph.vertices[0], self.graph.vertices[3])\n self.graph.add_edge(edge) # To create a cycle\n cycle_list = self.graph.get_all_cycles(self.graph.vertices[0])\n self.assertEqual(len(cycle_list), 2)\n self.assertEqual(len(cycle_list[0]), 4)\n self.assertEqual(len(cycle_list[1]), 4)", "title": "" }, { "docid": "1d10bb843285daf96bcc72f53bdaa831", "score": "0.57835644", "text": "def test_getReturnCycle():\n t = np.linspace(0,1,101)\n x1 = 1 - 1.5*t\n y1 = (3)*x1**2 - 1\n x2 = x1[-1] + t*2\n y2 = y1[-1] + t*4\n \n \n TestCycle1 = hys.SimpleCycle(np.column_stack([x1,y1]))\n TestCycle2 = hys.SimpleCycle(np.column_stack([x2,y2]))\n TestCycle3 = getReturnCycle(TestCycle1, TestCycle2)\n \n xySolution = np.zeros([76,2])\n xySolution[:75, :] = TestCycle2.xy[:75, :]\n xySolution[-1, :] = TestCycle1.xy[0, :]\n \n assert(np.all(xySolution == TestCycle3.xy))", "title": "" }, { "docid": "40003637b8b3f9578c4b1bf81058c248", "score": "0.5762237", "text": "def test_update(self):\n particle = self.particle.copy() # Unbound copy of particle\n self.assertIsNone(self.particle._pos)\n self.assertIsNone(self.particle._vel)\n self.assertIsNone(self.particle._state)\n self.assertIsNone(self.particle._target)\n\n self.particle.update()\n\n self.assertEqual(self.particle.pos, particle.pos)\n self.assertEqual(self.particle.vel, particle.vel)\n self.assertEqual(self.particle.state, particle.state)\n\n self.assertIsNotNone(self.particle._pos)\n self.assertIsNotNone(self.particle._vel)\n self.assertIsNotNone(self.particle._state)\n #self.assertIsNotNone(self.particle._target) will be none on spreading state", "title": "" }, { "docid": "f36c900fea59ea240a67bbb0888973f0", "score": "0.57433295", "text": "def test_graphs_partial_update(self):\n pass", "title": "" }, { "docid": "2ae4aaf45ba904fff6772f78301ec4e8", "score": "0.57243663", "text": "def test_assignment_to_parameters(self):\n Config.parameters = \"hello\"\n assert Config.parameters == \"hello\"\n Config.parameters = None\n assert Config.parameters is None", "title": "" }, { "docid": "b98c9246a79581e47f5df3ac53218336", "score": "0.57213235", "text": "def setPostCycle(self, cycle, mod='None'):\n \n pass", "title": "" }, { "docid": "f7c06eb2bee951367292cb9117a1f7b8", "score": "0.5707401", "text": "def test_set_params(self):\n mult = mticker.MultipleLocator(base=0.7)\n mult.set_params(base=1.7)\n assert mult._edge.step == 1.7", "title": "" }, { "docid": "851d19808da8907fefde3ed6f2b9f6ba", "score": "0.56970894", "text": "def test_is_edge_in_cycle(self):\n for vertex1 in self.graph.vertices:\n for vertex2, edge in vertex1.edges.items():\n self.assertFalse(self.graph.is_edge_in_cycle(edge))\n edge = Edge(self.graph.vertices[0], self.graph.vertices[3])\n self.graph.add_edge(edge) # To create a cycle\n for vertex1 in self.graph.vertices:\n for vertex2, edge in vertex1.edges.items():\n if self.graph.vertices.index(vertex1) < 4 and self.graph.vertices.index(vertex2) < 4:\n self.assertTrue(self.graph.is_edge_in_cycle(edge))\n else:\n self.assertFalse(self.graph.is_edge_in_cycle(edge))", "title": "" }, { "docid": "5b0ace4262137dcc11a2737338a035c3", "score": "0.5678323", "text": "def testParams(self):\n \n self.assertDictEqual(self.jobOrder.params, self.params)", "title": "" }, { "docid": "12e0265f97b7c96cf14efccac5b38bee", "score": "0.5677242", "text": "def 
test_state_non_verified_cycle(self, state, is_valid):\n self.setup_cycle_state(False)\n resp = self.api.put(self.cycle, data={\"status\": state})\n if is_valid:\n cycle = all_models.Cycle.query.get(resp.json[\"cycle\"][\"id\"])\n self.assertEqual(state, cycle.status)\n else:\n self.assert400(resp)", "title": "" }, { "docid": "649f9ef32e64c8c44352ff58f9897a1b", "score": "0.5672587", "text": "def test_parameters():\n repex = ReplicaExchange(store_filename='test', nsteps_per_iteration=1e6)\n assert repex.nsteps_per_iteration == 1000000\n assert repex.collision_rate == repex.default_parameters['collision_rate']", "title": "" }, { "docid": "18b883c0da560f234cdbc713f2738ec0", "score": "0.56610554", "text": "def test_change_cycle_none_flag(self, flag):\n self.setup_cycle_state(flag)\n resp = self.api.put(self.cycle, not_send_fields=[\"is_verification_needed\"])\n self.assert200(resp)\n cycle = all_models.Cycle.query.get(resp.json[\"cycle\"][\"id\"])\n self.assertEqual(flag, cycle.is_verification_needed)", "title": "" }, { "docid": "5d276fa8df7aac57f1da0ca1a2f05fcb", "score": "0.56296664", "text": "def test_get_relevant_cycles(self):\n cycle_list = self.graph.get_relevant_cycles()\n self.assertEqual(len(cycle_list), 0)\n # Create a cycle of length 4\n edge = Edge(self.graph.vertices[0], self.graph.vertices[3])\n self.graph.add_edge(edge)\n # Create a second cycle of length 4\n edge = Edge(self.graph.vertices[0], self.graph.vertices[5])\n self.graph.add_edge(edge)\n # Create a bridge forming multiple cycles of length 4\n edge = Edge(self.graph.vertices[1], self.graph.vertices[4])\n self.graph.add_edge(edge)\n\n # SSSR should be 3 cycles of length 4\n cycle_list = self.graph.get_smallest_set_of_smallest_rings()\n self.assertEqual(len(cycle_list), 3)\n size_list = sorted([len(cycle) for cycle in cycle_list])\n self.assertEqual(size_list, [4, 4, 4])\n\n # RC should be 5 cycles of length 4\n cycle_list = self.graph.get_relevant_cycles()\n self.assertEqual(len(cycle_list), 5)\n size_list = sorted([len(cycle) for cycle in cycle_list])\n self.assertEqual(size_list, [4, 4, 4, 4, 4])", "title": "" }, { "docid": "24504615c0b44796164a740da54004d0", "score": "0.55920804", "text": "def test_update13(self):\n pass", "title": "" }, { "docid": "535995d141197ba83de69eec1a5c54d8", "score": "0.55912656", "text": "def test_big_cycle(self):\n test_graph = graph.Graph()\n test_graph.add_edge_if_new(self.KEY_0, self.KEY_1)\n test_graph.add_edge_if_new(self.KEY_1, self.KEY_2)\n test_graph.add_edge_if_new(self.KEY_2, self.KEY_3)\n test_graph.add_edge_if_new(self.KEY_3, self.KEY_0)\n\n res = count_cycles.find_cycles(test_graph, self.MAX_CYCLE_LENGTH)\n expected_cycles = {\n 4: 1,\n }\n for cycle_length, cycles in enumerate(res):\n self.assertEqual(len(cycles), expected_cycles.get(cycle_length, 0))", "title": "" }, { "docid": "533e5f9c33f6fdbf1e6994a2839728eb", "score": "0.5587841", "text": "def test_complete_graph(self):\n test_graph = graph.Graph()\n for ka, kb in itertools.permutations(\n [self.KEY_0, self.KEY_1, self.KEY_2, self.KEY_3], 2):\n test_graph.add_edge_if_new(ka, kb)\n\n res = count_cycles.find_cycles(test_graph, self.MAX_CYCLE_LENGTH)\n expected_cycles = {2: 6, 3: 8, 4: 6}\n for cycle_length, cycles in enumerate(res):\n self.assertEqual(len(cycles), expected_cycles.get(cycle_length, 0))", "title": "" }, { "docid": "d32cf09b13497e3691433f288c818aa6", "score": "0.55735636", "text": "def test_update(self):\n pass", "title": "" }, { "docid": "039d929eaa051b6b171eb15955151194", "score": "0.55450803", "text": "def 
test_no_self_cycles(self):\n test_graph = graph.Graph()\n test_graph.add_edge_if_new(self.KEY_0, self.KEY_1)\n test_graph.add_edge_if_new(self.KEY_1, self.KEY_0)\n test_graph.add_edge_if_new(self.KEY_0, self.KEY_0)\n test_graph.add_edge_if_new(self.KEY_1, self.KEY_1)\n\n res = count_cycles.find_cycles(test_graph, self.MAX_CYCLE_LENGTH)\n expected_cycles = {\n 2: 1,\n }\n for cycle_length, cycles in enumerate(res):\n self.assertEqual(len(cycles), expected_cycles.get(cycle_length, 0))", "title": "" }, { "docid": "7f0c04cee18be9a9594cd8e14249d087", "score": "0.55381936", "text": "def test_check_for_cycles_positive(self):\n\n # The below schemata cover a few real edge cases; e.g., subgroups,\n # schemata with no incoming links, schemata with no outgoing links,\n # and schemata that link back to themselves.\n schemata = {\n 'analyte': {\n 'links': [\n {\n 'subgroup': [\n {'target_type': 'portion'},\n {'target_type': 'sample'},\n ]\n }\n ]\n },\n 'case': {\n 'links': [\n {'target_type': 'tissue_source_site'},\n ]\n },\n 'portion': {\n 'links': [\n {'target_type': 'sample'},\n ]\n },\n 'sample': {\n 'links': [\n {'target_type': 'case'},\n {'target_type': 'sample'},\n {'target_type': 'tissue_source_site'},\n ]\n },\n 'slide': {\n 'links': [\n {\n 'subgroup': [\n {'target_type': 'portion'},\n {'target_type': 'sample'},\n ]\n }\n ]\n },\n 'tissue_source_site': {'links': []},\n }\n\n check_for_cycles(schemata)", "title": "" }, { "docid": "5da58e9f6eaf277a24809f9b8314652f", "score": "0.55368423", "text": "def test_parameter_get_updated(self):\n tw_tensor_before_training = self.acess_named_parameter(self.model_binary, \"_transformation_tensor\")\n combining_tensor_before_training = self.acess_named_parameter(self.model_binary, \"_combining_tensor\")\n\n for epoch in range(0, 10):\n self.optimizer_binary.zero_grad()\n output = self.model_binary(self.batch_bin)\n loss = binary_class_cross_entropy(output, self.label)\n loss.backward()\n self.optimizer_binary.step()\n tw_tensor_after_training = self.acess_named_parameter(self.model_binary, \"_transformation_tensor\")\n combining_tensor_after_training = self.acess_named_parameter(self.model_binary, \"_combining_tensor\")\n difference_combining_tensor = torch.sum(\n combining_tensor_before_training - combining_tensor_after_training).item()\n difference_tw_tensor = torch.sum(tw_tensor_before_training - tw_tensor_after_training).item()\n np.testing.assert_equal(difference_combining_tensor != 0.0, True)\n np.testing.assert_equal(difference_tw_tensor != 0.0, True)", "title": "" }, { "docid": "ad3b2265710d14c7e42ebd290393d390", "score": "0.5531577", "text": "def testCyclicalDeps(self):\n series = self.GetPatchSeries()\n\n patch1, patch2, patch3 = patches = self.GetPatches(3)\n\n self.SetPatchDeps(patch1, [patch2.id])\n self.SetPatchDeps(patch2, cq=[patch3.id])\n self.SetPatchDeps(patch3, [patch1.id])\n\n self.SetPatchApply(patch1)\n self.SetPatchApply(patch2)\n self.SetPatchApply(patch3)\n\n self.mox.ReplayAll()\n self.assertResults(series, patches, [patch2, patch1, patch3])\n self.mox.VerifyAll()", "title": "" }, { "docid": "ce21b21ed3f8487bf06114377baea4c5", "score": "0.5520966", "text": "def updateCycleDoseParams(self):\n cycle = self.r.p.cycle\n timeNode = self.r.p.timeNode\n\n if timeNode <= 0:\n return\n\n daysIntoCycle = sum(self.r.o.stepLengths[cycle][:timeNode])\n cycleLength = self.r.p.cycleLength\n\n maxDetailedDpaThisCycle = 0.0\n peakDoseAssem = None\n for a in self.r.core:\n if a.getMaxParam(\"detailedDpaThisCycle\") > maxDetailedDpaThisCycle:\n 
maxDetailedDpaThisCycle = a.getMaxParam(\"detailedDpaThisCycle\")\n peakDoseAssem = a\n self.r.core.p.maxDetailedDpaThisCycle = maxDetailedDpaThisCycle\n\n if peakDoseAssem is None:\n return\n\n doseHalfMaxHeights = peakDoseAssem.getElevationsMatchingParamValue(\n \"detailedDpaThisCycle\", maxDetailedDpaThisCycle / 2.0\n )\n if len(doseHalfMaxHeights) != 2:\n runLog.warning(\n \"Something strange with detailedDpaThisCycle shape in {}, \"\n \"non-2 number of values matching {}\".format(\n peakDoseAssem, maxDetailedDpaThisCycle / 2.0\n )\n )\n else:\n self.r.core.p.dpaFullWidthHalfMax = (\n doseHalfMaxHeights[1] - doseHalfMaxHeights[0]\n )\n\n aclpDoseLimit = self.options.aclpDoseLimit\n aclpDoseLimit3 = aclpDoseLimit / 3.0 * (daysIntoCycle / cycleLength)\n aclpLocations3 = peakDoseAssem.getElevationsMatchingParamValue(\n \"detailedDpaThisCycle\", aclpDoseLimit3\n )\n if len(aclpLocations3) != 2:\n runLog.warning(\n \"Something strange with detailedDpaThisCycle shape in {}\"\n \", non-2 number of values matching {}\".format(\n peakDoseAssem, aclpDoseLimit / 3.0\n )\n )\n else:\n self.r.core.p.elevationOfACLP3Cycles = aclpLocations3[1]\n\n aclpDoseLimit7 = aclpDoseLimit / 7.0 * (daysIntoCycle / cycleLength)\n aclpLocations7 = peakDoseAssem.getElevationsMatchingParamValue(\n \"detailedDpaThisCycle\", aclpDoseLimit7\n )\n if len(aclpLocations7) != 2:\n runLog.warning(\n \"Something strange with detailedDpaThisCycle shape in {}, \"\n \"non-2 number of values matching {}\".format(\n peakDoseAssem, aclpDoseLimit / 7.0\n )\n )\n else:\n self.r.core.p.elevationOfACLP7Cycles = aclpLocations7[1]", "title": "" }, { "docid": "29b5141f3c41d8f56b9fcd0bc820c003", "score": "0.5506806", "text": "def test_update(self):\n self._unit.update(0, 0, 1, 1)\n self.assertEqual(self._unit.get_euler(), (0, 0, 1.))\n self._unit.update(0, 0, -0.5, 2)\n self.assertEqual(self._unit.get_euler(), (0, 0, 0.))\n\n self._unit.update(0, 1, 0, 1)\n self.assertEqual(self._unit.get_euler(), (0, 1, 0.))\n self._unit.update(0, -2., 0, 0.5)\n self.assertEqual(self._unit.get_euler(), (0, 0, 0.))\n\n self._unit.update(1, 0, 0, 1)\n self.assertEqual(self._unit.get_euler(), (1, 0, 0.))\n self._unit.update(-2, 0., 0, 0.5)\n self.assertAlmostEqual(self._unit.get_euler(), (0, 0, 0.))", "title": "" }, { "docid": "fe4230dc50a873a0c5c29522dc68da9e", "score": "0.5501097", "text": "def test_modifparameter(self):\n\n #1. 
create the object without argument\n param1 = MadLoopParam()\n\n to_test = {\"MLReductionLib\": {'correct': ['1|2', ' 1|2 '],\n 'wrong':[1/2, 0.3, True],\n 'target': ['1|2', '1|2']},\n \"IREGIMODE\": {'correct' : [1.0, 2, 3, -1, '1.0', '2', '-3', '-3.0'],\n 'wrong' : ['1.5', '-1.5', 1.5, -3.4, True, 'starwars'],\n 'target': [1,2,3,-1,1,2,-3,-3]\n },\n \"IREGIRECY\": {'correct' : [True, False, 0, 1, '0', '1',\n '.true.', '.false.','T', \n 'F', 'true', 'false', 'True \\n'],\n 'wrong' : ['a', [], 5, 66, {}, None, -1],\n \"target\": [True, False, False, True, False, True, \n True, False,True, False,True,False, True]},\n \"CTStabThres\": {'correct': [1.0, 1e-3, 1+0j, 1,\"1d-3\", \"1e-3\"],\n 'wrong': [True, 'hello'],\n 'target': [1.0,1e-3, 1.0, 1.0, 1e-3, 1e-3]}\n }\n\n import madgraph.various.misc as misc\n for name, data in to_test.items():\n for i,value in enumerate(data['correct']):\n param1[name] = value\n self.assertEqual(param1[name], data['target'][i])\n self.assertTrue(name.lower() not in param1.user_set)\n self.assertEqual(type(data['target'][i]), type(param1[name]))\n for value in data['wrong']:\n self.assertRaises(Exception, param1.__setitem__, (name, value))", "title": "" }, { "docid": "ade331bf1c331ae9f213ad0f6f415e2c", "score": "0.54963577", "text": "def test_digest_changes_on_assignment(self):\n for test_label,(obj,statement) in UNEQUAL_DIGEST_TEST_DICT.items():\n # use subtest to get individual test result.\n # tests will not stop if one subtest fails!\n with self.subTest(test_label):\n digest_before_statement,digest_after_statement = self.get_digests(obj, statement)\n self.assertNotEqual(digest_before_statement,digest_after_statement)", "title": "" }, { "docid": "749569814c6f01228bdb5918228c3222", "score": "0.5485593", "text": "def setCycle(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "ed1ea6d4b17b6f1fcc667b4d86fe7435", "score": "0.5484079", "text": "def test_is_cyclic(self):\n self.assertFalse(self.graph.is_cyclic())\n edge = Edge(self.graph.vertices[0], self.graph.vertices[3])\n self.graph.add_edge(edge) # To create a cycle\n self.assertTrue(self.graph.is_cyclic())", "title": "" }, { "docid": "e2122e4928deaf9b570228dc0eac276e", "score": "0.5475277", "text": "def test_startup_params_second_pass(self):\n self.assert_enter_command_mode()\n\n self.assert_get_parameter(Parameter.SERIAL_FLOW_CONTROL, '11110') # Immutable\n self.assert_get_parameter(Parameter.BANNER, False)\n self.assert_get_parameter(Parameter.INSTRUMENT_ID, 0)\n self.assert_get_parameter(Parameter.SLEEP_ENABLE, 0)\n self.assert_get_parameter(Parameter.SAVE_NVRAM_TO_RECORDER, True) # Immutable\n self.assert_get_parameter(Parameter.POLLED_MODE, False)\n self.assert_get_parameter(Parameter.XMIT_POWER, 255)\n self.assert_get_parameter(Parameter.SPEED_OF_SOUND, 1485)\n self.assert_get_parameter(Parameter.PITCH, 0)\n self.assert_get_parameter(Parameter.ROLL, 0)\n self.assert_get_parameter(Parameter.SALINITY, 35)\n self.assert_get_parameter(Parameter.TIME_PER_ENSEMBLE, '00:00:00.00')\n self.assert_get_parameter(Parameter.TIME_PER_PING, '00:01.00')\n self.assert_get_parameter(Parameter.FALSE_TARGET_THRESHOLD, '050,001')\n self.assert_get_parameter(Parameter.BANDWIDTH_CONTROL, 0)\n self.assert_get_parameter(Parameter.CORRELATION_THRESHOLD, 64)\n self.assert_get_parameter(Parameter.SERIAL_OUT_FW_SWITCHES, '111100000') # Immutable\n self.assert_get_parameter(Parameter.ERROR_VELOCITY_THRESHOLD, 2000)\n self.assert_get_parameter(Parameter.BLANK_AFTER_TRANSMIT, 704)\n 
self.assert_get_parameter(Parameter.CLIP_DATA_PAST_BOTTOM, 0)\n self.assert_get_parameter(Parameter.RECEIVER_GAIN_SELECT, 1)\n self.assert_get_parameter(Parameter.WATER_REFERENCE_LAYER, '001,005')\n self.assert_get_parameter(Parameter.WATER_PROFILING_MODE, 1) # Immutable\n self.assert_get_parameter(Parameter.NUMBER_OF_DEPTH_CELLS, 100)\n self.assert_get_parameter(Parameter.PINGS_PER_ENSEMBLE, 1)\n self.assert_get_parameter(Parameter.DEPTH_CELL_SIZE, 800)\n self.assert_get_parameter(Parameter.TRANSMIT_LENGTH, 0)\n self.assert_get_parameter(Parameter.PING_WEIGHT, 0)\n self.assert_get_parameter(Parameter.AMBIGUITY_VELOCITY, 175)\n\n # Change these values anyway just in case it ran first.\n self.assert_set_parameter(Parameter.INSTRUMENT_ID, 1)\n self.assert_set_parameter(Parameter.SLEEP_ENABLE, 1)\n self.assert_set_parameter(Parameter.POLLED_MODE, True)\n self.assert_set_parameter(Parameter.XMIT_POWER, 250)\n self.assert_set_parameter(Parameter.SPEED_OF_SOUND, 1480)\n self.assert_set_parameter(Parameter.PITCH, 1)\n self.assert_set_parameter(Parameter.ROLL, 1)\n self.assert_set_parameter(Parameter.SALINITY, 36)\n self.assert_set_parameter(Parameter.TIME_PER_ENSEMBLE, '00:00:01.00')\n self.assert_set_parameter(Parameter.TIME_PER_PING, '00:02.00')\n self.assert_set_parameter(Parameter.FALSE_TARGET_THRESHOLD, '049,002')\n self.assert_set_parameter(Parameter.BANDWIDTH_CONTROL, 1)\n self.assert_set_parameter(Parameter.CORRELATION_THRESHOLD, 63)\n self.assert_set_parameter(Parameter.ERROR_VELOCITY_THRESHOLD, 1999)\n self.assert_set_parameter(Parameter.BLANK_AFTER_TRANSMIT, 714)\n self.assert_set_parameter(Parameter.CLIP_DATA_PAST_BOTTOM, 1)\n self.assert_set_parameter(Parameter.RECEIVER_GAIN_SELECT, 0)\n self.assert_set_parameter(Parameter.WATER_REFERENCE_LAYER, '002,006')\n self.assert_set_parameter(Parameter.NUMBER_OF_DEPTH_CELLS, 99)\n self.assert_set_parameter(Parameter.PINGS_PER_ENSEMBLE, 0)\n self.assert_set_parameter(Parameter.DEPTH_CELL_SIZE, 790)\n self.assert_set_parameter(Parameter.TRANSMIT_LENGTH, 1)\n self.assert_set_parameter(Parameter.PING_WEIGHT, 1)\n self.assert_set_parameter(Parameter.AMBIGUITY_VELOCITY, 176)", "title": "" }, { "docid": "b78d470c9920e7b9fde53cbee0b5a497", "score": "0.54703593", "text": "def test_invalid_update(self):\n self.estimator._reset()\n self.estimator.update(0, 0)\n self.assertEqual(0, len(self.estimator._buffer_wheel))\n with self.assertRaises(ValueError) as ve:\n self.assertEqual(self.estimator._min_iterations, self.estimator.get(-1))\n self.assertIn('Invalid `total_secs`', ve.message)\n with self.assertRaises(ValueError) as ve:\n self.assertEqual(self.estimator._min_iterations, self.estimator.get(0))\n self.assertIn('Invalid `total_secs`', ve.message)", "title": "" }, { "docid": "7032e8554a30d823312ba22b7fa4fd00", "score": "0.5462364", "text": "def test_extras_graphs_update(self):\n pass", "title": "" }, { "docid": "f4c15917bdcd8adbc5380e28d2b89c9b", "score": "0.5453746", "text": "def test_update_convergence(self):\n for _ in range(0, self.estimator._capacity):\n self.estimator.update(2.0, 4)\n self.assertEqual(2, self.estimator._mean_runtime_secs())\n self.assertEqual(0.5, self.estimator._mean_step_time_secs())\n\n iterations = 4\n target_elapsed_time = 10\n actual_elapsed_time = 2\n secs_per_iterations = actual_elapsed_time / iterations\n for _ in range(0, 5):\n self.estimator.update(actual_elapsed_time, iterations)\n iterations = self.estimator.get(target_elapsed_time)\n actual_elapsed_time = iterations * secs_per_iterations\n 
self.assertLessEqual(abs(actual_elapsed_time - target_elapsed_time), 1)", "title": "" }, { "docid": "a5ca0b25a294585768cb2c69d88fb909", "score": "0.5446503", "text": "def test_update(self):\n subscriber = PressureSubscriber()\n context = {\"pressure\": 0.23}\n expected = \"Current pressure is 0.23 atm\"\n assert expected == subscriber.update(context)", "title": "" }, { "docid": "aba2902256c6ef79d630b886241d5b33", "score": "0.5444706", "text": "def test_propagation_status_full(self):\n # all tasks in assigned state\n self.assertEqual([self.ASSIGNED] * 3, [t.status for t in self.tasks])\n # all tasks in progress state\n self.assert_status_over_bulk_update([self.IN_PROGRESS] * 3,\n [self.IN_PROGRESS] * 3)\n self.assertEqual(self.IN_PROGRESS, self.group.status)\n self.assertEqual(self.IN_PROGRESS, self.cycle.status)\n self.assertEqual(all_models.Workflow.ACTIVE, self.workflow.status)\n # update 1 task to finished\n self.assert_status_over_bulk_update(\n [self.FINISHED],\n [self.FINISHED, self.IN_PROGRESS, self.IN_PROGRESS])\n self.assertEqual(self.IN_PROGRESS, self.group.status)\n self.assertEqual(self.IN_PROGRESS, self.cycle.status)\n self.assertEqual(all_models.Workflow.ACTIVE, self.workflow.status)\n # all tasks moved to finished\n self.assert_status_over_bulk_update([self.FINISHED] * 3,\n [self.FINISHED] * 3)\n self.assertEqual(self.FINISHED, self.group.status)\n self.assertEqual(self.FINISHED, self.cycle.status)\n self.assertEqual(all_models.Workflow.ACTIVE, self.workflow.status)\n for task in self.tasks:\n self.assert_notifications_for_object(task,\n u'cycle_task_due_in',\n u'cycle_task_due_today',\n u'cycle_task_overdue')\n self.cycle = self.tasks[0].cycle\n self.assert_notifications_for_object(self.cycle)\n # all tasks moved to verified\n self.assert_status_over_bulk_update([self.VERIFIED] * 3,\n [self.VERIFIED] * 3)\n self.assertEqual(self.VERIFIED, self.group.status)\n self.assertEqual(self.VERIFIED, self.cycle.status)\n self.assertEqual(all_models.Workflow.INACTIVE, self.workflow.status)\n for task in self.tasks:\n self.assert_notifications_for_object(task)\n self.cycle = self.tasks[0].cycle\n self.assert_notifications_for_object(self.cycle,\n \"all_cycle_tasks_completed\")", "title": "" }, { "docid": "32d8a4df966e77a5c9a883f54a6bb8a7", "score": "0.5434007", "text": "def setPreCycle(self, cycle, mod='None'):\n \n pass", "title": "" }, { "docid": "3873969f1ed3bbcf816c86e90de7c190", "score": "0.5406309", "text": "def test_update_vehicles(self):\n pass", "title": "" }, { "docid": "77c8de278688f11da2ee8924a6d86aae", "score": "0.5396581", "text": "def test_cycle_detection(self):\n self.assertRaises(CycleDetectionError, App._create_, get_test_data_folder(\n version='2.0',\n which=os.path.join('aggt', 'circular')\n ))", "title": "" }, { "docid": "ddb4b9e25f098edf6ef415164eed134c", "score": "0.53597534", "text": "def test_set_new_value(self):", "title": "" }, { "docid": "89ab9446ab0be17cd32dca9b69905690", "score": "0.535627", "text": "def test_components_cycles():\n c = TestClient()\n conanfile = textwrap.dedent(\"\"\"\n from conan import ConanFile\n\n class TestcycleConan(ConanFile):\n name = \"testcycle\"\n version = \"1.0\"\n\n def package_info(self):\n self.cpp_info.components[\"c\"].requires = [\"b\"]\n self.cpp_info.components[\"b\"].requires = [\"a\"]\n self.cpp_info.components[\"a\"].requires = [\"c\"] # cycle!\n \"\"\")\n test_conanfile = textwrap.dedent(\"\"\"\n from conan import ConanFile\n class Test(ConanFile):\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n 
generators = \"CMakeDeps\"\n\n def requirements(self):\n self.requires(self.tested_reference_str)\n\n def test(self):\n pass\n \"\"\")\n c.save({\"conanfile.py\": conanfile,\n \"test_package/conanfile.py\": test_conanfile})\n with pytest.raises(Exception) as exc:\n c.run(\"create .\")\n out = str(exc.value)\n assert \"ERROR: Error in generator 'CMakeDeps': error generating context for 'testcycle/1.0': \" \\\n \"There is a dependency loop in 'self.cpp_info.components' requires:\" in out\n assert \"a requires c\" in out\n assert \"b requires a\" in out\n assert \"c requires b\" in out", "title": "" }, { "docid": "1e6ce46350abc1d92a2bf540e71b0689", "score": "0.53485376", "text": "def test_update(self):\n\n pco = self.pco\n\n # Get our test victim, verify attributes are as expected\n pico = pco.people.people.get(\"34765191\")\n self.assertEqual(pico.child, False)\n self.assertEqual(pico.nickname, None)\n \n # Change our test victim, ensure attributes are saved\n pico.child = True\n pico.nickname = \"PiRo\"\n self.assertEqual(pico.child, True)\n self.assertEqual(pico.nickname, \"PiRo\")\n\n # Update our test victim, ensure attributes come back\n pico.update()\n self.assertEqual(pico.child, True)\n self.assertEqual(pico.nickname, \"PiRo\")\n\n # Get another instance of our test victim, verify correct attributes\n pico2 = pco.people.people.get(\"34765191\")\n self.assertEqual(pico.child, True)\n self.assertEqual(pico.nickname, \"PiRo\")\n\n # Set our test victime back, and verify correct attributes\n pico2.child = False\n pico2.nickname = None\n pico2.update()\n\n self.assertEqual(pico2.child, False)\n self.assertEqual(pico2.nickname, None)\n\n # Test more updates using same object\n # This verifies changed attribute tracking is reset properly\n pico2.child = True\n pico2.nickname = \"PiRo\"\n pico2.update()\n\n self.assertEqual(pico2.child, True)\n self.assertEqual(pico2.nickname, \"PiRo\")\n\n # Verify attributes were set properly with a third object\n # Reset the original object\n pico3 = pco.people.people.get(\"34765191\")\n \n self.assertEqual(pico3.child, True)\n self.assertEqual(pico3.nickname, \"PiRo\")\n\n pico3.child = False\n pico3.nickname = None\n pico3.update()", "title": "" }, { "docid": "2125341465640e457e6d8e9c9c66bbcc", "score": "0.53462815", "text": "def count_cycle_params():\n return sum(2 if '<=>' in r else 1 for r in cycle1_reactions), \\\n sum(2 if '<=>' in r else 1 for r in cycle2_reactions)", "title": "" }, { "docid": "1d9399efa174b527e5f94e1551e3a4a6", "score": "0.5343934", "text": "def test_1_args(self):\n self.set_nb_to_zero()\n r1 = Rectangle(2, 1, 10, 0)\n r1.update(100)\n self.assertEqual(r1.id, 100)", "title": "" }, { "docid": "87b2fc887855518930dc28fc124ead54", "score": "0.5339864", "text": "def test_startup_params_first_pass(self):\n self.assert_enter_command_mode()\n\n self.assert_get_parameter(Parameter.SERIAL_FLOW_CONTROL, '11110') # Immutable\n self.assert_get_parameter(Parameter.BANNER, False)\n self.assert_get_parameter(Parameter.INSTRUMENT_ID, 0)\n self.assert_get_parameter(Parameter.SLEEP_ENABLE, 0)\n self.assert_get_parameter(Parameter.SAVE_NVRAM_TO_RECORDER, True) # Immutable\n self.assert_get_parameter(Parameter.POLLED_MODE, False)\n self.assert_get_parameter(Parameter.XMIT_POWER, 255)\n self.assert_get_parameter(Parameter.SPEED_OF_SOUND, 1485)\n self.assert_get_parameter(Parameter.PITCH, 0)\n self.assert_get_parameter(Parameter.ROLL, 0)\n self.assert_get_parameter(Parameter.SALINITY, 35)\n self.assert_get_parameter(Parameter.TIME_PER_ENSEMBLE, 
'00:00:00.00')\n self.assert_get_parameter(Parameter.TIME_PER_PING, '00:01.00')\n self.assert_get_parameter(Parameter.FALSE_TARGET_THRESHOLD, '050,001')\n self.assert_get_parameter(Parameter.BANDWIDTH_CONTROL, 0)\n self.assert_get_parameter(Parameter.CORRELATION_THRESHOLD, 64)\n self.assert_get_parameter(Parameter.SERIAL_OUT_FW_SWITCHES, '111100000') # Immutable\n self.assert_get_parameter(Parameter.ERROR_VELOCITY_THRESHOLD, 2000)\n self.assert_get_parameter(Parameter.BLANK_AFTER_TRANSMIT, 704)\n self.assert_get_parameter(Parameter.CLIP_DATA_PAST_BOTTOM, 0)\n self.assert_get_parameter(Parameter.RECEIVER_GAIN_SELECT, 1)\n self.assert_get_parameter(Parameter.WATER_REFERENCE_LAYER, '001,005')\n self.assert_get_parameter(Parameter.WATER_PROFILING_MODE, 1) # Immutable\n self.assert_get_parameter(Parameter.NUMBER_OF_DEPTH_CELLS, 100)\n self.assert_get_parameter(Parameter.PINGS_PER_ENSEMBLE, 1)\n self.assert_get_parameter(Parameter.DEPTH_CELL_SIZE, 800)\n self.assert_get_parameter(Parameter.TRANSMIT_LENGTH, 0)\n self.assert_get_parameter(Parameter.PING_WEIGHT, 0)\n self.assert_get_parameter(Parameter.AMBIGUITY_VELOCITY, 175)\n\n # Change these values anyway just in case it ran first.\n self.assert_set_parameter(Parameter.INSTRUMENT_ID, 1)\n self.assert_set_parameter(Parameter.SLEEP_ENABLE, 1)\n self.assert_set_parameter(Parameter.POLLED_MODE, True)\n self.assert_set_parameter(Parameter.XMIT_POWER, 250)\n self.assert_set_parameter(Parameter.SPEED_OF_SOUND, 1480)\n self.assert_set_parameter(Parameter.PITCH, 1)\n self.assert_set_parameter(Parameter.ROLL, 1)\n self.assert_set_parameter(Parameter.SALINITY, 36)\n self.assert_set_parameter(Parameter.TIME_PER_ENSEMBLE, '00:00:01.00')\n self.assert_set_parameter(Parameter.TIME_PER_PING, '00:02.00')\n self.assert_set_parameter(Parameter.FALSE_TARGET_THRESHOLD, '049,002')\n self.assert_set_parameter(Parameter.BANDWIDTH_CONTROL, 1)\n self.assert_set_parameter(Parameter.CORRELATION_THRESHOLD, 63)\n self.assert_set_parameter(Parameter.ERROR_VELOCITY_THRESHOLD, 1999)\n self.assert_set_parameter(Parameter.BLANK_AFTER_TRANSMIT, 714)\n self.assert_set_parameter(Parameter.CLIP_DATA_PAST_BOTTOM, 1)\n self.assert_set_parameter(Parameter.RECEIVER_GAIN_SELECT, 0)\n self.assert_set_parameter(Parameter.WATER_REFERENCE_LAYER, '002,006')\n self.assert_set_parameter(Parameter.NUMBER_OF_DEPTH_CELLS, 99)\n self.assert_set_parameter(Parameter.PINGS_PER_ENSEMBLE, 0)\n self.assert_set_parameter(Parameter.DEPTH_CELL_SIZE, 790)\n self.assert_set_parameter(Parameter.TRANSMIT_LENGTH, 1)\n self.assert_set_parameter(Parameter.PING_WEIGHT, 1)\n self.assert_set_parameter(Parameter.AMBIGUITY_VELOCITY, 176)", "title": "" }, { "docid": "61f4d61fdc0e65ff48cb8d8932bc8e25", "score": "0.5324551", "text": "def test_set(self):\n self.assert_initialize_driver()\n\n params = {\n Parameter.INSTRUMENT_ID: 0,\n Parameter.SLEEP_ENABLE: 0,\n Parameter.POLLED_MODE: False,\n Parameter.XMIT_POWER: 255,\n Parameter.SPEED_OF_SOUND: 1485,\n Parameter.PITCH: 0,\n Parameter.ROLL: 0,\n Parameter.SALINITY: 35,\n Parameter.SENSOR_SOURCE: \"1111101\",\n Parameter.TIME_PER_ENSEMBLE: '00:00:00.00',\n Parameter.TIME_PER_PING: '00:01.00',\n Parameter.FALSE_TARGET_THRESHOLD: '050,001',\n Parameter.BANDWIDTH_CONTROL: 0,\n Parameter.CORRELATION_THRESHOLD: 64,\n Parameter.ERROR_VELOCITY_THRESHOLD: 2000,\n Parameter.BLANK_AFTER_TRANSMIT: 704,\n Parameter.CLIP_DATA_PAST_BOTTOM: False,\n Parameter.RECEIVER_GAIN_SELECT: 1,\n Parameter.WATER_REFERENCE_LAYER: '001,005',\n Parameter.NUMBER_OF_DEPTH_CELLS: 100,\n 
Parameter.PINGS_PER_ENSEMBLE: 1,\n Parameter.DEPTH_CELL_SIZE: 800,\n Parameter.TRANSMIT_LENGTH: 0,\n Parameter.PING_WEIGHT: 0,\n Parameter.AMBIGUITY_VELOCITY: 175,\n }\n\n # Set all parameters to a known ground state\n self.assert_set_bulk(params)\n\n ###\n # Instrument Parameteres\n ###\n\n self.assert_set_readonly(Parameter.SERIAL_DATA_OUT)\n self.assert_set_readonly(Parameter.SERIAL_FLOW_CONTROL)\n self.assert_set_readonly(Parameter.SAVE_NVRAM_TO_RECORDER)\n self.assert_set_readonly(Parameter.WATER_PROFILING_MODE)\n self.assert_set_readonly(Parameter.SERIAL_OUT_FW_SWITCHES)\n self.assert_set_readonly(Parameter.BANNER)\n\n self.assert_set(Parameter.CORRELATION_THRESHOLD, 64)\n self.assert_set(Parameter.TIME_PER_ENSEMBLE, '00:00:00.00')\n self.assert_set(Parameter.INSTRUMENT_ID, 0)\n self.assert_set(Parameter.SLEEP_ENABLE, 0)\n self.assert_set(Parameter.POLLED_MODE, False)\n self.assert_set(Parameter.XMIT_POWER, 255)\n self.assert_set(Parameter.SPEED_OF_SOUND, 1485)\n self.assert_set(Parameter.PITCH, 0)\n self.assert_set(Parameter.ROLL, 0) \n self.assert_set(Parameter.SALINITY, 35)\n self.assert_set(Parameter.SENSOR_SOURCE, \"1111101\")\n self.assert_set(Parameter.TIME_PER_PING, '00:01.00')\n self.assert_set(Parameter.FALSE_TARGET_THRESHOLD, '050,001')\n self.assert_set(Parameter.BANDWIDTH_CONTROL, 0)\n self.assert_set(Parameter.ERROR_VELOCITY_THRESHOLD, 2000) \n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 704) \n self.assert_set(Parameter.CLIP_DATA_PAST_BOTTOM, False)\n self.assert_set(Parameter.RECEIVER_GAIN_SELECT, 1)\n self.assert_set(Parameter.WATER_REFERENCE_LAYER, '001,005')\n self.assert_set(Parameter.NUMBER_OF_DEPTH_CELLS, 100)\n self.assert_set(Parameter.PINGS_PER_ENSEMBLE, 1)\n self.assert_set(Parameter.DEPTH_CELL_SIZE, 800)\n self.assert_set(Parameter.TRANSMIT_LENGTH, 0)\n self.assert_set(Parameter.PING_WEIGHT, 0)\n self.assert_set(Parameter.AMBIGUITY_VELOCITY, 175)\n\n \"\"\"\n test a variety of paramater ranges.\n \"\"\"\n\n # INSTRUMENT_ID -- Int 0-255\n self.assert_set_exception(Parameter.INSTRUMENT_ID, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.INSTRUMENT_ID, -1)\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.INSTRUMENT_ID, 0)\n\n # SLEEP_ENABLE: -- (0,1,2)\n self.assert_set(Parameter.SLEEP_ENABLE, 1)\n self.assert_set(Parameter.SLEEP_ENABLE, 2)\n\n self.assert_set_exception(Parameter.SLEEP_ENABLE, -1)\n self.assert_set_exception(Parameter.SLEEP_ENABLE, 3)\n self.assert_set_exception(Parameter.SLEEP_ENABLE, 3.1415926)\n self.assert_set_exception(Parameter.SLEEP_ENABLE, \"LEROY JENKINS\")\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.SLEEP_ENABLE, 0)\n\n # POLLED_MODE: -- (True/False)\n self.assert_set(Parameter.POLLED_MODE, True)\n self.assert_set_exception(Parameter.POLLED_MODE, \"LEROY JENKINS\")\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.POLLED_MODE, False)\n\n # XMIT_POWER: -- Int 0-255\n self.assert_set(Parameter.XMIT_POWER, 0)\n self.assert_set(Parameter.XMIT_POWER, 128)\n self.assert_set(Parameter.XMIT_POWER, 254)\n\n self.assert_set_exception(Parameter.XMIT_POWER, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.XMIT_POWER, 256)\n self.assert_set_exception(Parameter.XMIT_POWER, -1)\n self.assert_set_exception(Parameter.XMIT_POWER, 3.1415926)\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.XMIT_POWER, 255)\n\n # SPEED_OF_SOUND: -- Int 1485 (1400 - 1600)\n self.assert_set(Parameter.SPEED_OF_SOUND, 1400)\n self.assert_set(Parameter.SPEED_OF_SOUND, 1450)\n 
self.assert_set(Parameter.SPEED_OF_SOUND, 1500)\n self.assert_set(Parameter.SPEED_OF_SOUND, 1550)\n self.assert_set(Parameter.SPEED_OF_SOUND, 1600)\n\n self.assert_set_exception(Parameter.SPEED_OF_SOUND, 0)\n self.assert_set_exception(Parameter.SPEED_OF_SOUND, 1399)\n\n self.assert_set_exception(Parameter.SPEED_OF_SOUND, 1601)\n self.assert_set_exception(Parameter.SPEED_OF_SOUND, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.SPEED_OF_SOUND, -256)\n self.assert_set_exception(Parameter.SPEED_OF_SOUND, -1)\n self.assert_set_exception(Parameter.SPEED_OF_SOUND, 3.1415926)\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.SPEED_OF_SOUND, 1485)\n\n # PITCH: -- Int -6000 to 6000\n self.assert_set(Parameter.PITCH, -6000)\n self.assert_set(Parameter.PITCH, -4000)\n self.assert_set(Parameter.PITCH, -2000)\n self.assert_set(Parameter.PITCH, -1)\n self.assert_set(Parameter.PITCH, 0)\n self.assert_set(Parameter.PITCH, 1)\n self.assert_set(Parameter.PITCH, 2000)\n self.assert_set(Parameter.PITCH, 4000)\n self.assert_set(Parameter.PITCH, 6000)\n\n self.assert_set_exception(Parameter.PITCH, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.PITCH, -6001)\n self.assert_set_exception(Parameter.PITCH, 6001)\n self.assert_set_exception(Parameter.PITCH, 3.1415926)\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.PITCH, 0)\n\n # ROLL: -- Int -6000 to 6000\n self.assert_set(Parameter.ROLL, -6000)\n self.assert_set(Parameter.ROLL, -4000)\n self.assert_set(Parameter.ROLL, -2000)\n self.assert_set(Parameter.ROLL, -1)\n self.assert_set(Parameter.ROLL, 0)\n self.assert_set(Parameter.ROLL, 1)\n self.assert_set(Parameter.ROLL, 2000)\n self.assert_set(Parameter.ROLL, 4000)\n self.assert_set(Parameter.ROLL, 6000)\n\n self.assert_set_exception(Parameter.ROLL, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.ROLL, -6001)\n self.assert_set_exception(Parameter.ROLL, 6001)\n self.assert_set_exception(Parameter.ROLL, 3.1415926)\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.ROLL, 0)\n\n # SALINITY: -- Int (0 - 40)\n self.assert_set(Parameter.SALINITY, 0)\n self.assert_set(Parameter.SALINITY, 10)\n self.assert_set(Parameter.SALINITY, 20)\n self.assert_set(Parameter.SALINITY, 30)\n self.assert_set(Parameter.SALINITY, 40)\n\n self.assert_set_exception(Parameter.SALINITY, \"LEROY JENKINS\")\n\n # AssertionError: Unexpected exception: ES no value match (40 != -1)\n self.assert_set_exception(Parameter.SALINITY, -1)\n\n # AssertionError: Unexpected exception: ES no value match (35 != 41)\n self.assert_set_exception(Parameter.SALINITY, 41)\n\n self.assert_set_exception(Parameter.SALINITY, 3.1415926)\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.SALINITY, 35)\n\n # SENSOR_SOURCE: -- (0/1) for 7 positions.\n # note it lacks capability to have a 1 in the #6 position\n self.assert_set(Parameter.SENSOR_SOURCE, \"0000000\")\n self.assert_set(Parameter.SENSOR_SOURCE, \"1111101\")\n self.assert_set(Parameter.SENSOR_SOURCE, \"1010101\")\n self.assert_set(Parameter.SENSOR_SOURCE, \"0101000\")\n self.assert_set(Parameter.SENSOR_SOURCE, \"1100100\")\n\n self.assert_set_exception(Parameter.SENSOR_SOURCE, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.SENSOR_SOURCE, 2)\n self.assert_set_exception(Parameter.SENSOR_SOURCE, -1)\n self.assert_set_exception(Parameter.SENSOR_SOURCE, \"1111112\")\n self.assert_set_exception(Parameter.SENSOR_SOURCE, \"11111112\")\n self.assert_set_exception(Parameter.SENSOR_SOURCE, 3.1415926)\n\n #\n # Reset to good value.\n #\n 
self.assert_set(Parameter.SENSOR_SOURCE, \"1111101\")\n\n # TIME_PER_ENSEMBLE: -- String 01:00:00.00 (hrs:min:sec.sec/100)\n self.assert_set(Parameter.TIME_PER_ENSEMBLE, \"00:00:00.00\")\n self.assert_set(Parameter.TIME_PER_ENSEMBLE, \"00:00:01.00\")\n self.assert_set(Parameter.TIME_PER_ENSEMBLE, \"00:01:00.00\")\n\n self.assert_set_exception(Parameter.TIME_PER_ENSEMBLE, '30:30:30.30')\n self.assert_set_exception(Parameter.TIME_PER_ENSEMBLE, '59:59:59.99')\n self.assert_set_exception(Parameter.TIME_PER_ENSEMBLE, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.TIME_PER_ENSEMBLE, 2)\n self.assert_set_exception(Parameter.TIME_PER_ENSEMBLE, -1)\n self.assert_set_exception(Parameter.TIME_PER_ENSEMBLE, '99:99:99.99')\n self.assert_set_exception(Parameter.TIME_PER_ENSEMBLE, '-1:-1:-1.+1')\n self.assert_set_exception(Parameter.TIME_PER_ENSEMBLE, 3.1415926)\n #\n # Reset to good value.\n #\n\n self.assert_set(Parameter.TIME_PER_ENSEMBLE, \"00:00:00.00\")\n\n # TIME_OF_FIRST_PING: -- str ****/**/**,**:**:** (CCYY/MM/DD,hh:mm:ss)\n # THIS IS AN EVIL COMMAND! NEVER USE.\n #now_1_hour = (dt.datetime.utcnow() + dt.timedelta(hours=1)).strftime(\"%Y/%m/%d,%H:%m:%S\")\n #today_plus_10 = (dt.datetime.utcnow() + dt.timedelta(days=10)).strftime(\"%Y/%m/%d,%H:%m:%S\")\n #today_plus_1month = (dt.datetime.utcnow() + dt.timedelta(days=31)).strftime(\"%Y/%m/%d,%H:%m:%S\")\n #today_plus_6month = (dt.datetime.utcnow() + dt.timedelta(days=183)).strftime(\"%Y/%m/%d,%H:%m:%S\")\n\n #self.assert_set(Parameter.TIME_OF_FIRST_PING, now_1_hour)\n #self.assert_set(Parameter.TIME_OF_FIRST_PING, today_plus_10)\n #self.assert_set(Parameter.TIME_OF_FIRST_PING, today_plus_1month)\n #self.assert_set(Parameter.TIME_OF_FIRST_PING, today_plus_6month)\n\n # AssertionError: Unexpected exception: TG no value match (2013/06/06,06:06:06 != LEROY JENKINS)\n #self.assert_set_exception(Parameter.TIME_OF_FIRST_PING, \"LEROY JENKINS\")\n\n #self.assert_set_exception(Parameter.TIME_OF_FIRST_PING, 2)\n #self.assert_set_exception(Parameter.TIME_OF_FIRST_PING, -1)\n #self.assert_set_exception(Parameter.TIME_OF_FIRST_PING, '99:99.99')\n #self.assert_set_exception(Parameter.TIME_OF_FIRST_PING, '-1:-1.+1')\n #self.assert_set_exception(Parameter.TIME_OF_FIRST_PING, 3.1415926)\n\n # TIME_PER_PING: '00:01.00'\n self.assert_set(Parameter.TIME_PER_PING, '01:00.00')\n self.assert_set(Parameter.TIME_PER_PING, '59:59.99')\n self.assert_set(Parameter.TIME_PER_PING, '30:30.30')\n\n self.assert_set_exception(Parameter.TIME_PER_PING, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.TIME_PER_PING, 2)\n self.assert_set_exception(Parameter.TIME_PER_PING, -1)\n self.assert_set_exception(Parameter.TIME_PER_PING, '99:99.99')\n self.assert_set_exception(Parameter.TIME_PER_PING, '-1:-1.+1')\n self.assert_set_exception(Parameter.TIME_PER_PING, 3.1415926)\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.TIME_PER_PING, '00:01.00')\n\n # FALSE_TARGET_THRESHOLD: string of 0-255,0-255\n self.assert_set(Parameter.FALSE_TARGET_THRESHOLD, \"000,000\")\n self.assert_set(Parameter.FALSE_TARGET_THRESHOLD, \"255,000\")\n self.assert_set(Parameter.FALSE_TARGET_THRESHOLD, \"000,255\")\n self.assert_set(Parameter.FALSE_TARGET_THRESHOLD, \"255,255\")\n\n self.assert_set_exception(Parameter.FALSE_TARGET_THRESHOLD, \"256,000\")\n self.assert_set_exception(Parameter.FALSE_TARGET_THRESHOLD, \"256,255\")\n self.assert_set_exception(Parameter.FALSE_TARGET_THRESHOLD, \"000,256\")\n self.assert_set_exception(Parameter.FALSE_TARGET_THRESHOLD, \"255,256\")\n 
self.assert_set_exception(Parameter.FALSE_TARGET_THRESHOLD, -1)\n\n self.assert_set_exception(Parameter.FALSE_TARGET_THRESHOLD, \"LEROY JENKINS\")\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.FALSE_TARGET_THRESHOLD, \"050,001\")\n\n # BANDWIDTH_CONTROL: 0/1,\n self.assert_set(Parameter.BANDWIDTH_CONTROL, 1)\n\n self.assert_set_exception(Parameter.BANDWIDTH_CONTROL, -1)\n self.assert_set_exception(Parameter.BANDWIDTH_CONTROL, 2)\n self.assert_set_exception(Parameter.BANDWIDTH_CONTROL, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.BANDWIDTH_CONTROL, 3.1415926)\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.BANDWIDTH_CONTROL, 0)\n\n # CORRELATION_THRESHOLD: int 064, 0 - 255\n self.assert_set(Parameter.CORRELATION_THRESHOLD, 50)\n self.assert_set(Parameter.CORRELATION_THRESHOLD, 100)\n self.assert_set(Parameter.CORRELATION_THRESHOLD, 150)\n self.assert_set(Parameter.CORRELATION_THRESHOLD, 200)\n self.assert_set(Parameter.CORRELATION_THRESHOLD, 255)\n\n self.assert_set_exception(Parameter.CORRELATION_THRESHOLD, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.CORRELATION_THRESHOLD, -256)\n self.assert_set_exception(Parameter.CORRELATION_THRESHOLD, -1)\n self.assert_set_exception(Parameter.CORRELATION_THRESHOLD, 3.1415926)\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.CORRELATION_THRESHOLD, 64)\n\n # ERROR_VELOCITY_THRESHOLD: int (0-5000 mm/s) NOTE it enforces 0-9999\n # decimals are truncated to ints\n self.assert_set(Parameter.ERROR_VELOCITY_THRESHOLD, 0)\n self.assert_set(Parameter.ERROR_VELOCITY_THRESHOLD, 128)\n self.assert_set(Parameter.ERROR_VELOCITY_THRESHOLD, 1000)\n self.assert_set(Parameter.ERROR_VELOCITY_THRESHOLD, 2000)\n self.assert_set(Parameter.ERROR_VELOCITY_THRESHOLD, 3000)\n self.assert_set(Parameter.ERROR_VELOCITY_THRESHOLD, 4000)\n self.assert_set(Parameter.ERROR_VELOCITY_THRESHOLD, 5000)\n\n self.assert_set_exception(Parameter.ERROR_VELOCITY_THRESHOLD, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.ERROR_VELOCITY_THRESHOLD, -1)\n self.assert_set_exception(Parameter.ERROR_VELOCITY_THRESHOLD, 10000)\n self.assert_set_exception(Parameter.ERROR_VELOCITY_THRESHOLD, -3.1415926)\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.ERROR_VELOCITY_THRESHOLD, 2000)\n\n # BLANK_AFTER_TRANSMIT: int 704, (0 - 9999)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 0)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 128)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 1000)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 2000)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 3000)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 4000)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 5000)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 6000)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 7000)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 8000)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 9000)\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 9999)\n\n self.assert_set_exception(Parameter.BLANK_AFTER_TRANSMIT, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.BLANK_AFTER_TRANSMIT, -1)\n self.assert_set_exception(Parameter.BLANK_AFTER_TRANSMIT, 10000)\n self.assert_set_exception(Parameter.BLANK_AFTER_TRANSMIT, -3.1415926)\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 704)\n\n # CLIP_DATA_PAST_BOTTOM: True/False,\n self.assert_set(Parameter.CLIP_DATA_PAST_BOTTOM, True)\n\n self.assert_set_exception(Parameter.CLIP_DATA_PAST_BOTTOM, \"LEROY 
JENKINS\")\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.CLIP_DATA_PAST_BOTTOM, False)\n\n # RECEIVER_GAIN_SELECT: (0/1),\n self.assert_set(Parameter.RECEIVER_GAIN_SELECT, 0)\n self.assert_set(Parameter.RECEIVER_GAIN_SELECT, 1)\n\n self.assert_set_exception(Parameter.RECEIVER_GAIN_SELECT, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.RECEIVER_GAIN_SELECT, 2)\n self.assert_set_exception(Parameter.RECEIVER_GAIN_SELECT, -1)\n self.assert_set_exception(Parameter.RECEIVER_GAIN_SELECT, 3.1415926)\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.RECEIVER_GAIN_SELECT, 1)\n\n # WATER_REFERENCE_LAYER: -- int Begin Cell (0=OFF), End Cell (0-100)\n self.assert_set(Parameter.WATER_REFERENCE_LAYER, \"000,001\")\n self.assert_set(Parameter.WATER_REFERENCE_LAYER, \"000,100\")\n self.assert_set(Parameter.WATER_REFERENCE_LAYER, \"000,100\")\n\n self.assert_set_exception(Parameter.WATER_REFERENCE_LAYER, \"255,000\")\n self.assert_set_exception(Parameter.WATER_REFERENCE_LAYER, \"000,000\")\n self.assert_set_exception(Parameter.WATER_REFERENCE_LAYER, \"001,000\")\n self.assert_set_exception(Parameter.WATER_REFERENCE_LAYER, \"100,000\")\n self.assert_set_exception(Parameter.WATER_REFERENCE_LAYER, \"000,101\")\n self.assert_set_exception(Parameter.WATER_REFERENCE_LAYER, \"100,101\")\n self.assert_set_exception(Parameter.WATER_REFERENCE_LAYER, -1)\n self.assert_set_exception(Parameter.WATER_REFERENCE_LAYER, 2)\n self.assert_set_exception(Parameter.WATER_REFERENCE_LAYER, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.WATER_REFERENCE_LAYER, 3.1415926)\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.WATER_REFERENCE_LAYER, \"001,005\")\n\n # NUMBER_OF_DEPTH_CELLS: -- int (1-255) 100,\n self.assert_set(Parameter.NUMBER_OF_DEPTH_CELLS, 1)\n self.assert_set(Parameter.NUMBER_OF_DEPTH_CELLS, 128)\n self.assert_set(Parameter.NUMBER_OF_DEPTH_CELLS, 254)\n\n self.assert_set_exception(Parameter.NUMBER_OF_DEPTH_CELLS, \"LEROY JENKINS\")\n self.assert_set_exception(Parameter.NUMBER_OF_DEPTH_CELLS, 256)\n self.assert_set_exception(Parameter.NUMBER_OF_DEPTH_CELLS, 0)\n self.assert_set_exception(Parameter.NUMBER_OF_DEPTH_CELLS, -1)\n self.assert_set_exception(Parameter.NUMBER_OF_DEPTH_CELLS, 3.1415926)\n\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.NUMBER_OF_DEPTH_CELLS, 100)\n\n # PINGS_PER_ENSEMBLE: -- int (0-16384) 1,\n self.assert_set(Parameter.PINGS_PER_ENSEMBLE, 0)\n self.assert_set(Parameter.PINGS_PER_ENSEMBLE, 16384)\n\n self.assert_set_exception(Parameter.PINGS_PER_ENSEMBLE, 16385)\n self.assert_set_exception(Parameter.PINGS_PER_ENSEMBLE, -1)\n self.assert_set_exception(Parameter.PINGS_PER_ENSEMBLE, 32767)\n self.assert_set_exception(Parameter.PINGS_PER_ENSEMBLE, 3.1415926)\n self.assert_set_exception(Parameter.PINGS_PER_ENSEMBLE, \"LEROY JENKINS\")\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.PINGS_PER_ENSEMBLE, 1)\n\n # DEPTH_CELL_SIZE: int 80 - 3200\n self.assert_set(Parameter.DEPTH_CELL_SIZE, 80)\n self.assert_set(Parameter.PINGS_PER_ENSEMBLE, 3200)\n\n self.assert_set_exception(Parameter.PING_WEIGHT, 3201)\n self.assert_set_exception(Parameter.PING_WEIGHT, -1)\n self.assert_set_exception(Parameter.PING_WEIGHT, 2)\n self.assert_set_exception(Parameter.PING_WEIGHT, 3.1415926)\n self.assert_set_exception(Parameter.PING_WEIGHT, \"LEROY JENKINS\")\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.PINGS_PER_ENSEMBLE, 0)\n\n # TRANSMIT_LENGTH: int 0 to 3200\n self.assert_set(Parameter.TRANSMIT_LENGTH, 80)\n 
self.assert_set(Parameter.TRANSMIT_LENGTH, 3200)\n\n self.assert_set_exception(Parameter.TRANSMIT_LENGTH, 3201)\n self.assert_set_exception(Parameter.TRANSMIT_LENGTH, -1)\n self.assert_set_exception(Parameter.TRANSMIT_LENGTH, 3.1415926)\n self.assert_set_exception(Parameter.TRANSMIT_LENGTH, \"LEROY JENKINS\")\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.TRANSMIT_LENGTH, 0)\n\n # PING_WEIGHT: (0/1),\n self.assert_set(Parameter.PING_WEIGHT, 0)\n self.assert_set(Parameter.PING_WEIGHT, 1)\n\n self.assert_set_exception(Parameter.PING_WEIGHT, 2)\n self.assert_set_exception(Parameter.PING_WEIGHT, -1)\n self.assert_set_exception(Parameter.PING_WEIGHT, 3.1415926)\n self.assert_set_exception(Parameter.PING_WEIGHT, \"LEROY JENKINS\")\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.PING_WEIGHT, 0)\n\n # AMBIGUITY_VELOCITY: int 2 - 700\n self.assert_set(Parameter.AMBIGUITY_VELOCITY, 2)\n self.assert_set(Parameter.AMBIGUITY_VELOCITY, 111)\n self.assert_set(Parameter.AMBIGUITY_VELOCITY, 222)\n self.assert_set(Parameter.AMBIGUITY_VELOCITY, 333)\n self.assert_set(Parameter.AMBIGUITY_VELOCITY, 444)\n self.assert_set(Parameter.AMBIGUITY_VELOCITY, 555)\n self.assert_set(Parameter.AMBIGUITY_VELOCITY, 666)\n self.assert_set(Parameter.AMBIGUITY_VELOCITY, 700)\n\n self.assert_set_exception(Parameter.AMBIGUITY_VELOCITY, 0)\n self.assert_set_exception(Parameter.AMBIGUITY_VELOCITY, 1)\n self.assert_set_exception(Parameter.AMBIGUITY_VELOCITY, -1)\n self.assert_set_exception(Parameter.AMBIGUITY_VELOCITY, 3.1415926)\n self.assert_set_exception(Parameter.AMBIGUITY_VELOCITY, \"LEROY JENKINS\")\n #\n # Reset to good value.\n #\n self.assert_set(Parameter.AMBIGUITY_VELOCITY, 175)\n\n # Test read only raise exceptions on set.\n self.assert_set_exception(Parameter.SERIAL_DATA_OUT, '000 000 111')\n self.assert_set_exception(Parameter.SERIAL_FLOW_CONTROL, '10110')\n self.assert_set_exception(Parameter.SAVE_NVRAM_TO_RECORDER, False)\n self.assert_set_exception(Parameter.SERIAL_OUT_FW_SWITCHES, '110100100')\n self.assert_set_exception(Parameter.WATER_PROFILING_MODE, 0)\n self.assert_set_exception(Parameter.BANNER, True)\n\n # TODO: remove this its only here for testing to assert that it\n # isn't being caused by a leftover funky value..\n self.assert_set(Parameter.CORRELATION_THRESHOLD, 64)\n self.assert_set(Parameter.TIME_PER_ENSEMBLE, '00:00:00.00')\n self.assert_set(Parameter.INSTRUMENT_ID, 0)\n self.assert_set(Parameter.SLEEP_ENABLE, 0)\n self.assert_set(Parameter.POLLED_MODE, False)\n self.assert_set(Parameter.XMIT_POWER, 255)\n self.assert_set(Parameter.SPEED_OF_SOUND, 1485)\n self.assert_set(Parameter.PITCH, 0)\n self.assert_set(Parameter.ROLL, 0) \n self.assert_set(Parameter.SALINITY, 35)\n self.assert_set(Parameter.SENSOR_SOURCE, \"1111101\")\n self.assert_set(Parameter.TIME_PER_PING, '00:01.00')\n self.assert_set(Parameter.FALSE_TARGET_THRESHOLD, '050,001')\n self.assert_set(Parameter.BANDWIDTH_CONTROL, 0)\n self.assert_set(Parameter.ERROR_VELOCITY_THRESHOLD, 2000) \n self.assert_set(Parameter.BLANK_AFTER_TRANSMIT, 704) \n self.assert_set(Parameter.CLIP_DATA_PAST_BOTTOM, False)\n self.assert_set(Parameter.RECEIVER_GAIN_SELECT, 1)\n self.assert_set(Parameter.WATER_REFERENCE_LAYER, '001,005')\n self.assert_set(Parameter.NUMBER_OF_DEPTH_CELLS, 100)\n self.assert_set(Parameter.PINGS_PER_ENSEMBLE, 1)\n self.assert_set(Parameter.DEPTH_CELL_SIZE, 800)\n self.assert_set(Parameter.TRANSMIT_LENGTH, 0)\n self.assert_set(Parameter.PING_WEIGHT, 0)\n self.assert_set(Parameter.AMBIGUITY_VELOCITY, 175)", 
"title": "" }, { "docid": "3cf9409e6d51b46ce7a41b355d423676", "score": "0.53219724", "text": "def test_extras_graphs_partial_update(self):\n pass", "title": "" }, { "docid": "0bbd91072e36d50aa8d53415d9937f32", "score": "0.53183764", "text": "def test_change_params():\n C_fid = cp.CosmoPie(defaults.cosmology.copy(),'jdem')\n\n\n f_set_in1 = np.zeros(3,dtype=object)\n for i in range(0,3):\n f_set1 = np.random.rand(6,6)\n f_set1 = np.dot(f_set1.T,f_set1)\n f_set1 = f_set1+np.diag(np.random.rand(6))\n f_set_in1[i] = f_set1\n f_set_in2 = rotate_jdem_to_lihu(f_set_in1,C_fid)\n f_set_in3 = rotate_lihu_to_jdem(f_set_in2,C_fid)\n f_set_in4 = rotate_jdem_to_lihu(f_set_in3,C_fid)\n for i in range(0,3):\n assert np.allclose(f_set_in1[i],f_set_in3[i])\n assert np.allclose(f_set_in2[i],f_set_in4[i])", "title": "" }, { "docid": "568cea7d8a8c2d1f9aa3d0b0b7abc91b", "score": "0.5317378", "text": "def test_set_params(self):\n loc = mticker.NullLocator()\n with pytest.warns(UserWarning):\n loc.set_params()", "title": "" }, { "docid": "ab62a758b889031354213ad6f7f7278a", "score": "0.53079605", "text": "async def test_lazy_error_binary_sensor(\n hass: HomeAssistant, start_expect, end_expect, mock_do_cycle: FrozenDateTimeFactory\n) -> None:\n assert hass.states.get(ENTITY_ID).state == start_expect\n await do_next_cycle(hass, mock_do_cycle, 11)\n assert hass.states.get(ENTITY_ID).state == start_expect\n await do_next_cycle(hass, mock_do_cycle, 11)\n assert hass.states.get(ENTITY_ID).state == end_expect", "title": "" }, { "docid": "d6df45c17dd1598ca7c2826530a7f54b", "score": "0.528704", "text": "def test_clone_change_param(self, cosmo):\n super().test_clone_change_param(cosmo)\n\n # `w` params\n c = cosmo.clone(w0=0.1, wa=0.2)\n assert c.w0 == 0.1\n assert c.wa == 0.2\n for n in (set(cosmo.__parameters__) - {\"w0\", \"wa\"}):\n v = getattr(c, n)\n if v is None:\n assert v is getattr(cosmo, n)\n else:\n assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, \"unit\", 1))", "title": "" }, { "docid": "26263a43960fcd3dec631aad0abe5d58", "score": "0.52747184", "text": "def testSubmitCycle(self):\n self.patch_mock.SetCQDependencies(self.patches[0], [self.patches[1]])\n self.SubmitPool(submitted=self.patches)", "title": "" }, { "docid": "33d8687c09a70b4d159664cbef604c99", "score": "0.5267747", "text": "def _checkChanges(self, paramName):\n self.setParamFromVar(paramName)\n param = self.protocol.getParam(paramName)\n \n for d in param._dependants:\n self._checkCondition(d)", "title": "" }, { "docid": "1ebe85177a0bc28796b1e1da8bdfecb3", "score": "0.52601963", "text": "def test_update_args(self):\n\n rect = rectangle.Rectangle(5, 5)\n\n # check for function\n self.assertTrue('update' in dir(rect))\n\n # test id update\n rect.update(89)\n self.assertEqual(rect.id, 89)\n\n # test width update\n rect.update(89, 2)\n self.assertEqual(rect.width, 2)\n\n # test height upadte\n rect.update(89, 2, 3)\n self.assertEqual(rect.height, 3)\n\n # test x update\n rect.update(89, 2, 3, 4)\n self.assertEqual(rect.x, 4)\n\n # test y update\n rect.update(89, 2, 3, 4, 2)\n self.assertEqual(rect.y, 2)", "title": "" }, { "docid": "cb04bf8ac9c9bb8a41a05aa2077817d4", "score": "0.5257355", "text": "def test_cyclic_dependency(self):\n self.fail(\"Not implemented \")", "title": "" }, { "docid": "4ca80daea6fd66a0db7742b935c135e0", "score": "0.52372885", "text": "def test_update(self):\n r1 = Rectangle(10, 10, 10, 10, 1)\n self.assertEqual(r1.__str__(), \"[Rectangle] (1) 10/10 - 10/10\")\n r1.update(89)\n self.assertEqual(r1.__str__(), 
\"[Rectangle] (89) 10/10 - 10/10\")\n r1.update(88, 1)\n self.assertEqual(r1.__str__(), \"[Rectangle] (88) 10/10 - 1/10\")\n r1.update(87, 2, 1)\n self.assertEqual(r1.__str__(), \"[Rectangle] (87) 10/10 - 2/1\")\n r1.update(86, 3, 2, 1)\n self.assertEqual(r1.__str__(), \"[Rectangle] (86) 1/10 - 3/2\")\n r1.update(85, 4, 3, 2, 1)\n self.assertEqual(r1.__str__(), \"[Rectangle] (85) 2/1 - 4/3\")\n r1.update(84, 5, 4, 3, 2)\n self.assertEqual(r1.__str__(), \"[Rectangle] (84) 3/2 - 5/4\")", "title": "" }, { "docid": "ffa951c23462c3d947a17de8e24ade5c", "score": "0.523648", "text": "def test_update__1(self):\n decls = SConsArguments.Declarations._ArgumentDeclarations()\n decls._ArgumentDeclarations__ensure_not_committed = mock.Mock(name = '__ensure_not_commited')\n decls._ArgumentDeclarations__update_supp_dicts = mock.Mock(name = '__update_supp_dicts')\n with mock.patch.object(SConsArguments.Declarations._ArgumentDeclarations, '_ArgumentDeclarations__validate_values') as __validate_values:\n decls.update({'foo' : 'bar'}, geez = 123)\n try:\n __validate_values.assert_called_once_with({'foo' : 'bar'}, geez = 123)\n decls._ArgumentDeclarations__ensure_not_committed.assert_called_once_with()\n decls._ArgumentDeclarations__update_supp_dicts.assert_called_once_with()\n except AssertionError as e:\n self.fail(str(e))\n self.assertEqual(decls['foo'], 'bar')\n self.assertEqual(decls['geez'], 123)", "title": "" }, { "docid": "d80f1dbf51ac9f38cda74d07aa7d3281", "score": "0.5235788", "text": "def test_update_player(self):\n self.game.update_player()\n self.assertEqual(self.game.turn, 1, \"Fails to increment Game.turn\")", "title": "" }, { "docid": "a2765077e607ab12e0314e44a537e3e2", "score": "0.5231093", "text": "def test_4_args(self):\n self.set_nb_to_zero()\n r1 = Rectangle(2, 1, 10, 0)\n r1.update(100, 200, 300, 400)\n self.assertEqual(r1.id, 100)\n self.assertEqual(r1.width, 200)\n self.assertEqual(r1.height, 300)\n self.assertEqual(r1.x, 400)", "title": "" }, { "docid": "5c9903911a368a900b76e149827e22d4", "score": "0.52244467", "text": "def test_clone_change_param(self, cosmo):\n super().test_clone_change_param(cosmo)\n\n # `w` params\n c = cosmo.clone(wp=0.1, wa=0.2, zp=14)\n assert c.wp == 0.1\n assert c.wa == 0.2\n assert c.zp == 14\n for n in (set(cosmo.__parameters__) - {\"wp\", \"wa\", \"zp\"}):\n v = getattr(c, n)\n if v is None:\n assert v is getattr(cosmo, n)\n else:\n assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, \"unit\", 1))", "title": "" }, { "docid": "2d012fbb440ce7182e938bb96d184b97", "score": "0.52210957", "text": "def test_set_params(self):\n params = Some.VALID_PARAMS + Some.INVALID_PARAMS\n\n set_params = self._prepare_set_params(params)\n\n set_result = self.driver.set_resource(set_params)\n\n _print_dict(\"\\nSET set_result\", set_result)\n\n # now, get the values for the valid parameters and check\n get_result = self.driver.get_resource(Some.VALID_PARAMS)\n\n _print_dict(\"\\nGET get_result\", get_result)\n\n # verify the new values are the ones we wanted\n for cp in Some.VALID_PARAMS:\n self.assertEqual(set_params[cp], get_result[cp])", "title": "" }, { "docid": "d4124a7f8dcbe2a32d7463517cd16056", "score": "0.5219314", "text": "def test_port_update(self, mock_refresh):\n self.agt.port_update('context')\n mock_refresh.assert_called_once_with()", "title": "" }, { "docid": "42b17e730b13e44712ce7a0e926afb13", "score": "0.5215645", "text": "def test_update_vdu(self):\n pass", "title": "" }, { "docid": "41a4d8308fe9a6c685ab38def20a74b4", "score": "0.5215595", "text": 
"def test_all_params(self):\r\n persistence_helper = PersistenceHelper(use_riak=True, is_sync=True)\r\n self.assertEqual(persistence_helper.use_riak, True)\r\n self.assertEqual(persistence_helper.is_sync, True)", "title": "" }, { "docid": "3af5bc81e5881a50a72752d1d3ab6332", "score": "0.5215115", "text": "def test_eq(self):\n self.assertEqual(self.same_stay_1, self.same_stay_2)", "title": "" }, { "docid": "a4f36f6d51c57e0d6cf563b060c01e04", "score": "0.52064794", "text": "def flatten_parameters_to_reference(self, cycle): \n if (self.working_mode == 'inversion'):\n for k, v in self.atmospheres.items():\n v.set_reference(cycle=cycle)\n \n for k, v in self.spectrum.items(): \n v.stokes_cycle[cycle] = copy.deepcopy(v.stokes)\n if (v.interpolate_to_lr):\n v.stokes_lr_cycle[cycle] = copy.deepcopy(v.stokes_lr)\n\n if (self.working_mode == 'inversion'): \n v.chi2_cycle[cycle] = copy.deepcopy(v.chi2) \n v.bic_cycle[cycle] = copy.deepcopy(self.n_free_parameters * np.log(v.dof) + v.dof * np.log(v.rss))\n v.aic_cycle[cycle] = copy.deepcopy(2.0 * self.n_free_parameters + v.dof * np.log(v.rss))", "title": "" }, { "docid": "7e8d4ebc9fdd2dd968af473824e0ae89", "score": "0.5201005", "text": "def test_update_connection(self):\n pass", "title": "" }, { "docid": "f5d1a2cae12d66c1f2a7683b7a4e4676", "score": "0.5198233", "text": "def test_2_args(self):\n self.set_nb_to_zero()\n r1 = Rectangle(2, 1, 10, 0)\n r1.update(100, 200)\n self.assertEqual(r1.id, 100)\n self.assertEqual(r1.width, 200)", "title": "" }, { "docid": "d5959b7e56e832bfd4b6cf3060e68fa6", "score": "0.51950717", "text": "def test_update_challenge_activity(self):\n pass", "title": "" }, { "docid": "58a967e1275437026817de6645892f13", "score": "0.5185644", "text": "def getCycle(self, **kwargs):\n \n pass", "title": "" }, { "docid": "61e8c54900d7c2faa92ee900c7006940", "score": "0.51724523", "text": "def test_check_for_cycles_negative(self):\n\n # There is one cycle involving case, sample, and analyte, and a second\n # cycle that also includes portion.\n schemata = {\n 'analyte': {\n 'links': [\n {\n 'subgroup': [\n {'target_type': 'portion'},\n {'target_type': 'sample'},\n ]\n }\n ]\n },\n 'case': {\n 'links': [\n {'target_type': 'analyte'},\n ]\n },\n 'portion': {\n 'links': [\n {'target_type': 'sample'},\n ]\n },\n 'sample': {\n 'links': [\n {'target_type': 'case'},\n {'target_type': 'sample'},\n {'target_type': 'tissue_source_site'},\n ]\n },\n 'slide': {\n 'links': [\n {\n 'subgroup': [\n {'target_type': 'portion'},\n {'target_type': 'sample'},\n ]\n }\n ]\n },\n 'tissue_source_site': {'links': []},\n }\n\n with self.assertRaisesRegex(AssertionError, 'cycle detected'):\n check_for_cycles(schemata)", "title": "" }, { "docid": "35a743b2bbe73d08c3ab6e65d10c3768", "score": "0.5170865", "text": "def testNextPass(self):", "title": "" }, { "docid": "d1a4dd1798a1934319a5fb8c464304c4", "score": "0.5169077", "text": "def test_set_params(self):\n fixed = mticker.FixedLocator(range(0, 24), nbins=5)\n fixed.set_params(nbins=7)\n assert fixed.nbins == 7", "title": "" }, { "docid": "4a5ee2259b883ad121cf8f42af5ba921", "score": "0.5168377", "text": "def assertParamVals(self, params, correct_params):\n self.assertEqual(set(params.keys()), set(correct_params.keys()))\n for (key, val) in params.iteritems():\n if key == Parameter.DATE_TIME: \n continue\n correct_val = correct_params[key]\n if isinstance(val, float):\n # Verify to 5% of the larger value.\n max_val = max(abs(val), abs(correct_val))\n self.assertAlmostEqual(val, correct_val, delta=max_val*.01)\n\n else:\n 
# int, bool, str, or tuple of same\n self.assertEqual(val, correct_val)", "title": "" }, { "docid": "149fa3b53d90dc13b45266c2123a3a28", "score": "0.51628315", "text": "def test_advance_stage_two(self):\n self.game.currentStage = 2\n self.game.angryDieA.currentValue = \"ANGRY\"\n self.game.angryDieB.currentValue = \"4\"\n self.game.stage_check()\n self.assertEqual(3, self.game.currentStage,\"Stage didn't advanced when it should.\")\n self.game.currentStage = 2\n self.game.angryDieA.currentValue = \"4\"\n self.game.angryDieB.currentValue = \"ANGRY\"\n self.game.stage_check()\n self.assertEqual(3, self.game.currentStage,\"Stage didn't advanced when it should.\")", "title": "" }, { "docid": "9ec8e1825bb3260f8c137a70603dc7cb", "score": "0.5148819", "text": "def test_update_progress() -> None:\n # test a random update\n alert = sw.Alert()\n alert.update_progress(0.5)\n assert alert.progress_bar.n == 0.5\n assert alert.viz is True\n\n # show that a value > 1 raise an error\n with pytest.raises(ValueError):\n alert.reset()\n alert.update_progress(1.5)\n\n # check that if total is set value can be more than 1\n alert.reset()\n alert.update_progress(0, total=100)\n alert.update_progress(50)\n assert alert.progress_bar.n == 50\n assert alert.viz is True\n\n return", "title": "" }, { "docid": "9f61e968790f7cb78eb3773a4304cbc5", "score": "0.51406205", "text": "def test_stream_update_values(self):\n pass", "title": "" }, { "docid": "0e64a78091126a414b6e4b234f0f85ab", "score": "0.51328874", "text": "def test_is_dirty(self):\n address = random_address()\n delay_prop = ExtendedProperty(\n address, \"delay\", int, False, False, PropertyType.ADVANCED\n )\n prescaler_prop = ExtendedProperty(\n address, \"delay\", int, False, False, PropertyType.ADVANCED\n )\n\n test_prop = MomentaryDelayProperty(\n address, \"test_prop\", delay_prop, prescaler_prop\n )\n assert not test_prop.is_dirty\n delay_prop.set_value(0)\n assert not test_prop.is_dirty\n prescaler_prop.set_value(0)\n assert not test_prop.is_dirty\n\n delay_prop.new_value = randint(1, 255)\n assert test_prop.is_dirty\n\n delay_prop.new_value = None\n assert not test_prop.is_dirty\n\n prescaler_prop.new_value = randint(1, 255)\n assert test_prop.is_dirty\n\n prescaler_prop.new_value = None\n assert not test_prop.is_dirty\n\n delay_prop.new_value = randint(1, 255)\n assert test_prop.is_dirty\n\n delay_prop.new_value = randint(1, 255)\n prescaler_prop.new_value = randint(1, 255)\n assert test_prop.is_dirty\n\n delay_prop.new_value = None\n prescaler_prop.new_value = None\n assert not test_prop.is_dirty", "title": "" }, { "docid": "c801cc60e67bc02552df501a30b07a96", "score": "0.5124957", "text": "def test_update_properties(metric, method):\n m = metric()\n x = torch.randn(\n 1,\n ).squeeze()\n for i in range(10):\n if method == \"update\":\n m.update(x)\n if method == \"forward\":\n _ = m(x)\n assert m.update_called\n assert m.update_count == i + 1\n\n m.reset()\n assert not m.update_called\n assert m.update_count == 0", "title": "" }, { "docid": "cfa84978f3147b558cdb5f2a93e710ae", "score": "0.51196307", "text": "def setCycleStep(self, val='True', **kwargs):\n \n pass", "title": "" }, { "docid": "6295b2995eae991388d366b49c7759a6", "score": "0.51173973", "text": "def test_update(self):\n subscriber = TemperatureSubscriber()\n context = {\"temperature\": 20}\n expected = \"Current temperature is 20°C\"\n assert expected == subscriber.update(context)", "title": "" }, { "docid": "011ba6b269b41c36cd58875265085485", "score": "0.51173717", "text": "def 
test_update_challenge(self):\n pass", "title": "" }, { "docid": "585c0e09b95a4c5dbd6ef8cfc90fa68c", "score": "0.51131195", "text": "def test_update(self):\n subscriber = HumiditySubscriber()\n context = {\"humidity\": 57}\n expected = \"Current humidity level is 57%\"\n assert expected == subscriber.update(context)", "title": "" }, { "docid": "7c58e438166110d426a6b88303d0bfb3", "score": "0.5109691", "text": "def test_get_all_cycles_of_size(self):\n cycle_list = self.graph.get_all_cycles_of_size(6)\n self.assertEqual(len(cycle_list), 0)\n edge = Edge(self.graph.vertices[0], self.graph.vertices[3])\n self.graph.add_edge(edge) # To create a cycle of length 4\n edge = Edge(self.graph.vertices[0], self.graph.vertices[5])\n self.graph.add_edge(edge) # To create a cycle of length 6 and another cycle of length 4\n cycle_list = self.graph.get_all_cycles_of_size(4)\n self.assertEqual(len(cycle_list), 2)\n self.assertEqual(len(cycle_list[0]), 4)\n self.assertEqual(len(cycle_list[1]), 4)", "title": "" }, { "docid": "6676f8fb1b37c5c65a9582a512b609ff", "score": "0.51023436", "text": "def test_trajectory_wheelbase_cannot_be_changed(self):\n trajectories = dict()\n start = WheelbaseTrajectory(1., 1., tuple())\n update_actuator(trajectories, \"foo\", start)\n\n with self.assertRaises(ValueError):\n update_actuator(trajectories, \"foo\", TorqueSetpoint(10.))", "title": "" }, { "docid": "3361f66ced29d34dba6cd7cb6081b1b6", "score": "0.50995135", "text": "def test_3_args(self):\n self.set_nb_to_zero()\n r1 = Rectangle(2, 1, 10, 0)\n r1.update(100, 200, 300)\n self.assertEqual(r1.id, 100)\n self.assertEqual(r1.width, 200)\n self.assertEqual(r1.height, 300)", "title": "" }, { "docid": "631e1b92af282deb8f99a82d4a3ec200", "score": "0.50918704", "text": "def check_params(old, new):\n for k in old.keys():\n old_val = old[k]\n new_val = new[k]\n if re.match('.*vf.*', k):\n assert_raises(AssertionError, assert_array_equal,\n old_val, new_val)\n else:\n assert_array_equal(old_val, new_val)", "title": "" }, { "docid": "42c9a5223a70123b4f3d4ab11be30d8f", "score": "0.50903136", "text": "def test_change_test(self):\n self.assertEqual(change(1423), {500: 2, 200: 2, 100: 0, 50: 0, 20: 1, 10: 0, 5: 0, 2: 1, 1: 1})", "title": "" }, { "docid": "263e3db9f2d6a9ecbb4053a5625ba645", "score": "0.50895876", "text": "def test_test_setup(self):\n\n\n params = self.model.get_params()\n\n grads1 = T.grad(self.prob, params)\n f1 = function([], grads1)\n gv1 = f1()\n\n grads2 = T.grad(self.prob, params, consider_constant = self.stats.d.values() )\n f2 = function([], grads2)\n gv2 = f2()\n\n fails = {}\n for g1, g2, p in zip(gv1,gv2,params):\n d = np.abs(g1-g2).max()\n if d > self.tol:\n fails[p.name] = d\n\n if len(fails.keys()) > 0:\n raise Exception(\"gradients wrt parameters should not change if \" + \\\n \" the suff stats are considered constant, but the following \"+\\\n \" gradients changed by the following amounts: \"+str(fails)+\\\n \" (this indicates a problem in the testing setup itself) \")", "title": "" }, { "docid": "967437c5a63bfd9af2ec3ba79e12e2d0", "score": "0.50884247", "text": "def assert_internal_state(self):\n\n\tassert len(self._dict) == len(self._list)\n\tfor k, (i, v) in self._dict.items(): # noqa\n\t\tk2, v2 = self._list[i]\n\t\tassert k2 == k\n\t\tassert v2 is v", "title": "" } ]
ffbcdbe876282ffb2f40846e205be13a
obj.text_payload is a base64-encoded payload as text.
[ { "docid": "65369942118cfb69adb3b58495223283", "score": "0.6641096", "text": "def test_text_payload(self):\n payload = pem.parse(KEY_PEM_PKCS5_UNENCRYPTED)[0].text_payload\n\n assert (\n KEY_PEM_PKCS5_UNENCRYPTED_PAYLOAD.decode().replace(\"\\n\", \"\")\n == payload\n )\n assert isinstance(payload, str)", "title": "" } ]
[ { "docid": "3b63a512897ae29d232ed7d2ee66b267", "score": "0.63219917", "text": "def _encode_text(text):\n message_bytes = text.encode('ascii')\n base64_bytes = base64.b64encode(message_bytes)\n base64_text = base64_bytes.decode('ascii')\n return base64_text", "title": "" }, { "docid": "fe66da19f694fd9854ffa3a0cc0493c3", "score": "0.6266693", "text": "def data_as_text(self) -> str:\n return base64.b64decode(self.data).decode(\"utf-8\")", "title": "" }, { "docid": "562beff978385c7eee17f1526186c13c", "score": "0.61690164", "text": "def _encode_payload(self, payload):\n\n wire = json.dumps(payload, ensure_ascii=False)\n return six.ensure_binary(wire)", "title": "" }, { "docid": "54d053851d9503384c7952af5cf2cc6c", "score": "0.6154445", "text": "def payload(self, p: bytes):\n if isinstance(p, str):\n p = p.encode(\"utf-8\")\n elif isinstance(p, utils.CoAPPayload):\n p = p.raw\n self._payload.payload = p", "title": "" }, { "docid": "a5f241adff95da5d90885e0fc3b4eaa5", "score": "0.61390704", "text": "def _base64_encode_payload(self, secret_doc):\n\n payload = secret_doc.data\n secret_type = self._get_secret_type(secret_doc.schema)\n\n # NOTE(felipemonteiro): The logic for the 2 conditions below is\n # enforced from Barbican's Python client. Some pre-processing and\n # transformation is needed to make Barbican work with non-compatible\n # formats.\n if not payload and payload is not False:\n # There is no point in even bothering to encrypt an empty\n # body, which just leads to needless overhead, so return\n # early.\n LOG.info('Barbican does not accept empty payloads so '\n 'Deckhand will not encrypt document [%s, %s] %s.',\n secret_doc.schema, secret_doc.layer, secret_doc.name)\n secret_doc.storage_policy = types.CLEARTEXT\n elif not isinstance(\n payload, (six.text_type, six.binary_type)):\n LOG.debug('Forcibly setting secret_type=opaque and '\n 'base64-encoding non-string payload for '\n 'document [%s, %s] %s.', secret_doc.schema,\n secret_doc.layer, secret_doc.name)\n # NOTE(felipemonteiro): base64-encoding the non-string payload is\n # done for serialization purposes, not for security purposes.\n # 'opaque' is used to avoid Barbican doing any further\n # serialization server-side.\n secret_type = 'opaque'\n try:\n payload = base64.encode_as_text(six.text_type(payload))\n except Exception:\n message = ('Failed to base64-encode payload of type %s '\n 'for Barbican storage.', type(payload))\n LOG.error(message)\n raise errors.UnknownSubstitutionError(\n src_schema=secret_doc.schema,\n src_layer=secret_doc.layer, src_name=secret_doc.name,\n schema='N/A', layer='N/A', name='N/A', details=message)\n return secret_type, payload", "title": "" }, { "docid": "6312745c73019cc17630a4eff856677e", "score": "0.59765613", "text": "def _decode_text(base64_message):\n base64_bytes = base64_message.encode('ascii')\n message_bytes = base64.b64decode(base64_bytes)\n message = message_bytes.decode('ascii')\n return message", "title": "" }, { "docid": "aafe55e59066eeec2717d8e254dcc25a", "score": "0.59291196", "text": "def b64_decode(text):\n\n return base64.b64decode(text).decode()", "title": "" }, { "docid": "ccb9db3d6e1b9c0306b9ed11c5db744d", "score": "0.5830508", "text": "def test_simple_payload(self):\n text = \"English: mountain pass\"\n response = self.client.post(\n \"/parse_encoded_text/\", text, content_type=\"text/plain\"\n )\n self.assertEqual(response.content, text.encode())", "title": "" }, { "docid": "0bb7a12615c1abcb6df9f3ca06319dd5", "score": "0.5731676", "text": "def payload(self) -> str:\n return 
self[\"payload\"]", "title": "" }, { "docid": "c2d1d08680d4d1489006bc768a675d08", "score": "0.5700011", "text": "def get_payload():", "title": "" }, { "docid": "dad1f02b6d9ad35b1ce2ce7ed82c0f54", "score": "0.567945", "text": "def decode(payload: str) -> bytes:\n\n if sys.version_info[0] < 3:\n # Base64 encode/decode does not accept `str` as input in python2\n # By running `future-fstrings-show`, it adds `unicode_literals` that\n # redefines some classes so the default behaviour changes\n def native_str_to_bytes(s, encoding=None):\n from future.types import newbytes\n return newbytes(s, encoding=encoding)\n payload = native_str_to_bytes(payload, encoding='utf-8')\n\n return base64.b64decode(payload, altchars=b'-_')", "title": "" }, { "docid": "8f710248a539e5d2f1ae438ea714b783", "score": "0.5673324", "text": "def encode(payload: Union[bytes, str]) -> bytes:\n if isinstance(payload, str):\n payload = payload.encode('utf-8')\n\n return base64.b64encode(payload, altchars=b'-_')", "title": "" }, { "docid": "4ab5772f1e2f0399ca7c89f751a3a67a", "score": "0.56535834", "text": "def decodedPayload(self, part, asStr=True):\n payload = part.get_payload(decode=1)\n\n if asStr and isinstance(payload, bytes):\n encodings = []\n enchdr = part.get_content_charset()\n\n if enchdr:\n encodings += [enchdr]\n\n encodings += [sys.getdefaultencoding()]\n encodings += ['latin1', 'utf8']\n\n for enc in encodings:\n try:\n payload = payload.decode(enc)\n break\n except (UnicodeError, LookupError):\n pass\n else:\n # some exception\n payload = '--Sorry: cannot decode Unicode text--'\n return payload", "title": "" }, { "docid": "c7230c96a396e52ca1491204dc35970c", "score": "0.5611554", "text": "def decode(self, text: bytes, **kwargs) -> bytes:\n return text", "title": "" }, { "docid": "af6ad96e456991bbba56f51eaab0fd0d", "score": "0.5591929", "text": "def encode_text(text, username):\n # Create text string.\n text = base64.b64encode(text.encode('utf-8')).decode('utf-8')\n msg = { \n 'msg': text,\n 'from': username,\n }\n return json.dumps(msg)", "title": "" }, { "docid": "572d1ca6a41d30279820fa12976ab249", "score": "0.55827826", "text": "def _encode_payload(self, payload):\n return urlencode(payload)", "title": "" }, { "docid": "b9a5cbc00c4c4949a719542b2aef9723", "score": "0.55178905", "text": "def parse_payload(self):\r\n pass", "title": "" }, { "docid": "ef846b09d036648559050cb77751ad72", "score": "0.55175376", "text": "def on_payload(self, payload: Payload) -> None:", "title": "" }, { "docid": "7e19d94b3e812ee55f759066bfca58af", "score": "0.5503595", "text": "def encode(self, text: bytes, **kwargs) -> bytes:\n return text", "title": "" }, { "docid": "df526105b835ee33248f4ac77fa1cd10", "score": "0.5496658", "text": "def render_payload(self, event):\n str_payload = \"\"\n for modifier in event['attributes']:\n #logging.debug(\"Found modifier: %s\", modifier)\n keyword = modifier\n value = event['attributes'][keyword]\n\n #logging.debug(\"Parsed keyword: %s value: %s\", keyword, value)\n\n if 'contents' in event:\n for contentobj in event['contents']:\n content_value = contentobj['value']\n content_type = contentobj['type']\n if (content_type == 'string'):\n str_payload = \"%s%s\" % (str_payload, self.parse_content(content_value))\n elif (content_type == 'file'):\n if ARGS.no_filecontent:\n # '--no-filecontent' option was passed to flowsynth\n # This is also checked previously in the code path but adding here too\n compiler_bailout(\"The 'filecontent' attribute is not supported in this context.\")\n else:\n str_payload = 
\"%s%s\" % (str_payload, self.get_file_content(content_value))\n\n return str_payload", "title": "" }, { "docid": "3de368785bcfcc24710caa6fabc66cee", "score": "0.544884", "text": "def encode_base64(msg):\r\n orig = msg.get_payload()\r\n encdata = str(_bencode(orig), 'ascii')\r\n msg.set_payload(encdata)\r\n msg['Content-Transfer-Encoding'] = 'base64'", "title": "" }, { "docid": "f559609a384a135f7666050c813f17ba", "score": "0.5441276", "text": "def makePayload(self, **kwargs):\n # kwargs for non-global variables\n text = kwargs.get('text', self.getText())\n limit = kwargs.get('limit', \"\")\n \n self.payload = {\"token\":self.getToken(), \n \"channel\":self.getChannel(),\n \"thread_ts\":self.getThreadTS(),\n \"text\":text,\n \"limit\":limit,\n \"unfurl_links\":self.getUnfurlLinks()\n }", "title": "" }, { "docid": "a4abd999f299c15c453c304c88b6ef24", "score": "0.5431677", "text": "def serialize_payload(cls, payload):\n return json.dumps(payload, cls=cls.get_json_encoder())", "title": "" }, { "docid": "1cfeb58e995c329e727f74d9aae27113", "score": "0.5419014", "text": "def text(self, value: str):\n if value is None:\n self.pop('text')\n self.mime_type = None\n return\n self._pb_body.text = value\n self.mime_type = 'text/plain'", "title": "" }, { "docid": "ae62cc1a20360fb650e343ef393b183b", "score": "0.5379192", "text": "def parse_text_message(index, key, data):\n text_send = {\n \"type\": \"textsendmessage\",\n \"version\": \"1.0\",\n \"payload\": \"\"\n }\n\n tmp_obj = deepcopy(text_send)\n tmp_obj['payload'] = data[key]\n\n return tmp_obj", "title": "" }, { "docid": "ad4d4c247f67b145f20db7c07f8b50eb", "score": "0.53737426", "text": "def __str__(self) -> str:\n return self.body.decode(\"utf-8\")", "title": "" }, { "docid": "869a464b39e4bbd0daca027d8129b7c5", "score": "0.5368872", "text": "def serialize(self, obj):\n return text_transform(obj)", "title": "" }, { "docid": "26285d8a95e8fac9951259136685e491", "score": "0.5360607", "text": "def text(self) -> str:\n return self.content.decode(self.encoding)", "title": "" }, { "docid": "1bd9dcb37eda8cca9d515caa394e7134", "score": "0.5348297", "text": "def create_message(to, subject, message_text):\n message = MIMEText(message_text)\n message['to'] = to\n message['subject'] = subject\n message_bytes = message.as_bytes() # turn MIMETEXT into byte string\n # encode byte string to base64 encoding and then decode the result into a regular string\n result = {'raw': base64.urlsafe_b64encode(message_bytes).decode()}\n print(result)\n return result", "title": "" }, { "docid": "c253f71fd19c75f41c0c96dd9b326e93", "score": "0.532352", "text": "def _base64_to_str(self, b):\n return base64.b64decode(b).decode('utf-8')", "title": "" }, { "docid": "62fbebb110fe01b4d97c254b06507b81", "score": "0.5322333", "text": "def encode_base64_to_string(data):\n if isinstance(data, str):\n data = bytes(data, \"utf-8\")\n return encode_base64(data).decode(\"ascii\").rstrip(\"\\n\")", "title": "" }, { "docid": "de6693da83e1a8dc451d38d674b52df1", "score": "0.5321437", "text": "def payload_submit(self, payload):\n raise NotImplementedError", "title": "" }, { "docid": "f680ee0d0d97457af52b8bf6eff38c56", "score": "0.5317126", "text": "def text(self):\n return self._get_body_content(\"text/plain\")", "title": "" }, { "docid": "231639c6e89cf2b5348a7068d80b9beb", "score": "0.5307571", "text": "def _base64tostring_(item):\n\treturn base64.b64decode(item)", "title": "" }, { "docid": "dc07339fa9c4e3f46fa0bec8d8e9cd27", "score": "0.53060967", "text": "def _base64_json(self, data):\n content = 
json.dumps(data).encode('utf-8')\n return base64.b64encode(content).decode('utf-8')", "title": "" }, { "docid": "a4d79863c6ca5fa8823cdb1bcd5ba2e0", "score": "0.5286031", "text": "def decode(self):\n try:\n self.content = base64.b64decode(str(self.s))\n return self.content\n except Exception as e:\n return print(f'B64.decode : {str(e)}')", "title": "" }, { "docid": "0ce53a8b4427e196320238332e8a02f6", "score": "0.52846724", "text": "def _receiver(self, data):\n msg = json.loads(binascii.a2b_base64(data))\n if self.session:\n self.session.send(msg)\n else:\n logging.error(\"from app: %s\", str(msg))", "title": "" }, { "docid": "f810ec8e82ca01f30c8832a0ee493665", "score": "0.5267466", "text": "def create_message(self, subject, message_text):\n message = MIMEText(message_text)\n message['to'] = self.send_to\n message['from'] = self.send_from\n message['subject'] = subject\n #raw = base64.urlsafe_b64encode(message.as_string().encode('ascii'))\n raw = base64.urlsafe_b64encode(message.as_bytes())\n raw = raw.decode()\n return {'raw': raw }\n #return {'raw': base64.urlsafe_b64encode(message.encode('ascii'))}", "title": "" }, { "docid": "0751767a7691220dd6c98ae93af16cd3", "score": "0.5266764", "text": "def decode_text(msg_recv):\n msg = json.loads(msg_recv)\n\n # Is true if the received message is a message with username.\n if 'request' in msg:\n return base64.b64decode(msg['request']).decode()\n\n # Is true if the received message is a message with text.\n if 'msg' in msg:\n return base64.b64decode(msg['msg']).decode(), msg['from']", "title": "" }, { "docid": "69f4f1c6004f9e15d2d2c42661fc2b14", "score": "0.52474785", "text": "def set_payload(self, payload, charset=None):\r\n self._payload = payload\r\n if charset is not None:\r\n self.set_charset(charset)", "title": "" }, { "docid": "2022ce0f47a5a6e5d8555957aa000c76", "score": "0.5247171", "text": "def _receiver(self, data):\n msg = json.loads(binascii.a2b_base64(data))\n self.handler(msg, self._writer)", "title": "" }, { "docid": "fc8b45a2da3ee7c6f5cfd09d66bd1b26", "score": "0.5241355", "text": "def setPayload(self, payload):\n self.payload = payload", "title": "" }, { "docid": "69bd85219a0d64f5e855a7bb94274014", "score": "0.523601", "text": "def payload(self, p):\n if isinstance(p, tuple):\n k = p[0]\n v = p[1]\n self.actual_content_type = k\n self._payload[k] = v\n else:\n self._payload = {defines.Content_types[\"text/plain\"]: p}", "title": "" }, { "docid": "e41cd414338a2d4ac995ec6475ccbaad", "score": "0.5226852", "text": "def loads(self, payload):\n raise NotImplementedError()", "title": "" }, { "docid": "991a1d751da9d161d48ab81ffdb5e283", "score": "0.5224788", "text": "def value(self) -> str:\n return self._encoded_body.decode()", "title": "" }, { "docid": "91f8ffcc791e40af10940684c34fd58d", "score": "0.52226543", "text": "def text(self) -> str:\n return self._pb_body.text", "title": "" }, { "docid": "cec1ba29926438b4b1790335d9e639ca", "score": "0.5208592", "text": "def plain_text(self):\n val = ''\n if isinstance(self.content, list):\n for c in self.content:\n val += c.plain_text()\n elif issubclass(self.content.__class__, Element):\n val = self.content.plain_text()\n else:\n val += self.content.encode('utf-8')\n if isinstance(val, unicode):\n return val.encode('utf-8')\n else:\n return val", "title": "" }, { "docid": "cfb0395c52a23c952afb93ac88fdaff1", "score": "0.52040565", "text": "def parse_body_message( self, email_obj ):\n body_message = email_obj.get_payload( decode=True ) # body-content in bytes\n try:\n final = body_message.decode( 
'utf-8' )\n except UnicodeDecodeError:\n try:\n final = body_message.decode( chardet.detect( body_message )['encoding'] ) # chardet result, eg ```{'encoding': 'ISO-8859-1', 'confidence': 0.73, 'language': ''}```\n except Exception as e:\n log.error( 'exception, ```%s```' % e )\n final = body_message.decode('utf-8', errors='backslashreplace')\n log.debug( 'final, ```%s```' % final[0:100] )\n return final", "title": "" }, { "docid": "dd0118407ed16492fcd1f4fd6ca22bda", "score": "0.51931065", "text": "def str_to_b64(spayload):\n payload_b64 = base64.b64encode(spayload.encode())\n\n return payload_b64.decode(\"utf-8\")", "title": "" }, { "docid": "082ddf8cbec315debdaf9878e06b3dc3", "score": "0.5185561", "text": "def text_quick_reply(title, payload):\n quick_reply = {\n \"content_type\": \"text\",\n \"title\": title,\n \"payload\": payload\n }\n return quick_reply", "title": "" }, { "docid": "ebb09005c5224875f0d9c53a914d965c", "score": "0.51846105", "text": "def payload(self):\n return self._payload", "title": "" }, { "docid": "c3c58c28547057807b7da6ad7dec829a", "score": "0.5158626", "text": "def test_str(self):\n crypto_params = attributes.CryptographicParameters(\n cryptographic_algorithm=enums.CryptographicAlgorithm.ECDSA\n )\n payload = sign.SignRequestPayload(\n unique_identifier='b4faee10-aa2a-4446-8ad4-0881f3422959',\n cryptographic_parameters=crypto_params,\n data=b'\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08',\n )\n\n expected = str({\n 'unique_identifier': 'b4faee10-aa2a-4446-8ad4-0881f3422959',\n 'cryptographic_parameters': crypto_params,\n 'data': b'\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08'\n })\n observed = str(payload)\n\n self.assertEqual(expected, observed)", "title": "" }, { "docid": "0c9e157be951df2bfa7c58a14060ba6d", "score": "0.5154437", "text": "def get_payload(self):\n raise NotImplementedError # pragma: no cover", "title": "" }, { "docid": "5546816dbf32b3b29071fd7f187c817a", "score": "0.5145127", "text": "def text(self, encoding: Optional[str] = None) -> str:", "title": "" }, { "docid": "9d6fbe317b953b7bc08d0f422117379a", "score": "0.51404566", "text": "def get_publish_body(message_attributes, message_data, delim_char):\n message = {}\n if message_data:\n # convert to base64 string\n message[\"data\"] = base64.b64encode(message_data.encode(\"utf8\")).decode('utf8')\n if message_attributes:\n message[\"attributes\"] = attribute_pairs_to_dict(message_attributes, delim_char)\n body = {\"messages\": [message]}\n return body", "title": "" }, { "docid": "5d1581a48c1af96d0d43130a99c7b07b", "score": "0.5133271", "text": "def test_str(self):\n payload = sign.SignResponsePayload(\n unique_identifier='00000000-1111-2222-3333-444444444444',\n signature_data=b'\\x01\\x02\\x03'\n )\n\n expected = str({\n 'unique_identifier': '00000000-1111-2222-3333-444444444444',\n 'signature_data': b'\\x01\\x02\\x03'\n })\n\n observed = str(payload)\n self.assertEqual(expected, observed)", "title": "" }, { "docid": "44aa7655c96c9d08c56d51286aedda76", "score": "0.5124512", "text": "def get_base64(self):\r\n return base64.encodestring(str(self)).replace('\\n', '')", "title": "" }, { "docid": "1411402c00a206ff2b6d7f0e9c9bfd02", "score": "0.51175344", "text": "def _format_payload(self, obj):\n if obj.r_type != 'RawData':\n obj.data = Fill.verify_fill(obj.begin_time, obj.end_time,\n obj.agg, obj.data)\n\n return obj", "title": "" }, { "docid": "547cf0e561143b325696bc859c2fa778", "score": "0.5094974", "text": "def pure_base64_dumps(elem):\n return base64.b64encode(six.binary_type(elem))\\\n .replace('=', 
'').replace('\\n', '')", "title": "" }, { "docid": "5fea63f27d857212bc844eb04903f691", "score": "0.50841284", "text": "def as_string(self):\r\n return self.text", "title": "" }, { "docid": "6b5b61ca4b37891fe46485775229eaa9", "score": "0.50826764", "text": "def from_knx(cls, payload: DPTArray | DPTBinary) -> str:\n raw = cls.validate_payload(payload)\n return bytes(byte for byte in raw if byte != 0x00).decode(\n cls._encoding, errors=\"replace\"\n )", "title": "" }, { "docid": "dac6ca67d09e3170d2f3862595600106", "score": "0.50809366", "text": "def decode_base64(base64_bytes: bytes) -> str:\r\n return base64_bytes.decode()", "title": "" }, { "docid": "bb924efd89367638e8e6873175becbb6", "score": "0.5069545", "text": "def _base64_(item):\n\treturn base64.b64encode(item)", "title": "" }, { "docid": "912ad6987a58cde67d118050c72b52bb", "score": "0.505031", "text": "def create_message(self, sender, to, subject, message_text):\n message = MIMEText(message_text)\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n raw = base64.urlsafe_b64encode(message.as_bytes()).decode('utf-8')\n return {'raw': raw}", "title": "" }, { "docid": "a9ce5305a23005a5cd8964c9d003ce1c", "score": "0.5048256", "text": "def make_text(self, record):\r\n return record.message", "title": "" }, { "docid": "211a3e9af0f78990f0dbd6e349c5ad35", "score": "0.50456417", "text": "def base64encode(self, value):\n return value.encode('base64').strip().replace('\\n', '')", "title": "" }, { "docid": "24bfeedb2a254f829f524816740d0b57", "score": "0.504464", "text": "async def b64enc(ctx):\n text = ctx.message.content.split()[1:]\n encode = \"\"\n for x in text:\n encode += str(base64.b64encode(bytes(x, \"utf-8\")), \"utf-8\")\n await bot.say(encode)", "title": "" }, { "docid": "1e870e79620a1d1c951144eebf4cfb02", "score": "0.50420356", "text": "def base64(self) -> str:\n return self._base64", "title": "" }, { "docid": "f5299259adaafbee79445a7f0fce3bad", "score": "0.5031169", "text": "def _encode_base64(original: Text, encoding: Text = \"utf-8\") -> Text:\n import base64\n\n return base64.b64encode(original.encode(encoding)).decode(encoding)", "title": "" }, { "docid": "8332ea926b428a193f5fac22a1a9bf6d", "score": "0.50265086", "text": "def create_message(sender, to, subject, message_text):\n message = MIMEText(message_text)\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n \n return {'raw': base64.urlsafe_b64encode(message.as_string().encode()).decode()}", "title": "" }, { "docid": "e3a5afd03c9edc8c571e6a412c8365dd", "score": "0.5023361", "text": "def wrap(self,payload,id = None):\n if id is None:\n id = str(uuid.uuid4())\n\n wrapper = dict()\n wrapper['id'] = str(id)\n wrapper['event_type'] = self.event_type\n wrapper['version'] = self.version\n wrapper['timestamp'] = str(datetime.datetime.now())\n wrapper['payload'] = payload\n\n return (wrapper['id'].encode(encoding='utf-8'),\n json.dumps(wrapper).encode(encoding='utf-8'))", "title": "" }, { "docid": "b57160f6ee5486ecb12d289b292d5a0f", "score": "0.50213736", "text": "def _build_payload(self) -> dict:\n pass", "title": "" }, { "docid": "b168cb422d31ba9eb6e0e3ae360e13ea", "score": "0.50122255", "text": "def get_Base64(self):\n return base64_with_linebreaks(self.get_DER())", "title": "" }, { "docid": "37725506c34612ff23bce36ae1ae5e51", "score": "0.50037044", "text": "def read(self):\n\n return self.text.encode()", "title": "" }, { "docid": "81f331f861cba5b6235fbfd9378fa897", "score": "0.49990487", "text": "def format_payload(payload):\n 
formatted_payload = '\\n'\n for line in payload.split('\\n'):\n formatted_line = '\\t\\t\"' + line + '\",\\n'\n formatted_line = formatted_line.replace('\\\\\"', '\\\\\\\\\\\\\"') # Escape the f*cking backslash\n formatted_payload += formatted_line\n\n formatted_payload += '\\t\\tNULL\\n'\n return formatted_payload", "title": "" }, { "docid": "41d69d0a491967e5cde27444ea5056ba", "score": "0.49974507", "text": "def decode_json(payload):\n return json.loads(payload.decode('utf-8'))", "title": "" }, { "docid": "41d69d0a491967e5cde27444ea5056ba", "score": "0.49974507", "text": "def decode_json(payload):\n return json.loads(payload.decode('utf-8'))", "title": "" }, { "docid": "dfb17e1b74fafd389011d486766df193", "score": "0.49934405", "text": "def get_content_text(self, charset=None, errors=None):\n maintype = self.get_content_maintype()\n if maintype == \"message\":\n # The attachment's payload is a single (parsed) email Message;\n # flatten it to text.\n # (Note that self.is_multipart() misleadingly returns True in this case.)\n payload = self.get_payload()\n assert len(payload) == 1 # should be exactly one message\n return payload[0].as_string()\n elif maintype == \"multipart\":\n # The attachment itself is multipart; the payload is a list of parts,\n # and it's not clear which one is the \"content\".\n raise ValueError(\n \"get_content_text() is not valid on multipart messages \"\n \"(perhaps you want as_string()?)\"\n )\n else:\n payload = self.get_payload(decode=True)\n if payload is None:\n return payload\n charset = charset or self.get_content_charset(\"US-ASCII\")\n errors = errors or \"replace\"\n return payload.decode(charset, errors=errors)", "title": "" }, { "docid": "a5bbc3990f723181bd7e29540465b0fa", "score": "0.49931407", "text": "def get_file_content(self, obj):\n return obj.decoded_content.decode()", "title": "" }, { "docid": "f7abeba65167b31947b35b11928c78b5", "score": "0.49888244", "text": "def getPayload(self):\n return self.payload", "title": "" }, { "docid": "56ce4f884a080ce311bcece40f92e212", "score": "0.49669605", "text": "def content(self) -> bytes:", "title": "" }, { "docid": "26474a7b7df58f0f1cd4362653120795", "score": "0.4963215", "text": "def get_payload(self, i=None, decode=False):\r\n # Here is the logic table for this code, based on the email5.0.0 code:\r\n # i decode is_multipart result\r\n # ------ ------ ------------ ------------------------------\r\n # None True True None\r\n # i True True None\r\n # None False True _payload (a list)\r\n # i False True _payload element i (a Message)\r\n # i False False error (not a list)\r\n # i True False error (not a list)\r\n # None False False _payload\r\n # None True False _payload decoded (bytes)\r\n # Note that Barry planned to factor out the 'decode' case, but that\r\n # isn't so easy now that we handle the 8 bit data, which needs to be\r\n # converted in both the decode and non-decode path.\r\n if self.is_multipart():\r\n if decode:\r\n return None\r\n if i is None:\r\n return self._payload\r\n else:\r\n return self._payload[i]\r\n # For backward compatibility, Use isinstance and this error message\r\n # instead of the more logical is_multipart test.\r\n if i is not None and not isinstance(self._payload, list):\r\n raise TypeError('Expected list, got %s' % type(self._payload))\r\n payload = self._payload\r\n # cte might be a Header, so for now stringify it.\r\n cte = str(self.get('content-transfer-encoding', '')).lower()\r\n # payload may be bytes here.\r\n if isinstance(payload, str):\r\n payload = str(payload) # for 
Python-Future, so surrogateescape works\r\n if utils._has_surrogates(payload):\r\n bpayload = payload.encode('ascii', 'surrogateescape')\r\n if not decode:\r\n try:\r\n payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace')\r\n except LookupError:\r\n payload = bpayload.decode('ascii', 'replace')\r\n elif decode:\r\n try:\r\n bpayload = payload.encode('ascii')\r\n except UnicodeError:\r\n # This won't happen for RFC compliant messages (messages\r\n # containing only ASCII codepoints in the unicode input).\r\n # If it does happen, turn the string into bytes in a way\r\n # guaranteed not to fail.\r\n bpayload = payload.encode('raw-unicode-escape')\r\n if not decode:\r\n return payload\r\n if cte == 'quoted-printable':\r\n return utils._qdecode(bpayload)\r\n elif cte == 'base64':\r\n # XXX: this is a bit of a hack; decode_b should probably be factored\r\n # out somewhere, but I haven't figured out where yet.\r\n value, defects = decode_b(b''.join(bpayload.splitlines()))\r\n for defect in defects:\r\n self.policy.handle_defect(self, defect)\r\n return value\r\n elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):\r\n in_file = BytesIO(bpayload)\r\n out_file = BytesIO()\r\n try:\r\n uu.decode(in_file, out_file, quiet=True)\r\n return out_file.getvalue()\r\n except uu.Error:\r\n # Some decoding problem\r\n return bpayload\r\n if isinstance(payload, str):\r\n return bpayload\r\n return payload", "title": "" }, { "docid": "e40d645d4fa227fb91ff2feb7f4626f8", "score": "0.4960808", "text": "def validate_payload(self, payload):\n if not payload:\n return\n \n for key, val in payload.iteritems():\n if isinstance(val, dict):\n self.validate_payload(val)\n if isinstance(val, (datetime, date)):\n pattern = '%Y-%m-%dT%H:%M:%SZ'\n payload[key] = val.strftime(pattern) \n if isinstance(val, str):\n val.replace('\"','\\\\\"')", "title": "" }, { "docid": "063cb0b3d8b3c403db2423f69fbb9d02", "score": "0.49576274", "text": "def test_non_utf_payload(self):\n text = \"dog: собака\"\n response = self.client.post(\n \"/parse_encoded_text/\", text, content_type=\"text/plain; charset=koi8-r\"\n )\n self.assertEqual(response.content, text.encode(\"koi8-r\"))", "title": "" }, { "docid": "9bee2b0d49151fadd4545ff61bfe5553", "score": "0.49568388", "text": "def _format_payload(self, obj):\n obj.data = Fill.verify_fill(obj.begin_time, obj.end_time,\n obj.agg, obj.data)\n\n return obj", "title": "" }, { "docid": "246ea0da3cbe1f13684b0c879e72ca6e", "score": "0.4956687", "text": "def onMessage(self, payload, isBinary):\n if not isBinary:\n self.client.handle_message(payload.decode('utf8'))", "title": "" }, { "docid": "514a10b9097c2185cd0fa07bd95d1f79", "score": "0.49543387", "text": "def encode(self, text):\n if isinstance(text, str):\n text = text.encode('utf-8')\n return text", "title": "" }, { "docid": "514a10b9097c2185cd0fa07bd95d1f79", "score": "0.49543387", "text": "def encode(self, text):\n if isinstance(text, str):\n text = text.encode('utf-8')\n return text", "title": "" }, { "docid": "757ada0a9589f247fdf002087f68a943", "score": "0.4951175", "text": "def text(self):\n return self._data.text_data.get(b'Txt ').value.rstrip('\\x00')", "title": "" }, { "docid": "047bd3791437ee7b42c25c9485a84cd0", "score": "0.49462974", "text": "def body(self):\n if not self._auto_decode:\n return self._body\n if 'body' in self._decode_cache:\n return self._decode_cache['body']\n body = try_utf8_decode(self._body)\n self._decode_cache['body'] = body\n return body", "title": "" }, { "docid": 
"27e431c4fd4f72b9db87628eba3569b1", "score": "0.4944953", "text": "def content_as_str(content) -> str:\n if isinstance(content, str):\n return content\n elif content is None:\n return \"\"\n try:\n if isinstance(content, bytes):\n content = content.decode(\"utf-8\")\n else:\n content = json.dumps(content)\n except Exception as error:\n ctx.log.info(\"Error converting to text: {}: {}\".format(error, content))\n content = \"\"\n return content", "title": "" }, { "docid": "31c953f0c5e7f67fdb8cc6a79eec39a5", "score": "0.49379092", "text": "def __decode_webpush_b64(self, data):\r\n missing_padding = len(data) % 4\r\n if missing_padding != 0:\r\n data += '=' * (4 - missing_padding)\r\n return base64.urlsafe_b64decode(data)", "title": "" }, { "docid": "3325b9b3c82371c819ffce079311f5f7", "score": "0.49373913", "text": "def str_payload(broken_payload):\n str_payload = copy.deepcopy(broken_payload)\n str_payload[\"memberwise\"] = [\n {k: str(v) if k == \"icpsr\" else v for k, v in m.iteritems()}\n for m in str_payload[\"memberwise\"]\n ]\n return str_payload", "title": "" }, { "docid": "b5484b72949241c119c72cc2ea35bb94", "score": "0.49358928", "text": "def _b64decode(self, string):\n import base64\n return base64.b64decode(string)", "title": "" }, { "docid": "ba80264c07b6010b726f2cb69826c10a", "score": "0.49355206", "text": "def _build_payload(self, raw_msg, raw_headers):\n return raw_msg", "title": "" }, { "docid": "20bd5cd9ba2a2a0332a85068cbc90626", "score": "0.49354053", "text": "def get_str(self, data: bytes) -> str:\n return data.decode('utf-8')", "title": "" }, { "docid": "1f0d2af54f0a04f7dc1dbaece0a7c8f9", "score": "0.49290657", "text": "def body_encode(self, s,convert):\n\t\tpass", "title": "" }, { "docid": "0630c6a1f50f00f9a9afa5cd8befe64c", "score": "0.49249637", "text": "def processPayload(self, payload):\n req = AutoBeanCodex.decode(FACTORY, RequestMessage, payload).as_()\n responseBean = FACTORY.response()\n # Create a new response envelope, since the state is unknown\n # Return a JSON-formatted payload\n try:\n self.process(req, responseBean.as_())\n except ReportableException, e:\n responseBean = FACTORY.response()\n responseBean.as_().setGeneralFailure(self.createFailureMessage(e).as_())\n return AutoBeanCodex.encode(responseBean).getPayload()", "title": "" }, { "docid": "bf4c8ba91d2c4b2da4fab9993085739c", "score": "0.492495", "text": "def text_(s, encoding=\"latin-1\", errors=\"strict\"):\n if isinstance(s, binary_type):\n return s.decode(encoding, errors)\n return s", "title": "" }, { "docid": "66905e7c17813577222eb9489d55080c", "score": "0.49188346", "text": "def _as_text(s):\n if isinstance(s, bytes):\n return s.decode('utf-8')\n return s", "title": "" } ]
1a7f9536a35fd590d40a3a1d37600ab3
Test we handle a nonexistent board.
[ { "docid": "857333b605e27b3d26e62b606955f50b", "score": "0.0", "text": "async def test_form_cannot_connect(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"step_id\"] == \"user\"\n\n with patch(\n \"homeassistant.components.progettihwsw.config_flow.ProgettiHWSWAPI.check_board\",\n return_value=False,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {CONF_HOST: \"\", CONF_PORT: 80},\n )\n\n assert result2[\"type\"] == FlowResultType.FORM\n assert result2[\"step_id\"] == \"user\"\n assert result2[\"errors\"] == {\"base\": \"cannot_connect\"}", "title": "" } ]
[ { "docid": "ccca7c5d0b148ca39efee43c41233dbe", "score": "0.7050416", "text": "def test_empty(self):\n with self.assertRaises(Board.InvalidDimensionsError):\n Board(())", "title": "" }, { "docid": "1f214f4a3140b583029b4e1844f0fb37", "score": "0.6889755", "text": "def test_empty(self):\n for name, board in self.boards:\n board.clear()\n\n expected = (), ()\n actual = board.occupied()\n self.assertEqual(expected, actual, name)", "title": "" }, { "docid": "0f771720f2c0c0f799d78ab53e064467", "score": "0.67251736", "text": "def test_does_not_exist(self):\n result = find.exists(Tile(1, 1), self.tiles)\n self.assertEqual(result, False)", "title": "" }, { "docid": "1f51ca74c3f6d1ff22791ae2028eb355", "score": "0.67161155", "text": "def test_out_of_bounds(self):\n for name, board in self.boards:\n if board.has_infinite_dimensions and not board.has_finite_dimensions:\n continue # Won't try to check out-of-bounds on an entirely infinite board!\n coord = tuple(2 + len(d) for d in board.dimensions)\n with self.assertRaises(Board.OutOfBoundsError, msg=name):\n board[coord]", "title": "" }, { "docid": "f2bb37e9414c598d5c1682e6f1cae656", "score": "0.66732097", "text": "def emptyboard():\n\n\tif variables.tile1+variables.tile2+variables.tile3+variables.tile4+variables.tile5+variables.tile6+variables.tile7+variables.tile8+variables.tile9==0:\n\t\treturn True\n\telse:\n\t\treturn False", "title": "" }, { "docid": "cb5f2c7163ffe7b31e009596437727d9", "score": "0.6668604", "text": "def attached_board_exists(self):\n return False", "title": "" }, { "docid": "a73d876585d53b12f07001216e2b13b0", "score": "0.66558474", "text": "def test_board_size_is_negative(self):\n with self.assertRaises(ValueError):\n sbd.Board(-1)", "title": "" }, { "docid": "586ea517f4f0215816a5118540238ee8", "score": "0.65599966", "text": "def test_is_empty_exists(self):\n bb_test = sbd.Board(10)\n self.assertTrue(bb_test.is_empty)", "title": "" }, { "docid": "22a47a547487cfba2a17deede13054b8", "score": "0.65039593", "text": "def test_cannot_reset_unexitsting_member(self):\n board = Board.objects.get(id=self.board_id)\n with self.assertRaises(BoardMember.DoesNotExist):\n board.reset_score(board.id)", "title": "" }, { "docid": "b3e8e7c8c289761cf90e8c08d0b346fa", "score": "0.648871", "text": "def did_no_one_win(self):\n for x in range(0, settings.COLS):\n for y in range(0, settings.ROWS):\n if not self.board[x][y]: # The cell is empty: players still can play\n return False\n\n return True", "title": "" }, { "docid": "43d0318bed93828c3664da1a9da584d0", "score": "0.64739925", "text": "def is_over(board):\n for k in range(4):\n board_copy = make_a_move(deepcopy(board),k)\n free_tiles = []\n for i in range(4):\n for j in range(4):\n if board_copy.read_tile(i,j) == None:\n return False\n return True", "title": "" }, { "docid": "0590fbdd68e4040db15eea2278d890f3", "score": "0.6472129", "text": "def test_dim_0(self):\n with self.assertRaises(Board.InvalidDimensionsError):\n Board((1, 0))", "title": "" }, { "docid": "b93d300975d8e24c25b68106b644ecf7", "score": "0.64621276", "text": "def check_not_finished_board(board: list):\n for row in board:\n if '?' 
in row:\n return False\n return True", "title": "" }, { "docid": "db69ce17bf6c54b34c34494095d6608e", "score": "0.6440228", "text": "def test_check_win(self):\r\n self.board[1][13] = constants.WHITE\r\n self.board[2][12] = constants.WHITE\r\n self.board[3][11] = constants.WHITE\r\n self.board[4][10] = constants.WHITE\r\n self.board[5][9] = constants.WHITE\r\n result = check_board_state.CheckBoardState(self.board).check_win()\r\n self.assertTrue(result)", "title": "" }, { "docid": "049ba0012a956053dcdbbab86463baa6", "score": "0.64341366", "text": "def test_empty_board():\n default_goban = []\n default_n = 19\n for i in range(0, default_n):\n for j in range(0, default_n):\n default_goban.append({\"x\": i, \"y\": j, \"val\": None})\n\n new_board = Board()\n assert default_goban == new_board.board", "title": "" }, { "docid": "97ea6f8076474aa7d452b981e9933e7d", "score": "0.6417377", "text": "def check_not_finished_board(board: list):\r\n for i in board:\r\n if '?' in i:\r\n return False\r\n return True", "title": "" }, { "docid": "211978e5cb936ed0860ab70ff1baff5e", "score": "0.6410262", "text": "def is_valid_move(board, col):\n return board[col][-1] == EMPTY", "title": "" }, { "docid": "72c953681701126065664b7616186cc5", "score": "0.63911057", "text": "def check_tie(self):\n return (self.__board[0] != \"0\" and self.__board[1] != \"1\" and self.__board[2] != \"2\" and\n self.__board[3] != \"3\" and self.__board[4] != \"4\" and self.__board[5] != \"5\" and\n self.__board[6] != \"6\" and self.__board[7] != \"7\" and self.__board[8] != \"8\")", "title": "" }, { "docid": "88cd84b73f5794e5da8db11f0163eb1f", "score": "0.6335755", "text": "def test_bad_puzzle(self):\r\n bad_puzzle = [\r\n \"1.....1..\",\r\n \".........\",\r\n \".........\",\r\n \".........\",\r\n \".........\",\r\n \".........\",\r\n \".........\",\r\n \".........\",\r\n \".........\",\r\n ]\r\n try:\r\n solution = solve(self.str_to_board(bad_puzzle))\r\n except ValueError as error:\r\n self.assertTrue('unsolvable' in error.message,\r\n \"Expecting \\'unsolvable\\' error\")", "title": "" }, { "docid": "ff656b4e4cb6ae13384ff183ed36d4c7", "score": "0.63146544", "text": "def check_legal_moves(board):\n return board[:, :, 0] == board[:, :, 1]", "title": "" }, { "docid": "f35d15f916e40f4475a242705b05a548", "score": "0.6307807", "text": "def have_lost(board):\n N = len(board)\n\n # Check every (x,y) position on the board to see if a move is possible\n for y in range(N):\n for x in range(N):\n if move_possible(x, y, board):\n return False\n\n return True", "title": "" }, { "docid": "d7c5f5e683a0c1e5e9c255bc2453b100", "score": "0.62788665", "text": "def good_placement(board, row, col):\n if board[row][col] == 0:\n return False\n if not colcheck(board, row, col):\n return False\n if not rowcheck(board, row, col):\n return False\n if not subsquare_check(board, row, col):\n return False\n return True", "title": "" }, { "docid": "bdd367c48215b285d0929b9b30ecd3fb", "score": "0.6277977", "text": "def xx_invalid_solution(board, solution):\n\n # This line returns False in case there is no solution, because that is\n # what the tests expect. 
Be careful because if your project says that a \n # problem has no solution when it actually has, the script wont detect it\n if solution is None: \n return False\n actions = solution.solution()\n b = board\n for action in actions:\n b = board_remove_group(b, action)\n return b != [[get_no_color() for col in line] for line in board]", "title": "" }, { "docid": "d740a0b70780f70860d05a02106dc8d1", "score": "0.62775147", "text": "def check_win(board):\n\n pass\n\n # checks rows for win\n\n\n # checks columns for win\n\n\n # checks diagonals for win", "title": "" }, { "docid": "f7a605f942cede17f95ecb0a178fc424", "score": "0.6267225", "text": "def test_non_matching_tile(self):\n result = find.matching_tile(Tile(1, 1), self.data)\n self.assertEqual(result, False)", "title": "" }, { "docid": "c83a7e64fde61dbb4496b4d4c4cab2fe", "score": "0.6249907", "text": "def check_empty(self, row: int, col: int) -> bool:\r\n return self.move_on_board((row, col)) and self._game_board[row][col] == NONE", "title": "" }, { "docid": "92de58c0303007f5d0151cb7f713a088", "score": "0.62498933", "text": "def test_dim_negative(self):\n with self.assertRaises(Board.InvalidDimensionsError):\n Board((1, -1))", "title": "" }, { "docid": "9cc875b1d4dedf38a15547ba275f34cf", "score": "0.6248719", "text": "def check_tie(board):\n\n pass", "title": "" }, { "docid": "c51268405dc5926c43bba9aa198ef0b1", "score": "0.6228105", "text": "def empty(board, x, y):\n if legal_position(board, x, y):\n if board[x][y] == '0':\n return True\n return False", "title": "" }, { "docid": "1521247a114b63b22cad9a9e6c1769b5", "score": "0.62270665", "text": "def is_invalid(self):\r\n if any(any(len(x) == 0 for x in row) for row in self.possible_values) or -1 in self.final_board:\r\n return True\r\n return False", "title": "" }, { "docid": "db53bdf1c39c343391625bed792f0ee5", "score": "0.62115353", "text": "def validate_board(board: list) -> bool:\n return check_lines(board) and check_rows(board) and check_colors(board)", "title": "" }, { "docid": "70c46fb7a08f58f3ac96793e5e98e21d", "score": "0.6210603", "text": "def check_not_finished_board(board: list) -> bool:\n for line in board:\n if '?' 
in line:\n return False\n return True", "title": "" }, { "docid": "507137c657d74bf96c328f35a202b9c9", "score": "0.6201724", "text": "def isOccupied(self,x,y):\n return self._board[x][y] != '#'", "title": "" }, { "docid": "23d775d5e8762f66ab1c3aeaad94b2c5", "score": "0.61875826", "text": "def test_check_board_status():\n board = Board(*TEST_ARGU3)\n assert board.check_board_status() == 0\n\n # Connect four in a row vertically\n board.columns_list = [[], [PLAYER1, PLAYER1, PLAYER1]]\n board.add_to_board(1, 1, PLAYER1)\n assert board.check_board_status() == PLAYER1\n\n # Doesn't connect four in a row vertically\n board.columns_list = [[], [PLAYER1, PLAYER1, PLAYER1]]\n board.add_to_board(1, 1, PLAYER2)\n assert board.check_board_status() == 0\n\n # Connect four in a row horizontally\n board = Board(*TEST_ARGU4)\n board.board = [[PLAYER1, PLAYER1, 0, PLAYER1, PLAYER1]]\n board.add_to_board(0, INDEX_TWO, PLAYER1)\n assert board.check_board_status() == PLAYER1\n\n # Doesn't connect four in a row horizontally\n board.board = [[PLAYER1, PLAYER1, 0, PLAYER1, PLAYER1]]\n board.add_to_board(0, INDEX_TWO, PLAYER2)\n assert board.check_board_status() == 0\n\n # Connect four in a row diagonally (from lower left to upper right)\n board = Board(*TEST_ARGU5)\n board.board = [\n [0, 0, 0, PLAYER1, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, PLAYER1, 0, 0, 0, 0],\n [PLAYER1, 0, 0, 0, 0, 0],\n ]\n board.add_to_board(1, INDEX_TWO, PLAYER1)\n assert board.check_board_status() == PLAYER1\n\n # Doesn't connect four in a row diagonally\n board.board = [\n [0, 0, 0, PLAYER1, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, PLAYER1, 0, 0, 0, 0],\n [PLAYER1, 0, 0, 0, 0, 0],\n ]\n board.add_to_board(1, INDEX_TWO, PLAYER2)\n assert board.check_board_status() == 0\n\n # Connect four in a row diagonally (from lower right to upper left)\n board.board = [\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, PLAYER2, 0, 0],\n [0, 0, 0, 0, PLAYER2, 0],\n [0, 0, 0, 0, 0, PLAYER2],\n ]\n board.add_to_board(0, INDEX_TWO, PLAYER2)\n assert board.check_board_status() == PLAYER2\n\n # Doesn't connect four in a row diagonally\n board.board = [\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, PLAYER2, 0, 0],\n [0, 0, 0, 0, PLAYER2, 0],\n [0, 0, 0, 0, 0, PLAYER2],\n ]\n board.add_to_board(0, INDEX_TWO, PLAYER1)\n assert board.check_board_status() == 0\n\n # When no player wins and the board is full\n board = Board(*TEST_ARGU6)\n board.board = [\n [PLAYER1, PLAYER2, 0, PLAYER1],\n [PLAYER2, PLAYER1, PLAYER2, PLAYER1],\n [PLAYER1, PLAYER1, PLAYER2, PLAYER2],\n [PLAYER1, PLAYER2, PLAYER2, PLAYER1],\n ]\n board.played_disks = TEST_ARGU6[0]*TEST_ARGU6[1] - 1\n board.add_to_board(0, INDEX_TWO, PLAYER1)\n assert board.check_board_status() == board.board_full", "title": "" }, { "docid": "38b08f3fabca7c35713817fd64359488", "score": "0.61864203", "text": "def is_empty(self, board: 'Board') -> bool:\n return not self.is_filled(board)", "title": "" }, { "docid": "cdfe12a43be53de6a6e86cfbffb9dec2", "score": "0.618268", "text": "def test_cannot_remove_unexitsting_member(self):\n board = Board.objects.get(id=self.board_id)\n with self.assertRaises(BoardMember.DoesNotExist):\n board.remove_member(board.id)", "title": "" }, { "docid": "78b28ec85a8dfc09ac27bbb976480a57", "score": "0.616816", "text": "def test_setting_possition_on_non_empty_spot():\n board9 = Board(9)\n board9.set_position(1, 1, \"B\")\n with pytest.raises(Exception):\n board9.set_position(1, 1, \"W\")", "title": "" }, { "docid": "cbc7358c30c9ffc1284aaf324937c327", "score": "0.61652607", "text": "def lost(board):\r\n for row in board:\r\n 
for i in row:\r\n if i == 0:\r\n return False\r\n return True", "title": "" }, { "docid": "dc310389e62118252936b924bc6c81fd", "score": "0.6155004", "text": "def checkdefeat(self):\n if empty != 0:\n return False\n\n for i in range(Core.board_height):\n prev = -1\n for j in range(Core.board_width):\n if prev != self.board[i][j]:\n prev = self.board[i][j]\n else:\n return False\n\n for j in range(Core.board_width):\n prev = -1\n for i in range(Core.board_height):\n if prev != self.board[i][j]:\n prev = self.board[i][j]\n else:\n return False\n\n return True", "title": "" }, { "docid": "8bdfc057d26f29abfd8242452893af52", "score": "0.6153193", "text": "def check_board_full( self, board ):\n \n if np.shape( np.where( board == STONE_BLANK ) )[1] == 0:\n return True\n return False", "title": "" }, { "docid": "0b753e2e606d25e0febbd0559426386f", "score": "0.6125744", "text": "def is_empty(self, row: int, col: int) -> bool:\n if not self.board:\n return False\n\n if not self.in_board(row, col):\n return False\n\n if self._board[row][col]:\n return False\n\n return True", "title": "" }, { "docid": "e54ebac15209f49c637654b912e938be", "score": "0.61166", "text": "def test_val_check_false():\n with pytest.raises(AttributeError):\n board9 = Board(9)\n board9.val_check(\"Z\")", "title": "" }, { "docid": "4ba8d0767c4fa772e5aebff26967ae43", "score": "0.6096979", "text": "def _validate_place(self, x, y):\n if self.board.board[x][y] is not None:\n print(\"Board position already taken. Choose another.\")\n return False\n else:\n return True", "title": "" }, { "docid": "d956d9e6594fa60f032314ecfa803704", "score": "0.60949725", "text": "def test_paint_false(self, board_16x16, board_16x16_paint) -> None:\r\n board_16x16.children[0].paint(COLOUR_LIST[3])\r\n assert not board_16x16 == board_16x16_paint", "title": "" }, { "docid": "0484f495cc6ee94a884c3caa8efff426", "score": "0.608044", "text": "def test_create_board(self):\n xo.create_board()", "title": "" }, { "docid": "28bb3252936f21887bba2fb66c7fdc95", "score": "0.607979", "text": "def empty_board(self):\n for x in range(8):\n for y in range(8):\n self.board[x][y] = 0", "title": "" }, { "docid": "42da99157d8ead35b12c9bd666e6c8c3", "score": "0.6079018", "text": "def isValidMove(coordinates, board):\n return # replace return with your code", "title": "" }, { "docid": "6b8da20000a63e7977efb9e3f00bddbf", "score": "0.60707647", "text": "def _validate_board(board):\n if len(board) != 5:\n raise ValueError(\n f\"holdem.utils.get_best_hand: \"\n f\"board must have 5 cards\\n\"\n f\"input: {board}\"\n )", "title": "" }, { "docid": "986be662f030f8ee77e2d554344c47c0", "score": "0.6069934", "text": "def findEmpty(dungeonboard):\n isEmpty = False\n while not isEmpty:\n row = randint(0, 9)\n col = randint(0, 9)\n if dungeonboard[row][col] != \".\":\n isEmpty = False\n else:\n return row, col", "title": "" }, { "docid": "25de1d6b92c815ad2d6fc06aade3bd8e", "score": "0.6069269", "text": "def is_game_over(board):\n return board[SIZE - 1][SIZE - 1]", "title": "" }, { "docid": "e645e8a71fb025a705a346a714c2d267", "score": "0.6057075", "text": "def check_not_finished_board(board: list):\n finished = True\n for i in board:\n if \"?\" in i:\n finished = False\n break\n return finished", "title": "" }, { "docid": "e63c55293fda35797262ff1922ec8d42", "score": "0.6051252", "text": "def is_board_valid(self):\n for i in range(0, 9):\n if not(self.is_row_valid(i) and self.is_column_valid(i) and self.is_cell_valid(i)):\n return False\n return True", "title": "" }, { "docid": 
"f2ed1f5f07ec312c51f3b012574e18d1", "score": "0.6048485", "text": "def check_valid(self, move:Move):\n\n color, x, y = move.get_value()\n if self.__board[x][y] != EMPTY:\n return False\n elif self.__board[x][y] == EMPTY:\n return True", "title": "" }, { "docid": "5487439c36e38734830f8afaab1422f7", "score": "0.6045205", "text": "def is_legal_board(self):\n for row in self.board:\n for vertex1 in row:\n if vertex1.number == 0 and len(vertex1.number_options) == 0:\n return False\n for vertex2 in vertex1.neighbors:\n if vertex2.number != 0 and vertex1.number == vertex2.number:\n return False\n return True", "title": "" }, { "docid": "b36cd7db76030a74e3cf4784c9992628", "score": "0.6040612", "text": "def valid_no(board, row, col):\n for i in range(row):\n for j in range(col):\n if board[i][j] > 9:\n return False\n return True", "title": "" }, { "docid": "4324932ee08460334ff1a6e7f86088f6", "score": "0.6040326", "text": "def test_getting_wrong_position():\n with pytest.raises(AttributeError):\n board9 = Board(9)\n board9.get_position(10, 12)", "title": "" }, { "docid": "c61a643de86d96397b1555103052cf91", "score": "0.6035684", "text": "def test_all_clear_board():\n\n size = 9\n board9 = Board(size)\n for i in board9.board:\n assert i[\"val\"] == None\n\n size = 13\n board13 = Board(size)\n for i in board13.board:\n assert i[\"val\"] == None\n\n size = 19\n board19 = Board(size)\n for i in board19.board:\n assert i[\"val\"] == None", "title": "" }, { "docid": "a12e67e1f06ce3bb148f91b2e97e7129", "score": "0.60321367", "text": "def test_computer_play_empty_board(rep):\n print(f\"tentative n°{rep}\") # inutile, mais pour le fun\n assert 1 <= morpy.play(\"X\") <= 9", "title": "" }, { "docid": "088d1568ba67919b4859a869a8bb9b45", "score": "0.60284364", "text": "def checkDraw(board):\n\tfor i in range(len(board)):\n\t\tif board[i] != X and board[i] != O:\n\t\t\treturn False\n\treturn True", "title": "" }, { "docid": "2e06964963755dd876354c93a754425b", "score": "0.6028109", "text": "def test_isrequestedboard(self):\n self.assertEqual(0, \n sum(1 for post_num, post in self.a_index.items() if post.op.board.short_name != 'a')\n )", "title": "" }, { "docid": "c08c2d972b5ad1ed7b8f224ca397972f", "score": "0.6009245", "text": "def is_valid(self, move):\n\t\treturn self.board[0][move]==0", "title": "" }, { "docid": "81ba9e952dfaac95933e0ababff3c773", "score": "0.599948", "text": "def check_win(board):\n WINS = ((0, 1, 2),(3, 4, 5),(6, 7, 8),\n (0, 3, 6),(1, 4, 7),(2, 5, 8),\n (0, 4, 8),(2, 4, 6))\n for x in WINS:\n if board[x[0]] == board[x[1]] == board[x[2]] != EMPTY:\n return board[x[0]]\n if EMPTY not in board:\n return \"TIE\"\n\n return None", "title": "" }, { "docid": "4aab7d4a642d88bbaaf1db2d99e6537e", "score": "0.59966874", "text": "def terminal(board):\n if sum([row.count(EMPTY) for row in board]) != 0 and winner(board) == None:\n return False\n return True", "title": "" }, { "docid": "706aa34a8e3ceb16dc36d08b4663f840", "score": "0.5989805", "text": "def is_full(board):\n return False", "title": "" }, { "docid": "58d7b1b3a9003fca97396ca3b0d4df29", "score": "0.5981863", "text": "def check_board_full( self ):\n \n if np.shape( np.where( self.board == STONE_BLANK ) )[1] == 0:\n return True\n return False", "title": "" }, { "docid": "b8f8f5fefffe59a68e1aa220f99f12b1", "score": "0.5980299", "text": "def __call__(self, *args):\n \"\"\" Clear the current board if there's anything in it \"\"\"\n del self._board[:]\n for arg in args:\n if arg not in self._board:\n self._board.append(arg)\n else:\n print('Illegal 
board setup: there can only be 1 of each marble, but you have put in duplicates')\n return", "title": "" }, { "docid": "b78584a022a82bc218f4836765d73c0b", "score": "0.5978685", "text": "def test_on_board():\n\n HEIGHT = WIDTH = 400\n SIZE = 8\n TILE_SIZE = MARGIN = HEIGHT/(SIZE + 2)\n board = Board(WIDTH, HEIGHT, SIZE, MARGIN)\n player = Player(board)\n\n assert player.on_board(0, 0) is True\n assert player.on_board(0, 8) is False", "title": "" }, { "docid": "0989ce0d8bd4a1c15660b37a29bddf25", "score": "0.5969398", "text": "def getValidMove(player, board):\n return #replace return with your code ", "title": "" }, { "docid": "5bd48260993f7db2952b275a36e24c8f", "score": "0.5966385", "text": "def validMove(self, row, col):\n # print(f'valid move requested row,col is {(row,col)} and that color is {self.board[row][col].color} and our color is {self.color}')\n if self.board[row][col].color != self.color and self.notKing(row, col):\n return True\n return False", "title": "" }, { "docid": "ed0a1e6514a101954033e84dbf1f9846", "score": "0.596375", "text": "def isvalid(self, chrom):\n # Initialise the variables to be used\n idx = int(0) # Chromosome index\n unassigned_cells = int(0)\n self.board.fill(cmn.CELL_UNASSIGNED)\n # Now loop through the board adding the shapes and checking validity.\n # Start at top left corner, processing each row in turn.\n for row in range(self.rows):\n for col in range(self.cols):\n # Retrieve the next shape\n shape = chrom[idx]\n # Skip the cell if it is already occupied.\n if self.board[row][col] != cmn.CELL_UNASSIGNED:\n continue\n # Have we run out of shapes...\n if shape == cmn.CELL_UNASSIGNED:\n unassigned_cells = unassigned_cells + 1\n continue\n # Attempt to place the shape on the board.\n if shape == cmn.CELL_SPACE:\n if self.pflag:\n # Place the hole; no valid check required.\n self.board[row][col] = cmn.CELL_SPACE\n else: # Not calculating the permutations\n # Place the hole if valid.\n if not ((col > 0 and self.board[row][col - 1] == cmn.CELL_SPACE) or\n (row > 0 and self.board[row - 1][col] == cmn.CELL_SPACE)):\n self.board[row][col] = cmn.CELL_SPACE\n else:\n # Can't place the shape\n unassigned_cells = unassigned_cells + 1\n return False, unassigned_cells\n elif shape == cmn.CELL_HDOMINO:\n # Are we ok to have a horizontal domino?\n if col < self.cols - 1 and self.board[row][col + 1] == cmn.CELL_UNASSIGNED:\n self.board[row][col] = cmn.CELL_HDOMINO\n self.board[row][col + 1] = cmn.CELL_HDOMINO\n else:\n # Can't place the shape\n unassigned_cells = unassigned_cells + 1\n return False, unassigned_cells\n else:\n # shape == cmn.CELL_VDOMINO:\n # Are we ok to have a vertical domino?\n if row < self.rows - 1:\n self.board[row][col] = cmn.CELL_VDOMINO\n self.board[row + 1][col] = cmn.CELL_VDOMINO\n else:\n # Can't place the shape\n unassigned_cells = unassigned_cells + 1\n return False, unassigned_cells\n # Move on to the next shape\n idx = idx + 1\n return True, unassigned_cells", "title": "" }, { "docid": "4f2562392be1e6bfdc401dfabd481e70", "score": "0.5956103", "text": "def check_game_over(self):\n for val in self.grid[0]:\n if val is not -1:\n return True\n return False", "title": "" }, { "docid": "4ff7bfc5306f6e659f5806fc217e1472", "score": "0.5943096", "text": "def solveBoard(self, board):\n\n found = self.findEmpty(board)\n if found:\n (row, col) = found\n else:\n return True\n\n i = 1\n while i < 10:\n if self.isValid(board, (row, col), i):\n board[row][col] = i\n\n if self.solveBoard(board):\n return True\n\n board[row][col] = 0\n\n i += 1\n\n return 
False", "title": "" }, { "docid": "0b40bede180906933f63806571f95997", "score": "0.59403974", "text": "def is_board_still_playable(board: Board) -> bool:\n\n n = range(len(board))\n return any([has_similar_adjacent_cell(board, x, y) for y in n for x in n])", "title": "" }, { "docid": "0c1d0374f826926c5fae8152ade18a8f", "score": "0.59347767", "text": "def isFilled(board):\r\n\tfor row in board:\r\n\t\tfor col in row:\r\n\t\t\tif col == '*':\r\n\t\t\t\treturn -1\r\n\treturn 0", "title": "" }, { "docid": "79abeca57366d6f94aefae0d3b1c58e4", "score": "0.5931802", "text": "def invalid_board_name(board, desc=False):\n board_allowed = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n desc_allowed = board_allowed + \"0123456789 .,'!/?\"\n if desc:\n return not board or not all((x in desc_allowed) for x in board) or len(board) > 30\n return not board or not board.isalpha() or not all((x in board_allowed) for x in board) or len(board) > 6 or board in BOARDS", "title": "" }, { "docid": "7fd5bca33dd84d1e542e0b492fb44a37", "score": "0.59299296", "text": "def _is_stalemate(self):\n for i in range(self.board.size):\n if None in self.board.board[i]:\n return False\n return True", "title": "" }, { "docid": "1dc55dd82839fe6f1275101c8b73cf3c", "score": "0.59283346", "text": "def terminal(board):\r\n if winner(board) is not None:\r\n return True\r\n for i in range(3):\r\n for j in range(3):\r\n if board[i][j] == EMPTY:\r\n return False\r\n else:\r\n return True", "title": "" }, { "docid": "23f0e170c30740815daf176e9c5cc0b4", "score": "0.59190524", "text": "def testGameOver(self):\r\n self.assertFalse(self.b.game_over(), 'Game is over when game not done')\r\n self.b.user_pieces[0].sunk = True\r\n self.assertTrue(self.b.game_over(), \\\r\n 'Game is not over, when game is done')", "title": "" }, { "docid": "3a3a3823b6acd1e3dff8f9781b0a1ec6", "score": "0.5918456", "text": "def exit_board(self):\n # Only call exit if the board exists\n if self.board:\n self.board.exit()", "title": "" }, { "docid": "47aa5aa329277b7dcbb6e035d4a02dbc", "score": "0.5917736", "text": "def square_is_empty_or_has_opponents_piece(self, square):\n wrong_pieces = (\"wm\", \"wk\", \"\") if self.blacks_turn else \\\n (\"bm\", \"bk\", \"\")\n if self.board.get_square(square) in wrong_pieces:\n self.move_msg = \"You don't have a piece on that square to move.\"\n return True\n self.move_msg = \"\"\n return False", "title": "" }, { "docid": "dd0a1ce0b1e028e7b323d8fc0ed732c9", "score": "0.59120506", "text": "def validBoard():\n\n\tif variables.player1_counter==variables.player2_counter or variables.player1_counter==variables.player2_counter+1:\n\t\treturn True\n\telse:\n\t\treturn False", "title": "" }, { "docid": "83c586abf9b564044c6b7004ab32934d", "score": "0.59109956", "text": "def check_valid_piece(self, piece):\n for y, row in enumerate(piece.shape):\n for x, tile in enumerate(row):\n if tile == \"#\":\n offset_y = piece.y - y\n offset_x = piece.x + x\n if offset_y < 0 or \\\n offset_x < 0 or \\\n offset_x > 9 or \\\n self.board[offset_y][offset_x]:\n return False\n return True", "title": "" }, { "docid": "668e4a76f64974242ebd65c83e21cebc", "score": "0.5905244", "text": "def overwritten(self):\n for row, line in enumerate(self.orig_board):\n for col, num in enumerate(line):\n if num != Sudoku.EMPTY:\n if num != self.board[row][col]:\n return True\n return False", "title": "" }, { "docid": "acac6e1ee761a4b594c7cfa80357467d", "score": "0.5904914", "text": "def valid_action(i, j):\n return True if [i, j] in blank_tiles(BOARD) else False", 
"title": "" }, { "docid": "cb2d6e09217b684625de8dde2e92f2dc", "score": "0.5892561", "text": "def test_check_draw(self):\r\n self.board = [[constants.WHITE for i in range(constants.BOARD_SIZE)] for j in range(\r\n constants.BOARD_SIZE)]\r\n result = check_board_state.CheckBoardState(self.board).check_draw()\r\n self.assertTrue(result)", "title": "" }, { "docid": "06f5363ee7bd495bede05e493eac2853", "score": "0.5886457", "text": "def done (board):\n return has_win(board) or is_full(board)", "title": "" }, { "docid": "6e12d5b7bb6b9e89584a6d660ab587aa", "score": "0.58855623", "text": "def _cell_is_valid(self, row, col):\n if not (0 <= row <= 2 and 0 <= col <= 2):\n raise IndexError\n if self._board[row][col] in (Board.CROSS, Board.ZERO):\n raise CellIsNotEmpty", "title": "" }, { "docid": "092313cb46de7f410e60a9b362ff3b30", "score": "0.58803207", "text": "def verify_import_board(self, mode):\n if mode != 2:\n return False\n sudoku_game = Game()\n board = Board()\n game_resources = GameResources()\n board.hints = []\n solved_board = game_resources.call_algorithm_to_solve(board)\n if solved_board.count(0) > 0:\n board.hints.append((0, 0))\n board.resolved = []\n return False\n for position in xrange(len(solved_board)):\n if board.board[position] == 0:\n board.hints.append((position, solved_board[position]))\n if len(board.hints) > 0:\n sudoku_game.hints = board.hints\n board.get_resolved_game()\n return True", "title": "" }, { "docid": "b3cb4b821ab15e804bd8e3580d232774", "score": "0.58747643", "text": "def test_check_no_winner(self):\n deck_game = Game()\n deck_game.player_1 = Player('Nikos')\n deck_game.player_pc = Player('Computer')\n deck_game.central['active'].push(Card('Archer', 3, 1, 5))\n deck_game.central['active'].push(Card('Test1', 1, 3, 3))\n deck_game.central['active'].push(Card('Test2', 0, 3, 2))\n self.assertFalse(deck_game.check_winner(),\\\n 'False expected. 
Players\\' health > 0 and central.active.size() > 0')", "title": "" }, { "docid": "740d5ac265b45d3ee636cbcd941bf1c9", "score": "0.58669794", "text": "def test_find_unfilled(self):\n self.assertRaises(KeyError, config.configfor, 'Incorrect',\n stream=self.incorrect_yaml, file=False)", "title": "" }, { "docid": "c373b4f960e8892eeac8ec8236a0a846", "score": "0.5866526", "text": "def board_filled():\n for col_i in range(NUM_COLS):\n if len(_board[col_i]) < NUM_ROWS:\n return False\n return True", "title": "" }, { "docid": "b544213c47d65d9f38d146e0745cdb09", "score": "0.58657867", "text": "def test_valid_move():\n game = fours.Fours()\n game.board = [\n [fours.Card(\"Spades\", 12), fours.Card(\"Diamonds\", 4)],\n [fours.Card(\"Hearts\", 6)],\n [],\n []\n ]\n assert_that(game.move_card(0, 1)).is_false()\n assert_that(game.board).is_equal_to([\n [fours.Card(\"Spades\", 12), fours.Card(\"Diamonds\", 4)],\n [fours.Card(\"Hearts\", 6)],\n [],\n []\n ])", "title": "" }, { "docid": "0fbd7f6dd9d621a3cd36fcabe2d704c9", "score": "0.5863555", "text": "def validMoveIncludingKing(self, row, col):\n # print(f'valid move requested row,col is {(row,col)} and that color is {self.board[row][col].color} and our color is {self.color}')\n if self.board[row][col].color != self.color:\n return True\n return False", "title": "" }, { "docid": "163f629e22800f91e2965eee7f794205", "score": "0.583776", "text": "def check_win( self, board ):\n\n for x in range( FIELD_WIDTH ):\n for y in range( FIELD_HEIGHT ):\n # We only care about players, not blank fields\n if board[x][y] == STONE_BLANK:\n continue\n \n # Check: UP\n blank, ai, human = self._count_stones_up( x, y, board )\n if ai == CONNECT: return STONE_AI\n elif human == CONNECT: return STONE_HUMAN\n\n # Check: RIGHT\n blank, ai, human = self._count_stones_right( x, y, board )\n if ai == CONNECT: return STONE_AI\n elif human == CONNECT: return STONE_HUMAN\n\n # Check: DIAGONAL RIGHT UP\n blank, ai, human = self._count_stones_rightup( x, y , board)\n if ai == CONNECT: return STONE_AI\n elif human == CONNECT: return STONE_HUMAN\n\n # Check: DIAGONAL RIGHT DOWN\n blank, ai, human = self._count_stones_rightdown( x, y, board )\n if ai == CONNECT: return STONE_AI\n elif human == CONNECT: return STONE_HUMAN\n\n return STONE_BLANK", "title": "" }, { "docid": "1da820aefdc9002c267d86842e685765", "score": "0.58355546", "text": "def NoTheWorldMustBePeopled(self):#much ado about nothing -benedick\n self.boardDict = {}\n self.board = []\n self.boardGraphic=\"\"\n for i in range(15, 0, -1):\n if i < 10:\n row = [\"[\"+str(i)+\" ]\"]\n else:\n row = [\"[\"+str(i)+\"]\"]\n row += ([\" \"] * 15)\n self.board.append(row)\n self.board.append([\"[ ]\", \"[A]\", \"[B]\", \"[C]\", \"[D]\", \"[E]\", \"[F]\", \"[G]\", \"[H]\", \"[I]\", \"[J]\", \"[K]\", \"[L]\", \"[M]\", \"[N]\", \"[O]\"])\n for player in self.players:\n for piece in player.pieces:\n self.changeCoordinateSign(piece.position, piece.symbol)\n self.boardDict[piece.position] = piece \n \n #app.createWidgets()\n for pos in self.edges:\n self.changeCoordinateSign(pos, \" X \")", "title": "" }, { "docid": "ea8ad4a94f41c4f3a9e0890a5c76c4f6", "score": "0.5827233", "text": "def terminal(board):\n\n # Someone won\n if winner(board) != None:\n return True\n \n # All cells were filled\n for row in board:\n for cell in row:\n if cell == EMPTY:\n return False\n return True", "title": "" }, { "docid": "2ac89b50bf6fe85c9d0c1554af31f7fa", "score": "0.58260447", "text": "def invalid_board_error(colour):\n m1 = number_font.render(\"Can't be 
solved.\", True, colour)\n window.blit(m1, (10, 550))\n m2 = text_font.render(\"Reset and Try Again\", True, colour)\n window.blit(m2, (10, 600))\n m3 = text_font.render(\"OR\", True, colour)\n window.blit(m3, (10, 620))\n m4 = text_font.render(\"Change Board.\", True, black)\n window.blit(m4, (10, 640))", "title": "" }, { "docid": "2b8d8cdbeb0f2c9d4123fbabac9335ee", "score": "0.5824745", "text": "def check_status(self):\n if self._board[0][0] == self._board[1][1] == self._board[2][2] \\\n != Board.BLANK:\n return self._board[1][1]\n if self._board[0][2] == self._board[1][1] == self._board[2][0] \\\n != Board.BLANK:\n return self._board[1][1]\n for i in range(3):\n if self._board[i][0] == self._board[i][1] == self._board[i][2] \\\n != Board.BLANK:\n return self._board[i][0]\n if self._board[0][i] == self._board[1][i] == self._board[2][i] \\\n != Board.BLANK:\n return self._board[0][i]\n for i in range(3):\n if Board.BLANK in self._board[i]:\n break\n else:\n return False\n return None", "title": "" }, { "docid": "03c794e0f73eb6d756502ad586421ffd", "score": "0.5821932", "text": "def terminal(board):\n if winner(board) is not None:\n return True\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == EMPTY:\n return False\n\n return True", "title": "" }, { "docid": "07b7c86ca324ca83742c75af201931ee", "score": "0.58212405", "text": "def is_valid_location(board, col):\n return board[ROWS - 1][col] == 0", "title": "" }, { "docid": "b53a41bd2330d0c8194033d314432f87", "score": "0.58205503", "text": "def check_move_valid(self, row, col):\n # Case of invalid input\n if (row >= self.size or row < 0 or col >= self.size or col < 0):\n # print (\"illegal (row, col) for move\")\n return False;\n # There is no neighbour of the current player\n elif(self.check_for_player_flow_neighbour(row, col) == False):\n return False;\n # Case of an occupied cell\n elif (self.board[row][col] != -1 ):\n # print(\"square is not empty\")\n return False\n\n return True", "title": "" } ]
c7b86c990e593c07e47e1f64422f60c7
Print HTTP headers to sys.stdout.
[ { "docid": "f7744652dcc4edfcdef486badb665637", "score": "0.0", "text": "def set_debug_http(self, handle):\r\n level = int(bool(handle))\r\n for scheme in \"http\", \"https\":\r\n h = self._ua_handlers.get(scheme)\r\n if h is not None:\r\n h.set_http_debuglevel(level)", "title": "" } ]
[ { "docid": "be4423dde5e95970d93f0e179b855163", "score": "0.7317722", "text": "def print_headers(self, headers={}, nocache=True):\n if self.sent_headers:\n return\n if not headers.has_key('Content-Type'):\n headers['Content-Type'] = 'text/html'\n if nocache:\n if not headers.has_key('Cache-Control'):\n headers['Cache-Control'] = 'no-store, no-cache, must-revalidate'\n if not headers.has_key('Pragma'):\n headers['Pragma'] = 'no-cache'\n\n sys.stdout.write('\\r\\n'.join(\n ['%s: %s' % t for t in headers.items()]\n ) + '\\r\\n\\r\\n')\n sys.stdout.flush()\n self.sent_headers = True", "title": "" }, { "docid": "6c07d1aacbbb759dc6a689a607fa8b15", "score": "0.7254655", "text": "def print_header(self) -> str:\n return self.print({k: k for k in self.headers})", "title": "" }, { "docid": "f68a9f4371490a5a48fa7fccc7e3030d", "score": "0.68152237", "text": "def output_headers(self):\n self._check_open()\n if self._doneHeaders:\n raise SequencingError, \"output_headers() called twice\"\n for pair in self._headers:\n self._write(\"%s: %s\\r\\n\" % pair)\n self._write(\"\\r\\n\")\n self._doneHeaders = 1", "title": "" }, { "docid": "15ad76ca6d8042a212dd9a84fd68b156", "score": "0.68101734", "text": "def log_request_headers(debug=False):\r\n h = [\" %s: %s\" % (k, v) for k, v in cherrypy.serving.request.header_list]\r\n cherrypy.log('\\nRequest Headers:\\n' + '\\n'.join(h), \"HTTP\")", "title": "" }, { "docid": "94b64ecd47562a2767c9d6406211c525", "score": "0.6758748", "text": "def print_header():\n sys.stdout.write('\\n' + \"-------------------------------------------------\" + '\\n')\n sys.stdout.write(\"Tool: {!s}, Version: {!s}\".format(__init__.name, __init__.version) + '\\n')\n sys.stdout.write(\"-------------------------------------------------\" + '\\n' + '\\n')", "title": "" }, { "docid": "111668d0a451b596f9c31a1e8163482a", "score": "0.6369322", "text": "def print_header():\n print 'pyQ v%s, by Rimon Barr:' % __version__\n print '- Python Yahoo Quote fetching utility'", "title": "" }, { "docid": "83727f370e5a2c07cf9372c2a415d2c7", "score": "0.6279186", "text": "async def print_header(app):\n print('# Begin processing', app.consumer.filename)", "title": "" }, { "docid": "6f74504c2c7a6c2ed94e498718921b86", "score": "0.62707585", "text": "def print_headers(head, outfile, silent=False):\n if outfile:\n with open(outfile, 'w', encoding='utf-8') as f:\n for head_count in head.print_header():\n f.write(head_count+'\\n')\n else:\n if not silent:\n for line in head.print_header():\n print(line)\n return", "title": "" }, { "docid": "cc1e8816665d4740a14da0d4e2fb2111", "score": "0.6237457", "text": "def _print_header(cls):\n\n if (\n not PyFunceble.CONFIGURATION[\"quiet\"]\n and not PyFunceble.CONFIGURATION[\"header_printed\"]\n ):\n # * The quiet mode is not activated.\n # and\n # * The header has not been already printed.\n\n # We print a new line.\n print(\"\\n\")\n\n if PyFunceble.CONFIGURATION[\"less\"]:\n # We have to show less informations on screen.\n\n # We print the `Less` header.\n Prints(None, \"Less\").header()\n else:\n # We have to show every informations on screen.\n\n # We print the `Generic` header.\n Prints(None, \"Generic\").header()\n\n # The header was printed.\n\n # We initiate the variable which say that the header has been printed to True.\n PyFunceble.CONFIGURATION[\"header_printed\"] = True", "title": "" }, { "docid": "2c9af84d6eb8eda7fc9f4a4711407cbc", "score": "0.61883545", "text": "def send_headers(self):\r\n self.cleanup_headers()\r\n self.headers_sent = True\r\n if not 
self.origin_server or self.client_is_modern():\r\n self.send_preamble()\r\n self._write(str(self.headers))", "title": "" }, { "docid": "baa7767fc5828e863d76f840b965c47e", "score": "0.6173258", "text": "def __print_header(self):\n self.__clear_console()\n print(self.__get_banner_image())\n print(self.__get_current_time())\n print()", "title": "" }, { "docid": "a9cb1f5c8d77731c976a9fe5e3a497ee", "score": "0.6159474", "text": "def do_HEAD(self):\n self._send_my_headers()", "title": "" }, { "docid": "7af06151f12cc0fdc62cd4451dbdecb4", "score": "0.61079156", "text": "def print_header(message: str):\n print_with_color(message, Colors.HEADER)", "title": "" }, { "docid": "7887123d12361471c7bb0f7e4db14b89", "score": "0.60963905", "text": "def _send_my_headers(self):\n self._get_process_status()\n if self._processes:\n self.send_response(200, 'OK')\n self.send_header('Content-Type', 'text/plain')\n self.send_header('Connection', 'close')\n self.send_header('Content-Length', '23')\n self.end_headers()\n else:\n self.send_response(503, 'Service Unavailable')\n self.send_header('Content-Type', 'text/plain')\n self.send_header('Connection', 'close')\n self.send_header('Content-Length', '31')\n self.end_headers()", "title": "" }, { "docid": "689d5f40248c4cf2e74fab1fd864981c", "score": "0.60825074", "text": "def print_header(header):\n\n print('\\n')\n print(header)\n print(\"=\" * len(header))", "title": "" }, { "docid": "1e25519b27909317ea430054175d1c20", "score": "0.6079893", "text": "def __do_head(self):\n print '''\\\n<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">\n<HTML>\n<!-- THIS PAGE IS AUTOMATICALLY GENERATED. DO NOT EDIT. -->\n<!-- %(time)s -->\n<!-- USING HT2HTML %(version)s -->\n<!-- SEE http://ht2html.sf.net -->\n<!-- User-specified headers:\nTitle: %(title)s\n%(headers)s\n-->\n\n<HEAD>\n<TITLE>%(title)s</TITLE>\n<META HTTP-EQUIV=\"Content-Type\" CONTENT=\"text/html; charset=%(charset)s\">\n%(meta)s\n%(style)s\n</HEAD>''' % {'title' : self.get_title(),\n 'headers': self.get_headers(),\n 'meta' : self.get_meta(),\n 'time' : time.ctime(time.time()),\n 'version': __version__,\n 'charset': self.get_charset(),\n 'style' : self.get_style()\n }", "title": "" }, { "docid": "a249fdcc5e7b3965bb7e49aa5727e2d0", "score": "0.6076729", "text": "def do_HEAD(self):\n self.send_headers()", "title": "" }, { "docid": "f02dd2a96a78113b9444d3942b86cfb7", "score": "0.60651517", "text": "def print_header(scriptname):\n print(\"==================================\")\n print(\" This is {:s} version {:s}\".format(scriptname, pyspextools.__version__))\n print(\"==================================\")\n print(\"(C) 2018-2023 Jelle de Plaa\")\n print(\"SRON Netherlands Institute for Space Research\")\n print(\"Github: https://github.com/spex-xray/pyspextools\")\n print(\"\")", "title": "" }, { "docid": "c9691e9a6d7841e66627f8b8ad5b1ac2", "score": "0.6045284", "text": "def pretty_print_req(req):\r\n print('{}\\n{}\\n{}\\n\\n{}'.format(\r\n '-----------START-----------',\r\n req.method + ' ' + req.url,\r\n '\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\r\n req.body,\r\n ))", "title": "" }, { "docid": "b46dae5406fc02200e88ac2112adc93a", "score": "0.60281366", "text": "def send_preamble(self):\r\n if self.origin_server:\r\n if self.client_is_modern():\r\n self._write('HTTP/%s %s\\r\\n' % (self.http_version,self.status))\r\n if not self.headers.has_key('Date'):\r\n self._write(\r\n 'Date: %s\\r\\n' % format_date_time(time.time())\r\n )\r\n if self.server_software and not 
self.headers.has_key('Server'):\r\n self._write('Server: %s\\r\\n' % self.server_software)\r\n else:\r\n self._write('Status: %s\\r\\n' % self.status)", "title": "" }, { "docid": "935f92c0ac0bc2b22361ace63fcf8939", "score": "0.600208", "text": "def print_header_information():\n\t\tprint \"Elijah Molloy\\nFelicia Cobb\\nRobinson Mikowlski\"\n\t\tprint \"70-510 - Spring 1 - 2018\"\n\t\tprint \"Programming Assignment #3\\n\"", "title": "" }, { "docid": "93c96169a9cfd132daddf97a30aca216", "score": "0.5991015", "text": "async def write_headers(self, status_line: str, headers: CIMultiDict[str]) -> None:\n ...", "title": "" }, { "docid": "f8073ad7828d5feb4125ecd579548c4c", "score": "0.59368944", "text": "def print_header(delay, lang, pages):\n langstr = lang\n if langstr is None:\n langstr = len(languages())\n\n pagestr = len(pages)\n if pagestr == 1:\n pagestr = \"+\"\n\n msg = []\n msg.append(\"WPTOOLS STRESS TEST\")\n msg.append(time.asctime(time.gmtime()))\n msg.append(\"delay: %d lang: %s pages: %s\" % (delay, langstr, pagestr))\n msgstr = \" \".join(msg)\n\n header = [msgstr]\n header.append(\"=\" * len(msgstr))\n header.append(\"Python \" + sys.version)\n header.append('-' * len(msgstr))\n\n if len(pages) > 1:\n print(\"Getting top %s.wikipedia.org pages\" % lang)\n for i, title in enumerate(pages[:10]):\n print(\" %d. %s\" % (i + 1, title))\n\n print(\"\\n\".join(header))", "title": "" }, { "docid": "ca97a0ed54262be3f5c467cd4815b542", "score": "0.5901599", "text": "def getHeader(self):\n self.getInitOrEnd()\n self.printContent(['Request','Req Time' , 'Payload', 'Status', 'Length', 'Resp Time'], False)\n self.getInitOrEnd()", "title": "" }, { "docid": "dd086f97666f5c91173ee0f1dff3bee9", "score": "0.5856049", "text": "def pretty_print_req(req):\n print('{}\\n{}\\n{}\\n\\n{}'.format(\n '------This is the request------',\n req.method + ' '+ req.url,\n '\\n'.join('{}: {}'.format(k,v) for k,v in req.headers.items()),\n req.body\n ))", "title": "" }, { "docid": "9088230cd0031866e9aafa1fa4489408", "score": "0.5830158", "text": "def print_header() -> None:\n\tlogger.info(\"##############################################################\")\n\tlogger.info(\"# Datascience in Techno-Socio-Economic Systems #\")\n\tlogger.info(\"# #\")\n\tlogger.info(\"# Forecasting the PM10 value #\")\n\tlogger.info(\"##############################################################\")\n\treturn", "title": "" }, { "docid": "3942f16900693f67351396fc95fda8b2", "score": "0.5829146", "text": "def view_headers():\n\n return jsonify(get_dict('headers'))", "title": "" }, { "docid": "32404c84503ca47bb95c500861b38e0f", "score": "0.5828684", "text": "def start_response(self,status,headers):\n response_headers = \"HTTP/1.1 \" + status + \"\\r\\n\"\n for header in headers:\n response_headers += \"%s:%s\\r\\n\" % header\n '''\n print(response_headers)\n HTTP / 1.1 200 OK\n Content - Type: text / plain\n '''\n self.response_headers = response_headers", "title": "" }, { "docid": "34757655482b7bd068ccabdc1f501d99", "score": "0.5804651", "text": "def print_header(outputfile):\n\tprint(\"name\\tuser\\tn_edits\\tn_minor_edits\\tfirst_edit\\tlast_edit\\tadded_bytes\", file=outputfile)", "title": "" }, { "docid": "40fbf64e38a54848b85d915684a6ff8f", "score": "0.57984674", "text": "def response_headers(headers=None, debug=False):\r\n if debug:\r\n cherrypy.log('Setting response headers: %s' % repr(headers),\r\n 'TOOLS.RESPONSE_HEADERS')\r\n for name, value in (headers or []):\r\n cherrypy.serving.response.headers[name] = value", "title": "" }, { 
"docid": "fc07e17beeb3add31259ab38f722f0c2", "score": "0.5765846", "text": "def get_headers(self, environ):\r\n return [('Content-Type', 'text/html')]", "title": "" }, { "docid": "fc07e17beeb3add31259ab38f722f0c2", "score": "0.5765846", "text": "def get_headers(self, environ):\r\n return [('Content-Type', 'text/html')]", "title": "" }, { "docid": "2f6e56c9729fc07c31549058e21e3c0c", "score": "0.57420397", "text": "def print_header(self) -> bool:\n return pulumi.get(self, \"print_header\")", "title": "" }, { "docid": "b9b528511014e8caf6e9f907c1b24b3d", "score": "0.56609565", "text": "def head(self, input):\n url = input.args.strip()\n \n if not url: \n try:\n url = self.lasturl[input.sender.lower()]\n except KeyError:\n self.reply(\"No URLs posted previously and none given, nothing I can do.\")\n return\n \n m = re.search(r\"^https?://\", url, re.I)\n if not m:\n url = \"http://\" + url\n \n self.lasturl[input.sender.lower()] = url\n \n try:\n headers = urllib2.urlopen(url).headers\n except urllib2.URLError:\n self.say(\"Error: Invalid url.\")\n return\n \n if not headers:\n self.say(\"Could not fetch page headers, perhaps the site is down?\")\n return\n \n for key, val in headers.dict.iteritems():\n self.say(\"\\x02%s\\x02: %s\" % (key.capitalize(), val))", "title": "" }, { "docid": "161b43537cc2eff4c29fd0c858e9c1df", "score": "0.5658066", "text": "def display_header(self):\n print('McCabe', end=' ')", "title": "" }, { "docid": "a6efd9eeb0c0487005745dfb9c160301", "score": "0.565255", "text": "def make_head(self, scode, headers_dict):\n head = bytearray(\n \"%s %s %s\\r\\n\" % (PROTOCOL_VERSION, scode, RESPONSES[scode]),\n \"ascii\"\n )\n head.extend(b\"Cache-Control: max-age=86400\\r\\n\")\n head.extend(b\"Server: ZeroMasterHTTP/1.0\\r\\n\")\n head.extend((\"Date: %s\\r\\n\" % gmtime_str()).encode(\"ascii\"))\n if headers_dict is not None:\n for header in headers_dict.items():\n head.extend((\"%s: %s\\r\\n\" % header).encode(\"ascii\"))\n if self.keep_alive:\n head.extend(b\"Connection: keep-alive\\r\\n\\r\\n\")\n else:\n head.extend(b\"Connection: close\\r\\n\\r\\n\")\n return head", "title": "" }, { "docid": "3bd8b182f1b4a79714a7acd5fcc77d7c", "score": "0.56471527", "text": "def log_content_headers(self, request, post_data=None):\n log.debug(f\"Requested URL: {request.url}\")\n log.debug(f\"From requests cache: {request.from_cache}\")\n log.debug(f\"Request status code: {request.status_code}\")\n log.debug(f\"Request reason: {request.reason}\")\n if post_data is None:\n post_data = {}\n try:\n log.debug(json.dumps(dict(request.headers), indent=4))\n log.debug(json.dumps(request.json(), indent=4))\n log.debug(json.dumps(post_data, indent=4))\n except Exception as e:\n log.debug(f\"Could not parse JSON response from GitHub API! 
{e}\")\n log.debug(request.headers)\n log.debug(request.content)\n log.debug(post_data)", "title": "" }, { "docid": "3d42365b4f63c1b87d32be75bd74624a", "score": "0.564648", "text": "def end_headers():\n if req_version != 'HTTP/0.9':\n wfile.write('\\r\\n')", "title": "" }, { "docid": "488e49298c09aba4ddcd008bec857c69", "score": "0.56450325", "text": "def do_HEAD(self):\n\t\tself.send_response(200)\n\t\tself.send_header('Content-type', 'text/html')\n\t\tself.end_headers()", "title": "" }, { "docid": "664e5f4cff119b0ec2e47eed2cefceff", "score": "0.56396776", "text": "def header(self, string):\n logger.debug(string)\n if not self.no_color:\n string = colorize(string, fg=\"cyan\", opts=(\"bold\",))\n self.stdout.write(string)", "title": "" }, { "docid": "b266864af49f23f187ad39c2bd801311", "score": "0.5585485", "text": "async def test_headers(cli):\n resp = await cli.get(\"/headers/\")\n assert \"boo\" == resp.headers[\"location\"]", "title": "" }, { "docid": "0e342e8e1c9e163a3eafb39d87ec10dd", "score": "0.55821776", "text": "def __str__(self):\r\n return '\\r\\n'.join([\"%s: %s\" % kv for kv in self._headers]+['',''])", "title": "" }, { "docid": "d1f76bad4e2f590c4609a75d5a1bdca7", "score": "0.55714", "text": "def print_header() -> None:\n clear_screen()\n print(f\"Current Playerbase - Bars represent 10,000 concurrent users\")\n print(f\"{'-' * 70}\")\n print()", "title": "" }, { "docid": "a9655eb0ba5402e7d06e2b52ac2704ec", "score": "0.5567945", "text": "def header(self, string):\n logger.debug(string)\n if not getattr(self, 'no_color', None):\n string = colorize(string, fg=\"cyan\", opts=(\"bold\",))\n self.stdout.write(string)", "title": "" }, { "docid": "2de38b9ae6a3f4ab65d90e2d2deb0337", "score": "0.5566698", "text": "def print_line(self) -> str:\n return self.print({k: \"-\" * s for k, s in zip(self.headers, self.spaces)})", "title": "" }, { "docid": "c5589cb841feff4d1bf05b709dd16f22", "score": "0.55663806", "text": "def _generate_headers(res_code):\n # initialize header\n header = ''\n # check response code\n if res_code == 200:\n header += 'HTTP/1.1 200 OK\\n'\n elif res_code == 404:\n header += 'HTTP/.1 404 Not Found\\n'\n # Add time record to the header\n now = time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\n header += f'Date: {now}'\n # Add server name\n header += 'Server: Simple-Python-Server\\n'\n # indicate that the server connection will be closed after\n # completing the requests\n header += 'Connection: close\\n\\n'\n return header", "title": "" }, { "docid": "993fe82438aa13468456f72443716930", "score": "0.55537564", "text": "def end_headers(self):\r\n if self.request_version != 'HTTP/0.9':\r\n self.wfile.write(\"\\r\\n\")", "title": "" }, { "docid": "77c07a6e6a33ca1266614b2fa40bf52d", "score": "0.5549137", "text": "def send_headers(self):\n if 'Content-Length' not in self.headers:\n self.headers['Content-Length'] = str(self.bytes_sent)\n self.content_length_flag = False\n self.headers_sent = True\n self.ret_content['statusCode'] = self.status\n for item in self.headers.items():\n for k, v in self.headers.items():\n k = k.strip()\n v = v.strip()\n self.ret_content['headers'][k] = v", "title": "" }, { "docid": "2b8caaa29192bf2defdcbdee8d095c4f", "score": "0.5545402", "text": "def _set_headers(self):\n self.send_response(200)\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Content-type', 'text/html')\n self.end_headers()", "title": "" }, { "docid": "008e4e7113c34528370990bae6d0cc4f", "score": "0.552472", "text": "def response_header(body, 
content_type='text/html'):\n\n header = 'HTTP/1.1 200 OK\\r\\n'\n header += 'Content-Type: %s; charset=ISO-8859-1\\r\\n' % content_type\n header += 'Content-Length: %d' % len(body)\n header += '\\r\\n\\r\\n'\n\n return header", "title": "" }, { "docid": "e91a600499b95dab3573f1e545c5e7fe", "score": "0.5510829", "text": "def print_header(string):\n print(colors.yellow(\"\\n== {0} ==\".format(string)))", "title": "" }, { "docid": "2e84dd0477f7ef0ccfd0d9f42939c07a", "score": "0.55052555", "text": "def write_headers(self):\n for _ in range(3):\n self.fout.write(self.FAKE_HEADER)\n\n self.last_docno = -1\n self.headers_written = True", "title": "" }, { "docid": "cda19e9072ef86b015efdcbb18a603e2", "score": "0.5499643", "text": "def cmd_display_header(context):\n\n print('\\nROM Header:')\n for name, value in context['Header'].items():\n line = \" {}\".format(name)\n line = line.ljust(24)\n if name == 'SramInfo' or name == 'IOSupport':\n hexstr = binascii.hexlify(value).decode()\n hexbytes = re.findall('.{1,2}', hexstr)\n line += '\\\\x' + '\\\\x'.join(hexbytes)\n elif type(value) == bytes:\n line += value.decode('utf-8')\n elif type(value) == int:\n line += hex(value)\n print(line)\n print()", "title": "" }, { "docid": "1072da007d0081cd7656788e570de4ce", "score": "0.54926044", "text": "def get_headers(self):\n return ''", "title": "" }, { "docid": "27fda5bf87adb1a2e099b9d2b102a727", "score": "0.54893583", "text": "def print_header_for_step(message: str):\n print_header(\"\\n======= {} =======\".format(message))", "title": "" }, { "docid": "f2a9041d9da879a136f1821f79ec662d", "score": "0.5478387", "text": "def add_header(self, *args, **kwargs):\n # pylint: disable=unused-argument,logging-format-interpolation\n stack_record = stack()[1]\n log.warning(\"Declined response don't use headers.\\n\"\n \" File {1}, line {2}, in {3} \\n\"\n \"{0}\".format(stack_record[4][0], *stack_record[1:4]))", "title": "" }, { "docid": "812d4759c28012f64755662116ec0a0c", "score": "0.5464053", "text": "def simpleprinter(rsp):\n if not isinstance(rsp, requests.Response):\n raise TypeError(\"expected requests.Response\")\n print(rsp.text)", "title": "" }, { "docid": "dff41d359df3a89d7db10a55803a8e74", "score": "0.54492897", "text": "def pretty_print_POST(req):\n print('{}\\n{}\\n{}\\n\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))", "title": "" }, { "docid": "dff41d359df3a89d7db10a55803a8e74", "score": "0.54492897", "text": "def pretty_print_POST(req):\n print('{}\\n{}\\n{}\\n\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.body,\n ))", "title": "" }, { "docid": "fa50b34174570ea9a3fa74aca326d874", "score": "0.5429982", "text": "def do_header(d):\n print 'from troposphere import Base64, FindInMap, GetAtt, Join, Output'\n print 'from troposphere import Parameter, Ref, Tags, Template'\n print 'from troposphere.cloudfront import Distribution, DistributionConfig'\n print 'from troposphere.cloudfront import Origin, DefaultCacheBehavior'\n print 'from troposphere.ec2 import PortRange'\n\n # Loop over the resources to find imports\n if 'Resources' in d:\n seen = []\n resources = d['Resources']\n for k, v in resources.items():\n (mod, tropo_object) = generate_troposphere_object(v['Type'])\n if tropo_object not in seen:\n seen.append(tropo_object)\n print 'from troposphere.%s import %s' % (mod, tropo_object,)\n 
print\n print\n print \"t = Template()\"\n print", "title": "" }, { "docid": "06b51c6d45d769f942859415f1c9aac7", "score": "0.5424441", "text": "def pretty_raw_request(response):\n\n\tresult = response.request.method + \" \" + response.request.path_url + \" HTTP/1.1\\n\"\n\tresult += \"Host: %s\\n\" % response.url.split('/')[2]\n\n\tfor key,value in response.request.headers.items():\n\t\tif key != \"Connection\":\n\t\t\tresult += \"%s: %s\\n\" % (key,value)\n\n\tresult += \"Connection: %s\\n\" % response.request.headers[\"Connection\"]\n\n\tif response.request.body != None:\n\t\tresult += \"\\n%s\" % response.request.body\n\telse:\n\t\tresult += \"\\n\"\n\treturn result", "title": "" }, { "docid": "072c9fe11903375082594ee89924f910", "score": "0.5417652", "text": "def infoheader():\n clear()\n print(\"=^.~= linkgrab =~.^=\")\n print(\"-\"*50)\n print(\"->> Target: %s\" %(target))\n print(\"-\"*50)", "title": "" }, { "docid": "9bc0c17eb8e1738d6f7619bef79bb241", "score": "0.54139155", "text": "def get_print_header():\n return \"\\t\".join([ \"id\".ljust(5),\n \"job name\".ljust(25),\n \"start time\".ljust(25),\n \"end time\".ljust(25),\n \"time spent\" ])", "title": "" }, { "docid": "42720b0e732670aa1fecef3d943b6a4c", "score": "0.54127914", "text": "def printHeader(args):\n\n level = \"Level\"\n title = \"Title\"\n count = \"(text, cap, head)\"\n total = \"total_text, total_cap, total_head\"\n sep = \"-\" * 65\n\n if args.colour:\n level = Colours.IPurple + level + Colours.Reset\n count = Colours.IYellow + count + Colours.Reset\n total = Colours.BRed + total + Colours.Reset\n sep = Colours.White + sep + Colours.Reset\n\n header = \"{0} {1} {2}\\t{3}\".format(level, title, count, total)\n\n print\n print sep\n print header\n print sep\n print", "title": "" }, { "docid": "5d69d65ed1422da47cd0414936893449", "score": "0.5404805", "text": "def sampcd_header_print(name, sampcd, htype, hname):\n print_header(htype, hname)\n print(\"Sample code \", str(y), \" extracted for \", name, \" :\")\n print(sampcd)\n print(\"----example code check----\\n\")\n print(\"executing sample code .....\")\n print(\"execution result:\")", "title": "" }, { "docid": "b28078942fb8e9028ae5109a037b2f3b", "score": "0.54028505", "text": "def get_headers(request: Request):\n\n output = request.headers\n\n return output", "title": "" }, { "docid": "c9c924ac99f32939b23526e56be874f0", "score": "0.5394367", "text": "def setup_http_debugging():\n return None\n # these two lines enable debugging at httplib level (requests->urllib3->httplib)\n # you will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.\n # the only thing missing will be the response.body which is not logged.\n import httplib\n httplib.HTTPConnection.debuglevel = 1\n\n logging.basicConfig() # you need to initialize logging, otherwise you will not see anything from requests\n logging.getLogger().setLevel(logging.DEBUG)\n requests_log = logging.getLogger(\"requests.packages.urllib3\")\n requests_log.setLevel(logging.DEBUG)\n requests_log.propagate = True", "title": "" }, { "docid": "50cc411eb18e8623a46773b84390a4af", "score": "0.53896683", "text": "def send_head(self):\r\n if self.is_cgi():\r\n return self.run_cgi()\r\n else:\r\n return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)", "title": "" }, { "docid": "53f1d38fb356b2313cab6cb7090dd98e", "score": "0.5383648", "text": "def pyramid(self, response):\n for header in Security_Headers.secure_headers(**self.options):\n response.headers.add(header.header, 
header.value)", "title": "" }, { "docid": "fd262bcf7dcfdeb57979c6d2d1bc22a2", "score": "0.53791183", "text": "def print(self):\n print(\"Response\")\n print(\"--------\")\n print(\"\\t\", \"status\", self.status)\n print(\"\\t\", \"headers\", self.headers)\n print(\"\\t\", \"body\")\n print(self.body)\n print(\"\\t\", \"is_file\", self.is_file)\n print(\"\\t\", \"file\", self.file)\n return None", "title": "" }, { "docid": "8d5b74b250df82852f8e7813d9e0e929", "score": "0.53791153", "text": "def get_all_headers(self):\n # print(self.response.headers)\n return self.response.headers", "title": "" }, { "docid": "6f7e8235d60c3c689d74dc89a57cedf6", "score": "0.5370777", "text": "def add_response_headers(self, req, fmt):\n\n if (fmt == self._PRM_FMT_HTML):\n HDR_CONTENT_TYPE_V = self._HDR_CONTENT_TYPE_V_HTML\n elif (fmt == self._PRM_FMT_JSON):\n HDR_CONTENT_TYPE_V = self._HDR_CONTENT_TYPE_V_JSON\n\n req.setHeader(self._HDR_CONTENT_TYPE_N, HDR_CONTENT_TYPE_V )\n req.setHeader(self._HDR_CACHE_CONTROL_N, self._HDR_CACHE_CONTROL_V)\n req.setHeader(self._HDR_EXPIRES_N, self._HDR_EXPIRES_V )\n req.setHeader(self._HDR_PRAGMA_N, self._HDR_PRAGMA_V )", "title": "" }, { "docid": "8c09e28534b267179918f4b0e9785273", "score": "0.5369266", "text": "def send_headers(self, status, content_type=None, content_length=None):\n\n self.send_response(status.code, status.message)\n if content_type:\n self.send_header('Content-Type', content_type)\n if content_length:\n self.send_header('Content-Length', content_length)\n self.end_headers()", "title": "" }, { "docid": "f7dd07cdcb74ed756fd899c57a276187", "score": "0.5366008", "text": "def head(phenny, input):\r\n uri = input.group(2)\r\n uri = (uri or '').encode('utf-8')\r\n if ' ' in uri: \r\n uri, header = uri.rsplit(' ', 1)\r\n else: uri, header = uri, None\r\n\r\n if not uri and hasattr(phenny, 'last_seen_uri'): \r\n try: uri = phenny.last_seen_uri[input.sender]\r\n except KeyError: return phenny.say('?')\r\n\r\n if not uri.startswith('htt'): \r\n uri = 'http://' + uri\r\n # uri = uri.replace('#!', '?_escaped_fragment_=')\r\n\r\n try: info = web.head(uri)\r\n except IOError: return phenny.say(\"Can't connect to %s\" % uri)\r\n except httplib.InvalidURL: return phenny.say(\"Not a valid URI, sorry.\")\r\n\r\n if not isinstance(info, list): \r\n try: info = dict(info)\r\n except TypeError: \r\n return phenny.reply('Try .head http://example.org/ [optional header]')\r\n info['Status'] = '200'\r\n else: \r\n newInfo = dict(info[0])\r\n newInfo['Status'] = str(info[1])\r\n info = newInfo\r\n\r\n if header is None: \r\n data = []\r\n if info.has_key('Status'): \r\n data.append(info['Status'])\r\n if info.has_key('content-type'): \r\n data.append(info['content-type'].replace('; charset=', ', '))\r\n if info.has_key('last-modified'): \r\n modified = info['last-modified']\r\n modified = time.strptime(modified, '%a, %d %b %Y %H:%M:%S %Z')\r\n data.append(time.strftime('%Y-%m-%d %H:%M:%S UTC', modified))\r\n if info.has_key('content-length'): \r\n data.append(info['content-length'] + ' bytes')\r\n phenny.reply(', '.join(data))\r\n else: \r\n headerlower = header.lower()\r\n if info.has_key(headerlower): \r\n phenny.say(header + ': ' + info.get(headerlower))\r\n else: \r\n msg = 'There was no %s header in the response.' 
% header\r\n phenny.say(msg)", "title": "" }, { "docid": "b5dc9d849b27c268c5eb2911bb18417c", "score": "0.53477967", "text": "def printhtmlheader(title):\n\n print \"Content-Type: text/html\"\n print\n print \"\"\"\n <html>\n <head>\n <TITLE>{0}: {1}</TITLE>\n <!-- <link rel=\"stylesheet\" href=\"/style.css\" /> -->\n </head>\n <body>\n <!-- <img src=\"/logicalis-logo.png\"> -->\n <H1>{0}</H1>\"\"\".format(MAINTITLE, title)", "title": "" }, { "docid": "6075baec72e1546d4b995cd61c4f853d", "score": "0.5346018", "text": "def print_header():\n print(\"--------------------------------------\")\n print(\" BIRTHDAY COUNTDOWN APP\")\n print(\"--------------------------------------\")", "title": "" }, { "docid": "134e6563ffa71895b178f07b50537d1f", "score": "0.5338577", "text": "def setup_headers(self):\n hdrs = {}\n if hdrs:\n self.session.headers.update(hdrs)", "title": "" }, { "docid": "08ee4bf132543a3d8873c5194226223d", "score": "0.5337372", "text": "def _get_headers(response):\n raise NotImplementedError(\"This implementation is virtual.\")", "title": "" }, { "docid": "e5e91adfaa04658dcb917e721c5bb979", "score": "0.5329506", "text": "def send_header(self, keyword, value):\r\n if self.request_version != 'HTTP/0.9':\r\n self.wfile.write(\"%s: %s\\r\\n\" % (keyword, value))\r\n\r\n if keyword.lower() == 'connection':\r\n if value.lower() == 'close':\r\n self.close_connection = 1\r\n elif value.lower() == 'keep-alive':\r\n self.close_connection = 0", "title": "" }, { "docid": "0d8a47ebc23a785e4b8f2c93b8756a52", "score": "0.53071046", "text": "def flushheaders(self):\r\n self._fp.writelines(self._headers)\r\n self._headers = []", "title": "" }, { "docid": "0b450981eda7e5b6a5699f8e3c6116c8", "score": "0.53043085", "text": "def dump(response):\n\n print(\"\\nURL:\", response.request['PATH_INFO'])\n print(\"Method:\", response.request['REQUEST_METHOD'])\n if response.request['QUERY_STRING']:\n print(\"Query:\", response.request['QUERY_STRING'])\n print(\"\\n\")\n print(\"Status code:\\n{}\\n\\nData:\\n{}\\n\".format(\n response.status_code,\n _dict_key_quotes(json.dumps(response.data, indent=4, ensure_ascii=False))\n if hasattr(response, 'data') else None\n ))", "title": "" }, { "docid": "37016c734316b4b1eca048bdfb00c6af", "score": "0.52955884", "text": "def _DebugPrintFileHeader(self, file_header):\n value_string = '0x{0:08x}'.format(file_header.signature)\n self._DebugPrintValue('Signature', value_string)\n\n value_string = '0x{0:08x}'.format(file_header.format_version)\n self._DebugPrintValue('Format version', value_string)\n\n self._DebugPrintDecimalValue(\n 'Number of pages', file_header.number_of_pages)\n\n self._DebugPrintText('\\n')", "title": "" }, { "docid": "3db386e42d963a2a4204eaff14bbd277", "score": "0.5283147", "text": "def send_headers(self):\r\n hkeys = [key.lower() for key, value in self.outheaders]\r\n status = int(self.status[:3])\r\n \r\n if status == 413:\r\n # Request Entity Too Large. 
Close conn to avoid garbage.\r\n self.close_connection = True\r\n elif \"content-length\" not in hkeys:\r\n # \"All 1xx (informational), 204 (no content),\r\n # and 304 (not modified) responses MUST NOT\r\n # include a message-body.\" So no point chunking.\r\n if status < 200 or status in (204, 205, 304):\r\n pass\r\n else:\r\n if (self.response_protocol == 'HTTP/1.1'\r\n and self.method != 'HEAD'):\r\n # Use the chunked transfer-coding\r\n self.chunked_write = True\r\n self.outheaders.append((\"Transfer-Encoding\", \"chunked\"))\r\n else:\r\n # Closing the conn is the only way to determine len.\r\n self.close_connection = True\r\n \r\n if \"connection\" not in hkeys:\r\n if self.response_protocol == 'HTTP/1.1':\r\n # Both server and client are HTTP/1.1 or better\r\n if self.close_connection:\r\n self.outheaders.append((\"Connection\", \"close\"))\r\n else:\r\n # Server and/or client are HTTP/1.0\r\n if not self.close_connection:\r\n self.outheaders.append((\"Connection\", \"Keep-Alive\"))\r\n \r\n if (not self.close_connection) and (not self.chunked_read):\r\n # Read any remaining request body data on the socket.\r\n # \"If an origin server receives a request that does not include an\r\n # Expect request-header field with the \"100-continue\" expectation,\r\n # the request includes a request body, and the server responds\r\n # with a final status code before reading the entire request body\r\n # from the transport connection, then the server SHOULD NOT close\r\n # the transport connection until it has read the entire request,\r\n # or until the client closes the connection. Otherwise, the client\r\n # might not reliably receive the response message. However, this\r\n # requirement is not be construed as preventing a server from\r\n # defending itself against denial-of-service attacks, or from\r\n # badly broken client implementations.\"\r\n remaining = getattr(self.rfile, 'remaining', 0)\r\n if remaining > 0:\r\n self.rfile.read(remaining)\r\n \r\n if \"date\" not in hkeys:\r\n self.outheaders.append((\"Date\", rfc822.formatdate()))\r\n \r\n if \"server\" not in hkeys:\r\n self.outheaders.append((\"Server\", self.server.server_name))\r\n \r\n buf = [self.server.protocol + SPACE + self.status + CRLF]\r\n for k, v in self.outheaders:\r\n buf.append(k + COLON + SPACE + v + CRLF)\r\n buf.append(CRLF)\r\n self.conn.wfile.sendall(EMPTY.join(buf))", "title": "" }, { "docid": "af79f6e649ca0a465cb280429a500ff2", "score": "0.5264702", "text": "def display_header():\n\n print(\"\"\"______ _ _ _____ \n| ___| | | | | | __ \\ \n| |_ _ __ __ _ ___| |_ __ _| |______| | \\/ ___ _ __ \n| _| '__/ _` |/ __| __/ _` | |______| | __ / _ \\ '_ \\ \n| | | | | (_| | (__| || (_| | | | |_\\ \\ __/ | | |\n\\_| |_| \\__,_|\\___|\\__\\__,_|_| \\____/\\___|_| |_|\n\nHigh-performance Python fractal generator \n\"\"\")", "title": "" }, { "docid": "f676c3f33697fdfd6bff8d9e1618c220", "score": "0.5263464", "text": "def send_header(keyword, value):\n if req_version != 'HTTP/0.9':\n wfile.write('%s: %s\\r\\n' % (keyword, value))\n\n if keyword.lower() == 'connection':\n if value.lower() == 'close':\n close_connection = 1\n elif value.lower() == 'keep-alive':\n close_connection = 0", "title": "" }, { "docid": "81faeb2e05c33a733214b4c1f12c37a7", "score": "0.52628666", "text": "def _sendHEADER(self, responseCode, responseMessage, contentType, length):\n #create each line of the header\n header = [\n \"HTTP/1.1 {} {}\\r\\n\".format(responseCode, responseMessage),\n \"Date: {}\\r\\n\".format(formatdate(timeval=None, 
localtime=False, usegmt=True)),\n \"Server: Pr0j3ct\\r\\n\",\n \"Content-Length: {}\\r\\n\".format(length),\n \"Content-Type: {}\\r\\n\".format(contentType),\n ]\n #convert header to a single string\n header = \"\".join(header)\n header += \"\\r\\n\"\n #send header\n self._send(header)", "title": "" }, { "docid": "2ad9cc3e9487f9f28d007876f132c523", "score": "0.52591604", "text": "def end_headers(self):\n if self.request_version != 'HTTP/0.9':\n self.wfile.write(\"\\r\\n\")\n self.start_resp = cStringIO.StringIO(self.wfile.getvalue())\n self.wfile = socketStream(self.connection)\n self.copyfile(self.start_resp, self.wfile)", "title": "" }, { "docid": "f25ee89dead9efff0cf10126f25c3790", "score": "0.525701", "text": "def add_curl_headers(curl_cmd, headers):\n if headers:\n for header, value in headers.items():\n curl_cmd.extend([\"--header\", f\"{header}: {value}\"])", "title": "" }, { "docid": "f3549bb79a5020b409eac69fb04c0706", "score": "0.5255465", "text": "def get_header(self):\n return ''", "title": "" }, { "docid": "cde362e8f256b34824268182903e71c4", "score": "0.525042", "text": "def _show_sys_header(rectime):\n _uname = os.uname()\n nr_cpus = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n if not rectime:\n curr_date = \"?/?/?\"\n elif _display_iso:\n curr_date = time.strftime(\"%Y-%m-%d\", rectime)\n else:\n curr_date = time.strftime(\"%x\", rectime)\n _hdrinfo = (_uname[0], _uname[2], _uname[1], curr_date, _uname[4], nr_cpus)\n _log_info(\"%s %s (%s) \\t%s \\t_%s_\\t(%d CPU)\\n\" % _hdrinfo)", "title": "" }, { "docid": "e18d8e64f68a09ce27252c5b861cdaeb", "score": "0.5246826", "text": "def get_header(self):\n return \"\"", "title": "" }, { "docid": "feeddf22f46b27dd54342907029c5c64", "score": "0.52341086", "text": "def enable_print():\n sys.stdout = sys.__stdout__", "title": "" }, { "docid": "feeddf22f46b27dd54342907029c5c64", "score": "0.52341086", "text": "def enable_print():\n sys.stdout = sys.__stdout__", "title": "" }, { "docid": "37f6ee41cd1d00bd7772f006b3224d7f", "score": "0.52337015", "text": "def send_headers(self):\r\n hkeys = [key.lower() for key, value in self.outheaders]\r\n status = int(self.status[:3])\r\n \r\n if status == 413:\r\n # Request Entity Too Large. 
Close conn to avoid garbage.\r\n self.close_connection = True\r\n elif b\"content-length\" not in hkeys:\r\n # \"All 1xx (informational), 204 (no content),\r\n # and 304 (not modified) responses MUST NOT\r\n # include a message-body.\" So no point chunking.\r\n if status < 200 or status in (204, 205, 304):\r\n pass\r\n else:\r\n if (self.response_protocol == 'HTTP/1.1'\r\n and self.method != b'HEAD'):\r\n # Use the chunked transfer-coding\r\n self.chunked_write = True\r\n self.outheaders.append((b\"Transfer-Encoding\", b\"chunked\"))\r\n else:\r\n # Closing the conn is the only way to determine len.\r\n self.close_connection = True\r\n \r\n if b\"connection\" not in hkeys:\r\n if self.response_protocol == 'HTTP/1.1':\r\n # Both server and client are HTTP/1.1 or better\r\n if self.close_connection:\r\n self.outheaders.append((b\"Connection\", b\"close\"))\r\n else:\r\n # Server and/or client are HTTP/1.0\r\n if not self.close_connection:\r\n self.outheaders.append((b\"Connection\", b\"Keep-Alive\"))\r\n \r\n if (not self.close_connection) and (not self.chunked_read):\r\n # Read any remaining request body data on the socket.\r\n # \"If an origin server receives a request that does not include an\r\n # Expect request-header field with the \"100-continue\" expectation,\r\n # the request includes a request body, and the server responds\r\n # with a final status code before reading the entire request body\r\n # from the transport connection, then the server SHOULD NOT close\r\n # the transport connection until it has read the entire request,\r\n # or until the client closes the connection. Otherwise, the client\r\n # might not reliably receive the response message. However, this\r\n # requirement is not be construed as preventing a server from\r\n # defending itself against denial-of-service attacks, or from\r\n # badly broken client implementations.\"\r\n remaining = getattr(self.rfile, 'remaining', 0)\r\n if remaining > 0:\r\n self.rfile.read(remaining)\r\n \r\n if b\"date\" not in hkeys:\r\n self.outheaders.append(\r\n (b\"Date\", email.utils.formatdate(usegmt=True).encode('ISO-8859-1')))\r\n \r\n if b\"server\" not in hkeys:\r\n self.outheaders.append(\r\n (b\"Server\", self.server.server_name.encode('ISO-8859-1')))\r\n \r\n buf = [self.server.protocol.encode('ascii') + SPACE + self.status + CRLF]\r\n for k, v in self.outheaders:\r\n buf.append(k + COLON + SPACE + v + CRLF)\r\n buf.append(CRLF)\r\n self.conn.wfile.write(EMPTY.join(buf))", "title": "" }, { "docid": "ddb8dfd4e29aff1bc47c9c897c556d15", "score": "0.52210957", "text": "def handle_header(header):", "title": "" }, { "docid": "a8472e59181bc2c397ef13a6ab1b9915", "score": "0.521644", "text": "def pretty_print_POST(req):\n print('{}\\n{}\\r\\n{}\\r\\n\\r\\n{}'.format(\n '-----------START-----------',\n req.method + ' ' + req.url,\n '\\r\\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),\n req.form,\n ))", "title": "" }, { "docid": "0b5fb48a63b12b972bf72281108fb136", "score": "0.5211748", "text": "def fetch_headers(cursor):\n\n return [i[0] for i in cursor.description]", "title": "" }, { "docid": "9ec9807536d8a43443110932e6344fb1", "score": "0.5202758", "text": "def disp_header(self, **kwargs):\n self.window.disp_header()", "title": "" }, { "docid": "c46168b356d46b95f659982581e5c232", "score": "0.5190164", "text": "def writeHeader(outbuffer, title=\"Elastic Tensor Analysis\"):\n\n print(\"\"\"\n <!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n <html>\n 
<head>\n <title>%s</title>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/default.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"https://cdn.jsdelivr.net/npm/[email protected]/distrib/jsxgraph.css\" />\n <script src=\"https://cdn.jsdelivr.net/npm/[email protected]/distrib/jsxgraphcore.js\"></script>\n <script src=\"http://cdn.plot.ly/plotly-latest.min.js\"></script>\n <script src=\"https://cdnjs.cloudflare.com/ajax/libs/jquery/1.12.4/jquery.min.js\"></script>\n </head>\n \"\"\" % (title))", "title": "" }, { "docid": "74cbdd462e79571cea42bb5c1b994c63", "score": "0.51894194", "text": "def print_header(d, width=40):\n if OUTPUT_FORMAT == 'plain':\n print(('(%s)' % d).ljust(width), end='')\n for p in debates[d]:\n print(p.ljust(width), end='')\n print()\n elif OUTPUT_FORMAT == 'html':\n print('<tr>')\n print('<th></th>')\n for p in debates[d]:\n print('<th>%s</th>' % p)\n print('</tr>')\n else:\n raise ValueError('bad output format: %s' % OUTPUT_FORMAT)", "title": "" }, { "docid": "8573558dd1759c5ecc5ee666eb8e3a06", "score": "0.51879543", "text": "def do_HEAD(s):\n s.send_response(200)\n s.send_header('Access-Control-Allow-Origin', '*')\n s.end_headers()", "title": "" } ]
ea836898f81f53deeddd339eb098744f
Redis instance used to cache value. Replace redis instance if you want.
[ { "docid": "c7b7573e500a55046ada3753831be40d", "score": "0.7362848", "text": "def mc(self):\n ctx = stack.top\n if ctx is not None:\n if not hasattr(ctx, 'cache_redis'):\n ctx.cache_redis = self.init_redis()\n return ctx.cache_redis", "title": "" } ]
[ { "docid": "6881087f66a27160ef90604730473508", "score": "0.7461873", "text": "def redis(self):\n if not self._redis:\n self._redis = Redis(self)\n return self._redis", "title": "" }, { "docid": "3e2ee58f21b6db258445b49769a72274", "score": "0.7202681", "text": "def redis(self) -> Redis:\n return self.manager.redis", "title": "" }, { "docid": "0f82a9469408f906590abdec97099c8c", "score": "0.6987436", "text": "def connect_redis(self) -> redis.Redis:\n\n r = redis.Redis(host=self.host, port=self.port, password=self.password)\n return r", "title": "" }, { "docid": "075aa2073c406ed290dcafb64fede756", "score": "0.69569874", "text": "def client(self):\n if self._client is None:\n self._client = redis.Redis(connection_pool=self.pool)\n return self._client", "title": "" }, { "docid": "9a847c87649ec0c2a3ed32c46150005f", "score": "0.69211257", "text": "def __init__(self):\n self._redis = redis.Redis()\n self._redis.flushdb()", "title": "" }, { "docid": "564ed55a8fa202a7f8030fa06ebd0ab6", "score": "0.6836477", "text": "def __init__(self):\n\n\t\tself.r = redis.StrictRedis(\n\t\t\thost=config.REDIS_HOST,\n\t\t\tport=config.REDIS_PORT,\n\t\t)", "title": "" }, { "docid": "0bb654a08d7cb38f7adab144cceb9cb1", "score": "0.66523117", "text": "def set_to_cache(redis_instance, key, value):\n try:\n redis_instance.set(key, value)\n self.logger.info('Cache set for key: {}'.format(key))\n except Exception as e:\n self.logger.exception('Exception while setting value to cache. Details: {}'.format(str(e)))\n raise str(e)", "title": "" }, { "docid": "30160287b7d438ccd7977a6f17909d04", "score": "0.6640087", "text": "def Init(cls, redis, value_encoder=None):\n\t\tcls._Redis = redis\n\t\tcls._Value_Encoder = value_encoder", "title": "" }, { "docid": "19e0b889890739fe89bcb507f041ac93", "score": "0.6622964", "text": "def get_redis(*args, **kwargs) -> Redis:\n if not (args or kwargs):\n kwargs = get_redis_conf()\n\n return Redis(*args, **kwargs)", "title": "" }, { "docid": "3bdd80bf64a59b09db75eb1fb4ee9ff6", "score": "0.65200466", "text": "def _get_redis_connection():\n url = current_app.config.get('REDIS_URL', 'redis://localhost:6379')\n return redis.from_url(url)", "title": "" }, { "docid": "135ab1e8a25bbf052a430c53947078b6", "score": "0.64495486", "text": "def get_client():\n global _redis_client\n global _redis_client_pool\n\n if _redis_client_pool is None:\n _redis_client_pool = redis.BlockingConnectionPool(**config.REDIS)\n\n if _redis_client is None:\n _redis_client = redis.Redis(connection_pool=_redis_client_pool)\n\n return _redis_client", "title": "" }, { "docid": "167f8924f00653d1e4ac9bab694711f3", "score": "0.6434628", "text": "def _CreateRedisClient(self):\n try:\n redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)\n redis_client.ping()\n except redis.exceptions.ConnectionError:\n redis_client = fakeredis.FakeStrictRedis()\n\n return redis_client", "title": "" }, { "docid": "372b6d21b6b815d8a18f11f32616a68b", "score": "0.64109313", "text": "def create(cls, *args, **kwargs):\n return FakeRedis()", "title": "" }, { "docid": "80bf997b683a6ba688470a661c51282b", "score": "0.6356614", "text": "def connect_db():\n# redis_store = FlaskRedis(health_check_interval=30)\n# redis_store.init_app(app)\n ip = \"172.22.54.5\" # hz\n # ip = 'localhost' # localhost\n redis_store = redis.StrictRedis(host=\"172.22.25.100\", health_check_interval=30)\n #redis_store = redis.StrictRedis(host=ip, health_check_interval=30)\n\n return redis_store", "title": "" }, { "docid": "104915afe0746d34a2d4850bc5bef513", "score": 
"0.63457084", "text": "def redis(self):\n\n return self.env[RedisClient].redis", "title": "" }, { "docid": "b981b5c203b5890e5269e77d76914a68", "score": "0.63109857", "text": "def __init__(self):\n self.redisCli = RedisClient()\n # Clear and fill database to avoid wrong values\n self.redisCli.flushDB()\n self.redisCli.fillRedisDatabase()\n # Database should be filled\n self.originalData = self.fetchOriginalData()", "title": "" }, { "docid": "dc660446e3298571c05336e9eb3f1e68", "score": "0.63056445", "text": "def get_from_cache(redis_instance, key):\n try:\n data_from_cache = redis_instance.get(key)\n self.logger.info('Data from cache successful for key: {}'.format(key))\n return data_from_cache\n except Exception as e:\n self.logger.exception('Data from cache for key: {} is unsuccessful. Details: {}'.format(key, str(e)))\n raise str(e)", "title": "" }, { "docid": "5ece91771283e735ecd0ddc39998f5af", "score": "0.6252584", "text": "def login_redis(self):\n options = {}\n path = os.getcwd() + '/.redis.conf'\n if os.path.exists(path):\n with open(path, 'r') as F:\n options = json.loads(F.read())\n pool = redis.ConnectionPool(host=options['host'], port=int(options['port']), db=int(options['db']))\n else:\n pool = redis.ConnectionPool(host='localhost', port=6379, db=0)\n r = redis.StrictRedis(connection_pool=pool)\n return r", "title": "" }, { "docid": "7f83e5ff2c84d476f638511d4f57902b", "score": "0.6249371", "text": "def get_redis_connection(alias='default'):\n cache = get_cache(alias)\n return cache._client", "title": "" }, { "docid": "53ad0725a0907c97bdafe6e11cf1f9f4", "score": "0.6249303", "text": "def __init__(self,\n host=\"localhost\",\n port=7070,\n jobs_key=\"jobs\",\n results_key=\"results\",\n model_key=\"model\",\n worker_prefix=\"workers.\",\n version_key=\"version\",\n version_lock=\"check_version\",\n default_version=0,\n redis_init_wait_seconds=5,\n check_version_frequency=1,\n password=None,\n verbose=True,\n **kwargs,\n ):\n self.model_key, self.jobs_key, self.results_key = model_key, jobs_key, results_key\n self.worker_prefix, self.version_key, self.version_lock = worker_prefix, version_key, version_lock\n self.verbose = verbose\n\n # if localhost and can't find redis, start one\n if host in (\"localhost\", \"0.0.0.0\", \"127.0.0.1\", \"*\"):\n try:\n redis.Redis(host=host, port=port, password=password, **kwargs).client_list()\n except redis.ConnectionError:\n # if not, on localhost try launch new one\n if self.verbose:\n print(\"Redis not found on %s:%s. 
Launching new redis...\" % (host, port))\n self.start_redis(port=port, requirepass=password, **kwargs)\n time.sleep(redis_init_wait_seconds)\n\n self.redis = redis.Redis(host=host, port=port, password=password, **kwargs)\n if self.verbose and len(self.redis.keys()):\n warn(\"Found existing keys: {}\".format(self.redis.keys()))\n\n if self.version_key not in self.redis.keys():\n with self.redis.lock(self.version_lock):\n self.redis.set(self.version_key, default_version)\n\n self.local_version = int(self.redis.get(self.version_key))\n self.check_version_frequency = check_version_frequency", "title": "" }, { "docid": "dd38fe9bad403600e15d8727bd59693f", "score": "0.6174959", "text": "def get_db():\n return redis.Redis(\n host=environ.get(\"REDIS_SERVER\"),\n port=environ.get(\"REDIS_PORT\"),\n password=environ.get(\"REDIS_PASSWORD\"),\n )", "title": "" }, { "docid": "f16fb65c551e2ee00e62d340954c92d1", "score": "0.617225", "text": "def getRedis(self):\n cfg = ConfigParser.RawConfigParser()\n cfgFile = os.path.join('config', 'config.cfg')\n cfg.read(cfgFile)\n redis_ip = cfg.get('redis', 'host')\n redis_port = cfg.get('redis', 'port')\n redis_password = cfg.get('redis', 'password')\n r = redis.StrictRedis(host=redis_ip, port=int(redis_port), db=0, password=redis_password)\n return r", "title": "" }, { "docid": "8612a1eea518375dc736fc7143fd2a90", "score": "0.6150255", "text": "def _get_client(cls, host, port, password, db):\n try:\n import redis\n except ImportError:\n raise RuntimeError('no redis module found')\n else:\n cls.REDIS_VERSION = redis.__version__\n\n # connection pool cache key\n credentials = (host, port, password, db)\n # retrieve or create a pool\n pool = cls._connection_pool_cache.get(\n credentials,\n redis.ConnectionPool(\n host=host,\n port=port,\n password=password,\n db=db,\n ),\n )\n cls._connection_pool_cache[credentials] = pool\n return redis.Redis(connection_pool=pool)", "title": "" }, { "docid": "68243bdc4d0a8aeee9437c5a224505ff", "score": "0.61149603", "text": "def redis_get(id):\n return loads(redis.get(id))", "title": "" }, { "docid": "f0b426dba9abc511a873b8bccbd0c7d7", "score": "0.6101163", "text": "def __init__(\n self,\n redis: Union[Redis, RedisClient], # pylint:disable=unsubscriptable-object\n key: str\n ):\n\n super().__init__(redis)\n self._key = key", "title": "" }, { "docid": "b4b35fbccd92794dd4ba44b39318c82d", "score": "0.608667", "text": "def redis_restart():\n redis_stop()\n redis_run()", "title": "" }, { "docid": "6380b6e20b755d85c0fa826542c2073c", "score": "0.607907", "text": "def redis_init(db=0, max_connections=1):\n pool = redis.ConnectionPool(host=app.config['REDIS_HOST'],\n port=app.config['REDIS_PORT'],\n password=app.config['REDIS_PASSWORD'],\n db=db,\n max_connections=max_connections)\n return redis.Redis(connection_pool=pool)", "title": "" }, { "docid": "dd651dd0c19ce4fe9c069a63549cf047", "score": "0.60708493", "text": "def __init__(self,\n namespace,\n cache_url,\n redis_port=6379,\n subnamespace=None,\n use_S3=False,\n region=None,\n s3_custom_resource=None,\n redis_custom_connection=None,\n redis_ca_cert=None,\n redis_keyfile=None,\n redis_certfile=None,\n use_ssl=True):\n self.namespace = namespace\n self.cache_url = cache_url\n self.subnamespace = subnamespace\n\n if use_S3:\n if s3_custom_resource is None:\n self.s3 = boto3.resource('s3', region_name=region)\n else:\n self.s3 = s3_custom_resource\n self.bucket = self.s3.Bucket(self.namespace)\n else:\n self.bucket = None\n\n if redis_custom_connection is None:\n if redis_ca_cert is not 
None and redis_keyfile is not None and redis_certfile is not None and use_ssl:\n if not os.path.exists(redis_keyfile):\n print('file not found :', redis_keyfile)\n raise FileNotFoundError(redis_keyfile)\n if not os.path.exists(redis_certfile):\n print('file not found :', redis_certfile)\n raise FileNotFoundError(redis_certfile)\n if not os.path.exists(redis_ca_cert):\n print('file not found :', redis_ca_cert)\n raise FileNotFoundError(redis_ca_cert)\n self.redis_cache = redis.StrictRedis(\n host=cache_url,\n port=redis_port,\n ssl=use_ssl,\n ssl_cert_reqs='required',\n ssl_ca_certs=redis_ca_cert,\n ssl_keyfile=redis_keyfile,\n ssl_certfile=redis_certfile\n )\n self.redis_cache.ping()\n else:\n self.redis_cache = redis.StrictRedis(\n host=cache_url,\n port=redis_port,\n ssl=use_ssl\n )\n self.redis_cache.ping()\n else:\n self.redis_cache = redis_custom_connection", "title": "" }, { "docid": "6639c65d2d64c9fdd8811627c721c1ec", "score": "0.6065015", "text": "def from_redis(cls, params) -> \"RedisObject\":\n return cls(**to_kwargs(params)) # type: ignore", "title": "" }, { "docid": "9072d9669d81b3897f691d4d73b2b9ff", "score": "0.6056114", "text": "def cache():\n return Simplecache()", "title": "" }, { "docid": "5621c6e1aab15e855617de6bb1a9fe48", "score": "0.6011427", "text": "def obj(self):\n return cache_obj", "title": "" }, { "docid": "41614ed500658f2283cbfc3e95f5c098", "score": "0.5971196", "text": "def __init__(self, **kwargs):\n\n self.cache = {}\n sleep_time = kwargs.get('sleep_time', self.SLEEP_TIME)\n params = {\n 'host': kwargs.get('host', self.HOST),\n 'port': kwargs.get('port', self.PORT),\n 'db': kwargs.get('db', self.DB),\n 'socket_timeout': kwargs.get('socket_timeout', self.TIMEOUT)\n }\n attempts = 0\n while attempts < self.ATTEMPTS_COUNT:\n try:\n self.redis = redis.Redis(**params)\n self.redis.ping()\n break\n except redis.exceptions.ConnectionError, e:\n logging.info('Redis connection failed. Sleep for {} seconds'.format(sleep_time))\n time.sleep(sleep_time)\n finally:\n attempts += 1\n\n if attempts == self.ATTEMPTS_COUNT:\n self.redis = None\n logging.info(\n 'Not able to connect via host {} port {}. 
Switching to local hash'.format(self.HOST, self.PORT)\n )", "title": "" }, { "docid": "55529b2334b0918071c78954b91f7200", "score": "0.59602094", "text": "def __init__(\n self, redis_client: redis.Redis, namespace: str, ttl: int = DEFAULT_TTL\n ) -> None:\n self.redis = redis_client\n self.namespace = namespace\n self.ttl = ttl\n self.health_key = self._absolute(f\"health {uuid4()}\")", "title": "" }, { "docid": "7b06c9cd24f93e7c1f10c882e6e7cbaf", "score": "0.5954604", "text": "def redis_address(self):\n return self._redis_address", "title": "" }, { "docid": "085c75537db2c60916f4146755b41722", "score": "0.59447604", "text": "async def get_redis(self, timeout: float = None):\n if timeout is not None:\n redis = await wait_for(self._pool.get(), timeout)\n else:\n redis = await self._pool.get()\n try:\n yield redis\n if redis._listener: # pylint: disable=protected-access\n try:\n # pylint: disable=protected-access\n await wait_for(asyncio.wait({redis._listener}), timeout)\n except asyncio.TimeoutError:\n await redis.close()\n # pylint: disable=raise-missing-from\n raise SiderPyError(\"Closising Redis instance because active pub/sub\")\n finally:\n self._pool.put(redis)", "title": "" }, { "docid": "65b08a011bd8e7b4c0567b2a3e8e55a0", "score": "0.5925525", "text": "def new_client():\n return rediscluster.RedisCluster(startup_nodes=redisconfig.hosts, decode_responses=True)\n #return redis.Redis(host=\"172.31.36.59\", port=7000)", "title": "" }, { "docid": "5d0ccb0f27f31de6143754dd17fb0656", "score": "0.5879384", "text": "def __init__(self, host, port, password, decode_responses=True):\n try:\n self.redis = redis.Redis(host=host, port=port, password=password, decode_responses=decode_responses)\n except redis.ConnectionError as e:\n LOGGER.error(\"redis client connect error: %s\" % e)", "title": "" }, { "docid": "cbc3d39ab08fcec1e8cff49ba3d48844", "score": "0.5874904", "text": "def get_cache(self):\n return self._cache", "title": "" }, { "docid": "1f27b809d950995a7b7b1a0a5b40bdf2", "score": "0.5844288", "text": "def get(self, key):\n return self.redis.get(key)", "title": "" }, { "docid": "f54461ef3758dcb42daa36112c5b07fc", "score": "0.5828332", "text": "def get_resq():\n return pyres.ResQ(redis.Redis(\n host=config.REDIS_HOST, port=config.REDIS_PORT, db=config.REDIS_DB))", "title": "" }, { "docid": "c5645f1786e42df31f57230837488b50", "score": "0.58105916", "text": "def get_rq_conn():\n\n connection = Redis(\n host=env.REDIS_SERVER_HOST,\n port=env.REDIS_SERVER_PORT,\n )\n\n return connection", "title": "" }, { "docid": "34c71f9b87681588a2812eed575a7864", "score": "0.57912576", "text": "def __init__(self, host, port, key):\n self._key = key\n self.client = RedisClient(host=host, port=port, db=0).client\n self.r = self.client # for legacy App that may have been using this value", "title": "" }, { "docid": "162f905d13742dce4f4e31df6ad7f9f7", "score": "0.57719374", "text": "def __init__(self, host=HOST, port=PORT, db=DB, password=PASSWORD):\n pool = redis.ConnectionPool(\n # socket_timeout=10,\n # socket_connect_timeout=10,\n host=host,\n port=port,\n db=db,\n password=password,\n decode_responses=True,\n )\n self.client = redis.Redis(connection_pool=pool)", "title": "" }, { "docid": "dc14ecf675cf394bce8618bf15539cb7", "score": "0.5762568", "text": "def redis_ttl(self):\n return self._redis_ttl", "title": "" }, { "docid": "7a33d4ccc61433fa102a9154116d6312", "score": "0.575578", "text": "def getter( *args, **kargs ):\n key = keyprefix + args[1]\n \n cached_value = memcache.get(key)\n if cached_value is 
not None:\n return cached_value\n \n data = func( *args, **kargs )\n memcache.set(key, data, ttl)\n return data", "title": "" }, { "docid": "3aa322110cb703e8a0697ccf07d9e704", "score": "0.57534796", "text": "def __call__(self, *args, **kwargs):\n return self._cache_wrapper(None, *args, **kwargs)", "title": "" }, { "docid": "55af3d66fc228b6ec04f6495a836a68b", "score": "0.5747258", "text": "def __value_get(self):\n if self.no_cache:\n # re-create data from database and cache it\n value = from_pickle(self.db_value, db_obj=self)\n self.cached_value = value\n self.no_cache = False\n return self.cached_value", "title": "" }, { "docid": "77b9088f0bcd034332e046aa069df143", "score": "0.5741003", "text": "def __init__(self, redis: Redis,\n key_format: str = 'redbucket:{zone}:{key}') -> None:\n super(RedisRateLimiter, self).__init__()\n self._redis = redis\n self._key_format = _validate_key_format(key_format)\n\n if self.MIN_REDIS_VERSION:\n self._check_redis_version()", "title": "" }, { "docid": "5a2cdc4d3aaa9caf0b036c513f4018c6", "score": "0.5738151", "text": "def get_conn(self):\n if not self.status_check():\n raise ValueError(\"Abnormal redis connection\")\n\n return self._conn", "title": "" }, { "docid": "a62af15921ba997e94cde00ba87121a1", "score": "0.57373816", "text": "def _cache(self):\r\n client = getattr(self._local, 'client', None)\r\n if client:\r\n return client\r\n \r\n # Use binary mode if it's both supported and requested\r\n if using_pylibmc and self._use_binary:\r\n client = memcache.Client(self._servers, binary=True)\r\n else:\r\n client = memcache.Client(self._servers)\r\n \r\n # If we're using pylibmc, set the behaviors according to settings\r\n if using_pylibmc:\r\n client.behaviors = CACHE_BEHAVIORS\r\n \r\n self._local.client = client\r\n return client", "title": "" }, { "docid": "b1d87d1ae37889ca1218b8b696e1ead2", "score": "0.5727149", "text": "def redis_cache_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redis_cache_id\")", "title": "" }, { "docid": "b1d87d1ae37889ca1218b8b696e1ead2", "score": "0.5727149", "text": "def redis_cache_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"redis_cache_id\")", "title": "" }, { "docid": "68ea978ddf0dd46b244a9119fdb3a844", "score": "0.5715332", "text": "def redis(context, limit=10):\n util.retry(util.is_redis_available, limit=limit)", "title": "" }, { "docid": "e43192b70dbf1a8483709886813896e9", "score": "0.5708443", "text": "def redis_run():\n #redis_cmd = 'redis-server /usr/local/etc/redis.conf'\n redis_cmd = 'docker run --rm --name raven-redis -p 6379:6379 -d redis:6'\n\n with settings(warn_only=True):\n result = local(redis_cmd, capture=True)\n\n if result.failed:\n print('Redis may already be running...')", "title": "" }, { "docid": "3e9d5f5edf3b769c4127bb045168e00f", "score": "0.5692644", "text": "def simple_redis_backend(\n cls,\n redis_server: Tuple[str, int],\n database: int,\n password: Optional[str],\n namespace: str,\n ttl: int = DEFAULT_TTL,\n ) -> \"STDataStore\":\n return cls(\n redis.Redis( # type: ignore\n host=redis_server[0],\n port=redis_server[1],\n db=database,\n health_check_interval=HCI,\n ),\n namespace,\n ttl,\n )", "title": "" }, { "docid": "f2fcc430de6256199ef420256523936b", "score": "0.56909645", "text": "def redis_host(self):\n return self._redis_host", "title": "" }, { "docid": "838106339a4ce0b03a4c6d43934a40a2", "score": "0.5685865", "text": "def initialize_cloud_redis(injected_server=None):\n global REDIS_SERVER # pylint: disable=W0603\n if injected_server is 
None:\n if REDIS_SERVER is not None: # if we have a redis instance, return it\n return\n redis_endpoint, redis_password, redis_port = read_configuration()\n redis_server = redis.Redis(host=redis_endpoint,\n port=redis_port,\n password=redis_password)\n else:\n # injecting a fake redis will always override existing instance\n redis_server = injected_server\n\n REDIS_SERVER = redis_server\n return", "title": "" }, { "docid": "5853409b339aea82fcde2af55cff4456", "score": "0.567116", "text": "def resource_cache(self) -> ValueCache:\n return self._resource_cache", "title": "" }, { "docid": "3a54d6b127624a4987ca02ad0a5ff336", "score": "0.56520605", "text": "def __init__(__self__,\n resource_name: str,\n args: RedisCacheArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "title": "" }, { "docid": "7c5a4165c9636a08142c857536ebba43", "score": "0.56496334", "text": "def redis_cache_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"redis_cache_id\")", "title": "" }, { "docid": "c67710e0d3148d59fb9b3dc2034b1b3c", "score": "0.56473607", "text": "def set(self, key, value):\n self.redis.set(key, value, ex=self.timeout_sec)", "title": "" }, { "docid": "a82e763926f591c76278d06e446da738", "score": "0.5643504", "text": "def __init__(\n self,\n host=None,\n port=None,\n password=None,\n db=None):\n self.host = host\n self.port = port\n self.password = password\n self.db = db\n self.cache_dict = {} # cache dictionary replicating redis\n self.keys = [] # cache redis keys", "title": "" }, { "docid": "54aef843b728dd47496dcc10a32506c0", "score": "0.5639758", "text": "async def create(cls, connection: str):\n self = RedisDB()\n self.conn = await aioredis.create_redis_pool(connection)\n return self", "title": "" }, { "docid": "8cf06b3fe81e6adbd9fc290ba936272e", "score": "0.56332296", "text": "def redis(create_redis, server, loop):\n redis = loop.run_until_complete(\n create_redis(server.tcp_address))\n\n async def clear():\n await redis.flushall()\n loop.run_until_complete(clear())\n return redis", "title": "" }, { "docid": "bca59bca7ecf59bdae92c0021209b061", "score": "0.56305885", "text": "def cache(self):\n return self.__cache", "title": "" }, { "docid": "0f2158e88e0ad4e91dede3be0bbc0d42", "score": "0.56275636", "text": "def get(self, key, dbredis):\n try:\n dbredis = self.__dbredis\n if not self.redis:\n raise ValueError('Redis not connected.')\n # Get redis\n data = self.redis.get(REDIS_SETTING['prefix']+\":\"+key)\n if data:\n return data.decode('utf-8')\n else:\n return False\n except (ValueError) as error:\n return False", "title": "" }, { "docid": "680c1a64f02749a2e9b69d5a5265e991", "score": "0.5616301", "text": "def configure_redis(app):\n app.redis = Redis(\n host=app.config[\"REDIS_HOST\"],\n port=app.config[\"REDIS_PORT\"],\n db=app.config[\"REDIS_DB\"]\n )", "title": "" }, { "docid": "f8073cbebefda06a2ebea7a264912991", "score": "0.55965245", "text": "def get_db():\n if not hasattr(g, 'redis_db'):\n g.redis_db = connect_db()\n return g.redis_db", "title": "" }, { "docid": "8aa6b7dbef8d838290e77ffc2e35fe33", "score": "0.5594094", "text": "def cache(self):\r\n return self.cache_", "title": "" }, { "docid": "b04066074b00d45d3308bdb95df9cc6a", "score": "0.5587209", "text": "def push_cache(self, resp, resp_key):\n client = get_client(self.ssdb_clients, resp_key)\n if not client:\n raise ResponseError('ssdb_client can not be none')\n r = redis.Redis(connection_pool=client['connection_pool'])\n r.set(resp_key, resp.to_json())", "title": "" }, { "docid": 
"417816fd5f6b55512daf7385919b7c40", "score": "0.5579754", "text": "def __init__(self, host=HOST, password=PASSWORD):\r\n\t\t# pool = redis.ConnectionPool(\r\n\t\t# socket_timeout=10,\r\n\t\t# socket_connect_timeout=10,\r\n\t\t# host=host,\r\n\t\t# port=port,\r\n\t\t# db=db,\r\n\t\t# password=password,\r\n\t\t# decode_responses=True,\r\n\t\t# )\r\n\t\t# self.client = redis.Redis(connection_pool=pool)\r\n\t\tself.conn_radis = redis.Redis(host='localhost',password=\"Bmcu28VN\")", "title": "" }, { "docid": "94164bd8e7b894d73fa22061f7ed1746", "score": "0.5551203", "text": "def Get(cls, key, value):\n cache_key = '%s=%s' % (key, value)\n obj = cls._cache.get(cache_key, None)\n if obj:\n return obj\n obj = cls.get_by_key_name(cache_key)\n if obj:\n cls._cache[cache_key] = obj\n return obj\n return None", "title": "" }, { "docid": "443ce704af795af0eb0e0a8d666b0d14", "score": "0.5530329", "text": "def connect_redis(self, keys=None):\n\n if not _PSCACHE_AVAILABLE:\n raise ImportError('package `pscache` needs to be installed'\n ' to use the redis feature!')\n\n # TODO / Note\n # Right now we set the experiment id, but then we don't even\n # really use it. Later we will need to use it to send to a \n # unique redis DB for each expt.\n\n # ALSO: it may be better to have the MPIDataSource provide\n # an interface telling us what experiment it is using...\n\n expt = self._datasource_parent.experiment\n run = self._datasource_parent._currrun\n\n self._redispub = publisher.ExptPublisher(expt, host='psdb3')\n self._redismon = self._redispub.smalldata_monitor(run,\n keys=keys)\n\n self.add_monitor_function(self._redismon)\n\n return", "title": "" }, { "docid": "212909620da8b795eb65dcdc9efa758b", "score": "0.55244356", "text": "def get(self, default=None):\n\t\tdata = self._redis.get(self._key)\n\n\t\treturn self._value_encoder.decode(data) if data is not None else default", "title": "" }, { "docid": "3e9c7001942741bcdd7c7b5492d86e1e", "score": "0.55202574", "text": "def _connected_to_redis(self):\n if self._redis_client is None:\n now = time.time()\n if (\n now - self._redis_last_connection_attemp\n > self.REDIS_RECONNECTION_INTERVAL\n ):\n log.info(\"Trying to reconnect to redis\")\n self._redis_last_connection_attemp = now\n self._redis_client = getRedisClient()\n if self._redis_client:\n log.info(\"Connected to redis\")\n else:\n log.info(\"Could not connect to redis\")\n return self._redis_client is not None", "title": "" }, { "docid": "8fffe11a1a251db6195c07f056c934d5", "score": "0.55113196", "text": "def init():\n global _cache\n _cache = {} # {key: (value, expiration_time)}", "title": "" }, { "docid": "9c5343b461e4ea9c81eed155bb4cae57", "score": "0.5508917", "text": "def set(self, key, value):\n self.redis.set(key, value)\n self.redis.expire(key, 60)\n return True", "title": "" }, { "docid": "6865b1a7051cbf7020f1e9a46e3ce982", "score": "0.5507175", "text": "def __init__(self, error_rate, redis_host=None, redis_port=None, redis_db=None, redis_key=None):\n \n if not (0 < error_rate < 1):\n raise ValueError(\"Error_Rate must be between 0 and 1.\")\n\n # error_rate = 1.04 / sqrt(m)\n # m = 2 ** b\n # M(1)... 
M(m) = 0\n\n b = int(math.ceil(math.log((1.04 / error_rate) ** 2, 2)))\n\n self.alpha = self._get_alpha(b)\n self.b = b\n self.m = 1 << b\n self.M = [ 0 for i in range(self.m) ]\n self.bitcount_arr = [ 1L << i for i in range(160 - b + 1) ]\n\n if redis_host:\n if not redis_port or not redis_key:\n raise ValueError(\"missing redis_information\")\n if not redis_db:\n redis_db = 0\n # redis connection information\n self.redis_host = redis_host\n self.redis_port = redis_port\n self.redis_db = redis_db\n self.redis_key = redis_key\n self.redis = Redis(redis_host, redis_port, redis_db)\n if self.redis.exists(self.redis_key):\n self.restore_from_redis()\n else:\n self.save_to_redis()", "title": "" }, { "docid": "2b2e2aafdf1c1ee89fc2376aa37047ab", "score": "0.5505819", "text": "def redis_set(id, data):\n redis.set(id, dumps(data))", "title": "" }, { "docid": "0412bbec41fea64b522d5f4edf6477e8", "score": "0.5497035", "text": "def __getitem__(self, key):\n value = self.cache[key]\n del self.cache[key]\n self.cache[key] = value\n\n return value", "title": "" }, { "docid": "f1e3e1cd0e1e4a488d2a08247f3707b3", "score": "0.54964095", "text": "def _update_instance_cache(instance):\n instance._monitor_instance_cache = MonitorInstanceCache(instance)", "title": "" }, { "docid": "26e77b50e6d6c2b0f3e6ec89bad6c441", "score": "0.54925257", "text": "def redis_load(self):\n if self.redis_loaded:\n return\n\n with RedisManager.pipeline_context() as pipeline:\n self.queue_up_redis_calls(pipeline)\n data = pipeline.execute()\n self.load_redis_data(data)", "title": "" }, { "docid": "2aaa043b9ec565806be1ca5188bf1bf4", "score": "0.5487168", "text": "def _get_redis_connection_info():\n ci = None\n if g.conf.tenant:\n ci = g.conf.tenant.get('redis')\n return ci", "title": "" }, { "docid": "ab58e1b5eab2fba73db21893f592a44f", "score": "0.54816586", "text": "def test_mock_redis_client(self):\n self.assertFalse(mock_redis_client(host=\"localhost\", port=6379).strict)", "title": "" }, { "docid": "a8fa76015e79df166aa909873123c26f", "score": "0.54754645", "text": "def _get_connection():\n try:\n pool = redis.ConnectionPool(host=HOST, port=PORT, db=DB)\n conn = redis.Redis(connection_pool=pool)\n return conn\n except redis.RedisError as e:\n logger.error(e)\n\n return None", "title": "" }, { "docid": "5a4e0a0119810f1d2a79cfd35ab3ab88", "score": "0.547345", "text": "def __init__(self, dataset_letter, redis_host, redis_password, start_fresh=False):\n Dataset.__init__(self, dataset_letter)\n self.r = Redis(redis_host, redis_password)\n if (start_fresh == True):\n self.remove()\n self.add()", "title": "" }, { "docid": "f3a11d88c405a2686d51df4a9e1d6ee2", "score": "0.5471714", "text": "def _build_redis_subscriber(redis_dict):\n redis_subscriber = redis.Redis(\n host=redis_dict._host, port=redis_dict._port, db=redis_dict._db\n ).pubsub(ignore_subscribe_messages=True)\n redis_subscriber.subscribe(redis_dict._re_md_channel_name)\n return redis_subscriber", "title": "" }, { "docid": "9cfdcddcf13f75d4866a2f5fddeda972", "score": "0.54554754", "text": "def get_cache(self):\n return self._cache", "title": "" }, { "docid": "80c6f2ec567185df72ed0201c3704e99", "score": "0.54461527", "text": "async def acquire(self):\n # Take a connection from the pool.\n try:\n return redis(connection=self._connection, pool=self)\n except (aioredis.exceptions.ConnectionError) as err:\n raise ConnectionError(\"Redis Pool is already closed: {}\".format(str(err)))\n except (aioredis.exceptions.RedisError) as err:\n raise ConnectionError(\n \"Redis Pool is closed o 
doesnt exists: {}\".format(str(err))\n )\n except Exception as err:\n raise ProviderError(\"Unknown Error: {}\".format(str(err)))\n return False", "title": "" }, { "docid": "975b05b5d16618ab86432955be6be0a6", "score": "0.5445302", "text": "def create_mocked_redis_node(host, port, **kwargs):\n if port == 7000:\n result = [\n [0, 5460, [\"127.0.0.1\", 7000], [\"127.0.0.1\", 7003]],\n [5461, 10922, [\"127.0.0.1\", 7001], [\"127.0.0.1\", 7004]],\n ]\n\n elif port == 7001:\n result = [\n [0, 5460, [\"127.0.0.1\", 7001], [\"127.0.0.1\", 7003]],\n [5461, 10922, [\"127.0.0.1\", 7000], [\"127.0.0.1\", 7004]],\n ]\n else:\n result = []\n\n r_node = Redis(host=host, port=port)\n\n orig_execute_command = r_node.execute_command\n\n def execute_command(*args, **kwargs):\n if args[0] == \"CLUSTER SLOTS\":\n return result\n elif args[0] == \"INFO\":\n return {\"cluster_enabled\": True}\n elif args[1] == \"cluster-require-full-coverage\":\n return {\"cluster-require-full-coverage\": \"yes\"}\n else:\n return orig_execute_command(*args, **kwargs)\n\n r_node.execute_command = execute_command\n return r_node", "title": "" }, { "docid": "05dd9a0157a7106df30e4a315653f530", "score": "0.54337966", "text": "def cache(self):\n return self._cache", "title": "" }, { "docid": "05dd9a0157a7106df30e4a315653f530", "score": "0.54337966", "text": "def cache(self):\n return self._cache", "title": "" }, { "docid": "bf14199cc6a105b562403fc3e7e8686f", "score": "0.54304904", "text": "def get(\n self,\n name=None):\n if not name:\n err = (\n 'mock - MockRedis.get('\n 'name={}'\n ') - missing a name'.format(\n name))\n log.error(err)\n raise Exception(err)\n value_in_dict = self.cache_dict.get(\n name,\n None)\n if not value_in_dict:\n value_in_env = ae_consts.ev(\n name,\n None)\n log.info(\n 'mock - MockRedis.get('\n 'name={}) '\n 'env={}'.format(\n name,\n value_in_env))\n if value_in_env:\n return value_in_env.encode('utf-8')\n else:\n return None\n else:\n log.info(\n 'mock - MockRedis.get('\n 'name={}) '\n 'cached_value={}'.format(\n name,\n value_in_dict))\n return value_in_dict\n # end of get data from dict vs in the env", "title": "" }, { "docid": "c9caf917f208ab5a6e5dc0ad6f1e6225", "score": "0.54200375", "text": "def run_example():\r\n\r\n log = utilities.configure_logger('default', '../logs/redis_script.log')\r\n\r\n try:\r\n log.info('Step 1: connect to Redis')\r\n r = login_database.login_redis_cloud()\r\n log.info('Step 2: cache some data in Redis')\r\n r.set('andy', '[email protected]')\r\n\r\n log.info('Step 2: now I can read it')\r\n email = r.get('andy')\r\n log.info('But I must know the key')\r\n log.info(f'The results of r.get: {email}')\r\n\r\n log.info('Step 3: cache more data in Redis')\r\n r.set('pam', '[email protected]')\r\n r.set('fred', '[email protected]')\r\n\r\n log.info('Step 4: delete from cache')\r\n r.delete('andy')\r\n log.info(f'r.delete means andy is now: {email}')\r\n\r\n log.info(\r\n 'Step 6: Redis can maintain a unique ID or count very efficiently')\r\n r.set('user_count', 21)\r\n r.incr('user_count')\r\n r.incr('user_count')\r\n r.decr('user_count')\r\n result = r.get('user_count')\r\n log.info('I could use this to generate unique ids')\r\n log.info(f'Redis says 21+1+1-1={result}')\r\n\r\n log.info('Step 7: richer data for a SKU')\r\n r.rpush('186675', 'chair')\r\n r.rpush('186675', 'red')\r\n r.rpush('186675', 'leather')\r\n r.rpush('186675', '5.99')\r\n\r\n log.info('Step 8: pull some data from the structure')\r\n cover_type = r.lindex('186675', 2)\r\n log.info(f'Type of cover = 
{cover_type}')\r\n\r\n log.info('Add Customer Info')\r\n r.hmset('Beet', {'Telephone': '808-543-9021', 'Zip': '96701'})\r\n r.hmset('Ujido', {'Telephone': '808-355-5678', 'Zip': '11211'})\r\n r.hmset('Sean', {'Telephone': '524-444-4321', 'Zip': '12111'})\r\n r.hmset('Anna', {'Telephone': '321-555-6790', 'Zip': '23456'})\r\n r.hmset('Alaska', {'Telephone': '123-456-7890', 'Zip': '98223'})\r\n sean_phone = r.hmget('Sean', 'Telephone')\r\n Ujido_zip = r.hmget('Ujido', 'Zip')\r\n anna_zip = r.hmget('Anna', 'Zip')\r\n log.info(f'Sean\\'s Telephone number is : {sean_phone}')\r\n log.info(f'Ujido\\'s Zip is: {Ujido_zip}')\r\n log.info(f'Anna\\'s Zip is: {anna_zip}')\r\n\r\n except Exception as e:\r\n print(f'Redis error: {e}')", "title": "" }, { "docid": "db5c144e03c7e7302af0596cfbe5c1cd", "score": "0.54165804", "text": "def _start_engine_redis():\n data.start('kvs', 'central')\n data.start('kvs', 'engine')", "title": "" }, { "docid": "fbcce5d4ad55ff127fc18854d54d496d", "score": "0.5412747", "text": "def external_clear_cache(instance, cache_key):\n setattr(instance, cache_key, {})", "title": "" }, { "docid": "c5bdecdca7b44de91bdf971ec7ab7281", "score": "0.5393266", "text": "def __init__(self, redis, logger, console_logger=None):\n super().__init__(redis, logger, console_logger)", "title": "" }, { "docid": "4cc7c356bfed1ac10561626795005ba0", "score": "0.537579", "text": "def ping_cache(redis_instance):\n try:\n redis_instance.ping()\n self.logger.info('Redis instance is available!')\n return True\n except Exception as e:\n self.logger.exception('Redis instance is unavailable on ping!. Details : {}'.format(str(e)))\n return False", "title": "" }, { "docid": "53ba22709920a4c59a2d22341b915b6f", "score": "0.5375309", "text": "def new_redis_connection_pool(host=REDIS_HOST_DEFAULT,\n port=REDIS_PORT_DEFAULT,\n db=REDIS_EXPIRE_DEFAULT,\n password=''):\n return redis.ConnectionPool(host=host, port=port, db=db, password=password)", "title": "" }, { "docid": "93efe08836fc38aea49deb2b880c3e7c", "score": "0.5368246", "text": "def InstanceCache(method):\n\n def wrapped(*args, **kwargs):\n\n _hash = hash((args[1:], tuple(sorted(kwargs.items()))))\n _self = args[0]\n k = '__MethodCache__%s_%x' % (method.__name__, _hash)\n\n if hasattr(_self, k):\n return getattr(_self, k)\n\n ret = method(*args, **kwargs)\n setattr(_self, k, ret)\n return ret\n\n return wrapped", "title": "" } ]
d90d3b15455d45d6b597a96330b7666c
Finds location of the arm in space relative to the base.
[ { "docid": "56370a7d6be6343b3cbb6bd1907dcf21", "score": "0.55456394", "text": "def armLocation(length, theta, position = [0,0]):\n #print \"Angle:\",theta\n width = 263.5\n dx = 125\n dy = 40\n p1 = (position[0]+dx*cos(theta)+dy*cos(pi/2 - theta),position[1]-dx*sin(theta)+dy*sin(pi/2 - theta))\n p2 = (p1[0]-length*sin(theta),p1[1]-length*cos(theta))\n p3 = (p2[0]-width*cos(theta),p2[1]+width*sin(theta))\n p4 = (p3[0]+length*sin(theta),p3[1]+length*cos(theta))\n \n plt.plot([p1[0], p2[0], p3[0], p4[0], p1[0]], [p1[1], p2[1], p3[1], p4[1], p1[1]])\n plt.axis([-700, 700, -200, 700])\n plt.show()\n return [p1, p2, p3, p4]", "title": "" } ]
[ { "docid": "978abb5b8a42371efd3b228c57c9b245", "score": "0.6127112", "text": "def GetBasePosition(self):\n ...", "title": "" }, { "docid": "5dfe8dc953ab0ca7337b4fb32f866076", "score": "0.58375865", "text": "def find_arm(self, arm):\n for idx in range(self.n_arms):\n if self.arms[idx] == arm:\n return idx\n return False", "title": "" }, { "docid": "96dba53f37f3093daf7fb70afbd8a3f5", "score": "0.5778702", "text": "def _get_position(self):\n return self.client.read_holding_registers(0x0d, 0x01, unit=0x01).registers[0] / 10.", "title": "" }, { "docid": "0cfc4c60ed3ac497c37d24ec4f27a457", "score": "0.5614154", "text": "def get_wheelbase(self):\n if sum(1 for i in self.bricks if i.bl_id == '30027bc01') < 4:\n return 0\n # find wheelbase by taking difference between maximum and minimum\n # wheel position in the direction of the forward axis\n return np.around(\n np.max([\n np.dot(self.get_forward_axis(), wheel.position)\n for wheel in self.bricks if wheel.bl_id == '30027bc01'\n ]) - np.min([\n np.dot(self.get_forward_axis(), wheel.position)\n for wheel in self.bricks if wheel.bl_id == '30027bc01'\n ])\n )", "title": "" }, { "docid": "2267f7e96f22b553040885445523ead2", "score": "0.55786496", "text": "def _find_robot(self):\r\n structure = self.MAZE_STRUCTURE\r\n size_scaling = self.MAZE_SIZE_SCALING\r\n for i in range(len(structure)):\r\n for j in range(len(structure[0])):\r\n if structure[i][j] == 'r':\r\n return j * size_scaling, i * size_scaling\r\n assert False, 'No robot in maze specification.'", "title": "" }, { "docid": "0398c5d31fa0a16f02e31b5763f3b7c5", "score": "0.5531772", "text": "def relative_orbit(self):\n if self.mission == \"S1A\":\n return ((self.absolute_orbit - 73) % 175) + 1\n elif self.mission == \"S1B\":\n return ((self.absolute_orbit - 27) % 175) + 1", "title": "" }, { "docid": "cedac688f2c0931a27ed9953cb6b45ba", "score": "0.5504237", "text": "def GetBasePosition(self):\n\t\tposition, _ = (self._pybullet_client.getBasePositionAndOrientation(self.quadruped))\n\t\treturn position", "title": "" }, { "docid": "21604916e21e8ade18b647cd7e0e66cf", "score": "0.5486279", "text": "def Location(self) -> _n_8_t_1:", "title": "" }, { "docid": "93c739c5555865c05bb4bdfdc968b6e8", "score": "0.5453101", "text": "def base_offset(self):\n return self._base_offset", "title": "" }, { "docid": "deba6fdb3967eea4be06aa4c247acc5f", "score": "0.54142404", "text": "def camBase(smNum, arm):\n smNum = int(smNum)\n return (stsSpsBase\n + (smNum - 1) * stsModuleCount\n + stsCamIds[arm] * stsCamCount)", "title": "" }, { "docid": "f16645592ffec57f455a74103193b393", "score": "0.5378602", "text": "def get_base_representation(self):\n x1, y1 = self._base\n x2 = x1 + self._length * np.cos(self._angle)\n y2 = y1 + self._length * np.sin(self._angle)\n return x1, y1, x2, y2", "title": "" }, { "docid": "f47a3f2e02161ad61a781e0757388a62", "score": "0.537082", "text": "def _look(self, origin, direction):\n\n # this method is very slow but i am behind on AOC so sadly this will remain as-is\n dx, dy = direction\n x, y = origin[0] + dx, origin[1] + dy\n while 0 <= x < len(self) and 0 <= y < len(self[0]):\n spot = self[x][y]\n if spot != FLOOR:\n return spot\n x += dx\n y += dy", "title": "" }, { "docid": "a6aebcd13270eeb4c2db4e234e15d739", "score": "0.5320267", "text": "def arm(self) -> None:\n ...", "title": "" }, { "docid": "e2108e193b9ebf12325b50506b215444", "score": "0.5319708", "text": "def calculate_origin(self):\n self.pos[0] += math.cos(degrees_to_radians(30+self.character.angle)) * (32/2)\n self.pos[1] += 
math.sin(degrees_to_radians(-30-self.character.angle)) * (32/2)\n print(self.pos)", "title": "" }, { "docid": "cc28cf85818da51cf759a884c55d583e", "score": "0.5253649", "text": "def location(self):\r\n return self.rect", "title": "" }, { "docid": "cc28cf85818da51cf759a884c55d583e", "score": "0.5253649", "text": "def location(self):\r\n return self.rect", "title": "" }, { "docid": "cc28cf85818da51cf759a884c55d583e", "score": "0.5253649", "text": "def location(self):\r\n return self.rect", "title": "" }, { "docid": "1b0a94e1f728b1799be227bb1a3b69ec", "score": "0.5219788", "text": "def get_block_loc(flag_len):\n block_idx = math.ceil((flag_len - 1) / 16)\n start = 16 * block_idx\n return start", "title": "" }, { "docid": "fbaeaae05ebc51c678e774c7056728c2", "score": "0.5198893", "text": "def get_offset(self, va):\n return int(va % self.ram)", "title": "" }, { "docid": "22cc9333e29a63112fe50db1712bf2bc", "score": "0.5185065", "text": "def _locate(self):\n current = self.current_wr\n if current is None:\n return\n\n node_ahead_id = current.node_ahead.id\n self._distance_to_node_ahead = current.distance_to_node_ahead\n start_idx = self._ahead_idx if self._ahead_idx is not None else 1\n self._ahead_idx = None\n\n ids = self._nodes_data.get(NodeDataIdx.node_id)\n for idx in range(start_idx, len(ids)):\n if ids[idx] == node_ahead_id:\n self._ahead_idx = idx\n break", "title": "" }, { "docid": "cb5ebd4d1efbbc0824a935aa9421e2fd", "score": "0.5175595", "text": "def calc_belts_lens_from_position(self, x, y):\n \n x += X_MARKER_TO_MOTOR\n y += Y_MARKER_TO_MOTOR\n\n left = math.sqrt (\n math.pow(x - X_CART_LEN/2, 2) +\n math.pow(y - Y_CART_LEN, 2)\n )\n right = math.sqrt (\n math.pow (MOTORS_DIS - X_CART_LEN/2 - x, 2) +\n math.pow (y - Y_CART_LEN, 2)\n )\n \n return left, right", "title": "" }, { "docid": "9f124fa890fc799d686b1b6a6360989e", "score": "0.5147295", "text": "def target_origin(self):\n zyx = map(self.INPUT.POSITION.get,'ZYX')\n origin = [c.VALUE for c in zyx] + self.target_off\n return np.uint32(np.maximum(origin, [0,0,0]))", "title": "" }, { "docid": "4ee15340b151dbe10168499044a3e9b3", "score": "0.51434314", "text": "def getRobotPosition(self):\n return self.location", "title": "" }, { "docid": "3b104fb8eea4453692c25e685bd508d8", "score": "0.5141226", "text": "def DeviceIndependentLocation(self) -> _n_10_t_2:", "title": "" }, { "docid": "d2bceff69862d13ffe6c271e9e2f472e", "score": "0.5137226", "text": "def getRelativeShapePosition(self,shape):\n return 1 - (shape.y - self.topLeft[1]) / (self.bottomRight[1] - self.topLeft[1])", "title": "" }, { "docid": "e3c2b8e6bc86ed93a40215847f2bd865", "score": "0.51283646", "text": "def arm1(self):\n self._torque_offset = [0.5646, 0.5084, 0.5128]\n self._max_tau = [0.6, 0.55, 0.55]\n self._min_tau = [.45, .40, .4]", "title": "" }, { "docid": "391c3762574c092fa0b1989887e87e59", "score": "0.51168257", "text": "def axial_location(self):\n return self.parent_thick.get_axial_location(self.index)", "title": "" }, { "docid": "1eafc23c058c57b783f577b34c95e52f", "score": "0.51157016", "text": "def arm_calibration(self):\n self.arm_motor.run_forever(speed_sp=self.MAX_SPEED)\n while not self.touch_sensor.is_pressed:\n time.sleep(0.01)\n\n self.arm_motor.stop(stop_action=\"brake\")\n ev3.Sound.beep().wait()\n\n rev_to_position = 14.2 * 360\n self.arm_motor.run_to_rel_pos(position_sp=-rev_to_position)\n self.arm_motor.wait_while(ev3.Motor.STATE_RUNNING)\n ev3.Sound.beep().wait()\n\n self.arm_motor.position = 0 # Calibrate the down position as 0 (this line is correct as 
is).", "title": "" }, { "docid": "a1a56e9708e351a35f1ae437ad5caba8", "score": "0.51063395", "text": "def _get_origin(self):\n return self.grid[self.shape[0] // 2][self.shape[1] // 2]", "title": "" }, { "docid": "ac6a011e2fdf6c6ec0776e9d834e9934", "score": "0.50985605", "text": "def getRobotPosition(self):\n self.displayDistance()\n return self.sensor.range\n #return self.robotPos", "title": "" }, { "docid": "56bd92fbce3e9ab73ee9508caf3ed815", "score": "0.50936264", "text": "def _find_start_point(self, mirror_x):\n start_point = None\n scan_direction = 1\n first_index = self._lowest_beam_in_range(mirror_x)\n if first_index is None:\n raise InstrumentError(\n 'No beam reflected (mirror position = {})'.format(mirror_x))\n last_index, start_point = self._last_beam_sample()\n if start_point is not None and last_index is not None:\n if first_index == last_index:\n if (mirror_x - self.data[-1].mirror_position[0]) < 0:\n scan_direction = -1\n elif start_point[2] > 0:\n if first_index > last_index:\n start_point = start_point - [35, 0, 0]\n else:\n start_point = start_point + [35, 0, 0]\n scan_direction = -1\n else:\n if first_index > last_index:\n start_point = start_point + [35, 0, 0]\n scan_direction = -1\n else:\n start_point = start_point - [35, 0, 0]\n else:\n start_point = [self.tracker.axes[0].limits.lower,\n self.tracker.axes[1].limits.lower,\n self.tracker.axes[2].limits.lower]\n return (start_point, scan_direction)", "title": "" }, { "docid": "36c8c390c91716fc61057e3c87d14561", "score": "0.50850636", "text": "def bottom_right():\r\n return OpenMaya.MPoint( 0.5, -0.5, 0.0)", "title": "" }, { "docid": "3bbdf5de8a45c8fe153e93128b460270", "score": "0.5066524", "text": "def get_right_angular_pos(self):\n encoder_value = self.fr_motor.getQuadraturePosition()\n ticks_per_turn = self.ticks_per_rev_right\n return 2 * math.pi * encoder_value / ticks_per_turn", "title": "" }, { "docid": "283c414c82872bdf5531809f8aef932b", "score": "0.50640774", "text": "def get_base_link_position_orientation(self):\n # TODO: not used anywhere yet, but probably should be put in ObjectBase\n body_id = self.get_body_id()\n pos, orn = p.getBasePositionAndOrientation(body_id)\n dynamics_info = p.getDynamicsInfo(body_id, -1)\n inertial_pos = dynamics_info[3]\n inertial_orn = dynamics_info[4]\n inv_inertial_pos, inv_inertial_orn = p.invertTransform(inertial_pos, inertial_orn)\n pos, orn = p.multiplyTransforms(pos, orn, inv_inertial_pos, inv_inertial_orn)\n return pos, orn", "title": "" }, { "docid": "5eebb13d4ac5bc9ceec4ca80a977959f", "score": "0.5063631", "text": "def rWalk(self):\n for i in xrange(0,self.dims):\n self.location[i] += random.uniform(-1,1)", "title": "" }, { "docid": "cc2e1d7b2cc2241d9cc72e6480b7f20d", "score": "0.5062578", "text": "def origin():\r\n\t\treturn Point(0, 0)", "title": "" }, { "docid": "53baa4fba8b9861ee1662ab75c22692d", "score": "0.5048936", "text": "def getSpawnLocation(self):\r\n #Spawn it off the screen, at a varying x and y co-ordinate\r\n self.rect.x = random.randint(self.screen.get_width() + 1, self.screen.get_width() * 2)\r\n self.rect.y = random.randint(0, self.screen.get_height() - self.rect.height)\r\n #Check if its colliding with another reward. If it is, check that that sprite isn't itself. 
This is necessary otherwise it'll always be colliding with something in the same group\r\n collision = pygame.sprite.spritecollide(self, self.group, False)\r\n #If the sprite collides with another reward or an asteroid, recursively call the function until it finds a place where it can spawn\r\n for col in collision:\r\n if (col != self):\r\n self.getSpawnLocation()\r\n collision = pygame.sprite.spritecollideany(self, self.asteroidGroup)\r\n if (collision):\r\n self.getSpawnLocation()", "title": "" }, { "docid": "75f38e729d150aa0cfd4fc609a1facae", "score": "0.503711", "text": "def dir_to_relative(x, y):\n return np.absolute((x - y + 180) % 360 - 180)", "title": "" }, { "docid": "eedcb678dad28f7638afd36a0ef7366b", "score": "0.503468", "text": "def get_alignment_loc(self):\n self.check_is_fitted()\n return self._get_alignment_loc()", "title": "" }, { "docid": "21b2b1de8bf078e86d738408147be430", "score": "0.50225896", "text": "def _get_alignment_loc(self):\n if not hasattr(self, \"_X\"):\n # defensive error - fit should store X to self._X\n raise RuntimeError(\n \"fit needs to store X to self._X when using default get_alignment_loc\"\n )\n\n X = self._X\n\n align = self.get_alignment()\n\n return convert_align_to_align_loc(align, X)", "title": "" }, { "docid": "b129bcf80f2db337dc2cd98cbc2fa96b", "score": "0.50191087", "text": "def level_end_coords(self):\n\t\ttiles = self.getTiles()\n\t\twidth = len(tiles[0]) - 1\n\t\theight = len(tiles) - 1\n\t\tlen(tiles) - 1\n\t\treturn (self.origin[0] + (width/ROOM_WIDTH), self.origin[1] + (height/ROOM_HEIGHT))", "title": "" }, { "docid": "ebca96349ce991e4fd59c897bafd8b86", "score": "0.50171906", "text": "def bottom_offset(self):\n return self.base_offset", "title": "" }, { "docid": "08c8f82e23d87a37ac0e383115e61e18", "score": "0.500871", "text": "def determine_location(self):\n region = self.view.full_line(0)\n line = self.view.substr(region)\n if re.match(\"^#!\", line):\n return region.end()\n else:\n return 0", "title": "" }, { "docid": "bde825687966e8e5c5358d09bb734b20", "score": "0.49975467", "text": "def get_robot_position(self):\n return self.robot_pos", "title": "" }, { "docid": "0b05c52ce7fc7854d679f03dd7bfb006", "score": "0.49874872", "text": "def get_location(self):", "title": "" }, { "docid": "35599cec675b4421f4342a935a50d643", "score": "0.49595052", "text": "def pull_arm(self):\n\t\trand = np.random.beta(self.beta_parameters[:, 0], self.beta_parameters[:, 1])\n\n\t\tindex = np.argmax(rand)\n\n\t\treturn index", "title": "" }, { "docid": "8c9ecb100971373fe5aa4135841ab139", "score": "0.4958892", "text": "def loc(self):\n return self.bijector.shift", "title": "" }, { "docid": "21c1fe280595a6ca49a605b7cf823e59", "score": "0.49554467", "text": "def get_full_position(self):\n return self.head.position + self.tail.blocks_positions", "title": "" }, { "docid": "e78c98a749133aa923e53dc14e63c79c", "score": "0.49550146", "text": "def get_end_effector_pos(self):\n theta_1 = self.cur_theta[0]*np.pi\n theta_2 = self.cur_theta[1]*np.pi\n theta_3 = self.cur_theta[2]*np.pi\n #specify the anchor point\n start_x = 63\n start_y = 63\n start_point = (start_x,start_y)\n #get the link end positions\n x_link_1 = np.round(np.cos(theta_1)*self.link_length + start_x)\n y_link_1 = np.round(np.sin(theta_1)*self.link_length + start_y)\n x_link_2 = np.round(x_link_1 + np.cos(theta_1 + theta_2)*self.link_length)\n y_link_2 = np.round(y_link_1 + np.sin(theta_1 + theta_2)*self.link_length)\n x_link_3 = np.round(x_link_2 + np.cos(theta_1 + theta_2 + theta_3)*self.link_length)\n 
y_link_3 = np.round(y_link_2 + np.sin(theta_1 + theta_2 + theta_3)*self.link_length)\n link1_end_point = (int(x_link_1),int(y_link_1))\n link2_end_point = (int(x_link_2),int(y_link_2))\n link3_end_point = (int(x_link_3),int(y_link_3))\n #IPython.embed()\n return np.copy(link3_end_point)", "title": "" }, { "docid": "6356c6f5b1a9dc2ec4aded7c3a732875", "score": "0.49505723", "text": "def get_base(self):\r\n return self.base", "title": "" }, { "docid": "33497dd2adaeefe114e3f5df3835f1b5", "score": "0.49503732", "text": "def _match_base(nucleotide, min_atoms_per_base):\n return_hbond_masks = [None] * 2\n # Standard vectors containing the origin and the base normal vectors\n vectors = np.array([[0, 0, 0], [0, 0, 1]], np.float)\n\n # Check base type and match standard base.\n if (nucleotide.res_name[0] in _adenine_containing_nucleotides):\n std_base = _std_adenine\n std_ring_centers = _std_adenine_ring_centers\n std_hbond_masks = _std_adenine_hbond_masks\n elif (nucleotide.res_name[0] in _thymine_containing_nucleotides):\n std_base = _std_thymine\n std_ring_centers = _std_thymine_ring_centers\n std_hbond_masks = _std_thymine_hbond_masks\n elif (nucleotide.res_name[0] in _cytosine_containing_nucleotides):\n std_base = _std_cytosine\n std_ring_centers = _std_cytosine_ring_centers\n std_hbond_masks = _std_cytosine_hbond_masks\n elif (nucleotide.res_name[0] in _guanine_containing_nucleotides):\n std_base = _std_guanine\n std_ring_centers = _std_guanine_ring_centers\n std_hbond_masks = _std_guanine_hbond_masks\n elif (nucleotide.res_name[0] in _uracil_containing_nucleotides):\n std_base = _std_uracil\n std_ring_centers = _std_uracil_ring_centers\n std_hbond_masks = _std_uracil_hbond_masks\n else:\n warnings.warn(\n f\"Base Type {nucleotide.res_name[0]} not supported. \"\n f\"Unable to check for basepair\",\n UnexpectedStructureWarning\n )\n return None\n\n # Check if the structure uses PDBv3 or PDBv2 atom nomenclature.\n if (\n np.sum(np.isin(std_base[1].atom_name, nucleotide.atom_name)) >\n np.sum(np.isin(std_base[0].atom_name, nucleotide.atom_name))\n ):\n std_base = std_base[1]\n else:\n std_base = std_base[0]\n\n # Add the ring centers to the array of vectors to be transformed.\n vectors = np.vstack((vectors, std_ring_centers))\n \n # Match the selected std_base to the base.\n fitted, transformation = superimpose(\n nucleotide[np.isin(nucleotide.atom_name, std_base.atom_name)],\n std_base[np.isin(std_base.atom_name, nucleotide.atom_name)]\n )\n\n # Transform the vectors\n trans1, rot, trans2 = transformation\n vectors += trans1\n vectors = np.dot(rot, vectors.T).T\n vectors += trans2 \n # Normalize the base-normal-vector \n vectors[1,:] = vectors[1,:]-vectors[0,:]\n norm_vector(vectors[1,:])\n\n # Investigate the completeness of the base:\n # \n # A difference in length of zero means the base contains all atoms\n # of the std_base \n length_difference = len(std_base) - len(fitted)\n \n if (length_difference > 0 and len(fitted) >= min_atoms_per_base):\n # If the base is incomplete but contains 3 or more atoms of the \n # std_base, transform the complete std_base and use it to\n # approximate the base.\n warnings.warn(\n f\"Base with res_id {nucleotide.res_id[0]} and chain_id \" \n f\"{nucleotide.chain_id[0]} is not complete. 
Attempting to \"\n f\"emulate with std_base.\", IncompleteStructureWarning\n )\n return_base = superimpose_apply(std_base, transformation)\n return_hbond_masks = std_hbond_masks\n elif (length_difference > 0):\n # If the base is incomplete and contains less than 3 atoms of \n # the std_base, throw warning\n warnings.warn(\n f\"Base with res_id {nucleotide.res_id[0]} and chain_id \"\n f\"{nucleotide.chain_id[0]} has an overlap with std_base \"\n f\"which is less than 3 atoms. Unable to check for basepair.\",\n IncompleteStructureWarning\n )\n return None\n else:\n # If the base is complete use the base for further calculations.\n #\n # Generate a boolean mask containing only the base atoms and\n # their hydrogens (if available), disregarding the sugar atoms\n # and the phosphate backbone.\n base_atom_mask = np.ones(len(nucleotide), dtype=bool)\n for i in range(len(nucleotide)):\n if (\n (\"'\" in nucleotide[i].atom_name)\n or (\"*\" in nucleotide[i].atom_name)\n or ((nucleotide[i].atom_name not in std_base.atom_name)\n and (nucleotide[i].element != \"H\"))\n ):\n base_atom_mask[i] = False\n \n # Create boolean masks for the AtomArray containing the bases` \n # heteroatoms (or the usually attached hydrogens), which can act \n # as Hydrogen Bond Donors or Acceptors respectively, using the\n # std_base as a template.\n for i in range(2):\n return_hbond_masks[i] = _filter_atom_type(\n nucleotide[base_atom_mask], \n std_base[std_hbond_masks[i]].atom_name\n )\n return_base = nucleotide[base_atom_mask]\n\n return return_base, return_hbond_masks, vectors", "title": "" }, { "docid": "979c5f0192de56a3e5269c601ef76016", "score": "0.4947944", "text": "def get_into_position():\n turn_around()\n move_to_wall()\n face_north()\n move()\n face_east()", "title": "" }, { "docid": "25a0287cc5b38a7e35be60f1b820d3f6", "score": "0.49447945", "text": "def getOffset(self):\n # Offset is same for both axes since spot and ROI are square.\n offset = (self.roiSize - self.spotSize) // 2\n xStart = self.xPointOriginal - offset\n yStart = self.yPointOriginal - offset\n return (xStart, yStart)", "title": "" }, { "docid": "5370e1738e5c33793eda84e8a0ec6a51", "score": "0.49422956", "text": "def get_min_and_max_arm_angles(self):\n return self.min_arm_angle, self.max_arm_angle", "title": "" }, { "docid": "d12f43c02fb4fac577f9ea01483c8913", "score": "0.49359208", "text": "def get_rearmost_amino(self):\n num_placed = len(self.all_coordinates)\n amino = self.amino_acids[num_placed - 1]\n prev_amino = self.amino_acids[num_placed - 2]\n return amino", "title": "" }, { "docid": "39bf5f5729cb9a4135f444623f00fe62", "score": "0.49250686", "text": "def center(self):\n return (self.ul + self.lr) / 2", "title": "" }, { "docid": "98378bd1979c2d8a4e6a1cebb7f11e8e", "score": "0.49115208", "text": "def _find_rl(rls: np.ndarray) -> float:\n minimal, maximal = min(rls), max(rls)\n l, r = get_cr_lum(minimal, 0), get_cr_lum(maximal, 1)\n\n diffs = np.ediff1d(rls)\n max_diff_idx = diffs.argmax()\n\n mid_cr = np.sqrt(get_cr_lum(rls[max_diff_idx], rls[max_diff_idx + 1]))\n\n print(l, mid_cr, r)\n\n if mid_cr >= l and mid_cr >= r:\n return rl_from_garmonic_cr(rls[max_diff_idx], rls[max_diff_idx + 1])\n elif l >= r:\n return 0\n return 1", "title": "" }, { "docid": "bd1957a91b8dc97f303378aa41d3d3de", "score": "0.49091154", "text": "def inward():\r\n\t\treturn Vector3(0, 0, 1)", "title": "" }, { "docid": "cc011231abe17c08b25618662a757cb2", "score": "0.49073347", "text": "def get_instrument_position(self, instrument):\r\n try:\r\n func = 
tool_transforms[instrument]\r\n except KeyError:\r\n raise ValueError(\"Unsupported instrument: \" + instrument)\r\n\r\n mat = func(\r\n self.arm_angle_1,\r\n self.arm_angle_2,\r\n self.arm_angle_3,\r\n self.arm_angle_4,\r\n self.arm_angle_5)\r\n\r\n vec4 = mat * np.matrix([0, 0, 0, 1]).transpose()\r\n return np.squeeze(np.asarray(vec4[0:3])) # Discard w coord\r", "title": "" }, { "docid": "df14dfcb9fd7cae7249f7919d7509e5f", "score": "0.49071723", "text": "def GetImageLocation(self):\n\n ipp = self.ds.ImagePositionPatient\n iop = self.ds.ImageOrientationPatient\n\n normal = []\n normal.append(iop[1] * iop[5] - iop[2] * iop[4])\n normal.append(iop[2] * iop[3] - iop[0] * iop[5])\n normal.append(iop[0] * iop[4] - iop[1] * iop[3])\n\n loc = 0\n for i in range(0, len(normal)):\n loc += normal[i] * ipp[i]\n\n # The image location is inverted for Feet First images\n if 'PatientPosition' in self.ds:\n if ('ff' in self.ds.PatientPosition.lower()):\n loc = loc * -1\n\n return loc", "title": "" }, { "docid": "780372c59a75e6e8482c1419df9bd697", "score": "0.48869395", "text": "def origin(self):\n radius = self.radius\n return ( self._center[0] - radius,\n self._center[1] - radius,\n self._center[2] - radius )", "title": "" }, { "docid": "eb989548fe67486df06a78d31fa69396", "score": "0.48845586", "text": "def get_absolute_pose(arm_state):\n if arm_state.refFrame == ArmState.OBJECT:\n transformed_arm_state = convert_ref_frame(arm_state,\n ArmState.ROBOT_BASE)\n return transformed_arm_state.ee_pose\n else:\n return arm_state.ee_pose", "title": "" }, { "docid": "e71016cc04890716b7e16fb69203d75f", "score": "0.48746267", "text": "def target_location(self):\n target_row = int((self.__height - 1) / 2) # middle row of board\n target_col = self.__width # first col that is out of board range\n target = (target_row,target_col)\n return target", "title": "" }, { "docid": "85306cd5f2d5b1dc01a04c53e139ca1e", "score": "0.48709363", "text": "def get_pos(self):\n item = self.tape[self.dir][0] # The top Tape_Item/sequence approaching.\n return item, self.dir, self.state", "title": "" }, { "docid": "1a69eb7fcc386b6e5241d5a69f5dfdaf", "score": "0.48685372", "text": "def basePlane(self):\n return core.Base()", "title": "" }, { "docid": "013c80b361d07e5fb02a98e48c148ba1", "score": "0.48680013", "text": "def move_arm(self, new_arm_angle):\n if new_arm_angle > self.max_arm_angle:\n raise Exception('Crawling Robot: Arm Raised too high. Careful!')\n if new_arm_angle < self.min_arm_angle:\n raise Exception('Crawling Robot: Arm Raised too low. 
Careful!')\n disp = self.displacement(self.arm_angle, self.hand_angle,\n new_arm_angle, self.hand_angle)\n cur_x_pos = self.robot_pos[0]\n self.robot_pos = (cur_x_pos + disp, self.robot_pos[1])\n self.arm_angle = new_arm_angle\n\n # Position and Velocity Sign Post\n self.positions.append(self.get_robot_position()[0])\n # self.angle_sums.append(abs(math.degrees(old_arm_angle)-\n # math.degrees(new_arm_angle)))\n if len(self.positions) > 100:\n self.positions.pop(0)\n # self.angle_sums.pop(0)", "title": "" }, { "docid": "8501a4ca370405b51c68c9642d3bc075", "score": "0.4866761", "text": "def gen_target(self, arm):\n self.target = np.random.random(size=(1,)) * \\\n self.target_gain + self.target_bias\n\n if self.control_type == 'osc_x':\n return (self.target, 0)\n elif self.control_type == 'osc_y': \n return (0, self.target)\n return arm.position(self.target)", "title": "" }, { "docid": "0e25042050d761912490358edb7e7437", "score": "0.48634052", "text": "def start_pos(self):\n # mid_point = self.lattice_dims[0] // 2\n # return np.tile((mid_point, mid_point, 0, 1), (self.seq_length, 1))\n return np.tile((0, 0, 0), (self.seq_length, 1))", "title": "" }, { "docid": "31474d1b3225825d1a054fe066ee3fb1", "score": "0.48629043", "text": "def _d_lon(self, base):\n self._check_base(base)\n return self.d_lon_coslat / np.cos(base.lat)", "title": "" }, { "docid": "67be2c3b1152244f4375865753b5dd76", "score": "0.48502967", "text": "def linedir(self, node):\n try:\n cpos = self.pos(node)\n ppos = self.pos(node.parent)\n vector = ppos - cpos\n return math.atan2(vector.imag, vector.real)\n except TypeError:\n return None", "title": "" }, { "docid": "9b731bdbc210bd2c488ceb8d63cdb63b", "score": "0.48494604", "text": "def _get_base_pose(self):\n _p = self.rob.Tcp.Base\n p = V(_p[0], _p[1], _p[2])\n _r = self.rob.Tcp.Rotation.Q\n r = R(_r[3], _r[0], _r[1], _r[2])\n return Pose(p,r)", "title": "" }, { "docid": "ba49be1a14b2194a3233fd77d3b4384a", "score": "0.48438576", "text": "def center(self):\r\n return abs_alfa((self.start + self.end)/2)", "title": "" }, { "docid": "954e853d82219f7f049099f085a25c86", "score": "0.4841598", "text": "def target_location(self):\n return Board.__GOAL", "title": "" }, { "docid": "6744b67a6c5e9397f1eab72129ef90ea", "score": "0.48318288", "text": "def find_room(self, direction):\n return getattr(self.current_location, direction)", "title": "" }, { "docid": "70ec8f713b60e5e868a6af8b9bc7c3a3", "score": "0.48249695", "text": "def convert_pos_aa_mrna( aa_pos, rf_oi = 0 ):\n mrna_pos = aa_pos * 3\n rf_pos = mrna_pos - (mrna_pos % 3) + rf_oi #need to find rf = 0\n return rf_pos", "title": "" }, { "docid": "01c3d2a039f16f08e29ef88c11331ea1", "score": "0.4823974", "text": "def bottom_left():\r\n return OpenMaya.MPoint(-0.5, -0.5, 0.0)", "title": "" }, { "docid": "d6830de83b405f0dcf543f1d43588d62", "score": "0.48235077", "text": "def get_instrument_pointing(self, instrument):\r\n try:\r\n func = tool_transforms[instrument]\r\n except KeyError:\r\n raise ValueError(\"Unsupported instrument: \" + instrument)\r\n\r\n mat = func(\r\n self.arm_angle_1,\r\n self.arm_angle_2,\r\n self.arm_angle_3,\r\n self.arm_angle_4,\r\n self.arm_angle_5)\r\n\r\n # Z-axis is pointing axis. Transform the Z direction in the\r\n # instrument coordinate system to rover nav frame. 
See\r\n # Figure 3 on Page 14 of the PPCS document.\r\n\r\n # (Note the PPPCS doc includes a note on page 10 that states that the\r\n # pointing axis changed to X in Dec 2008, and that updated transform\r\n # matrices would be included in a later revision of the document. However,\r\n # I have not been able to find this updated document. The use of Z as the\r\n # pointing axis is consistent with the 2009-01-09 version of the PPPCS doc.)\r\n vec4 = mat * np.matrix([0, 0, 1, 0]).transpose()\r\n direction = np.squeeze(np.asarray(vec4[0:3])) # Discard w coord\r\n return transforms3d.utils.normalized_vector(direction) # Normalize\r", "title": "" }, { "docid": "573f0d832210d10b8a5041488ba5f92f", "score": "0.4823482", "text": "def find_right_edge(self):\n self.arm_control.add_trajectory_point(0.75, 0, self.table_height + height_above_table, \n wipe_quat[0], wipe_quat[1], wipe_quat[2], wipe_quat[3],\n\t\t \t\t\t 1000, 1000, 1000, 30, 30, 30,\n\t\t\t\t\t False, False, False, False, False, False, 3)\n self.arm_control.add_trajectory_point(0.75, 0, self.table_height, \n wipe_quat[0], wipe_quat[1], wipe_quat[2], wipe_quat[3],\n\t\t\t\t\t 1000, 1000, downward_force-2, 30, 30, 30,\n\t\t\t\t\t False, False, True, False, False, False, 6)\n self.arm_control.execute()\n\twhile self.pressure_listener.is_touching():\n self.drive_base.move(0, -0.01, 0.1)\n self.arm_control.stop_in_place()", "title": "" }, { "docid": "f65f4f29edcc2e3fd9c9f34b97aa6785", "score": "0.48221967", "text": "def origin(self):\n if hasattr(self, '_origin'):\n return self._origin\n else:\n return Point.align_from_origin(self._p1, self._p2)[0]", "title": "" }, { "docid": "64ebf45be3441c4e43a993f78a6f23c7", "score": "0.4817551", "text": "def getRobotPosition(self):\n return self.p", "title": "" }, { "docid": "1aaa60a846e1d919bc73905e6818ad3e", "score": "0.4809311", "text": "def right_corner(self):\n return self.range[-1]", "title": "" }, { "docid": "dea6a4c5e3e224a238f06943803cd4f5", "score": "0.48060584", "text": "def cutadaptLocation():", "title": "" }, { "docid": "30c98e1d6b36ac007da6c1f8041a3e32", "score": "0.48043567", "text": "def _gen_base_coords(self) -> (int, int):\n while True:\n x = random.randint(ROCK_WALL_SIZE, self.cols - ROCK_WALL_SIZE - BASE_WIDTH)\n y = random.randint(ROCK_WALL_SIZE, self.rows - ROCK_WALL_SIZE - BASE_HEIGHT)\n survey = self.survey_cells(x - BASE_MARGIN, y - BASE_MARGIN, BASE_WIDTH + 2 * BASE_MARGIN, BASE_HEIGHT + 2 * BASE_MARGIN)\n if not CellType.ChargePad in survey and not CellType.BlueWall in survey and not CellType.GreenWall in survey:\n return (x, y)", "title": "" }, { "docid": "b24b1c60321076969d2916cd4747a70e", "score": "0.47969672", "text": "def get_bottom_right(self):\r\n return self.x1, self.y1", "title": "" }, { "docid": "763a60898c002a64a19b423ad8758df3", "score": "0.4794807", "text": "def find_r(self):\n # x[:,numpy.newaxis,:] transposes and extends x;\n # put another way x.shape is changed from\n # (n,3) to (n,newaxis,3), and then the subtraction\n # of x broadcasts newaxis to have size n\n self.r = self.x[:,numpy.newaxis,:]-self.x\n # magnitude of each displacement vector\n self.r_norm = numpy.apply_along_axis(numpy.linalg.norm,2,self.r)\n return self.r,self.r_norm", "title": "" }, { "docid": "a9f807350301dfccf0bbdb6df686d1ec", "score": "0.47940114", "text": "def calculate_locations(self):\n (x, y) = self.start_location\n\n if self.direction == Direction.EAST:\n return [[x + a, y] for a in range(11)]\n elif self.direction == Direction.NORTH:\n return [[x, y + a] for a in range(11)]\n elif 
self.direction == Direction.WEST:\n return [[x - a, y] for a in range(11)]\n elif self.direction == Direction.SOUTH:\n return [[x, y - a] for a in range(11)]", "title": "" }, { "docid": "a96f8befcd39e3d563a38730afd738e4", "score": "0.4791485", "text": "def abs_max_loc(self):", "title": "" }, { "docid": "451586712053ba8ecaf7142cd5497ee6", "score": "0.47894272", "text": "def get_end_loc(self):\n return self.pos_marker.working_loc_after(\n self.raw,\n )", "title": "" }, { "docid": "7f3916e0d50766cf96b44cb9350c0ff1", "score": "0.47791073", "text": "def calcWristAngles(self):\n (x,y,z) = self.wrist_normal\n self.wrist_x = np.pi/2 - np.arctan2(z, x) - self.swing\n self.wrist_y = np.arctan2(-y, z)", "title": "" }, { "docid": "790fff9db2b5d9f2982998b3aff76b46", "score": "0.4778514", "text": "def calc_zone_r( r_in, r_out ):\n return ( 0.5*(r_out**3 + r_in**3) )**(1./3);", "title": "" }, { "docid": "bf1d835e5cf31b2bc57cc1fc41df8a47", "score": "0.47753304", "text": "def get_dir(self, start: int):\n r1, r2 = random.random() + 1e-3, random.random() + 1e-3\n if start == 0:\n return (r1, r2)\n elif start == 1:\n return (-r1, r2)\n elif start == 2:\n return (r1, -r2)\n return (-r1, -r2)", "title": "" }, { "docid": "61577009c46910281ffc8cdfae5a094d", "score": "0.4772431", "text": "def getRobotPosition(self):\n return self.pos", "title": "" }, { "docid": "d89b64987d89f9972cabc278af7acf34", "score": "0.47710732", "text": "def end_point(self) -> BearingAbstract:\n return self._bearings[-1]", "title": "" }, { "docid": "fedb15272a4f6c46d6d3f11cb532ecd6", "score": "0.47670257", "text": "def getRobotPosition(self):\n return self.position", "title": "" }, { "docid": "6c661442856eb928cb27bc2006c03d06", "score": "0.47659248", "text": "def _get_vrf_start(self):\n return self.__vrf_start", "title": "" }, { "docid": "b54c14a08ddecd16e46fa4d40e27703e", "score": "0.47632465", "text": "def magnetic(self, point):\n return point", "title": "" }, { "docid": "e4394b5d7407b5a546e7cc639e985dc2", "score": "0.47630838", "text": "def get_start_point(self) -> Point:\n curr_point: Point = self.get_bottom_left_corner()\n while not any([wall.contains_point(curr_point) for wall in self.walls]):\n curr_point.x += 1\n # now we have a bottom corner\n return Point(curr_point.x + 1, curr_point.y + 1)", "title": "" }, { "docid": "0aeee34e67cf17faf5dc70da22d67e63", "score": "0.4758094", "text": "def position(self, i):\n return self.R[i][:, None] * self.sightlines.dir[i][None, :]", "title": "" }, { "docid": "e7f5086b643d88b17ad246775dc37db6", "score": "0.47550935", "text": "def relative_path_from(self, base):\n \"Algorithm taken from pathname.rb from Ruby 1.9.2\"\n base = Pathname(base)\n\n if self.isabs() and base.isabs():\n is_rel = False\n elif not self.isabs() and not base.isabs():\n is_rel = True\n else:\n raise ValueError(\"self and base must both be relative or absolute!\")\n\n a_prefix = self._path\n a_names = []\n\n b_prefix = base._path\n b_names = []\n\n a_prefix, basename = os.path.split(a_prefix)\n while a_prefix != '' and basename != '':\n if basename != os.path.curdir:\n a_names.insert(0, basename)\n a_prefix, basename = os.path.split(a_prefix)\n\n b_prefix, basename = os.path.split(b_prefix)\n while b_prefix != '' and basename != '':\n if basename != os.path.curdir:\n b_names.insert(0, basename)\n b_prefix, basename = os.path.split(b_prefix)\n\n if a_prefix != b_prefix:\n raise \"different prefix: %r and %r\" % (a_prefix, b_prefix)\n\n while a_names and b_names and a_names[0] == b_names[0]:\n a_names.pop(0)\n b_names.pop(0)\n\n 
if os.path.pardir in b_names:\n raise ValueError, \"base includes .. in path: %r\" % base\n\n b_names = [os.path.pardir for x in b_names]\n relpath = b_names + a_names\n\n if relpath:\n return Pathname(*relpath)\n else:\n return Pathname(os.path.curdir)", "title": "" }, { "docid": "0ceb664898c54b044ff354a130a0af52", "score": "0.47544703", "text": "def estimateRelativeCoordinates(self, blob):\n x = blob.pt[0]\n y = min(479, blob.pt[1] + blob.size + self.upperCropValue) # plus because high y values mean lower in the image\n\n # Angles relative to the camera orientation\n xAngle = -(np.pi/6) * (x - 320) / 320\n yAngle = -(np.pi/8) * (y - 240) / 240\n\n # The angle, the ground/camera/object form with Y kept at zero\n theta = np.pi/2 - self.calibrationAngle + yAngle\n \n # The actual X and Y relative coordinates\n X = np.tan(theta) * self.calibrationHeight\n Y = X * np.tan(xAngle) \n\n return [ X, Y ]", "title": "" } ]
632fd86eb40d2badbab4f3c78dc6354a
Constructs a ResNet50 model.
[ { "docid": "5cd82c73ff832d739fb6bedd9ee42fa5", "score": "0.75137323", "text": "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n # if pretrained:\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "title": "" } ]
[ { "docid": "93476a99b995c376193072cffd72e9f0", "score": "0.7939585", "text": "def resnet50():\n model = models.resnet50()\n model.apply(weights_init)\n return model", "title": "" }, { "docid": "7d29192d3203989df7a6d6610b2c6abd", "score": "0.77827954", "text": "def build_model(opts):\n return resnet50_model.ResNet50(\n opts.method, opts.num_classes, opts.num_updates, opts.dropout_rate)", "title": "" }, { "docid": "d6d8c89e5d72e7e3cf2ec8035c42e1f7", "score": "0.75665265", "text": "def defineModel():\n # load the ResNet50 model\n resnet50_conv = resnet50.ResNet50( weights='imagenet', include_top=False, input_shape=(224, 224, 3) )\n\n # # freeze the layers except the last 4 layers\n # for layer in resnet50_conv.layers[:-4]:\n # layer.trainable = False\n\n # # check the trainable status of the individual layers\n # for layer in resnet50_conv.layers:\n # print(layer, layer.trainable)\n\n # create the model\n model = models.Sequential()\n\n # add the ResNet50 convolutional base model\n model.add(resnet50_conv)\n\n # add new layers\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu'))\n model.add(layers.Dropout(0.2))\n model.add(layers.Dense(512, activation='relu'))\n model.add(layers.Dropout(0.2))\n model.add(layers.Dense(1, activation='sigmoid'))\n\n # show a summary of the model. Check the number of trainable parameters\n model.summary()\n\n return model", "title": "" }, { "docid": "fbb36f21980e5cb00615d22988a3afbc", "score": "0.74759895", "text": "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "title": "" }, { "docid": "fbb36f21980e5cb00615d22988a3afbc", "score": "0.74759895", "text": "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "title": "" }, { "docid": "fbb36f21980e5cb00615d22988a3afbc", "score": "0.74759895", "text": "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "title": "" }, { "docid": "fbb36f21980e5cb00615d22988a3afbc", "score": "0.74759895", "text": "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "title": "" }, { "docid": "fbb36f21980e5cb00615d22988a3afbc", "score": "0.74759895", "text": "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "title": "" }, { "docid": "fbb36f21980e5cb00615d22988a3afbc", "score": "0.74759895", "text": "def resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "title": "" }, { "docid": "4216df71a6227fcc665f5dc1629735c4", "score": "0.7386862", "text": "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "4216df71a6227fcc665f5dc1629735c4", "score": "0.7386862", "text": "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": 
"4216df71a6227fcc665f5dc1629735c4", "score": "0.7386862", "text": "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "dc2f68ee5432632237cdb82cb2e82e31", "score": "0.7379036", "text": "def resnet50(pretrained=False, average_pool_size = 7,**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], average_pool_size, **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "title": "" }, { "docid": "aa19c4518b9b6ba415fe2c1d05a60c49", "score": "0.7326761", "text": "def resnet50(**kwargs):\n model = ResNet(STM_Bottleneck, [3, 4, 6, 3], **kwargs)\n checkpoint = model_zoo.load_url(model_urls['resnet50'])\n layer_name = list(checkpoint.keys())\n for ln in layer_name:\n if 'conv' in ln or 'downsample.0.weight' in ln and not 'cstm' in ln or 'cmm' in ln:\n checkpoint[ln] = checkpoint[ln].unsqueeze(2)\n model.load_state_dict(checkpoint, strict=False)\n\n return model", "title": "" }, { "docid": "9015161357b825dace50a0396aad5d01", "score": "0.73022145", "text": "def resnet(self):\n model = keras.applications.resnet50.ResNet50(include_top=True,weights=None)\n model.layers.pop()\n last = model.layers[-1].output\n x = Dense(102, activation=\"softmax\")(last)\n model = Model(model.input, x)\n model.compile(optimizer='adam', loss='categorical_crossentropy')\n return model", "title": "" }, { "docid": "f7efbbaeaa2f28ea83c1caadb248aef1", "score": "0.73003805", "text": "def resnet50(pretrained=True, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n param_dict = load_checkpoint(cfg.PRETRAINED_RESNET_50)\n load_param_into_net(model, param_dict)\n\n return model", "title": "" }, { "docid": "0ce89dea804aaed5fc16631097cacc14", "score": "0.724254", "text": "def resnet50(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False)\n return model", "title": "" }, { "docid": "0ce89dea804aaed5fc16631097cacc14", "score": "0.724254", "text": "def resnet50(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False)\n return model", "title": "" }, { "docid": "0ce89dea804aaed5fc16631097cacc14", "score": "0.724254", "text": "def resnet50(num_classes, pretrained=False, **kwargs):\n model = ResNet(num_classes, Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50'], model_dir='.'), strict=False)\n return model", "title": "" }, { "docid": "a51c5c73286e3af91229964801f59fa7", "score": "0.72298306", "text": "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3],\n groups=1,\n width_per_group=64,\n **kwargs)\n return model", "title": "" }, { "docid": "5d669dbba8b672e595cc08fc9c1126e6", "score": "0.722397", "text": "def resnet50(pretrained=False, mode='rgb', **kwargs):\n if mode == 'flow':\n model = ResNet(Bottleneck, [3, 4, 6, 3], inp=20, **kwargs)\n else:\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "title": "" }, { "docid": "be8e7d38b215a0f898f49a8eaeab6db2", "score": "0.7196045", "text": "def resnet50c(num_classes=100):\n model = models.resnet50(num_classes=num_classes)\n model.conv1 
= nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n model.avgpool = nn.AvgPool2d(4, stride=1)\n model.maxpool = nn.Identity()\n model.apply(weights_init)\n return model", "title": "" }, { "docid": "64ea77631ff9bd76cc57a8f07e4bd248", "score": "0.7173291", "text": "def fresnet50_v3(**kwargs):\n model = ResNet(BasicBlock_v3, [3, 4, 14, 3], **kwargs)\n # if pretrained:\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model", "title": "" }, { "docid": "d6150726ebb746d80d5293fc9f8767bf", "score": "0.71488065", "text": "def getResNet50( in_shape, layers ):\n\n # create resnet50 architecture with random weights \n model = ResNet50( weights=None,\n include_top=False, \n input_shape=in_shape )\n\n # add flatten layer\n x = Flatten()( model.layers[-1].output )\n for layer in layers[ 'fc' ]:\n \n # add fc + optional dropout layers\n x = Dense( layer[ 'units' ], activation=layer[ 'activation' ], kernel_initializer='he_uniform' )(x)\n if 'dropout' in layer:\n x = Dropout( layer[ 'dropout' ] )( x )\n \n # add output layers\n for layer in layers[ 'out' ]:\n x = Dense( layer[ 'units' ], activation=layer[ 'activation' ] )(x)\n\n return Model( inputs=model.inputs, output=x )", "title": "" }, { "docid": "1c1f5b5fbf470a60c46069da8e0c539f", "score": "0.70192593", "text": "def resnet50(gpus):\n\tfrozen = 0\n\tbase_model = ResNet50(\n\t\tweights='imagenet', include_top=False, input_shape=(224, 224, 3))\n\n\tx = Flatten()(base_model.output)\n\toutput = Dense(len(CLASSES), activation='softmax', name='predictions')(x)\n\n\treturn _compile(gpus, base_model.input, output, frozen)", "title": "" }, { "docid": "c1a308d87060a67748f520b64b81cd7e", "score": "0.7003919", "text": "def sk_resnet50(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "d1d18c77b257300b75c985e51bd88264", "score": "0.6956338", "text": "def resnet50(pretrained=True, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n print('using pre-trained model')\n\n model.load_state_dict(torch.load('./model_train_nepoch.pth'))\n model.eval()\n # pretrained_state = model_zoo.load_url(model_urls['resnet50'])\n # model_state = model.state_dict()\n # pretrained_state = {k: v for k, v in pretrained_state.items() if\n # k in model_state and v.size() == model_state[k].size()}\n # model_state.update(pretrained_state)\n # model.load_state_dict(model_state)\n\n print('download finished')\n return model", "title": "" }, { "docid": "1625209e4fac05b4ea74f38c06cd2074", "score": "0.6954455", "text": "def resnet50(pretrained=False, progress=True, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], shortcut_type='B', **kwargs)\n if pretrained:\n checkpoint = load_state_dict_from_url(model_urls['resnet50'],\n progress=progress)\n state_dict = checkpoint['state_dict']\n\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove 'module.'\n new_state_dict[name]=v\n model.load_my_state_dict(new_state_dict)\n\n return model", "title": "" }, { "docid": "6b9ef992943468eeb8efc159c8a0c3b3", "score": "0.69440067", "text": "def resnet50(pretrained=False, **kwargs):\n model = I3DResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n\n return model", "title": "" }, { "docid": "c3d64f58ffa60e7f1331df91d6f30f59", "score": "0.68242407", "text": "def resnet50(pretrained=False, num_classes=1000, in_chans=3, **kwargs):\n\tdefault_cfg = default_cfgs['resnet50']\n\tmodel = ResNet(Bottleneck, [3, 4, 6, 3], 
num_classes=num_classes, in_chans=in_chans, **kwargs)\n\tmodel.default_cfg = default_cfg\n\tif pretrained:\n\t\tload_pretrained(model, default_cfg, num_classes, in_chans)\n\treturn model", "title": "" }, { "docid": "96c7086ad713e74f2ca54c2aef070b48", "score": "0.6806889", "text": "def resnext50(**kwargs):\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "46938b2039dc32befcad0fa60b3eea91", "score": "0.6741958", "text": "def ResNet50(output_stride, ibn_mode='none', pretrained=True, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], output_stride, **kwargs)\n\n if pretrained:\n _load_pretrained_model(model, path='pretrained/resnet50_sw.pth')\n return model", "title": "" }, { "docid": "e5fd1610bee4540b4355637908478ec3", "score": "0.6713189", "text": "def create_model() -> Module:\n enc = ResNet34(in_channels=3)\n ModelsWeightsStorage().load(enc, 'imagenet')\n model = UNetDecoder(enc, classes_num=1)\n return ModelWithActivation(model, activation='sigmoid')", "title": "" }, { "docid": "e1923f400a7ebcaa012e6bace7ab4360", "score": "0.66717356", "text": "def load_model():\n model = ResNet50(weights=\"imagenet\")\n print(\"Model loaded\")\n return model", "title": "" }, { "docid": "114905ce6eea53d2215245d768e379a0", "score": "0.66499907", "text": "def create_model() -> Module:\n enc = ResNet18(in_channels=3)\n ModelsWeightsStorage().load(enc, 'imagenet')\n model = UNetDecoder(enc, classes_num=1)\n return ModelWithActivation(model, activation='sigmoid')", "title": "" }, { "docid": "ddfac93d5f047b371c7b3a70b5cba332", "score": "0.664587", "text": "def resnet50_3d(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "45e12218e404882505430686f524c5b7", "score": "0.65999156", "text": "def construct_model(input_size, n_class):\n n_base = 16 # number of channels in the first level of U-net\n drate = 0.1 # dropout rate\n x_input = Input(shape=input_size + (1,)) # input image has only one channel\n y_output = box_unet(x_input, n_class=n_class, n_base=n_base, drate=drate, name='unet')\n\n model = Model(inputs=[x_input], outputs=[y_output])\n\n return model", "title": "" }, { "docid": "3812062452649b82d29598975e5f2107", "score": "0.6520237", "text": "def stnet50(**kwargs):\n\n model = Stnet_Res_model(\n Bottleneck,\n [3, 4, 6, 3],\n **kwargs,\n )\n return model", "title": "" }, { "docid": "b215ca787bde4c9100db8834d7caaf7f", "score": "0.6519174", "text": "def create_model():\n model = Sequential()\n # image normalization\n model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))\n # cropping\n model.add(Cropping2D(cropping=((60, 25), (0, 0))))\n model.add(Conv2D(filters=24, kernel_size=5, strides=2, activation=\"relu\"))\n model.add(Conv2D(filters=36, kernel_size=5, strides=2, activation=\"relu\"))\n model.add(Conv2D(filters=48, kernel_size=5, strides=2, activation=\"relu\"))\n model.add(Conv2D(filters=64, kernel_size=3, activation=\"relu\"))\n model.add(Conv2D(filters=64, kernel_size=3, activation=\"relu\"))\n model.add(Flatten())\n model.add(Dense(100, activation=\"relu\"))\n model.add(Dense(50, activation=\"relu\"))\n model.add(Dense(10, activation=\"relu\"))\n model.add(Dense(1))\n\n model.compile(optimizer=\"adam\", loss=\"mse\", metrics=[\"accuracy\"])\n model.summary()\n plot_model(model, to_file=\"model.png\", show_shapes=True)\n\n return model", "title": "" }, { "docid": "54a90750479cae2c64dd271a5b2a271b", "score": "0.651713", "text": "def create_model():\n\tmodel = 
Sequential([\n\t\tConv2D(filters=24, kernel_size=5, strides=2, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),\n\t\tConv2D(filters=36, kernel_size=5, strides=2, padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.001)),\n\t\tConv2D(filters=48, kernel_size=5, strides=2, padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.001)),\n\t\tConv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.001)),\n\t\tConv2D(filters=64, kernel_size=3, padding='same', activation='relu', kernel_regularizer=regularizers.l2(0.001)),\n\t\tFlatten(),\n\t\tDense(units=250, activation='relu', kernel_regularizer=regularizers.l2(0.001)),\n\t\tDense(units=dataset.class_count, activation='softmax')\n Dense(units=dataset.class_count, activation='softmax')\n\t])\n\n\tmodel.compile(\n\t\toptimizer='adam',\n\t\tloss='sparse_categorical_crossentropy',\n\t\tmetrics=['accuracy']\n\t)\n\treturn model", "title": "" }, { "docid": "54f2461d6902777b279bf4afa6195935", "score": "0.6505197", "text": "def create_model(self):\n input = Input(shape=self.input_shape)\n\n out = Flatten()(input)\n out = Dense(1024, activation=\"relu\", name=\"rcnn_dense1\")(out)\n out = Dropout(0.5)(out)\n out = Dense(1024, activation=\"relu\", name=\"rcnn_dense2\")(out)\n out = Dropout(0.5)(out)\n\n rcnn_classifier = Dense(self.number_of_classes+1, activation='sigmoid', name='rcnn_classifier')(out)\n rcnn_regressor = Dense(4*(self.number_of_classes), activation='linear', name='rcnn_regressor')(out)\n\n rcnn_model = Model(inputs=input, outputs=[rcnn_classifier, rcnn_regressor])\n\n rcnn_model.compile(optimizer=\"adam\", loss=[\"categorical_crossentropy\",\"logcosh\"])\n\n return rcnn_model", "title": "" }, { "docid": "e8adcb013ea9f77355fc5ebe7c390d13", "score": "0.6483124", "text": "def create_model():\n opt = Adam(lr=settings.ADAM_LR)\n model = AlexNetNN(num_classes=settings.NUM_CLASSES).build_model()\n model.compile(loss=\"binary_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n\n return model", "title": "" }, { "docid": "b4e96cea9ddbc3946f501d46b74a4daf", "score": "0.6475144", "text": "def create_model():\n\n\n\tnum_classes = 10\n\tmodel = Sequential()\n\tinput_shape = (28, 28)\n\tmodel.add(Flatten(input_shape=input_shape))\n\tmodel.add(Dense(28*28*10, activation=\"tanh\"))\n\tmodel.add(Dense(1000, activation=\"tanh\"))\n\tmodel.add(Dense(num_classes, activation=\"softmax\"))\n\topt = keras.optimizers.SGD(lr=0.001, decay=1e-6)\n\n\t# Let's train the model using RMSprop\n\tmodel.compile(loss='categorical_crossentropy',\n\t\t optimizer=opt,\n\t\t metrics=['accuracy'])\n\n\treturn model", "title": "" }, { "docid": "00e86d63cfed2eccf64bdfd144f5036b", "score": "0.644241", "text": "def create_model():\n pre_trained_model_path = \"./config/resnet56_on_cifar10.pth\"\n model = resnet56(10, pretrained=True, path=pre_trained_model_path)\n logging.info(\"load pretrained model successfully\")\n return model", "title": "" }, { "docid": "a01717955b989d041e6e1faead4bca96", "score": "0.64002526", "text": "def ResNet50(include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n\n def stack_fn(x):\n x = stack1(x, 64, 3, stride1=2, name='conv2')\n x = stack1(x, 128, 4, name='conv3')\n x = stack1(x, 256, 14, name='conv4')\n return stack1(x, 512, 3, name='conv5')\n\n return ResNet(stack_fn, False, False, 'resnet50', include_top, weights,\n input_tensor, input_shape, pooling, classes, 
**kwargs)", "title": "" }, { "docid": "acfa0cd6d327a8a04f8c85c4e7717b7c", "score": "0.6377983", "text": "def create_rgb_model() -> models.Sequential:\n shape = (128, 32, 1)\n\n model = models.Sequential(name=\"LeNet5\")\n model.add(layers.Conv2D(20, 5, activation='tanh', input_shape=shape, padding='same'))\n model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(layers.Conv2D(50, 5, activation='tanh', padding='same'))\n model.add(layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(500))\n model.add(layers.ReLU())\n model.add(layers.Dense(2))\n model.add(layers.Softmax())\n\n model.summary()\n return model", "title": "" }, { "docid": "f446317b8f9cbfb9b313633fe55c34d0", "score": "0.6371014", "text": "def tv_resnet50(pretrained=False, num_classes=1000, in_chans=3, **kwargs):\n\tmodel = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, **kwargs)\n\tmodel.default_cfg = default_cfgs['tv_resnet50']\n\tif pretrained:\n\t\tload_pretrained(model, model.default_cfg, num_classes, in_chans)\n\treturn model", "title": "" }, { "docid": "ebf6f1613205a1be477b9bfa4e6a7e6d", "score": "0.6343152", "text": "def resnet50(**kwargs):\n return _resnet(Bottleneck, [3, 4, 6, 3], **kwargs)", "title": "" }, { "docid": "ae3f15d87d8cd25a0700a60969019d75", "score": "0.63414055", "text": "def create_model():\n\n model = Sequential()\n model.add(Lambda(rgb_to_grayscale, input_shape=(160, 320, 3)))\n model.add(Lambda(normalize))\n model.add(Cropping2D(((70, 30), (0, 0))))\n model.add(Conv2D(6, 5, 5))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D())\n model.add(Conv2D(10, 5, 5))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D())\n model.add(Conv2D(16, 5, 5, activation='relu'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D())\n model.add(Flatten())\n model.add(Dense(100))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(84))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1, activation='linear'))\n model.compile(optimizer='adam', loss='mse')\n return model", "title": "" }, { "docid": "a55e699206d47425b4bf6c5672e0e15d", "score": "0.63383204", "text": "def preresnet50(pretrained=False, progress=True, **kwargs):\n model = _resnet('preresnet50', PreActBottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)\n model.apply(weights_init)\n return model", "title": "" }, { "docid": "88d50ad5fee55a3d1af5812aa37ef051", "score": "0.6336567", "text": "def resnet50_model(img_rows, img_cols, color_type=1, num_classes=None):\n\n # Handle Dimension Ordering for different backends\n global bn_axis\n if K.image_dim_ordering() == 'tf':\n bn_axis = 3\n img_input = Input(shape=(img_rows, img_cols, color_type))\n else:\n bn_axis = 1\n img_input = Input(shape=(color_type, img_rows, img_cols))\n\n x = ZeroPadding2D((3, 3))(img_input)\n x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')\n\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n x = 
identity_block(x, 3, [128, 128, 512], stage=3, block='b')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')\n\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')\n\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\n\n # Fully Connected Softmax Layer\n x_fc = AveragePooling2D((7, 7), name='avg_pool')(x)\n x_fc = Flatten()(x_fc)\n x_fc = Dense(1000, activation='softmax', name='fc1000')(x_fc)\n\n # Create model\n model = Model(img_input, x_fc)\n\n # Load ImageNet pre-trained data\n if K.image_dim_ordering() == 'th':\n # Use pre-trained weights for Theano backend\n weights_path = 'pretrained_models/resnet50_weights_th_dim_ordering_th_kernels.h5'\n else:\n # Use pre-trained weights for Tensorflow backend\n weights_path = 'pretrained_models/resnet50_weights_tf_dim_ordering_tf_kernels.h5'\n\n model.load_weights(weights_path)\n\n # Truncate and replace softmax layer for transfer learning\n # Cannot use model.layers.pop() since model is not of Sequential() type\n # The method below works since pre-trained weights are stored in layers but not in the model\n x_newfc = AveragePooling2D((7, 7), name='avg_pool')(x)\n x_newfc = Flatten()(x_newfc)\n x_newfc = Dense(num_classes, activation='softmax', name='fc10')(x_newfc)\n\n # Create another model with our customized softmax\n model = Model(img_input, x_newfc)\n\n # Learning rate is changed to 0.001\n sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])\n\n return model", "title": "" }, { "docid": "19d9367bab89c466d07c5b990bb4cbcd", "score": "0.6317331", "text": "def create_baseline():\n # create model\n input_dimentions = 559\n model = models.Sequential()\n model.add(layers.Dense(60, input_dim=input_dimentions, kernel_initializer='normal', activation='relu'))\n model.add(layers.Dense(1, kernel_initializer='normal', activation='sigmoid'))\n # Compile model\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model", "title": "" }, { "docid": "bd5a2236b410c07081afe2b29a47b9b4", "score": "0.6298865", "text": "def create_model(self):\n self.model = model.create_model(self.num_classes, self.config[\"nms_thresh\"],\n self.config[\"score_thresh\"])", "title": "" }, { "docid": "f038d132ea7347daf598f614b564b86b", "score": "0.62936455", "text": "def __init__(self,\n batch_size: int,\n target_size: Tuple[int, int, int],\n num_predictions: int,\n num_filters: int,\n nn_depth: int,\n learning_rate: float,\n model_name: str,\n input_name: str,\n output_name: str,\n path_to_model_weights: str,\n regularization: float) -> None:\n self.batch_size = batch_size\n self.target_size = target_size\n self.num_predictions = num_predictions\n self.path_to_model_weights = path_to_model_weights\n self.nn = CustomResNet18(\n input_shape=self.target_size,\n num_filters=num_filters,\n regularization=regularization,\n input_name=input_name,\n output_name=output_name,\n num_predictions=self.num_predictions,\n 
nn_depth=nn_depth\n ).build()\n self.nn.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),\n loss=tf.keras.losses.MeanSquaredError(),\n metrics=tf.keras.metrics.CosineSimilarity())\n self.model_name = model_name\n self.model_summary = self.nn.summary()", "title": "" }, { "docid": "47484a096be38e3c6cbc40d04674324f", "score": "0.62844884", "text": "def deeplabv3_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):\n return _load_model('deeplabv3', 'resnet50', num_classes, output_stride=output_stride,\n pretrained_backbone=pretrained_backbone)", "title": "" }, { "docid": "cecbc8340fc5ac9bb9f3cf490a367b17", "score": "0.6277839", "text": "def deeplabv3_resnet50(n_classes=2, output_stride=8, pretrained_backbone=False,**kwargs):\n return _load_model('deeplabv3', 'resnet50', num_classes=n_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone,**kwargs)", "title": "" }, { "docid": "7427ceb0ae3415c7b416af689352e209", "score": "0.62770325", "text": "def deeplabv3plus_resnet50(n_classes=2, output_stride=8, pretrained_backbone=False, **kwargs):\n return _load_model('deeplabv3plus', 'resnet50', num_classes=n_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone, **kwargs)", "title": "" }, { "docid": "cdf63d66b2f891ab9d7f069070823a1e", "score": "0.6269192", "text": "def __initialize_CNN_model(self):\n model = VGG16()\n model.layers.pop()\n model = Model(inputs=model.inputs, outputs=model.layers[-1].output)\n return model", "title": "" }, { "docid": "8aec49290ebd01f8c83e26b9b48bf38a", "score": "0.62674505", "text": "def create_model():\n model = Sequential()\n model.add(Flatten(input_shape=(1,) + OBSERVATION_SPACE))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(NB_ACTIONS, activation='softmax'))\n print(model.summary())\n\n # For a classification problem of actions\n model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n return model", "title": "" }, { "docid": "2662b918179c2e94328abdcaf5be0e0a", "score": "0.6267147", "text": "def deeplabv3plus_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):\n return _load_model('deeplabv3plus', 'resnet50', num_classes, output_stride=output_stride,\n pretrained_backbone=pretrained_backbone)", "title": "" }, { "docid": "d93454c113d3e152d068fdd3834bd9c2", "score": "0.6266001", "text": "def resnet18c(num_classes=100):\n model = models.resnet18(num_classes=num_classes)\n model.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n model.avgpool = nn.AvgPool2d(4, stride=1)\n model.maxpool = nn.Identity()\n model.apply(weights_init)\n return model", "title": "" }, { "docid": "2549eb50eb09f30f60342137386b6a35", "score": "0.62604684", "text": "def fish_res50_config():\n mc = base_model_config('fish')\n\n mc.NET = 'resnet50'\n\n mc.IMAGE_WIDTH = 1280\n mc.IMAGE_HEIGHT = 720\n mc.BATCH_SIZE = 10\n\n mc.WEIGHT_DECAY = 0.0001#0.0001\n mc.LEARNING_RATE = 0.01#0.02\n mc.DECAY_STEPS = 10000\n mc.MAX_GRAD_NORM = 1.0\n mc.MOMENTUM = 0.9\n mc.LR_DECAY_FACTOR = 0.5\n\n mc.LOSS_COEF_BBOX = 0.5#5.\n mc.LOSS_COEF_CONF_POS = 75.\n mc.LOSS_COEF_CONF_NEG = 100.0\n mc.LOSS_COEF_CLASS = 1.0\n\n mc.PLOT_PROB_THRESH = 0.4\n mc.NMS_THRESH = 0.4\n mc.PROB_THRESH = 0.005\n mc.TOP_N_DETECTION = 64\n\n mc.DATA_AUGMENTATION = True\n mc.DRIFT_X = 150\n mc.DRIFT_Y = 100\n\n mc.SHAPE_DIM = 3\n\n diameters = 
[87, 150, 202, 233, 278]\n radii = 0.5 * np.array(diameters)\n anchors = set_anchors(mc, radii)\n\n mc.ANCHOR_BOX = anchors\n mc.ANCHORS = len(anchors)\n mc.ANCHOR_PER_GRID = len(radii)\n\n return mc", "title": "" }, { "docid": "426de0df8e03326bb0760dbfc6d27a50", "score": "0.6249012", "text": "def resnet50_model(img_rows, img_cols, color_type=1, num_class=3):\n\n bn_axis = 3\n # img_input = Input(shape=(color_type, img_rows, img_cols))\n img_input = Input(shape=(img_rows, img_cols, color_type))\n x = ZeroPadding2D((3, 3))(img_input)\n x = Convolution2D(64, 7, 7, subsample=(2, 2), name='conv1')(x)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x) # dim_ordering='th'\n\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')\n\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')\n\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')\n\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\n\n # Fully Connected Softmax Layer\n x = AveragePooling2D((7, 7), name='avg_pool')(x)\n # x_fc = AveragePooling2D((7, 7), name='avg_pool')(x)\n # x_fc = Flatten()(x_fc)\n # x_fc = Dense(1000, activation='softmax', name='fc1000')(x_fc)\n\n # Create model\n model = Model(img_input, x)\n\n # Load ImageNet pre-trained data\n # weights_path = './model/resnet50_weights_tf_dim_ordering_tf_kernels.h5'\n weights_path = './model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n model.load_weights(weights_path)\n\n # Truncate and replace softmax layer for transfer learning\n # Cannot use model.layers.pop() since model is not of Sequential() type\n # The method below works since pre-trained weights are stored in layers but not in the model\n # x_newfc = AveragePooling2D((7, 7), name='avg_pool')(x)\n x_newfc = Flatten(input_shape=model.output_shape[1:])(x)\n x_newfc = Dense(num_class, activation='sigmoid', name='fc10')(x_newfc)\n\n # Create another model with our customized softmax\n model = Model(img_input, x_newfc)\n model.summary()\n # Learning rate is changed to 0.001\n sgd = SGD(lr=1e-4, decay=1e-6, momentum=0.9) #sparse_categorical_crossentropy\n model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])\n\n return model", "title": "" }, { "docid": "b59d3ee4ba09dc4e589c2a47e720c714", "score": "0.6236706", "text": "def ranking_resnet_50(**kwargs):\n model = RankingResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n\n return model", "title": "" }, { "docid": "d65b2fff7c672e151b0c132fe22fac48", "score": "0.61526597", "text": "def deepbase_dcn_resnet50(self, **kwargs):\r\n model = DCNResNet(Bottleneck, [3, 4, 6, 3], deep_base=True,\r\n bn_type=self.configer.get('network', 'bn_type'), **kwargs)\r\n model = 
ModuleHelper.load_model(model, \r\n all_match=False, \r\n pretrained=self.configer.get('network', 'pretrained'),\r\n network=\"dcnet\")\r\n return model", "title": "" }, { "docid": "483ca121c09ec6653ca05aded492bf67", "score": "0.6134833", "text": "def create_model():\n \n global model\n global nb_neurones ## number of neurones per layer\n global nb_layers ## nb_layers-1 is the number of layers in the NN\n model = Sequential()\n \n ## Definition of the layers \n \n model.add(Dense(nb_neurones, input_dim=200, activation='tanh')) ## input layer\n for l in range(nb_layers):\n model.add(Dense(nb_neurones, activation='tanh'))\n model.add(Dense(2, activation='softmax')) ## output layer\n\n # Creation of the model\n \n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n \n return model", "title": "" }, { "docid": "ee8e29754186416341c9b24fcda098d8", "score": "0.61298704", "text": "def _build_model(in_channels: int, n_outputs: int, **kwargs):\n\n _model = t_models.resnet18(\n pretrained=False, num_classes=n_outputs, **kwargs)\n _model.conv1 = torch.nn.Conv2d(in_channels, 64, kernel_size=7,\n stride=2, padding=3, bias=False)\n\n return _model", "title": "" }, { "docid": "9b8ecda5eb46afe99c8c1d2f1389dbbd", "score": "0.6117336", "text": "def make_model(max_len):\n model = Sequential()\n\n model.add(Conv1D(4, 4, padding='same', activation='elu', input_shape=(max_len, 1)))\n model.add(MaxPooling1D(4))\n\n model.add(Conv1D(8, 4, padding='same', activation='elu'))\n model.add(MaxPooling1D(4))\n\n model.add(Conv1D(16, 4, padding='same', activation='elu'))\n model.add(MaxPooling1D(4))\n\n model.add(Conv1D(32, 4, padding='same', activation='elu'))\n model.add(MaxPooling1D(4))\n\n model.add(Conv1D(64, 4, padding='same', activation='elu'))\n model.add(MaxPooling1D(4))\n\n model.add(Conv1D(128, 4, padding='same', activation='elu'))\n model.add(MaxPooling1D(4))\n\n model.add(Conv1D(256, 4, padding='same', activation='elu'))\n model.add(MaxPooling1D(4))\n\n model.add(GlobalAveragePooling1D())\n\n model.add(Dense(64, activation='elu'))\n model.add(Dense(2, activation=\"softmax\"))\n\n return model", "title": "" }, { "docid": "ee07c8436c32de66c4a3b22f370c8e5e", "score": "0.6112427", "text": "def _create_model(self):\n module_layer = hub.KerasLayer(\n handle=self._model_spec.uri, trainable=self._hparams.do_fine_tuning)\n\n image_size = self._model_spec.input_image_shape\n\n self._model = tf.keras.Sequential([\n tf.keras.Input(shape=(image_size[0], image_size[1], 3)), module_layer,\n tf.keras.layers.Dropout(rate=self._model_options.dropout_rate),\n tf.keras.layers.Dense(\n units=self._num_classes,\n activation='softmax',\n kernel_regularizer=tf.keras.regularizers.l1_l2(\n l1=self._hparams.l1_regularizer,\n l2=self._hparams.l2_regularizer))\n ])\n print(self._model.summary())", "title": "" }, { "docid": "233d95aec1444ef33609dcce8613338c", "score": "0.61121875", "text": "def resnet50d(pretrained=False, progress=True, **kwargs):\n\n return _resnet('resnet50d', pretrained, progress, deep_stem=True, avg_downsample=True, **kwargs)", "title": "" }, { "docid": "b70ca95a8bc660696f4705e830f790cc", "score": "0.6109776", "text": "def resnet101():\n model = models.resnet101()\n model.apply(weights_init)\n return model", "title": "" }, { "docid": "d2b3c98805ca6222de5717e6a72a25ca", "score": "0.61085665", "text": "def lenet5(obj, verbose=False):\n\n\n INIT_LR = 1e-3\n\n classes = np.unique(obj.y)\n nClasses = len(classes)\n\n model = Sequential()\n model.add(Conv2D(filters=6, kernel_size=(5,5), 
activation='relu', input_shape=(1,165,120)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(filters=16, kernel_size=(5,5), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n model.add(Dense(120, activation='relu'))\n model.add(Dense(84, activation='relu'))\n model.add(Dense(nClasses, activation='softmax'))\n\n if verbose:\n model.summary()\n\n #optimizer\n sgd = SGD(lr=0.1)\n model.compile(loss=categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])\n\n return model", "title": "" }, { "docid": "1a3ddf4c64243f2955af2806beeace4b", "score": "0.6107033", "text": "def resnet_v1_50(num_classes=1000,\n is_training=True,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n weight_decay = 0.0005,\n reuse=None,\n fc_conv_padding='VALID',\n network_depth = None,\n global_pool=False):\n \"\"\"\n Implementation of the popular ResNet50 the following architecture:\n CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3\n -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER\n\n Arguments:\n input_shape -- shape of the images of the dataset\n classes -- integer, number of classes\n\n Returns:\n model -- a Model() instance in Keras\n \"\"\"\n \n # Define the input as a tensor with shape input_shape\n X_input = tf.keras.Input(shape=[224, 224, 3])\n\n if weight_decay == None:\n kernel_regularizer = None\n else:\n kernel_regularizer = tf.keras.regularizers.L2(weight_decay) \n \n if network_depth != None:\n initial_size = network_depth\n else: \n initial_size = 3 #Defalut size is 3, as the original resnet 50\n\n starting_size = initial_size \n # Zero-Padding\n X = tf.keras.layers.ZeroPadding2D((3, 3))(X_input)\n \n # Stage 1\n X = tf.keras.layers.Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_regularizer=kernel_regularizer)(X)\n X = tf.keras.layers.BatchNormalization(axis = 3, name = 'bn_conv1')(X)\n X = tf.keras.layers.Activation('relu')(X)\n X = tf.keras.layers.MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='0', s = 1, kernel_regularizer = kernel_regularizer)\n for i in range(initial_size-1):\n X = identity_block(X, 3, [64, 64, 256], stage=2, block=\"a\"+str(i), kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [64, 64, 256], stage=2, block='c',kernel_regularizer = kernel_regularizer)\n\n initial_size = initial_size + 1\n # Stage 3 \n X = convolutional_block(X, f = 3, filters = [128, 128, 512], stage = 3, block='1', s = 2, kernel_regularizer = kernel_regularizer)\n for i in range(initial_size-1):\n X = identity_block(X, 3, [128, 128, 512], stage=3, block=\"b\"+str(i), kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [128, 128, 512], stage=3, block='c', kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [128, 128, 512], stage=3, block='d', kernel_regularizer = kernel_regularizer)\n \n initial_size = (starting_size) * 2 \n # Stage 4 \n X = convolutional_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block='a', s = 2, kernel_regularizer = kernel_regularizer)\n for i in range(initial_size - 1):\n X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c'+str(i),kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c',kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d',kernel_regularizer = kernel_regularizer)\n #X = 
identity_block(X, 3, [256, 256, 1024], stage=4, block='e',kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f',kernel_regularizer = kernel_regularizer)\n\n # Stage 5 \n X = convolutional_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block='2', s = 2, kernel_regularizer = kernel_regularizer)\n for i in range(starting_size - 1):\n X = identity_block(X, 3, [512, 512, 2048], stage=5, block='d'+str(i), kernel_regularizer = kernel_regularizer)\n #X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c', kernel_regularizer = kernel_regularizer)\n\n # AVGPOOL . Use \"X = AveragePooling2D(...)(X)\"\n X = tf.keras.layers.AveragePooling2D()(X)\n\n # output layer\n X = tf.keras.layers.Flatten()(X)\n X = tf.keras.layers.Dense(256)(X)\n X = tf.keras.layers.Dropout(dropout_keep_prob)(X)\n X = tf.keras.layers.Dense(num_classes, activation='softmax', name='fc' + str(num_classes), kernel_regularizer = kernel_regularizer)(X)\n \n # Create model\n model = tf.keras.Model(inputs = X_input, outputs = X, name='ResNet50')\n\n return model", "title": "" }, { "docid": "73367a0f7233e2ef241a5fe253220169", "score": "0.61001647", "text": "def create_model(inputs, name=\"resnet\"):\r\n x = Conv2D(filters=8, kernel_size=3, padding='same', activation='relu')(inputs)\r\n x = rest_block(inputs=x, num_filters=16)\r\n x = rest_block(inputs=x, num_filters=16)\r\n x = rest_block(inputs=x, num_filters=16)\r\n x = rest_block(inputs=x, down_factor=2, num_filters=32)\r\n x = rest_block(inputs=x, num_filters=32)\r\n x = rest_block(inputs=x, num_filters=32)\r\n x = rest_block(inputs=x, num_filters=32)\r\n x = rest_block(inputs=x, down_factor=2, num_filters=64)\r\n x = rest_block(inputs=x, num_filters=64)\r\n x = rest_block(inputs=x, num_filters=64)\r\n x = MaxPool2D(pool_size=2, strides=2)(x)\r\n\r\n x = Flatten()(x)\r\n x = Dense(64, activation=\"relu\")(x)\r\n outputs = Dense(10, name=\"predictions\")(x)\r\n\r\n model = keras.Model(inputs=inputs, outputs=outputs)\r\n model.summary()\r\n return model", "title": "" }, { "docid": "fe1c66f6d441c87aed2aa59bc0ee3c01", "score": "0.6095121", "text": "def build_model():\n model = keras.Sequential([layers.Dense(5, activation='sigmoid', input_shape=[len(X_train.keys())]),\n layers.Dense(1, activation='sigmoid')])\n model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mean_absolute_error', 'mean_squared_error'])\n return model", "title": "" }, { "docid": "700c97fec82ba07860d2eb826abc4149", "score": "0.6091953", "text": "def get_model():\n nn_model = Sequential()\n nn_model.add(Dense(512, input_dim=input_dim, activation='relu'))\n nn_model.add(Dropout(0.4))\n nn_model.add(Dense(128, activation='relu'))\n nn_model.add(Dropout(0.4))\n nn_model.add(Dense(32, activation='relu'))\n nn_model.add(Dropout(0.4))\n nn_model.add(Dense(2, activation='softmax'))\n nn_model.compile(loss='sparse_categorical_crossentropy', optimizer='adam',\n metrics=['accuracy'])\n return nn_model", "title": "" }, { "docid": "d24a410e5652e192a3de8f96116e688d", "score": "0.6091797", "text": "def build_base_model(self):\n if self._cnn_base.lower() == 'vgg':\n return vgg.VGG(vgg.cfg['F'], self._input_shape).model\n elif self._cnn_base.lower() == 'resnet':\n assert self._input_shape >= (200, 200, 3), error_msg['ResNetShape']\n base_model = keras.applications.ResNet50(\n weights=\"imagenet\", include_top=False, input_shape=self._input_shape)\n # only train conv5 block\n for layer in base_model.layers:\n if \"conv5_\" in layer.name:\n break\n 
layer.trainable = False\n return base_model\n elif self._cnn_base.lower() == 'test':\n model = keras.models.Sequential()\n model.add(keras.layers.Conv2D(32, kernel_size=(5, 5),\n activation='relu',\n input_shape=self._input_shape))\n model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n model.add(keras.layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n return model\n elif self._cnn_base.lower() == 'mini':\n assert self._input_shape >= (200, 200, 3), error_msg['MiniNet']\n base_model = keras.applications.mobilenet.MobileNet(\n input_shape=self._input_shape, alpha=1.0, depth_multiplier=1, dropout=1e-3, include_top=False, weights='imagenet')\n for layer in base_model.layers:\n if \"conv_dw_9\" in layer.name:\n break\n layer.trainable = False\n return base_model\n else:\n raise NameError('Unknown CNN model')", "title": "" }, { "docid": "c58fef2833888aa0b8fd56d3f19abef1", "score": "0.6080094", "text": "def resnet50d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n\n return _resnet(\n \"resnet50d\",\n pretrained,\n progress,\n Bottleneck,\n [3, 4, 6, 3],\n [64, 128, 256, 512],\n deep_stem=True,\n avg_downsample=True,\n **kwargs,\n )", "title": "" }, { "docid": "fddfab6e8ff34dabec025e8e01eee28c", "score": "0.6073341", "text": "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "title": "" }, { "docid": "dbd7c4809d428bdb4cddbd88ae95378f", "score": "0.60675037", "text": "def base_model(image_input, num_classes):\n feature1, feature2, feature3 = darknet53(image_input)\n\n x, y1 = predict(feature1, 512, num_classes)\n x, y2 = predict(feature2, 256, num_classes, x, up_sampling=True)\n x, y3 = predict(feature3, 128, num_classes, x, up_sampling=True)\n\n return Model(image_input, [y1, y2, y3])", "title": "" }, { "docid": "521af9791f187a06e0386494ac082059", "score": "0.6064452", "text": "def build_model(input_size: int) -> Sequential:\n input_size = [input_size]\n model = Sequential()\n model.add(Dense(128, activation='relu', input_shape=input_size,\n kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=None),\n bias_initializer='zeros'))\n model.add(Dense(len(UtteranceType), activation='softmax',\n kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=None),\n bias_initializer='zeros'))\n\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n return model", "title": "" }, { "docid": "e52d985c3ed502c5ab8e297017775084", "score": "0.60632765", "text": "def initialize_model(num_classes):\r\n model = torchvision.models.alexnet(pretrained=True)\r\n for param in model.parameters():\r\n param.requires_grad = False\r\n num_features_1 = model.classifier[6].in_features\r\n model.classifier[6] = nn.Linear(num_features_1, num_classes)\r\n return model", "title": "" }, { "docid": "100da4d595b69571a15b1a6a6c68f493", "score": "0.6036053", "text": "def __init__(self):\n \n self.options = OrderedDict()\n\n self.options[\"model_name\"] = 'resNet50'\n \n # Image model parameters\n self.options['input_dim'] = 448 # expected x, y dim of resNet \n self.options['image_depth'] = 3 # 3 color channels (RGB)\n self.options['n_image_embed'] = 512 # resNet\n self.options['n_image_regions'] = 196 # 14x14 regions\n\n # Training batch size \n self.options['batch_size'] = 15 # small to avoid OOM errors", "title": 
"" }, { "docid": "9eed3a6ab6d6286d6fad1c3a406f29e9", "score": "0.6009775", "text": "def resnet50(num_classes,\n batch_size=None,\n use_l2_regularizer=True,\n rescale_inputs=False,\n batch_norm_decay=0.9,\n batch_norm_epsilon=1e-5):\n input_shape = (224, 224, 3)\n img_input = layers.Input(shape=input_shape, batch_size=batch_size)\n if rescale_inputs:\n # Hub image modules expect inputs in the range [0, 1]. This rescales these\n # inputs to the range expected by the trained model.\n x = layers.Lambda(\n lambda x: x * 255.0 - tf.keras.backend.constant( # pylint: disable=g-long-lambda\n imagenet_preprocessing.CHANNEL_MEANS,\n shape=[1, 1, 3],\n dtype=x.dtype),\n name='rescale')(\n img_input)\n else:\n x = img_input\n\n if tf.keras.backend.image_data_format() == 'channels_first':\n x = layers.Permute((3, 1, 2))(x)\n bn_axis = 1\n else: # channels_last\n bn_axis = 3\n\n block_config = dict(\n use_l2_regularizer=use_l2_regularizer,\n batch_norm_decay=batch_norm_decay,\n batch_norm_epsilon=batch_norm_epsilon)\n x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)\n x = layers.Conv2D(\n 64, (7, 7),\n strides=(2, 2),\n padding='valid',\n use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n name='conv1')(\n x)\n x = layers.BatchNormalization(\n axis=bn_axis,\n momentum=batch_norm_decay,\n epsilon=batch_norm_epsilon,\n name='bn_conv1')(\n x)\n x = layers.Activation('relu')(x)\n x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)\n\n x = conv_block(\n x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), **block_config)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', **block_config)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', **block_config)\n\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', **block_config)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', **block_config)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', **block_config)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', **block_config)\n\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', **block_config)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', **block_config)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', **block_config)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', **block_config)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', **block_config)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', **block_config)\n\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', **block_config)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', **block_config)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', **block_config)\n\n x = layers.GlobalAveragePooling2D()(x)\n\n dim = 2048\n\n pred = compression(features)\n # ------------------------------------------------------------------------------\n\n each_category = 100 # 每个类别用多少神经元刻画\n class_num = num_classes\n cluster_num = each_category * class_num # 簇个数\n\n NeuronKernel1 = Center_variable([cluster_num, dim], name='W_kernal1')#[簇个数, 样本维度 ]\n NeuronKernel2 = Center_variable([cluster_num, dim], name='W_kernal2')\n\n\n var, dist, line, neuron_layer = forward(pred, NeuronKernel1, NeuronKernel2, cluster_num, dim)\n\n w2 = tf.Variable(tf.truncated_normal([cluster_num, class_num]), 'weight_w2')\n b2 = bias_variable([class_num], 'biases_b2')\n x = tf.matmul(neuron_layer, w2)+b2\n\n\n # x = 
layers.Dense(\n # num_classes,\n # kernel_initializer=tf.initializers.random_normal(stddev=0.01),\n # kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n # bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),\n # name='fc1000')(\n # x)\n\n\n # A softmax that is followed by the model loss must be done cannot be done\n # in float16 due to numeric issues. So we pass dtype=float32.\n x = layers.Activation('softmax', dtype='float32')(x)\n\n # Create model.\n return tf.keras.Model(img_input, x, name='resnet50')", "title": "" }, { "docid": "5430fc9c54b53dc474b97a3fa2cf2e61", "score": "0.60082525", "text": "def create_model_5(shape=(32, 32, 3), num_classes=10):\n model = Sequential()\n\n model.add(Conv2D(32, (3, 3), padding='same',\n input_shape=shape))\n model.add(Activation('relu'))\n\n model.add(Conv2D(32, (3, 3)))\n model.add(Activation('relu'))\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64, (3, 3), padding='same'))\n model.add(Activation('relu'))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(256))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(num_classes))\n model.add(Activation('softmax'))\n\n return model", "title": "" }, { "docid": "9db2ccf684d9889311a9fbf79099236b", "score": "0.59994245", "text": "def make_densenet121_resisc_model(**model_kwargs) -> tf.keras.Model:\n input = tf.keras.Input(shape=(256, 256, 3))\n\n # Preprocessing layers\n img_scaled_to_255 = Lambda(lambda image: image * 255)(input)\n img_resized = Lambda(lambda image: tf.image.resize(image, (224, 224)))(\n img_scaled_to_255\n )\n img_scaled_to_1 = Lambda(lambda image: image / 255)(img_resized)\n mean, std = mean_std()\n img_standardized = Lambda(lambda image: (image - mean) / std)(img_scaled_to_1)\n\n # Load ImageNet pre-trained DenseNet\n model_notop = DenseNet121(\n include_top=False,\n weights=None,\n input_tensor=img_standardized,\n input_shape=(224, 224, 3),\n )\n\n # Add new layers\n x = GlobalAveragePooling2D()(model_notop.output)\n predictions = Dense(num_classes, activation=\"softmax\")(x)\n\n # Create graph of new model and freeze pre-trained layers\n new_model = Model(inputs=input, outputs=predictions)\n\n for layer in new_model.layers[:-1]:\n layer.trainable = False\n if \"bn\" == layer.name[-2:]: # allow batchnorm layers to be trainable\n layer.trainable = True\n\n # compile the model\n new_model.compile(\n optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"]\n )\n\n return new_model", "title": "" }, { "docid": "3bb4d38fe534d5513b438ee449396ce5", "score": "0.5990188", "text": "def resnet_50(num_classes, data_format='channels_first'):\n return resnet_50_generator(\n block_fn=bottleneck_block,\n layers=[3, 4, 6, 3],\n num_classes=num_classes,\n data_format=data_format)", "title": "" }, { "docid": "4baa9f68806700e1a6f00d72d6fff847", "score": "0.5989204", "text": "def create_model_3(inputs, name=\"resnet\"):\r\n x = rest_block(inputs=inputs, down_factor=1, num_filters=16)\r\n x = rest_block(inputs=x, down_factor=2, num_filters=32)\r\n x = rest_block(inputs=x, down_factor=2, num_filters=64)\r\n\r\n x = Flatten()(x)\r\n x = Dense(512, activation=\"relu\")(x)\r\n x = Dense(128, activation=\"relu\")(x)\r\n x = Dense(64, activation=\"relu\")(x)\r\n outputs = Dense(10, 
name=name)(x)\r\n\r\n model = keras.Model(inputs=inputs, outputs=outputs)\r\n model.summary()\r\n return model", "title": "" }, { "docid": "dffacf96f3025cbfd06cdc938f00d723", "score": "0.5988595", "text": "def resnet50_aspp(pretrained=False, **kwargs):\n model = ResNet_aspp(Bottleneck, [3, 4, 6, 3], **kwargs)\n\n return model", "title": "" }, { "docid": "ce727430ea99b9a89bfdca2f34d57f90", "score": "0.5978964", "text": "def load_model():\r\n global model\r\n # model = resnet50(pretrained=True)\r\n # 读取参数\r\n if os.path.exists('crnn/checkpoint.pth.tar'):\r\n checkpoint = torch.load('crnn/checkpoint.pth.tar', map_location=device)\r\n model.load_state_dict(checkpoint['state_dict'])\r\n print('model has restored')\r\n\r\n model.eval()\r\n model = model.to(device)", "title": "" }, { "docid": "06f30cda77fc3c08d8ac0100a783761d", "score": "0.597437", "text": "def lenet5(X):\n conv_1 = K.layers.Conv2D(6, 5, padding='same',\n activation='relu',\n kernel_initializer='he_normal')(X)\n pool_1 = K.layers.MaxPool2D(2, 2)(conv_1)\n conv_2 = K.layers.Conv2D(16, 5, padding='valid',\n activation='relu',\n kernel_initializer='he_normal')(pool_1)\n pool_2 = K.layers.MaxPool2D(2, 2)(conv_2)\n flat = K.layers.Flatten()(pool_2)\n f_con_1 = K.layers.Dense(120, input_shape=X.shape,\n activation='relu',\n kernel_initializer='he_normal')(flat)\n f_con_2 = K.layers.Dense(84, activation='relu',\n kernel_initializer='he_normal')(f_con_1)\n Y = K.layers.Dense(10, activation='softmax',\n kernel_initializer='he_normal')(f_con_2)\n model = K.Model(X, Y)\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n return model", "title": "" }, { "docid": "f78352c933444f9a3f15acc074b7de15", "score": "0.5949784", "text": "def resnet50v2(**kwargs):\n return _resnet(BottleneckV2, [3, 4, 6, 3], 2, **kwargs)", "title": "" }, { "docid": "7cdb65abb9a4a55022809e84389c98f5", "score": "0.59412074", "text": "def create_random_model(in_size: int, out_size: int, config: dict, task: str) -> AbstractModel:\n model_options = config.get(\"MODELS\", [\"neural_network\"])\n model_type = choice(model_options)\n\n criterion = config.get(\"GENERAL_CRITERION\", \"MSE\")\n\n if model_type == \"neural_network\":\n return create_deep_learning_model(in_size, out_size, config.get(\"NEURAL_NETWORK_EVOL_CONFIG\", {}), task,\n criterion)\n\n elif model_type == \"some_future_type\":\n # TODO add types as they are added in config\n pass", "title": "" }, { "docid": "40dbbc564a89ee08e5323edbf6160c53", "score": "0.5939731", "text": "def get_model():\n return Sequential([\n Conv1D(32, kernel_size=5, input_shape=(514, 12)),\n MaxPooling1D(),\n Activation('relu'),\n Conv1D(64, kernel_size=5),\n MaxPooling1D(),\n Activation('relu'),\n Conv1D(128, kernel_size=5),\n MaxPooling1D(),\n Activation('relu'),\n Flatten(),\n Dense(20),\n Activation('relu'),\n Dense(2),\n Activation('softmax')\n ])", "title": "" }, { "docid": "7c70302a4063945f48b67b9a41668a15", "score": "0.5929089", "text": "def create_model(self):\n self.model = StackedGCN(self.args, self.clustering_machine.feature_count, self.clustering_machine.class_count)\n self.model = self.model.to(self.device)", "title": "" }, { "docid": "f5d7e1ee5252dacb2df3d29b8c1992e6", "score": "0.5925951", "text": "def resnet200(**kwargs):\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "title": "" }, { "docid": "5899d55e1622d46fb9fed5d73a92fc63", "score": "0.59253466", "text": "def tweaked_resnet50(pretrained=False, progress=True, **kwargs):\n return 
_tweaked_resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,\n **kwargs)", "title": "" }, { "docid": "a973782def39d1a66dc8c78c4006d882", "score": "0.59168833", "text": "def wide_resnet50_2(pretrained=False, num_classes=1000, in_chans=3, **kwargs):\n\tmodel = ResNet(\n\t\tBottleneck, [3, 4, 6, 3], base_width=128,\n\t\tnum_classes=num_classes, in_chans=in_chans, **kwargs)\n\tmodel.default_cfg = default_cfgs['wide_resnet50_2']\n\tif pretrained:\n\t\tload_pretrained(model, model.default_cfg, num_classes, in_chans)\n\treturn model", "title": "" }, { "docid": "0bea7a477dc38ac99609c72774659e92", "score": "0.591471", "text": "def build_read_tensor_keras_resnet(args):\t\n\tin_channels = defines.total_input_channels_from_args(args)\n\tif args.channels_last:\n\t\tin_shape = (args.read_limit, args.window_size, in_channels)\n\t\tchannel_axis = 3\n\telse:\n\t\tin_shape = (in_channels, args.read_limit, args.window_size)\n\t\tchannel_axis = 1\n\n\tx = Input(in_shape, name=args.tensor_map)\n\tmodel = keras_resnet.models.ResNet50(x, classes=len(args.labels))\n\tadamo = Adam(lr=0.000001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=1.)\t\n\tmodel.compile(loss='categorical_crossentropy', optimizer=adamo, metrics=get_metrics(args.labels))\t\n\tmodel.summary()\n\t\n\tif os.path.exists(args.weights_hd5):\n\t\tmodel.load_weights(args.weights_hd5, by_name=True)\n\t\tprint('Loaded model weights from:', args.weights_hd5)\n\n\treturn model", "title": "" }, { "docid": "86966e4bff07407aa83d65bee8f7202d", "score": "0.59130096", "text": "def __build_model(self):\n model = Sequential()\n model.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=self.input_shape))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(self.dropout))\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(self.dropout))\n model.add(Dense(self.num_classes, activation='softmax'))\n model.compile(loss=tf.keras.losses.categorical_crossentropy,\n optimizer=tf.keras.optimizers.Adadelta(learning_rate=self.learning_rate, rho=self.rho, epsilon=self.epsilon),\n metrics=['accuracy'])\n return model", "title": "" } ]
a404e163ba2393e46165fbcedf3f1591
command to display all the regrade requests
[ { "docid": "34f89319cd2655565707459797320655", "score": "0.6930176", "text": "async def display_requests(ctx):\n for name, questions in db.select_query(\n 'SELECT name,questions FROM regrade'\n ):\n if name:\n await ctx.send(name + \" \" + questions)\n else:\n await ctx.send('There are no regrade requests at the moment')\n break", "title": "" } ]
[ { "docid": "9e26b7c479dffb6328178f7b140df671", "score": "0.59886205", "text": "def show_requested(self):\n results = []\n for name, comp in sorted(self._requested):\n results.append(\n ansiformat(\n self._get_color(comp), \"{} {}\".format(name, dr.get_name(comp))\n )\n )\n IPython.core.page.page(six.u(os.linesep.join(results)))", "title": "" }, { "docid": "7fda1e589a716d9427a855ddaed78403", "score": "0.5951658", "text": "def list():\n cli_client.list()", "title": "" }, { "docid": "cac3de827e90c63e89fdc311d091e35c", "score": "0.5941279", "text": "def view_requests():\n return cv.view_requests()", "title": "" }, { "docid": "4a705364f3399718e16c3efdbf15afaf", "score": "0.5834303", "text": "def show_results(self):\n for response in self.responses:\n print ('-'+response)", "title": "" }, { "docid": "e4ef1309308affa1e5be7ed4547a736e", "score": "0.57981396", "text": "def list(self):\n return self._get('/list')", "title": "" }, { "docid": "ea952f3bcf0bdfaf0814e3a7ef73df16", "score": "0.57469463", "text": "def showAll():\r\n \r\n for i in range (len(validCommands)):\r\n print validCommands[i] + \" \",\r\n print", "title": "" }, { "docid": "9bb2e96de0d100dfe570aca6252f3873", "score": "0.56999236", "text": "def getGuestRequests(request):\n mac = utils._getMAC(utils._getIPAddress(request))\n requests = WebRequest.objects.all().filter(trainee=None, mac_address=mac).order_by('status')\n print mac\n html = render(request, 'web_access/requests_panel.html', context={'guest_access_requests': requests})\n return HttpResponse(html)", "title": "" }, { "docid": "b5acbfbedb9e1524417d1e5e14283e4f", "score": "0.5691624", "text": "def all_entries(self) -> requests.models.Response:", "title": "" }, { "docid": "b5acbfbedb9e1524417d1e5e14283e4f", "score": "0.5691624", "text": "def all_entries(self) -> requests.models.Response:", "title": "" }, { "docid": "1438b4a7bebe7bcf69047a7eb29b0a3e", "score": "0.56702083", "text": "def index():\n return(\n f\"Available routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start<br/>\"\n f\"/api/v1.0/start/end\"\n )", "title": "" }, { "docid": "6718e8e964f70a1b1d8fbc600b655111", "score": "0.56677794", "text": "def help_all(self):\n print((\"A command that prints all string representation of all \") +\n (\"instances\"))", "title": "" }, { "docid": "7846d2b70e31f4869329e69b54933cc8", "score": "0.5654725", "text": "def showall():\n msglist = Message.query.limit(10).all()\n msglp = [print(\"{},{}\".format(m.msg, m.digest)) for m in msglist]\n return \"done\"", "title": "" }, { "docid": "cf66dbbadd8cd573acf2c631658c6ab5", "score": "0.56175786", "text": "def display_all(self):\n print(\" User || Password || Domain || Verified || Email Pull || AD Groups \")\n print(\"-------------------++------------------++------------------++------------++--------------++--------------------\")\n for cred in self.master_list.keys():\n cred_info =[self.master_list[cred].username,\n self.master_list[cred].password,\n self.master_list[cred].domain,\n self.master_list[cred].verified,\n self.master_list[cred].email_pull,\n self.master_list[cred].ad_groups]\n print('{0:18} || {1:16} || {2:16} || {3:10} || {4:12} || {5:25}'.format(*cred_info))", "title": "" }, { "docid": "f99d465c8a80d1db08e87b4ee83590d6", "score": "0.56129956", "text": "def show(ctx, host):\n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n if host == 'all':\n url = 'http://' + server + '/api/v' + str(version) + \\\n 
'/gateways'\n else:\n url = 'http://' + server + '/api/v' + str(version) + \\\n '/gateway/' + host\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Print a gateway\n if host != 'all':\n g = data\n status = 'enabled' if g['enabled'] else 'disabled'\n indent = ' ' * 10\n click.echo(g['host'] + ': ' + g['name'])\n click.echo(indent + 'eui ' + euiString(g['eui']))\n click.echo(indent + 'power: ' + str(g['power']) + ' dBm')\n click.echo(indent + 'status: ' + status)\n return\n \n # Print all gateways\n click.echo('{:15}'.format('Gateway') + '{:17}'.format('IP-Address') + \\\n '{:24}'.format('EUI') + \\\n '{:9}'.format('Enabled') + '{:12}'.format('Power-dBm'))\n for i,g in data.iteritems():\n enabled = 'Yes' if g['enabled'] else 'No'\n click.echo('{:14.14}'.format(g['name']) + ' ' + \\\n '{:17}'.format(g['host']) + \\\n '{:24}'.format(euiString(g['eui'])) + \\\n '{:9}'.format(enabled) + \\\n '{:2}'.format(g['power']))", "title": "" }, { "docid": "3b381bee2087970e775d96cf123019ae", "score": "0.5604479", "text": "def infoRateLimitCommand(args):\n print(getRateLimitInfo())", "title": "" }, { "docid": "02a45eef54532d5fec24c15482d1fe56", "score": "0.5601845", "text": "def show_request(self, ar, *args, **kw):\n print(ar.to_rst(*args, **kw))", "title": "" }, { "docid": "428b6527692c19bef30acf1437e9f5ab", "score": "0.5583262", "text": "def view_complete_api(self):\n print()\n system_info = self.fc.device_manager.system_info\n print(\n f\"system : {system_info[-1]}\\n\"\n f\"build : {system_info[-2]}\\n\"\n f\"hw-code: {system_info[0]}\"\n )\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(f\"Report date: {now}\")\n for service_name, service in self.fc.services.items():\n if service_name == 'any1':\n continue\n print()\n print('=' * 65)\n for action_name in service.actions:\n self.view_actionarguments(service_name, action_name)", "title": "" }, { "docid": "de850f75afed900257f19e280182760e", "score": "0.55796564", "text": "def list_allocation_ids():\n rc_provision = NcProvision()\n all_alloc_list, token = rc_provision.fetch_alloc()\n req_ids = []\n for allo in all_alloc_list:\n req_ids.append(allo.crams_req_id)\n print(req_ids)", "title": "" }, { "docid": "b80847b09c12f976a2053275ee4f38f4", "score": "0.5557065", "text": "def list_command():\n \n with_description = True\n\n from aiida import load_dbenv\n load_dbenv()\n from aiida.orm import DataFactory\n\n LapwbasisData = DataFactory('exciting.lapwbasis')\n groups = LapwbasisData.get_lapwbasis_groups()\n\n if groups:\n for g in groups:\n sp = LapwbasisData.query(dbgroups=g.dbgroup).distinct()\n num_sp = sp.count()\n\n if with_description:\n description_string = \": {}\".format(g.description)\n else:\n description_string = \"\"\n\n print \"* {} [{} species]{}\".format(g.name, num_sp, description_string)\n else:\n print \"No LAPW basis sets were found.\"", "title": "" }, { "docid": "6dc9ab946e965cbb3220bb352ecf889e", "score": "0.55505043", "text": "def request(self, request_id, logfile='/var/log/nova.log'):\n lines = utils.execute(\"cat %s | grep '\\[%s '\" % (logfile, request_id))\n print re.sub('#012', \"\\n\", \"\\n\".join(lines))", "title": "" }, { "docid": "66c6377a726c2a92d6677b5b93e0f803", "score": "0.5515462", "text": "def SIPshowregistry(self, *a, **kw):\n action = \"SIPshowregistry\"\n if self.soc.connected:\n # Send command to AMI and capture request id\n req_id = self.cmd(action)\n self._pending.add(req_id)\n self._cache[req_id] = []\n return req_id", "title": 
"" }, { "docid": "141356db4d671d638255efc1b8d01f08", "score": "0.55026156", "text": "def all():\n log.debug('Retrieving all cards from Scryfall.')", "title": "" }, { "docid": "97f83c15711b740a070a3cb4a3c51f70", "score": "0.55019104", "text": "def index(self):\n return self.draw(self.requests())", "title": "" }, { "docid": "cab1825f27208d4fc912a7a018ca3338", "score": "0.54889995", "text": "def list_command(rsf_file, output_format):\n\n try:\n cmds = rsf.read(rsf_file)\n register = Register(cmds)\n\n utils.check_readiness(register)\n\n records = register.records()\n\n if not records:\n return\n\n if output_format == \"json\":\n stream = StringIO()\n utils.serialise_json(records, stream)\n\n stream.seek(0)\n\n click.echo(stream.read())\n\n elif output_format == \"csv\":\n stream = StringIO()\n headers = Record.headers(register.schema())\n\n xsv.serialise(stream, records, headers)\n\n stream.seek(0)\n\n click.echo(stream.read())\n\n else:\n for record in records.values():\n click.echo(record.blob)\n\n except RegistersException as err:\n utils.error(str(err))", "title": "" }, { "docid": "e419c8c9f54b20c4b3728820a00813be", "score": "0.5487742", "text": "def detail(self, req):\n print(\"NMH 99999 i m here in detail() 22222\") \n return self._get_access_groups(req, is_detail=True)", "title": "" }, { "docid": "0a741238b21ae63e9b9bba635a16ae4c", "score": "0.54829377", "text": "def list(self):\n self.get()", "title": "" }, { "docid": "34eced9674e754f80b0974a4395c6033", "score": "0.5480761", "text": "def do_list(self, args):\n print('\\nhindsight')\n print('dumpzilla')\n\tprint('evtx\\n')", "title": "" }, { "docid": "16464d5baaa6595c19d62c7f51b3d7d6", "score": "0.5480337", "text": "def index(self, req):\n print(\"NMH 99999 i m here in index() 1111\") \n return self._get_access_groups(req, is_detail=False)", "title": "" }, { "docid": "e27d712ca3ccfee57a680c0eb22c7c76", "score": "0.5455223", "text": "def get_all(self):\n if True:\n labels = self.get_labels()\n print_json(labels)", "title": "" }, { "docid": "fb585030aa20f81785574e9f0cab16e4", "score": "0.54468334", "text": "def show_commands():\n return \":hammer_and_wrench: *List of supported commands:*\\n\" \\\n \"`on` - list all online workers\\n\" \\\n \"`off` - list all offline workers\\n\" \\\n \"`all` - list all workers\\n\" \\\n \"`pool` - show link for nanopool stats page\\n\" \\\n \"`wallet` - show link for Etherscan wallet\\n\"", "title": "" }, { "docid": "3d16727d51d7578af56a12d7f43a82c7", "score": "0.54424775", "text": "def list_main(args):\n resource = args.resource.lower()\n if resource == \"user\":\n for user in User.select():\n print(user.name)\n elif resource == \"room\":\n for room in Room.select():\n print(f\"{quote(room.creator.name, safe='')}/{quote(room.name, safe='')}\")", "title": "" }, { "docid": "536979d2b4a7d9c16c926917d7c44b96", "score": "0.5435567", "text": "def show_requests(self):\n try:\n projects = self.conn.get_requests(self.bdo_id)\n # adding index column to given result\n projects_fields = (('index'),) + projects.description\n result = projects.fetchall()\n counter = 0\n # request is empty then returning early with a message\n if len(result) == 0:\n print(Color.F_Red+\"there is no pending request\"+Base.END)\n return\n\n print(\"write index of request to manage:\\n\")\n temp = result\n # converting list of tuples into list of list\n temp = [list(elem) for elem in temp]\n for request in temp:\n if str(RequestType.APPROVAL.name) == str(request[0]):\n temp2 = str(request[2]).split('|')\n request[2] = temp2[0] + \" 
request for \" + temp2[4] + \" project for \" + temp2[1] + \" member\"\n # inserting data for index column\n request.insert(0, counter)\n counter += 1\n table = PrettyTable()\n # assigning field names to pretty table object and removing request id column\n table.field_names = [column[0] for column in projects_fields[:-1]]\n for row in temp:\n # removing request id from temp list and adding to table row\n table.add_row(row[:-1])\n print(table)\n row_number = input(\"enter index number: \")\n if not Validation.is_int(row_number):\n print(\"index number is not valid\")\n input()\n return\n menu = Menu()\n result_names = [x.name for x in RequestResult]\n result_names.extend([str(BackButton.EXIT.name)])\n result_name = menu.draw_menu(result_names)\n input()\n status = \"NULL\"\n if str(RequestType.APPROVAL.name) == str(result[int(row_number)][0]):\n temp = str(result[int(row_number)][2]).split('|')\n req_type = str(temp[0])\n self.project_id = str(temp[3])\n self.member_id = str(temp[2])\n if req_type == str(ApprovalType.WAGE.name) and str(result_name) == str(RequestResult.APPROVED.name):\n self.delete_project_members()\n status = \"'True'\"\n elif req_type == str(ApprovalType.MEMBER.name) and str(result_name) == str(RequestResult.APPROVED.name):\n project_members_id = self.conn.get_project_members(self.project_id)\n members_required = self.conn.get_project_members_required(self.project_id)\n # checking if total number of members in given project are less then actual project requirement\n if int(members_required[0]) <= int(len(project_members_id)):\n print(Color.F_Red + \"project members limit exceeded can't accept request\" + Base.END)\n return\n self.conn.assign_project_members(self.project_id, self.member_id)\n status = \"'True'\"\n elif str(result_name) == str(RequestResult.REJECTED.name):\n status = \"'False'\"\n else:\n if str(result_name) == str(RequestResult.APPROVED.name):\n status = \"'True'\"\n elif str(result_name) == str(RequestResult.REJECTED.name):\n status = \"'False'\"\n self.conn.resolve_request(status, result[int(row_number)][4])\n self.conn.commit_data()\n print(Color.F_Green + \"Request completed successfully\" + Base.END)\n except Exception as e:\n print(e)\n self.conn.rollback_data()", "title": "" }, { "docid": "964fb9f0c5b4dc6726c1106738c5c9e1", "score": "0.54314226", "text": "def list(self, request):\n return CustomeResponse(\n {'msg': 'Get method bnot allowed'}\n )", "title": "" }, { "docid": "8f55ec0a0661e383c0e3dfc542562e12", "score": "0.5418671", "text": "def cli_list():\n for name, ip in get_local_dns_entries().items():\n print(name, ip)", "title": "" }, { "docid": "66bf3f63f876e26dcc8a4bbf53c8ae53", "score": "0.5417719", "text": "async def list(self, ctx):\n responses = open_json(\"Cog/admin/angry_responses.json\")\n response_list = []\n for response in responses:\n response_list.append(f\"- {response['response']}\\n\")\n await ctx.send(f\"```md\\n{''.join(response_list)}```\")", "title": "" }, { "docid": "ddbcf4316427a97aa1f5b30bf7ddb077", "score": "0.54020256", "text": "def get_all(self):\n \n pass", "title": "" }, { "docid": "4c30eadeeb41c65c290bb81a8d8c231d", "score": "0.53937215", "text": "def help(self):\n self.__print_qsrs_available()", "title": "" }, { "docid": "cd229cfe9c7a9e21557899d22e256c87", "score": "0.5379", "text": "def getAll(self):", "title": "" }, { "docid": "179b4b7b8ea7bfc3ec385e1031b23b23", "score": "0.53660345", "text": "def get():\n nodes = manage_nodes.get_nodes()\n print((\"{} \" * len(nodes))[:-1].format(*nodes))", "title": "" }, { 
"docid": "59a0f710adf96c58f774fc2e38ea4beb", "score": "0.5365071", "text": "def showList(access): \n\n passList = access.getall()\n\n # Create a table in cmd line\n if passList:\n x = PrettyTable()\n x.field_names = [\"URL\", \"email\", \"password\", \"Notes\"]\n\n for i in passList:\n x.add_row(\n [i[2], i[1], '****',i[4]]\n )\n print(x.get_string())\n\n else:\n # No passwords available\n print('\\nNo Passwords Available')\n\n print('Press enter to exit')\n input()", "title": "" }, { "docid": "8b6f56579a25438184b16a343d3b9831", "score": "0.5357731", "text": "def list(self, request):\n\n a_viewset = [\n \"A View set performs actions (list, create, update, retrieve, destroy)\",\n \"Automatically maps urls using URL Router\",\n \"Proveides more functionality with less code\"\n ]\n\n return Response({'message':'Hello, ', 'a_viewset': a_viewset})", "title": "" }, { "docid": "88f26511d1fc5ccabe8fe911c741dc19", "score": "0.5356498", "text": "def show_action(request):\n list_tasks = []\n try:\n logger.debug(f'The \"request\" is {request}')\n if 'user' in request:\n list_tasks = work.show_all_tasks(request['user'])\n logger.debug(f'The \"show\" command were selected')\n except Exception as e:\n logger.error(f'Error happened during showing all records - {str(e)}')\n logger.error(f'Error happened during showing all records - {repr(e)}')\n return render_template('templates/task_list.html', {'tasks': list_tasks})", "title": "" }, { "docid": "15793db844c05825f61e9bb52b54e21f", "score": "0.5354613", "text": "def printing_get_genres(file_name):\n result = reports.get_genres(file_name)\n print (\"The ordred list of genres:\")\n for nr, genre in enumerate(result, 1):\n print (\" \", nr, genre)\n print()", "title": "" }, { "docid": "475253b41ba6c88bd15dd9d11fa4542c", "score": "0.5353989", "text": "def cmd_listallinfo(self, conn, path=\"/\"):\n return self._listall(path, self._resolve_path(path), True)", "title": "" }, { "docid": "f9715a86fff909b647ff25cc672093fd", "score": "0.5352748", "text": "def show_results(self, proto, request):\n ok_signal = proto.get_name(proto.res_ok)\n print(f\"Qubits between Node {self.node0} and Node {self.node1} with purpose_id {request.purpose_id}:\")\n for _ in range(request.number):\n yield self.await_signal(proto, ok_signal)\n res = proto.get_signal_result(ok_signal, self)\n qubits = proto.node.qmemory.peek(res.logical_qubit_id)[0].qstate.qubits\n print(f\" {ns.sim_time():8.0f}: {qubits} with create_id {res.create_id}\")", "title": "" }, { "docid": "57cc77d70ef3089794dd1f774152586e", "score": "0.5350359", "text": "def display_get_genres():\n game_genres = reports.get_genres(filename)\n [print(genre) for genre in game_genres]\n print()", "title": "" }, { "docid": "d162c29826c6beafea11b05c76ace473", "score": "0.53487444", "text": "def browse_results(self):\n self.run('tui', ['tui'])", "title": "" }, { "docid": "557922f7fc832ebbb33b43850ba7567a", "score": "0.53427017", "text": "def all(self, request):\n return self.list(request)", "title": "" }, { "docid": "3e141b78387cd724c170107b1ab34a2d", "score": "0.53404456", "text": "def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()", "title": "" }, { "docid": "8198e5a4c18786ddb5a14109f434eccd", "score": "0.5331521", "text": "def rescan_get():\n return render_template(\"github_rescan.html\")", "title": "" 
}, { "docid": "13cd27d47ae161771b245269f69e0f40", "score": "0.5314113", "text": "def all_programs(request):\n return render(request, 'wao/all_programs.html')", "title": "" }, { "docid": "24d8e36d18be86828999dc5bd22d83a1", "score": "0.53089684", "text": "def list_rr(ctx, *args, **kwargs):\n ctx.forward(view)", "title": "" }, { "docid": "c619f2030687176110a0c081e6efbcba", "score": "0.5307149", "text": "def list(self, filter=None):\n filter_string = ''\n if filter:\n filter_string = '?{}'.format(filter)\n resp = self._http.get_cmd('{}{}'.format(self._apiBase,filter_string))\n #print('RESP: {}'.format(resp))\n if resp:\n return resp\n return list()", "title": "" }, { "docid": "0231c0a17c0a0c2f6d18e4ab13e72160", "score": "0.53049135", "text": "def get_all():\n return handle_pricer_request(\"get_price_and_greeks\")", "title": "" }, { "docid": "c7ca37238e92a518db5b7197dcca8bda", "score": "0.5288639", "text": "def commands(self):\n for cmd in self.expose:\n print \"% \" + cmd\n print \"%\\t\" + getattr(self, cmd).__doc__", "title": "" }, { "docid": "8f18c27fa0c062896de235d2783c191c", "score": "0.52846736", "text": "def see_all_available(self):\n cars_available = self._car_service.get_all_available()\n self._system.clear_screen() ##display header function instead\n print(\n \"\\t ___ _ _ _ _ _ \\n\"\n \"\\t| __| |___ ___| |_ | | (_)__| |_\\n\"\n \"\\t| _|| / -_) -_) _| | |__| (_-< _|\\n\"\n \"\\t|_| |_\\___\\___|\\__| |____|_/__/\\__|\\n\"\n \"\\n\"\n \"All cars now available: \\n\"\n \"CarNr Brand Type Year Price \\n\")\n for car in cars_available:\n print(car)", "title": "" }, { "docid": "da6cb693e412336ecb7871c25817fc92", "score": "0.5265973", "text": "def list_request(self, form: str, query: str = None) -> Dict[str, Any]:\n params = remove_empty_elements({\"q\": query})\n response = self._http_request(\"GET\", f\"arsys/v1/entry/{form}\", params=params)\n return response", "title": "" }, { "docid": "643fe1ea4516327c26cdd01b418fbf47", "score": "0.5259977", "text": "def dev_list(tool, cmdargs):\n\n if cmdargs.pcidev:\n tool.list_ethernet_pci_devices()\n\n if cmdargs.vm:\n tool.list_vms()", "title": "" }, { "docid": "8193f47a548d0fcae6d81a7f3d2a9863", "score": "0.52599746", "text": "def schedule_list():\n host = _host()\n url = urljoin(host, \"schedules\")\n response = requests.get(url)\n if response.status_code >= 400:\n msg = (\n f\"{color.LIGHT_RED}Something went wrong.{color.END} \"\n f\"Got status code {response.status_code} and reponse {response.text}.\"\n )\n click.echo(msg)\n return\n\n click.echo(response.text)\n return response.text", "title": "" }, { "docid": "19cf69507904f178915896f4308b978c", "score": "0.5253621", "text": "def list_commands_cmd(self, args):\n self.respond(' '.join(list(self.commands.keys())))", "title": "" }, { "docid": "2b886ca420df20d180205faa3067e1d0", "score": "0.52491784", "text": "def get_all():\n\n url = 'http://www.rcsb.org/pdb/rest/getCurrent'\n\n req = urllib.request.Request(url)\n f = urllib.request.urlopen(req)\n result = f.read()\n assert result\n\n kk = str(result)\n\n p = re.compile('structureId=\\\"....\"')\n matches = p.findall(str(result))\n out = list()\n for item in matches:\n out.append(item[-5:-1])\n\n return out", "title": "" }, { "docid": "944e19140876400f5c36d5bad1d5a825", "score": "0.5245987", "text": "def list(self):\n# self.stdin('list')\n# out = self.stderr.split()\n return", "title": "" }, { "docid": "4fe8c60ac4006be0996bfb87b7017c47", "score": "0.5241683", "text": "def getlist(self):\n return self.get_response({'api_key': self.API_KEY, 
'action': 'getlist'})", "title": "" }, { "docid": "e7d06ddf030eebb59ca75232ffd87c3b", "score": "0.52352864", "text": "def list_plans():\n return logging.info(PaymentPlan.list())", "title": "" }, { "docid": "4032912b2a8ca63456c39360ae656067", "score": "0.5221821", "text": "def get(self):\n print(self.__requestData)", "title": "" }, { "docid": "91413442e1876df1b9a493677ea23227", "score": "0.52197015", "text": "def list(self, request):\n\n a_viewset = [\n 'Uses actions (list, create, retrieve, update, partial_update)',\n 'Automatically maps to URLS using Routers',\n 'Provides more functionality with less code',\n ]\n\n return Response({'message': 'Hello!', 'a_viewset': a_viewset})", "title": "" }, { "docid": "ccd6c76d93e59e326c4da529eaa8e791", "score": "0.52108645", "text": "def get_all(self) -> APIResponse:\n return self._get(\"list\")", "title": "" }, { "docid": "1bb0e9788790fda158357497baa0f5d9", "score": "0.5210416", "text": "def show_all(self):\n for cmd in self.pcmds:\n print(cmd)", "title": "" }, { "docid": "34e97b44bc152029772cdbc016dd6f24", "score": "0.5208729", "text": "def list(self, request):\n\n if request.method == 'GET':\n r = requests.get('https://demo.consul.io/v1/agent/members', params=request.GET)\n\n if r.status_code == 200:\n return Response({'message': 'Hello, This is Consul!','notes': 'This endpoint returns the members the agent sees in the cluster gossip pool. Due to the nature of gossip, this is eventually consistent: the results may differ by agent. The strongly consistent view of nodes is instead provided by /v1/catalog/nodes.', 'response': r})\n return Response({'message': 'GET failed!'})", "title": "" }, { "docid": "150d2105524ce86f346a2af3d375e86f", "score": "0.5205288", "text": "async def all_requests(self, ctx: BBContext):\n\n async with self.bot.pool.acquire() as con:\n ban_requests = await con.fetch(\"select user_tag, reason, message_link, staff_tag FROM moderation.banrequests\")\n if ban_requests:\n view = BanRequests(ctx.author, ctx.guild, self.bot, self.logger, ban_requests) # type: ignore\n await view.start(ctx.channel)\n else:\n await ctx.send('No ban requests found')", "title": "" }, { "docid": "6b304f686b619c08915b8935cfe59a4c", "score": "0.5204081", "text": "def get_all_requests( self ):\n self._assert_admin()\n return SlotRequest.select().join( User )", "title": "" }, { "docid": "8ef7f2a7d0fedee9ed075c2bb117b54e", "score": "0.52031106", "text": "def show_vnics_all(args):\n _logger.debug('%s', where_am_i())\n args.no_truncate = True\n args.details = True\n args.ocid = None\n args.ip_address = None\n args.name = None\n return show_vnics(args)", "title": "" }, { "docid": "1a8b6672c5c28cdfb9f8874b22f1b4d7", "score": "0.52024376", "text": "def display_accounts():\n return Credential.display_accounts()", "title": "" }, { "docid": "fdd8879c978bc4472168358237d417c1", "score": "0.5198581", "text": "def get(self, request):\n xp_entries = XpEntry.objects.filter(user=request.user).all()\n serializer = XpEntrySerializer(xp_entries, many=True)\n return Response(serializer.data)", "title": "" }, { "docid": "5d4d037434687ab4296390aa87b6b3e9", "score": "0.5197216", "text": "def request():\n kbpath = \"/var/www/kb/data/pages\" if not settings.DEBUG else os.path.join(settings.PROJECT_PATH, 'var/pages')\n\n if not SATELLITES:\n return False\n\n print \"Fetching informations for the kpi nagios\"\n\n # Total number of hosts ---------------------------------------------------\n\n nb_total_hosts = SATELLITES.query(\"\"\"\\\nGET hosts\nStats: name != \\\" \\\"\n\"\"\")\n 
nombre = 0\n for sat in nb_total_hosts:\n nombre += sat[0]\n\n nb_total_hosts = nombre\n \n # nb_host_down\n \n nb_host_down_temp = SATELLITES.query(\"\"\"\\\nGET hosts\nColumns: name last_hard_state_change state acknowledged\nFilter: hard_state = 1\nFilter: notifications_enabled = 1\nFilter: scheduled_downtime_depth = 0\nFilter: state > 0\n\"\"\")\n nb_host_down = len(nb_host_down_temp)\n\n # Total number of services ------------------------------------------------\n\n nb_total_services = SATELLITES.query(\"\"\"\\\nGET services\nStats: description != \\\" \\\"\n\"\"\")\n nombre = 0\n for sat in nb_total_services:\n nombre += sat[0]\n\n nb_total_services = nombre\n \n # Total of alerts\n \n nb_total_app_temp = SATELLITES.query_column_unique(\"\"\"\\\nGET hostgroups\nColumns: name\n\"\"\")\n \n csv_report_dir = \"/home/django/public_html/reporting\" if not settings.DEBUG else \"/tmp\"\n report_app = open(path.join(csv_report_dir, \"list_apps.csv\"), \"w\")\n report_app.write(\"Apps;\\n\")\n regex_app=re.compile(\"^app_[-a-zA-Z0-9]+$\",re.IGNORECASE)\n app = filter(regex_app.search, nb_total_app_temp)\n nb_total_app = len(app) \n \n \n for app_name in app: \n report_app.write(\"%s;\\n\" % app_name)\n report_app.close()\n\n\n\n # Total number of Linux ---------------------------------------------------\n\n nb_linux = SATELLITES.query(\"\"\"\\\nGET hostgroups\nColumns: num_hosts\nFilter: name = sys_linux\n\"\"\")\n nombre = 0\n for sat in nb_linux:\n nombre += sat[0]\n\n nb_linux = nombre\n\n\n # Total number of Windows -------------------------------------------------\n\n nb_windows = SATELLITES.query(\"\"\"\\\nGET hostgroups\nColumns: num_hosts\nFilter: name = sys_windows\n\"\"\")\n nombre = 0\n for sat in nb_windows:\n nombre += sat[0]\n\n nb_windows = nombre\n\n\n # Total number of AIX -----------------------------------------------------\n\n nb_aix = SATELLITES.query(\"\"\"\\\nGET hostgroups\nColumns: num_hosts\nFilter: name = sys_aix\n\"\"\")\n nombre = 0\n for sat in nb_aix:\n nombre += sat[0]\n\n nb_aix = nombre\n\n # get the service with path to the procedure ------------------------------\n\n services_all = SATELLITES.query(\"\"\"\\\nGET services\nColumns: host_name description notes_url_expanded contact_groups\n\"\"\")\n written_procedures = 0\n missing_procedures = 0\n total_written = 0\n total_missing = 0\n\n csv_report_dir = \"/home/django/public_html/reporting\" if not settings.DEBUG else \"/tmp\"\n\n myreport = open(path.join(csv_report_dir, \"detailled_report.csv\"), \"w\")\n my_simple_report = open(path.join(csv_report_dir, \"simple_report.csv\"), \"w\")\n myreport.write(\"written;hostname;services;procedure;stratos\\n\")\n my_simple_report.write(\"written;procedure;\\n\")\n procedures = {}\n for services in services_all:\n procedure_path = services[2].split('/')[-1].strip(':').replace(':', '/').lower()\n empty = 1\n for serv in services[3]:\n if empty == 1:\n list_contact = \"%s\" % serv\n empty = 0\n else:\n list_contact += \", %s\" % serv\n empty = 0\n if path.lexists(\"%s/%s.txt\" % (kbpath, procedure_path)):\n total_written +=1\n myreport.write(\"yes;%s;%s;%s;%s\\n\" % (services[0],\n services[1], services[2], list_contact))\n procedures[str(services[2])] = 1\n else:\n total_missing += 1\n myreport.write(\"no;%s;%s;%s;%s\\n\" % (services[0],\n services[1], services[2], list_contact))\n procedures[str(services[2])] = 0\n for procedure, written in procedures.items():\n if written:\n my_simple_report.write(\"yes;%s\\n\" % procedure)\n written_procedures += 1\n else:\n 
my_simple_report.write(\"no;%s\\n\" % procedure)\n missing_procedures += 1\n myreport.close()\n\n result = {\n 'total_hosts': nb_total_hosts,\n 'total_services': nb_total_services,\n 'total_app' : nb_total_app,\n 'linux': nb_linux,\n 'windows': nb_windows,\n 'aix': nb_aix,\n 'written_procedures': written_procedures,\n 'missing_procedures': missing_procedures,\n 'total_written' : total_written,\n 'total_missing' : total_missing,\n 'nb_host_down' : nb_host_down\n }\n\n return result", "title": "" }, { "docid": "c4b31d2b53df09b9cb458140cbdd24fe", "score": "0.51956755", "text": "def list(self, request: Request):\n return super().list(request)", "title": "" }, { "docid": "1418bbfae280ccc93e350d7d8c359252", "score": "0.5195139", "text": "def list(self, request):\n a_viewset = [\n 'Uses actions (list, create, retrieve, update, partial_update)',\n 'Automatically maps to URLs using Routers',\n 'Provides more functionality with less code',\n ]\n\n return Response({'message': 'Hello!', 'a_viewset': a_viewset})", "title": "" }, { "docid": "29829c386b87eb16cc4d0f072a045eef", "score": "0.5191827", "text": "def list(self, request):\n\n a_viewset = [\n 'Uses actions (list, create, retrieve, update, partial_update)'\n 'Automatically maps to URLS using Routers',\n 'Provides more functionality with less code'\n ]\n\n return Response({'message': 'Hello!', 'a_viewset': a_viewset})", "title": "" }, { "docid": "c0e37a485044ae88f48ddf198c18d865", "score": "0.5186302", "text": "def _list_reqs_assigned(self, requesters):\n for requester in requesters:\n print(requester.get_requests())\n return", "title": "" }, { "docid": "2d999fb3824877dc3ed5c9c6621df4ef", "score": "0.5185901", "text": "def cmd_list(hname, json=False, xml=False, links=False):\n\n session_file_path = get_session_file_path()\n smc_client = SMCClient(session_file_path)\n\n try:\n res = smc_client.list(hname)\n except ResolveError as err:\n raise CommandError(err)\n except (SMCOperationFailure) as err:\n raise CommandError(u\"(SMC): \" + unicode(err))\n\n for name in sorted(res):\n print_fmt(\"{}\", name)", "title": "" }, { "docid": "6c46d0246847ac4f589887800d152c9e", "score": "0.5184165", "text": "def list(self, request):\n return CustomeResponse(\n {\n 'msg': \"list method not allowed\"\n },\n status=status.HTTP_400_BAD_REQUEST,\n validate_errors=1\n )", "title": "" }, { "docid": "e8097b354e742252958ae449fc90836c", "score": "0.51817274", "text": "def cli(env):\n\n manager = SoftLayer.MessagingManager(env.client)\n accounts = manager.list_accounts()\n\n table = formatting.Table(['id', 'name', 'status'])\n for account in accounts:\n if not account['nodes']:\n continue\n\n table.add_row([account['nodes'][0]['accountName'],\n account['name'],\n account['status']['name']])\n\n env.fout(table)", "title": "" }, { "docid": "84e0c931a48783eef3c0c80e7baa5187", "score": "0.51802635", "text": "def cmd_listmaps(self, *args):\n self._prepareAuthorizedCommand()\n all = self.mm.getAllMaps()\n pp = printer.MM_Printer_Maps( self.msgs )\n if (not self.quiet):\n pp.run( all )\n print self.msgs.render('report_maps', {'total':len(all)})", "title": "" }, { "docid": "d9a56f24028b1d9e742d101f3493eedc", "score": "0.51658076", "text": "def summarize():\n if not is_request_valid(request):\n abort(400)\n handle_command(request.form,request.form['response_url'])\n return jsonify(\n response_type = \"ephemeral\",\n text = \":robot_face:\"\n )", "title": "" }, { "docid": "5099ac305179d6f711866c37db557617", "score": "0.5161549", "text": "def print_all(self):\n self.print_packet()\n 
self.print_rtap()", "title": "" }, { "docid": "ccb2bc3bc4bf7867a47516212993865c", "score": "0.5154552", "text": "def endpointGuide(self):\n print(\"\\n\".join([\"%s -> %s\" % entry for entry in self.listEntries]))", "title": "" }, { "docid": "54edc4bb7e35c3d611270faf8e44f5ea", "score": "0.5146207", "text": "def show(user, dev):\n print(\"WNI: Waiting to connect to server\")\n\n if dev is True:\n sio.connect('http://localhost:8080')\n else:\n sio.connect('http://104.248.156.240:8080')\n sio.emit('list_all_requests', {'user': user})", "title": "" }, { "docid": "285c6c88f6d28af4870a811c93f65556", "score": "0.5142536", "text": "def showResults(self):\r\n print \"COMMAND : \\\"%s\\\"\" %self.command\r\n print \"OUTPUT : \\\"%s\\\"\" %self.output.strip()\r\n print \"ERROR : \\\"%s\\\"\" %self.error.strip()\r\n print \"RETURN CODE : %d\" %self.returnCode", "title": "" }, { "docid": "d489795c9a1681c5f28ee254ef842794", "score": "0.51399016", "text": "def report_all(self):", "title": "" }, { "docid": "7abd9e9f294f6fa1d4381004dee10a2f", "score": "0.51380056", "text": "def list(obj: SkyNetCtxt, router: str) -> None:\n print(\n LRPProvider(obj).list(router).to_string(\n columns=[\"Name\", 'MAC', 'Enabled', 'Networks']))", "title": "" }, { "docid": "8229c9fda64827d284790413a2634738", "score": "0.5137609", "text": "def get_rbs():\n sql = \"SELECT `nodo`,`etiqueta_ont`,`slot`,`puerto`,`ont` FROM `t_servicios_RBS`\"\n rbs = conector(sql,\"select\",\"Consultando ONTS\")\n return rbs", "title": "" }, { "docid": "a446a85ee7ce0928d594830e25740826", "score": "0.51334566", "text": "def view_all_reservations():\r\n reservations = get_all_reservations()\r\n reservation_table = PrettyTable([\"ID\",\r\n \"Begin date\",\r\n \"End date\",\r\n \"Adults\",\r\n \"Children\",\r\n \"Feeding type\",\r\n \"Extras\",\r\n \"Price\",\r\n \"Paid\"])\r\n for reservation in reservations:\r\n reservation_table.add_row(reservation)\r\n print(reservation_table)", "title": "" }, { "docid": "da18305b13fda7e6cb3c037193e18995", "score": "0.5128679", "text": "def summaries():\n tx = my_bittrex.get_market_summaries()\n print(tabulate(tx[\"result\"], headers=\"keys\", tablefmt=\"grid\"))", "title": "" }, { "docid": "dc7f46b0325f04ba8fe9f912f6153c18", "score": "0.5128366", "text": "def get(self, request: Request):\n return self.list(request)", "title": "" }, { "docid": "dc7f46b0325f04ba8fe9f912f6153c18", "score": "0.5128366", "text": "def get(self, request: Request):\n return self.list(request)", "title": "" }, { "docid": "dc7f46b0325f04ba8fe9f912f6153c18", "score": "0.5128366", "text": "def get(self, request: Request):\n return self.list(request)", "title": "" }, { "docid": "745f79006ccab347e531915b87354cb5", "score": "0.51184803", "text": "def do_list(self, arg):\n context = self.vm.current_context.adapt_context()\n return self.print_cm(context, arg)", "title": "" }, { "docid": "5ff40e1de0bc7b69eabfb4067c500bc6", "score": "0.5116164", "text": "def test_get_all_user_requests():\n response = requests.get('https://maintainencetrackerapi.herokuapp.com/api/v1/user/requests')\n assert response.status_code, 200\n assert response.headers['content-type'] == 'application/json'\n assert b'request' in response.content\n assert b'id' in response.content", "title": "" }, { "docid": "84d452317f3d10564e0a2e9c78d379a1", "score": "0.5108813", "text": "def get(self, request):\n objs = ManagerTask().get_all_tasks()\n if isinstance(objs, dict):\n return render(request, NO_TASK)\n\n return render(request, MANAGER_ALL_TASK, {'task_info': objs})", "title": "" }, 
{ "docid": "c629a6c4bce345f9f28d9850209091ba", "score": "0.51063794", "text": "def print_commands(self):\n for i in self.__cmd_list:\n print i", "title": "" } ]
20bf984df908d1fb52677a7b15e96b59
Return an iterator to the link counts. Returned values have the form (start, end), count
[ { "docid": "acade2d19f779ef287dd95e2b6e85d02", "score": "0.64247334", "text": "def iterCounts(self):\n\n if self._obsCount:\n for period in sorted(self._obsCount.keys()):\n if self._obsCount[period]:\n yield period, self._obsCount[period] \n else:\n for start, end in self.iterCountPeriods():\n count = self.getObsCount(start, end)\n if count:\n yield (start, end), count", "title": "" } ]
[ { "docid": "f741166bf40f0bef0de6e35a4ccb277b", "score": "0.71143895", "text": "def count_sources(edge_iter: EdgeIterator) -> Counter:\n return Counter(u for u, _, _ in edge_iter)", "title": "" }, { "docid": "ff3aa2b5ab802e53c14ebe5ed8a281e0", "score": "0.67742", "text": "def getNumLinks(self):\n return sum(imap(lambda link: 1, self.iterRegularLinks()))", "title": "" }, { "docid": "0cac6f2a0df9787469c11c686bb80166", "score": "0.67385226", "text": "def _get_links_count(self):\n return self.__links_count", "title": "" }, { "docid": "5b886b8148d450e8d1b174025327046f", "score": "0.663654", "text": "def count(self):\n\t\tcount = 0\n\t\tcurrent_node = self.begin\n\t\twhile current_node:\n\t\t\tcount += 1\n\t\t\tcurrent_node = current_node.next\n\t\treturn count", "title": "" }, { "docid": "4f5ad7ba76865cbcf9dc86818fe5868d", "score": "0.6600717", "text": "def route_lengths(self):\n for sources, targets in self.routes():\n yield sum(self.link[pair] for pair in zip(sources, targets))", "title": "" }, { "docid": "e04eba67f0c3d793088b0cc4ebb7fe7e", "score": "0.6578856", "text": "def __len__(self):\n count = 0\n links=set()\n for item in self.graph.items(self.uri):\n assert item not in links,\"There is a loop in the RDF list! (%s has been processed before)\"%item\n links.add(item)\n count += 1\n return count", "title": "" }, { "docid": "3611f840fa7cc7c822a2d7184c5ce7e4", "score": "0.65788144", "text": "def count(self):\n if self.begin is None:\n return 0\n if self.begin and self.end is None:\n return 1\n\n count = 1\n next_node = self.begin.next\n while next_node:\n count += 1\n next_node = next_node.next\n # print ('counted_colors', count)\n return count", "title": "" }, { "docid": "189fad1a6823a35e5a24240f86267db5", "score": "0.64467156", "text": "def itercount(self):\n\t\treturn self.count", "title": "" }, { "docid": "4af0d4174acdbf229cb442ccf294b43d", "score": "0.64337015", "text": "def _get_ref_counts(self) -> Dict[str, int]:", "title": "" }, { "docid": "a0337811fc87457ddee1d3562e0dd782", "score": "0.6401581", "text": "def getNumLinks(self):\n linkCount = 0\n for c in components:\n linkCount += c.getOutputs().size()\n return linkCount", "title": "" }, { "docid": "bea3d7a783d1d849fb8efe9a213cb18b", "score": "0.6397746", "text": "async def _get_ref_counts(self) -> Dict[str, int]:", "title": "" }, { "docid": "88c71be952aa7a0d9c4fcf8c0634cc4b", "score": "0.6323574", "text": "def numLinks(self):\n return self._numLinks", "title": "" }, { "docid": "1ac1b9cd77b2c7e07d7615dbed7f2189", "score": "0.63191855", "text": "def getIterationCounts(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "c6792de3ef0fc3fc04e19fc23cbe6ac7", "score": "0.62841445", "text": "def count_targets(edge_iter: EdgeIterator) -> Counter:\n return Counter(v for _, v, _ in edge_iter)", "title": "" }, { "docid": "281c1e619825bcd2ef42f92fa0857443", "score": "0.62795436", "text": "def num_links(self) -> int:\n return self._data_store.num_entries(\"forte.data.ontology.top.Link\")", "title": "" }, { "docid": "0d2fdeff9dbb87492312c91103f397bb", "score": "0.62772834", "text": "def iter_count(self):\n return self.__iter_count", "title": "" }, { "docid": "2d2a3e4b8d744e295e3e54b3e1b28774", "score": "0.62580115", "text": "def count(self):\n # if self.starts is None:\n # raise Exception('No Start Iterator set')\n\n if not self.pipes:\n raise Exception('No Start Iterator set')\n\n count = 0\n try:\n while True:\n self.next()\n count = count + 1\n except recommend_utils.FastNoSuchElementException:\n # Ignore 
FastNoSuchElementException\n pass\n return count", "title": "" }, { "docid": "f72cea06cdf7da40f522dfdd1da4aee5", "score": "0.62397474", "text": "def cnt(sequence):\n\tcnt.starts += 1\n\tfor item in sequence:\n\t\tcnt.items += 1\n\t\tyield item", "title": "" }, { "docid": "0f9db78ef87f63ebe72743603bc8c656", "score": "0.62046164", "text": "def count(self):\n # same old count\n count = 0\n node = self.head\n while node:\n count += 1\n node = node.next\n return count", "title": "" }, { "docid": "abacb843b2e7747ba4b4b145e9aa658a", "score": "0.6186819", "text": "def nlinks(self):\n\n return self._nlinks", "title": "" }, { "docid": "6ad5be442429f792f658c691d4e3e4de", "score": "0.60805583", "text": "def __len__(self):\n n = 0\n for nodeA in iter(self):\n n += 1\n return n", "title": "" }, { "docid": "387213a5358ca5f668198471092453a2", "score": "0.603986", "text": "def links(self) -> Iterator[Link]:\n yield from self._links.values()", "title": "" }, { "docid": "41d82fe49524a92a5a12293d38ae8abd", "score": "0.6029758", "text": "def __len__(self):\n n = 0\n for nodeB in iter(self):\n n += 1\n return n", "title": "" }, { "docid": "19f1e3bd79c25ca3f83fc2648165cebb", "score": "0.60145247", "text": "def size(self):\n current = self.head\n count = 0\n while current:\n count += 1\n current = current.get_next\n return count", "title": "" }, { "docid": "66b7879cde15c567dbaee087688db588", "score": "0.6003993", "text": "def counter(start, end):\n while start <= end:\n yield start\n start += 1", "title": "" }, { "docid": "37b7c7db5fe200e17fa2fa377477dfb7", "score": "0.60026366", "text": "def getCounts ( self ) :\r\n\r\n return self.countList", "title": "" }, { "docid": "fe654f1b18d661fbb17450ae6b996a16", "score": "0.59945834", "text": "def len(self):\n tmp = self.head\n count = 0\n while tmp.next:\n count += 1\n tmp = tmp.next\n return count", "title": "" }, { "docid": "89d4e21129c173ead00a8669d888c772", "score": "0.59932506", "text": "def _get_num_link_neighbors(self):\n return self.__num_link_neighbors", "title": "" }, { "docid": "ab2a93ae61a1a23f5f4cac5f8d4c9aeb", "score": "0.5954377", "text": "def len(self):\n count = 0\n temp = self.head\n while temp != None:\n temp = temp.next\n count += 1\n return count", "title": "" }, { "docid": "13f08ed229b1ad6753e077d474adad74", "score": "0.59523505", "text": "def __len__(self):\n count = 0\n current = self.head\n while current != None:\n count += 1\n current = current.next\n\n return count", "title": "" }, { "docid": "26da9aad35f47b34b15994ba9228b6c3", "score": "0.59483826", "text": "def len(self):\n i=0\n j=self.head\n while(j.next != None):\n \ti=i+1\n \tj=j.next\n return i", "title": "" }, { "docid": "6eca539e60f98fd40ffa1f7648710d44", "score": "0.5943561", "text": "def size(self):\n count = 0\n current = self.dll.head\n while current is not None:\n count += 1\n current = current.next_node\n return count", "title": "" }, { "docid": "e7426da8a52e2a52c40a14ff8ee61883", "score": "0.5942751", "text": "def length(self):\n\t\t#Complexity class- n\n\t\tcount = 1\n\t\tstart = self.head\n\t\twhile start.next != None:\n\t\t\tcount += 1\n\t\t\tstart = start.next\n\t\treturn count", "title": "" }, { "docid": "c16576b256c9a097857f9e4808228cad", "score": "0.59424055", "text": "def size(self):\n counter = 1;\n itrNode = self;\n while itrNode.next:\n counter = counter + 1;\n itrNode = itrNode.next;\n \n return counter;", "title": "" }, { "docid": "0115a7ecf7105f760357dcce7f80cc7b", "score": "0.59357876", "text": "def count(iterator):\n return sum(1 for i in iterator)", "title": 
"" }, { "docid": "8a417013d9c197c78c47590a965b7e80", "score": "0.59336424", "text": "def count(linkedList, target):\r\n counter = 0\r\n\r\n #Current node starts as the value of the\r\n #1st element's dictionary (which is just a pointer to the next node/element) \r\n currentNode = linkedList\r\n \r\n while currentNode != None:\r\n if currentNode['data'] == target:\r\n counter += 1\r\n currentNode = currentNode['next']\r\n \r\n return counter", "title": "" }, { "docid": "079717844e56574984dff6abda198c2a", "score": "0.5931736", "text": "def _get_counts(self):\n\n seq_counts = {}\n\n length = len(self.seq)\n\n for idx in range(0, length, 4):\n\n base = self.seq[idx: idx + 3]\n seq_counts.setdefault(base, 0)\n seq_counts[base] += 1\n\n if (idx + 4) < length:\n linkage = self.seq[idx + 3]\n seq_counts.setdefault(linkage, 0)\n seq_counts[linkage] += 1\n\n return seq_counts", "title": "" }, { "docid": "ac2ed36878db47265f710e14314fa0d9", "score": "0.59244573", "text": "def counts(self):\n return self._counts", "title": "" }, { "docid": "ac2ed36878db47265f710e14314fa0d9", "score": "0.59244573", "text": "def counts(self):\n return self._counts", "title": "" }, { "docid": "c00d215c4181bcd6d1f619fd93c97868", "score": "0.59208447", "text": "def count(self):\n return self.node_count", "title": "" }, { "docid": "b00c1c42ff8851c4e36b993b0aa1ddc8", "score": "0.5919887", "text": "def GetCount():", "title": "" }, { "docid": "a2052ebb95996cbd50ed3f0b24bbfb54", "score": "0.5917653", "text": "def size(self):\n current = self.head\n count = 0\n while current:\n count += 1\n current = current.next\n return count", "title": "" }, { "docid": "a2052ebb95996cbd50ed3f0b24bbfb54", "score": "0.5917653", "text": "def size(self):\n current = self.head\n count = 0\n while current:\n count += 1\n current = current.next\n return count", "title": "" }, { "docid": "360602a6afbb60aa2c1f84ec2ced6c45", "score": "0.5915006", "text": "def __len__(self):\n n = 0\n for node in iter(self):\n n += 1\n return n", "title": "" }, { "docid": "17912ef415c04e1d3db9ba9cb3069d5d", "score": "0.5912437", "text": "def rels_count(sub):\n #nx.draw_networkx_edge_labels(sub)\n edges = sub.edges(data=True)\n rels = []\n for e in edges:\n rels.append(e[2][\"label\"])\n return Counter(rels)", "title": "" }, { "docid": "d4ee9cdbf84ff299f98842bc533b513a", "score": "0.59024644", "text": "def count(self):\n count = 0\n if self._top:\n count += 1\n node = self._top\n while node.next:\n count += 1\n node = node.next\n return count", "title": "" }, { "docid": "263cc8609a521f158a6fd91937c172b9", "score": "0.58916545", "text": "def __len__(self):\n\t\tself.count = 0\n\t\tself.current = self.head\n\t\twhile self.current is not None:\n\t\t\tself.count += 1\n\t\t\tself.current = self.current.next\n\t\treturn self.count", "title": "" }, { "docid": "0cd3b7604b25768a37b3709aacf42642", "score": "0.58859515", "text": "def size(self):\n value = self.head_node\n counter = -1\n while value is not None:\n value = value.next\n counter += 1\n return counter", "title": "" }, { "docid": "1aaf968453699ec1af83161ee1f677ec", "score": "0.5868728", "text": "def size_linked_list(self):\n node = self.head\n count = 0\n while node:\n count += 1\n node = node.next\n \n return count", "title": "" }, { "docid": "f2fcd4476ef6b8ba7bc8bb11e6e6415a", "score": "0.5860336", "text": "def __len__(self) -> int:\n counter = 0\n curr = self._head\n while curr is not None:\n counter += 1\n curr = curr.next\n return counter", "title": "" }, { "docid": "ddcb27a852046b6ba0b271952518c39d", "score": 
"0.5857906", "text": "def length(self):\n cur = self._head\n count = 0\n\n while cur != None:\n count +=1\n cur = cur.next\n\n return count", "title": "" }, { "docid": "58546615ffcd52c3d92fb1f748e85869", "score": "0.58446723", "text": "def visit_counts(self):\n return Counter(\n tile.visit_count\n for tile\n in self.values()\n )", "title": "" }, { "docid": "6eea5763260d18c46358d6448b5410ec", "score": "0.5833632", "text": "def count(self):\n counter = 0\n for i in self:\n counter += 1\n\n return counter", "title": "" }, { "docid": "8e031c82c3673a268c68cc0b6310b8dc", "score": "0.58265036", "text": "def __len__(self):\n\n count = 0\n currentnode = self._top\n while currentnode is not None:\n count += 1\n currentnode = currentnode._next\n return count", "title": "" }, { "docid": "d895875cc620b52a6300c0d28d914e0d", "score": "0.5821782", "text": "def totalConnections(analyzer):\n return gr.numEdges(analyzer['graph'])", "title": "" }, { "docid": "e15022c8906205f4e89d782f83b45f6e", "score": "0.5807296", "text": "def get_linked_lengths(frames, linker, *args, **kw):\n linked = link(frames, linker, *args, **kw)\n return linked.groupby('particle').x.count()", "title": "" }, { "docid": "25cc0725e24acfef3acad63d9b81538e", "score": "0.5800594", "text": "def iterations(self):\n return self._ptr.contents.iterations", "title": "" }, { "docid": "fb00c320573105bd0065eab3a66a100a", "score": "0.57963574", "text": "def count_connections(self):\n\t\ti = 0\n\t\tfor i in range(0, len(self.end_points)):\n\t\t\tself.point_connection_count.append(self.connection_count(i))", "title": "" }, { "docid": "d9913b3cc13f3e7d1933e88e6caa668e", "score": "0.57915854", "text": "def count(self):\n # type: () -> Counts\n directories = 0\n files = 0\n data = 0\n for _path, info in self._make_iter(namespaces=[\"details\"]):\n if info.is_dir:\n directories += 1\n else:\n files += 1\n data += info.size\n return Counts(directories=directories, files=files, data=data)", "title": "" }, { "docid": "dd5bd9808954d24feaacbfceb836db7c", "score": "0.57798237", "text": "def count(self):\n count = 0\n for _ in self:\n count += 1\n return count", "title": "" }, { "docid": "4e81e4aaab92c91244d62f1a2a6c5402", "score": "0.57770073", "text": "def count(self, value):\n target = self.head\n count = 0 \n for x in range(self.length):\n target = target.next\n if target.val == value:\n count += 1\n return count\n pass", "title": "" }, { "docid": "0ef4273602d9fd51c2798a9f5635defa", "score": "0.57666415", "text": "def iterLinks(self):\n return iter(self._linksByIid.values())", "title": "" }, { "docid": "962eced18c87df5f45bce19f89e0726e", "score": "0.57606167", "text": "def group_lengths(links):\n lengths = []\n while links:\n start_item = list(links.keys())[0] \n g = find_group(links, start_item)\n lengths.append(len(g))\n # remove group from links\n for key in g:\n links.pop(key)\n return lengths", "title": "" }, { "docid": "aca0bb6b26322bddd9a61839ccf95eb1", "score": "0.57587975", "text": "def _get_n_links(self):\n with Handle(self) as hFile:\n file_information = wrapped(win32file.GetFileInformationByHandle, hFile)\n return file_information[7]", "title": "" }, { "docid": "2076a1df03a328e44f449aed26976b21", "score": "0.5755015", "text": "def length(self):\n curr = self.head\n total = 0\n while curr.next != None:\n total += 1\n curr = curr.next\n return total", "title": "" }, { "docid": "385f4efc5a12799e27fc6f1dfefc3d04", "score": "0.57544017", "text": "def length(self):\n if SingleLinkList.is_empty(self):\n return 0\n\n cur = self._head\n count = 1\n while 
cur.next != self._head:\n cur = cur.next\n count += 1\n\n return count", "title": "" }, { "docid": "05a8b39b19212241b5eab1b50ababe54", "score": "0.57513237", "text": "def iter_nodes(self):\r\n return xrange(len(self.nodes))", "title": "" }, { "docid": "2b7c194c931e807543cf58221d97a913", "score": "0.57464594", "text": "def size(self) :\n count = 0\n current = self.head\n while current is not None:\n count += 1\n current = current.getNext()\n return count", "title": "" }, { "docid": "e1ec4075a5129ee20dc8aa641e99c2a8", "score": "0.5728731", "text": "def size(self):\n counter = 0\n current = self.head\n while current is not None:\n counter += 1\n current = current.next_node\n return counter", "title": "" }, { "docid": "40918fa0911eb5663fd7e6e3ef420428", "score": "0.5724653", "text": "def size(self):\n count = 0\n current_node = self.head\n while current_node is not None:\n count += 1\n current_node = current_node.get_next()\n return count", "title": "" }, { "docid": "63cfd045608ecb7236dca3ca9628bd4b", "score": "0.5722326", "text": "def GetRangesCount(self) -> int:\n ...", "title": "" }, { "docid": "aad56af411d8d9e4cc10ecac915a6c8d", "score": "0.5719039", "text": "def count_edges(self):\n return self._n", "title": "" }, { "docid": "53a606f10da433d93e20ab54dea48895", "score": "0.5712312", "text": "def _counts_at_position(positions, orig_reader, cmp_reader):\n pos_counts = collections.defaultdict(lambda:\n collections.defaultdict(lambda:\n collections.defaultdict(int)))\n for orig_parts in orig_reader:\n cmp_parts = cmp_reader.next()\n for pos in positions:\n try:\n pos_counts[pos][int(orig_parts[pos+1])][int(cmp_parts[pos+1])] += 1\n except IndexError:\n pass\n for pos, count_dict in pos_counts.iteritems():\n for orig_val, cmp_dict in count_dict.iteritems():\n for cmp_val, count in cmp_dict.iteritems():\n yield pos+1, orig_val, cmp_val, count", "title": "" }, { "docid": "68f1ead4c4b38c30a610665507142e43", "score": "0.5709216", "text": "def linkCount(self, nodeRef, fieldName):\n fieldDict = self.nodeRefDict.get(nodeRef, {})\n linkSet = fieldDict.get(fieldName, set())\n return len(linkSet)", "title": "" }, { "docid": "f2e277af96bfb47c93302d5dad16455c", "score": "0.5697153", "text": "def __len__(self):\r\n count = 0\r\n currentnode = self._front\r\n while currentnode is not None:\r\n count += 1\r\n currentnode = currentnode._next\r\n \r\n return count", "title": "" }, { "docid": "5458bc0d1687e3e378237783ee0382fb", "score": "0.5696842", "text": "def size(self):\n\n current = self.head\n count = 0\n\n while current:\n count += 1\n current = current.next_node\n\n return count", "title": "" }, { "docid": "309bbdf8353e4a5dba1c1aed7de95f6f", "score": "0.5696036", "text": "def count(sq: Iteratable, start: tp.Optional[int] = None, step: int = 1,\n start_at: tp.Optional[int] = None) -> tp.Iterator[int]:\n if start_at:\n warnings.warn('This is deprecated and will be removed in Satella 3.0. 
Use start instead.',\n DeprecationWarning)\n start = start_at\n num = start or 0\n for _ in sq:\n yield num\n num += step", "title": "" }, { "docid": "a37252ec2845d4cf1ddb3e8af53480c8", "score": "0.569381", "text": "def size(self):\n \n current_node = self.head\n i = 0\n while current_node != None:\n# print(\"c_node data: \", current_node.data)\n current_node = self.hashmap.get(current_node.previous_hash)\n i += 1\n return i\n pass", "title": "" }, { "docid": "d9e6b5b3261b104ac0d85c36712d1533", "score": "0.56934935", "text": "def count(self):\n resp, page = self.request(\"GET\", self.uri)\n return page[\"total\"]", "title": "" }, { "docid": "45ca6f60f502a5a44d8f8b4665194c10", "score": "0.56877464", "text": "def fast_count_segments(starts, ends, points):\n graph_items = segregate_items(starts, ends, points)\n # print(graph_items)\n point_cnt = {p: 0 for p in points}\n cur = 0\n for item in graph_items:\n if item[1] == 'L':\n cur += 1\n elif item[1] == 'R':\n cur -= 1\n else:\n point_cnt[item[0]] = cur\n\n cnt = [point_cnt[point] for point in points]\n return cnt", "title": "" }, { "docid": "fcd872d7c28b2cdc0058ec2b579e85ce", "score": "0.5679965", "text": "def getSize(linkedList):\r\n count = 0\r\n node = linkedList\r\n while node != None:\r\n count += 1\r\n node = node['next']\r\n return count", "title": "" }, { "docid": "411c5c11e407425d9f7700a1dc271969", "score": "0.5676698", "text": "def GetCount(self):", "title": "" }, { "docid": "411c5c11e407425d9f7700a1dc271969", "score": "0.5676698", "text": "def GetCount(self):", "title": "" }, { "docid": "411c5c11e407425d9f7700a1dc271969", "score": "0.5676698", "text": "def GetCount(self):", "title": "" }, { "docid": "411c5c11e407425d9f7700a1dc271969", "score": "0.5676698", "text": "def GetCount(self):", "title": "" }, { "docid": "411c5c11e407425d9f7700a1dc271969", "score": "0.5676698", "text": "def GetCount(self):", "title": "" }, { "docid": "411c5c11e407425d9f7700a1dc271969", "score": "0.5676698", "text": "def GetCount(self):", "title": "" }, { "docid": "411c5c11e407425d9f7700a1dc271969", "score": "0.5676698", "text": "def GetCount(self):", "title": "" }, { "docid": "411c5c11e407425d9f7700a1dc271969", "score": "0.5676698", "text": "def GetCount(self):", "title": "" }, { "docid": "3ab57e5465c4011ca706cdaa6fc4076e", "score": "0.566808", "text": "def length(self):\n current = self.head\n count = 0\n while current is not None:\n count = count + 1\n current = current.getNext()\n return count", "title": "" }, { "docid": "ee8a016d7cba1bc91cd1269e4e2e69c2", "score": "0.5665678", "text": "def length(self):\n\n # o(n) for all n nodes in the list because we have to interate all n nodes and count 1\n # Loop through all nodes and count one for each\n\n # start with the head of the node\n # end the loop at the tail\n # create a variable named count\n # increment the count variable every time it loop through a node\n # return the count variable when the loop is finished\n\n # counter variable that increment by 1 every time the loop goes through it\n count = 0\n\n # keep track of the head node\n keep_track = self.head\n\n while (keep_track):\n count += 1\n # keep_track is passed to the next node\n keep_track = keep_track.next\n # return count\n return count", "title": "" }, { "docid": "be6cb4d264e451fb2df35f563a06b88d", "score": "0.5663701", "text": "def get_number_of_stored_links():\n conn = None\n number_of_links = None\n try:\n # Read database configuration\n params = config()\n # Connect to the PostgreSQL database\n conn = psycopg2.connect(**params)\n # 
Create a new cursor\n cur = conn.cursor()\n # Select the next non-tracked link\n cur.execute(\"SELECT COUNT(*) FROM link\")\n row = cur.fetchone()\n if row is not None:\n number_of_links = row[0]\n # Close communication with the PostgreSQL database\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\n return number_of_links", "title": "" }, { "docid": "465bc04f8fdc2da894581ce1ce6059ec", "score": "0.5660931", "text": "def length(self):\n current = self.head\n count = 0\n while current != None:\n count = count + 1\n current = current.getNext()\n return count", "title": "" }, { "docid": "314347eeb4438eaa7cd0e081272a0d98", "score": "0.56593055", "text": "def count(self, value):\n counter = 0\n curr = self.head.next\n for i in range(self.length):\n if curr.val == value:\n counter += 1\n curr = curr.next\n return counter", "title": "" }, { "docid": "0f878864ec68b294b7fc7c8e8259ee14", "score": "0.5658368", "text": "def length(self):\r\n n = 0\r\n ptr = self.head\r\n while (ptr != None):\r\n n += 1\r\n ptr = ptr.next\r\n return n", "title": "" }, { "docid": "b589c025d4631d428012a05ce0e3adb6", "score": "0.56540227", "text": "def _aggregateAllMovementCounts(self, startTimeInMin, endTimeInMin):\n linkCount = 0\n for movement in self.iterOutMovements():\n if movement.isUTurn():\n continue\n count = movement.getObsCount(startTimeInMin, endTimeInMin)\n #TODO a movement can return a zero value!!! so if you say\n #if count you are missing many movements\n if count is not None:\n linkCount += count\n else:\n return None\n #TODO: is this right???\n return linkCount if linkCount else None", "title": "" }, { "docid": "d720b576628108ea6a55bfec9ea02689", "score": "0.5653668", "text": "def IterationCount(self) -> int:", "title": "" }, { "docid": "d720b576628108ea6a55bfec9ea02689", "score": "0.5653668", "text": "def IterationCount(self) -> int:", "title": "" }, { "docid": "cea9955a1bb81d11036bbef5f0498196", "score": "0.56533146", "text": "def length(self):\n # TODO: Loop through all nodes and count one for each\n length_counter = 0\n current = self.head\n\n while current is not None:\n current = current.next\n length_counter += 1\n return length_counter", "title": "" }, { "docid": "c389a48719beb7333b202fece469a4e9", "score": "0.5652262", "text": "def generator_len(self, it):\n return len(list(it))", "title": "" } ]
b96fb0c336541696d73fb3e20a0c16e4
Loop over each image as a query and evaluate its performance based on top_r selection
[ { "docid": "abd2b101df9f7c3f228309044a374c6c", "score": "0.0", "text": "def oneAgainstAllFaiss(y_true, hash_binary, cls_num_ac, top_r):\n precisions, recalls, dist = [], [], []\n numSample = hash_binary.shape[0]\n # index = faiss.IndexFlatL2(hash_binary.shape[1])\n # index.add(np.ascontiguousarray(hash_binary.astype(np.float32)))\n\n # D, I = index.search(np.ascontiguousarray(hash_binary.astype(np.float32)), top_r+1)\n\n # https://github.com/facebookresearch/faiss/wiki/Binary-indexes\n index = faiss.IndexBinaryFlat(hash_binary.shape[1] * 8)\n index.add(np.ascontiguousarray(hash_binary.astype(np.uint8)))\n D, I = index.search(np.ascontiguousarray(hash_binary.astype(np.uint8)), top_r+1)\n\n # check whether the query index is inside the retrieved, since the first place of I may not be the index of query\n I_re = []\n for i in range(numSample):\n tmp = I[i].tolist()\n if i in set(tmp):\n tmp.remove(i)\n else:\n tmp = tmp[:-1]\n I_re.append(tmp)\n I_re = np.asarray(I_re)\n indexes = I_re\n\n assert indexes.shape[1] == top_r\n\n y_true_cls_num = []\n\n for _, label in enumerate(y_true):\n y_true_cls_num.append(cls_num_ac[label])\n \n y_true_cls_num = np.asarray(y_true_cls_num) - 1\n\n indexes_f = indexes.flatten()\n knn_labels = y_true[indexes_f.tolist(),].reshape(indexes.shape)\n knn_labels_ind = (knn_labels == y_true[:, None]).astype(int)\n \n precisions = knn_labels_ind.sum(axis=1) / top_r\n recalls = knn_labels_ind.sum(axis=1) / y_true_cls_num\n\n # return np.mean(np.array(precisions)), np.mean(np.array(recalls)), dists\n return np.mean(precisions), np.mean(recalls), None", "title": "" } ]
[ { "docid": "c36dbba6bc9e1aeb9366f525170254c5", "score": "0.58435273", "text": "def query(self, images):\n if self.pool_size == 0:\n return images\n return_images = []\n for image in images:\n if self.num_imgs < self.pool_size:\n self.num_imgs += 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = np.random.rand()\n if p > 0.5:\n random_id = np.random.randint(0, self.pool_size)\n tmp = self.images[random_id].copy()\n self.images[random_id] = image\n return_images.append(tmp)\n else:\n return_images.append(image)\n return np.array(return_images)", "title": "" }, { "docid": "813392db7041a59d286f2b4a0708800d", "score": "0.5836827", "text": "def find_queries_on_image(image, query):\n\n work_image = image.copy()\n h, w = query.shape[:2] # Size of query (note! that collumns and rows switched in opencv)\n\n # Common consts\n # Radius is a distanse between points to decide that template matches are equal. Use as radius 50% of min side of query\n radius = min([h, w]) * 0.5\n # Offset is value that used to deside if matched points has the same coords in query and croped image.\n # It is 50% of radius of point sparsing\n offset = radius * 0.5\n\n # Find matches and select results above threshold\n result = cv2.matchTemplate(work_image, query, cv2.TM_CCOEFF_NORMED) # Use 'normed' method to easy choose a threshold from 0 to 1\n loc = np.where(result >= threshold) # filter by threshold\n points = zip(*loc[::-1]) # Get matched points (note! - Remember to switch collumns and rows to get x(w),y(h))\n\n # Save vals of matched points\n vals = [ result[pt[1]][pt[0]] for pt in points ]\n\n # Sort points based on descending values from vals array\n # it allows get better results from sparsing points method\n points = [p for p, _ in reversed(sorted(zip(points,vals), key=lambda pair: pair[1]))]\n\n # Sparce points to reduce matches\n sparsed_points = sparse_subset(points, radius)\n\n # Match features on every cropped image build from sparced point to deside if it is real object from query\n # Initiate SIFT detector\n det = cv2.xfeatures2d.SIFT_create()\n # Create BFMatcher object\n bf = cv2.BFMatcher()\n # Convert query and image to gray\n query_gray = cv2.cvtColor(query,cv2.COLOR_BGR2GRAY)\n image_gray = cv2.cvtColor(work_image,cv2.COLOR_BGR2GRAY)\n\n # Loop memory\n res_img = np.zeros((0, 0, 3), np.uint8) # cross loop image to save result\n\n # {\n # 'point': pt,\n # 'matches': matches,\n # 'good_matches': good_matches,\n # 'common_good_matches': common_good_matches\n # }\n accepted_points = [] # points to keep. list with dict\n rejected_points = [] # points to reject. list with dict\n # Loop to match\n for pt in sparsed_points:\n # Crop image (note! 
remember - sparsed points is (x,y) but crop function gets args (y:len,x:len))\n crop_img = image_gray[pt[1]:pt[1] + h, pt[0]:pt[0] + w]\n\n # find the keypoints and descriptors with SURF\n img1, img2 = query_gray, crop_img\n kp1, des1 = det.detectAndCompute(img1, None)\n kp2, des2 = det.detectAndCompute(img2, None)\n\n # Match descriptors if enough points found\n matches = bf.knnMatch(des1,des2,k=2) if (len(kp1) >= 3 and len(kp2) >= 3) else []\n\n # Store all the good matches as per Lowe's ratio test.\n good_matches = []\n for m,n in matches:\n if m.distance < 0.75*n.distance:\n good_matches.append(m)\n\n # Sort them in the order of their distance.\n good_matches = sorted(good_matches, key = lambda x:x.distance)\n\n # Get match points with the same coords on img1 and img2 (within small offset)\n common_good_matches = [] # list to keep matches\n for good_match in good_matches:\n is_x_matching_within_offset = abs(kp1[good_match.queryIdx].pt[1] - kp2[good_match.trainIdx].pt[1]) <= offset\n is_y_matching_within_offset = abs(kp1[good_match.queryIdx].pt[0] - kp2[good_match.trainIdx].pt[0]) <= offset\n if is_x_matching_within_offset and is_y_matching_within_offset:\n common_good_matches.append(good_match)\n\n # Make decision. and save result to list\n # To keep sparsed point - We should have at least 3 common points and more or equal than 20% of all matches\n res_dict = {\n 'point': pt,\n 'matches': matches,\n 'good_matches': good_matches,\n 'common_good_matches': common_good_matches\n }\n if len(common_good_matches) >= 3 and len(common_good_matches) >= 0.2*len(matches):\n accepted_points.append(res_dict)\n else:\n rejected_points.append(res_dict)\n\n # Draw black frame on images for visual separation\n cv2.rectangle(img1, (0, 0), img1.shape[:2][::-1], (0, 0, 0), 2)\n cv2.rectangle(img2, (0, 0), img2.shape[:2][::-1], (0, 0, 0), 2)\n # Draw good matches to img\n img3 = cv2.drawMatches(img1, kp1, img2, kp2, good_matches, None, flags=2)\n # Draw common good matches to img\n img4 = cv2.drawMatches(img1, kp1, img2, kp2, common_good_matches, None, flags=2)\n\n # Add images to result\n img34 = concatenate_cv2_images(img3, img4, axis=1)\n res_img = concatenate_cv2_images(res_img, img34, axis=0)\n\n\n # Draw red rectangle on matched queries\n for pt in points:\n cv2.rectangle(work_image, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)\n\n # Draw blue rectangle on matched queries, find centers of the rectangles. 
Draw small sircles on the centers\n accepted_rectangle_centers = []\n for pt in [ point_dict['point'] for point_dict in accepted_points ]:\n # blue rectangle\n cv2.rectangle(work_image, pt, (pt[0] + w, pt[1] + h), (255, 0, 0), 2)\n rectangle_center = (pt[0] + w/2, pt[1] + h/2)\n # blue circle r=10\n cv2.circle(work_image, rectangle_center, 10, (255, 0, 0), thickness=2, lineType=8, shift=0)\n accepted_rectangle_centers.append(rectangle_center)\n\n return radius, offset, points, sparsed_points, accepted_points, rejected_points, accepted_rectangle_centers, res_img, work_image", "title": "" }, { "docid": "367e8e8706c964fa9c08b05efcaa6703", "score": "0.5793264", "text": "def _phrase_relevance_images(index):\n\n less_relevant_1 = _test_image(\n title='A picture of my office',\n tags=[{'name': 'office'}],\n creator='Alice Foo',\n relevance=QAScores.LESS_RELEVANT.value\n )\n less_relevant_1.save(index=index)\n\n less_relevant_2 = _test_image(\n title='My office in my home',\n tags=[{'name': 'office'}, {'name': 'home'}],\n creator='Gordon',\n relevance=QAScores.LESS_RELEVANT.value\n )\n less_relevant_2.save(index=index)\n\n not_relevant = _test_image(\n title='Mastiff',\n tags=[{'name': 'dog'}],\n creator='Liam',\n relevance=QAScores.NOT_RELEVANT.value\n )\n not_relevant.save(index=index)\n\n # This should be the top result.\n target = _test_image(\n title='My home office',\n tags=[{'name': 'home office'}, {'name': 'noise'}, {'name': 'clutter'}],\n creator='John Fooson',\n relevance=QAScores.TARGET.value\n )\n target.save(index=index)", "title": "" }, { "docid": "4c692ea5aa1ea036507e41f5991b7013", "score": "0.57181007", "text": "def query(self, images):\n if self.pool_size == 0:\n return images\n return_images = []\n for image in images:\n image = torch.unsqueeze(image.data, 0)\n if self.num_imgs < self.pool_size:\n self.num_imgs = self.num_imgs + 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = random.uniform(0, 1)\n if p > 0.5:\n random_id = random.randint(0, self.pool_size - 1)\n tmp = self.images[random_id].clone()\n self.images[random_id] = image\n return_images.append(tmp)\n else:\n return_images.append(image)\n return_images = torch.cat(return_images, 0)\n return return_images", "title": "" }, { "docid": "e22e73abe8ddacf0792bf8c06e485f86", "score": "0.5635902", "text": "def query(self, images):\n if self.buffer_size == 0:\n return images\n return_images = []\n for image in images:\n image = torch.unsqueeze(image.data, 0)\n if self.img_num < self.buffer_size:\n self.img_num = self.img_num + 1\n self.image_buffer.append(image)\n return_images.append(image)\n else:\n use_buffer = np.random.random() < self.buffer_ratio\n if use_buffer:\n random_id = np.random.randint(0, self.buffer_size)\n image_tmp = self.image_buffer[random_id].clone()\n self.image_buffer[random_id] = image\n return_images.append(image_tmp)\n else:\n return_images.append(image)\n return_images = torch.cat(return_images, 0)\n return return_images", "title": "" }, { "docid": "c2ea30a100c68181295e4a333aef1404", "score": "0.5628681", "text": "def run(self):\n\n print('>> Generating pose predictions based on top-1 images...')\n output = []\n for i, rank in tqdm(enumerate(self._ranks.T), total=self._ranks.shape[1]):\n query_image = self._dataset.data['query_image_names'][i]\n for j in rank:\n nearest_neighbor = self._dataset.data['reference_image_names'][j]\n key = self._dataset.key_converter(nearest_neighbor)\n if key in self._filename_to_pose:\n quaternion, camera_pose_matrix = 
self._filename_to_pose[key]\n output.append(\n [self._dataset.output_converter(query_image),\n *quaternion, *list(camera_pose_matrix[:3,3])])\n break\n return output", "title": "" }, { "docid": "dfd6a5901ef27b0d64fb75e7ff8484cb", "score": "0.5598365", "text": "def evaluate(self):\n tic = time.time()\n\n print(\"Running per image evaluation...\")\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if p.useSegm is not None:\n p.iouType = \"segm\" if p.useSegm == 1 else \"bbox\"\n print(\"useSegm (deprecated) is not None. Running {} evaluation\".format(p.iouType))\n print(\"Evaluate annotation type *{}*\".format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params = p\n\n self._prepare()\n\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == \"segm\" or p.iouType == \"bbox\":\n computeIoU = self.computeIoU\n elif p.iouType == \"keypoints\":\n computeIoU = self.computeOks\n self.ious = {\n (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds\n }\n\n maxDet = p.maxDets[-1]\n\n # <<<< Beginning of code differences with original COCO API\n def convert_instances_to_cpp(instances, is_det=False):\n # Convert annotations for a list of instances in an image to a format that's fast\n # to access in C++\n instances_cpp = []\n for instance in instances:\n instance_cpp = _C.InstanceAnnotation(\n int(instance[\"id\"]),\n instance[\"score\"] if is_det else instance.get(\"score\", 0.0),\n instance[\"area\"],\n bool(instance.get(\"iscrowd\", 0)),\n bool(instance.get(\"ignore\", 0)),\n )\n instances_cpp.append(instance_cpp)\n return instances_cpp\n\n # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++\n ground_truth_instances = [\n [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]\n for imgId in p.imgIds\n ]\n detected_instances = [\n [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds]\n for imgId in p.imgIds\n ]\n ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]\n\n if not p.useCats:\n # For each image, flatten per-category lists into a single list\n ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances]\n detected_instances = [[[o for c in i for o in c]] for i in detected_instances]\n\n # Call C++ implementation of self.evaluateImgs()\n self._evalImgs_cpp = _C.COCOevalEvaluateImages(\n p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances\n )\n self._evalImgs = None\n\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print(\"COCOeval_opt.evaluate() finished in {:0.2f} seconds.\".format(toc - tic))\n # >>>> End of code differences with original COCO API", "title": "" }, { "docid": "fc38960557c0b52d812c1db290b31b23", "score": "0.5584452", "text": "def linked_iteration(filepath,image_filter, type_filter,default_indexes, fields):\n\n if 'tvi' in type_filter and 'vi' not in type_filter:\n print('to get the \"tvi\" you need to specify the \"vi\"')\n return\n\n # get a tuple with an ordered dictionary {raster_name: field_count} and a set with the unique raster names\n tablestructure = get_structure(filepath)\n\n images = list(tablestructure[0].keys())\n counts = list(tablestructure[0].values())\n\n # this will store the field indexes\n indexes = list(range(default_indexes))+ []\n 
imagenames=[]\n # add indexes as first fields\n for i in range(default_indexes): imagenames.append('id_'+str(i))\n\n for n,image in enumerate(images):\n\n image_name = image.split('_')[-3]+'_'+ image.split('_')[-2]\n\n if not image_filter or (image_filter and (image_name in image_filter)): # define wich images, all or some?\n\n # get the number of fields for this image\n offset = 0\n for i in range(n):\n offset += counts[i]\n\n # print(offset)\n\n field_count = tablestructure[0][image]\n\n for j in range(offset,offset+field_count):\n\n #split the field name\n splitted = fields[j].split('_')\n\n for t in type_filter: # iterate the filter dictionary\n\n if t in TYPES:\n\n if t == 'b':\n if fields[j].startswith('b') and len(splitted) == 1:\n\n if type_filter[t]: # check subtype\n if splitted[0] in type_filter[t]:\n indexes.append(j)\n imagenames.append(image_name)\n else: # no subtype, take all\n indexes.append(j)\n imagenames.append(image_name)\n\n elif t == 'vi':\n\n if splitted[0].upper() in VITYPES and (len(splitted) <= 3): # check this field is ok\n\n for tf in type_filter[t]:\n\n if tf.upper() == splitted[0].upper():\n\n if tf in VTYPES_NOBAND: # check this is a special vi\n indexes.append(j)\n imagenames.append(image_name)\n break\n\n if type_filter.get('b', False): # check if there is a subfilter\n\n if len(type_filter['b']) > 1: #check there are band combinations\n bandcombination = 0\n for bb in type_filter['b']:\n if bb in fields[j]:\n bandcombination +=1\n if bandcombination == 2:\n indexes.append(j)\n imagenames.append(image_name)\n break\n\n else: # no type_filter['b'], take all\n indexes.append(j)\n imagenames.append(image_name)\n\n elif t == 'tb':\n\n if fields[j].startswith('b') and len(splitted) > 1:\n\n for tf in type_filter[t]:\n\n if tf.upper() == splitted[1].upper(): # look for the filter\n\n if type_filter.get('b', False): # check if there is a subfilter\n\n for bb in type_filter['b']:\n if bb in fields[j]:\n indexes.append(j)\n imagenames.append(image_name)\n\n else: # noband filter, take all\n indexes.append(j)\n imagenames.append(image_name)\n\n elif t == 'tvi':\n\n if splitted[0].upper() in VITYPES and (len(splitted) >= 4) \\\n and (splitted[0].upper() in type_filter['vi']):\n\n for tf in type_filter[t]:\n\n if tf.upper() == splitted[3].upper() or tf.upper() == splitted[1].upper():\n\n if set([splitted[0].upper()]).intersection(VTYPES_NOBAND):# check this is a special vi\n indexes.append(j)\n imagenames.append(image_name)\n break\n\n if type_filter.get('b', False): # check if there is a subfilter\n\n if len(type_filter['b']) > 1: # check there are band combinations\n bandcombination = 0\n for bb in type_filter['b']:\n if bb in splitted: bandcombination +=1\n if bandcombination == 2:\n indexes.append(j)\n imagenames.append(image_name)\n break\n\n else: # if no band filter, take all\n indexes.append(j)\n imagenames.append(image_name)\n\n else:\n print('type_filter should be: b vi tb tvi ')\n print('type_filter should be: ' + str(VITYPES))\n return\n\n return imagenames, indexes", "title": "" }, { "docid": "d1cfaed20ea6f611e683da2d8747e387", "score": "0.55842066", "text": "def dev_get_single_image_results(gt_boxes, pred_dict, iou_thr, verbose = False ):\n pred_boxes = pred_dict['boxes']\n pred_scores = pred_dict['scores']\n all_pred_indices = range(len(pred_boxes))\n all_gt_indices = range(len(gt_boxes))\n if verbose:\n print(' get_single_image_results : ')\n print(' gt_boxes_img : (', len(gt_boxes),') ' , gt_boxes)\n print(' pred_boxes_pruned : (', len(pred_boxes) ,') 
' , pred_boxes)\n\n\n ## Here NONE of the ground truths were detected --> FN = # of GT Boxes\n if len(all_pred_indices) == 0:\n# print(' No predictions were made (len(all_pred_indices) == 0) --> FN = # of GT Boxes')\n tp = 0\n fp = 0\n fn = len(gt_boxes)\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n ## Here NO ground truths existed --> FP = # of Predicted Boxes\n if len(all_gt_indices) == 0:\n# print(' No GT Boxes were present (len(all_gt_indices) == 0) --> FP = # of Predicted Boxes')\n tp = 0\n fp = len(pred_boxes)\n fn = 0\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}\n\n gt_idx_thr = []\n pred_idx_thr = []\n ious = []\n\n for ipb, pred_box in enumerate(pred_boxes):\n if verbose:\n print(' PR:', pred_box , 'Score: ', pred_scores[ipb])\n for igb, gt_box in enumerate(gt_boxes):\n iou = dev_calc_iou_individual(pred_box, gt_box, verbose)\n if verbose:\n print(' '*30,' with GT: ', gt_box, ' IoU: ', round(iou,4))\n if iou > iou_thr:\n gt_idx_thr.append(igb)\n pred_idx_thr.append(ipb)\n ious.append(iou)\n\n ## sORT IoUs in descending order\n args_desc = np.argsort(ious)[::-1]\n if verbose:\n print(' argsort(iou) descending:', args_desc, ' ious descending:', [round(ious[i],4) for i in args_desc])\n\n ## Here None of the predictions matched GT Boxes - therefore\n ## All of the Predcitions were False Postitives --> FP = # of Predicted Boxes\n ## NONE of the GT boxes were correctly predicted --> FN = # of GT Boxes\n if len(args_desc) == 0:\n # No matches\n# print( ' len(args_desc) == 0 -- no matches ')\n tp = 0\n fp = len(pred_boxes)\n fn = len(gt_boxes)\n else:\n gt_match_idx = []\n pred_match_idx = []\n for idx in args_desc:\n gt_idx = gt_idx_thr[idx]\n pr_idx = pred_idx_thr[idx]\n # If the boxes are unmatched, add them to matches\n if (gt_idx not in gt_match_idx) and (pr_idx not in pred_match_idx):\n gt_match_idx.append(gt_idx)\n pred_match_idx.append(pr_idx)\n tp = len(gt_match_idx)\n fp = len(pred_boxes) - len(pred_match_idx)\n fn = len(gt_boxes) - len(gt_match_idx)\n\n return {'true_pos': tp, 'false_pos': fp, 'false_neg': fn}", "title": "" }, { "docid": "43e87ddd44f5ad1b79e807fc4b0e8749", "score": "0.55591094", "text": "def fast_rcnn_inference_single_image(\n boxes,\n scores,\n image_shape,\n score_thresh,\n nms_thresh,\n topk_per_image,\n\n):\n valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)\n import pdb\n # pdb.set_trace()\n if not valid_mask.all():\n boxes = boxes[valid_mask]\n scores = scores[valid_mask]\n\n scores = scores[:, :-1]\n num_bbox_reg_classes = boxes.shape[1] // 4\n # Convert to Boxes to use the `clip` function ...\n boxes = Boxes(boxes.reshape(-1, 4))\n boxes.clip(image_shape)\n boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4\n\n # 1. Filter results based on detection scores. It can make NMS more efficient\n # by filtering out low-confidence detections.\n filter_mask = scores > score_thresh # R x K\n # R' x 2. First column contains indices of the R predictions;\n # Second column contains indices of classes.\n filter_inds = filter_mask.nonzero() #469,2\n if num_bbox_reg_classes == 1:\n boxes = boxes[filter_inds[:, 0], 0]\n else:\n boxes = boxes[filter_mask]\n scores = scores[filter_mask]\n\n # 2. 
Apply NMS for each class independently.\n keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)\n if topk_per_image >= 0:\n keep = keep[:topk_per_image]\n boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] #38,4\n\n result = Instances(image_shape)\n result.pred_boxes = Boxes(boxes)\n result.scores = scores\n result.pred_classes = filter_inds[:, 1]\n return result, filter_inds[:, 0]", "title": "" }, { "docid": "9f202d23d378fbad27002acaa0d5fa3c", "score": "0.5558661", "text": "def benchmark():\n throughput_time = {\"Image\": []}\n average_duration_time = {\"Image\": []}\n\n for i in range(40): # adjust accordingly so whole thing takes a few sec\n logging.info('Image Processing execution beginning')\n t0 = time.time()\n image_processing()\n t1 = time.time()\n logging.info('Image Processing function ended, calculating metrics')\n if i >= 20: # We let it warmup for first 20 rounds, then consider the last 20 metrics\n throughput_time[\"Image\"].append(1 / ((t1 - t0) * 1000))\n average_duration_time[\"Image\"].append(((t1 - t0) * 1000) / 1)\n\n # Printing out results for throughput\n for name, numbers in throughput_time.items():\n logging.info(\"The throughput time\")\n length = str(len(numbers))\n median = str(statistics.median(numbers))\n mean = str(statistics.mean(numbers))\n stdev = str(statistics.stdev(numbers))\n output = \"FUNCTION {} used {} times. > MEDIAN {} ops/s > MEAN {} ops/s > STDEV {} ops/s\".format(name,\n length,\n median,\n mean,\n stdev)\n logging.info(output)\n\n # printing out results for average duration\n for name, numbers in average_duration_time.items():\n logging.info(\"The average Duration details\")\n length = str(len(numbers))\n median = str(statistics.median(numbers))\n mean = str(statistics.mean(numbers))\n stdev = str(statistics.stdev(numbers))\n output = \"FUNCTION {} used {} times. 
> MEDIAN {} s/ops > MEAN {} s/ops > STDEV {} s/ops\".format(name,\n length,\n median,\n mean,\n stdev)\n logging.info(output)\n\n logging.critical(\"The benchmark is finished properly\")", "title": "" }, { "docid": "248d8dfbb25041d2f0ff11cd0dc6e68a", "score": "0.5557809", "text": "def visualize_ranked_results(\n distmat, dataset, data_type, width=128, height=256, save_dir='', topk=10\n):\n num_q, num_g = distmat.shape\n mkdir_if_missing(save_dir)\n\n print('# query: {}\\n# gallery {}'.format(num_q, num_g))\n print('Visualizing top-{} ranks ...'.format(topk))\n\n query, gallery = dataset\n assert num_q == len(query)\n assert num_g == len(gallery)\n\n indices = np.argsort(distmat, axis=1)\n\n def _cp_img_to(src, dst, rank, prefix, matched=False):\n \"\"\"\n Args:\n src: image path or tuple (for vidreid)\n dst: target directory\n rank: int, denoting ranked position, starting from 1\n prefix: string\n matched: bool\n \"\"\"\n if isinstance(src, (tuple, list)):\n if prefix == 'gallery':\n suffix = 'TRUE' if matched else 'FALSE'\n dst = osp.join(\n dst, prefix + '_top' + str(rank).zfill(3)\n ) + '_' + suffix\n else:\n dst = osp.join(dst, prefix + '_top' + str(rank).zfill(3))\n mkdir_if_missing(dst)\n for img_path in src:\n shutil.copy(img_path, dst)\n else:\n dst = osp.join(\n dst, prefix + '_top' + str(rank).zfill(3) + '_name_' +\n osp.basename(src)\n )\n shutil.copy(src, dst)\n\n for q_idx in range(num_q):\n qimg_path, qpid, qcamid = query[q_idx]\n qimg_path_name = qimg_path[0] if isinstance(\n qimg_path, (tuple, list)\n ) else qimg_path\n\n if data_type == 'image':\n qimg = cv2.imread(qimg_path)\n qimg = cv2.resize(qimg, (width, height))\n qimg = cv2.copyMakeBorder(\n qimg, BW, BW, BW, BW, cv2.BORDER_CONSTANT, value=(0, 0, 0)\n )\n # resize twice to ensure that the border width is consistent across images\n qimg = cv2.resize(qimg, (width, height))\n num_cols = topk + 1\n grid_img = 255 * np.ones(\n (\n height,\n num_cols*width + topk*GRID_SPACING + QUERY_EXTRA_SPACING, 3\n ),\n dtype=np.uint8\n )\n grid_img[:, :width, :] = qimg\n else:\n qdir = osp.join(\n save_dir, osp.basename(osp.splitext(qimg_path_name)[0])\n )\n mkdir_if_missing(qdir)\n _cp_img_to(qimg_path, qdir, rank=0, prefix='query')\n\n rank_idx = 1\n for g_idx in indices[q_idx, :]:\n gimg_path, gpid, gcamid = gallery[g_idx]\n invalid = (qpid == gpid) & (qcamid == gcamid)\n\n if not invalid:\n matched = gpid == qpid\n if data_type == 'image':\n border_color = GREEN if matched else RED\n gimg = cv2.imread(gimg_path)\n gimg = cv2.resize(gimg, (width, height))\n gimg = cv2.copyMakeBorder(\n gimg,\n BW,\n BW,\n BW,\n BW,\n cv2.BORDER_CONSTANT,\n value=border_color\n )\n gimg = cv2.resize(gimg, (width, height))\n start = rank_idx*width + rank_idx*GRID_SPACING + QUERY_EXTRA_SPACING\n end = (\n rank_idx+1\n ) * width + rank_idx*GRID_SPACING + QUERY_EXTRA_SPACING\n grid_img[:, start:end, :] = gimg\n else:\n _cp_img_to(\n gimg_path,\n qdir,\n rank=rank_idx,\n prefix='gallery',\n matched=matched\n )\n\n rank_idx += 1\n if rank_idx > topk:\n break\n\n if data_type == 'image':\n imname = osp.basename(osp.splitext(qimg_path_name)[0])\n cv2.imwrite(osp.join(save_dir, imname + '.jpg'), grid_img)\n\n if (q_idx+1) % 100 == 0:\n print('- done {}/{}'.format(q_idx + 1, num_q))\n\n print('Done. 
Images have been saved to \"{}\" ...'.format(save_dir))", "title": "" }, { "docid": "7d3d3ae22f89ab7498aaa90189f0b65e", "score": "0.55538106", "text": "def detect_object_multiscale(img, svc_classify, norm, features, multi_scale_cfg, fast_hog_extract = 1, draw_win_flag=0):\n windows = []\n total_time = 0\n for level in multi_scale_cfg[:3]:\n print(level)\n\n level_id = level[0]\n scale = level[1]\n x_start_stop = level[2]\n y_start_stop = level[3]\n xy_window = level[4]\n xy_overlap = level[5]\n\n #print(level_id, scale, x_start_stop, y_start_stop, xy_window, xy_overlap)\n\n #print(y_start_stop)\n #y_start_stop[0] = int (img.shape[0] / 3)\n t_start =time.time()\n rescaled_detect_wins, ori_detect_wins, rescaled_img = detect_object(img, svc_classify, norm, features, scale,\n x_start_stop, y_start_stop,\n xy_window, xy_overlap, fast_hog_extract)\n\n t_end = time.time()\n\n print(\"Time_level:{0:}\".format(t_end-t_start))\n\n # writing and displaying multi scale detection\n #window_img = draw_boxes(img, rescaled_detect_wins, color=(0, 255, 0), thick=4)\n #cv2.imshow(\"window_img\" + level_id, window_img)\n #cv2.imwrite(\"./window_img\" + level_id + \".png\", window_img)\n\n #rescaled_window_img = draw_boxes(rescaled_img, ori_detect_wins, color=(0, 255, 0), thick=4)\n #cv2.imshow(\"window_img_rescaled\" + level_id, rescaled_window_img)\n\n #windows.append(rescaled_detect_wins)\n windows.extend(rescaled_detect_wins)\n total_time += (t_end-t_start)\n\n\n print(\"total time:{0:}\".format(total_time))\n window_img = img\n #for win_scale in windows:\n win_scale = windows\n #print(window_img.shape)\n if draw_win_flag == 1:\n window_img = draw_boxes(window_img, win_scale, color=(0, 255, 0), thick=4)\n #cv2.imshow(\"window_img_multi\", window_img)\n\n return windows, window_img", "title": "" }, { "docid": "9113450b0726f01a793ce241b7d67250", "score": "0.5539919", "text": "def standard_iteration(filepath,image_filter, type_filter,default_indexes, fields):\n\n # get a tuple with an ordered dictionary {raster_name: field_count} and a set with the unique raster names\n tablestructure = get_structure(filepath)\n\n images = list(tablestructure[0].keys())\n counts = list(tablestructure[0].values())\n\n # this will store the field indexes\n indexes = list(range(default_indexes)) + []\n imagenames = []\n # add indexes as first fields\n for i in range(default_indexes): imagenames.append('id_' + str(i))\n\n for n, image in enumerate(images):\n\n image_name = image.split('_')[-3] + '_' + image.split('_')[-2]\n\n if not image_filter or (image_filter and (image_name in image_filter)): # define wich images, all or some?\n\n # get the number of fields for this image\n offset = 0\n for i in range(n):\n offset += counts[i]\n\n # print(offset)\n\n field_count = tablestructure[0][image]\n\n for j in range(offset, offset + field_count):\n\n # split the field name\n splitted = fields[j].split('_')\n\n for t in type_filter: # iterate the filter dictionary\n\n if t in TYPES:\n\n if t == 'b':\n if fields[j].startswith('b') and len(splitted) == 1:\n if type_filter[t]: # check subtype\n if splitted[0] in type_filter[t]:\n indexes.append(j)\n imagenames.append(image_name)\n else: # no subtype, take all\n indexes.append(j)\n imagenames.append(image_name)\n\n elif t == 'vi':\n\n if splitted[0].upper() in VITYPES and (len(splitted) <= 3): # check this field is ok\n\n for tf in type_filter[t]:\n if tf.upper() == splitted[0].upper():\n indexes.append(j)\n imagenames.append(image_name)\n\n elif t == 'tb':\n\n if fields[j].startswith('b') 
and len(splitted) > 1:\n for tf in type_filter[t]:\n if tf.upper() == splitted[1].upper(): # look for the filter\n indexes.append(j)\n imagenames.append(image_name)\n\n elif t == 'tvi':\n\n if splitted[0].upper() in VITYPES and (len(splitted) >= 4):\n for tf in type_filter[t]:\n if tf.upper() == splitted[3].upper() or tf.upper() == splitted[1].upper():\n indexes.append(j)\n imagenames.append(image_name)\n\n else:\n print('type_filter should be: b vi tb tvi ')\n print('type_filter should be: ' + str(VITYPES))\n return\n\n return imagenames, indexes", "title": "" }, { "docid": "230a654b8ee16925cf53a0349f495b32", "score": "0.5529959", "text": "def __eval_image_process(self, from_list, compare_list, queue):\n result_list = []\n for imageA in from_list:\n for imageB in compare_list:\n if imageA[0] is not imageB[0]:\n result = float(block.diff(imageA[1], imageB[1]))\n if result >= self.fuzzy_percentage:\n result = \"%.2f\" % result\n result_list.append((imageA[0],\n imageB[0],\n result))\n queue.put(result_list)", "title": "" }, { "docid": "25fea097289e4867a24150c11533893c", "score": "0.54886633", "text": "def __eval_image(self, block_list):\n\n compare_result = []\n block_list_len = len(block_list)\n\n if block_list_len < 4:\n next_not_touch_index = 1\n for imageA in block_list:\n for index in range(next_not_touch_index,block_list_len):\n imageB = block_list[index]\n result = float(block.diff(imageA[1], imageB[1]))\n if result >= self.fuzzy_percentage:\n result = \"%.2f\" % result\n compare_result.append((imageA[0],\n imageB[0],\n result))\n # to prevent images of being compare to itself and already\n # compared images in previous loops\n next_not_touch_index += 1\n else:\n jobs = []\n procs = 4\n queue = Queue()\n block_split = self.__split_list(block_list, 2)\n # prepare processes\n # The first part of the split image list will be compared to the first part and second\n # part with two processes\n # the second part work in the same way\n for i in range(procs):\n index = i % 2\n process = Process(target=self.__eval_image_process,\n args=(block_split[i // 2], block_split[i % 2], queue))\n jobs.append(process)\n # start process and wait for processes to end\n for j in jobs:\n j.start()\n\n for j in jobs:\n compare_result.extend(queue.get())\n\n for j in jobs:\n j.join()\n\n return compare_result", "title": "" }, { "docid": "9027a8ce60720ac5c644d3d70600c9ea", "score": "0.54815334", "text": "def pixelmatch(q, ax_start, ay_start, cpus, nnf, nnd, A_size, B_size, feat_A, feat_AP, feat_B, feat_BP, patch_size,\n rand_search_radius):\n a_cols = A_size[1]\n a_rows = A_size[0]\n\n b_cols = B_size[1]\n b_rows = B_size[0]\n\n ax_end = min(ax_start + A_size[1] // cpus + 1, A_size[1])\n ay_end = min(ay_start + A_size[0] // cpus + 1, A_size[0])\n\n y_idxs = list(range(ay_start, ay_end))\n np.random.shuffle(y_idxs)\n # print(y_idxs)\n for ay in y_idxs:\n x_idxs = list(range(ax_start, ax_end))\n np.random.shuffle(x_idxs)\n # print(x_idxs)\n for ax in x_idxs:\n\n ybest, xbest = nnf[ay, ax]\n dbest = nnd[ay, ax]\n\n for jump in [8, 4, 2, 1]:\n # print(\"ax:{}; ay:{}; jump:\".format(ax,ay)+str(jump))\n\n # left\n if ax - jump < a_cols and ax - jump >= 0:\n vp = nnf[ay, ax - jump]\n xp = vp[1] + jump\n yp = vp[0]\n\n if xp < b_cols and xp >= 0 and yp >= 0 and yp < b_rows:\n val = cal_dist(ay, ax, yp, xp,\n feat_A, feat_AP,\n feat_B, feat_BP,\n A_size, B_size, patch_size)\n if val < dbest:\n # print(\"update\")\n xbest, ybest, dbest = xp, yp, val\n nnf[ay, ax] = np.array([ybest, xbest])\n nnd[ay, ax] = dbest\n # d = 
cal_dist(ay, ax, ybest, xbest,feat_A, feat_AP, feat_B, feat_BP, A_size, B_size, patch_size)\n # if (dbest != d):\n # print('{}left, {} vs {}'.format([ay,ax,ybest,xbest], dbest, d))\n\n # right\n if ax + jump < a_cols:\n vp = nnf[ay, ax + jump]\n xp = vp[1] - jump\n yp = vp[0]\n\n if xp < b_cols and xp >= 0 and yp >= 0 and yp < b_rows:\n val = cal_dist(ay, ax, yp, xp,\n feat_A, feat_AP,\n feat_B, feat_BP,\n A_size, B_size, patch_size)\n if val < dbest:\n # print(\"update\")\n xbest, ybest, dbest = xp, yp, val\n nnf[ay, ax] = np.array([ybest, xbest])\n nnd[ay, ax] = dbest\n # d = cal_dist(ay, ax, ybest, xbest,feat_A, feat_AP, feat_B, feat_BP, A_size, B_size, patch_size)\n # if (dbest != d):\n # print('{}right, {} vs {}'.format([ay,ax,ybest,xbest], dbest, d))\n\n # up\n if (ay - jump) < a_rows and (ay - jump) >= 0:\n vp = nnf[ay - jump, ax]\n xp = vp[1]\n yp = vp[0] + jump\n\n if xp < b_cols and xp >= 0 and yp >= 0 and yp < b_rows:\n val = cal_dist(ay, ax, yp, xp,\n feat_A, feat_AP,\n feat_B, feat_BP,\n A_size, B_size, patch_size)\n if val < dbest:\n # print(\"update\")\n xbest, ybest, dbest = xp, yp, val\n nnf[ay, ax] = np.array([ybest, xbest])\n nnd[ay, ax] = dbest\n # d = cal_dist(ay, ax, ybest, xbest,feat_A, feat_AP, feat_B, feat_BP, A_size, B_size, patch_size)\n # if (dbest != d):\n # print('{}up, {} vs {}'.format([ay,ax,ybest,xbest], dbest, d))\n\n # dowm\n if (ay + jump) < a_rows and (ay + jump) >= 0:\n vp = nnf[ay + jump, ax]\n xp = vp[1]\n yp = vp[0] - jump\n\n if xp < b_cols and xp >= 0 and yp >= 0 and yp < b_rows:\n val = cal_dist(ay, ax, yp, xp,\n feat_A, feat_AP,\n feat_B, feat_BP,\n A_size, B_size, patch_size)\n if val < dbest:\n # print(\"update\")\n xbest, ybest, dbest = xp, yp, val\n nnf[ay, ax] = np.array([ybest, xbest])\n nnd[ay, ax] = dbest\n # d = cal_dist(ay, ax, ybest, xbest,feat_A, feat_AP, feat_B, feat_BP, A_size, B_size, patch_size)\n # if (dbest != d):\n # print('{}down, {} vs {}'.format([ay,ax,ybest,xbest], dbest, d))\n\n rand_d = rand_search_radius\n\n while rand_d >= 1:\n\n\n xmin = max(xbest - rand_d, 0)\n xmax = min(xbest + rand_d + 1, b_cols)\n xmin, xmax = min(xmin, xmax), max(xmin, xmax)\n\n ymin = max(ybest - rand_d, 0)\n ymax = min(ybest + rand_d + 1, b_rows)\n ymin, ymax = min(ymin, ymax), max(ymin, ymax)\n\n rx = np.random.randint(xmin, xmax)\n ry = np.random.randint(ymin, ymax)\n\n val = cal_dist(ay, ax, ry, rx,\n feat_A, feat_AP,\n feat_B, feat_BP,\n A_size, B_size, patch_size)\n if val < dbest:\n xbest, ybest, dbest = rx, ry, val\n nnf[ay, ax] = np.array([ybest, xbest])\n nnd[ay, ax] = dbest\n\n\n rand_d = rand_d // 2\n\n\n q.put([ax, ay, xbest, ybest, dbest])", "title": "" }, { "docid": "e96345699cf5efecc9861007dc99ce63", "score": "0.5465659", "text": "def execute_all(method, template, show_images):\n benchmark = Benchmark(method(), template)\n benchmark.execute_all(show_images)\n print \"Recall: \" + str(benchmark.recall())\n print \"Precision: \" + str(benchmark.precision())\n print \"F-Measure: \" + str(benchmark.fmeasure())", "title": "" }, { "docid": "c88e8b82a63f39b12812dfe1a5df4d6f", "score": "0.5465257", "text": "def fast_rcnn_inference_single_image_2(\n boxes,\n scores,\n image_shape,\n score_thresh,\n nms_thresh,\n topk_per_image,\n box_features,\n\n):\n valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)\n import pdb\n pdb.set_trace()\n if not valid_mask.all():\n boxes = boxes[valid_mask]\n scores = scores[valid_mask]\n\n scores = scores[:, :-1]\n num_bbox_reg_classes = boxes.shape[1] // 4\n # Convert to Boxes 
to use the `clip` function ...\n boxes = Boxes(boxes.reshape(-1, 4))\n boxes.clip(image_shape)\n boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4\n\n # 1. Filter results based on detection scores. It can make NMS more efficient\n # by filtering out low-confidence detections.\n filter_mask = scores > score_thresh # R x K\n # R' x 2. First column contains indices of the R predictions;\n # Second column contains indices of classes.\n filter_inds = filter_mask.nonzero() #469,2\n if num_bbox_reg_classes == 1:\n boxes = boxes[filter_inds[:, 0], 0]\n else:\n boxes = boxes[filter_mask]\n scores = scores[filter_mask]\n box_features = box_features[filter_mask]\n # 2. Apply NMS for each class independently.\n keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)\n if topk_per_image >= 0:\n keep = keep[:topk_per_image]\n boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep] #38,4\n\n result = Instances(image_shape)\n result.pred_boxes = Boxes(boxes)\n result.scores = scores\n result.pred_classes = filter_inds[:, 1]\n return result, filter_inds[:, 0]", "title": "" }, { "docid": "606e5173f51f99bf58826326842005f0", "score": "0.5451685", "text": "def __call__(self, sr_imgs, per_image=False, progbar=True, desc=''):\n\t\tscenes_paths = tqdm(self.paths, desc=desc) if progbar else self.paths\n\t\thr_sm = [] if self.hr_sm == [] else [self.hr_sm]\n\t\t\n\t\tself.scores = [\n#\t\t\tscore_image(*i)\n\t\t\tscore_image_fast(*i)\n\t\t\tfor i in zip(sr_imgs, scenes_paths, *hr_sm)]\n\t\t\n\t\tassert len(self.scores) == len(self.paths)\n\t\t\n\t\tscore = np.mean(self.scores)\n\t\t\n\t\tif per_image:\n\t\t\treturn score, self.scores\n\t\telse:\n\t\t\treturn score", "title": "" }, { "docid": "e8c72b155fb73cd6cab0d936d8e62913", "score": "0.5445057", "text": "def accumulate(self):\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n\n T = len(self.iouThrs)\n R = len(self.recThrs)\n K = len(self.catIDs)\n A = 1\n M = len(self.maxDets)\n precision = -np.ones((T, R, K, A, M)) # -1 for the precision of absent categories\n recall = -np.ones((T, K, A, M))\n scores = -np.ones((T, R, K, A, M))\n\n # create dictionary for future indexing\n # get inds to evaluate\n k_list = list(range(len(self.catIDs)))\n m_list = self.maxDets[:]\n a_list = list(range(1))\n i_list = list(range(len(self.imageIDs)))\n\n I0 = len(self.imageIDs)\n A0 = 1\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0 * A0 * I0\n for a, a0 in enumerate(a_list):\n Na = a0 * I0\n for m, maxDet in enumerate(m_list):\n # E: 当前类别、area下,所有image的evalImg的list\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n # dtScores: 所有image的dtScores首尾相接成一个列表\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n # dtm: 第一维大小为T,第二维大小与dtScores相同\n dtm = np.concatenate([e['dtMatches'][:, 0:maxDet] for e in E], axis=1)[:, inds]\n\n npig = np.sum([e[\"gtMatches\"].shape[1] for e in E])\n\n tps = np.logical_and(dtm, True) # true positive\n fps = np.logical_not(dtm) # false positive\n\n # tp_sum, fp_sum: 第一维大小为T,第二维大小与dtScores相同\n tp_sum = np.cumsum(tps, 
axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp) # 所有检测框数目\n rc = tp / npig\n pr = tp / (fp + tp + np.spacing(1)) # tp / (fp + tp)\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n # 若检测到该类物品,则计算recall值\n if nd:\n recall[t, k, a, m] = rc[-1]\n # 反之,强行将该类的recall置为0\n else:\n recall[t, k, a, m] = 0\n\n # Currently, we use this simple version of precision\n if nd:\n precision[t, :, k, a, m] = np.full(R, pr[-1])\n else:\n precision[t, :, k, a, m] = np.full(R, 1)\n\n # TODO: implement both precision\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n # pr = pr.tolist()\n # q = q.tolist()\n #\n # for i in range(nd - 1, 0, -1):\n # if pr[i] > pr[i-1]:\n # pr[i-1] = pr[i]\n #\n # inds = np.searchsorted(rc, self.recThrs, side='left')\n # try:\n # for ri, pi in enumerate(inds):\n # q[ri] = pr[pi]\n # ss[ri] = dtScoresSorted[pi]\n # except:\n # pass\n # precision[t, :, k, a, m] = np.array(q)\n # scores[t, :, k, a, m] = np.array(ss)\n self.eval = {\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n # 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))", "title": "" }, { "docid": "35dcf57bd7045a81857eb964b2f9a9d4", "score": "0.54336965", "text": "def query(self, images):\r\n if self.pool_size == 0: # if the buffer size is 0, do nothing\r\n return images\r\n return_images = []\r\n for image in images:\r\n image = torch.unsqueeze(image.data, 0)\r\n if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer\r\n self.num_imgs = self.num_imgs + 1\r\n self.images.append(image)\r\n return_images.append(image)\r\n else:\r\n p = random.uniform(0, 1)\r\n if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer\r\n random_id = random.randint(0, self.pool_size - 1) # randint is inclusive\r\n tmp = self.images[random_id].clone()\r\n self.images[random_id] = image\r\n return_images.append(tmp)\r\n else: # by another 50% chance, the buffer will return the current image\r\n return_images.append(image)\r\n return_images = torch.cat(return_images, 0) # collect all the images and return\r\n return return_images", "title": "" }, { "docid": "b9e6b3a5f4df4d2075766b0b6892852b", "score": "0.5395777", "text": "def demo(net, image_name):\n\n num_images = 1\n foldername = '/media/akhtar/6D2C8F896B2F79E0/Projects/py-faster-rcnn-master/data/output_images_detected/' \n foldername_all = '/home/itu/faster-rcnn-1070/data/output_images_all/'\n thresh=0.05\n max_per_image=100\n\n all_boxes = [[] for _ in xrange(num_images)]\n\n ntopProp = [300]\n\n\n theta = [0, 90, 135, 45, 157.5, 112.5, 67.5, 22.5]\n\n for t in xrange(0,len(ntopProp)):\n \t#output_dir = get_output_dir(imdb, net)\n\n\n\n \tif not cfg.TEST.HAS_RPN:\n \troidb = imdb.roidb\n\n \tall_final_boxes = [[[] for _ in xrange(num_images)]\n \tfor _ in xrange(3)]\n\n\tall_final_boxes_rotated = [[[] for _ in xrange(num_images)]\n \tfor _ in xrange(3)]\n\n \tall_rpn_boxes = [[[] for _ in xrange(num_images)]\n \tfor _ in xrange(1)]\n\n\t#print('all_final_boxes_rotated :', all_final_boxes_rotated)\n\tcntG = 0\n\tcntR = 0\n\tcG = 0\n\tcR = 0\n\n \tfor i in xrange(num_images):\n\t\t# filter out any ground truth boxes\n\t\tif 
cfg.TEST.HAS_RPN:\n\t \t\tbox_proposals = None\n\t\telse:\n\t \t# The roidb may contain ground-truth rois (for example, if the roidb\n\t \t# comes from the training or val split). We only want to evaluate\n\t \t# detection on the *non*-ground-truth rois. We select those the rois\n\t \t# that have the gt_classes field set to 0, which means there's no\n\t \t# ground truth.\n\t \t\tbox_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]\n\n\n\n\t # Load the demo image\n\t\tim_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\n\t\n\t\tim = cv2.imread(im_file)\n\n\n\n\n\t\trpn_boxes, rpn_scores, final_boxes, final_scores, orient_score, final_boxes1, final_scores1, transApplied = im_detect(net, im, box_proposals, True)\n\n\t\tif ntopProp[t] == 300:\n\t\t\tif len(rpn_scores) > 299:\n\t\t\t\trpn_boxes = rpn_boxes[0:ntopProp[t],:]\n\t\t\t\trpn_scores = rpn_scores[0:ntopProp[t],:]\n\t\t\t\tfinal_boxes = final_boxes[0:ntopProp[t],:]\n\t\t\t\tfinal_scores = final_scores[0:ntopProp[t],:]\n\t\t\t\torient_scores = orient_score[0:ntopProp[t],:]\n\t\t\t\tfinal_boxes1 = final_boxes1[0:ntopProp[t],:]\n\t\t\t\tfinal_scores1 = final_scores1[0:ntopProp[t],:]\n\t\t\t\ttransApplied = transApplied[0:ntopProp[t],:,:,:]\n\t\telse:\t\n\t\t\trpn_boxes = rpn_boxes[0:ntopProp[t],:]\n\t\t\trpn_scores = rpn_scores[0:ntopProp[t],:]\n\t\t\tfinal_boxes = final_boxes[0:ntopProp[t],:]\n\t\t\tfinal_scores = final_scores[0:ntopProp[t],:]\n\t\t\torient_scores = orient_score[0:ntopProp[t],:]\n\t\t\tfinal_boxes1 = final_boxes1[0:ntopProp[t],:]\n\t\t\tfinal_scores1 = final_scores1[0:ntopProp[t],:]\n\t\t\ttransApplied = transApplied[0:ntopProp[t],:,:,:]\n\n\t\ttemp_boxes = None\n\t\tblobs, im_scales = _get_blobs(im, temp_boxes)\n\n\t\trotatedBoxesAll = np.zeros((len(rpn_boxes), 3,2,4))\n\n\t\tfor iii in range(0, len(rpn_boxes)):\n\t\t\tfinal_boxes_tr = final_boxes1[iii,:]\n\t\t\t#print('final_boxes_tr :', final_boxes_tr)\n\t\t\tfinal_boxes_tr = ((final_boxes_tr * im_scales[0]) / 16)\n\n\t\t\tfinal_boxes_tr = trans_box1(final_boxes_tr,transApplied[iii,0,:,:],transApplied[iii,1,:,:])\n\t\n\t\t\tfinal_boxes_tr = ((final_boxes_tr * 16) / im_scales[0])\n\n\t\t\trotatedBoxesAll[iii, :,:,:] = final_boxes_tr[0,:,:,:]\n\t\n\n\t\trpn_dets = np.hstack((rpn_boxes, rpn_scores)) \\\n\t\t\t.astype(np.float32, copy=False)\n\t\t#all_rpn_boxes[0][i] = rpn_dets\n\n\n\t\t#_t['misc'].tic()\n\n\t\tmaxScore = final_scores1\n\t\tfor j in xrange(1, 3):\n\n\t\t\tinds = np.where(maxScore[:, j] > thresh)[0]\n\t\t\tcls_scores = maxScore[inds, j]\n\t\t\tcls_boxes = final_boxes[inds, j*4:(j+1)*4]\n\t\t\tcls_orient = np.argmax(orient_score[inds, :], axis = 1)\n\t\t\trpn_bboxes = rpn_boxes[inds,:]\n\t\t\trpn_sscores = rpn_scores[inds]\n\t\n\t\t\tcls_scores1 = final_scores[inds, j]\n\n\t\t\trotatedBoxesClass = np.hstack((rotatedBoxesAll[inds,j,0,:], rotatedBoxesAll[inds,j,1,:])).astype(np.float32, copy=False)\n\t\t\t#print('rotatedBoxesClass :', rotatedBoxesClass.shape)\n\n\t\t\tcls_dets_temp_rotated = np.hstack((rotatedBoxesAll[inds,j,0,:], rotatedBoxesAll[inds,j,1,:], cls_scores[:, np.newaxis])) \\\n\t\t\t\t.astype(np.float32, copy=False)\n\n\n\t \t\tcls_dets_temp = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\n\t\t\t\t.astype(np.float32, copy=False)\n\n\t\t\t#print('cls_dets_temp', cls_dets_temp.shape)\n\n\t \t\tcls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis], cls_orient[:, np.newaxis], rpn_bboxes, rpn_sscores)) \\\n\t\t\t\t.astype(np.float32, copy=False)\n\n\n\t \t\tkeep = nms(cls_dets_temp, cfg.TEST.NMS)\n\t\t\t#keep = 
nms(cls_dets_temp, 0.3)\n\t\n\t\t\tcls_dets = cls_dets[keep, :]\n\t\t\trotatedBoxesClass = rotatedBoxesClass[keep, :]\n\t\n\n\t \t\tall_final_boxes[j][i] = cls_dets\n\t\t\tall_final_boxes_rotated[j][i] = rotatedBoxesClass\n\n\t\tif max_per_image > 0:\n\t \t\timage_scores = np.hstack([all_final_boxes[j][i][:, 4]\n\t\t\t \t\t for j in xrange(1, 3)])\n\n\t \t\tif len(image_scores) > max_per_image:\n\t\t\t\timage_thresh = np.sort(image_scores)[-max_per_image]\n\t\t\t\tfor j in xrange(1, 3):\n\t\t \t\t\tkeep = np.where(all_final_boxes[j][i][:, -1] >= image_thresh)[0]\n\t\t \t\t\tall_final_boxes[j][i] = all_final_boxes[j][i][keep, :]\n\t\t\t\t\tall_final_boxes_rotated[j][i] = all_final_boxes_rotated[j][i][keep, :]\n\n\t\n\t\tfor j in xrange(1, 3):\n\n\t\t\trpn_bo = np.array([208, 58, 2243, 1094])\n\n\n\t\t\tim,cntG,cntR, cG, cR = vis_detections_final(im, CLASSES[j], all_final_boxes[j][i], 0.75, cntG,cntR, cG, cR, rpn_sscores, rpn_bo, all_final_boxes_rotated[j][i])\n\n\n\t\tprint ('check: ',os.path.join(cfg.DATA_DIR, 'demo', 're_'+image_name))\n\t\tcv2.imwrite(os.path.join(cfg.DATA_DIR, 'demo', 're_'+image_name), im)", "title": "" }, { "docid": "ff6541efad79d34aec9cd0c8f7d1e1cd", "score": "0.5395157", "text": "def fast_rcnn_inference(\n boxes,\n scores,\n image_shapes,\n score_thresh,\n nms_thresh,\n topk_per_image,\n):\n result_per_image = [\n fast_rcnn_inference_single_image(\n boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image\n )\n for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)\n ]\n return [x[0] for x in result_per_image], [x[1] for x in result_per_image]", "title": "" }, { "docid": "790b7eeddb887ec4297c12005aacf8e6", "score": "0.5388199", "text": "def pull_all_images(x_min, x_max, y_start):\n y = y_start\n print(\"Exploring up\")\n while not row_is_all_404(x_min, x_max, y):\n y += 1\n if y % 10 == 0:\n print(\"Row %i done\" % y)\n y_max = y\n print(\"Exploring down\")\n y = y_start\n while not row_is_all_404(x_min, x_max, y):\n y -= 1\n if y % 10 == 0:\n print(\"Row %i done\" % y)\n y_min = y\n\n return y_min, y_max", "title": "" }, { "docid": "5a7e94c3e866550d3d684e32c7f7976f", "score": "0.53802115", "text": "def analyze_image(self): \n self.get_best_kmeans()\n self.get_raw_segments()\n self.clean_raw_segments()", "title": "" }, { "docid": "0243cc36809e9d869849e4d0028228a9", "score": "0.5374329", "text": "def __call__(self, results):\n if random.random() < self.mosaic_ratio:\n ori_img = results['img']\n ori_bboxes = results['gt_bboxes']\n ori_labels = results['gt_labels']\n \n results['img'], imgIds = self.make_mosaic_img(ori_img)\n results['gt_bboxes'], results['gt_labels'], box_masks = self.make_mosaic_bboxes(ori_bboxes, ori_labels, imgIds)\n \n if 'gt_masks' in results.keys():\n ori_masks = results['gt_masks']\n results['gt_masks'] = self.make_mosaic_masks(ori_masks, imgIds, box_masks)\n \n return results", "title": "" }, { "docid": "b61e22176fea336cb93cb59a3efecac1", "score": "0.5368932", "text": "def test():\n db = MongoClient()\n fotos = db['aidu']['elevator_fotos']\n for idx, foto in enumerate(fotos.find()):\n #if idx not in [143, 196, 206, 524]:\n # continue\n\n rospy.loginfo(\"Processing foto %d\" % idx)\n convert_start = datetime.now()\n image = convert(foto['image'], input_type='ros', output_type='cv2')\n benchmark_convert.append(millis(datetime.now() - convert_start))\n\n original_image = np.array(image)\n\n # Threshold\n image = threshold(image)\n threshold_image = cv2.cvtColor(image, 
cv2.COLOR_GRAY2BGR)\n\n # Detect buttons\n buttons = detect_buttons(image)\n\n rospy.loginfo('Foto %d - convert: %fms - threshold: %fms - detect: %fms' % (idx, benchmark_convert[-1],\n benchmark_threshold[-1],\n benchmark_detect[-1]))\n rospy.loginfo('Bytes: %d' % len(foto['image']))\n rospy.loginfo('Detected %d buttons' % len(buttons))\n\n # Show button\n for button in buttons:\n x0 = int(button.x - button.w/2)\n y0 = int(button.y - button.h/2)\n x1 = int(button.x + button.w/2)\n y1 = int(button.y + button.h/2)\n cv2.rectangle(original_image, (x0, y0), (x1, y1), (0, 0, 255), 2)\n cv2.rectangle(threshold_image, (x0, y0), (x1, y1), (0, 0, 255), 2)\n out = np.vstack((original_image, threshold_image))\n cv2.imshow('Button detector', cv2.resize(out, (640, 720)))\n cv2.waitKey()\n\n if rospy.is_shutdown():\n break", "title": "" }, { "docid": "0c47f0d90c86e4cc92c5a71e8d05224e", "score": "0.5366306", "text": "def process_lr_images(scene_path,processing_type = \"with_same_lr\",fusion_type = \"median\"):\n qm_images = load_lr_qm(scene_path, quality_map_only = True) \n lr_images = load_lr_qm(scene_path, lr_only = True) \n\n # getting all those pixels which are bad in all the low resolution images\n pxl_nc_all_lr = np.where(np.sum(qm_images,axis = 0) == 0)\n pxl_coordinates = list(zip(pxl_nc_all_lr[0],pxl_nc_all_lr[1]))\n\n if processing_type == \"with_same_lr\":\n for lr_image,qm_image in zip(lr_images,qm_images):\n\n #Padding the array \n lr_padded = np.pad(lr_image,((1,1),(1,1)),'constant',constant_values=(np.nan,))\n for (x,y) in pxl_coordinates:\n sub_array_xy = lr_padded[y:y+3,x:x+3] # small array around the bad pixel\n avg_val = np.nanmean(sub_array_xy) \n lr_image[x,y] = avg_val # Replacing the bad pixel with the mean of pixel value of nearby pixels\n qm_image[x,y] = True # marking it as clear pixel\n\n elif processing_type == \"with_all_lr\":\n if fusion_type == \"mean\":\n lr_fused = np.nanmean(lr_images,axis = 0) \n elif fusion_type == \"mode\":\n lr_fused = scipy.stats.mode(lr_images, axis=0, nan_policy='omit').mode[0]\n else:\n lr_fused = np.nanmedian(lr_images,axis = 0) \n\n for lr_image,qm_image in zip(lr_images,qm_images):\n for (x,y) in pxl_coordinates:\n lr_image[x,y] = lr_fused[x,y] # replacing the bad pixel with the central tendency measure at that pixel coordinate\n qm_image[x,y] = True\n\n for lr_image,qm_image in zip(lr_images,qm_images):\n lr_image[~qm_image] = np.nan # making all the remaining non clear pixels as NAN so that they won't have any impact on calculations\n\n return lr_images", "title": "" }, { "docid": "377527eb6a351cad95672616f3158c50", "score": "0.5361521", "text": "def run(self):\r\n\r\n start = time.time()\r\n self.compute()\r\n end1 = time.time()\r\n self.sort()\r\n end2 = time.time()\r\n self.analyze()\r\n end3 = time.time()\r\n imageResultPath = self.reconstruct()\r\n end4 = time.time()\r\n\r\n print \"Computing time:\", end1-start, \"detik\"\r\n print \"Sorting time :\", end2-end1, \"detik\"\r\n print \"Analyzing time:\", end3-end2, \"detik\"\r\n print \"Image creation:\", end4-end3, \"detik\"\r\n\r\n totalSecond = end4-start\r\n m, s = divmod(totalSecond, 60)\r\n h, m = divmod(m, 60)\r\n print \"Total time : %d:%02d:%02d detik\" % (h, m, s)\r\n print \"\"\r\n return imageResultPath", "title": "" }, { "docid": "5acf84da2af2e3ba98c8111469568e58", "score": "0.5360146", "text": "def img_select(hash_func, img_path, img, img_list, img_dict, count=1, img_save_path='', img_format='jpg'):\r\n if len(img_list) == 1:\r\n img_tmp = cv2.imread(os.path.join(img_path, 
img_list[0]))\r\n img_new_name = os.path.join(img_save_path, '{:0>3d}_{}.{}'.format(count, img.split('.')[0], img_format))\r\n cv2.imwrite(img_new_name, img_tmp)\r\n return print('Sort finish.')\r\n\r\n # save current img\r\n img_save_path = img_save_path\r\n img_format = img_format\r\n img_tmp = cv2.imread(os.path.join(img_path, img))\r\n img_new_name = os.path.join(img_save_path, '{:0>3d}_{}.{}'.format(count, img.split('.')[0], img_format))\r\n cv2.imwrite(img_new_name, img_tmp)\r\n # delete the current img in the list\r\n img_list.pop(img_list.index(img))\r\n # generate the hash of current img\r\n hash_std = hash_func(img_tmp)\r\n\r\n # find the most similar img\r\n dis_min = 10000 \r\n for img in img_list:\r\n hash_now = img_dict[img]\r\n dis = hamming_distance(hash_std, hash_now, \r\n hsv=True if hash_func == phash_hsv else False, \r\n cm=True if hash_func == color_moments else False)\r\n if dis < dis_min:\r\n dis_min = dis\r\n img_min = img\r\n count += 1\r\n return img_select(hash_func, img_path, img_min, img_list, img_dict, count, img_save_path, img_format)", "title": "" }, { "docid": "c92840ba37a17150bf853d5842a284a2", "score": "0.5355308", "text": "def process_query(query, width_scale, filename_out):\n print(\"Generating tiles based on query:\")\n print(query)\n connection = MongoClient()\n db = connection[\"voteview\"]\n\n icpsrs = []\n for row in db.voteview_members.find(query, {\"icpsr\": 1, \"_id\": 0}):\n icpsrs.append(row[\"icpsr\"])\n\n icpsrs = list(set(icpsrs))\n filenames = list_files_icpsr(icpsrs)\n print(\"Detected %s matching member portraits\" % len(filenames))\n if filenames:\n grid = build_grid(filenames, width_scale=width_scale)\n grid.save(filename_out, \"JPEG\")", "title": "" }, { "docid": "4d83d6c2d9ff2b8a97455dbe1fe84cd6", "score": "0.53549886", "text": "def do_sg_evaluation(dataset, predictions, predictions_pred, output_folder, logger):\n\n evaluator = BasicSceneGraphEvaluator.all_modes(multiple_preds=False)\n\n top_Ns = [20, 50, 100]\n modes = [\"sgdet\"]\n result_dict = {}\n\n for mode in modes:\n result_dict[mode + '_recall'] = {20:[], 50:[], 100:[]}\n for image_id, (prediction, prediction_pred) in enumerate(zip(predictions, predictions_pred)):\n img_info = dataset.get_img_info(image_id)\n image_width = img_info[\"width\"]\n image_height = img_info[\"height\"]\n\n gt_boxlist = dataset.get_groundtruth(image_id)\n\n gt_entry = {\n 'gt_classes': gt_boxlist.get_field(\"labels\").numpy(),\n 'gt_relations': gt_boxlist.get_field(\"relation_labels\").numpy().astype(int),\n 'gt_boxes': gt_boxlist.bbox.numpy(),\n }\n\n # import pdb; pdb.set_trace()\n prediction = prediction.resize((image_width, image_height))\n obj_scores = prediction.get_field(\"scores\").numpy()\n all_rels = prediction_pred.get_field(\"idx_pairs\").numpy()\n fp_pred = prediction_pred.get_field(\"scores\").numpy()\n # multiplier = np.ones((obj_scores.shape[0], obj_scores.shape[0]))\n # np.fill_diagonal(multiplier, 0)\n # fp_pred = fp_pred * multiplier.reshape(obj_scores.shape[0] * (obj_scores.shape[0] - 1), 1)\n scores = np.column_stack((\n obj_scores[all_rels[:,0]],\n obj_scores[all_rels[:,1]],\n fp_pred[:, 1:].max(1)\n )).prod(1)\n sorted_inds = np.argsort(-scores)\n sorted_inds = sorted_inds[scores[sorted_inds] > 0] #[:100]\n\n pred_entry = {\n 'pred_boxes': prediction.bbox.numpy(),\n 'pred_classes': prediction.get_field(\"labels\").numpy(),\n 'obj_scores': prediction.get_field(\"scores\").numpy(),\n 'pred_rel_inds': all_rels[sorted_inds],\n 'rel_scores': fp_pred[sorted_inds],\n }\n\n 
evaluator[mode].evaluate_scene_graph_entry(\n gt_entry,\n pred_entry,\n )\n\n evaluate(gt_boxlist.get_field(\"labels\"), gt_boxlist.bbox, gt_boxlist.get_field(\"pred_labels\"),\n prediction.bbox, prediction.get_field(\"scores\"), prediction.get_field(\"labels\"),\n prediction_pred.get_field(\"idx_pairs\"), prediction_pred.get_field(\"scores\"),\n top_Ns, result_dict, mode)\n\n evaluator[mode].print_stats(logger)\n logger.info('=====================' + mode + '(IMP)' + '=========================')\n logger.info(\"{}-recall@20: {}\".format(mode, np.mean(np.array(result_dict[mode + '_recall'][20]))))\n logger.info(\"{}-recall@50: {}\".format(mode, np.mean(np.array(result_dict[mode + '_recall'][50]))))\n logger.info(\"{}-recall@100: {}\".format(mode, np.mean(np.array(result_dict[mode + '_recall'][100]))))", "title": "" }, { "docid": "49615c23b310cd67144115128f997cfb", "score": "0.53533524", "text": "def query_image_for_profile(query: DartSearchQuery, show_stages=False):\n\n dart_image = query[0]\n profile = query[1]\n\n image = cv2.blur(dart_image, (4, 4))\n dart_bodies = DartDetector.get_parts_of_colour(image, profile.body)\n\n dart_tips = DartDetector.get_parts_of_colour(image, profile.tip)\n dart_body_detector = standard_body_blob_detector()\n dart_tip_detector = standard_tip_blob_detector()\n detect_bodies = cv2.blur(cv2.cvtColor(cv2.cvtColor(dart_bodies, cv2.COLOR_HSV2BGR), cv2.COLOR_BGR2GRAY), (4, 4))\n body_points = dart_body_detector.detect(detect_bodies)\n detect_tips = cv2.blur(cv2.cvtColor(cv2.cvtColor(dart_tips, cv2.COLOR_HSV2BGR), cv2.COLOR_BGR2GRAY), (4, 4))\n tip_points = dart_tip_detector.detect(detect_tips)\n body_edges = cv2.Canny(detect_bodies, 150, 200, apertureSize=5)\n\n lines = cv2.HoughLinesP(body_edges, 1, np.pi / 360, 50)\n\n potential_darts = []\n all_combos = []\n used_combos = set()\n\n if lines is not None:\n lines = [line[0] for line in lines]\n lines.sort(key=lambda x: x[1])\n\n for dart in body_points:\n for tip in tip_points:\n for line in lines:\n dartiness = DartDetector.get_dartiness_points(line,\n (tip.pt[0], tip.pt[1], dart.pt[0], dart.pt[1]))\n all_combos.append(PotentialDart(dartiness, tip, dart))\n\n cv2.circle(image, (int(dart.pt[0]), int(dart.pt[1])), int(dart.size * 2), (90, 255, 255), thickness=2)\n\n all_combos.sort()\n\n for dart in all_combos:\n\n if not dart[1] in used_combos and not dart[2] in used_combos and len(potential_darts) < min(len(tip_points),\n len(\n body_points)):\n # essentially a vector pointing from the tip to the body\n dart_tip_dx = dart.body_point.pt[0] - dart.tip_point.pt[0]\n dart_tip_dy = dart.body_point.pt[1] - dart.tip_point.pt[1]\n\n # add vector to point\n extended_end_of_dart_x = int(dart.body_point.pt[0] + dart_tip_dx * 1.2)\n extended_end_of_dart_y = int(dart.body_point.pt[1] + dart_tip_dy * 1.2)\n\n extended_dart_tip_x = int(dart.body_point.pt[0] - dart_tip_dx * 1.4)\n extended_dart_tip_y = int(dart.body_point.pt[1] - dart_tip_dy * 1.4)\n\n min_x = min(extended_dart_tip_x, extended_end_of_dart_x)\n min_y = min(extended_dart_tip_y, extended_end_of_dart_y)\n\n max_x = max(extended_dart_tip_x, extended_end_of_dart_x)\n max_y = max(extended_dart_tip_y, extended_end_of_dart_y)\n\n bounding_rect = (min_x, min_y, max_x - min_x, max_y - min_y)\n\n potential_darts.append(DartResult(dart.score, (dart.tip_point.pt[0], dart.tip_point.pt[1]),\n (dart.body_point.pt[0], dart.body_point.pt[1]), bounding_rect))\n used_combos.add(dart[1])\n used_combos.add(dart[2])\n if show_stages:\n for tip in tip_points:\n cv2.circle(image, 
(int(tip.pt[0]), int(tip.pt[1])), int(tip.size), (0, 255, 255), thickness=2)\n plt.imshow(image)\n plt.show()\n plt.imshow(cv2.cvtColor(dart_bodies, cv2.COLOR_HSV2RGB))\n plt.show()\n plt.imshow(cv2.cvtColor(dart_tips, cv2.COLOR_HSV2RGB))\n plt.show()\n # plt.imshow(cv2.cvtColor(body_edges, cv2.COLOR_GRAY2RGB))\n # plt.show()\n return potential_darts, profile", "title": "" }, { "docid": "a594678f852d8b33c9d2e5820c48f04c", "score": "0.53390646", "text": "def eval_model(test_loader, \n results, \n detection_threshold, \n device, \n lit_model):\n for images, _, image_ids in test_loader:\n\n images = list(image.to(device) for image in images)\n outputs = lit_model(images)\n\n for i, image in enumerate(images):\n\n boxes = outputs[i]['boxes'].data.cpu().numpy()\n scores = outputs[i]['scores'].data.cpu().numpy()\n \n boxes = boxes[scores >= detection_threshold].astype(np.int32)\n scores = scores[scores >= detection_threshold]\n image_id = image_ids[i]\n\n for s, b in zip(scores, boxes.astype(int)):\n result = {\n 'image_id': image_id,\n 'x1': b[0],\n 'y1': b[1],\n 'x2': b[2],\n 'y2': b[3],\n 'score': s,\n }\n\n \n results.append(result)\n \n return results", "title": "" }, { "docid": "b9ce70aae7f14f772b478abc7951101c", "score": "0.53285146", "text": "def im_detect(net, minibatch_db, num_classes, output_dir=None):\n global _ind_to_class, _class_to_ind\n assert(_ind_to_class)\n assert(_class_to_ind)\n # \n assert(len(minibatch_db), 1), \"each time, we process one image\"\n # Get the input data/labels\n blobs, im_scales = _get_minibatch(minibatch_db, num_classes);\n\n if cfg.TEST.HAS_SECONDARY_REGIONS:\n blobs_size = len(blobs['rois']) - 1\n sr_per_num = cfg.TEST.SR_PER_NUMBER - 1\n sr_iter_count = blobs_size / sr_per_num\n if blobs_size % sr_per_num:\n sr_iter_count += 1\n out_names_blobs = [\"primary_score\", \"secondary_score\", \\\n \"secondary_max_score\", \"cls_score\", \"cls_prob\", \"accuracy\"]\n else:\n sr_iter_count = 1\n out_names_blobs = [\"primary_score\",]\n \n # \n data_blobs = blobs['data']\n label_blob = blobs['labels']\n primary_roi_blob = blobs['rois'][0, :]\n secordary_rois_blobs = None\n if cfg.TEST.HAS_SECONDARY_REGIONS:\n secordary_rois_blobs = blobs['rois'][1:, :]\n primary_score = None\n secondary_score = None\n # \n print \"process\", len(blobs['rois']), \"boxes each image\"\n\n for sic in xrange(sr_iter_count):\n if cfg.TEST.HAS_SECONDARY_REGIONS:\n start_idx = sic * sr_per_num\n end_idx = (sic + 1) * sr_per_num\n end_idx = min(blobs_size, end_idx) \n # Get input info\n rois_blobs = secordary_rois_blobs[start_idx: end_idx, :]\n rois_blobs = np.vstack((primary_roi_blob, rois_blobs))\n # Determine the number of secondary regions\n n_rois_count = [end_idx - start_idx]\n n_rois_count_blob = np.array(n_rois_count)\n else:\n rois_blobs = primary_roi_blob\n rois_blobs = np.array(rois_blobs)\n n_rois_count = [0]\n n_rois_count_blob = np.array(n_rois_count)\n\n # Reshape network inputs\n net.blobs['data'].reshape(*(data_blobs.shape))\n net.blobs['rois'].reshape(*(rois_blobs.shape))\n net.blobs['labels'].reshape(*(label_blob.shape)) \n net.blobs['n_rois_count'].reshape(*(n_rois_count_blob.shape)) \n\n # Forward\n blobs_out = net.forward(out_names_blobs,\n data=data_blobs.astype(np.float32, copy=False),\n rois=rois_blobs.astype(np.float32, copy=False),\n labels=label_blob.astype(np.float32, copy=False),\n n_rois_count=n_rois_count_blob.astype(np.float32, copy=False))\n # Get the primary region's scores for each class\n if primary_score is None:\n primary_score = 
blobs_out['primary_score']\n primary_score = primary_score[0] \n # \n if cfg.TEST.HAS_SECONDARY_REGIONS:\n if secondary_score is None:\n secondary_score = blobs_out['secondary_score']\n else:\n secondary_score = np.vstack((secondary_score, blobs_out['secondary_score']))\n\n cls_score = []\n sr_ind = []\n secondary_max_score = []\n # \n if cfg.TEST.HAS_SECONDARY_REGIONS:\n for nc in xrange(num_classes):\n sind = np.argmax(secondary_score[:, nc])\n smax = np.max(secondary_score[:, nc])\n sr_ind.append(sind)\n secondary_max_score.append(smax)\n for nc in xrange(num_classes):\n nc_score = primary_score[nc] + secondary_max_score[nc]\n cls_score.append(nc_score)\n else:\n cls_score = primary_score\n # \n cls_prob = np.exp(cls_score)\n cls_prob = cls_prob / np.sum(cls_prob)\n pred_cls_ind = np.argmax(cls_prob)\n gt_cls_ind = blobs['labels'][0]\n\n # \n if cfg.TEST.HAS_SHOW_REGION:\n print \"predicted cls:\", _ind_to_class[pred_cls_ind], \"(\", pred_cls_ind, \")\"\n print \"groundtruth cls:\", _ind_to_class[gt_cls_ind], \"(\", gt_cls_ind, \")\"\n print\n # get the original image\n imagepath = minibatch_db[0]['image']\n im = cv2.imread(imagepath) \n # Get the primary region's corresponding scaled index\n prlevel = blobs['rois'][0][0]\n primary_region = blobs['rois'][0][1:]\n if cfg.TEST.HAS_CONTROL_THE_SAME_SCALE:\n prlevel = 0\n primary_region = primary_region / im_scales[prlevel]\n\n # \n imagename = imagepath.split(\"/\")[-1]\n imagename, img_ext = imagename.split(\".\")\n img_ext = \".\" + img_ext\n image_output_dir = os.path.join(output_dir, cfg.TEST.PRIMARY_SECONDARY_VISUAL)\n # Build the directory\n if not os.path.exists(image_output_dir):\n os.makedirs(image_output_dir)\n # \n pred_cls = _ind_to_class[pred_cls_ind]\n gt_cls = _ind_to_class[gt_cls_ind]\n\n # \n has_secondary_regions = cfg.TEST.HAS_SECONDARY_REGIONS\n has_show_secondary_region = cfg.TEST.HAS_SHOW_SECONDARY_REGION\n secondary_region = None\n if has_secondary_regions and has_show_secondary_region:\n # \n sind = sr_ind[pred_cls_ind]\n srlevel = blobs['rois'][sind][0]\n if cfg.TEST.HAS_CONTROL_THE_SAME_SCALE:\n srlevel = 0\n secondary_region = blobs['rois'][sind][1:]\n secondary_region = secondary_region / im_scales[srlevel]\n \n _vis_secondary_region_with_class(im, primary_region, secondary_region, \\\n pred_cls, gt_cls, image_output_dir, imagename, img_ext)\n print\n print \"*******************************************************************\"\n print\n\n # \n return cls_prob, pred_cls_ind, gt_cls_ind", "title": "" }, { "docid": "0d17de2b1c33e272d336f092956e829d", "score": "0.5325522", "text": "def analyze_newest_images(self):\r\n\r\n def analyze_image(filepath):\r\n \"\"\"Helper function for cleaning analysis_dict to JSON serializable types\r\n before uploading to breadboard.\r\n \"\"\"\r\n self.logger.debug('{file} analyzing: '.format(file=filepath))\r\n analysis_dict, settings = self.analysis_function(\r\n filepath, self.previous_settings)\r\n cleaned_analysis_dict = {}\r\n print('\\n')\r\n for key in self.analyzed_var_names:\r\n if not isnan(analysis_dict[key]):\r\n cleaned_analysis_dict[key] = analysis_dict[key]\r\n print(key, analysis_dict[key])\r\n print('\\n')\r\n return cleaned_analysis_dict, settings\r\n\r\n bc, watchfolder = self.bc, self.watchfolder\r\n run_id = self.unanalyzed_ids[-1] # start from top of stack\r\n print(self.unanalyzed_ids)\r\n if self.images_per_shot == 1:\r\n file = os.path.join(watchfolder,\r\n '{run_id}_0.spe'.format(run_id=run_id))\r\n pending_file = file\r\n else: # for triple imaging\r\n 
file = [os.path.join(watchfolder, '{run_id}_{idx}.spe'.format(\r\n run_id=run_id, idx=idx)) for idx in range(images_per_shot)]\r\n pending_file = file[-1]\r\n old_filesize = 0\r\n # wait for file to finish writing to hard disk before opening in MATLAB\r\n while os.path.getsize(pending_file) != old_filesize:\r\n old_filesize = os.path.getsize(pending_file)\r\n time.sleep(0.3)\r\n if self.append_mode:\r\n run_dict = bc._send_message(\r\n 'get', '/runs/' + str(run_id) + '/').json()\r\n if set(self.analyzed_var_names).issubset(set(run_dict['parameters'].keys())):\r\n popped_id = [self.unanalyzed_ids.pop()]\r\n self.done_ids += popped_id\r\n return None\r\n try:\r\n analysis_dict, self.previous_settings = analyze_image(\r\n file)\r\n resp = bc.append_analysis_to_run(run_id, analysis_dict)\r\n except: # if MATLAB analysis fails\r\n analysis_dict = {'badshot': True}\r\n warning_message = str(\r\n run_id) + 'could not be analyzed. Marking as bad shot.'\r\n resp = bc.append_analysis_to_run(run_id, analysis_dict)\r\n warnings.warn(warning_message)\r\n self.logger.warn(warning_message)\r\n popped_id = [self.unanalyzed_ids.pop()]\r\n self.done_ids += popped_id\r\n\r\n if resp.status_code != 200:\r\n logger.warning('Upload error: ' + resp.text)\r\n\r\n if not self.save_images: # delete images and add run_ids to .txt file after analysis if in testing mode\r\n print('Not saving images.')\r\n if isinstance(file, str):\r\n filepath = os.path.join(self.watchfolder, file)\r\n os.remove(filepath)\r\n self.logger.debug(\r\n 'save_images is False, file {file} deleted after analysis.'.format(file=filepath))\r\n elif isinstance(file, list):\r\n for f in file:\r\n filepath = os.path.join(self.watchfolder, f)\r\n os.remove(filepath)\r\n logger.debug(\r\n 'save_images is False, file {file} deleted after analysis.'.format(file=filepath))\r\n with open(os.path.join(self.watchfolder, 'run_ids.txt'), 'a') as run_ids_file:\r\n run_ids_file.write(str(popped_id[0]) + '\\n')\r\n self.logger.debug('Run_id {id} added to {file}.'.format(\r\n id=str(popped_id[0]), file=os.path.join(watchfolder, 'run_ids.txt')))\r\n print('\\n')", "title": "" }, { "docid": "7fee612bb560f597bf3ad8325b0fdd02", "score": "0.53138566", "text": "def query(self, images):\n if self.pool_size == 0: # if the buffer size is 0, do nothing\n return images\n\n return_images = []\n\n for image in images:\n image = torch.unsqueeze(image.data, 0)\n if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer\n self.num_imgs = self.num_imgs + 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = random.uniform(0, 1)\n if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer\n random_id = random.randint(\n 0, self.pool_size - 1) # randint is inclusive\n tmp = self.images[random_id].clone()\n self.images[random_id] = image\n return_images.append(tmp)\n else: # by another 50% chance, the buffer will return the current image\n return_images.append(image)\n # collect all the images and return\n return_images = torch.cat(return_images, 0)\n return return_images", "title": "" }, { "docid": "99f9715d6d6b0be63dea1c88e49d7b2a", "score": "0.5307565", "text": "def pull(self, queries):\n self.start_queue()\n self.results = []\n threads = []\n self.info(\"Saving images in: %s\" % self.path)\n for query in queries:\n self.info(\"Searching for \\\"%s\\\" images...\" % query)\n start = 0\n while start < self.max_results:\n api_url = 
self.make_api_url(query, start)\n if self.is_url_in_history(api_url):\n self.info(\"Already have %s\" % api_url)\n start += 4 # Normal number of results.\n continue\n self.put_url_in_history(api_url)\n data = self.http_get(api_url)\n if data:\n msg = json.loads(data)\n if msg['responseStatus'] == 200 and msg['responseData']:\n # Good response.\n newresults = msg['responseData']['results']\n if not len(newresults):\n self.warn(\"No results in response: %s\" % msg)\n time.sleep(3)\n break\n self.results += newresults\n start += len(newresults)\n # Retrieve.\n for result in newresults:\n image_url = result['unescapedUrl']\n image_id = self.make_hash(image_url)\n image_path = self.get_std_image_path(image_id)\n if 'dogdrip.net/' in image_url:\n # Exceptionally slow domain. :-)\n self.info(\"Skipping %s\" % image_url)\n if self.is_url_in_history(image_url):\n # Seen it already.\n self.info(\"Already have %s\" % image_url)\n else:\n # Put URL in history, to avoid repeats.\n self.put_url_in_history(image_url)\n # Join queue to retrieve image content.\n self.q.put(image_url)\n else:\n # Bad response.\n details = msg.get('responseDetails', '')\n if details.startswith('qps rate exceeded'):\n self.warn('Google just complained about ' +\\\n 'too many queries per second. Pausing...')\n time.sleep(15)\n else:\n self.warn(\"Response not OK: %s\" % msg)\n time.sleep(3)\n break\n else:\n # No data in response.\n self.warn(\"No data from %s\" % api_url)\n time.sleep(3)\n break\n # Be nice to Google.\n time.sleep(1.5)\n # Wait for workers to finish processing the queue.\n self.q.join()\n # Summarise results.\n std_count = len(self.get_paths('.std'))\n if not std_count:\n self.warn(\"No images found.\")\n self.info(\"There are %d images.\" % std_count)", "title": "" }, { "docid": "827bf3fbd148e1f528cf2d37e94cb375", "score": "0.53008354", "text": "def filter_dirty_images(df):\n df_q = df.query('('\n '(perspective == \"sr\") or '\n '(perspective == \"sl\")) or '\n '(over_exposed == 1) or '\n '(missing_element.notnull('\n ')) or '\n '(bright < 30) or '\n '(sharpness < 11)'\n )\n return df_q", "title": "" }, { "docid": "3677fe18a51ff33f264985405a4adc28", "score": "0.5271173", "text": "def quickImageAnalysis(imageTuple):\n hashForImage1 = calculateHash(imageTuple[0][0])\n hashForImage2 = calculateHash(imageTuple[1][0])\n\n hammingDistance = calculateHammingDistance(hashForImage1, hashForImage2)\n\n # Can fine tune here for leniency\n if hammingDistance == 0:\n print('Images are the same.')\n exit(0)\n\n biggerImage = naiveComparison(imageTuple[0][0], imageTuple[1][0])\n if biggerImage == imageTuple[1][0]:\n return (imageTuple[0][1], imageTuple[1][1])\n else:\n return (imageTuple[1][1], imageTuple[0][1])", "title": "" }, { "docid": "7dd6ea39d2a705595f93509f91dd5810", "score": "0.5264064", "text": "def processImages(self, imsq):\n pass", "title": "" }, { "docid": "9e9ebb77bb7b2dc3c4c15d427232ac99", "score": "0.5249005", "text": "def fast_rcnn_inference(boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image):\r\n result_per_image = [\r\n fast_rcnn_inference_single_image(\r\n boxes_per_image, scores_per_image, image_shape, score_thresh, nms_thresh, topk_per_image\r\n )\r\n for scores_per_image, boxes_per_image, image_shape in zip(scores, boxes, image_shapes)\r\n ]\r\n return tuple(list(x) for x in zip(*result_per_image))", "title": "" }, { "docid": "54a326d9902079bab5e4c275614a5d64", "score": "0.5245979", "text": "def fast_rcnn_inference_single_image(\r\n boxes, scores, image_shape, score_thresh, 
nms_thresh, topk_per_image\r\n):\r\n scores = scores[:, :-1]\r\n num_bbox_reg_classes = boxes.shape[1] // 4\r\n # Convert to Boxes to use the `clip` function ...\r\n boxes = Boxes(boxes.reshape(-1, 4))\r\n boxes.clip(image_shape)\r\n boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4) # R x C x 4\r\n\r\n # Filter results based on detection scores\r\n filter_mask = scores > score_thresh # R x K\r\n # R' x 2. First column contains indices of the R predictions;\r\n # Second column contains indices of classes.\r\n filter_inds = filter_mask.nonzero()\r\n if num_bbox_reg_classes == 1:\r\n boxes = boxes[filter_inds[:, 0], 0]\r\n else:\r\n boxes = boxes[filter_mask]\r\n scores = scores[filter_mask]\r\n\r\n # Apply per-class NMS\r\n keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)\r\n if topk_per_image >= 0:\r\n keep = keep[:topk_per_image]\r\n boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]\r\n\r\n result = Instances(image_shape)\r\n result.pred_boxes = Boxes(boxes)\r\n result.scores = scores\r\n result.pred_classes = filter_inds[:, 1]\r\n return result, boxes, scores, keep", "title": "" }, { "docid": "fd7a51717611274d6e6a49cee752d46a", "score": "0.5230591", "text": "def base_method(self, in_img_l, in_img_r):\n out_img_l = in_img_l.argmin(axis=0)\n out_img_r = in_img_r.argmin(axis=0)\n #my\n out_img_l_true = out_img_l.copy()\n out_img_r_true = out_img_r.copy()\n wl, hl = out_img_l.shape\n wr, hr = out_img_r.shape\n windows = 8\n check1 = 0\n check2 = 0\n for i in range(windows,wl-windows):\n for j in range(windows,hl-windows):\n #first check if it is bad\n deltaup = abs(out_img_l[i][j]-out_img_l[i][j+1])\n deltadown = abs(out_img_l[i][j]-out_img_l[i][j-1])\n deltaleft = abs(out_img_l[i][j]-out_img_l[i-1][j])\n deltaright = abs(out_img_l[i][j]-out_img_l[i+1][j])\n maxdis = 10\n if(deltaup>maxdis and deltadown>maxdis and deltaleft>maxdis and deltaright>maxdis):\n vote = np.zeros(self.args.max_disp + 1)\n for k in range(-windows,windows+1):\n for l in range(-windows,windows+1):\n vote[out_img_l[i+k][j+l]] += 1\n out_img_l_true[i][j] = vote.argmax()\n check1 += 1\n for i in range(windows,wr-windows):\n for j in range(windows,hr-windows):\n #first check if it is bad\n deltaup = abs(out_img_r[i][j]-out_img_r[i][j+1])\n deltadown = abs(out_img_r[i][j]-out_img_r[i][j-1])\n deltaleft = abs(out_img_r[i][j]-out_img_r[i-1][j])\n deltaright = abs(out_img_r[i][j]-out_img_r[i+1][j])\n maxdis = 10\n if(deltaup>maxdis and deltadown>maxdis and deltaleft>maxdis and deltaright>maxdis):\n vote = np.zeros(self.args.max_disp + 1)\n for k in range(-windows,windows+1):\n for l in range(-windows,windows+1):\n vote[out_img_r[i+k][j+l]] += 1\n out_img_r_true[i][j] = vote.argmax()\n check2 += 1\n print(check1) \n print(check2)\n\n return out_img_l_true, out_img_r_true", "title": "" }, { "docid": "13d222176f936f47c0e8d31ce7906940", "score": "0.5228754", "text": "def search_images_db(text: str, count: int = 20) -> List[Tuple[\"ImageModel\", float]]:\n from image_scraper.models import ImageModel\n\n text_features = get_text_features(text)\n\n images = ImageModel.objects.all()\n\n image_features = images.values_list(\"image_features__features\", flat=True)\n\n image_features = np.stack(image_features)\n\n sim = 100. 
* text_features @ image_features.T\n\n top_idxs = np.argsort(sim)[::-1][:count].tolist()\n\n return [\n images[i] for i in top_idxs\n ]", "title": "" }, { "docid": "9b767d68757ed7845f4c08e776e9e37e", "score": "0.52247125", "text": "def selective_search_IJCV_roidb(self):\n assert False, 'not implemented'\n cache_file = os.path.join(self.cache_path,\n '{:s}_selective_search_IJCV_top_{:d}_roidb.pkl'.\n format(self.name, self.config['top_k']))\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print '{} ss roidb loaded from {}'.format(self.name, cache_file)\n return roidb\n\n gt_roidb = self.gt_roidb()\n ss_roidb = self._load_selective_search_IJCV_roidb(gt_roidb)\n roidb = datasets.imdb.merge_roidbs(gt_roidb, ss_roidb)\n with open(cache_file, 'wb') as fid:\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print 'wrote ss roidb to {}'.format(cache_file)\n\n\n num_images = self.num_images\n widths = [Image.open(self.image_path_at(i)).size[0]\n for i in xrange(num_images)]\n for i in xrange(num_images):\n boxes = roidb[i]['boxes'].copy()\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n #image=cv2.imread(self.image_path_at(i))\n #width = image.shape[1]\n #height = image.shape[0]\n for ii in xrange(boxes.shape[0]):\n assert (boxes[ii, 2] < widths[i]), '[merge] fail at '+str(ii)\n\n return roidb", "title": "" }, { "docid": "01c12967f6d6461580bd028ef6132a7a", "score": "0.5214975", "text": "def match_gt_box(rois,img_info):\n # in this method i can implement to get the rois that i can not classfiy\n img_class=img_info.get('class')\n # gt_box is (xmin,xmax,xmax,ymax)\n gt_box=img_info.get('change_box')\n gt_box_square=(gt_box[2]-gt_box[0])*(gt_box[3]-gt_box[1])\n pos_roi_loc=[]\n neg_roi_loc=[]\n pos_lrecord=[]\n neg_lrecord=[]\n pos_record=[]\n neg_record=[]\n gt_xmin, gt_ymin, gt_xmax, gt_ymax = gt_box\n iou_pos=[]\n iou_neg=[]\n for roi in rois:\n xmin,ymin,xmax,ymax=roi\n xxmin=np.maximum(xmin,gt_xmin)\n yymin=np.maximum(ymin,gt_ymin)\n xxmax=np.minimum(xmax,gt_xmax)\n yymax=np.minimum(ymax,gt_ymax)\n if yymax-yymin<0 or xxmax-xxmin<0:\n continue\n roi_square = (roi[2] - roi[0]) * (roi[3] - roi[1])\n w=xxmax-xxmin\n h=yymax-yymin\n iou=w*h/(roi_square+gt_box_square-w*h)\n class_num=cfg.NUM_CLASSES-1\n record_loc = [0] * 8 * class_num\n if iou<0.1:\n gt_class = np.zeros([cfg.NUM_CLASSES], dtype=np.int32)\n gt_class[-1]=1\n neg_roi_loc.append(record_loc)\n neg_lrecord.append(gt_class)\n neg_record.append(roi)\n iou_neg.append(iou)\n else:\n pos_lrecord.append(img_info.get('class'))\n index=np.where(img_info.get('class')==1)[0][0]\n record_loc[4*index:4*index+4]=[1,1,1,1]\n pos_record.append(roi)\n roi_width=roi[2]-roi[0]\n roi_height=roi[3]-roi[1]\n roi=[roi[0]+roi_width/2,roi[1]+roi_height/2,roi_width,roi_height]\n record_loc[4*(class_num+index):4*(class_num+index+1)]=cal_offset(roi,gt_box)\n pos_roi_loc.append(record_loc)\n iou_pos.append(iou)\n neg_record = np.array(neg_record)\n neg_lrecord = np.array(neg_lrecord)\n neg_roi_loc = np.array(neg_roi_loc)\n if len(pos_lrecord) != 0:\n pos_roi_loc=np.array(pos_roi_loc)\n pos_lrecord=np.array(pos_lrecord)\n pos_record=np.array(pos_record)\n iou_pos=np.array(iou_pos)\n if len(pos_lrecord)>6:\n index=iou_pos.argsort()[::-1][:6]\n pos_record=pos_record[index,:]\n pos_lrecord=pos_lrecord[index,:]\n pos_roi_loc=pos_roi_loc[index,:]\n if len(neg_lrecord)!=0:\n total_roi=np.concatenate((pos_record,neg_record),axis=0)\n label_record=np.concatenate((pos_lrecord,neg_lrecord),axis=0)\n 
roi_loc=np.concatenate((pos_roi_loc,neg_roi_loc),axis=0)\n else:\n total_roi = pos_record\n label_record = pos_lrecord\n roi_loc = pos_roi_loc\n else:\n if len(neg_lrecord)==0:\n return None\n else:\n total_roi=neg_record\n label_record=neg_lrecord\n roi_loc=neg_roi_loc\n return total_roi ,label_record,roi_loc", "title": "" }, { "docid": "580b4ef695c411f550b2c57524a29ae6", "score": "0.5201154", "text": "def DisplayImages(self) -> LayerFilterDisplayImages:", "title": "" }, { "docid": "c4f74a44a5911a768227ce1c7872bd16", "score": "0.5189496", "text": "def main(*args):\n if len(args) >= 1:\n image_source_id = bson.objectid.ObjectId(args[0])\n\n config = global_conf.load_global_config('config.yml')\n db_client = database.client.DatabaseClient(config=config)\n\n image_source = None\n s_image_source = db_client.image_source_collection.find_one({'_id': image_source_id})\n if s_image_source is not None:\n image_source = db_client.deserialize_entity(s_image_source)\n del s_image_source\n\n if image_source is not None:\n image_source.begin()\n while not image_source.is_complete():\n image, _ = image_source.get_next_image()\n debug_img = image.data[:, :, ::-1].copy()\n for obj in image.metadata.labelled_objects:\n x, y, w, h = obj.bounding_box\n cv2.rectangle(debug_img, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n text_label = str(obj.class_names[0])\n (retval, baseLine) = cv2.getTextSize(text_label, cv2.FONT_HERSHEY_COMPLEX, 1, 1)\n text_org = (x, y - 0)\n\n cv2.rectangle(debug_img, (text_org[0] - 5, text_org[1] + baseLine - 5),\n (text_org[0] + retval[0] + 5, text_org[1] - retval[1] - 5), (0, 0, 0), 2)\n cv2.rectangle(debug_img, (text_org[0] - 5, text_org[1] + baseLine - 5),\n (text_org[0] + retval[0] + 5, text_org[1] - retval[1] - 5), (255, 255, 255), -1)\n cv2.putText(debug_img, text_label, text_org, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)\n\n cv2.imshow('debug', debug_img)\n cv2.waitKey(0)", "title": "" }, { "docid": "debdf8efe6fc4eb8213e73502f28ee4c", "score": "0.5188061", "text": "def process(self, image):", "title": "" }, { "docid": "b6d17a1aff8bd6a9df334bc96d8bff55", "score": "0.51851326", "text": "def score_images(imgs, scenes_paths, *args):\n\treturn np.mean([\n#\t\tscore_image(*i)\n\t\tscore_image_fast(*i)\n\t\tfor i in zip(tqdm(imgs), scenes_paths, *args)\n\t\t])", "title": "" }, { "docid": "599b2f7226e4b9bce9a4ff90e7bce817", "score": "0.5184983", "text": "def classify_image(interpreter, image, top_k=1):\r\n set_input_tensor(interpreter, image)\r\n interpreter.invoke()\r\n output_details = interpreter.get_output_details()[0]\r\n output = np.squeeze(interpreter.get_tensor(output_details['index']))\r\n\r\n # If the model is quantized (uint8 data), then dequantize the results\r\n if output_details['dtype'] == np.uint8:\r\n scale, zero_point = output_details['quantization']\r\n output = scale * (output - zero_point)\r\n\r\n ordered = np.argpartition(-output, top_k)\r\n return [(i, output[i]) for i in ordered[:top_k]]", "title": "" }, { "docid": "d78d565ad2b9809dc80bd061e588ff05", "score": "0.51834095", "text": "def eval_model(image_path, \n results, \n detection_threshold, \n device, \n lit_model):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)\n\n image /= 255.0\n transform = transforms.ToTensor()\n image = transform(image)\n \n images = [image,]\n outputs = lit_model(images)\n\n for i, image in enumerate(images):\n\n boxes = outputs[i]['boxes'].data.cpu().numpy()\n scores = outputs[i]['scores'].data.cpu().numpy()\n \n 
boxes = boxes[scores >= detection_threshold].astype(np.int32)\n scores = scores[scores >= detection_threshold]\n\n for s, b in zip(scores, boxes.astype(int)):\n result = {\n 'x1': b[0],\n 'y1': b[1],\n 'x2': b[2],\n 'y2': b[3],\n 'score': s,\n }\n\n \n results.append(result)\n \n return results", "title": "" }, { "docid": "d333a9ad1c2948dbbab3f1878c6d1de4", "score": "0.51739", "text": "def detect(model, min_score, max_overlap, top_k, suppress=None):\n\n # Transforms\n resize = transforms.Resize((300, 300))\n to_tensor = transforms.ToTensor()\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n \n # id=500\n path=\"/home/fereshteh/kaist\" \n tp=0\n gt=0\n dt=0 \n idd=0 \n for id in range(0,45139):\n idd=idd+1 \n print(idd) \n # address = os.path.join(path, 'testanno', 'I'+ '{0:05}'.format(id) + '.xml')\n \n imagepath=os.path.join(path, 'test', 'I'+ '{0:05}'.format(id)+ '.png')\n # img_path = '/home/fereshteh/kaist/person/I00650.png'\n # tree = ET.parse(address)\n # root = tree.getroot()\n original_image = Image.open(imagepath, mode='r')\n original_image = original_image.convert('RGB')\n image = normalize(to_tensor(resize(original_image)))\n #image = (to_tensor(resize(original_image)))\n\n # Move to default device\n image = image.to(device)\n\n # Forward prop.\n predicted_locs, predicted_scores = model(image.unsqueeze(0))\n fps.update()\n # Detect objects in SSD output\n det_boxes, det_labels, det_scores = detect_objects(model, priors_cxcy, predicted_locs, predicted_scores, min_score=min_score,\n max_overlap=max_overlap, top_k=top_k, n_classes=n_classes)\n\n # Move detections to the CPU\n # det_boxes = det_boxes[0].to('cpu')\n #\n # # Transform to original image dimensions\n # original_dims = torch.FloatTensor(\n # [original_image.width, original_image.height, original_image.width, original_image.height]).unsqueeze(0)\n # det_boxes = det_boxes * original_dims\n #\n # # Decode class integer labels\n # det_labels = [rev_label_map[l] for l in det_labels[0].to('cpu').tolist()]\n #\n # # If no objects found, the detected labels will be set to ['0.'], i.e. ['background'] in SSD300.detect_objects() in model.py\n # # if det_labels == ['background','nonperson']:\n # # # Just return original image\n # # return original_image\n #\n # # # Annotate\n # # annotated_image = original_image\n # # draw = ImageDraw.Draw(annotated_image)\n #\n # # font = ImageFont.load_default()\n #\n # # Suppress specific classes, if needed\n # for i in range(det_boxes.size(0)):\n #\n # if suppress is not None:\n # if det_labels[i] in suppress:\n # continue\n # # else:\n # # dt=dt+1\n #\n #\n # # Boxes\n # # box_location = det_boxes[i].tolist()\n # # xmind=box_location[0]\n # # xmaxd=box_location[2]\n # # ymind=box_location[1]\n # # ymaxd=box_location[3]\n # # draw.rectangle(xy=box_location, outline=label_color_map[det_labels[i]])\n # # draw.rectangle(xy=[l + 1. 
for l in box_location], outline=label_color_map[\n # # det_labels[i]]) # a second rectangle at an offset of 1 pixel to increase line thickness\n #\n #\n # # # Text\n # # text_size = font.getsize(det_labels[i].upper())\n # # text_location = [box_location[0] + 2., box_location[1] - text_size[1]]\n # # textbox_location = [box_location[0], box_location[1] - text_size[1], box_location[0] + text_size[0] + 4.,\n # # box_location[1]]\n # # draw.rectangle(xy=textbox_location, fill=label_color_map[det_labels[i]])\n # # draw.text(xy=text_location, text=det_labels[i].upper(), fill='white',\n # # font=font)\n #\n # # for object in root.iter('object'):\n # # if (i==0):\n # # gt=gt+1\n # # difficult = int(object.find('difficult').text == '1')\n #\n # # label = object.find('name').text.lower().strip()\n #\n #\n # # bbox = object.find('bndbox')\n #\n # # xmin = int(bbox.find('x').text)-1\n # # ymin = int(bbox.find('y').text)-1\n # # w=int(bbox.find('w').text) if int(bbox.find('w').text)>0 else 1\n # # xmax = xmin+w\n # # h=int(bbox.find('h').text) if int(bbox.find('h').text)>0 else 1\n # # ymax = ymin+h\n # # w_intsec = np.maximum (0, (np.minimum(xmaxd, xmax) - np.maximum(xmind, xmin)))\n # # h_intsec = np.maximum (0, (np.minimum(ymaxd, ymax) - np.maximum(ymin, ymind)))\n # # wd=xmaxd-xmind\n # # hd=ymaxd-ymind\n # # wt=xmax-xmin\n # # ht=ymax-ymin\n # # s_intsec = w_intsec * h_intsec\n # # sd=wd*hd\n # # st=wt*ht\n # # iou=s_intsec/(st+sd-s_intsec)\n # # # print(iou)\n # # if iou>0.5:\n # # tp=tp+1\n #\n # # recall=0\n # # if(gt!=0):\n # # recall=tp/gt\n # # precision=0\n # # if(dt!=0):\n # # precision=tp/dt\n # # f1=0\n # # if(tp!=0):\n # # f1=2*(precision*recall)/(precision+recall)\n # # print(recall)\n # # print(precision)\n # # print(f1)\n # # print(idd)\n # # print(gt)\n # # del draw\n #\n #\n # # return annotated_image", "title": "" }, { "docid": "9450e03510821e7466486e456c55f6a7", "score": "0.5168649", "text": "def GetImgData(Dict,dbh,dbSchema,verbose=0):\n\n#\n# Prepare GTT_FILENAME table with list of possible inputs \n#\n CatList=[]\n for key in Dict:\n CatList.append([key.split(\"/\")[-1]])\n\n # Make sure the GTT_FILENAME table is empty\n curDB=dbh.cursor()\n curDB.execute('delete from GTT_FILENAME')\n # load img ids into opm_filename_gtt table\n print(\"# Loading GTT_FILENAME table for secondary queries with entries for {:d} images\".format(len(CatList)))\n dbh.insert_many('GTT_FILENAME',['FILENAME'],CatList)\n#\n# Obtain associated images (red_immask).\n#\n query=\"\"\"SELECT \n i.filename as imagefile,\n c.filename as catalogfile,\n i.pfw_attempt_id as pfw_attempt_id,\n i.band as band,\n i.expnum as expnum,\n i.ccdnum as ccdnum\n FROM image i, catalog c, GTT_FILENAME gtt\n WHERE c.filename=gtt.filename\n and i.pfw_attempt_id=c.pfw_attempt_id\n and i.filetype='red_immask'\n and i.ccdnum=c.ccdnum\n \"\"\".format(schema=dbSchema)\n\n if (verbose > 0):\n print(\"# Executing query to obtain red_immask images corresponding to the cat_finalcut catalogs\")\n if (verbose == 1):\n print(\"# sql = {:s} \".format(\" \".join([d.strip() for d in query.split('\\n')])))\n if (verbose > 1):\n print(\"# sql = {:s}\".format(query))\n t1=time.time()\n curDB.execute(query)\n desc = [d[0].lower() for d in curDB.description]\n\n for row in curDB:\n rowd = dict(zip(desc, row))\n CatName=rowd['catalogfile']\n ImgName=rowd['imagefile']\n\n if (CatName in Dict):\n CatRow=Dict[CatName]\n if ((rowd['ccdnum']==int(CatRow['CCDNUM']))and\n (rowd['expnum']==int(CatRow['EXPNUM']))):\n\n 
Dict[CatName]['IMAGEFILE']=rowd['imagefile']\n Dict[CatName]['BAND']=rowd['band']\n else:\n print(\"Catalog mismatch to Image: expnum=({cexp:7s} vs {iexp:7d}), ccdnum=({cccd:2s} vs {iccd:02d}))\".format(\n cexp=Dict[CatName]['EXPNUM'],iexp=rowd['expnum'],\n cccd=Dict[CatName]['CCDNUM'],iccd=rowd['ccdnum']))\n else:\n print('Warning: Catalog name not in Dict (something is really wrong!!!). Catname={:s}'.format(CatName))\n t2=time.time()\n print(\"Timing for IMG query: {:.2f}\".format(t2-t1))\n\n return Dict", "title": "" }, { "docid": "756f3ea2b80043b7a16d75af00a6bc20", "score": "0.5160153", "text": "def find(self,checktiles=[0,1,2]):\n if self.fakeRun:\n return\n \n print(checktiles)\n \n if not checktiles:\n return None\n \n images_list = self.processimage()\n for i in checktiles: #process images right to left because new images are likely to be at right\n results = self.predict(images_list[i])\n if results == 'default':\n continue\n if settings.skipwhiteimages and (results in [\"1\",\"9\",\"13\"]):\n print(\"Warning: White images found but setting to skip white images is true.\")\n continue\n if results not in settings.imageslabels:\n print(\"Warning: Result is an image but not in images labels list\")\n continue\n \n else:\n output = [int(results), i]\n if settings.save_images:\n import os\n\n #output = cv2.cvtColor(converted_output , cv2.COLORBGR2RGB)\n filepath = os.path.join(\"detected images\", \"{}.jpg\".format(str(output)))\n \n bgrimage = cv2.cvtColor(images_list[i], cv2.COLOR_RGB2BGR)\n cv2.imwrite(filepath, bgrimage)\n\n print(\"saving {}\".format(filepath))\n \n return output\n \n return None", "title": "" }, { "docid": "cad834fdc2d0cf072ddd62ad05420790", "score": "0.5156306", "text": "def _get_image_blob_s6(roidb,roidb_noclass1):\n\n\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n scale_inds = np.random.randint(\n 0, high=len(cfg.TRAIN.SCALES), size=num_images\n )\n processed_ims = []\n im_scales = []\n error_flag = [0,0]\n\n for i in range(num_images):\n roidb_noclass = roidb_noclass1.copy()\n if roidb[i][u'image'].split('/')[-1]==u'test.png': #test.jpg\n random_bbox = dict()\n random_bbox['kernel_size_x'] = int(WIDTH / 5)\n random_bbox['kernel_size_y'] = int(HEIGHT / 5)\n random_bbox['tl_x'] = 0\n random_bbox['tl_y'] = 0\n x0 = random_bbox['tl_x']\n x1 = random_bbox['tl_x'] + random_bbox['kernel_size_x']\n y0 = random_bbox['tl_y']\n y1 = random_bbox['tl_y'] + random_bbox['kernel_size_y']\n im = cv2.imread(roidb[i]['image'])[y0:y1, x0:x1]\n im = cv2.resize(im, (WIDTH, HEIGHT))\n # cv2.imwrite('/home/icubic/aa.png',im)\n error_flag[i] = 0\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n else:\n if 1:\n if len(roidb[i][u'boxes']) == 0:\n random_bbox = dict()\n random_flag = random.randint(0, 1)\n real_yuanlai_width = roidb[i][u'width'] * 1\n real_yuanlai_height = roidb[i][u'height'] * 1\n width_ratio = float(real_yuanlai_width) / 1024\n height_after_ratio = int(float(real_yuanlai_height) / width_ratio)\n width_after_ratio = 1024\n if 1:\n if random_flag == 0:\n #print(random_flag)\n random_bbox['kernel_size_x'] = int(WIDTH / 5)\n random_bbox['kernel_size_y'] = int(HEIGHT / 5)\n\n random_X = width_after_ratio - random_bbox['kernel_size_x']\n random_Y = height_after_ratio - random_bbox['kernel_size_y']\n try:\n random_bbox['tl_x'] = random.randint(0, random_X)\n random_bbox['tl_y'] = random.randint(0, random_Y)\n except:\n print('aa')\n x0 = random_bbox['tl_x']\n x1 = random_bbox['tl_x'] + 
random_bbox['kernel_size_x']\n y0 = random_bbox['tl_y']\n y1 = random_bbox['tl_y'] + random_bbox['kernel_size_y']\n im = cv2.imread(roidb[i][u'image'])\n im = cv2.resize(im, (width_after_ratio, height_after_ratio))[y0:y1, x0:x1]\n im = cv2.resize(im, (WIDTH, HEIGHT))\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n else:\n #print(random_flag)\n random_bbox['kernel_size_x'] = int(float(width_after_ratio) / 1.2)\n random_bbox['kernel_size_y'] = int(float(height_after_ratio) / 1.2)\n\n random_X = width_after_ratio - random_bbox['kernel_size_x']\n random_Y = height_after_ratio - random_bbox['kernel_size_y']\n random_bbox['tl_x'] = random.randint(0, random_X)\n random_bbox['tl_y'] = random.randint(0, random_Y)\n x0 = random_bbox['tl_x']\n x1 = random_bbox['tl_x'] + random_bbox['kernel_size_x']\n y0 = random_bbox['tl_y']\n y1 = random_bbox['tl_y'] + random_bbox['kernel_size_y']\n im = cv2.imread(roidb[i][u'image'])\n im = cv2.resize(im, (width_after_ratio, height_after_ratio))[y0:y1, x0:x1]\n im = cv2.resize(im, (WIDTH, HEIGHT))\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n else:\n im = cv2.imread(roidb[i][u'image'])\n im = cv2.resize(im, (WIDTH, HEIGHT))\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n # cv2.imwrite('/home/icubic/daily_work/circruit_model/tmp_images/aa.png',im)\n assert im is not None, \\\n 'Failed to read image \\'{}\\''.format(roidb[i]['image'])\n if roidb[i]['flipped']:#for image flip background training\n im = im[:, ::-1, :]\n target_size = cfg.TRAIN.SCALES[scale_inds[i]]\n im, im_scale = blob_utils.prep_im_for_blob(\n im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE\n )\n im_scales.append(im_scale)\n processed_ims.append(im)\n continue\n real_yuanlai_width = roidb[i][u'width'] * 1\n real_yuanlai_height = roidb[i][u'height'] * 1\n width_ratio = float(real_yuanlai_width) / 1024\n height_after_ratio = int(float(real_yuanlai_height) / width_ratio)\n width_after_ratio = 1024\n\n real_class = []#roidb[i]['gt_classes'][0]\n num_real_class = len(roidb[i]['gt_classes'])\n\n random_bbox = dict()\n random_bbox['kernel_size_x'] = int(WIDTH / 5)\n random_bbox['kernel_size_y'] = int(HEIGHT / 5)\n if 1:\n w_tongji = 0\n h_tongji = 0\n for i_tongji, sub_boxes_tongji in enumerate(roidb[i][u'boxes']):\n crop_x0_tongji = int(sub_boxes_tongji[0] / real_yuanlai_width * width_after_ratio)\n crop_y0_tongji = int(sub_boxes_tongji[1] / real_yuanlai_height * height_after_ratio)\n crop_x1_tongji = int(sub_boxes_tongji[2] / real_yuanlai_width * width_after_ratio)\n crop_y1_tongji = int(sub_boxes_tongji[3] / real_yuanlai_height * height_after_ratio)\n w_tongji = crop_x1_tongji - crop_x0_tongji\n h_tongji = crop_y1_tongji - crop_y0_tongji\n if w_tongji>int(WIDTH / 5) or h_tongji>int(HEIGHT / 5):\n random_bbox['kernel_size_x'] = int(float(width_after_ratio) / 1.2)\n random_bbox['kernel_size_y'] = int(float(height_after_ratio) / 1.2)\n\n\n random_X = width_after_ratio - random_bbox['kernel_size_x']\n random_Y = height_after_ratio - random_bbox['kernel_size_y']\n random_bbox['tl_x'] = random.randint(0, random_X)\n random_bbox['tl_y'] = random.randint(0, random_Y)\n x0 = random_bbox['tl_x']\n x1 = random_bbox['tl_x'] + random_bbox['kernel_size_x']\n y0 = random_bbox['tl_y']\n y1 = random_bbox['tl_y'] + random_bbox['kernel_size_y']\n try:\n im = cv2.imread(roidb[i][u'image'])\n except:\n im = cv2.imread(roidb[i][u'image'])\n im = cv2.resize(im, 
(width_after_ratio, height_after_ratio))[y0:y1, x0:x1]\n im = cv2.resize(im, (WIDTH, HEIGHT))\n sum_inside_overlaps = 0\n boxes_inside_overlaps = []\n\n for i_roidb,sub_boxes in enumerate(roidb[i][u'boxes']):\n crop_x0 = int(sub_boxes[0]/real_yuanlai_width*width_after_ratio)\n crop_y0 = int(sub_boxes[1]/real_yuanlai_height*height_after_ratio)\n crop_x1 = int(sub_boxes[2]/real_yuanlai_width*width_after_ratio)\n crop_y1 = int(sub_boxes[3]/real_yuanlai_height*height_after_ratio)\n #real_x0 = float(crop_x0 - x0)*1024/224 # float(crop_x0) / 1024 * 224\n #real_y0 = float(crop_y0 - y0)*1024/224 # float(crop_y0) / 1024 * 224\n #real_x1 = float(crop_x1 - x0)*1024/224 # float(crop_x1) / 1024 * 224\n #real_y1 = float(crop_y1 - y0)*1024/224\n\n\n overlaps_rate = solve_coincide((x0, y0, x1, y1), (crop_x0, crop_y0, crop_x1, crop_y1))\n if overlaps_rate>0.9:\n sum_inside_overlaps = sum_inside_overlaps + 1\n #real_x0 = crop_x0 - x0 # float(crop_x0) / 1024 * 224\n #real_y0 = crop_y0 - y0 # float(crop_y0) / 1024 * 224\n #real_x1 = crop_x1 - x0 # float(crop_x1) / 1024 * 224\n #real_y1 = crop_y1 - y0\n\n real_x0 = float(crop_x0 - x0)*WIDTH/(random_bbox['kernel_size_x']) # float(crop_x0) / 1024 * 224\n real_y0 = float(crop_y0 - y0)*HEIGHT/(random_bbox['kernel_size_y']) # float(crop_y0) / 1024 * 224\n real_x1 = float(crop_x1 - x0)*WIDTH/(random_bbox['kernel_size_x']) # float(crop_x1) / 1024 * 224\n real_y1 = float(crop_y1 - y0)*HEIGHT/(random_bbox['kernel_size_y'])\n if real_x0<0:\n real_x0 = 0\n if real_x0>WIDTH:\n real_x0 = WIDTH\n\n if real_x1<0:\n real_x1 = 0\n if real_x1>WIDTH:\n real_x1 = WIDTH\n\n if real_y0<0:\n real_y0 = 0\n if real_y0>HEIGHT:\n real_y0 = HEIGHT\n\n if real_y1<0:\n real_y1 = 0\n if real_y1>HEIGHT:\n real_y1 = HEIGHT\n #cv2.rectangle(im, (int(real_x0), int(real_y0)), (int(real_x1), int(real_y1)), (0, 255, 255), 3)\n #cv2.imwrite('/home/icubic/daily_work/code/Detectron/detectron/datasets/data/shanghai/aa.png',im)\n\n boxes_inside_overlaps.append([real_x0, real_y0, real_x1, real_y1])\n real_class.append(roidb[i]['gt_classes'][i_roidb])\n #cv2.rectangle(im, (int(real_x0), int(real_y0)),\n #(int(real_x1), int(real_y1)), (255, 0, 255))\n #cv2.imwrite('/home/icubic/daily_work/code/circruit/new/result/uu.png', im)\n #a = roidb[i]['gt_overlaps'].toarray()\n\n if sum_inside_overlaps>0 :\n num_valid_objs = sum_inside_overlaps*1\n boxes = np.zeros((num_valid_objs, 4), dtype=np.float32)\n gt_classes = np.zeros((num_valid_objs), dtype=np.int32)\n gt_overlaps = np.zeros((num_valid_objs, REAL_CLASS), dtype=np.float32)\n box_to_gt_ind_map = np.zeros((num_valid_objs), dtype=np.int32)\n is_crowd = np.zeros((num_valid_objs), dtype=np.bool)\n for ix in range(num_valid_objs):\n gt_classes[ix] = real_class[ix]#real_class*1\n try:\n gt_overlaps[ix, real_class] = 1.0\n except:\n print('error')\n is_crowd[ix] = False\n box_to_gt_ind_map[ix] = ix\n for i_index in range(4):\n boxes[ix,i_index] = boxes_inside_overlaps[ix][i_index]\n\n #for ix in range(num_valid_objs):\n #box_to_gt_ind_map[ix] = ix\n #cls = real_class*1\n roidb_noclass['boxes'] = np.append(roidb_noclass['boxes'], boxes, axis=0)\n\n roidb_noclass['gt_classes'] = np.append(roidb_noclass['gt_classes'], gt_classes)\n #mm = np.append(\n # roidb_noclass['gt_overlaps'].toarray(), gt_overlaps,axis=0)\n roidb_noclass['gt_overlaps'] = np.append(\n roidb_noclass['gt_overlaps'].toarray(), gt_overlaps)\n roidb_noclass['gt_overlaps'] = scipy.sparse.csr_matrix(roidb_noclass['gt_overlaps'])\n #mm = np.append(mm, gt_overlaps, axis=0)\n #roidb_noclass['gt_overlaps'] 
= scipy.sparse.csr_matrix(mm)\n roidb_noclass['is_crowd'] = np.append(roidb_noclass['is_crowd'], is_crowd)\n roidb_noclass['box_to_gt_ind_map'] = np.append(roidb_noclass['box_to_gt_ind_map'], box_to_gt_ind_map)\n\n gt_overlaps = roidb_noclass['gt_overlaps'].toarray()\n # max overlap with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n roidb_noclass['max_classes'] = max_classes\n roidb_noclass['max_overlaps'] = max_overlaps\n # sanity checks\n # if max overlap is 0, the class must be background (class 0)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # if max overlap > 0, the class must be a fg class (not class 0)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n roidb_noclass['bbox_targets'] = compute_bbox_regression_targets(roidb_noclass)\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n\n else:\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n\n\n\n\n #print('aa')\n\n\n\n\n assert im is not None, \\\n 'Failed to read image \\'{}\\''.format(roidb[i]['image'])\n if roidb[i]['flipped']:\n im = im[:, ::-1, :]\n target_size = cfg.TRAIN.SCALES[scale_inds[i]]\n im, im_scale = blob_utils.prep_im_for_blob(\n im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE\n )\n im_scales.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = blob_utils.im_list_to_blob(processed_ims)\n\n return blob, im_scales,error_flag", "title": "" }, { "docid": "985be5282a7ade235e0fe5f974c0e5d4", "score": "0.5151824", "text": "def update_imageprops():\n LOGGER.info(\"Getting release information from: %s\", ARG.DATABASE)\n description = dict()\n if 'effector_description' in IMAGE_PROPS:\n try:\n CURSOR['sage'].execute(READ['EFFECTORS'])\n rows = CURSOR['sage'].fetchall()\n except MySQLdb.Error as err:\n sql_error(err)\n for row in rows:\n description[row['cv_term']] = row['definition']\n rows = get_slide_codes()\n cross = dict()\n if rows:\n print(\"Processing %d image%s\" % (len(rows), 's' if len(rows) > 1 else ''))\n else:\n if ARG.SLIDE_CODE:\n LOGGER.critical(\"No images found for slide code %s\", ARG.SLIDE_CODE)\n else:\n LOGGER.critical(\"No images found for release %s\", ARG.RELEASE)\n slide_code = dict()\n for row in tqdm(rows):\n COUNT['read'] += 1\n LOGGER.info(\"Found image %s with cross barcode %s\", row['id'], row['cross_barcode'])\n if row['cross_barcode'] not in cross:\n cdata = call_responder('flycore', '?request=crossdata&cross_barcode='\n + row['cross_barcode'])\n cross[row['cross_barcode']] = {'lab_member': cdata['crossdata']['lab_member'],\n 'lab_project': cdata['crossdata']['Lab_Project'],\n 'cross_description': cdata['crossdata']['Crossed_Notes']}\n if 'effector_description' in IMAGE_PROPS and row['effector'] in description:\n cross[row['cross_barcode']]['effector_description'] = description[row['effector']]\n if row['cross_barcode'] not in cross:\n LOGGER.error(\"Could not find cross data for %s\", row['cross_barcode'])\n COUNT['no cross data'] += 1\n continue\n slide_code[row['slide_code']] = 1\n process_row(row, cross)\n complete_processing(slide_code)", "title": "" }, { "docid": "bea97fda8a1dffbb6ec65467d0cd95b5", "score": "0.51502067", "text": "def who_is_it_voting(image_path, database, model, n):\r\n \r\n ### START CODE HERE ### \r\n \r\n ## Step 1: Compute the target 
\"encoding\" for the image. Use img_to_encoding() see example above. ## (≈ 1 line)\r\n encoding = img_to_encoding(image_path, model)\r\n \r\n ## Step 2: Find the closest encoding ##\r\n \r\n # Initialize \"min_dist\" to a large value, say 100 (≈1 line)\r\n dist = np.array([])\r\n # Loop over the database to find the distance of each database image to our test image\r\n for (_, db_enc) in database.items():\r\n \r\n # Compute L2 distance between the target \"encoding\" and the current \"emb\" from the database. (≈ 1 line)\r\n dist = np.append(dist,np.linalg.norm(encoding-db_enc))\r\n \r\n min_n_idx = dist.argsort()[:n] # Get closest 5 images from db\r\n min_n_array = np.array(list(database.keys()))[min_n_idx] # Get name of 5 closest images from db\r\n\r\n for i in range(len(min_n_array)):\r\n # clean up names\r\n min_n_array[i] = min_n_array[i][:-2]\r\n\r\n counter = collections.Counter(min_n_array) # Count number of matches\r\n #print(counter)\r\n msk = np.array(list(counter.values()))>= np.ceil(n/2) # Bool Mask for matches more than or equal 3\r\n \r\n \r\n \r\n if np.all(np.array(list(counter.values()))<np.ceil(n/2)):\r\n # If no faces in db with 3 or more matches\r\n print(\"Not in the database.\" +'...'+image_path +'...'+image_path)\r\n return\r\n \r\n identity = np.array(list(counter.keys()))[msk][0]\r\n \r\n print (\"it's \" + str(identity) +'...'+image_path +'...'+image_path)\r\n ### END CODE HERE ###\r\n \r\n return identity", "title": "" }, { "docid": "dbe1a80707a065123ae48160c4d959e9", "score": "0.51444614", "text": "def selective_search_roidb(self):\n cache_file = os.path.join(self.cache_path,\n self.name + '_selective_search_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print '{} ss roidb loaded from {}'.format(self.name, cache_file)\n \n num_images = self.num_images\n widths = [Image.open(self.image_path_at(i)).size[0]\n for i in xrange(num_images)]\n for i in xrange(num_images):\n boxes = roidb[i]['boxes'].copy()\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n #image=cv2.imread(self.image_path_at(i))\n #width = image.shape[1]\n #height = image.shape[0]\n for ii in xrange(boxes.shape[0]):\n assert (boxes[ii, 2] < widths[i]), '[just loaded] fail at '+str(ii)\n\n return roidb\n\n if int(self._year) == 2011 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n self._check_roidb(gt_roidb)\n ss_roidb = self._load_selective_search_roidb(gt_roidb)\n self._check_roidb(gt_roidb)\n roidb = datasets.imdb.merge_roidbs(gt_roidb, ss_roidb)\n else:\n roidb = self._load_selective_search_roidb(None)\n \n #roidb = self._load_selective_search_roidb(None)\n with open(cache_file, 'wb') as fid:\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print 'wrote ss roidb to {}'.format(cache_file)\n\n num_images = self.num_images\n widths = [Image.open(self.image_path_at(i)).size[0]\n for i in xrange(num_images)]\n for i in xrange(num_images):\n boxes = roidb[i]['boxes'].copy()\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n #image=cv2.imread(self.image_path_at(i))\n #width = image.shape[1]\n #height = image.shape[0]\n for ii in xrange(boxes.shape[0]):\n assert (boxes[ii, 2] < widths[i]), '[load] fail at '+str(ii)\n\n\n return roidb", "title": "" }, { "docid": "168e97bbbb832fffba090d245549f7d3", "score": "0.51434976", "text": "def GoThroughTopCells(self, direction):\r\n if direction == 'next':\r\n if self.popnexttopimgcounter > (self.TotaNumofCellSelected-1):#Make sure it doesn't go beyond the last coords.\r\n 
self.popnexttopimgcounter -= 1\r\n \r\n self.CurrentRankCellpProperties = self.DataFrame_sorted.iloc[self.popnexttopimgcounter]\r\n \r\n #--------------------Show image with cell in box----------------------\r\n spec = self.CurrentRankCellpProperties.loc['ImgNameInfor_Tag']\r\n print(spec)\r\n # #-------------- readin image---------------\r\n tag_imagefilename = os.path.join(self.Tag_folder, spec+'_PMT_0Zmax.tif')\r\n print(tag_imagefilename)\r\n loaded_tag_image_display = imread(tag_imagefilename, as_gray=True)\r\n # Retrieve boundingbox information\r\n Each_bounding_box = self.CurrentRankCellpProperties.loc['BoundingBox_Tag']\r\n minr = int(Each_bounding_box[Each_bounding_box.index('minr')+4:Each_bounding_box.index('_maxr')])\r\n maxr = int(Each_bounding_box[Each_bounding_box.index('maxr')+4:Each_bounding_box.index('_minc')]) -1 \r\n minc = int(Each_bounding_box[Each_bounding_box.index('minc')+4:Each_bounding_box.index('_maxc')])\r\n maxc = int(Each_bounding_box[Each_bounding_box.index('maxc')+4:len(Each_bounding_box)]) -1\r\n \r\n loaded_tag_image_display[minr, minc:maxc] = 4\r\n loaded_tag_image_display[maxr, minc:maxc] = 4\r\n loaded_tag_image_display[minr:maxr, minc] = 4\r\n loaded_tag_image_display[minr:maxr, maxc] = 4\r\n \r\n # -------Show image in imageview-------------\r\n self.OriginalImg_item.setImage(np.fliplr(np.rot90(loaded_tag_image_display)), autoLevels=True)\r\n self.OriginalImg_item.setLevels((0, 1))\r\n \r\n self.Matdisplay_Figure.clear()\r\n ax1 = self.Matdisplay_Figure.add_subplot(111)\r\n ax1.imshow(loaded_tag_image_display)#Show the first image\r\n #--------------------------------------------------Add red boundingbox to axis----------------------------------------------\r\n rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, edgecolor='cyan', linewidth=2)\r\n ax1.add_patch(rect)\r\n ax1.text(maxc, minr, 'NO_{}'.format(self.popnexttopimgcounter),fontsize=10, color='orange', style='italic')\r\n self.Matdisplay_Figure.tight_layout()\r\n self.Matdisplay_Canvas.draw()\r\n \r\n #-------------------Print details of cell of interest----------------\r\n self.normalOutputWritten('------------------No.{} out of {}----------------\\n'.format(self.popnexttopimgcounter+1, self.TotaNumofCellSelected))\r\n self.normalOutputWritten('ID: {}\\n{}: {}\\n{}: {}\\n{}: {}\\n'.format(spec, self.EvaluatingPara_list[0], round(self.CurrentRankCellpProperties.loc[self.EvaluatingPara_list[0]], 4), \\\r\n self.EvaluatingPara_list[1], round(self.CurrentRankCellpProperties.loc[self.EvaluatingPara_list[1]], 4), \r\n 'IDNumber', self.CurrentRankCellpProperties.name))\r\n #------------------Stage move----------------------------------------\r\n# self.CurrentPos = spec[spec.index('_R')+2:len(spec)].split('C')\r\n# self.ludlStage.moveAbs(int(self.CurrentPos[0]),int(self.CurrentPos[1]))\r\n \r\n self.popnexttopimgcounter += 1 # Alwasy plus 1 to get it ready for next move.\r\n \r\n elif direction == 'previous':\r\n self.popnexttopimgcounter -= 2 \r\n if self.popnexttopimgcounter >= 0:\r\n \r\n self.CurrentRankCellpProperties = self.DataFrame_sorted.iloc[self.popnexttopimgcounter]\r\n \r\n #--------------------Show image with cell in box----------------------\r\n spec = self.CurrentRankCellpProperties.loc['ImgNameInfor_Tag']\r\n print(spec)\r\n # #-------------- readin image---------------\r\n tag_imagefilename = os.path.join(self.Tag_folder, spec+'_PMT_0Zmax.tif')\r\n print(tag_imagefilename)\r\n loaded_tag_image_display = imread(tag_imagefilename, as_gray=True)\r\n # Retrieve 
boundingbox information\r\n Each_bounding_box = self.CurrentRankCellpProperties.loc['BoundingBox_Tag']\r\n minr = int(Each_bounding_box[Each_bounding_box.index('minr')+4:Each_bounding_box.index('_maxr')])\r\n maxr = int(Each_bounding_box[Each_bounding_box.index('maxr')+4:Each_bounding_box.index('_minc')]) -1 \r\n minc = int(Each_bounding_box[Each_bounding_box.index('minc')+4:Each_bounding_box.index('_maxc')])\r\n maxc = int(Each_bounding_box[Each_bounding_box.index('maxc')+4:len(Each_bounding_box)]) -1\r\n \r\n loaded_tag_image_display[minr, minc:maxc] = 4\r\n loaded_tag_image_display[maxr, minc:maxc] = 4\r\n loaded_tag_image_display[minr:maxr, minc] = 4\r\n loaded_tag_image_display[minr:maxr, maxc] = 4\r\n \r\n # -------Show image in imageview-------------\r\n self.OriginalImg_item.setImage(np.fliplr(np.rot90(loaded_tag_image_display)), autoLevels=True)\r\n self.OriginalImg_item.setLevels((0, 1))\r\n \r\n self.Matdisplay_Figure.clear()\r\n ax1 = self.Matdisplay_Figure.add_subplot(111)\r\n ax1.imshow(loaded_tag_image_display)#Show the first image\r\n #--------------------------------------------------Add red boundingbox to axis----------------------------------------------\r\n rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, edgecolor='cyan', linewidth=2)\r\n ax1.add_patch(rect)\r\n ax1.text(maxc, minr, 'NO_{}'.format(self.popnexttopimgcounter),fontsize=10, color='orange', style='italic')\r\n self.Matdisplay_Figure.tight_layout()\r\n self.Matdisplay_Canvas.draw()\r\n \r\n #-------------------Print details of cell of interest----------------\r\n self.normalOutputWritten('------------------No.{} out of {}----------------\\n'.format(self.popnexttopimgcounter+1, self.TotaNumofCellSelected))\r\n self.normalOutputWritten('ID: {}\\n{}: {}\\n{}: {}\\n{}: {}\\n'.format(spec, self.EvaluatingPara_list[0], round(self.CurrentRankCellpProperties.loc[self.EvaluatingPara_list[0]], 4), \\\r\n self.EvaluatingPara_list[1], round(self.CurrentRankCellpProperties.loc[self.EvaluatingPara_list[1]], 4), \r\n 'IDNumber', self.CurrentRankCellpProperties.name))\r\n \r\n #------------------Stage move----------------------------------------\r\n# self.CurrentPos = spec[spec.index('_R')+2:len(spec)].split('C')\r\n# self.ludlStage.moveAbs(int(self.CurrentPos[0]),int(self.CurrentPos[1]))\r\n \r\n if self.popnexttopimgcounter < (self.TotaNumofCellSelected-1):\r\n self.popnexttopimgcounter += 1\r\n else:\r\n self.popnexttopimgcounter = 0\r\n \r\n elif direction == 'null':\r\n self.popnexttopimgcounter -= 1\r\n \r\n self.CurrentRankCellpProperties = self.DataFrame_sorted.iloc[self.popnexttopimgcounter]\r\n \r\n #--------------------Show image with cell in box----------------------\r\n spec = self.CurrentRankCellpProperties.loc['ImgNameInfor_Tag']\r\n print(spec)\r\n # #-------------- readin image---------------\r\n tag_imagefilename = os.path.join(self.Tag_folder, spec+'_PMT_0Zmax.tif')\r\n print(tag_imagefilename)\r\n loaded_tag_image_display = imread(tag_imagefilename, as_gray=True)\r\n # Retrieve boundingbox information\r\n Each_bounding_box = self.CurrentRankCellpProperties.loc['BoundingBox_Tag']\r\n minr = int(Each_bounding_box[Each_bounding_box.index('minr')+4:Each_bounding_box.index('_maxr')])\r\n maxr = int(Each_bounding_box[Each_bounding_box.index('maxr')+4:Each_bounding_box.index('_minc')]) -1 \r\n minc = int(Each_bounding_box[Each_bounding_box.index('minc')+4:Each_bounding_box.index('_maxc')])\r\n maxc = int(Each_bounding_box[Each_bounding_box.index('maxc')+4:len(Each_bounding_box)]) 
-1\r\n \r\n loaded_tag_image_display[minr, minc:maxc] = 4\r\n loaded_tag_image_display[maxr, minc:maxc] = 4\r\n loaded_tag_image_display[minr:maxr, minc] = 4\r\n loaded_tag_image_display[minr:maxr, maxc] = 4\r\n \r\n # -------Show image in imageview-------------\r\n self.OriginalImg_item.setImage(np.fliplr(np.rot90(loaded_tag_image_display)), autoLevels=True)\r\n self.OriginalImg_item.setLevels((0, 1))\r\n \r\n self.Matdisplay_Figure.clear()\r\n ax1 = self.Matdisplay_Figure.add_subplot(111)\r\n ax1.imshow(loaded_tag_image_display)#Show the first image\r\n #--------------------------------------------------Add red boundingbox to axis----------------------------------------------\r\n rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, edgecolor='cyan', linewidth=2)\r\n ax1.add_patch(rect)\r\n ax1.text(maxc, minr, 'NO_{}'.format(self.popnexttopimgcounter),fontsize=10, color='orange', style='italic')\r\n self.Matdisplay_Figure.tight_layout()\r\n self.Matdisplay_Canvas.draw()\r\n \r\n self.popnexttopimgcounter += 1\r\n \r\n elif direction == 'IDNumber':\r\n self.GotoSequence()", "title": "" }, { "docid": "235fbd3e5feed20056cc355a92130db4", "score": "0.51422757", "text": "def select_imgs(self, minx: float, miny: float, maxx: float, maxy: float, em_tol: float = 5.0, num_iters: int = 25)-> gpd.GeoDataFrame:\n query_res = gpd.GeoDataFrame(gpd.read_file(self._ctx_shp).cx[minx:maxx, miny:maxy])\n query_res['area'] = query_res.area\n query_res = self._reduce(query_res[query_res['EmAngle'] <= em_tol].sort_values(['EmAngle', 'area'], ascending=[True, False]))\n query_res = self._reduce(query_res.sort_values('area', ascending=False))\n # simplistic way to reduce number of images,\n # yes, I know using a tree would be better but whatevs\n for _ in tqdm.trange(num_iters, leave=True, desc=\"shuffle reduce\"):\n query_res = self._reduce(query_res, shuffle=True)\n\n return query_res", "title": "" }, { "docid": "d85f65065ec03fe75b011bb0300569ed", "score": "0.514071", "text": "def _get_image_blob_s6_ok(roidb,roidb_noclass1):\n\n\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n scale_inds = np.random.randint(\n 0, high=len(cfg.TRAIN.SCALES), size=num_images\n )\n processed_ims = []\n im_scales = []\n error_flag = [0,0]\n\n for i in range(num_images):\n roidb_noclass = roidb_noclass1.copy()\n if roidb[i][u'image'].split('/')[-1]==u'test.jpg':\n random_bbox = dict()\n random_bbox['kernel_size_x'] = int(WIDTH / 5)\n random_bbox['kernel_size_y'] = int(HEIGHT / 5)\n random_bbox['tl_x'] = 0\n random_bbox['tl_y'] = 0\n x0 = random_bbox['tl_x']\n x1 = random_bbox['tl_x'] + random_bbox['kernel_size_x']\n y0 = random_bbox['tl_y']\n y1 = random_bbox['tl_y'] + random_bbox['kernel_size_y']\n im = cv2.imread(roidb[i]['image'])[y0:y1, x0:x1]\n im = cv2.resize(im, (WIDTH, HEIGHT))\n # cv2.imwrite('/home/icubic/aa.png',im)\n error_flag[i] = 0\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n else:\n if 1:\n real_yuanlai_width = roidb[i][u'width'] * 1\n real_yuanlai_height = roidb[i][u'height'] * 1\n width_ratio = float(real_yuanlai_width) / 1024\n height_after_ratio = int(float(real_yuanlai_height) / width_ratio)\n width_after_ratio = 1024\n\n real_class = []#roidb[i]['gt_classes'][0]\n num_real_class = len(roidb[i]['gt_classes'])\n\n random_bbox = dict()\n random_bbox['kernel_size_x'] = int(WIDTH / 5)\n random_bbox['kernel_size_y'] = int(HEIGHT / 5)\n random_X = width_after_ratio - random_bbox['kernel_size_x']\n 
random_Y = height_after_ratio - random_bbox['kernel_size_y']\n random_bbox['tl_x'] = random.randint(0, random_X)\n random_bbox['tl_y'] = random.randint(0, random_Y)\n x0 = random_bbox['tl_x']\n x1 = random_bbox['tl_x'] + random_bbox['kernel_size_x']\n y0 = random_bbox['tl_y']\n y1 = random_bbox['tl_y'] + random_bbox['kernel_size_y']\n im = cv2.imread(roidb[i]['image'])\n im = cv2.resize(im, (width_after_ratio, height_after_ratio))[y0:y1, x0:x1]\n im = cv2.resize(im, (WIDTH, HEIGHT))\n sum_inside_overlaps = 0\n boxes_inside_overlaps = []\n\n for i_roidb,sub_boxes in enumerate(roidb[i][u'boxes']):\n crop_x0 = int(sub_boxes[0]/real_yuanlai_width*width_after_ratio)\n crop_y0 = int(sub_boxes[1]/real_yuanlai_height*height_after_ratio)\n crop_x1 = int(sub_boxes[2]/real_yuanlai_width*width_after_ratio)\n crop_y1 = int(sub_boxes[3]/real_yuanlai_height*height_after_ratio)\n #real_x0 = float(crop_x0 - x0)*1024/224 # float(crop_x0) / 1024 * 224\n #real_y0 = float(crop_y0 - y0)*1024/224 # float(crop_y0) / 1024 * 224\n #real_x1 = float(crop_x1 - x0)*1024/224 # float(crop_x1) / 1024 * 224\n #real_y1 = float(crop_y1 - y0)*1024/224\n\n\n overlaps_rate = solve_coincide((x0, y0, x1, y1), (crop_x0, crop_y0, crop_x1, crop_y1))\n if overlaps_rate>0.9:\n sum_inside_overlaps = sum_inside_overlaps + 1\n #real_x0 = crop_x0 - x0 # float(crop_x0) / 1024 * 224\n #real_y0 = crop_y0 - y0 # float(crop_y0) / 1024 * 224\n #real_x1 = crop_x1 - x0 # float(crop_x1) / 1024 * 224\n #real_y1 = crop_y1 - y0\n\n real_x0 = float(crop_x0 - x0)*WIDTH/(random_bbox['kernel_size_x']) # float(crop_x0) / 1024 * 224\n real_y0 = float(crop_y0 - y0)*HEIGHT/(random_bbox['kernel_size_y']) # float(crop_y0) / 1024 * 224\n real_x1 = float(crop_x1 - x0)*WIDTH/(random_bbox['kernel_size_x']) # float(crop_x1) / 1024 * 224\n real_y1 = float(crop_y1 - y0)*HEIGHT/(random_bbox['kernel_size_y'])\n if real_x0<0:\n real_x0 = 0\n if real_x0>WIDTH:\n real_x0 = WIDTH\n\n if real_x1<0:\n real_x1 = 0\n if real_x1>WIDTH:\n real_x1 = WIDTH\n\n if real_y0<0:\n real_y0 = 0\n if real_y0>HEIGHT:\n real_y0 = HEIGHT\n\n if real_y1<0:\n real_y1 = 0\n if real_y1>HEIGHT:\n real_y1 = HEIGHT\n #cv2.rectangle(im, (int(real_x0), int(real_y0)), (int(real_x1), int(real_y1)), (0, 255, 255), 3)\n #cv2.imwrite('/home/icubic/daily_work/code/Detectron/detectron/datasets/data/shanghai/aa.png',im)\n\n boxes_inside_overlaps.append([real_x0, real_y0, real_x1, real_y1])\n real_class.append(roidb[i]['gt_classes'][i_roidb])\n #cv2.rectangle(im, (int(real_x0), int(real_y0)),\n #(int(real_x1), int(real_y1)), (255, 0, 255))\n #cv2.imwrite('/home/icubic/daily_work/code/circruit/new/result/uu.png', im)\n #a = roidb[i]['gt_overlaps'].toarray()\n\n if sum_inside_overlaps>0:\n num_valid_objs = sum_inside_overlaps*1\n boxes = np.zeros((num_valid_objs, 4), dtype=np.float32)\n gt_classes = np.zeros((num_valid_objs), dtype=np.int32)\n gt_overlaps = np.zeros((num_valid_objs, REAL_CLASS), dtype=np.float32)\n box_to_gt_ind_map = np.zeros((num_valid_objs), dtype=np.int32)\n is_crowd = np.zeros((num_valid_objs), dtype=np.bool)\n for ix in range(num_valid_objs):\n gt_classes[ix] = real_class[ix]#real_class*1\n try:\n gt_overlaps[ix, real_class] = 1.0\n except:\n print('error')\n is_crowd[ix] = False\n box_to_gt_ind_map[ix] = ix\n for i_index in range(4):\n boxes[ix,i_index] = boxes_inside_overlaps[ix][i_index]\n\n #for ix in range(num_valid_objs):\n #box_to_gt_ind_map[ix] = ix\n #cls = real_class*1\n roidb_noclass['boxes'] = np.append(roidb_noclass['boxes'], boxes, axis=0)\n\n roidb_noclass['gt_classes'] = 
np.append(roidb_noclass['gt_classes'], gt_classes)\n #mm = np.append(\n # roidb_noclass['gt_overlaps'].toarray(), gt_overlaps,axis=0)\n roidb_noclass['gt_overlaps'] = np.append(\n roidb_noclass['gt_overlaps'].toarray(), gt_overlaps)\n roidb_noclass['gt_overlaps'] = scipy.sparse.csr_matrix(roidb_noclass['gt_overlaps'])\n #mm = np.append(mm, gt_overlaps, axis=0)\n #roidb_noclass['gt_overlaps'] = scipy.sparse.csr_matrix(mm)\n roidb_noclass['is_crowd'] = np.append(roidb_noclass['is_crowd'], is_crowd)\n roidb_noclass['box_to_gt_ind_map'] = np.append(roidb_noclass['box_to_gt_ind_map'], box_to_gt_ind_map)\n\n gt_overlaps = roidb_noclass['gt_overlaps'].toarray()\n # max overlap with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n roidb_noclass['max_classes'] = max_classes\n roidb_noclass['max_overlaps'] = max_overlaps\n # sanity checks\n # if max overlap is 0, the class must be background (class 0)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # if max overlap > 0, the class must be a fg class (not class 0)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n roidb_noclass['bbox_targets'] = compute_bbox_regression_targets(roidb_noclass)\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n\n else:\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n\n\n\n\n #print('aa')\n\n\n\n \n assert im is not None, \\\n 'Failed to read image \\'{}\\''.format(roidb[i]['image'])\n if roidb[i]['flipped']:\n im = im[:, ::-1, :]\n target_size = cfg.TRAIN.SCALES[scale_inds[i]]\n im, im_scale = blob_utils.prep_im_for_blob(\n im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE\n )\n im_scales.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = blob_utils.im_list_to_blob(processed_ims)\n\n return blob, im_scales,error_flag", "title": "" }, { "docid": "9b08a4e4bb3411dca8221d855fca30c0", "score": "0.51376027", "text": "def iterateOverDataset():\n path = os.path.dirname(os.path.abspath(__file__))\n\n for group in ('00000','00001', '00001', '00002', '00003', '00004', '00005', '00007', '00008'):\n with open(path + \"/GTSRB/Final_Training/Images/\" + group + \"/GT-\" + group + \".csv\", \"r\") as csvfile:\n reader = csv.DictReader(csvfile, delimiter=\";\")\n for row in reader:\n temppath = path + \"\\\\GTSRB\\\\Final_Training\\\\Images\\\\\" + group + \"\\\\\" + row['Filename']\n speed, img, gray = detectSpeedLimitSign(temppath, 5)\n markAndShowDetectedSpeedLimits(speed, img)", "title": "" }, { "docid": "c8604b0fc2ad8c28824749967163301e", "score": "0.5136245", "text": "def find_top_n_images_to_cover_area(rects,number_of_rects=20): \n rects_from_highest_to_lowest_area = sorted(rects,key=area_of_rect,reverse=True)", "title": "" }, { "docid": "1b7431584f7954b406c9cf4a6a6d49bd", "score": "0.5128243", "text": "def run_retrieval(self, qrnn):\n quantiles = torch.tensor(qrnn.quantiles).float()\n n_pixels = self.n_pixels\n n_scans = self.n_scans\n n_quantiles = len(quantiles)\n\n y_pred = torch.zeros((n_scans, n_pixels, n_quantiles))\n mean = torch.zeros((n_scans, n_pixels))\n first_tertial = torch.zeros((n_scans, n_pixels))\n second_tertial = torch.zeros((n_scans, n_pixels))\n pop = torch.zeros((n_scans, n_pixels))\n\n\n with torch.no_grad():\n for i in range(len(self)):\n\n i_start = i * self.scans_per_batch\n i_end = 
(i + 1) * self.scans_per_batch\n\n x = torch.tensor(self[i]).float().detach()\n y = qrnn.model(x).detach()\n\n\n y_pred[i_start:i_end, :, :] = y.reshape(-1, n_pixels, n_quantiles)\n\n means = qq.posterior_mean(y, quantiles, quantile_axis=1)\n\n mean[i_start:i_end] = means.reshape(-1, self.n_pixels)\n\n t = qq.posterior_quantiles(y, quantiles, [0.333], quantile_axis=1)\n first_tertial[i_start:i_end] = t.reshape(-1, self.n_pixels)\n t = qq.posterior_quantiles(y, quantiles, [0.666], quantile_axis=1)\n second_tertial[i_start:i_end] = t.reshape(-1, self.n_pixels)\n\n p = qq.probability_larger_than(y, quantiles, 0.01, quantile_axis=1)\n pop[i_start:i_end] = p.reshape(-1, self.n_pixels)\n\n\n dims = [\"scans\", \"pixels\", \"quantiles\"]\n\n data = {\n \"quantiles\": ((\"quantiles\",), qrnn.quantiles),\n \"precip_quantiles\": (dims, y_pred.detach().numpy()),\n \"precip_mean\": (dims[:2], mean.detach().numpy()),\n \"precip_1st_tertial\": (dims[:2], first_tertial.detach().numpy()),\n \"precip_3rd_tertial\": (dims[:2], second_tertial.detach().numpy()),\n \"precip_pop\": (dims[:2], pop.detach().numpy())\n }\n return xarray.Dataset(data)", "title": "" }, { "docid": "3b7db5a269498eebeb76c33b2d3311c8", "score": "0.5128085", "text": "def get_relevant_imgs(img_lst, img_map, indices, distances,k, form=\"list\", rank=True, img_dir=None):\n df_lst = []\n for img in img_lst:\n df_lst.append(get_similar_imgs(img, img_map, indices, distances, k, img_dir=img_dir))\n \n df = pd.concat(df_lst)\n if rank:\n df = df.sort_values(\"dist\")\n else:\n df = df.sample(k)\n if form == \"list\":\n return df.head(k)[\"img\"].values\n elif form == \"df\":\n return df.head(k)", "title": "" }, { "docid": "65272d2a0d1369eca273fe62b8b90b8e", "score": "0.5119438", "text": "def compute_photometric_loss(prev_images, next_images, flow_dict):\n total_photometric_loss = 0.\n loss_weight_sum = 0.\n for i in range(len(flow_dict)):\n for image_num in range(prev_images.shape[0]):\n flow = flow_dict[\"flow{}\".format(i)][image_num]\n height = flow.shape[1]\n width = flow.shape[2]\n\n prev_images_resize = F.to_tensor(F.resize(F.to_pil_image(prev_images[image_num].cpu()),\n [height, width])).cuda()\n next_images_resize = F.to_tensor(F.resize(F.to_pil_image(next_images[image_num].cpu()),\n [height, width])).cuda()\n\n prev_images_warped = warp_images_with_flow(prev_images_resize, flow)\n distance = prev_images_warped - next_images_resize\n\n if i == 3 and image_num == 0:\n vis_warp = prev_images_warped.clone().detach().cpu().numpy().squeeze()\n vis_prev = prev_images_resize.clone().detach().cpu().numpy().squeeze()\n vis_next = next_images_resize.clone().detach().cpu().numpy().squeeze()\n vis_dist = distance.clone().detach().cpu().numpy().squeeze()\n photometric_vis = [vis_warp, vis_prev, vis_next, vis_dist]\n # img = np.hstack([vis_warp, vis_prev, vis_next, vis_dist])\n # cv2.imshow(\"warp image\", img)\n # cv2.waitKey(1)\n\n \n photometric_loss = charbonnier_loss(distance, calc_mean=False)\n total_photometric_loss += photometric_loss\n loss_weight_sum += 1.\n total_photometric_loss /= loss_weight_sum\n return total_photometric_loss, photometric_vis", "title": "" }, { "docid": "437ec11d77e8ed5373a670c27df486e5", "score": "0.5105579", "text": "def forward_test(self, imgs):", "title": "" }, { "docid": "6f6427c91900fb8abefe5f3f41a2af79", "score": "0.5101513", "text": "def quick_stats(input_yaml):\n LABEL_DICT = {\n \"Off\" : 0,\n \"Red\" : 1,\n \"Yellow\" : 2,\n \"Red-yellow\" : 3,\n \"Green\":4\n }\n LABEL_DICT_R={v: k for k, v in 
LABEL_DICT.items()}\n\n LABEL_DICT_P = {\n \"Circle\" : 0,\n \"Straight\" : 1,\n \"Left\" : 2,\n \"StraightLeft\" : 3,\n \"Right\": 4,\n \"Pedestrian\": 8,\n \"Bike\": 9\n }\n # classes are saved as in bstld\n\n LABEL_DICT_P_R={v: k for k, v in LABEL_DICT_P.items()}\n\n images = get_all_labels(input_yaml)\n\n widths = []\n heights = []\n sizes = []\n\n num_images = len(images)\n num_lights = 0\n appearances = {\n \"Off\" : 0,\n \"Red\" : 0,\n \"Yellow\" : 0,\n \"Red-yellow\" : 0,\n \"Green\":0\n }\n appearances_P = {\n \"Circle\" : 0,\n \"Straight\" : 0,\n \"Left\" : 0,\n \"StraightLeft\" : 0,\n \"Right\": 0,\n \"Pedestrian\": 0,\n \"Bike\": 0\n }\n\n large=0\n medium=0\n small=0\n\n counter = 0\n for image in tqdm(images):\n\n num_lights += len(image['objects'])\n for box in image['objects']:\n \n if box['width']<=width_limit:\n continue\n\n widths.append(box['width'])\n heights.append(box['height'])\n\n class_str=str(box['class_id'])[-2]\n # class_str_P=str(box['class_id'])[-1]\n\n appearances[LABEL_DICT_R[int(class_str)]]+=1\n # appearances_P[LABEL_DICT_P_R[int(class_str_P)]]+=1\n\n size=box['width']*box['height']\n sizes.append(size)\n\n if size<=(32*32):\n small+=1\n elif size>(96*96):\n large+=1\n else:\n medium+=1\n\n # avg_width = sum(widths) / float(len(widths))\n # avg_height = sum(heights) / float(len(heights))\n # avg_size = sum(sizes) / float(len(sizes))\n\n # median_width = sorted(widths)[len(widths) // 2]\n # median_height = sorted(heights)[len(heights) // 2]\n # median_size = sorted(sizes)[len(sizes) // 2]\n\n # print('Number of images:', num_images)\n print('Number of traffic lights:', num_lights, '\\n')\n\n print('Small images:', small)\n print('Medium images:', medium)\n print('Large images:', large, '\\n')\n\n # print('Minimum width:', min(widths))\n # print('Average width:', avg_width)\n # print('median width:', median_width)\n # print('maximum width:', max(widths), '\\n')\n\n # print('Minimum height:', min(heights))\n # print('Average height:', avg_height)\n # print('median height:', median_height)\n # print('maximum height:', max(heights), '\\n')\n\n # print('Minimum size:', min(sizes))\n # print('Average size:', avg_size)\n # print('median size:', median_size)\n # print('maximum size:', max(sizes), '\\n')\n\n print('Labels:')\n for key, label in appearances.items():\n print('\\t{}: {}'.format(key, label))\n\n print()\n\n # print('Labels_Pictogram:')\n # for key, label in appearances_P.items():\n # print('\\t{}: {}'.format(key, label))", "title": "" }, { "docid": "5695ddd1562e9a958a892f55263032ca", "score": "0.50955164", "text": "def visualize(self, result):\n t0 = time()\n max_batch = self.vis_max_batch\n indices = self.vis_t_indices\n indices_pred = self.vis_t_indices\n image_summaries = []\n if not (self.use_hmr_only and not self.do_hallucinate):\n cams = np.take(result['cams'][:max_batch], indices_pred, axis=1)\n imgs = result['images']\n if self.data_format == 'NCHW':\n imgs = np.transpose(imgs, [0, 1, 3, 4, 2])\n kps_gt = result['kps_gt'][:max_batch]\n kps_pred = result['kps_pred'][:max_batch]\n verts = result['verts'].reshape((max_batch, len(indices), 6890, 3))\n\n for b in range(max_batch):\n all_rend_imgs = []\n if not (self.use_hmr_only and not self.do_hallucinate):\n imgs_sub = np.take(imgs, indices, axis=1)\n kps_gt_sub = np.take(kps_gt, indices, axis=1)\n kps_pred_sub = np.take(kps_pred, indices_pred, axis=1)\n for j, (img, cam, kp_gt, kp_pred, vert) in enumerate(\n zip(imgs_sub[b], cams[b], kps_gt_sub[b],\n kps_pred_sub[b], verts[b])):\n rend_img = 
vis_util.visualize_img(\n img=img,\n cam=cam,\n kp_gt=kp_gt,\n kp_pred=kp_pred,\n vert=vert,\n renderer=self.renderer,\n text={\n 'frame': indices[j]\n })\n all_rend_imgs.append(np.hstack(rend_img))\n combined = np.vstack(all_rend_imgs)\n sio = BytesIO()\n plt.imsave(sio, combined, format='png')\n vis_sum = tf.Summary.Image(\n encoded_image_string=sio.getvalue(),\n height=combined.shape[0],\n width=combined.shape[1])\n image_summaries.append(\n tf.Summary.Value(\n tag='vis_images/{}'.format(b), image=vis_sum))\n # Do static.\n if self.use_hmr_only and not self.do_hallucinate:\n img = result['images_static'][b]\n cam = result['cams_static'][b][0]\n kp_gt = result['kps_gt_static'][b][0]\n kp_pred = result['kps_pred_static'][b][0]\n vert = result['verts_static'][b][0]\n\n rend_img = vis_util.visualize_img(\n img=img,\n cam=cam,\n kp_gt=kp_gt,\n kp_pred=kp_pred,\n vert=vert,\n renderer=self.renderer,\n )\n rend_img = np.hstack(rend_img)\n sio = BytesIO()\n plt.imsave(sio, rend_img, format='png')\n vis_sum = tf.Summary.Image(\n encoded_image_string=sio.getvalue(),\n height=rend_img.shape[0],\n width=rend_img.shape[1])\n image_summaries.append(\n tf.Summary.Value(\n tag='vis_images_static/{}'.format(b), image=vis_sum))\n\n if self.predict_delta and not self.use_hmr_only:\n all_delta_imgs = []\n for dt, preds in sorted(result['deltas'].items()):\n delta_t = dt\n cams_dt = preds['cams'][b]\n kps_pr_dt = preds['kps_pred'][b]\n verts_dt = preds['verts'][b]\n # Take the right subsamples (verts are already subsampled):\n cams_dt = np.take(cams_dt, indices_pred, axis=0)\n kps_pr_dt = np.take(kps_pr_dt, indices_pred, axis=0)\n imgs_sub = np.take(imgs[b], indices + delta_t, axis=0)\n kps_gt_sub = np.take(kps_gt[b], indices + delta_t, axis=0)\n all_delta_imgs.append(\n self.visualize_strip(\n images=imgs_sub,\n cams=cams_dt,\n kps_gt=kps_gt_sub,\n kps_pr=kps_pr_dt,\n verts=verts_dt,\n indices=indices,\n dt=dt,\n ))\n combined = np.hstack(all_delta_imgs)\n sio = BytesIO()\n plt.imsave(sio, combined, format='png')\n vis_sum = tf.Summary.Image(\n encoded_image_string=sio.getvalue(),\n height=combined.shape[0],\n width=combined.shape[1])\n image_summaries.append(\n tf.Summary.Value(\n tag='vis_images_delta/delta_{}'.format(b),\n image=vis_sum))\n if self.do_hallucinate:\n all_hal_imgs = []\n for dt, preds in sorted(result['hal'].items()):\n delta_t = dt\n cams_dt = preds['cams'][b]\n kps_pr_dt = preds['kps_pred'][b]\n verts_dt = preds['verts'][b]\n # Take the right subsamples (verts are already subsampled):\n cams_dt = np.take(cams_dt, indices_pred, axis=0)\n kps_pr_dt = np.take(kps_pr_dt, indices_pred, axis=0)\n imgs_sub = np.take(imgs[b], indices + delta_t, axis=0)\n kps_gt_sub = np.take(kps_gt[b], indices + delta_t, axis=0)\n all_hal_imgs.append(\n self.visualize_strip(\n images=imgs_sub,\n cams=cams_dt,\n kps_gt=kps_gt_sub,\n kps_pr=kps_pr_dt,\n verts=verts_dt,\n indices=indices,\n dt=dt,\n ))\n combined = np.hstack(all_hal_imgs)\n sio = BytesIO()\n plt.imsave(sio, combined, format='png')\n vis_sum = tf.Summary.Image(\n encoded_image_string=sio.getvalue(),\n height=combined.shape[0],\n width=combined.shape[1])\n image_summaries.append(\n tf.Summary.Value(\n tag='vis_images_delta/hal_{}'.format(b),\n image=vis_sum))\n\n summary = tf.Summary(value=image_summaries)\n self.summary_writer.add_summary(\n summary, global_step=result['iteration'])\n print('Visualization time:', time() - t0)", "title": "" }, { "docid": "569b3a0ed06af61131764722d6169543", "score": "0.50943637", "text": "def find_darts(self, image: 
np.ndarray, visualize_results=False) -> DartSearchResult:\n search_image = cv2.resize(image, self.image_resolution)\n task_pool = ((search_image, profile) for profile in self.profiles)\n results_out = []\n output_image = search_image.copy()\n for found_darts, profile in self.dart_finding_pool.imap_unordered(DartDetector.query_image_for_profile,\n task_pool):\n results_out += found_darts\n if visualize_results:\n DartDetector.render_dart_results(found_darts, output_image, profile)\n\n return sorted(results_out), output_image", "title": "" }, { "docid": "2f3b171e05afbec4715bd695ea81ac31", "score": "0.5092353", "text": "def calculateSimilarityMetric(self, results_np: np.ndarray, threshold=4.5, threshold2=[]):\n shot_boundaries_l = []\n for i in range(0, len(results_np)):\n vid_name = results_np[i][0]\n start = results_np[i][1]\n end = results_np[i][2]\n distances_l = results_np[i][3]\n distances_np = np.array(distances_l).astype('float')\n\n if(self.config_instance.activate_candidate_selection == 0):\n # just take all frame positions over specified threshold\n\n THRESHOLD_MODE = self.config_instance.threshold_mode\n if (THRESHOLD_MODE == \"adaptive\"):\n shot_l = []\n thresholds = []\n window_size = self.config_instance.window_size\n alpha = threshold\n beta = threshold2\n for x in range(0, len(distances_np)):\n if (x % window_size == 0):\n th = np.mean(distances_np[x:x + window_size]) + alpha + np.std(distances_np[x:x + window_size]) * beta\n thresholds.append(th)\n thresholds = np.array(thresholds)\n print(thresholds.shape)\n\n for x in range(0, len(distances_np)):\n if (distances_np[x] > thresholds[x]):\n idx_curr = x + 1\n idx_prev = x\n shot_boundaries_l.append([vid_name, idx_prev, idx_curr])\n # print(\"cut at: \" + str(i) + \" -> \" + str(i+1))\n # print(i)\n # print(thresholds[i])\n # print(distances_np[i])\n\n if (THRESHOLD_MODE == \"fixed\"):\n idx_max = np.where(distances_np > threshold)[0]\n print(idx_max)\n\n if(len(idx_max) == 1) :\n final_idx = idx_max #+ start\n shot_boundaries_l.append([vid_name, final_idx, final_idx + 1])\n elif(len(idx_max) > 1):\n final_idx = idx_max #+ start\n print(final_idx)\n #exit()\n for a in range(0, len(final_idx)):\n shot_boundaries_l.append([vid_name, final_idx[a], final_idx[a] + 1])\n\n elif(self.config_instance.activate_candidate_selection == 1):\n # just take all frame positions over specified threshold\n idx_max = np.argmax(distances_np)\n final_idx = idx_max + start\n shot_boundaries_l.append([vid_name, final_idx, final_idx + 1])\n\n # print(final_idx)\n\n # cv2.imwrite(\"./test_result\" + str(i) + \"_1.png\", self.vid_instance.getFrame(idx_max[i]))\n # cv2.imwrite(\"./test_result\" + str(i) + \"_2.png\", self.vid_instance.getFrame(idx_max[i] + 1))\n\n shot_boundaries_np = np.array(shot_boundaries_l)\n #print(shot_boundaries_np)\n return shot_boundaries_np", "title": "" }, { "docid": "d449aeb4d73385a673fe0288e0958e89", "score": "0.50911874", "text": "def cal_metric(pred_list, gt_list, image_level=True):\n OK_1, OK_2 = 0, 0\n NOK_1, NOK_2 = 0, 0\n ACC_1, ACC_2 = 0, 0\n preds, gts = [], []\n for pred_path, gt_path in zip(pred_list, gt_list):\n predict = (read_img(pred_path, True) > 0).astype(int)\n preds.append(predict)\n # If it's a positive sample directory and there is no GT label, create one.\n if gt_path == \"good\":\n gt = np.zeros(predict.shape, predict.dtype)\n else:\n gt = read_img(gt_path, True)\n gt = (cv2.resize(gt, predict.shape) > 0).astype(int)\n gts.append(gt)\n if image_level:\n TP, TN, FP, FN = 
calculate_TP_TN_FP_FN_image(ground_truth=gt, predicted_mask=predict)\n else:\n TP, TN, FP, FN = calculate_TP_TN_FP_FN_pixel(ground_truth=gt, predicted_mask=predict)\n OK_1 += TP\n OK_2 += TP + FN\n NOK_1 += TN\n NOK_2 += FP + TN\n ACC_1 += TP + TN\n ACC_2 += TP + TN + FP + FN\n if image_level:\n print(\n \"ok: {}, nok: {}, avg: {}\".format(\n round_acc(OK_1 / OK_2), round_acc(NOK_1 / NOK_2), round_acc(ACC_1 / ACC_2)\n )\n )\n return ACC_1 / ACC_2\n y_pred = np.array(preds).flatten()\n y = np.array(gts).flatten()\n metric = nn.ROC(pos_label=1)\n metric.clear()\n metric.update(y_pred, y)\n fpr, tpr, _ = metric.eval()\n auc = nn.auc(fpr, tpr)\n print(\n \"AUC: {}, ok: {}, nok: {}, avg: {}\".format(\n round_acc(auc), round_acc(OK_1 / OK_2), round_acc(NOK_1 / NOK_2), round_acc(ACC_1 / ACC_2)\n )\n )\n return auc", "title": "" }, { "docid": "5aac2fbdaeacbcc9c51d58942cf4ec1f", "score": "0.5088953", "text": "def ranking(query_embedds, target_embedds, img_ids):\n\n cos_sim = torch.mm(query_embedds,target_embedds.T)/ \\\n torch.mm(query_embedds.norm(2, dim=1, keepdim=True),\n target_embedds.norm(2, dim=1, keepdim=True).T)\n _, idx = torch.topk(cos_sim, len(query_embedds)//100, dim=1)\n top20 = idx.cpu().numpy()\n img_ids = np.array(img_ids)\n count = 0\n with open('answer.csv', 'w') as f:\n f.write(\"Descritpion_ID,Top_20_Image_IDs\\n\")\n for i, img_id in enumerate(img_ids):\n top_imgs = img_ids[top20[i]]\n top_imgs_str = \" \".join(list(top_imgs))\n text_id = img_id.split(\".\")[0]+\".txt\"\n f.write(text_id+\",\"+top_imgs_str+\"\\n\")\n if img_id in list(top_imgs):\n count+=1\n print(\"count\", count)", "title": "" }, { "docid": "2e862d29ae8933c519ef9ed57fcbf3b3", "score": "0.5082914", "text": "def visualize_image_set(images_np: List, boxes_list: List, classes_list: List, \n category_index: dict, title: str, min_threshold: float, scores_list = [], interactive=True) -> None:\n should_use_dummy_scores = False\n if isinstance(scores_list, list) and not scores_list:\n should_use_dummy_scores = True\n\n if len(images_np) > 16:\n num_images_to_plot = 16\n else:\n num_images_to_plot = len(images_np)\n\n plt.figure(figsize=(30, 15))\n plt.suptitle(title)\n for idx in range(num_images_to_plot):\n boxes = boxes_list[idx]\n categories = classes_list[idx]\n if should_use_dummy_scores: # set scores to equal 100%\n scores = [1.0] * len(boxes)\n else:\n scores = scores_list[idx]\n print(scores)\n\n if not isinstance(boxes, np.ndarray):\n boxes = np.array(boxes)\n if not isinstance(scores, np.ndarray):\n scores = np.array(scores)\n \n plt.subplot(4, 4, idx+1)\n \n print('scores > ' + str(int(min_threshold*100)) + '%:')\n print([score for score in scores if score > min_threshold])\n\n plot_detections(\n images_np[idx],\n boxes,\n categories,\n scores, \n category_index)\n if interactive: plt.ion()\n plt.show()\n # plt.savefig('tests/test_set.png')\n plt.pause(0.001)\n input('Press [enter] to continue.')", "title": "" }, { "docid": "a2c9d0414913b5e285245ca2566f0237", "score": "0.50764364", "text": "def multistitch(self, image_generator, ratio=0.75, reproj_thresh=4.0, manual=True, os=\"linux\"):\r\n\r\n # init result, iteration variables\r\n last_image = next(image_generator)\r\n result = last_image.copy()\r\n (last_kps, last_features) = self.detectAndDescribe(last_image)\r\n accumulated_vector = (0, 0)\r\n\r\n # iteration variable. 
start at 1 because first iteration checks images n and n-1\r\n i = 1\r\n\r\n for next_image in image_generator:\r\n (next_kps, next_features) = self.detectAndDescribe(next_image)\r\n\r\n matM = self.matchKeypoints(next_kps, last_kps,\r\n next_features, last_features, ratio, reproj_thresh)\r\n\r\n if matM is None:\r\n print(f'No Matches at i = {str(i)}, returning current result')\r\n return result\r\n\r\n (_, matH, _) = matM\r\n\r\n # this stuff only works for isometric anyway, so we only respect the translation\r\n trans_x = int(matH[0][2])\r\n trans_y = int(matH[1][2])\r\n\r\n if np.linalg.norm((trans_x,trans_y)) > 100000:\r\n print(f'Strange translation. Skipping current image')\r\n continue\r\n\r\n if manual:\r\n last_key = None\r\n while last_key != 13:\r\n test_image = self.translate_and_merge((last_image, next_image), (trans_x, trans_y))\r\n cv2.imshow(\"testimage\", test_image)\r\n last_key = cv2.waitKeyEx(0)\r\n print(last_key)\r\n\r\n if os == \"win\":\r\n if last_key == 2490368: trans_y = trans_y + 1\r\n if last_key == 2555904: trans_x = trans_x - 1\r\n if last_key == 2621440: trans_y = trans_y - 1\r\n if last_key == 2424832: trans_x = trans_x + 1\r\n else:\r\n if last_key == 65361: trans_x = trans_x + 1\r\n if last_key == 65362: trans_y = trans_y - 1\r\n if last_key == 65363: trans_x = trans_x - 1\r\n if last_key == 65364: trans_y = trans_y + 1\r\n\r\n\r\n # translation is only between images n and n-1 - we have to keep track of all translations up to here\r\n accumulated_vector = (accumulated_vector[0] + trans_x, accumulated_vector[1] + trans_y)\r\n\r\n result = self.translate_and_merge((result, next_image), accumulated_vector)\r\n\r\n # if the new image extended the canvas in negative x or y, we need discard the change in the total vector -\r\n # the change is already contained in the moved canvas origin\r\n if accumulated_vector[0] < 0:\r\n accumulated_vector = (0, accumulated_vector[1])\r\n if accumulated_vector[1] < 0:\r\n accumulated_vector = (accumulated_vector[0], 0)\r\n\r\n last_image = next_image\r\n (last_kps, last_features) = (next_kps, next_features)\r\n i = i + 1\r\n\r\n return result", "title": "" }, { "docid": "ae650d72bf947332f4aac4c5137e41d5", "score": "0.50757676", "text": "def evaluate(model, test_dataloader, model_upsample=True):\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n model = model.to(device)\n model.eval()\n if model_upsample:\n upsample_2x = Upsample(scale_factor = 2, mode = 'bilinear', align_corners = True) \n\n all_predictions = []\n all_depths = []\n\n with torch.no_grad():\n\n for it, batch in enumerate(test_dataloader):\n # check this\n \n images = batch[0]['img']\n depths = batch[0]['depth']\n crop = batch[1][0].cpu().numpy()\n \n images = torch.autograd.Variable(images.to(device))\n depths = torch.autograd.Variable(depths.to(device))\n\n images = normalize_batch(images)\n \n if model_upsample:\n predictions_unflipped = upsample_2x(model(images)) \n else:\n predictions_unflipped = model(images) \n predictions_unflipped = predictions_unflipped[:, :, crop[0]:crop[1]+1, crop[2]:crop[3]+1]\n\n\n if model_upsample:\n predictions_flipped = upsample_2x(model(torch.from_numpy(images.cpu().numpy()[:,:,:,::-1].copy()).to(device))) \n else:\n predictions_flipped = model(torch.from_numpy(images.cpu().numpy()[:,:,:,::-1].copy()).to(device)) \n\n predictions_from_flipped = torch.from_numpy(predictions_flipped.cpu().numpy()[:,:,:,::-1].copy()).to(device)\n predictions_from_flipped = predictions_from_flipped[:, :, 
crop[0]:crop[1]+1, crop[2]:crop[3]+1]\n\n predictions = 0.5 * predictions_unflipped + 0.5 * predictions_from_flipped\n\n depths = depths[:, :, crop[0]:crop[1]+1, crop[2]:crop[3]+1]\n all_predictions.append(predictions)\n all_depths.append(depths)\n # END FOR\n\n all_predictions = torch.cat(all_predictions, dim = 0)\n all_depths = torch.cat(all_depths, dim = 0)\n\n metrics = evaluate_predictions(all_predictions, all_depths)\n\n return metrics", "title": "" }, { "docid": "10802e95a2e7d5bee8ca7d0ad9319fad", "score": "0.50727206", "text": "def display_current_results(self, visuals, epoch):\n for label, image in visuals.items():\n if self.opt.model != 'classifier':\n img_np = util.tensor2im(image, imtype=np.uint8)\n img_shape = img_np.shape\n b, c, d, h, w = img_shape\n slice_portion = int(d/2) # For 3D images, get three images at increasing depth\n img_sample = img_np[0, 0, slice_portion, :,:] # choose the first sample in the batch\n img_sample2 = img_np[0, 0, :, slice_portion, :] # choose the second sample in the batch\n img_sample3 = img_np[0, 0, :, :, slice_portion] # choose the third sample in the batch\n\n fig_slice = plt.figure(edgecolor='b', dpi=150)\n ax = fig_slice.add_subplot(1, 3, 1)\n ax2 = fig_slice.add_subplot(1, 3, 2)\n ax3 = fig_slice.add_subplot(1, 3, 3)\n\n ax.set_axis_off()\n ax2.set_axis_off()\n ax3.set_axis_off()\n\n ax.set_title('XY slice')\n ax2.set_title('XZ slice')\n ax3.set_title('YZ slice')\n\n ax.imshow(img_sample, cmap='gray')\n ax2.imshow(img_sample2, cmap='gray')\n ax3.imshow(img_sample3, cmap='gray')\n\n plt.gca().set_axis_off()\n plt.subplots_adjust(top=1, bottom=0, right=1, left=0,\n hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.close(fig_slice)\n\n img_mip_xy = np.amax(img_np[0,0], 0)\n img_mip_xz = np.amax(img_np[0,0], 1)\n img_mip_yz = np.amax(img_np[0,0], 2)\n\n fig_mip = plt.figure(edgecolor='b', dpi=150)\n ax_2_1 = fig_mip.add_subplot(1, 3, 1)\n ax_2_2= fig_mip.add_subplot(1, 3, 2)\n ax_2_3 = fig_mip.add_subplot(1, 3, 3)\n\n ax_2_1.set_axis_off()\n ax_2_2.set_axis_off()\n ax_2_3.set_axis_off()\n\n ax_2_1.set_title('XY MIP')\n ax_2_2.set_title('XZ MIP')\n ax_2_3.set_title('YZ MIP')\n\n ax_2_1.imshow(img_mip_xy, vmax=256, cmap='gray')\n ax_2_2.imshow(img_mip_xz, vmax=256,cmap='gray')\n ax_2_3.imshow(img_mip_yz, vmax=256, cmap='gray')\n\n plt.gca().set_axis_off()\n plt.subplots_adjust(top=1, bottom=0, right=1, left=0,\n hspace=0, wspace=0)\n plt.margins(0, 0)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n plt.close(fig_mip)\n\n self.tb_writer.add_figure('train_slice_images/' + label, fig_slice, epoch)\n self.tb_writer.add_figure('train_mip_images/' + label, fig_mip, epoch)\n\n else: # if the model is a classifier, display with the labels.\n if label == 'output_tr_softmax' or label == 'output_val_softmax' or label =='label_GT':\n\n #image[0] chooses the first item in the batch.\n predicted = torch.argmax(image[0])\n label_print = predicted.cpu().float().numpy()\n if label_print == 0:\n label_print_str = 'Axial'\n elif label_print == 1:\n label_print_str = 'Lateral'\n\n fig_slice = plt.figure()\n plt.text(0.1, 0.4, label_print_str, size=60, bbox=dict(boxstyle=\"square\",\n ec=(1., 0.5, 0.5),\n fc=(1., 0.8, 0.8),\n ))\n plt.show()\n plt.close(fig_slice)\n\n self.tb_writer.add_figure('train_images/' + label, fig_slice, epoch)\n\n else:\n img_np = util.tensor2im(image[0], imtype=np.uint8)\n 
img_np = img_np.squeeze()\n fig_slice = plt.figure()\n plt.imshow(img_np, cmap='gray')\n plt.close(fig_slice)\n\n self.tb_writer.add_figure('train_images/' + label, fig_slice, epoch)", "title": "" }, { "docid": "78460c0dede6ea09aa40a049c604d2a9", "score": "0.5065433", "text": "def get_anchor_gt(all_img_data, class_count, C, img_length_calc_function, backend, mode='train'):\n sample_selector = SampleSelector(class_count)\n\n while True:\n if mode == 'train':\n random.shuffle(all_img_data)\n\n for img_data in all_img_data:\n try:\n\n if C.balanced_classes and sample_selector.skip_sample_for_balanced_class(img_data):\n continue\n\n # read in image, and optionally add augmentation\n\n if mode == 'train':\n img_data_aug, x_img = data_augment.augment(img_data, C, augment=True)\n else:\n img_data_aug, x_img = data_augment.augment(img_data, C, augment=False)\n\n (width, height) = (img_data_aug['width'], img_data_aug['height'])\n (rows, cols, _) = x_img.shape\n\n assert cols == width\n assert rows == height\n\n # get image dimensions for resizing\n (resized_width, resized_height) = get_new_img_size(width, height, C.im_size)\n\n # resize the image so that smalles side is length = 600px\n x_img = cv2.resize(x_img, (resized_width, resized_height), interpolation=cv2.INTER_CUBIC)\n\n try:\n # rpn ground-truth cls, reg\n y_rpn_cls, y_rpn_regr = calc_rpn(C, img_data_aug, width, height, resized_width, resized_height, img_length_calc_function)\n except:\n continue\n\n # Zero-center by mean pixel, and preprocess image\n\n x_img = x_img[:, :, (2, 1, 0)] # BGR -> RGB\n x_img = x_img.astype(np.float32)\n x_img[:, :, 0] -= C.img_channel_mean[0]\n x_img[:, :, 1] -= C.img_channel_mean[1]\n x_img[:, :, 2] -= C.img_channel_mean[2]\n x_img /= C.img_scaling_factor\n\n x_img = np.transpose(x_img, (2, 0, 1))\n x_img = np.expand_dims(x_img, axis=0)\n\n y_rpn_regr[:, y_rpn_regr.shape[1]//2:, :, :] *= C.std_scaling\n\n if backend == 'tf':\n x_img = np.transpose(x_img, (0, 2, 3, 1))\n y_rpn_cls = np.transpose(y_rpn_cls, (0, 2, 3, 1))\n y_rpn_regr = np.transpose(y_rpn_regr, (0, 2, 3, 1))\n\n yield np.copy(x_img), [np.copy(y_rpn_cls), np.copy(y_rpn_regr)], img_data_aug\n\n except Exception as e:\n print(e)\n continue", "title": "" }, { "docid": "bba3480fd40824bf5373aa3897c8c732", "score": "0.5065274", "text": "def main_compare_two_simulations():\n #folder for storing result images\n img_folder = \"\"\n start_date = datetime(1980, 1, 1)\n end_date = datetime(2010, 12, 31)\n\n\n path0 = \"/skynet3_rech1/huziy/hdf_store/quebec_0.1_crcm5-hcd-rl.hdf5\"\n path2 = \"/skynet3_rech1/huziy/hdf_store/quebec_0.1_crcm5-hcd-rl-intfl_ITFS.hdf5\"\n\n exp_label = \"interflow_effect_soil\"\n\n img_folder = \"images_for_lake-river_paper\"\n img_folder = os.path.join(img_folder, exp_label)\n if not os.path.isdir(img_folder):\n os.makedirs(img_folder)\n\n\n rectangle = IndexRectangle(\n lower_left_point=IndexPoint(40, 20),\n width=100, height=45\n )\n\n params2 = InputParams(hdf_path = path2,\n is_for_comparison=True, start_date=start_date, end_date=end_date, rectangle=rectangle)\n\n params0 = InputParams(hdf_path=path0,\n is_for_comparison=True, start_date=start_date, end_date=end_date, rectangle=rectangle)\n\n imin, jmin, w, h = params0.get_start_end_indices_of_selected_region()\n\n i_sel, j_sel = np.where(params0.get_land_mask_using_flow_dirs())\n\n i_sel_1 = i_sel[(i_sel >= imin) & (i_sel < imin + w) & (j_sel >= jmin) & (j_sel < jmin + h)]\n j_sel_1 = j_sel[(i_sel >= imin) & (i_sel < imin + w) & (j_sel >= jmin) & (j_sel < jmin + h)]\n\n i_sel 
= i_sel_1\n j_sel = j_sel_1\n\n levs2d, dnums2d = None, None\n\n #plot the profile\n fig = plt.figure()\n gs = gridspec.GridSpec(len(params0.var_list), 2)\n\n #The number of levels of interest\n n_select_level = 5\n\n #calculate and plot differences\n for vindex, var_name in enumerate(params0.var_list):\n print(\"plotting {0} ...\".format(var_name))\n dates, levels, data2 = params2.calculate_mean_clim_for_3d_var(var_name=var_name)\n _, _, data0 = params0.calculate_mean_clim_for_3d_var(var_name=var_name)\n\n data = data2 - data0\n\n #calculate the profile\n selected_diff = data[:, :n_select_level, i_sel, j_sel]\n sel_data = (data0[:, :n_select_level, i_sel, j_sel] + data2[:, :n_select_level, i_sel, j_sel]) * 0.5\n selected_mean = np.zeros_like(sel_data)\n where_to_compute = np.abs(selected_diff) > 0\n selected_mean[where_to_compute] = selected_diff[where_to_compute] / sel_data[where_to_compute] * 100.0\n\n selected_mean = selected_mean.mean(axis=2)\n\n\n\n #rectangle subplot\n ax = fig.add_subplot(gs[:, 0])\n params0.basemap.drawcoastlines(linewidth=cpp.COASTLINE_WIDTH, ax=ax)\n ax.add_patch(params0.get_mpl_rectangle_for_selected_region())\n\n #profile subplot\n ax = fig.add_subplot(gs[vindex, 1])\n assert isinstance(ax, Axes)\n\n\n if levs2d is None:\n ax.set_ylabel(\"Depth (m)\")\n levels_meters = np.cumsum([0, ] + class_conf.level_width_list_26_default)[:-1][:n_select_level]\n dnums = date2num(dates)\n levs2d, dnums2d = np.meshgrid(levels_meters, dnums)\n\n\n\n\n vmin, vmax = selected_mean.min(), selected_mean.max()\n d = max(abs(vmin), abs(vmax))\n ncolors = 11\n cmap = cm.get_cmap(\"RdBu_r\", ncolors)\n color_levs = np.linspace(-d, d, ncolors + 1)\n\n step = color_levs[1] - color_levs[0]\n ndec = abs(floor(np.log10(step)))\n color_levs = np.round(color_levs, decimals=int(ndec))\n\n img = ax.contourf(dnums2d, levs2d, selected_mean, cmap = cmap, levels = color_levs)\n cb = plt.colorbar(img, ticks = color_levs[::2])\n cb.ax.set_aspect(10)\n\n ax.xaxis.set_major_formatter(DateFormatter(\"%d\\n%b\"))\n ax.xaxis.set_major_locator(MonthLocator(bymonth=list(range(1, 13, 2))))\n if vindex < len(params0.var_list) - 1:\n ax.xaxis.set_ticklabels([])\n ax.invert_yaxis()\n ax.yaxis.set_major_locator(MaxNLocator(nbins=5))\n\n #ax.grid(b = True)\n ax.annotate(infovar.get_display_label_for_var(var_name),\n xy = (0.8, 0.2), xycoords = \"axes fraction\",\n bbox = dict(facecolor = \"w\"))\n\n\n #construct the path to the output figure\n impath = os.path.join(img_folder, params0.get_imfilename_for_var(var_name = \"_\".join(params0.var_list)))\n\n #save the figure\n fig.savefig(impath, dpi=cpp.FIG_SAVE_DPI, bbox_inches = \"tight\")\n plt.close(fig)", "title": "" }, { "docid": "a9eeff51583020997eb37f3f303835aa", "score": "0.5057549", "text": "def print_images_in_statistics(self):\n self._print_images_statistics(self._images_in_folder, self._pose_class_names)", "title": "" }, { "docid": "5e8f4039f5272c096e3d3697fc040d37", "score": "0.5052959", "text": "def _compute_band_results(self, band):\n width = self._original_img[band].width\n height = self._original_img[band].height\n or_img_array = image_to_array(self._original_img[band])\n re_img_array = image_to_array(self._reconstructed_img[band])\n or_values = [int(or_img_array[x][y]) for x in range(0, width)\n for y in range(0, height)\n if self._target_region.is_target(x, y)]\n re_values = [int(re_img_array[x][y]) for x in range(0, width)\n for y in range(0, height)\n if self._target_region.is_target(x, y)]\n # for x in range(0, width):\n # for y in range(0, 
height):\n # if self._target_region.is_target(x, y):\n # or_values.append(or_img_array[x][y])\n # re_values.append(re_img_array[x][y])\n total_num = len(or_values)\n # or_array = numpy.array(or_values)\n # re_array = numpy.array(re_values)\n r_up, r_down_o, r_down_r, are_up = 0, 0, 0, 0\n or_mean = numpy.mean(or_values)\n re_mean = numpy.mean(re_values)\n for i in range(0, total_num):\n or_value = or_values[i]\n re_value = re_values[i]\n r_up = r_up + (or_value-or_mean)*(re_value-re_mean)\n r_down_o = r_down_o + (or_value-or_mean)**2\n r_down_r = r_down_r + (re_value-re_mean)**2\n if or_value == 0:\n or_value = 1\n are_up = are_up + abs(or_value-re_value)/or_value\n r = r_up/((r_down_o*r_down_r)**(1/2))\n are = (are_up/total_num)*100\n std_o = (r_down_o/total_num)**(1/2)\n std_r = (r_down_r/total_num)**(1/2)\n cov_or = r_up/total_num\n uiqi = (cov_or/(std_o*std_r)) *\\\n ((2*or_mean*re_mean)/(or_mean**2+re_mean**2)) *\\\n ((2*std_o*std_r)/(std_o**2+std_r**2))\n return BandStatisticsResult(or_values, re_values, r, are, uiqi)", "title": "" }, { "docid": "45e54e37076b61932b22bbb14f8cbc21", "score": "0.5050292", "text": "def run(self):\n i = 0\n for elm in self.listA:\n self.value = int(elm.split('\\\\')[1].split('_')[0])\n self.img = np.array(cv2.resize(cv2.imread(elm, 0), (imgSize,imgSize)))\n with rlock:\n data.append([self.img, self.value])", "title": "" }, { "docid": "ced1d5044f9910d0ff47e380a261831d", "score": "0.5047265", "text": "def process_images(self):\n base = self.img_dir + \"/\"\n bg_path = base + 'background.png'\n arm_path = base + 'arm.png'\n uncompressed_path = base + 'object.png'\n\n baxter_obj = BaxterExperiment(bg_path)\n baxter_obj.set_arm_image(arm_path)\n baxter_obj.set_uncompressed_image(uncompressed_path)\n\n print \"Uncompressed size: \" + str(baxter_obj.get_uncompressed_size())\n for i in range(999):\n path = base + \"compression\" + ('%03d' % i) + \".png\"\n if os.path.isfile(path):\n baxter_obj.set_compressed_image(path)\n else:\n break\n print \"Compressed size: \" + str(baxter_obj.get_compressed_size())\n\n baxter_obj.export_sizes(base + \"sizes.csv\")\n baxter_obj.display_results()", "title": "" }, { "docid": "188569b40f6a583be6aac8e141d2256c", "score": "0.504378", "text": "def grabIms(IMAGE_HEIGHT, IMAGE_WIDTH):\r\n print(\"Loading images...\")\r\n #The directory for all the images in the database\r\n images_all = [] \r\n for root, dirs, files in os.walk(\".\"):\r\n \r\n #Going through all the files \r\n for filename in files:\r\n \r\n try:\r\n \r\n #Opening and cropping\r\n im = Image.open(filename)\r\n width, height = im.size # Get dimensions\r\n left = (width - IMAGE_WIDTH)/2\r\n top = (height - IMAGE_HEIGHT)/2\r\n right = (width + IMAGE_WIDTH)/2\r\n bottom = (height + IMAGE_HEIGHT)/2\r\n \r\n im = im.crop((left, top, right, bottom))\r\n \r\n #Adding images to the list \r\n images_all.append(im)\r\n except:\r\n pass\r\n \r\n\r\n\r\n\r\n #Creating one big array\r\n # \r\n X = np.vstack(images_all).reshape([-1,IMAGE_HEIGHT,IMAGE_WIDTH,3])\r\n X = X/np.max(X) #Scaling, just in case\r\n \r\n #Splitting into training and validation sets\r\n Xtr = X[0:int(0.8*len(X))]\r\n x_val = X[int(0.8*len(X)):]\r\n return Xtr, x_val", "title": "" }, { "docid": "a084d161965f44bbb891ae3b9cae912b", "score": "0.50375664", "text": "def get_image_pairs(similarity: Dict[str, List[Tuple[str, float]]],\n topk: Optional[int] = None) -> List[Tuple[str, str, float]]:\n image_pairs = []\n for query_image_name, images_to_match in sorted(similarity.items()):\n k = 0\n for 
mapping_image_name, score in sorted(images_to_match, key=lambda x: x[1], reverse=True):\n if topk is not None and k >= topk:\n break\n # don't match image with itself\n if query_image_name == mapping_image_name:\n continue\n image_pairs.append([query_image_name, mapping_image_name, score])\n k += 1\n return image_pairs", "title": "" }, { "docid": "46b60ee149326e3dade26e0bdce2d640", "score": "0.50367826", "text": "def _watch_images(self, tag: str, grid_size: tuple = (3, 3), shuffle=False, save_file=True):\n self.watcher.image(self.fake,\n self.current_epoch,\n tag=\"%s/fake\" % tag,\n grid_size=grid_size,\n shuffle=shuffle,\n save_file=save_file)\n self.watcher.image(self.ground_truth,\n self.current_epoch,\n tag=\"%s/real\" % tag,\n grid_size=grid_size,\n shuffle=shuffle,\n save_file=save_file)", "title": "" }, { "docid": "d452ae6ea4eebf6ee0290344561c320a", "score": "0.5036608", "text": "def image_process(image_list):\n for i in range(len(image_list)):\n for j in range(i + 1, len(image_list)):\n if not image_compare(image_list[i], image_list[j]):\n print \"%s is same as %s\" % (image_list[i], image_list[j])", "title": "" }, { "docid": "30af372f736ce0f9a5f2d3f53a5149c7", "score": "0.503556", "text": "def calc_run_stats(path_to_confounds, high_std_dvars_thresh = 1.5, high_motion_thresh = 0.5):\n\n confounds_df = pd.read_csv(path_to_confounds, delimiter='\\t')\n output_dict = {}\n\n\n output_dict['mean_gs'] = np.nanmean(confounds_df['global_signal'].values)\n output_dict['mean_wm'] = np.nanmean(confounds_df['white_matter'].values)\n output_dict['mean_csf'] = np.nanmean(confounds_df['csf'].values)\n output_dict['mean_std_dvars'] = np.nanmean(confounds_df['std_dvars'].values)\n output_dict['num_high_std_dvars_tps'] = np.where(confounds_df['std_dvars'] > high_std_dvars_thresh)[0].shape[0]\n output_dict['max_std_dvars'] = np.nanmax(confounds_df['std_dvars'].values)\n output_dict['mean_dvars'] = np.nanmean(confounds_df['dvars'].values)\n output_dict['mean_fd'] = np.nanmean(confounds_df['framewise_displacement'].values)\n output_dict['num_high_motion_tps'] = np.where(confounds_df['framewise_displacement'] > high_motion_thresh)[0].shape[0]\n output_dict['max_fd'] = np.nanmax(confounds_df['framewise_displacement'].values)\n\n\n #Now calculate some metrics that need the image loaded....\n confounds_beginning = path_to_confounds[:-len('desc-confounds_regressors.tsv')]\n reference_img_path = confounds_beginning + 'space-T1w_boldref.nii.gz'\n aparcaseg_img_path = confounds_beginning + 'space-T1w_desc-aparcaseg_dseg.nii.gz'\n brainmask_img_path = confounds_beginning + 'space-T1w_desc-brain_mask.nii.gz'\n\n reference_img_data = nib_load(reference_img_path).get_fdata()\n aparcaseg_img_path = nib_load(aparcaseg_img_path).get_fdata()\n brainmask_img_path = nib_load(brainmask_img_path).get_fdata()\n\n\n #local_dev_ratio, brainmask_var_component_ratio, gm_skin_1dil_var_component_ratio = batch_calc_alignment_metrics(reference_img_data, aparcaseg_img_path, brainmask_img_path)\n\n #output_dict['local_dev_ratio'] = local_dev_ratio\n #output_dict['brainmask_var_component_ratio'] = brainmask_var_component_ratio\n #output_dict['gm_skin_1dil_var_component_ratio'] = gm_skin_1dil_var_component_ratio\n\n\n return output_dict", "title": "" }, { "docid": "906b5bd228afafd1dff5a2b408251da9", "score": "0.5022917", "text": "def _get_image_blob_s6_0(roidb,roidb_noclass1):\n\n\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n scale_inds = np.random.randint(\n 0, high=len(cfg.TRAIN.SCALES), 
size=num_images\n )\n processed_ims = []\n im_scales = []\n error_flag = [0,0]\n\n for i in range(num_images):\n roidb_noclass = roidb_noclass1.copy()\n if roidb[i][u'image'].split('/')[-1]==u'test.jpg':\n random_bbox = dict()\n random_bbox['kernel_size'] = 224\n random_bbox['tl_x'] = 0\n random_bbox['tl_y'] = 0\n x0 = random_bbox['tl_x']\n x1 = random_bbox['tl_x'] + random_bbox['kernel_size']\n y0 = random_bbox['tl_y']\n y1 = random_bbox['tl_y'] + random_bbox['kernel_size']\n im = cv2.imread(roidb[i]['image'])[y0:y1, x0:x1]\n im = cv2.resize(im,(WIDTH,HEIGHT))\n #cv2.imwrite('/home/icubic/aa.png',im)\n error_flag[i] = 0\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n else:\n if 1:\n real_class = []#roidb[i]['gt_classes'][0]\n num_real_class = len(roidb[i]['gt_classes'])\n\n random_bbox = dict()\n random_bbox['kernel_size'] = 224\n random_bbox['tl_x'] = random.randint(0, 800)\n random_bbox['tl_y'] = random.randint(0, 800)\n x0 = random_bbox['tl_x']\n x1 = random_bbox['tl_x'] + random_bbox['kernel_size']\n y0 = random_bbox['tl_y']\n y1 = random_bbox['tl_y'] + random_bbox['kernel_size']\n im = cv2.imread(roidb[i]['image'])[y0:y1, x0:x1]\n im = cv2.resize(im, (WIDTH, HEIGHT))\n sum_inside_overlaps = 0\n boxes_inside_overlaps = []\n\n for i_roidb,sub_boxes in enumerate(roidb[i][u'boxes']):\n crop_x0 = int(sub_boxes[0])\n crop_y0 = int(sub_boxes[1])\n crop_x1 = int(sub_boxes[2])\n crop_y1 = int(sub_boxes[3])\n #real_x0 = float(crop_x0 - x0)*1024/224 # float(crop_x0) / 1024 * 224\n #real_y0 = float(crop_y0 - y0)*1024/224 # float(crop_y0) / 1024 * 224\n #real_x1 = float(crop_x1 - x0)*1024/224 # float(crop_x1) / 1024 * 224\n #real_y1 = float(crop_y1 - y0)*1024/224\n\n\n overlaps_rate = solve_coincide((x0, y0, x1, y1), (crop_x0, crop_y0, crop_x1, crop_y1))\n if overlaps_rate>0.9:\n sum_inside_overlaps = sum_inside_overlaps + 1\n #real_x0 = crop_x0 - x0 # float(crop_x0) / 1024 * 224\n #real_y0 = crop_y0 - y0 # float(crop_y0) / 1024 * 224\n #real_x1 = crop_x1 - x0 # float(crop_x1) / 1024 * 224\n #real_y1 = crop_y1 - y0\n real_x0 = float(crop_x0 - x0)*WIDTH/224 # float(crop_x0) / 1024 * 224\n real_y0 = float(crop_y0 - y0)*HEIGHT/224 # float(crop_y0) / 1024 * 224\n real_x1 = float(crop_x1 - x0)*WIDTH/224 # float(crop_x1) / 1024 * 224\n real_y1 = float(crop_y1 - y0)*HEIGHT/224\n if real_x0<0:\n real_x0 = 0\n if real_x0>WIDTH:\n real_x0 = WIDTH\n\n if real_x1<0:\n real_x1 = 0\n if real_x1>WIDTH:\n real_x1 = WIDTH\n\n if real_y0<0:\n real_y0 = 0\n if real_y0>HEIGHT:\n real_y0 = HEIGHT\n\n if real_y1<0:\n real_y1 = 0\n if real_y1>HEIGHT:\n real_y1 = HEIGHT\n\n\n boxes_inside_overlaps.append([real_x0, real_y0, real_x1, real_y1])\n real_class.append(roidb[i]['gt_classes'][i_roidb])\n #cv2.rectangle(im, (int(real_x0), int(real_y0)),\n #(int(real_x1), int(real_y1)), (255, 0, 255))\n #cv2.imwrite('/home/icubic/daily_work/code/circruit/new/result/uu.png', im)\n #a = roidb[i]['gt_overlaps'].toarray()\n\n if sum_inside_overlaps>0:\n num_valid_objs = sum_inside_overlaps*1\n boxes = np.zeros((num_valid_objs, 4), dtype=np.float32)\n gt_classes = np.zeros((num_valid_objs), dtype=np.int32)\n gt_overlaps = np.zeros((num_valid_objs, 3), dtype=np.float32)\n box_to_gt_ind_map = np.zeros((num_valid_objs), dtype=np.int32)\n is_crowd = np.zeros((num_valid_objs), dtype=np.bool)\n for ix in range(num_valid_objs):\n gt_classes[ix] = real_class[ix]#real_class*1\n try:\n gt_overlaps[ix, real_class] = 1.0\n except:\n print('error')\n is_crowd[ix] = False\n box_to_gt_ind_map[ix] = ix\n 
for i_index in range(4):\n boxes[ix,i_index] = boxes_inside_overlaps[ix][i_index]\n\n #for ix in range(num_valid_objs):\n #box_to_gt_ind_map[ix] = ix\n #cls = real_class*1\n roidb_noclass['boxes'] = np.append(roidb_noclass['boxes'], boxes, axis=0)\n\n roidb_noclass['gt_classes'] = np.append(roidb_noclass['gt_classes'], gt_classes)\n #mm = np.append(\n # roidb_noclass['gt_overlaps'].toarray(), gt_overlaps,axis=0)\n roidb_noclass['gt_overlaps'] = np.append(\n roidb_noclass['gt_overlaps'].toarray(), gt_overlaps)\n roidb_noclass['gt_overlaps'] = scipy.sparse.csr_matrix(roidb_noclass['gt_overlaps'])\n #mm = np.append(mm, gt_overlaps, axis=0)\n #roidb_noclass['gt_overlaps'] = scipy.sparse.csr_matrix(mm)\n roidb_noclass['is_crowd'] = np.append(roidb_noclass['is_crowd'], is_crowd)\n roidb_noclass['box_to_gt_ind_map'] = np.append(roidb_noclass['box_to_gt_ind_map'], box_to_gt_ind_map)\n\n gt_overlaps = roidb_noclass['gt_overlaps'].toarray()\n # max overlap with gt over classes (columns)\n max_overlaps = gt_overlaps.max(axis=1)\n # gt class that had the max overlap\n max_classes = gt_overlaps.argmax(axis=1)\n roidb_noclass['max_classes'] = max_classes\n roidb_noclass['max_overlaps'] = max_overlaps\n # sanity checks\n # if max overlap is 0, the class must be background (class 0)\n zero_inds = np.where(max_overlaps == 0)[0]\n assert all(max_classes[zero_inds] == 0)\n # if max overlap > 0, the class must be a fg class (not class 0)\n nonzero_inds = np.where(max_overlaps > 0)[0]\n assert all(max_classes[nonzero_inds] != 0)\n roidb_noclass['bbox_targets'] = compute_bbox_regression_targets(roidb_noclass)\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n\n else:\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = HEIGHT\n roidb[i][u'width'] = WIDTH\n\n\n if 0:\n if sum_inside_overlaps==0:\n roidb[i] = roidb_noclass['0'].copy()\n roidb[i][u'height'] = 1024\n roidb[i][u'width'] = 1024\n if sum_inside_overlaps==1:\n num_valid_objs = 1\n roidb[i] = roidb_noclass['1'].copy()\n a = roidb[i]['gt_overlaps'].toarray()\n\n #for i_inside in enumerate(sum_inside_overlaps)\n\n if sum_inside_overlaps==2:\n num_valid_objs = 2\n roidb[i] = roidb_noclass['2'].copy()\n a = roidb[i]['gt_overlaps'].toarray()\n\n if sum_inside_overlaps==3:\n num_valid_objs = 3\n roidb[i] = roidb_noclass['3'].copy()\n a = roidb[i]['gt_overlaps'].toarray()\n\n if 0:\n crop_x0 = int(roidb[i][u'boxes'][0][0])\n crop_y0 = int(roidb[i][u'boxes'][0][1])\n crop_x1 = int(roidb[i][u'boxes'][0][2])\n crop_y1 = int(roidb[i][u'boxes'][0][3])\n crop_w = crop_x1 - crop_x0\n crop_h = crop_y1 - crop_y0\n random_bbox = dict()\n random_bbox['kernel_size'] = 224\n random_bbox['tl_x'] = random.randint(0, 800)\n random_bbox['tl_y'] = random.randint(0, 800)\n x0 = random_bbox['tl_x']\n x1 = random_bbox['tl_x'] + random_bbox['kernel_size']\n y0 = random_bbox['tl_y']\n y1 = random_bbox['tl_y'] + random_bbox['kernel_size']\n #real_x0 = crop_x0-x0#float(crop_x0) / 1024 * 224\n #real_y0 = crop_y0-y0#float(crop_y0) / 1024 * 224\n #real_x1 = 1024#float(crop_x1) / 1024 * 224\n #real_y1 = 1024#float(crop_y1) / 1024 * 224\n overlaps_rate = solve_coincide((x0,y0,x1,y1),(crop_x0,crop_y0,crop_x1,crop_y1))\n im = cv2.imread(roidb[i]['image'])[y0:y1, x0:x1]\n #im = cv2.resize(im, (1024, 1024))\n if overlaps_rate>0.9:\n real_x0 = crop_x0 - x0 # float(crop_x0) / 1024 * 224\n real_y0 = crop_y0 - y0 # float(crop_y0) / 1024 * 224\n real_x1 = crop_x1 - x0 # float(crop_x1) / 1024 * 224\n real_y1 = crop_y1 - y0\n roidb[i][u'boxes'][0][0] 
= real_x0\n roidb[i][u'boxes'][0][1] = real_y0\n roidb[i][u'boxes'][0][2] = real_x1\n roidb[i][u'boxes'][0][3] = real_y1\n roidb[i][u'height'] = 224\n roidb[i][u'width'] = 224\n error_flag[i] = 1\n #cv2.imwrite('/home/icubic/daily_work/code/Detectron/detectron/datasets/data/s6_test/aa.png',im)\n else:\n roidb[i] = roidb_noclass.copy()\n roidb[i][u'height'] = 224\n roidb[i][u'width'] = 224\n error_flag[i] = 0\n #print('aa')\n\n\n\n\n assert im is not None, \\\n 'Failed to read image \\'{}\\''.format(roidb[i]['image'])\n if roidb[i]['flipped']:\n im = im[:, ::-1, :]\n target_size = cfg.TRAIN.SCALES[scale_inds[i]]\n im, im_scale = blob_utils.prep_im_for_blob(\n im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE\n )\n im_scales.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = blob_utils.im_list_to_blob(processed_ims)\n\n return blob, im_scales,error_flag", "title": "" }, { "docid": "1e7dcc69505daafab81ed7658412ea03", "score": "0.50228405", "text": "def eval_on_images(shading_image_arr, pixel_labels_dir, thres_list, photo_id, bl_filter_size, img_dir):\n\n shading_image_linear_grayscale = shading_image_arr\n shading_image_linear_grayscale[shading_image_linear_grayscale < 1e-4] = 1e-4\n shading_image_linear_grayscale = np.log(shading_image_linear_grayscale)\n\n shading_gradmag = saw_utils.compute_gradmag(shading_image_linear_grayscale)\n shading_gradmag = np.abs(shading_gradmag)\n\n if bl_filter_size:\n shading_gradmag_max = maximum_filter(shading_gradmag, size=bl_filter_size)\n\n # We have the following ground truth labels:\n # (0) normal/depth discontinuity non-smooth shading (NS-ND)\n # (1) shadow boundary non-smooth shading (NS-SB)\n # (2) smooth shading (S)\n # (100) no data, ignored\n y_true = saw_utils.load_pixel_labels(pixel_labels_dir=pixel_labels_dir, photo_id=photo_id)\n \n img_path = img_dir+ str(photo_id) + \".png\"\n\n # diffuclut and harder dataset\n srgb_img = saw_utils.load_img_arr(img_path)\n srgb_img = np.mean(srgb_img, axis = 2)\n img_gradmag = saw_utils.compute_gradmag(srgb_img)\n\n smooth_mask = (y_true == 2)\n average_gradient = np.zeros_like(img_gradmag)\n # find every connected component\n labeled_array, num_features = label(smooth_mask)\n for j in range(1, num_features+1):\n # for each connected component, compute the average image graident for the region\n avg = np.mean(img_gradmag[labeled_array == j])\n average_gradient[labeled_array == j] = avg\n\n average_gradient = np.ravel(average_gradient)\n\n y_true = np.ravel(y_true)\n ignored_mask = y_true > 99\n\n # If we don't have labels for this photo (so everything is ignored), return\n # None\n if np.all(ignored_mask):\n return [None] * len(thres_list)\n\n ret = []\n for thres in thres_list:\n y_pred = (shading_gradmag < thres).astype(int)\n y_pred_max = (shading_gradmag_max < thres).astype(int)\n y_pred = np.ravel(y_pred)\n y_pred_max = np.ravel(y_pred_max)\n # Note: y_pred should have the same image resolution as y_true\n assert y_pred.shape == y_true.shape\n\n # confusion_matrix = saw_utils.grouped_confusion_matrix(y_true[~ignored_mask], y_pred[~ignored_mask], y_pred_max[~ignored_mask])\n confusion_matrix = saw_utils.grouped_weighted_confusion_matrix(y_true[~ignored_mask], y_pred[~ignored_mask], y_pred_max[~ignored_mask], average_gradient[~ignored_mask])\n ret.append(confusion_matrix)\n\n return ret", "title": "" }, { "docid": "af85b37476af24b5d497bc364b95ee99", "score": "0.50200456", "text": "def _run_inference(self, dataset, summary, threshod=0.5):\n imageid_labels = 
{}\n ds.config.set_seed(58)\n self._count = 0\n for j, next_element in enumerate(dataset):\n now = time()\n inputs, labels, _ = self._unpack_next_element(next_element)\n prob = self._model(inputs).asnumpy()\n for idx, inp in enumerate(inputs):\n gt_labels = labels[idx]\n gt_probs = [float(prob[idx][i]) for i in gt_labels]\n\n data_np = _convert_image_format(np.expand_dims(inp.asnumpy(), 0), 'NCHW')\n _, _, _, image_string = _make_image(_normalize(data_np))\n\n predicted_labels = [int(i) for i in (prob[idx] > threshod).nonzero()[0]]\n predicted_probs = [float(prob[idx][i]) for i in predicted_labels]\n\n union_labs = list(set(gt_labels + predicted_labels))\n imageid_labels[str(self._count)] = union_labs\n\n explain = Explain()\n explain.image_id = str(self._count)\n explain.image_data = image_string\n summary.add_value(\"explainer\", \"image\", explain)\n\n explain = Explain()\n explain.image_id = str(self._count)\n explain.ground_truth_label.extend(gt_labels)\n explain.inference.ground_truth_prob.extend(gt_probs)\n explain.inference.predicted_label.extend(predicted_labels)\n explain.inference.predicted_prob.extend(predicted_probs)\n summary.add_value(\"explainer\", \"inference\", explain)\n\n summary.record(1)\n\n self._count += 1\n print(\"Finish running and writing {}-th batch inference data. Time elapsed: {}s\".format(j, time() - now))\n return imageid_labels", "title": "" }, { "docid": "15ee694492e60325df798848b0a7d74f", "score": "0.5014322", "text": "def CNN_query(model,\n expr,\n pool_inds,\n method_name,\n session,\n col=True,\n extra_feed_dict={}):\n \n k = expr.pars['k']\n B = expr.pars['B']\n lambda_ = expr.pars['lambda_']\n batch_size = expr.pars['batch_size']\n\n if method_name=='egl':\n # uncertainty filtering\n print(\"Uncertainty filtering...\")\n posteriors = NNAL_tools.batch_posteriors(\n model, pool_inds, \n img_path_list, \n batch_size, \n session, col, extra_feed_dict)\n \n if B < posteriors.shape[1]:\n sel_inds = NNAL_tools.uncertainty_filtering(posteriors, B)\n sel_posteriors = posteriors[:, sel_inds]\n else:\n B = posteriors.shape[1]\n sel_posteriors = posteriors\n sel_inds = np.arange(B)\n\n # EGL scoring\n print(\"Computing the scores..\")\n c = posteriors.shape[0]\n scores = np.zeros(B)\n T = len(model.grad_log_posts['0'])\n for i in range(B):\n # gradients of samples one-by-one\n feed_dict = {model.x:np.expand_dims(\n pool_X[sel_inds[i],:,:,:], \n axis=0)}\n feed_dict.update(extra_feed_dict)\n \n if c < 20:\n grads = session.run(\n model.grad_log_posts, \n feed_dict=feed_dict)\n sel_classes = np.arange(c)\n else:\n # if the number of classes is large,\n # compute gradients of the largest twenty\n # posteriors\n sel_classes = np.argsort(\n -sel_posteriors[:,i])[:10]\n sel_classes_grads = {\n str(cc): model.grad_log_posts[str(cc)]\n for cc in sel_classes\n }\n grads = session.run(sel_classes_grads, \n feed_dict=feed_dict)\n \n for j in range(len(sel_classes)):\n class_score = 0. 
\n for t in range(T):\n class_score += np.sum(\n grads[str(sel_classes[j])][t]**2)\n scores[i] += class_score*sel_posteriors[\n sel_classes[j],i]\n if not(i%10):\n print(i, end=',')\n\n # select the highest k scores\n Q_inds = sel_inds[np.argsort(-scores)[:k]]\n\n elif method_name=='random':\n n = len(pool_inds)\n Q_inds = np.random.permutation(n)[:k]\n \n elif method_name=='entropy':\n # computing the posteriors\n posteriors = NNAL_tools.idxBatch_posteriors(\n model, \n pool_inds,\n expr,\n session, \n col, \n extra_feed_dict)\n \n # entropies \n entropies = NNAL_tools.compute_entropy(posteriors)\n Q_inds = np.argsort(-entropies)[:k]\n \n elif method_name=='fi':\n # uncertainty filtering\n print(\"Uncertainty filtering...\", end='\\n\\t')\n\n posteriors = NNAL_tools.idxBatch_posteriors(\n model, \n pool_inds, \n expr, \n session, \n col, \n extra_feed_dict)\n \n # uncertainty filtering\n if B < posteriors.shape[1]:\n sel_inds = NNAL_tools.uncertainty_filtering(\n posteriors, B)\n sel_posteriors = posteriors[:, sel_inds]\n else:\n B = posteriors.shape[1]\n sel_posteriors = posteriors\n sel_inds = np.arange(B)\n \n # forming A-matrices\n # division by two in computing size of A is because \n # in each layer we have gradients with respect to\n # weights and bias terms --> number of layers that\n # are considered is obtained after dividing by 2\n A_size = int(\n len(model.grad_posts['0'])/2)\n c,n = posteriors.shape\n\n A = []\n # load an images\n # indices: sel_inds --> pool_inds\n # CAUTIOUS: this will give an error if the selected\n # indices in `sel_inds` contains only one index.\n sel_X, _ = NN.load_winds(\n pool_inds[sel_inds],\n expr.imgs_path_file,\n expr.pars['target_shape'],\n expr.pars['mean'])\n \n for i in range(B):\n X_i = sel_X[i,:,:,:]\n feed_dict = {\n model.x:np.expand_dims(X_i, axis=0)}\n feed_dict.update(extra_feed_dict)\n\n # remove zero, or close-to-zero posteriors\n x_posterior = sel_posteriors[:,i]\n x_posterior[x_posterior<1e-6] = 0.\n nz_classes = np.where(x_posterior > 0.)[0]\n nz_posts = x_posterior[nz_classes] / np.sum(\n x_posterior[nz_classes])\n nz_classes_grads = {\n str(cc): model.grad_posts[str(cc)]\n for cc in nz_classes}\n \n # computing the gradients\n # grads={ '0': dP(y=0|x)/dtheta, \n # '1': dP(y=1|x)/dtheta, \n # etc }\n # if there are too many classes, \n # grads={ 'c0': dP(y=c0|x)/dtheta, \n # 'c1': dP(y=c1|x)/dtheta, \n # etc }\n # where {c0,c1,etc} are classes with largest\n # posteriors for x.\n # \n if len(nz_classes) < 10:\n grads = session.run(nz_classes_grads, \n feed_dict=feed_dict)\n sel_classes = nz_classes\n new_posts = nz_posts\n else:\n # if the number of classes is large,\n # compute gradients of few classes with \n # largest posteriors only\n sel_nz_classes = np.argsort(-nz_posts)[:10]\n sel_classes = nz_classes[sel_nz_classes]\n sel_classes_grads = {\n str(cc): nz_classes_grads[str(cc)]\n for cc in sel_classes}\n # normalizing posteriors of the selected classes\n new_posts = nz_posts[sel_nz_classes]\n new_posts /= np.sum(new_posts)\n # gradients for the selected classes\n grads = session.run(sel_classes_grads, \n feed_dict=feed_dict)\n\n Ai = np.zeros((A_size, A_size))\n \n for j in range(len(sel_classes)):\n shrunk_grad = NNAL_tools.shrink_gradient(\n grads[str(sel_classes[j])], 'sum')\n Ai += np.outer(shrunk_grad, \n shrunk_grad) / new_posts[j] \\\n + np.eye(A_size)*1e-5\n\n if not(i%10):\n print(i, end=',')\n \n A += [Ai]\n \n # extracting features for pool samples\n # using only few indices of the features\n F = 
model.extract_features(pool_inds[sel_inds], \n expr,session)\n # selecting from those features that have the most\n # non-zero values among the selected samples\n nnz_feats = np.sum(F>0, axis=1)\n feat_inds = np.argsort(-nnz_feats)[:int(B/2)]\n F_sel = F[feat_inds,:]\n # taking care of the rank\n while np.linalg.matrix_rank(F_sel)<len(feat_inds):\n # if the matrix is not full row-rank, discard\n # the last selected index (worst among all)\n feat_inds = feat_inds[:-1]\n F_sel = F[feat_inds,:]\n if len(feat_inds) < 10:\n warnings.warn(\n \"Few features (%d) are selected\"% (\n len(feat_inds)))\n \n # taking care of the conditional number\n while np.linalg.cond(F_sel) > 1e6:\n feat_inds = feat_inds[:-1]\n F_sel = F[feat_inds,:]\n if len(feat_inds)==1:\n lambda_=0\n break\n \n #pdb.set_trace()\n # subtracting the mean\n F_sel -= np.repeat(np.expand_dims(\n np.mean(F_sel, axis=1),\n axis=1), B, axis=1)\n \n print('Cond. #: %f'% (np.linalg.cond(F_sel)),\n end='\\n\\t')\n print('# selected features: %d'% \n (len(feat_inds)), end='\\n\\t')\n # SDP\n print('Solving SDP..',end='\\n\\t')\n soln = NNAL_tools.SDP_query_distribution(\n A, lambda_, F_sel, k)\n print('status: %s'% (soln['status']), end='\\n\\t')\n q_opt = np.array(soln['x'][:B])\n \n # sampling from the optimal solution\n Q_inds = NNAL_tools.sample_query_dstr(\n q_opt, k, replacement=True)\n Q_inds = sel_inds[Q_inds]\n \n elif method_name=='rep-entropy':\n # uncertainty filtering\n print(\"Uncertainty filtering...\")\n posteriors = NNAL_tools.idxBatch_posteriors(\n model, \n pool_inds, \n expr, \n session, \n col, \n extra_feed_dict)\n \n if B < posteriors.shape[1]:\n sel_inds = NNAL_tools.uncertainty_filtering(\n posteriors, B)\n sel_posteriors = posteriors[:, sel_inds]\n else:\n B = posteriors.shape[1]\n sel_posteriors = posteriors\n sel_inds = np.arange(B)\n \n n = len(pool_inds)\n rem_inds = list(set(np.arange(n)) - set(sel_inds))\n \n print(\"\\t Finding Similarities..\", end='\\n\\t')\n # extract the features for all the pool\n # sel_inds, rem_inds --> pool_inds\n F = model.extract_features(pool_inds, \n expr,\n session)\n F_uncertain = F[:, sel_inds]\n norms_uncertain = np.sqrt(np.sum(F_uncertain**2, axis=0))\n F_rem_pool = F[:, rem_inds]\n norms_rem = np.sqrt(np.sum(F_rem_pool**2, axis=0))\n \n # compute cos-similarities between filtered images\n # and the rest of the unlabeled samples\n dots = np.dot(F_rem_pool.T, F_uncertain)\n norms_outer = np.outer(norms_rem, norms_uncertain)\n sims = dots / norms_outer\n \n print(\"Greedy optimization..\", end='\\n\\t')\n # start from empty set\n Q_inds = []\n nQ_inds = np.arange(B)\n # add most representative samples one by one\n for i in range(k):\n rep_scores = np.zeros(B-i)\n for j in range(B-i):\n cand_Q = Q_inds + [nQ_inds[j]]\n rep_scores[j] = np.sum(\n np.max(sims[:, cand_Q], axis=1))\n iter_sel = nQ_inds[np.argmax(rep_scores)]\n # update the iterating sets\n Q_inds += [iter_sel]\n nQ_inds = np.delete(\n nQ_inds, np.argmax(rep_scores))\n \n Q_inds = sel_inds[Q_inds]\n\n return Q_inds", "title": "" }, { "docid": "381f28a58bcc7de667f41d9f809b338d", "score": "0.5012707", "text": "def read_image_statistic():\n df = pd.read_excel(IMAGE_STATISTIC_PATH)\n log.info(df.head().to_string())\n df.head(5)\n\n df = df.astype({'image_name': 'str', 'perspective': 'str', 'full_pig_face': 'int'})\n df['perspective'] = pd.Categorical(df.perspective)\n df.info()\n\n index = df.index\n log.info('Image Count all:' + str(len(index)))\n return df", "title": "" }, { "docid": 
"da8db3f458dca44837d5fdabafc58709", "score": "0.5011885", "text": "def evaluate(self, model):\n model.eval()\n cuda = torch.cuda.is_available()\n Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n ids = []\n data_dict = []\n dataiterator = iter(self.dataloader)\n #print(\" Val datasets number is : {}\".format(len(self.dataloader)))\n for i in tqdm(range(len(self.dataloader))):\n #while True:\n #try:\n img, _, info_img, id_, img_path = next(dataiterator) # load a batch\n #except StopIteration:\n #break\n info_img = [float(info.numpy()) for info in info_img]\n id_ = int(id_)\n ids.append(id_)\n with torch.no_grad():\n img = Variable(img.type(Tensor))\n start_time = current_milli_time()\n _,outputs = model(img)\n self.inference_time += (current_milli_time() - start_time)\n outputs=outputs.unsqueeze(0)\n outputs = postprocess(\n outputs, cfg.DATA[\"NUM\"], self.confthre, self.nmsthre)\n if outputs[0] is None:\n continue\n outputs = outputs[0].cpu().data\n\n for output in outputs:\n x1 = float(output[0])\n y1 = float(output[1])\n x2 = float(output[2])\n y2 = float(output[3])\n label = self.dataset.class_ids[int(output[6])]\n box = box2label((y1, x1, y2, x2), info_img)\n bbox = [box[1], box[0], box[3] - box[1], box[2] - box[0]]\n score = float(output[4].data.item() * output[5].data.item()) # object score * class score\n A = {\"image_id\": id_, \"category_id\": label, \"bbox\": bbox,\n \"score\": score, \"segmentation\": []} # COCO json format\n data_dict.append(A)\n\n if self.__visual_imgs and i <= self.__visual_imgs:\n imgshow = cv2.imread(img_path[0])\n bboxes_prd = Evaluator(model).get_bbox(imgshow, cfg.TEST[\"MULTI_SCALE_TEST\"], cfg.TEST[\"FLIP_TEST\"])\n if bboxes_prd.shape[0] != 0:\n boxes = bboxes_prd[..., :4]\n class_inds = bboxes_prd[..., 5].astype(np.int32)\n scores = bboxes_prd[..., 4]\n visualize_boxes(image=imgshow, boxes=boxes, labels=class_inds, probs=scores, class_labels=self.classes)\n path = os.path.join(self.pred_result_path, \"imgs/{}.jpg\".format(i))\n cv2.imwrite(path, imgshow)\n\n\n annType = ['segm', 'bbox', 'keypoints']\n self.inference_time = 1.0 * self.inference_time / len(self.dataloader)\n # Evaluate the Dt (detection) json comparing with the ground truth\n if len(data_dict) > 0:\n cocoGt = self.dataset.coco\n _, tmp = tempfile.mkstemp()\n json.dump(data_dict, open(tmp, 'w'))\n cocoDt = cocoGt.loadRes(tmp)\n cocoEval = COCOeval(self.dataset.coco, cocoDt, annType[1])\n cocoEval.params.imgIds = ids\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n\n '''\n # ----------pltshow------------- #\n # precision[t,:,k,a,m] PR curves recall-precision value\n # T:IoU thresh.5-.95, gap=0.05, t[0]=0.5,t[1]=0.55,t[2]=0.6,t[3]=0.65,t[4]=0.7,t[5]=0.75 ……,t[9]=0.95\n # R:101 recall thresh,0-101\n # K:class k[0] = person,k[1] = bycicle,.....COCO\n # A:area, a[0]=all,a[1]=small,a[2]=medium,a[3]=large\n # M:Maxdet m[0]=1,m[1]=10,m[2]=100\n\n #C75: PR at IoU=.75 (AP at strict IoU), area under curve corresponds to APIoU=.75 metric.\n #C50: PR at IoU=.50 (AP at PASCAL IoU), area under curve corresponds to APIoU=.50 metric.\n #Loc: PR at IoU=.10 (localization errors ignored, but not duplicate detections). All remaining settings use IoU=.1.\n #Sim: PR after supercategory false positives (fps) are removed. Specifically, any matches to objects with a different class label but that belong to the same supercategory don't count as either a fp (or tp). 
Sim is computed by setting all objects in the same supercategory to have the same class label as the class in question and setting their ignore flag to 1. Note that person is a singleton supercategory so its Sim result is identical to Loc.\n #Oth: PR after all class confusions are removed. Similar to Sim, except now if a detection matches any other object it is no longer a fp (or tp). Oth is computed by setting all other objects to have the same class label as the class in question and setting their ignore flag to 1.\n #BG: PR after all background (and class confusion) fps are removed. For a single category, BG is a step function that is 1 until max recall is reached then drops to 0 (the curve is smoother after averaging across categories).\n #FN: PR after all remaining errors are removed (trivially AP=1).\n\n pr_array1 = cocoEval.eval['precision'][0, :, 0, 0, 2]\n pr_array2 = cocoEval.eval['precision'][5, :, 0, 0, 2]\n #pr_array3 = cocoEval.eval['precision'][6, :, 0, 0, 2]\n #pr_array4 = cocoEval.eval['precision'][9, :, 0, 0, 2]\n x = np.arange(0.0, 1.01, 0.01)\n # x_1 = np.arange(0, 1.01, 0.111)\n plt.xlabel('IoU')\n plt.ylabel('precision')\n plt.xlim(0, 1.0)\n plt.ylim(0, 1.01)\n plt.grid(True)\n plt.plot(x, pr_array1, color='blue', linewidth = '3', label='IoU=0.5')\n plt.plot(x, pr_array2, color='green', linewidth = '3', label='IoU=0.75')\n plt.title(\"P-R curves catid=person maxDet=100\")\n plt.legend(loc=\"lower left\")\n plt.savefig(\"../prediction/APs.png\", dpi=600)\n # plt.show()'''\n return cocoEval.stats[0], cocoEval.stats[1], self.inference_time\n else:\n return 0, 0, 0", "title": "" }, { "docid": "13ac0cdc301df18694dfaf0cff9009e8", "score": "0.500821", "text": "def acquire_images(cam, nodemap, nodemap_tldevice, j, k): #j is exposure number, k is gain level\r\n\r\n #print('*** IMAGE ACQUISITION ***\\n')\r\n try:\r\n result = True\r\n\r\n # Begin acquiring images\r\n cam.BeginAcquisition()\r\n\r\n #print('Acquiring images...')\r\n\r\n\t\t#matrix to collect images\r\n print('')\r\n picList = [] #picList is local so it should have to be cleared every time running acquire images\r\n\r\n # Retrieve, convert, and save images\r\n # image number specified and iterated through for each exposure/gain configuration\r\n for i in range(NUM_IMAGES):\r\n try:\r\n\r\n # Retrieve the next image from the trigger\r\n result &= grab_next_image_by_trigger(nodemap, cam)\r\n\r\n # Retrieve next received image\r\n image_result = cam.GetNextImage()\r\n\r\n # Ensure image completion\r\n if image_result.IsIncomplete():\r\n print('Image incomplete with image status %d ...' 
% image_result.GetImageStatus())\r\n\r\n else:\r\n image_converted = image_result.Convert(PySpin.PixelFormat_Mono8, PySpin.HQ_LINEAR) #Do not use Mono12, not compatible with conversion\r\n\r\n\t\t\t\t\t#add to piclist\r\n imgarray = image_converted.GetNDArray()\r\n picList.append(imgarray) #array of the pixel values of each individual image\r\n\r\n\t\t\t\t\t# Release image\r\n image_result.Release()\r\n\r\n except PySpin.SpinnakerException as ex:\r\n print('Error image acq: %s' % ex)\r\n return False\r\n\r\n #now that picList is populated with NUM_Images we send it to\r\n #pixel points where n pixels will be averaged throughput all images\r\n #then average of pixel n will be compared to make sure all n pixels are behaving\r\n #the same at which point all averages of n pixels will be averaged to give the dark noise\r\n #value for the M/N Exposure/Gain settings configureation\r\n cam.EndAcquisition() #KL_758 :)\r\n PixelPoints_hist(picList,j,k)\r\n\r\n except PySpin.SpinnakerException as ex:\r\n print('Error imgAcq: %s' % ex)\r\n return False\r\n\r\n return result", "title": "" }, { "docid": "5bea1a73dfa41b42be6b53be3716ae2a", "score": "0.4998312", "text": "def compute_mIoU(gt_dir, pred_dir, root, uncounted_list, seq_len):\n\n # if int(seq_len) == 4:\n # roo_gt =\n # elif int(seq_len) == 4:\n # roo_gt =\n # else:\n # roo_gt =\n\n if uncounted_list == 'no':\n Uncounted_list = []\n elif uncounted_list == 'v2':\n Uncounted_list = range(40, 48)\n else:\n Uncounted_list = range(35, 48)\n print('Uncounted_list')\n print(Uncounted_list)\n num_classes = 24\n # print('Num classes', num_classes)\n hist = np.zeros((num_classes, num_classes))\n with open(gt_dir, 'rb') as file:\n gt_imgs_dic = pickle.load(file)\n\n with open(pred_dir, 'rb') as file:\n pred_imgs_dic = pickle.load(file)\n\n gt_imgs_overall = []\n pred_imgs_overall = []\n for Dict_key in Dict_key_list:\n gt_imgs_overall += sorted(gt_imgs_dic[Dict_key])\n pred_imgs_overall += sorted(pred_imgs_dic[Dict_key])\n\n print('overal_all')\n gt_imgs = gt_imgs_overall\n pred_imgs = pred_imgs_overall\n # print(len(gt_imgs))\n # print(len(pred_imgs))\n # print(gt_imgs)\n for ind in range(len(gt_imgs)):\n # print(int(pred_imgs[ind].split('_')[0]))\n # if int(pred_imgs[ind].split('_')[0]) in Uncounted_list:\n # print(int(pred_imgs[ind].split('_')[0]))\n\n if not int(pred_imgs[ind].split('_')[0]) in Uncounted_list:\n if os.path.exists(os.path.join(root, pred_imgs[ind])) and os.path.exists(os.path.join(root, gt_imgs[ind])):\n # pred = np.array(colormap2id(os.path.join(root, pred_imgs[ind])))\n # label = np.array(colormap2id(os.path.join(root, gt_imgs[ind])))\n # if len(label.flatten()) != len(pred.flatten()):\n # print('Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(len(label.flatten()),\n # len(pred.flatten()), gt_imgs[ind],\n # pred_imgs[ind]))\n # continue\n # hist += fast_hist(label.flatten(), pred.flatten(), num_classes)\n\n pred = np.array(colormap2id_np(os.path.join(root, pred_imgs[ind])))\n label = np.array(colormap2id_np(os.path.join(root, gt_imgs[ind])))\n if len(label.flatten()) != len(pred.flatten()):\n print('Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(len(label.flatten()),\n len(pred.flatten()),\n gt_imgs[ind],\n pred_imgs[ind]))\n continue\n\n hist += fast_hist(label.flatten(), pred.flatten(), num_classes)\n # try:\n # pred = np.array(colormap2id_np(os.path.join(root, pred_imgs[ind])))\n # label = np.array(colormap2id_np(os.path.join(root, gt_imgs[ind])))\n # if len(label.flatten()) != len(pred.flatten()):\n 
# print('Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(len(label.flatten()),\n # len(pred.flatten()),\n # gt_imgs[ind],\n # pred_imgs[ind]))\n # continue\n #\n # hist += fast_hist(label.flatten(), pred.flatten(), num_classes)\n # except:\n # print(pred_imgs[ind])\n # if ind > 0 and ind % 10 == 0:\n # print('{:d} / {:d}: {:0.2f}'.format(ind, len(gt_imgs), 100 * np.mean(per_class_iu(hist))))\n\n mIoUs = per_class_iu(hist)\n\n acc_overall, acc_percls, iu, fwIU = result_stats(hist)\n # for ind_class in range(num_classes):\n # print('===>' + str(ind_class) + ':\\t' + str(round(mIoUs[ind_class] * 100, 2)))\n # print('===> mIoU: ' + str(round(np.nanmean(mIoUs) * 100, 2)))\n print({'mIoU': ' {:0.2f} fwIoU: {:0.2f} pixel acc: {:0.2f} per cls acc: {:0.2f}'.format(\n np.nanmean(iu), fwIU, acc_overall, np.nanmean(acc_percls))})\n\n\n for subset_indx in range(len(Dict_key_list)):\n hist = np.zeros((num_classes, num_classes))\n print(Dict_key_list[subset_indx])\n gt_imgs = sorted(gt_imgs_dic[Dict_key_list[subset_indx]])\n pred_imgs = sorted(pred_imgs_dic[Dict_key_list[subset_indx]])\n # print(len(gt_imgs))\n # print(len(pred_imgs))\n for ind in range(len(gt_imgs)):\n if not int(pred_imgs[ind].split('_')[0]) in Uncounted_list:\n if os.path.exists(os.path.join(root, pred_imgs[ind])) and os.path.exists(os.path.join(root, gt_imgs[ind])):\n\n pred = np.array(colormap2id_np(os.path.join(root, pred_imgs[ind])))\n label = np.array(colormap2id_np(os.path.join(root, gt_imgs[ind])))\n if len(label.flatten()) != len(pred.flatten()):\n print('Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(len(label.flatten()),\n len(pred.flatten()),\n gt_imgs[ind],\n pred_imgs[ind]))\n continue\n\n hist += fast_hist(label.flatten(), pred.flatten(), num_classes)\n\n # try:\n # pred = np.array(colormap2id_np(os.path.join(root, pred_imgs[ind])))\n # label = np.array(colormap2id_np(os.path.join(root, gt_imgs[ind])))\n # if len(label.flatten()) != len(pred.flatten()):\n # print('Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(len(label.flatten()),\n # len(pred.flatten()),\n # gt_imgs[ind],\n # pred_imgs[ind]))\n # continue\n # hist += fast_hist(label.flatten(), pred.flatten(), num_classes)\n # except:\n # print(pred_imgs[ind])\n # if ind > 0 and ind % 10 == 0:\n # print('{:d} / {:d}: {:0.2f}'.format(ind, len(gt_imgs), 100*np.mean(per_class_iu(hist))))\n\n mIoUs = per_class_iu(hist)\n\n acc_overall, acc_percls, iu, fwIU = result_stats(hist)\n # for ind_class in range(num_classes):\n # print('===>' + str(ind_class) + ':\\t' + str(round(mIoUs[ind_class] * 100, 2)))\n # print('===> mIoU: ' + str(round(np.nanmean(mIoUs) * 100, 2)))\n print({'mIoU':' {:0.2f} fwIoU: {:0.2f} pixel acc: {:0.2f} per cls acc: {:0.2f}'.format(\n np.nanmean(iu), fwIU, acc_overall, np.nanmean(acc_percls))})\n\n\n\n return mIoUs, fwIU, acc_overall, np.nanmean(acc_percls)", "title": "" }, { "docid": "2fdb89aab5e37296b0eadb5d2c794afa", "score": "0.49943894", "text": "def calculateEvaluationMetrics(self):\n #print(self.config_instance.path_postfix_raw_results)\n if (self.config_instance.path_postfix_raw_results == 'csv'):\n vid_name_list = os.listdir(str(self.config_instance.path_raw_results_eval))\n vid_name_list = [i for i in vid_name_list if i.endswith('.csv')]\n elif (self.config_instance.path_postfix_raw_results == 'npy'):\n vid_name_list = os.listdir(str(self.config_instance.path_raw_results_eval))\n vid_name_list = [i for i in vid_name_list if i.endswith('.npy')]\n #print(vid_name_list)\n\n final_results = []\n 
fp_video_based = None\n\n if(self.config_instance.threshold_mode == 'adaptive'):\n ## alpha\n #thresholds_l = [0.4]\n thresholds_l = [0.1, 0.2, 0.30, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n\n ## beta\n #thresholds2_l = [0.5]\n thresholds2_l = [0.1, 0.2, 0.30, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n\n elif(self.config_instance.threshold_mode == 'fixed'):\n thresholds_l = [1.0, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.30, 0.20, 0.10, 0.0]\n thresholds2_l = [0.0] # oonly in adaptive mode\n else:\n thresholds_l = []\n thresholds2_l = [] # oonly in adaptive mode\n\n\n for t in thresholds_l:\n for t2 in thresholds2_l:\n tp_sum = 0\n fp_sum = 0\n tn_sum = 0\n fn_sum = 0\n THRESHOLD = t\n THRESHOLD2 = t2\n\n if(int(self.config_instance.save_eval_results) == 1):\n fp_video_based = open(self.config_instance.path_eval_results + \"/final_results_th-\" + str(THRESHOLD) + \"-\" + str(THRESHOLD2) + \".csv\", 'w')\n header = \"vid_name;tp;fp;tn;fn;p;r;acc;f1_score;tp_rate;fp_rate\"\n fp_video_based.write(header + \"\\n\")\n\n results_l = []\n for vid_name in vid_name_list:\n if(self.config_instance.path_postfix_raw_results == 'csv'):\n results_np = self.loadRawResultsFromCsv(self.config_instance.path_raw_results_eval + \"/\" + vid_name)\n elif (self.config_instance.path_postfix_raw_results == 'npy'):\n results_np = self.loadRawResultsFromNumpy(self.config_instance.path_raw_results_eval + \"/\" + vid_name)\n\n # calculate similarity measures of consecutive frames and threshold them\n shot_boundaries_np = self.calculateSimilarityMetric(results_np, threshold=THRESHOLD, threshold2=THRESHOLD2)\n #print(shot_boundaries_np)\n \n # calculate evaluation metrics\n tp, fp, tn, fn = self.evaluation(shot_boundaries_np, vid_name)\n p, r, acc, f1_score, tp_rate, fp_rate = self.calculateMetrics(tp, fp, tn, fn)\n\n if (int(self.config_instance.save_eval_results) == 1):\n tmp_str = str(vid_name.replace('results_raw_', '').split('.')[0]) + \";\" + str(tp) + \";\" + str(fp) + \\\n \";\" + str(tn) + \";\" + str(fn) + \";\" + str(p) + \";\" + str(r) + \";\" + str(acc) + \";\" + \\\n str(f1_score) + \";\" + str(tp_rate) + \";\" + str(fp_rate)\n print(tmp_str)\n fp_video_based.write(tmp_str + \"\\n\")\n #else:\n # tp = 0;\n # fp = 0;\n # tn = 0;\n # fn = 0;\n # p = 0;\n # r = 0;\n # acc = 0;\n # f1_score = 0;\n results_l.append([vid_name, tp, fp, tn, fn, p, r, acc, f1_score])\n\n tp_sum = tp_sum + tp\n fp_sum = fp_sum + fp\n tn_sum = tn_sum + tn\n fn_sum = fn_sum + fn\n\n p, r, acc, f1_score, tp_rate, fp_rate = self.calculateMetrics(tp_sum, fp_sum, tn_sum, fn_sum)\n\n if (int(self.config_instance.save_eval_results) == 1):\n tmp_str = str(\"overall\" + \";\" + str(tp_sum) + \";\" + str(fp_sum) + \\\n \";\" + str(tn_sum) + \";\" + str(fn_sum) + \";\" + str(p) + \";\" + str(r) + \";\" + str(acc) + \";\" + \\\n str(f1_score) + \";\" + str(tp_rate) + \";\" + str(fp_rate))\n print(tmp_str)\n\n fp_video_based.write(tmp_str + \"\\n\")\n fp_video_based.close()\n\n final_results.append([str(THRESHOLD), tp_sum, fp_sum, tn_sum, fn_sum, p, r, acc, f1_score, tp_rate, fp_rate])\n final_results_np = np.array(final_results)\n return final_results_np", "title": "" }, { "docid": "7182da39ee27e21777e48a3e4a53d3e7", "score": "0.4991181", "text": "def extract_images(self):\n for num, _ in enumerate(self.img_list):\n self.extract_image(num)", "title": "" } ]
b2e98536efdc0cb2f58835cb49f27af1
The menu is selected, create a click control.
[ { "docid": "8972ba31eb913b59f9149eddadeb28bd", "score": "0.5932137", "text": "def OnItem(self, e):\n self.process_control(e, \"click\")", "title": "" } ]
[ { "docid": "091f72e3307c61510e4290029546b2c2", "score": "0.6858837", "text": "def sig_activate(self, widget, menu):\n menu.event('clicked')", "title": "" }, { "docid": "8ab69f90be7788607bbdb641781d43ac", "score": "0.6697031", "text": "def click():", "title": "" }, { "docid": "27957c72781583208cbc37d87bd55979", "score": "0.6684054", "text": "def _create_create_menu(self):\n self._create_menu_widget = BrCreateMenu(self)\n\n # connect menu\n self._create_menu_widget.guide_loc_action.triggered.connect(\n self._create_guide\n )\n\n self._create_menu_widget.joint_action.triggered.connect(\n self._create_joint\n )\n\n self._create_menu_widget.build_action.triggered.connect(\n self._create_build\n )", "title": "" }, { "docid": "ae29294a04759faf4dba24e79d0aea81", "score": "0.665211", "text": "def menu():", "title": "" }, { "docid": "8ae0532e07318ae3747b079cade9af2e", "score": "0.6637754", "text": "def setup_menu(self):\n pass", "title": "" }, { "docid": "b1e0deaabee16a70b7be24a1509daa3d", "score": "0.6611915", "text": "def handleMenuEvent(self,ev,menu): \n pass", "title": "" }, { "docid": "da40587b53f58004c371c87e66076b5e", "score": "0.6569286", "text": "def create_menu(self):\n self.menu = TextMenu(0.07, 0.1, 0.2196, 0.0781)\n self.menu.add_item(_(\"Watch\"), None, \"watch\")\n self.menu.active = True\n\n self.add(self.menu)", "title": "" }, { "docid": "2bd1b0111358a22e99875f9f7fff277a", "score": "0.6549571", "text": "def addMenu(self):\n menu = self.interface.getPulldownMenu(3)\n actionBefore = menu.actions()[4]\n menu.insertAction(actionBefore, self.action)", "title": "" }, { "docid": "eb6df77b86ebc7e966a06b2a56022d07", "score": "0.64444447", "text": "def create_menu(self):\n\n # Fill list with buttons by the information picked from strMenu\n for i in range(len(self.strMenu)):\n self.menu.append([])\n for ii in range(len(self.strMenu[i])):\n sign = self.strMenu[i][ii]\n\n if sign in self.special:\n self.menu[i].append(ttk.Button(self.keyboardFrame, text=sign,\n **self.settings, command=self.special[sign])\n )\n self.menu[i][ii].grid(row=i, column=ii)\n\n elif sign == \"\":\n self.menu[i].append(None)\n\n # Must be usual sign\n else:\n self.menu[i].append(ttk.Button(self.keyboardFrame, text=sign, **self.settings,\n command=lambda lambda_sign=sign:\n self.operationEntry.insert(END, lambda_sign))\n )\n self.menu[i][ii].grid(row=i, column=ii)\n\n # Create action which will update itself\n self.update()\n self.root.mainloop()", "title": "" }, { "docid": "663912f780be53248dba254e09c7bc77", "score": "0.6302211", "text": "def _create_dcc_menu(self):\n self._dcc_menu_widget = BrDccMenu(self)\n\n # connect menu", "title": "" }, { "docid": "748f581f12a9203ad1f2d79df45039d7", "score": "0.6287714", "text": "def on_click(self, pos, button):\n pass", "title": "" }, { "docid": "8191441d53f351fcc177771daaedd5ac", "score": "0.62407756", "text": "def createMenu(self):\n\t\tdef doMenu(themenu, listOfCaptions, popuphandler=None):\n\t\t\tlistOfCaptions.sort()\n\t\t\tfor it in listOfCaptions:\n\t\t\t\tid = wx.NewId()\n\t\t\t\tself.menuItems[str(id)] = it\n\t\t\t\titem = themenu.Append(id, it)\n\t\t\t\tif popuphandler is None: self.Bind(wx.EVT_MENU, self.popupHandler, item)\n\t\t\t\telse: self.Bind(wx.EVT_MENU, popuphandler, item) \n\t\tself.menu = wx.Menu()\n\t\tcommands = wx.Menu()\n\t\tdoMenu(commands, self.parser.commands.keys())\n\t\tself.menu.AppendSubMenu(commands, 'commands')\n\t\tk = self.parser.sections.keys()\n\t\tsections = wx.Menu()\n\t\tdoMenu(sections, [x for x in k if x not in 
self.parser.truckSections])\n\t\tself.menu.AppendSubMenu(sections, 'sections')\n\t\t\n\t\tdoMenu(self.menu, ['group beams'], self.groupHightlightBeams)\n\t\t\n\t\t#local variable\n\t\t#menuItems = ['set_beam_defaults', ';']", "title": "" }, { "docid": "fb95c612342b39b8d8b2c2fd55076673", "score": "0.61750215", "text": "def createMenu(self,menuDic,menuOrder=None): \n #There is no Actual Menu in Blender, bu we can use the Pull Down Menu\n #subMenu: create a callback that create another PupMenu with the subvalue\n #this is the lastine...\n #the menu id is the first elem id\n if menuOrder : \n lookat = menuOrder\n else :\n lookat = list(menuDic.keys())\n x=5\n menubar = Menu(self.root)\n for mitem in lookat:\n mitem_menu = Menu(menubar, tearoff=0)\n for elem in menuDic[mitem]: \n if elem[\"sub\"] is not None:\n submenu = Menu(mitem_menu, tearoff=0)\n# self.MenuSubBegin(elem[\"name\"])\n for sub in elem['sub']:\n if elem['sub'][sub][\"action\"] is not None :\n submenu.add_command(label=elem['sub'][sub][\"name\"], \n command=partial(elem['sub'][sub][\"action\"],sub))\n else :\n submenu.add_command(label=elem['sub'][sub][\"name\"])\n mitem_menu.add_cascade(label=elem[\"name\"], menu=submenu)\n else:\n if elem[\"action\"] is not None :\n mitem_menu.add_command(label=elem[\"name\"], command=elem[\"action\"])\n else :\n mitem_menu.add_command(label=elem[\"name\"], command=elem[\"action\"])\n menubar.add_cascade(label=mitem, menu=mitem_menu)\n self.root.config(menu=menubar)", "title": "" }, { "docid": "31dcc6557a8b3975cae01a05da094991", "score": "0.6156516", "text": "def menu(self):\n raise NotImplementedError", "title": "" }, { "docid": "341dec6abdd5bdd4430806e0ef53f714", "score": "0.61487925", "text": "def _buttMenu(self):\n if self._hudMenuButton.getText() == 'Continue': #Go to next round\n self._roundFinalise()\n elif self._hudMenuButton.getText() == 'Cancel': #Cancel button\n if (self._hudCallButton.getText() == 'Tsumo' or\n self._hudKanButton.getText() == 'Clos. 
Kan' or\n self._hudKanButton.getText() == 'Late Kan' or\n self._hudChiButton.getText() == 'Riichi'): #If user's turn\n self._curStage = 'turnmid'\n self._optionsDeny = True\n else: #If other player's discard\n self._curStage = 'turnstart'\n elif self._hudMenuButton.getText() == 'Menu':\n self.popupDialog(\"WindowGameMenu\")\n self._disableButtons()", "title": "" }, { "docid": "b66c6d6b8994b349c71255d6b272df67", "score": "0.6132619", "text": "def add_buttons(self):\n # Create the menu under \"File\"\n file_menu = tkinter.Menu(self.menu)\n self.menu.add_cascade(label=\"File\", menu=file_menu)\n file_menu.add_command(label=\"Save\", command=self.save)\n file_menu.add_command(label=\"Open...\", command=self.open)\n file_menu.add_separator()\n file_menu.add_command(label=\"Exit\", command=self.root.destroy)\n\n # Create the menu under \"Help\"\n help_menu = tkinter.Menu(self.menu)\n self.menu.add_cascade(label=\"Help\", menu=help_menu)\n help_menu.add_command(label=\"About...\", command=self.open_readme)", "title": "" }, { "docid": "05d8ef7e3616ba6b7456ac36f9ce546e", "score": "0.6088853", "text": "def menu(self, menu):\n self._menu = menu", "title": "" }, { "docid": "64b21adf50fbb96bca947b56b6a16713", "score": "0.6088281", "text": "def open_main_menu(self):\r\n\t\tself._extra_menus = []\r\n\t\tself._interface = MainMenu()", "title": "" }, { "docid": "700d6b73e474f494ceaa4ab0ab555520", "score": "0.6085836", "text": "def click(self) -> None:\n pass", "title": "" }, { "docid": "a8e3964fe47283daee78002f24c16e2b", "score": "0.6082318", "text": "def click_charge_categories_submenu(self):\n self.set_existing_handles()\n self.click_element(self.charge_categories_submenu_loctor)\n self.switch_to_window()", "title": "" }, { "docid": "a12b17e591e95dc791fe643c06461abc", "score": "0.6051236", "text": "def menu_new(self, widget, data=None):\n\n self.game = Game()\n self.actualizar()", "title": "" }, { "docid": "0c8b65cb968ef4b1a6d15f7182cdd438", "score": "0.60367376", "text": "def on_btn_volver(self, button):\n self.parent.show_main_menu()", "title": "" }, { "docid": "7c41b63d0958625bba36206332422b17", "score": "0.60124534", "text": "def click_press(self):\n pass", "title": "" }, { "docid": "57d600c027a697ef1167855a48cd18a7", "score": "0.6003724", "text": "def on_click(self):\n pass", "title": "" }, { "docid": "03faf7cc04b73fe45497cb91ab1381c0", "score": "0.598046", "text": "def menu_inicial():\r\n limpiar_pantalla()\r\n\r\n iniciar_sesion = tk.Button(window, text=\"INICIAR SESION\", font=(\"Arial bold\", 10),\r\n width=30, height=4, bg=\"orange\", command=lambda: log_in())\r\n iniciar_sesion.place(relx=0.5, rely=0.45, anchor=\"c\")\r\n\r\n registrarse = tk.Button(window, text=\"REGISTRARSE\", font=(\"Arial bold\", 10), width=30,\r\n height=4, bg=\"gray\", command=lambda: registrar())\r\n registrarse.place(relx=0.5, rely=0.6, anchor=\"c\")\r\n\r\n opcion_salir = tk.Button(window, text=\"Salir\",\r\n font=(\"Arial bold\", 25), command=lambda: cerrar_aplicacion(), bg=\"red\")\r\n opcion_salir.place(relx=0.5, rely=0.875, width=300, height=50, anchor=\"c\")", "title": "" }, { "docid": "d15979e3d2eb0d8abdf3990e976a9f8a", "score": "0.5978266", "text": "def create_menu_button(self):\r\n # menu variable\r\n colors = ('Red', 'Green', 'Blue')\r\n\r\n # create the Menubutton\r\n menu_button = ttk.Menubutton(\r\n self,\r\n text='Select a color')\r\n\r\n # create a new menu instance\r\n menu = tk.Menu(menu_button, tearoff=0)\r\n\r\n for color in colors:\r\n menu.add_radiobutton(\r\n label=color,\r\n value=color,\r\n 
variable=self.selected_color)\r\n\r\n # associate menu with the Menubutton\r\n menu_button[\"menu\"] = menu\r\n\r\n menu_button.pack(expand=True)", "title": "" }, { "docid": "c5823ff71749d12977fde0318fdfb719", "score": "0.5935681", "text": "def context_menu(self, position):\n # get a menu\n self.context_menu = QtWidgets.QMenu()\n # get the index of the selected item\n idx = self.view.selectedIndexes()[0]\n # get the level of the selected item\n level = self.get_level_selected_item()\n if level == 0:\n selected_text = str(idx.data())\n if selected_text == \"Global\":\n self.context_menu.actionAddGlobal = QtWidgets.QAction(self)\n self.context_menu.actionAddGlobal.setText(\"Add attribute\")\n self.context_menu.addAction(self.context_menu.actionAddGlobal)\n self.context_menu.actionAddGlobal.triggered.connect(self.add_global)\n elif selected_text == \"Variables\":\n self.context_menu.actionAddVariable = QtWidgets.QAction(self)\n self.context_menu.actionAddVariable.setText(\"Add variable\")\n self.context_menu.addAction(self.context_menu.actionAddVariable)\n self.context_menu.actionAddVariable.triggered.connect(self.add_variable)\n elif level == 1:\n selected_item = idx.model().itemFromIndex(idx)\n parent = selected_item.parent()\n if (str(parent.text()) == \"Files\") and (selected_item.column() == 1):\n key = str(parent.child(selected_item.row(),0).text())\n # check to see if we have the selected subsection\n if key == \"file_path\":\n self.context_menu.actionBrowseFilePath = QtWidgets.QAction(self)\n self.context_menu.actionBrowseFilePath.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseFilePath)\n self.context_menu.actionBrowseFilePath.triggered.connect(self.browse_file_path)\n elif key == \"in_filename\":\n self.context_menu.actionBrowseInputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseInputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseInputFile)\n self.context_menu.actionBrowseInputFile.triggered.connect(self.browse_input_file)\n elif key == \"out_filename\":\n self.context_menu.actionBrowseOutputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseOutputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseOutputFile)\n self.context_menu.actionBrowseOutputFile.triggered.connect(self.browse_output_file)\n else:\n pass\n elif str(parent.text()) == \"Global\":\n self.context_menu.actionRemoveGlobal = QtWidgets.QAction(self)\n self.context_menu.actionRemoveGlobal.setText(\"Remove attribute\")\n self.context_menu.addAction(self.context_menu.actionRemoveGlobal)\n self.context_menu.actionRemoveGlobal.triggered.connect(self.remove_item)\n elif str(parent.text()) == \"Variables\":\n self.context_menu.actionAddFunction = QtWidgets.QAction(self)\n self.context_menu.actionAddFunction.setText(\"Add Function\")\n self.context_menu.addAction(self.context_menu.actionAddFunction)\n self.context_menu.actionAddFunction.triggered.connect(self.add_function)\n self.context_menu.addSeparator()\n self.context_menu.actionRemoveVariable = QtWidgets.QAction(self)\n self.context_menu.actionRemoveVariable.setText(\"Remove variable\")\n self.context_menu.addAction(self.context_menu.actionRemoveVariable)\n self.context_menu.actionRemoveVariable.triggered.connect(self.remove_item)\n elif level == 2:\n section_text = str(idx.parent().parent().data())\n subsection_text = str(idx.parent().data())\n subsubsection_text = str(idx.data())\n if section_text == \"Variables\":\n if 
subsubsection_text == \"Attr\":\n self.context_menu.actionAddAttribute = QtWidgets.QAction(self)\n self.context_menu.actionAddAttribute.setText(\"Add attribute\")\n self.context_menu.addAction(self.context_menu.actionAddAttribute)\n self.context_menu.actionAddAttribute.triggered.connect(self.add_attribute)\n elif subsubsection_text in [\"Function\", \"xl\", \"csv\"]:\n self.context_menu.actionRemoveSubSubSection = QtWidgets.QAction(self)\n self.context_menu.actionRemoveSubSubSection.setText(\"Remove item\")\n self.context_menu.addAction(self.context_menu.actionRemoveSubSubSection)\n self.context_menu.actionRemoveSubSubSection.triggered.connect(self.remove_item)\n elif level == 3:\n if str(idx.parent().data()) == \"Attr\":\n self.context_menu.actionRemoveAttribute = QtWidgets.QAction(self)\n self.context_menu.actionRemoveAttribute.setText(\"Remove attribute\")\n self.context_menu.addAction(self.context_menu.actionRemoveAttribute)\n self.context_menu.actionRemoveAttribute.triggered.connect(self.remove_item)\n elif (str(idx.parent().data()) == \"Function\" and\n str(idx.data()) == \"Right click to browse\"):\n implemented_functions_name = [name for name,data in inspect.getmembers(pfp_func,inspect.isfunction)]\n self.context_menu.actionAddFunction = {}\n for item in implemented_functions_name:\n self.context_menu.actionAddFunction[item] = QtWidgets.QAction(self)\n self.context_menu.actionAddFunction[item].setText(str(item))\n self.context_menu.addAction(self.context_menu.actionAddFunction[item])\n self.context_menu.actionAddFunction[item].triggered.connect(self.add_function_entry)\n\n self.context_menu.exec_(self.view.viewport().mapToGlobal(position))", "title": "" }, { "docid": "4ed596b026813b3ea6d81f6b4275d143", "score": "0.5935531", "text": "def clicked(self):\n print(\"Clicked\")", "title": "" }, { "docid": "4ed596b026813b3ea6d81f6b4275d143", "score": "0.5935531", "text": "def clicked(self):\n print(\"Clicked\")", "title": "" }, { "docid": "5770351e31f9a35d64d3cd95c5a2705e", "score": "0.59349436", "text": "def _create_task_menu(self):\n self._task_menu_widget = BrTaskMenu(self)\n\n # connect menu", "title": "" }, { "docid": "5af40fc4c9baa25b13fd14ac8d447dd8", "score": "0.5932903", "text": "def open_new_menu(self, interface: InterfaceManager):\r\n\t\tself._extra_menus.append(interface)", "title": "" }, { "docid": "69e14cf62e7177bac4dec41772c6c1d9", "score": "0.59314984", "text": "def add_menu_item(self, menu, event, label):\n action = self.ui.get_callback(event)\n item = gtk.MenuItem(label)\n item.connect('activate', action)\n menu.append(item)", "title": "" }, { "docid": "46975de16a13c05ea51a7325fb7f424b", "score": "0.59226984", "text": "def menu(self):\r\n self.menuscherm = Frame(self.scherm, bg=self.background1)\r\n self.menuscherm.place(x=1300, y=400)\r\n\r\n startknop = Button(self.menuscherm, text='start', command=lambda:[self.menuscherm.destroy(),self.spelvorm()], bg='#202125', fg='White', font=('arial', 20), width=7, borderwidth=0)\r\n startknop.pack(padx=20, pady=10)\r\n\r\n settingsknop = Button(self.menuscherm, text='settings', command=lambda:[self.menuscherm.destroy(),self.settings()], bg='#202125', fg='White', font=('arial', 20), width=7, borderwidth=0)\r\n settingsknop.pack(padx=20)\r\n\r\n regelsknop = Button(self.menuscherm, text='Regels', command=lambda:[self.menuscherm.destroy(),self.regels()], bg='#202125', fg='White', font=('arial', 20), width=7, borderwidth=0)\r\n regelsknop.pack(padx=20, pady=10)\r\n\r\n exitknop = Button(self.menuscherm, text='exit', 
command=self.quit, bg='#202125', fg='White', font=('arial', 20), width=7, borderwidth=0)\r\n exitknop.pack(padx=20)", "title": "" }, { "docid": "3cec984043374eee106a3a9eeb8acfc9", "score": "0.59196454", "text": "def init_menu(self):\n mainmenu = ui.ShowMenuTree(self.display, \"PLOP\")\n showmainmenu = ui.ShowMenuTree(self.display, \"\")\n capt1menu = ui.ShowMenuTree(self.display, \"CAP1\")\n capt1_rawtension = ui.ShowMenuTreeLive(self.display, livedisp_rawvoltage, [self.sensor1, ])\n capt1_value = ui.ShowMenuLive(self.display, livedisp_sensorvalue, [self.sensor1, ])\n capt1_direction = ui.ShowMenuStatic(self.display, \"WAY{0}\".format(settings.get(\"sensor\", \"direction\")))\n capt1_autominmax_menu = ui.ShowMenuTree(self.display, \"AUTO\")\n capt1_autominmax = ui.ShowMenuLive(self.display, livedisp_autovoltage, [self.sensor1, ])\n sysmenu = ui.ShowMenuTree(self.display, \"SYST\")\n poweroff = ui.ShowMenuTree(self.display, \"POFF\")\n reboot = ui.ShowMenuTree(self.display, \"REBT\")\n restart = ui.ShowMenuTree(self.display, \"REST\")\n # capt1_setminmax = ui.ShowMenuTreeLive(self.display, livedisp_rawvoltage, [self.sensor1, ])\n\n ### mainmenu ###\n mainmenu.elements.append(showmainmenu)\n showmainmenu.elements.append(capt1menu)\n showmainmenu.elements.append(sysmenu)\n showmainmenu.elements.append(ui.create_back(self.display, \"HIDE\"))\n ### capt1 ###\n # capt1 #\n capt1menu.elements.append(capt1_rawtension)\n capt1menu.elements.append(capt1_autominmax_menu)\n capt1menu.elements.append(capt1_value)\n capt1menu.elements.append(capt1_direction)\n capt1menu.elements.append(ui.create_back(self.display, \"MENU\"))\n # capt1 -> raw #\n capt1_rawtension.push_left = get_set_sensor_min(self.sensor1)\n capt1_rawtension.push_right = get_set_sensor_max(self.sensor1)\n capt1_rawtension.push_ok = capt1_rawtension.back_quit\n # capt1 -> auto #\n capt1_autominmax.push_ok = change_min_max_auto(capt1_autominmax, self.sensor1)\n capt1_autominmax_menu.elements.append(capt1_autominmax)\n capt1_autominmax_menu.elements.append(ui.create_back(self.display, \"CAP1\"))\n # capt1 -> value #\n # capt1 -> way #\n capt1_direction.push_ok = change_way(capt1_direction)\n\n #capt1_value.push_ok = capt1_value.back_quit\n\n ### sysmenu ###\n # poweroff #\n poweroff.push_ok = system.poweroff\n reboot.push_ok = system.reboot\n restart.push_ok = master.main_exit\n sysmenu.elements.append(poweroff)\n sysmenu.elements.append(reboot)\n sysmenu.elements.append(restart)\n sysmenu.elements.append(ui.create_back(self.display, \"MENU\"))\n\n mainmenu.enter()", "title": "" }, { "docid": "a70a270e0b245681f0f02d99656cd7c3", "score": "0.5914566", "text": "def createMenu(self):\n #Actions\n newAction = QtGui.QAction(\"&New...\",self)\n newAction.setShortcut(\"Ctrl+N\")\n newAction.setStatusTip('Create New Listing')\n newAction.triggered.connect(self.NewListing)\n updAction = QtGui.QAction(\"&Update...\",self)\n updAction.setShortcut(\"Ctrl+U\")\n updAction.setStatusTip('Update Inventory')\n updAction.triggered.connect(self.UpdateInventory)\n\n #Initiate and add actions\n self.statusBar()\n mainMenu = self.menuBar()\n fileMenu = mainMenu.addMenu('&File')\n fileMenu.addAction(newAction)\n fileMenu.addAction(updAction)\n editMenu = mainMenu.addMenu('&Edit')", "title": "" }, { "docid": "25843b6db31543b37bd35eab2314bd1f", "score": "0.59041905", "text": "def __PopupMenu(self, event, pos):\r\n\r\n menu = self.__CreatePopup()\r\n menu.Popup(event, pos=pos)", "title": "" }, { "docid": "7c66b0a712fbc4618708ba69b5c61054", "score": "0.589372", 
"text": "def context_menu(self, position):\n # get a menu\n self.context_menu = QtWidgets.QMenu()\n # get the index of the selected item\n idx = self.view.selectedIndexes()[0]\n # get the selected item text\n selected_text = str(idx.data())\n # get the selected item\n selected_item = idx.model().itemFromIndex(idx)\n # get the level of the selected item\n level = self.get_level_selected_item()\n # initialise logical for inserting a separator\n add_separator = False\n if level == 0:\n # sections with only 1 level\n if selected_text == \"Files\":\n self.context_menu.actionAddFileEntry = QtWidgets.QAction(self)\n self.context_menu.actionAddFileEntry.setText(\"Add item\")\n self.context_menu.addAction(self.context_menu.actionAddFileEntry)\n self.context_menu.actionAddFileEntry.triggered.connect(self.add_fileentry)\n elif selected_text == \"Output\":\n pass\n elif selected_text == \"Options\":\n # get a list of existing entries in this section\n existing_entries = self.get_existing_entries()\n # only put a QC check in the context menu if it is not already present\n if \"MaxGapInterpolate\" not in existing_entries:\n self.context_menu.actionAddMaxGapInterpolate = QtWidgets.QAction(self)\n self.context_menu.actionAddMaxGapInterpolate.setText(\"MaxGapInterpolate\")\n self.context_menu.addAction(self.context_menu.actionAddMaxGapInterpolate)\n self.context_menu.actionAddMaxGapInterpolate.triggered.connect(self.add_maxgapinterpolate)\n elif selected_text == \"Global\":\n self.context_menu.actionAddGlobalAttribute = QtWidgets.QAction(self)\n self.context_menu.actionAddGlobalAttribute.setText(\"Add global attribute\")\n self.context_menu.addAction(self.context_menu.actionAddGlobalAttribute)\n self.context_menu.actionAddGlobalAttribute.triggered.connect(self.add_global_attribute)\n elif selected_text in [\"EcosystemRespiration\"]:\n pass\n #self.context_menu.actionAddVariable = QtWidgets.QAction(self)\n #self.context_menu.actionAddVariable.setText(\"Add variable\")\n #self.context_menu.addAction(self.context_menu.actionAddVariable)\n #self.context_menu.actionAddVariable.triggered.connect(self.add_er_variable)\n elif selected_text in [\"NetEcosystemExchange\"]:\n pass\n elif selected_text in [\"GrossPrimaryProductivity\"]:\n pass\n elif level == 1:\n # sections with 2 levels\n # get the parent of the selected item\n parent = selected_item.parent()\n if (str(parent.text()) == \"Files\") and (selected_item.column() == 1):\n key = str(parent.child(selected_item.row(),0).text())\n if key in [\"file_path\", \"plot_path\"]:\n self.context_menu.actionBrowseFilePath = QtWidgets.QAction(self)\n self.context_menu.actionBrowseFilePath.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseFilePath)\n self.context_menu.actionBrowseFilePath.triggered.connect(self.browse_file_path)\n elif key in [\"in_filename\"]:\n self.context_menu.actionBrowseInputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseInputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseInputFile)\n self.context_menu.actionBrowseInputFile.triggered.connect(self.browse_input_file)\n elif key in [\"out_filename\"]:\n self.context_menu.actionBrowseOutputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseOutputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseOutputFile)\n self.context_menu.actionBrowseOutputFile.triggered.connect(self.browse_output_file)\n elif (str(parent.text()) == \"Global\") and (selected_item.column() == 
0):\n self.context_menu.actionRemoveGlobalAttribute = QtWidgets.QAction(self)\n self.context_menu.actionRemoveGlobalAttribute.setText(\"Remove attribute\")\n self.context_menu.addAction(self.context_menu.actionRemoveGlobalAttribute)\n self.context_menu.actionRemoveGlobalAttribute.triggered.connect(self.remove_item)\n elif (str(parent.text()) == \"NetEcosystemExchange\") and (selected_item.column() == 0):\n self.context_menu.actionRemoveNEEVariable = QtWidgets.QAction(self)\n self.context_menu.actionRemoveNEEVariable.setText(\"Remove variable\")\n self.context_menu.addAction(self.context_menu.actionRemoveNEEVariable)\n self.context_menu.actionRemoveNEEVariable.triggered.connect(self.remove_item)\n elif (str(parent.text()) == \"GrossPrimaryProductivity\") and (selected_item.column() == 0):\n self.context_menu.actionRemoveGPPVariable = QtWidgets.QAction(self)\n self.context_menu.actionRemoveGPPVariable.setText(\"Remove variable\")\n self.context_menu.addAction(self.context_menu.actionRemoveGPPVariable)\n self.context_menu.actionRemoveGPPVariable.triggered.connect(self.remove_item)\n elif (str(parent.text()) == \"EcosystemRespiration\") and (selected_item.column() == 0):\n self.context_menu.actionRemoveERVariable = QtWidgets.QAction(self)\n self.context_menu.actionRemoveERVariable.setText(\"Remove variable\")\n self.context_menu.addAction(self.context_menu.actionRemoveERVariable)\n self.context_menu.actionRemoveERVariable.triggered.connect(self.remove_item)\n elif level == 2:\n pass\n\n self.context_menu.exec_(self.view.viewport().mapToGlobal(position))", "title": "" }, { "docid": "5f10d5fe2115d0737b6f38178181acfb", "score": "0.58869064", "text": "def drawOnlineMenu(self):\n pass", "title": "" }, { "docid": "2596a62351146d53d8a0cc4da9ba4197", "score": "0.5885261", "text": "def click_cover_letter_templates_submenu(self):\n self.set_existing_handles()\n self.click_element(self.cover_letter_templates_submenu_locator)\n self.switch_to_window()", "title": "" }, { "docid": "508ae13e276894f0a5ed686acb60bbbf", "score": "0.5875708", "text": "def addMenu(self):\n c = self.c\n table = (\n (\"Parameterize Section Reference\", None, self.parameterize),\n )\n c.frame.menu.createMenuItemsFromTable(\"Outline\", table)", "title": "" }, { "docid": "150ea81aa63c1f3941e3fea00af1214c", "score": "0.58701396", "text": "def click(self):\n self._click_handlers(self)", "title": "" }, { "docid": "03fc8486635b28b98cabcd8986848adb", "score": "0.5868219", "text": "def onRightClick(self, event):\t\t \n\t\tself.PopupMenu(self.menu, event.GetPosition())", "title": "" }, { "docid": "0ae9502ab7ac08ad9d3609f280698b6d", "score": "0.58569205", "text": "def context_menu(self, position):\n # get a menu\n self.context_menu = QtWidgets.QMenu()\n # get the index of the selected item\n idx = self.view.selectedIndexes()[0]\n # get the selected item text\n selected_text = str(idx.data())\n # get the selected item\n selected_item = idx.model().itemFromIndex(idx)\n # get the level of the selected item\n level = self.get_level_selected_item()\n # initialise logical for inserting a separator\n add_separator = False\n if level == 0:\n if selected_text in [\"Variables\"]:\n self.context_menu.actionAddVariable = QtWidgets.QAction(self)\n self.context_menu.actionAddVariable.setText(\"Add variable\")\n self.context_menu.addAction(self.context_menu.actionAddVariable)\n self.context_menu.actionAddVariable.triggered.connect(self.add_new_variable)\n elif selected_text in [\"General\"]:\n self.context_menu.actionAddItem = QtWidgets.QAction(self)\n 
self.context_menu.actionAddItem.setText(\"Add item\")\n self.context_menu.addAction(self.context_menu.actionAddItem)\n self.context_menu.actionAddItem.triggered.connect(self.add_general_item)\n elif level == 1:\n # sections with 2 levels\n # get the parent of the selected item\n parent = selected_item.parent()\n if (str(parent.text()) == \"Files\") and (selected_item.column() == 1):\n key = str(parent.child(selected_item.row(),0).text())\n if key in [\"file_path\"]:\n self.context_menu.actionBrowseFilePath = QtWidgets.QAction(self)\n self.context_menu.actionBrowseFilePath.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseFilePath)\n self.context_menu.actionBrowseFilePath.triggered.connect(self.browse_file_path)\n elif key in [\"in_filename\"]:\n self.context_menu.actionBrowseInputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseInputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseInputFile)\n self.context_menu.actionBrowseInputFile.triggered.connect(self.browse_input_file)\n elif key in [\"out_filename\"]:\n self.context_menu.actionBrowseOutputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseOutputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseOutputFile)\n self.context_menu.actionBrowseOutputFile.triggered.connect(self.browse_output_file)\n elif (str(parent.text()) == \"Variables\") and (selected_item.column() == 0):\n self.context_menu.actionRemoveOption = QtWidgets.QAction(self)\n self.context_menu.actionRemoveOption.setText(\"Remove variable\")\n self.context_menu.addAction(self.context_menu.actionRemoveOption)\n self.context_menu.actionRemoveOption.triggered.connect(self.remove_item)\n elif (str(parent.text()) == \"General\") and (selected_item.column() == 0):\n self.context_menu.actionRemoveItem = QtWidgets.QAction(self)\n self.context_menu.actionRemoveItem.setText(\"Remove item\")\n self.context_menu.addAction(self.context_menu.actionRemoveItem)\n self.context_menu.actionRemoveItem.triggered.connect(self.remove_item)\n elif level == 2:\n # sections with 3 levels\n pass\n\n self.context_menu.exec_(self.view.viewport().mapToGlobal(position))", "title": "" }, { "docid": "b81eec154697590a8844bd6b25b26819", "score": "0.5850347", "text": "def build(self, sqt, actions):\n pageid = sqt.createMenu(self._page_name, -1, -1, -1, 3)\n groupid = sqt.createMenu(self._group_name, pageid, -1)\n for name in self._action_names:\n salome_widget = actions.__getitem__(name).build(sqt)\n sqt.createMenu(salome_widget, groupid, -1, -1, 3)", "title": "" }, { "docid": "655f511a8a0780e4fef95de52af08122", "score": "0.5846211", "text": "def StartingClick(self):\n pass", "title": "" }, { "docid": "5b97308a851f897fd79d89ae49ab1aee", "score": "0.58460176", "text": "def create_menu(self, parent):\n self.menubar = tk.Menu(self.main_frame)\n\n # File menu\n self.file_menu = tk.Menu(self.menubar)\n menu = self.file_menu\n menu.add_command(label=\"Open\", command=self.open)\n menu.add_command(label=\"Exit\", command=self.exit)\n self.menubar.add_cascade(label=\"File\", menu=self.file_menu)\n\n # Edit menu\n self.edit_menu = tk.Menu(self.menubar)\n menu = self.edit_menu\n menu.add_command(label=\"Complement\", command=self.complement)\n menu.add_command(label=\"Antiparallel\", command=self.antiparallel)\n menu.add_command(label=\"Reverse\", command=self.reverse)\n menu.add_command(label=\"Fix sequence\", command=self.fix_sequence)\n menu.add_command(label=\"Search\", 
command=self.search)\n self.menubar.add_cascade(label=\"Edit\", menu=self.edit_menu)\n\n # Translation menu\n self.translation_menu = tk.Menu(self.menubar)\n\n self.gencode_menu = tk.Menu(self.translation_menu)\n self.frame_menu = tk.Menu(self.translation_menu)\n\n menu = self.translation_menu\n menu.add_cascade(label=\"Genetic Codes\", menu=self.gencode_menu)\n menu.add_cascade(label=\"Frame\", menu=self.frame_menu)\n menu.add_separator()\n menu.add_command(label=\"Single frame translation\", command=self.translate)\n menu.add_command(\n label=\"Three frame translation (+)\",\n command=lambda: self.gcframe(direction=\"plus\"),\n )\n menu.add_command(\n label=\"Three frame translation (-)\",\n command=lambda: self.gcframe(direction=\"minus\"),\n )\n menu.add_command(label=\"Six frame translation\", command=self.gcframe)\n menu.add_command(label=\"Extract to FASTA\", command=self.extract)\n\n # Frames submenu\n self.frame_int = tk.IntVar()\n menu = self.frame_menu\n menu.add_radiobutton(label=\"+1\", variable=self.frame_int, value=1)\n menu.add_radiobutton(label=\"+2\", variable=self.frame_int, value=2)\n menu.add_radiobutton(label=\"+3\", variable=self.frame_int, value=3)\n menu.add_radiobutton(label=\"-1\", variable=self.frame_int, value=-1)\n menu.add_radiobutton(label=\"-2\", variable=self.frame_int, value=-2)\n menu.add_radiobutton(label=\"-3\", variable=self.frame_int, value=-3)\n self.frame_int.set(1)\n\n # Codon tables submenu\n self.current_codon_table = tk.StringVar()\n self.current_codon_table.set(\"Standard\")\n self.current_codon_table_id = 1\n\n keys = list(self.translation_tables.keys())\n keys.remove(\"Standard\")\n keys.sort()\n keys = [\"Standard\"] + keys\n\n menu = self.gencode_menu\n for table in keys:\n menu.add_radiobutton(\n label=table,\n command=self.set_codon_table,\n variable=self.current_codon_table,\n )\n\n self.menubar.add_cascade(label=\"Translations\", menu=self.translation_menu)\n\n # Tools menu\n self.tools_menu = tk.Menu(self.menubar)\n menu = self.tools_menu\n menu.add_command(label=\"Blast\", command=self.blast)\n menu.add_command(label=\"Stats\", command=self.statistics)\n self.menubar.add_cascade(label=\"Tools\", menu=self.tools_menu)\n\n # Help menu\n self.help_menu = tk.Menu(self.menubar)\n menu = self.help_menu\n menu.add_command(label=\"Help\", command=lambda: xbbtools_help())\n self.menubar.add_cascade(label=\"Help\", menu=self.help_menu)\n\n self.parent.config(menu=self.menubar)", "title": "" }, { "docid": "af2e77082b840859749bb4ac32dccac4", "score": "0.5842183", "text": "def make_menu():\n def click_exit(icon, item):\n # We need to kill the event thread...\n icon.timeout.set()\n icon.stop()\n\n def click_status(icon,item):\n # Actually we don't do anything here\n pass\n\n return pystray.Menu(\n pystray.MenuItem( 'Last sync: 1 minute',\n click_status ),\n pystray.Menu.SEPARATOR,\n pystray.MenuItem( 'Exit',\n click_exit ) )", "title": "" }, { "docid": "82b6567eb92203b497058c41bfea0137", "score": "0.5824653", "text": "def popup(self,event):\r\n # get the information from event\r\n x,y = event.GetPosition()\r\n sx, sy = self.view.CalcUnscrolledPosition(x, y)\r\n shape, attachment = self.view.FindShape(sx, sy)\r\n # right click at shape\r\n if shape:\r\n # creat the popupmenu when right click on shape\r\n popmenu = self.view.make_popmenu()\r\n # right click at canvas (not at shape)\r\n else:\r\n # creat the popupmenu when right click on canvas\r\n popmenu2 = self.view.make_popmenu_2()\r\n event.Skip(True)", "title": "" }, { "docid": 
"1c13e13a9c07c7ac762099302dc17acb", "score": "0.5820121", "text": "def _on_context_menu(self, pos):\n nid = self._tree.itemAt(pos)\n\n if nid is None:\n return\n\n _, node, object = self._get_node_data(nid)\n\n self._data = (node, object, nid)\n self._context = {'object': object,\n 'editor': self,\n 'node': node,\n 'info': self.ui.info,\n 'handler': self.ui.handler}\n\n # Try to get the parent node of the node clicked on:\n pnid = nid.parent()\n if pnid is None or pnid is self._tree.invisibleRootItem():\n parent_node = parent_object = None\n else:\n _, parent_node, parent_object = self._get_node_data(pnid)\n\n self._menu_node = node\n self._menu_parent_node = parent_node\n self._menu_parent_object = parent_object\n\n menu = node.get_menu(object)\n\n if menu is None:\n # Use the standard, default menu:\n menu = self._standard_menu(node, object)\n\n elif isinstance(menu, Menu):\n # Use the menu specified by the node:\n group = menu.find_group( NewAction )\n if group is not None:\n # Only set it the first time:\n group.id = ''\n actions = self._new_actions( node, object )\n if len( actions ) > 0:\n group.insert( 0, Menu( name = 'New', *actions ) )\n\n else:\n # All other values mean no menu should be displayed:\n menu = None\n\n # Only display the menu if a valid menu is defined:\n if menu is not None:\n qmenu = menu.create_menu( self._tree, self )\n qmenu.exec_(self._tree.mapToGlobal(pos))\n\n # Reset all menu related cached values:\n self._data = self._context = self._menu_node = \\\n self._menu_parent_node = self._menu_parent_object = None", "title": "" }, { "docid": "1954d829b0e8df8a3baaabebb4186d02", "score": "0.58158445", "text": "def startMenu():\r\n chooseDisplay(screen_menu)\r\n ROOT.fill(BLACK)\r\n \r\n title = font_large.render(\"SokoAnn\", 1, WHITE)\r\n author = font_small.render(\"©2020 Sankti Goździelewski\", 1, WHITE)\r\n\r\n button_play = font_medium.render(\"Nowa gra\", 1, WHITE)\r\n button_tutorial = font_medium.render(\"Instrukcja\", 1, WHITE)\r\n button_credits = font_medium.render(\"Autorzy\", 1, WHITE)\r\n\r\n ROOT.blit(title, (TEXT_INDENT, 50))\r\n ROOT.blit(author, (TEXT_INDENT, 100))\r\n\r\n ROOT.blit(button_play, (TEXT_INDENT, 250))\r\n ROOT.blit(button_tutorial, (TEXT_INDENT, 300))\r\n ROOT.blit(button_credits, (TEXT_INDENT, 350))\r\n\r\n buttonDisplay()", "title": "" }, { "docid": "1b73ec8577daac116eacf11eab574b81", "score": "0.58158237", "text": "def create(menuId=menuId):\n\n if pm.menu(menuId, exists=True):\n pm.deleteUI(menuId)\n\n pm.menu(menuId,\n parent=\"MayaWindow\",\n tearOff=True,\n allowOptionBoxes=True,\n label=menuId)\n\n return menuId", "title": "" }, { "docid": "22d9618b47da42f65d7083f78fde738c", "score": "0.5814135", "text": "def _assembleMenu (self, menubar):\r\n \r\n filemenu = Menu(menubar, tearoff=0)\r\n \r\n filemenu.add_command(label=\"New Character\", command=self.newCharacter) \r\n \r\n filemenu.add_separator()\r\n \r\n filemenu.add_command(label=\"Quit %s\" % self.main_title,\r\n command=lambda parent=self.parent: parent.quit())\r\n \r\n menubar.add_cascade(label=\"File\", menu=filemenu)\r\n self.parent.config(menu=menubar)", "title": "" }, { "docid": "9d41a4665a39b25e2e37fbb18ade57cc", "score": "0.5808464", "text": "def createHelpMenu(self):\n info_menu = tk.Menu(self.parent_menu)\n self.parent_menu.add_cascade(label=\"Help\",menu= info_menu)\n\n #Will call the method defined in the child class\n info_menu.add_separator()\n info_menu.add_command(label=\"\", command= self.secret)\n info_menu.add_separator()\n \n info_menu.add_separator()\n 
info_menu.add_command(label=\"How to operate\", command= self.showOperationInstructions)\n info_menu.add_separator()\n \n info_menu.add_command(label=\"Info About Contributors\",command=self.showContributors)\n info_menu.add_separator()", "title": "" }, { "docid": "e209437f4a155a1c98732213d49cfab9", "score": "0.5801848", "text": "def menu(variable = \"L0\", items = \"2\", option1 = \"\", option2 = \"\", option3 = \"\", option4 = \"\", option5 = \"\", option6 = \"\", option7 = \"\", option8 = \"\", cancelOnB = \"True\", layout = \"dialogue\"):\r\n element = makeElement()\r\n element[\"command\"] = \"EVENT_MENU\"\r\n element[\"args\"] = {\r\n \"variable\": variable,\r\n \"items\": items,\r\n \"option1\": option1,\r\n \"option2\": option2,\r\n \"option3\": option3,\r\n \"option4\": option4,\r\n \"option5\": option5,\r\n \"option6\": option6,\r\n \"option7\": option7,\r\n \"option8\": option8,\r\n \"cancelOnB\": cancelOnB,\r\n \"layout\": layout,\r\n }\r\n return element", "title": "" }, { "docid": "631001acc499b3beaf0d75144ebb7bc7", "score": "0.5801622", "text": "def create_menus(self):\n self.statusBar().showMessage(\"Ready\")\n close_action = QAction(\"Exit\", self)\n close_action.setShortcut(\"Ctrl+Q\")\n close_action.setStatusTip('Leave The App')\n close_action.triggered.connect(self.close_application)\n connect_action = QAction(\"Connect\", self)\n main_menu = self.menuBar()\n file_menu = main_menu.addMenu('&File')\n file_menu.addAction(connect_action)\n file_menu.addAction(close_action)", "title": "" }, { "docid": "b5d516f18b25f514f4d3f01e3af2d0a2", "score": "0.5795112", "text": "def context_menu(self, position):\n # get a menu\n self.context_menu = QtWidgets.QMenu()\n # get the index of the selected item\n idx = self.view.selectedIndexes()[0]\n # get the selected item text\n selected_text = str(idx.data())\n # get the selected item\n selected_item = idx.model().itemFromIndex(idx)\n # get the level of the selected item\n level = self.get_level_selected_item()\n if level == 0:\n # sections with only 1 level\n if selected_text == \"Options\":\n existing_entries = self.get_existing_entries()\n if \"NumberOfDimensions\" not in existing_entries:\n self.context_menu.actionAddNumberOfDimensions = QtWidgets.QAction(self)\n self.context_menu.actionAddNumberOfDimensions.setText(\"NumberOfDimensions\")\n self.context_menu.addAction(self.context_menu.actionAddNumberOfDimensions)\n self.context_menu.actionAddNumberOfDimensions.triggered.connect(self.add_numberofdimensions)\n if \"MaxGapInterpolate\" not in existing_entries:\n self.context_menu.actionAddMaxGapInterpolate = QtWidgets.QAction(self)\n self.context_menu.actionAddMaxGapInterpolate.setText(\"MaxGapInterpolate\")\n self.context_menu.addAction(self.context_menu.actionAddMaxGapInterpolate)\n self.context_menu.actionAddMaxGapInterpolate.triggered.connect(self.add_maxgapinterpolate)\n if \"FixTimeStepMethod\" not in existing_entries:\n self.context_menu.actionAddFixTimeStepMethod = QtWidgets.QAction(self)\n self.context_menu.actionAddFixTimeStepMethod.setText(\"FixTimeStepMethod\")\n self.context_menu.addAction(self.context_menu.actionAddFixTimeStepMethod)\n self.context_menu.actionAddFixTimeStepMethod.triggered.connect(self.add_fixtimestepmethod)\n if \"Truncate\" not in existing_entries:\n self.context_menu.actionAddTruncate = QtWidgets.QAction(self)\n self.context_menu.actionAddTruncate.setText(\"Truncate\")\n self.context_menu.addAction(self.context_menu.actionAddTruncate)\n 
self.context_menu.actionAddTruncate.triggered.connect(self.add_truncate)\n if \"TruncateThreshold\" not in existing_entries:\n self.context_menu.actionAddTruncateThreshold = QtWidgets.QAction(self)\n self.context_menu.actionAddTruncateThreshold.setText(\"TruncateThreshold\")\n self.context_menu.addAction(self.context_menu.actionAddTruncateThreshold)\n self.context_menu.actionAddTruncateThreshold.triggered.connect(self.add_truncatethreshold)\n if \"SeriesToCheck\" not in existing_entries:\n self.context_menu.actionAddSeriesToCheck = QtWidgets.QAction(self)\n self.context_menu.actionAddSeriesToCheck.setText(\"SeriesToCheck\")\n self.context_menu.addAction(self.context_menu.actionAddSeriesToCheck)\n self.context_menu.actionAddSeriesToCheck.triggered.connect(self.add_seriestocheck)\n elif level == 1:\n parent = selected_item.parent()\n if (str(parent.text()) == \"Options\") and (selected_item.column() == 0):\n self.context_menu.actionRemoveOption = QtWidgets.QAction(self)\n self.context_menu.actionRemoveOption.setText(\"Remove option\")\n self.context_menu.addAction(self.context_menu.actionRemoveOption)\n self.context_menu.actionRemoveOption.triggered.connect(self.remove_item)\n elif str(parent.text()) == \"Files\":\n if selected_text == \"In\":\n self.context_menu.actionAddInputFile = QtWidgets.QAction(self)\n self.context_menu.actionAddInputFile.setText(\"Add input file\")\n self.context_menu.addAction(self.context_menu.actionAddInputFile)\n self.context_menu.actionAddInputFile.triggered.connect(self.add_inputfile)\n elif level == 2:\n parent = selected_item.parent()\n section = selected_item.parent().parent()\n if ((str(section.text()) == \"Files\") and (str(parent.text()) == \"In\")):\n if (selected_item.column() == 0):\n self.context_menu.actionRemoveInputFile = QtWidgets.QAction(self)\n self.context_menu.actionRemoveInputFile.setText(\"Remove file\")\n self.context_menu.addAction(self.context_menu.actionRemoveInputFile)\n self.context_menu.actionRemoveInputFile.triggered.connect(self.remove_item)\n elif (selected_item.column() == 1):\n self.context_menu.actionBrowseInputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseInputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseInputFile)\n self.context_menu.actionBrowseInputFile.triggered.connect(self.browse_input_file)\n elif ((str(section.text()) == \"Files\") and (str(parent.text()) == \"Out\")):\n if str(parent.child(selected_item.row(), 0).text()) == \"ncFileName\":\n self.context_menu.actionBrowseOutputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseOutputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseOutputFile)\n self.context_menu.actionBrowseOutputFile.triggered.connect(self.browse_output_file)\n elif level == 3:\n pass\n\n self.context_menu.exec_(self.view.viewport().mapToGlobal(position))", "title": "" }, { "docid": "d6925ed12e2fcf67138ea341564bae22", "score": "0.5794818", "text": "def on_menuitem_client_activate(self, widget):\n self.set_client_visible(True)", "title": "" }, { "docid": "1726de903a53abb4b212b5688c5291d2", "score": "0.57918334", "text": "def runRenderLayerMenuUI():\n\trenderLayerMenu()\n\t#does not delete ui at the moment", "title": "" }, { "docid": "0c745e616175069b15d297abf795a435", "score": "0.57918227", "text": "def context_menu(self, position):\n # get a menu\n self.context_menu = QtWidgets.QMenu()\n # get the index of the selected item\n idx = self.view.selectedIndexes()[0]\n # get the selected item 
text\n selected_text = str(idx.data())\n # get the selected item\n selected_item = idx.model().itemFromIndex(idx)\n # get the level of the selected item\n level = self.get_level_selected_item()\n # initialise logical for inserting a separator\n add_separator = False\n if level == 0:\n # sections with only 1 level\n if selected_text == \"Files\":\n self.context_menu.actionAddFileEntry = QtWidgets.QAction(self)\n self.context_menu.actionAddFileEntry.setText(\"Add item\")\n self.context_menu.addAction(self.context_menu.actionAddFileEntry)\n self.context_menu.actionAddFileEntry.triggered.connect(self.add_fileentry)\n elif selected_text == \"Output\":\n pass\n elif selected_text == \"Options\":\n # get a list of existing entries\n existing_entries = self.get_existing_entries()\n # only put an option in the context menu if it is not already present\n if \"MaxGapInterpolate\" not in existing_entries:\n self.context_menu.actionAddMaxGapInterpolate = QtWidgets.QAction(self)\n self.context_menu.actionAddMaxGapInterpolate.setText(\"MaxGapInterpolate\")\n self.context_menu.addAction(self.context_menu.actionAddMaxGapInterpolate)\n self.context_menu.actionAddMaxGapInterpolate.triggered.connect(self.add_maxgapinterpolate)\n if \"InterpolateType\" not in existing_entries:\n self.context_menu.actionAddInterpolateType = QtWidgets.QAction(self)\n self.context_menu.actionAddInterpolateType.setText(\"InterpolateType\")\n self.context_menu.addAction(self.context_menu.actionAddInterpolateType)\n self.context_menu.actionAddInterpolateType.triggered.connect(self.add_interpolatetype)\n elif selected_text in [\"Drivers\"]:\n self.context_menu.actionAddVariable = QtWidgets.QAction(self)\n self.context_menu.actionAddVariable.setText(\"Add variable\")\n self.context_menu.addAction(self.context_menu.actionAddVariable)\n self.context_menu.actionAddVariable.triggered.connect(self.add_new_variable)\n elif level == 1:\n # sections with 2 levels\n # get the parent of the selected item\n parent = selected_item.parent()\n if (str(parent.text()) == \"Files\") and (selected_item.column() == 1):\n key = str(parent.child(selected_item.row(),0).text())\n if key in [\"file_path\", \"plot_path\"]:\n self.context_menu.actionBrowseFilePath = QtWidgets.QAction(self)\n self.context_menu.actionBrowseFilePath.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseFilePath)\n self.context_menu.actionBrowseFilePath.triggered.connect(self.browse_file_path)\n elif key in [\"in_filename\"]:\n self.context_menu.actionBrowseInputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseInputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseInputFile)\n self.context_menu.actionBrowseInputFile.triggered.connect(self.browse_input_file)\n elif key in [\"out_filename\"]:\n self.context_menu.actionBrowseOutputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseOutputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseOutputFile)\n self.context_menu.actionBrowseOutputFile.triggered.connect(self.browse_output_file)\n else:\n self.context_menu.actionBrowseAlternateFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseAlternateFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseAlternateFile)\n self.context_menu.actionBrowseAlternateFile.triggered.connect(self.browse_alternate_file)\n elif (str(parent.text()) == \"Files\") and (selected_item.column() == 0):\n key = 
str(parent.child(selected_item.row(),0).text())\n if key not in [\"file_path\", \"plot_path\", \"in_filename\", \"out_filename\"]:\n self.context_menu.actionRemoveItem = QtWidgets.QAction(self)\n self.context_menu.actionRemoveItem.setText(\"Remove item\")\n self.context_menu.addAction(self.context_menu.actionRemoveItem)\n self.context_menu.actionRemoveItem.triggered.connect(self.remove_item)\n else:\n pass\n elif (str(parent.text()) == \"Options\"):\n key = str(parent.child(selected_item.row(),0).text())\n if (selected_item.column() == 0):\n self.context_menu.actionRemoveOption = QtWidgets.QAction(self)\n self.context_menu.actionRemoveOption.setText(\"Remove option\")\n self.context_menu.addAction(self.context_menu.actionRemoveOption)\n self.context_menu.actionRemoveOption.triggered.connect(self.remove_item)\n elif (selected_item.column() == 1) and (key == \"InterpolateType\"):\n if selected_text != \"linear\":\n self.context_menu.actionChangeInterpolateType = QtWidgets.QAction(self)\n self.context_menu.actionChangeInterpolateType.setText(\"linear\")\n self.context_menu.addAction(self.context_menu.actionChangeInterpolateType)\n self.context_menu.actionChangeInterpolateType.triggered.connect(lambda:self.change_selected_text(\"linear\"))\n if selected_text != \"Akima\":\n self.context_menu.actionChangeInterpolateType = QtWidgets.QAction(self)\n self.context_menu.actionChangeInterpolateType.setText(\"Akima\")\n self.context_menu.addAction(self.context_menu.actionChangeInterpolateType)\n self.context_menu.actionChangeInterpolateType.triggered.connect(lambda:self.change_selected_text(\"Akima\"))\n elif (str(parent.text()) in [\"Drivers\"]):\n # get a list of existing entries\n existing_entries = self.get_existing_entries()\n # only put a QC check in the context menu if it is not already present\n if \"GapFillFromAlternate\" not in existing_entries:\n self.context_menu.actionAddAlternate = QtWidgets.QAction(self)\n self.context_menu.actionAddAlternate.setText(\"Add Alternate\")\n self.context_menu.addAction(self.context_menu.actionAddAlternate)\n self.context_menu.actionAddAlternate.triggered.connect(self.add_alternate)\n add_separator = True\n if \"GapFillUsingMDS\" not in existing_entries:\n self.context_menu.actionAddMDS = QtWidgets.QAction(self)\n self.context_menu.actionAddMDS.setText(\"Add MDS\")\n self.context_menu.addAction(self.context_menu.actionAddMDS)\n self.context_menu.actionAddMDS.triggered.connect(self.add_MDS)\n add_separator = True\n if \"GapFillFromClimatology\" not in existing_entries:\n self.context_menu.actionAddClimatology = QtWidgets.QAction(self)\n self.context_menu.actionAddClimatology.setText(\"Add Climatology\")\n self.context_menu.addAction(self.context_menu.actionAddClimatology)\n self.context_menu.actionAddClimatology.triggered.connect(self.add_climatology)\n add_separator = True\n if add_separator:\n add_separator = False\n self.context_menu.addSeparator()\n if \"RangeCheck\" not in existing_entries:\n self.context_menu.actionAddRangeCheck = QtWidgets.QAction(self)\n self.context_menu.actionAddRangeCheck.setText(\"Add RangeCheck\")\n self.context_menu.addAction(self.context_menu.actionAddRangeCheck)\n self.context_menu.actionAddRangeCheck.triggered.connect(self.add_rangecheck)\n add_separator = True\n if \"DependencyCheck\" not in existing_entries:\n self.context_menu.actionAddDependencyCheck = QtWidgets.QAction(self)\n self.context_menu.actionAddDependencyCheck.setText(\"Add DependencyCheck\")\n 
self.context_menu.addAction(self.context_menu.actionAddDependencyCheck)\n self.context_menu.actionAddDependencyCheck.triggered.connect(self.add_dependencycheck)\n add_separator = True\n if \"DiurnalCheck\" not in existing_entries:\n self.context_menu.actionAddDiurnalCheck = QtWidgets.QAction(self)\n self.context_menu.actionAddDiurnalCheck.setText(\"Add DiurnalCheck\")\n self.context_menu.addAction(self.context_menu.actionAddDiurnalCheck)\n self.context_menu.actionAddDiurnalCheck.triggered.connect(self.add_diurnalcheck)\n add_separator = True\n if \"ExcludeDates\" not in existing_entries:\n self.context_menu.actionAddExcludeDates = QtWidgets.QAction(self)\n self.context_menu.actionAddExcludeDates.setText(\"Add ExcludeDates\")\n self.context_menu.addAction(self.context_menu.actionAddExcludeDates)\n self.context_menu.actionAddExcludeDates.triggered.connect(self.add_excludedates)\n add_separator = True\n if add_separator:\n add_separator = False\n self.context_menu.addSeparator()\n self.context_menu.actionRemoveOption = QtWidgets.QAction(self)\n self.context_menu.actionRemoveOption.setText(\"Remove variable\")\n self.context_menu.addAction(self.context_menu.actionRemoveOption)\n self.context_menu.actionRemoveOption.triggered.connect(self.remove_item)\n elif level == 2:\n # sections with 3 levels\n subsubsection_name = str(idx.data())\n if subsubsection_name in [\"RangeCheck\", \"DependencyCheck\", \"DiurnalCheck\"]:\n self.context_menu.actionRemoveQCCheck = QtWidgets.QAction(self)\n self.context_menu.actionRemoveQCCheck.setText(\"Remove QC check\")\n self.context_menu.addAction(self.context_menu.actionRemoveQCCheck)\n self.context_menu.actionRemoveQCCheck.triggered.connect(self.remove_item)\n elif subsubsection_name in [\"ExcludeDates\"]:\n self.context_menu.actionAddExcludeDateRange = QtWidgets.QAction(self)\n self.context_menu.actionAddExcludeDateRange.setText(\"Add date range\")\n self.context_menu.addAction(self.context_menu.actionAddExcludeDateRange)\n self.context_menu.actionAddExcludeDateRange.triggered.connect(self.add_excludedaterange)\n self.context_menu.addSeparator()\n self.context_menu.actionRemoveQCCheck = QtWidgets.QAction(self)\n self.context_menu.actionRemoveQCCheck.setText(\"Remove QC check\")\n self.context_menu.addAction(self.context_menu.actionRemoveQCCheck)\n self.context_menu.actionRemoveQCCheck.triggered.connect(self.remove_item)\n elif subsubsection_name in [\"GapFillFromAlternate\", \"GapFillUsingMDS\", \"GapFillFromClimatology\"]:\n if subsubsection_name == \"GapFillFromAlternate\":\n self.context_menu.actionAddMoreAlternate = QtWidgets.QAction(self)\n self.context_menu.actionAddMoreAlternate.setText(\"Add Alternate\")\n self.context_menu.addAction(self.context_menu.actionAddMoreAlternate)\n self.context_menu.actionAddMoreAlternate.triggered.connect(self.add_more_alternate)\n self.context_menu.actionRemoveGFMethod = QtWidgets.QAction(self)\n self.context_menu.actionRemoveGFMethod.setText(\"Remove method\")\n self.context_menu.addAction(self.context_menu.actionRemoveGFMethod)\n self.context_menu.actionRemoveGFMethod.triggered.connect(self.remove_item)\n elif level == 3:\n # sections with 4 levels\n # get the parent text\n parent = idx.parent()\n parent_text = str(parent.data())\n if parent_text == \"ExcludeDates\":\n self.context_menu.actionRemoveExcludeDateRange = QtWidgets.QAction(self)\n self.context_menu.actionRemoveExcludeDateRange.setText(\"Remove date range\")\n self.context_menu.addAction(self.context_menu.actionRemoveExcludeDateRange)\n 
self.context_menu.actionRemoveExcludeDateRange.triggered.connect(self.remove_daterange)\n elif parent_text == \"GapFillFromAlternate\":\n # get a list of existing entries\n existing_entries = self.get_existing_entries()\n if \"fit\" not in existing_entries:\n self.context_menu.actionAddAltFit = QtWidgets.QAction(self)\n self.context_menu.actionAddAltFit.setText(\"Add fit\")\n self.context_menu.addAction(self.context_menu.actionAddAltFit)\n self.context_menu.actionAddAltFit.triggered.connect(self.add_alternate_fit)\n add_separator = True\n if \"lag\" not in existing_entries:\n self.context_menu.actionAddAltLag = QtWidgets.QAction(self)\n self.context_menu.actionAddAltLag.setText(\"Add lag\")\n self.context_menu.addAction(self.context_menu.actionAddAltLag)\n self.context_menu.actionAddAltLag.triggered.connect(self.add_alternate_lag)\n add_separator = True\n if add_separator:\n add_separator = False\n self.context_menu.addSeparator()\n self.context_menu.actionRemoveGFMethodVariable = QtWidgets.QAction(self)\n self.context_menu.actionRemoveGFMethodVariable.setText(\"Remove variable\")\n self.context_menu.addAction(self.context_menu.actionRemoveGFMethodVariable)\n self.context_menu.actionRemoveGFMethodVariable.triggered.connect(self.remove_item)\n elif level == 4:\n selected_item = idx.model().itemFromIndex(idx)\n selected_text = str(idx.data())\n parent = idx.parent()\n key = parent.child(selected_item.row(), 0)\n key_text = str(key.data())\n if selected_text in [\"fit\", \"lag\"]:\n self.context_menu.actionRemoveItem = QtWidgets.QAction(self)\n self.context_menu.actionRemoveItem.setText(\"Remove item\")\n self.context_menu.addAction(self.context_menu.actionRemoveItem)\n self.context_menu.actionRemoveItem.triggered.connect(self.remove_item)\n elif key_text == \"fit\":\n if selected_text != \"ols\":\n self.context_menu.actionOLS = QtWidgets.QAction(self)\n self.context_menu.actionOLS.setText(\"OLS\")\n self.context_menu.addAction(self.context_menu.actionOLS)\n self.context_menu.actionOLS.triggered.connect(lambda:self.change_selected_text(\"ols\"))\n if selected_text != \"ols_thru0\":\n self.context_menu.actionOLSThroughOrigin = QtWidgets.QAction(self)\n self.context_menu.actionOLSThroughOrigin.setText(\"OLS through origin\")\n self.context_menu.addAction(self.context_menu.actionOLSThroughOrigin)\n self.context_menu.actionOLSThroughOrigin.triggered.connect(lambda:self.change_selected_text(\"ols_thru0\"))\n if selected_text != \"replace\":\n self.context_menu.actionReplace = QtWidgets.QAction(self)\n self.context_menu.actionReplace.setText(\"replace\")\n self.context_menu.addAction(self.context_menu.actionReplace)\n self.context_menu.actionReplace.triggered.connect(lambda:self.change_selected_text(\"replace\"))\n if selected_text != \"mrev\":\n self.context_menu.actionMREV = QtWidgets.QAction(self)\n self.context_menu.actionMREV.setText(\"mrev\")\n self.context_menu.addAction(self.context_menu.actionMREV)\n self.context_menu.actionMREV.triggered.connect(lambda:self.change_selected_text(\"mrev\"))\n if selected_text != \"rma\":\n self.context_menu.actionRMA = QtWidgets.QAction(self)\n self.context_menu.actionRMA.setText(\"rma\")\n self.context_menu.addAction(self.context_menu.actionRMA)\n self.context_menu.actionRMA.triggered.connect(lambda:self.change_selected_text(\"rma\"))\n if selected_text != \"odr\":\n self.context_menu.actionODR = QtWidgets.QAction(self)\n self.context_menu.actionODR.setText(\"odr\")\n self.context_menu.addAction(self.context_menu.actionODR)\n 
self.context_menu.actionODR.triggered.connect(lambda:self.change_selected_text(\"odr\"))\n elif key_text == \"lag\":\n if selected_text != \"yes\":\n self.context_menu.actionYes = QtWidgets.QAction(self)\n self.context_menu.actionYes.setText(\"Yes\")\n self.context_menu.addAction(self.context_menu.actionYes)\n self.context_menu.actionYes.triggered.connect(lambda:self.change_selected_text(\"yes\"))\n if selected_text != \"no\":\n self.context_menu.actionNo = QtWidgets.QAction(self)\n self.context_menu.actionNo.setText(\"No\")\n self.context_menu.addAction(self.context_menu.actionNo)\n self.context_menu.actionNo.triggered.connect(lambda:self.change_selected_text(\"no\"))\n\n self.context_menu.exec_(self.view.viewport().mapToGlobal(position))", "title": "" }, { "docid": "1276bdcea43dd347ac4c5effb5d6feba", "score": "0.57909495", "text": "def main_menu(self):\n self.menu_buttons = []\n self.map_editor_button = Button(game=self, x=(WIDTH // 2 - 100), y=HEIGHT // 2, text=\"Map Editor\")\n self.menu_buttons.append(self.map_editor_button)\n\n self.start_game_button = Button(game=self, x=(WIDTH // 2 + 100), y=HEIGHT // 2, text=\"Start Game\")\n self.menu_buttons.append(self.start_game_button)\n\n self.logout_button = Button(game=self, x=(WIDTH // 2 - 100), y=(HEIGHT // 2 + 48), text=\"Logout\")\n self.menu_buttons.append(self.logout_button)\n\n self.groups_button = Button(game=self, x=(WIDTH // 2 + 100), y=(HEIGHT // 2 + 48), text=\"Groups\")\n self.menu_buttons.append(self.groups_button)\n\n self.stats_button = Button(game=self, x=(WIDTH // 2 + 100), y=(HEIGHT // 2 + 96), text=\"Stats\")\n self.menu_buttons.append(self.stats_button)\n\n self.options_button = Button(game=self, x=(WIDTH // 2 - 100), y=(HEIGHT // 2 + 96), text=\"Options\")\n self.menu_buttons.append(self.options_button)", "title": "" }, { "docid": "787a1e89837997d119d81de778c4d98f", "score": "0.57884896", "text": "def init_ui(self, ):\n self.mm = MenuManager.get()\n p = self.mm.menus['Jukebox']\n self.menu = self.mm.create_menu(\"Reftracker\", p, command=self.run)", "title": "" }, { "docid": "6c8f9b8e8df3ddb05a84d4c98d82f449", "score": "0.578189", "text": "def test_create_main_menu(self):\n self.assertIsInstance(MenuState.create_main_menu(), state.ButtonSet)", "title": "" }, { "docid": "bf22b36ccaa3a25282d38143015c1951", "score": "0.5780407", "text": "def on_click(self, event):\n self.set_active()", "title": "" }, { "docid": "40e3079b91a0f3514aff367b38b326ab", "score": "0.5777277", "text": "def win_obj_mouse_click(main_dlg,title=\"\", auto_id=\"\", control_type=\"\"):\n loader.win_obj_mouse_click(main_dlg,title, auto_id, control_type)", "title": "" }, { "docid": "b2cb07b6b70ec1de74a97900d22ff376", "score": "0.57677317", "text": "def showContextMenu(self, pos, add=True, color=True, attribute=True):\n menu = QtGui.QMenu()\n self._parent.createNodesMenu(menu, self.mapToScene(pos), add=add, color=color, attribute=attribute)\n menu.exec_(self.mapToGlobal(pos))", "title": "" }, { "docid": "a02f61f5ea130b220a02a6ade6fce0d9", "score": "0.57655257", "text": "def handle_context_menu(self, pos):\r\n items = self.selectedItems()\r\n menu = QtGui.QMenu(self)\r\n if not items:\r\n self.add_action = QtGui.QAction(\"Add\", self)\r\n self.connect(self.add_action, QtCore.SIGNAL(\"triggered (bool)\"), \r\n self.add_item)\r\n menu.addAction(self.add_action)\r\n\r\n elif type(items[0]) == SceneTreeWidgetItem:\r\n item = items[0]\r\n\r\n # color picker\r\n self.color_action = QtGui.QAction(\"Color\", self)\r\n self.connect(self.color_action, 
QtCore.SIGNAL(\"triggered (bool)\"), \r\n self.handle_set_color)\r\n menu.addAction(self.color_action)\r\n\r\n # shading toggle menu item\r\n self.shading_menu = QtGui.QMenu(\"Shading\", self)\r\n shading_group = QtGui.QActionGroup(self.shading_menu)\r\n\r\n self.clearAct = QtGui.QAction(\"Clear\", self)\r\n self.clearAct.setCheckable(True)\r\n self.clearAct.setActionGroup(shading_group)\r\n self.clearAct.setData(-1)\r\n self.clearAct.setChecked(item.object.mode == -1)\r\n self.clearAct.toggled.connect(self.handle_set_mode)\r\n self.shading_menu.addAction(self.clearAct)\r\n self.shading_menu.addSeparator()\r\n\r\n self.offAct = QtGui.QAction(\"Off\", self)\r\n #self.offAct.setShortcut(\"0\")\r\n self.offAct.setCheckable(True)\r\n self.offAct.setActionGroup(shading_group)\r\n self.offAct.setData(Mode.OFF)\r\n self.offAct.setChecked(item.object.mode == Mode.OFF)\r\n self.offAct.toggled.connect(self.handle_set_mode)\r\n self.shading_menu.addAction(self.offAct)\r\n\r\n self.fillAct = QtGui.QAction(\"Fill\", self)\r\n #self.fillAct.setShortcut(\"1\")\r\n self.fillAct.setCheckable(True)\r\n self.fillAct.setActionGroup(shading_group)\r\n self.fillAct.setData(Mode.FILL)\r\n self.fillAct.setChecked(item.object.mode == Mode.FILL)\r\n self.fillAct.toggled.connect(self.handle_set_mode)\r\n self.shading_menu.addAction(self.fillAct)\r\n \r\n self.lineAct = QtGui.QAction(\"Line\", self)\r\n #self.lineAct.setShortcut(\"2\")\r\n self.lineAct.setCheckable(True)\r\n self.lineAct.setActionGroup(shading_group)\r\n self.lineAct.setData(Mode.LINE)\r\n self.lineAct.setChecked(item.object.mode == Mode.LINE)\r\n self.lineAct.toggled.connect(self.handle_set_mode)\r\n self.shading_menu.addAction(self.lineAct)\r\n\r\n self.pointAct = QtGui.QAction(\"Point \", self)\r\n #self.pointAct.setShortcut(\"3\")\r\n self.pointAct.setCheckable(True)\r\n self.pointAct.setActionGroup(shading_group)\r\n self.pointAct.setData(Mode.POINT)\r\n self.pointAct.setChecked(item.object.mode == Mode.POINT)\r\n self.pointAct.toggled.connect(self.handle_set_mode)\r\n self.shading_menu.addAction(self.pointAct)\r\n \r\n self.bboxAct = QtGui.QAction(\"Bounds \", self)\r\n #self.bboxAct.setShortcut(\"4\")\r\n self.bboxAct.setCheckable(True)\r\n self.bboxAct.setActionGroup(shading_group)\r\n self.bboxAct.setData(Mode.BOUNDS)\r\n self.bboxAct.setChecked(item.object.mode == Mode.BOUNDS)\r\n self.bboxAct.toggled.connect(self.handle_set_mode)\r\n self.shading_menu.addAction(self.bboxAct)\r\n\r\n menu.addMenu(self.shading_menu)\r\n menu.addSeparator()\r\n\r\n # hide/show\r\n if item.object.loaded:\r\n self.load_action = QtGui.QAction(\"Hide\", self)\r\n self.connect(self.load_action, QtCore.SIGNAL(\"triggered (bool)\"), \r\n item.unload)\r\n else:\r\n self.load_action = QtGui.QAction(\"Show\", self)\r\n self.connect(self.load_action, QtCore.SIGNAL(\"triggered (bool)\"), \r\n item.load)\r\n menu.addAction(self.load_action)\r\n\r\n # duplicate scene\r\n #self.duplicate_action = QtGui.QAction(\"Duplicate\", self)\r\n #self.connect(self.duplicate_action, QtCore.SIGNAL(\"triggered (bool)\"), \r\n # self.handle_duplicate_item)\r\n #menu.addAction(self.duplicate_action)\r\n\r\n # remove scene\r\n self.remove_action = QtGui.QAction(\"Remove\", self)\r\n self.connect(self.remove_action, QtCore.SIGNAL(\"triggered (bool)\"), \r\n self.handle_remove_item)\r\n menu.addAction(self.remove_action)\r\n\r\n else:\r\n item = items[0]\r\n if type(item) == CameraTreeWidgetItem:\r\n self.view_action = QtGui.QAction(\"Look through selected\", self)\r\n 
self.connect(self.view_action, QtCore.SIGNAL(\"triggered (bool)\"), \r\n self.view_camera)\r\n menu.addAction(self.view_action)\r\n\r\n else:\r\n self.copy_name_action = QtGui.QAction(\"Copy name\", self)\r\n self.connect(self.copy_name_action, QtCore.SIGNAL(\"triggered (bool)\"), \r\n self.copy_name)\r\n menu.addAction(self.copy_name_action)\r\n\r\n menu.popup(self.mapToGlobal(pos))", "title": "" }, { "docid": "01cb58aeb0a05ad6d3c480683947a836", "score": "0.57621545", "text": "def _build_main_menu(self):\n actions = self.actions\n main_menu = self.menu\n\n \"\"\"menus_desc = (\n (\"Current study case\",\n [\"update-mesh\", \"edit\", \"copy\", \"rename\", \"remove\", \"run\", \"stop\", \"status\"]),\n (\"Tools\",\n [\"run-astk\", \"run-eficas\"]),\n (\"Wizards\",\n [\"linear-elastic\", \"modal-analysis\", \"linear-thermic\", \"Crack-analysis\"]),\n )\n for menu_title, actions_keys in menus_desc:\n menu = main_menu.add_menu(menu_title)\n #peter.zhang, add seperator\n main_menu.add_action(SEP)\n for key in actions_keys:\n #peter.zhang, add menu items directly.\n #menu.add_action(actions[key])\n main_menu.add_action(actions[key])\"\"\"\n menus_desc = [\"add\",\"update-mesh\", \"edit\", \"copy\", \"rename\", \"remove\", \"run\", \"stop\", \"status\", \"run-astk\", \"run-eficas\"]\n pagename = [\"linear-elastic\", \"modal-analysis\", \"linear-thermic\", \"Crack-analysis\"]\n page_actions = [\"static_model_module\", \"modal_analysis_module\", \"linear_thermic_module\", \"crack_analysis_module\"]\n #modal_analysis_page_actions = [\"modal-analysis-model-define\", \"modal-analysis-mesh-selection\", \"modal-analysis-elementary-characteristics\", \"modal-analysis-material-properties\", \"modal-analysis-boundaries-conditions\", \"modal-analysis-number-of-modes\"]\n #linear_thermic_page_actions = [\"linear-thermic-model-define\", \"linear-thermic-mesh-selection\", \"linear-thermic-material-properties\", \"linear-thermic-boundaries-degrees-conditions\", \"linear-thermic-boundaries-pressure-conditions\"]\n \n op_page = Page(\"Operations\",\"Solver\", menus_desc)\n op_page.build(self.sqt, actions)\n \n ms_page = Page(\"Analysis\", \"Sets\", page_actions)\n ms_page.build(self.sqt, actions)\n \n #lt_page = Page(\"Linear thermic\", \"Sets\", linear_thermic_page_actions)\n #lt_page.build(self.sqt, actions)\n \n #le_page = Page(\"linear-elastic\",\"Sets\", linear_static_page_actions)\n #le_page.build(self.sqt, actions)", "title": "" }, { "docid": "e5db292ff86604be32759eb9ede13a52", "score": "0.5751222", "text": "def modify_menu(self):\n options_menu = wx.Menu()\n\n title = 'Switch B-Scan Orientation'\n description = 'Toggles orientation between horizontal and vertical'\n self.toggle_orientation_menu = wx.MenuItem(options_menu, wx.ID_ANY,\n title, description)\n\n options_menu.Append(self.toggle_orientation_menu)\n\n self.menu_bar.Append(options_menu, '&Options')\n self.SetMenuBar(self.menu_bar)\n self.Fit()", "title": "" }, { "docid": "bfd4a5157c551bc181cdfcd41958386f", "score": "0.5748992", "text": "def drawSoloMenu(self):\n pass", "title": "" }, { "docid": "1f5f352f1f6eb4b7bdf2cb09c269e5c9", "score": "0.5740647", "text": "def display_menu():\r\n display.blit(logo, (0 ,0))\r\n start_button.draw(display)\r\n start_button.mouse_event(display, light_green)\r\n start_button.draw_text(\"Start\" , display)\r\n\r\n exit_button.draw(display)\r\n exit_button.mouse_event(display, light_red)\r\n exit_button.draw_text(\"Exit\" , display)\r\n\r\n rules_button.draw(display)\r\n rules_button.mouse_event(display, 
light_blue)\r\n rules_button.draw_text(\"Rules\", display)", "title": "" }, { "docid": "31f982f7fe73fa51f8c09cbc9bded8b6", "score": "0.5737788", "text": "def setCurrentMenu(self, menu):\n\n self.currentMenu = menu", "title": "" }, { "docid": "064b551f885aa9d9bd5e3818b7687acd", "score": "0.5737321", "text": "def create_menu(self):\n if self.__parent.checking:\n return\n\n self.__parent.checking = True\n self.__parent.draw_data()\n self.export_menu_layout = QtWidgets.QHBoxLayout()\n self.export_menu_layout.setSpacing(5)\n self.export_menu_layout.setObjectName(\"Buttons layout\")\n\n button_export = QtWidgets.QPushButton(\"Export XML\")\n button_exportHTML = QtWidgets.QPushButton(\"Export HTML\")\n button_cancel = QtWidgets.QPushButton(\"Cancel\")\n self.export_menu_layout.addWidget(button_export)\n self.export_menu_layout.addWidget(button_exportHTML)\n button_export.clicked.connect(self.export_button_signal)\n button_exportHTML.clicked.connect(self.export_html_signal)\n button_cancel.clicked.connect(self.export_buttons_cancel_slot)\n self.export_menu_layout.addWidget(button_cancel)\n\n # button_exportHTML.setDisabled(True)\n\n self.__parent.frame_layout.insertLayout(1, self.export_menu_layout)", "title": "" }, { "docid": "2f378895835de0a34354c0a8985b18d1", "score": "0.57355875", "text": "def click_statement_templates_submenu(self):\n self.set_existing_handles()\n self.click_element(self.statement_templates_submenu_locator)\n self.switch_to_window()", "title": "" }, { "docid": "9c8642409557157c5c193f29e00d87c4", "score": "0.5728083", "text": "def on_context_menu(self, point):\n menu = QtGui.QMenu(self)\n # new menu item\n new = QtGui.QAction('New', self)\n menu.addAction(new)\n # edit menu item\n edit = QtGui.QAction('Edit', self)\n menu.addAction(edit)\n menu.addSeparator()\n # settings menu item\n settings = QtGui.QAction('Settings', self)\n menu.addAction(settings)\n # make the connections.\n new.triggered.connect(self.make_new_config)\n edit.triggered.connect(self.edit_current_config)\n settings.triggered.connect(self.settings)\n # showing the menu.\n menu.exec_(self.toolButton.mapToGlobal(point))", "title": "" }, { "docid": "6d4ea3845b075bc8e445157e5dc76642", "score": "0.5715242", "text": "def click_rating_method_submenu(self):\n self.set_existing_handles()\n self.click_element(self.rating_method_submenu_locator)", "title": "" }, { "docid": "9123c00ec0a31198df875af72390d7e3", "score": "0.5713129", "text": "def click_product_submenu(self):\n self.set_existing_handles()\n self.click_element(self.product_submenu_locator)\n self.switch_to_window()", "title": "" }, { "docid": "511bd34065cd94641c3cf8c7c35e167b", "score": "0.57125235", "text": "def add_to_menu ( self, menu_item ):\n action = menu_item.item.action\n self.eval_when( action.enabled_when, menu_item, 'enabled' )\n self.eval_when( action.checked_when, menu_item, 'checked' )", "title": "" }, { "docid": "90bd26dbfbe6b78b1ea05177ca656977", "score": "0.57070076", "text": "def menu(*args, **kwargs):\n return el('menu', *args, **kwargs)", "title": "" }, { "docid": "7fe153ee8902c21a3ab412b1e173a539", "score": "0.570104", "text": "def popup(self, event):\n global popup_menu\n\n popup_menu = tk.Menu(self.master, tearoff=0)\n # Function used to create a custom cube\n popup_menu.add_command(label=\"Créer cube personnalisé ici\",\n command=lambda canv=self.canvas, contain=self.container, case_dict=self.__dict_case, grid=self.grid:\n self.user_action.preprocess_creation(\n event,\n x=event.x,\n y=event.y,\n canvas=canv,\n container=contain,\n 
dict_case=case_dict,\n grid=grid,\n instant=False\n ),\n )\n\n # Function used_to create a cube with his default info\n popup_menu.add_command(label=\"Créer instantanément un cube ici\",\n command=lambda canv=self.canvas, contain=self.container, case_dict=self.__dict_case, grid=self.grid:\n self.user_action.preprocess_creation(\n event,\n x=event.x,\n y=event.y,\n canvas=canv,\n container=contain,\n dict_case=case_dict,\n grid=grid,\n instant=True\n ),\n )\n\n # Function used to created a columns of cube\n popup_menu.add_command(label=\"Créer colonne ici\",\n command=lambda canv=self.canvas, contain=self.container, case_dict=self.__dict_case, grid=self.grid:\n self.user_action.preprocess_creation(\n event,\n x=event.x,\n y=event.y,\n canvas=canv,\n container=contain,\n dict_case=case_dict,\n grid=grid,\n instant=False,\n kind=\"columns\",\n ))\n popup_menu.add_command(label=\"Créer ligne ici\",\n command=lambda canv=self.canvas, contain=self.container, case_dict=self.__dict_case, grid=self.grid:\n self.user_action.preprocess_creation(\n event,\n x=event.x,\n y=event.y,\n canvas=canv,\n container=contain,\n dict_case=case_dict,\n grid=grid,\n instant=False,\n kind=\"lines\",\n ))\n popup_menu.add_separator()\n popup_menu.add_command(label=\"Detruire cube\",\n command=lambda canv=self.canvas, contain=self.container, cube_list=self.__list_cube:\n self.user_action.destroy_cube(\n event,\n canvas=canv,\n container=contain,\n list_cube=cube_list\n ))\n\n try:\n popup_menu.tk_popup(event.x_root, event.y_root)\n finally:\n popup_menu.grab_release()\n\n self.canvas.bind('<Button-1>', self.destroy_popup)", "title": "" }, { "docid": "6063a546eb0d04e194a6183b0adbd54c", "score": "0.56990206", "text": "def __init__(self, menu, x, y, texts, sound_player, action, selected=False):\n super().__init__(pg.SYSTEM_CURSOR_HAND, sound_player, BUTTON_CLICK, action)\n self.menu = menu\n\n self.x = x\n self.y = y\n self.w = H(96)\n self.h = H(160)\n\n self.selected = selected\n self.rect = pg.Rect(self.x, self.y, self.w, self.h)\n self.texts = texts\n self.text_surface = None\n self.text_pos = None\n\n self.bg_surface = pg.image.load(SIDE_BUTTON_BG).convert_alpha()\n self.bg_surface = pg.transform.scale(self.bg_surface, (self.w, self.h))", "title": "" }, { "docid": "51b0164bba4e378cd9bc485b06bc8957", "score": "0.56964236", "text": "def menu_button(self):\n\n self.menu_text = STAT_FONT.render('Menu', True, (100, 100, 100))\n return self.menu_text", "title": "" }, { "docid": "13547b1d5debce7ff996b18755208f70", "score": "0.5678982", "text": "def addContextActions(self, menu):\n pass", "title": "" }, { "docid": "1408956e8f35129b385df4a9fc5ebcfa", "score": "0.5673221", "text": "def initmenu(self):\n\n self.agr = gtk.AccelGroup()\n self.window.add_accel_group(self.agr)\n\n self.menu_bar = gtk.MenuBar()\n self.mainbox.pack_start(self.menu_bar, False, False, 0)\n self.menu_bar.show()\n\n # Menu desplegable de Juego\n self.root_juego = gtk.MenuItem(\"Juego\")\n self.root_juego.show()\n self.menu_juego = gtk.Menu()\n\n self.menu_juego_item1 = gtk.ImageMenuItem(gtk.STOCK_NEW, self.agr)\n self.key, self.mod = gtk.accelerator_parse(\"<Control>N\")\n self.menu_juego_item1.add_accelerator(\"activate\", self.agr, self.key, self.mod, gtk.ACCEL_VISIBLE)\n self.menu_juego_item1.connect(\"activate\", self.menu_new, \"Nuevo\")\n self.menu_juego.append(self.menu_juego_item1)\n self.menu_juego_item1.show()\n\n self.menu_juego_item2 = gtk.ImageMenuItem(gtk.STOCK_SAVE, self.agr)\n self.key, self.mod = gtk.accelerator_parse(\"<Control>S\")\n 
self.menu_juego_item2.add_accelerator(\"activate\", self.agr, self.key, self.mod, gtk.ACCEL_VISIBLE)\n self.menu_juego_item2.connect(\"activate\", self.menu_save, \"Guardar\")\n self.menu_juego.append(self.menu_juego_item2)\n self.menu_juego_item2.show()\n\n self.menu_juego_item3 = gtk.ImageMenuItem(gtk.STOCK_OPEN, self.agr)\n self.key, self.mod = gtk.accelerator_parse(\"<Control>O\")\n self.menu_juego_item3.add_accelerator(\"activate\", self.agr, self.key, self.mod, gtk.ACCEL_VISIBLE)\n self.menu_juego_item3.connect(\"activate\", self.menu_load, \"Cargar\")\n self.menu_juego.append(self.menu_juego_item3)\n self.menu_juego_item3.show()\n\n self.menu_juego_item4 = gtk.ImageMenuItem(gtk.STOCK_QUIT, self.agr)\n self.key, self.mod = gtk.accelerator_parse(\"<Control>Q\")\n self.menu_juego_item4.add_accelerator(\"activate\", self.agr, self.key, self.mod, gtk.ACCEL_VISIBLE)\n self.menu_juego_item4.connect(\"activate\", self.quit_program)\n self.menu_juego.append(self.menu_juego_item4)\n self.menu_juego_item4.show()\n\n # menu desplegable de Editar\n self.root_editar = gtk.MenuItem(\"Editar\")\n self.root_editar.show()\n self.menu_editar = gtk.Menu()\n\n self.menu_editar_item1 = gtk.ImageMenuItem(gtk.STOCK_PREFERENCES, self.agr)\n self.key, self.mod = gtk.accelerator_parse(\"<Control>P\")\n self.menu_editar_item1.add_accelerator(\"activate\", self.agr, self.key, self.mod, gtk.ACCEL_VISIBLE)\n self.menu_editar_item1.connect(\"activate\", self.menu_preferences, \"Preferencias\")\n self.menu_editar.append(self.menu_editar_item1)\n self.menu_editar_item1.show()\n\n # menu desplegable de Ayuda\n self.root_ayuda = gtk.MenuItem(\"Ayuda\")\n self.root_ayuda.show()\n self.menu_ayuda = gtk.Menu()\n\n self.menu_ayuda_item1 = gtk.ImageMenuItem(gtk.STOCK_HELP)\n self.key, self.mod = gtk.accelerator_parse(\"F1\")\n self.menu_ayuda_item1.add_accelerator(\"activate\", self.agr, self.key, self.mod, gtk.ACCEL_VISIBLE)\n self.menu_ayuda_item1.connect(\"activate\", self.menu_contents, \"Contenidos\")\n self.menu_ayuda.append(self.menu_ayuda_item1)\n self.menu_ayuda_item1.show()\n\n self.menu_ayuda_item2 = gtk.ImageMenuItem(gtk.STOCK_ABOUT)\n self.key, self.mod = gtk.accelerator_parse(\"F2\")\n self.menu_ayuda_item2.add_accelerator(\"activate\", self.agr, self.key, self.mod, gtk.ACCEL_VISIBLE)\n self.menu_ayuda_item2.connect(\"activate\", self.menu_about, \"Acerca de\")\n self.menu_ayuda.append(self.menu_ayuda_item2)\n self.menu_ayuda_item2.show()\n\n self.root_juego.set_submenu(self.menu_juego)\n self.root_editar.set_submenu(self.menu_editar)\n self.root_ayuda.set_submenu(self.menu_ayuda)\n\n self.menu_bar.append(self.root_juego)\n self.menu_bar.append(self.root_editar)\n self.menu_bar.append(self.root_ayuda)", "title": "" }, { "docid": "3c514878c4ad7cc7e1e7971e4dd2b771", "score": "0.5671529", "text": "def newMenu(self, *args):\n return _soqt.SoQtPopupMenu_newMenu(self, *args)", "title": "" }, { "docid": "52d52871c23627e1f707ddbdf96108b7", "score": "0.5669422", "text": "def context_menu(self, position):\n # get a menu\n self.context_menu = QtWidgets.QMenu()\n # get the index of the selected item\n idx = self.view.selectedIndexes()[0]\n # get the level of the selected item\n level = self.get_level_selected_item()\n if level == 0:\n add_separator = False\n selected_text = str(idx.data())\n self.section_headings = []\n root = self.model.invisibleRootItem()\n for i in range(root.rowCount()):\n self.section_headings.append(str(root.child(i).text()))\n if \"Options\" not in self.section_headings and selected_text == 
\"Files\":\n self.context_menu.actionAddOptionsSection = QtWidgets.QAction(self)\n self.context_menu.actionAddOptionsSection.setText(\"Add Options section\")\n self.context_menu.addAction(self.context_menu.actionAddOptionsSection)\n self.context_menu.actionAddOptionsSection.triggered.connect(self.add_options_section)\n add_separator = True\n if selected_text == \"Files\":\n if add_separator:\n self.context_menu.addSeparator()\n add_separator = False\n existing_entries = self.get_existing_entries()\n if \"file_path\" not in existing_entries:\n self.context_menu.actionAddfile_path = QtWidgets.QAction(self)\n self.context_menu.actionAddfile_path.setText(\"Add file_path\")\n self.context_menu.addAction(self.context_menu.actionAddfile_path)\n self.context_menu.actionAddfile_path.triggered.connect(self.add_file_path)\n if \"in_filename\" not in existing_entries:\n self.context_menu.actionAddin_filename = QtWidgets.QAction(self)\n self.context_menu.actionAddin_filename.setText(\"Add in_filename\")\n self.context_menu.addAction(self.context_menu.actionAddin_filename)\n self.context_menu.actionAddin_filename.triggered.connect(self.add_in_filename)\n if \"out_filename\" not in existing_entries:\n self.context_menu.actionAddout_filename = QtWidgets.QAction(self)\n self.context_menu.actionAddout_filename.setText(\"Add out_filename\")\n self.context_menu.addAction(self.context_menu.actionAddout_filename)\n self.context_menu.actionAddout_filename.triggered.connect(self.add_out_filename)\n if \"plot_path\" not in existing_entries:\n self.context_menu.actionAddplot_path = QtWidgets.QAction(self)\n self.context_menu.actionAddplot_path.setText(\"Add plot_path\")\n self.context_menu.addAction(self.context_menu.actionAddplot_path)\n self.context_menu.actionAddplot_path.triggered.connect(self.add_plot_path)\n elif selected_text == \"Options\":\n existing_entries = self.get_existing_entries()\n if \"SONIC_Check\" not in existing_entries:\n self.context_menu.actionSonicCheck = QtWidgets.QAction(self)\n self.context_menu.actionSonicCheck.setText(\"SONIC_Check\")\n self.context_menu.addAction(self.context_menu.actionSonicCheck)\n self.context_menu.actionSonicCheck.triggered.connect(self.add_sonic_check)\n add_separator = True\n if \"IRGA_Check\" not in existing_entries:\n self.context_menu.actionIRGACheck = QtWidgets.QAction(self)\n self.context_menu.actionIRGACheck.setText(\"IRGA_Check\")\n self.context_menu.addAction(self.context_menu.actionIRGACheck)\n self.context_menu.actionIRGACheck.triggered.connect(self.add_irga_check)\n add_separator = True\n if add_separator:\n self.context_menu.addSeparator()\n add_separator = False\n self.context_menu.actionRemoveOptionsSection = QtWidgets.QAction(self)\n self.context_menu.actionRemoveOptionsSection.setText(\"Remove section\")\n self.context_menu.addAction(self.context_menu.actionRemoveOptionsSection)\n self.context_menu.actionRemoveOptionsSection.triggered.connect(self.remove_section)\n elif selected_text == \"Variables\":\n self.context_menu.actionAddVariable = QtWidgets.QAction(self)\n self.context_menu.actionAddVariable.setText(\"Add variable\")\n self.context_menu.addAction(self.context_menu.actionAddVariable)\n self.context_menu.actionAddVariable.triggered.connect(self.add_variable)\n elif selected_text == \"Plots\":\n self.context_menu.actionAddTimeSeries = QtWidgets.QAction(self)\n self.context_menu.actionAddTimeSeries.setText(\"Add time series\")\n self.context_menu.addAction(self.context_menu.actionAddTimeSeries)\n 
self.context_menu.actionAddTimeSeries.triggered.connect(self.add_timeseries)\n self.context_menu.actionAddScatterPlot = QtWidgets.QAction(self)\n self.context_menu.actionAddScatterPlot.setText(\"Add scatter plot\")\n self.context_menu.addAction(self.context_menu.actionAddScatterPlot)\n self.context_menu.actionAddScatterPlot.triggered.connect(self.add_scatterplot)\n elif level == 1:\n selected_item = idx.model().itemFromIndex(idx)\n parent = selected_item.parent()\n if (str(parent.text()) == \"Files\") and (selected_item.column() == 1):\n key = str(parent.child(selected_item.row(),0).text())\n # check to see if we have the selected subsection\n if key == \"file_path\":\n self.context_menu.actionBrowseFilePath = QtWidgets.QAction(self)\n self.context_menu.actionBrowseFilePath.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseFilePath)\n self.context_menu.actionBrowseFilePath.triggered.connect(self.browse_file_path)\n elif key == \"in_filename\":\n self.context_menu.actionBrowseInputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseInputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseInputFile)\n self.context_menu.actionBrowseInputFile.triggered.connect(self.browse_input_file)\n elif key == \"out_filename\":\n self.context_menu.actionBrowseOutputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseOutputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseOutputFile)\n self.context_menu.actionBrowseOutputFile.triggered.connect(self.browse_output_file)\n elif key == \"plot_path\":\n self.context_menu.actionBrowsePlotPath = QtWidgets.QAction(self)\n self.context_menu.actionBrowsePlotPath.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowsePlotPath)\n self.context_menu.actionBrowsePlotPath.triggered.connect(self.browse_plot_path)\n else:\n pass\n elif (str(parent.text()) == \"Options\") and (selected_item.column() == 1):\n key = str(parent.child(selected_item.row(),0).text())\n if key == \"irga_type\":\n existing_entry = str(parent.child(selected_item.row(),1).text())\n if existing_entry != \"Li-7500\":\n self.context_menu.actionSetIRGATypeLi7500 = QtWidgets.QAction(self)\n self.context_menu.actionSetIRGATypeLi7500.setText(\"Li-7500\")\n self.context_menu.addAction(self.context_menu.actionSetIRGATypeLi7500)\n self.context_menu.actionSetIRGATypeLi7500.triggered.connect(self.set_irga_li7500)\n if existing_entry != \"Li-7500A (<V6.5)\":\n self.context_menu.actionSetIRGATypeLi7500APre6_5 = QtWidgets.QAction(self)\n self.context_menu.actionSetIRGATypeLi7500APre6_5.setText(\"Li-7500A (<V6.5)\")\n self.context_menu.addAction(self.context_menu.actionSetIRGATypeLi7500APre6_5)\n self.context_menu.actionSetIRGATypeLi7500APre6_5.triggered.connect(self.set_irga_li7500a_pre6_5)\n if existing_entry != \"Li-7500A (>=V6.5)\":\n self.context_menu.actionSetIRGATypeLi7500APost6_5 = QtWidgets.QAction(self)\n self.context_menu.actionSetIRGATypeLi7500APost6_5.setText(\"Li-7500A (>=V6.5)\")\n self.context_menu.addAction(self.context_menu.actionSetIRGATypeLi7500APost6_5)\n self.context_menu.actionSetIRGATypeLi7500APost6_5.triggered.connect(self.set_irga_li7500a_post6_5)\n if existing_entry != \"Li-7500RS\":\n self.context_menu.actionSetIRGATypeLi7500RS = QtWidgets.QAction(self)\n self.context_menu.actionSetIRGATypeLi7500RS.setText(\"Li-7500RS\")\n self.context_menu.addAction(self.context_menu.actionSetIRGATypeLi7500RS)\n 
self.context_menu.actionSetIRGATypeLi7500RS.triggered.connect(self.set_irga_li7500rs)\n if existing_entry != \"Li-7200\":\n self.context_menu.actionSetIRGATypeLi7200 = QtWidgets.QAction(self)\n self.context_menu.actionSetIRGATypeLi7200.setText(\"Li-7200\")\n self.context_menu.addAction(self.context_menu.actionSetIRGATypeLi7200)\n self.context_menu.actionSetIRGATypeLi7200.triggered.connect(self.set_irga_li7200)\n if existing_entry != \"EC150\":\n self.context_menu.actionSetIRGATypeEC150 = QtWidgets.QAction(self)\n self.context_menu.actionSetIRGATypeEC150.setText(\"EC150\")\n self.context_menu.addAction(self.context_menu.actionSetIRGATypeEC150)\n self.context_menu.actionSetIRGATypeEC150.triggered.connect(self.set_irga_ec150)\n if existing_entry != \"EC155\":\n self.context_menu.actionSetIRGATypeEC155 = QtWidgets.QAction(self)\n self.context_menu.actionSetIRGATypeEC155.setText(\"EC155\")\n self.context_menu.addAction(self.context_menu.actionSetIRGATypeEC155)\n self.context_menu.actionSetIRGATypeEC155.triggered.connect(self.set_irga_ec155)\n if existing_entry != \"IRGASON\":\n self.context_menu.actionSetIRGATypeIRGASON = QtWidgets.QAction(self)\n self.context_menu.actionSetIRGATypeIRGASON.setText(\"IRGASON\")\n self.context_menu.addAction(self.context_menu.actionSetIRGATypeIRGASON)\n self.context_menu.actionSetIRGATypeIRGASON.triggered.connect(self.set_irga_irgason)\n elif key in [\"SONIC_Check\", \"IRGA_Check\"]:\n existing_entry = str(parent.child(selected_item.row(),1).text())\n if existing_entry != \"Yes\":\n self.context_menu.actionSetCheckYes = QtWidgets.QAction(self)\n self.context_menu.actionSetCheckYes.setText(\"Yes\")\n self.context_menu.addAction(self.context_menu.actionSetCheckYes)\n self.context_menu.actionSetCheckYes.triggered.connect(self.set_check_yes)\n if existing_entry != \"No\":\n self.context_menu.actionSetCheckNo = QtWidgets.QAction(self)\n self.context_menu.actionSetCheckNo.setText(\"No\")\n self.context_menu.addAction(self.context_menu.actionSetCheckNo)\n self.context_menu.actionSetCheckNo.triggered.connect(self.set_check_no)\n elif (str(parent.text()) == \"Options\") and (selected_item.column() == 0):\n if selected_item.text() in [\"SONIC_Check\", \"IRGA_Check\"]:\n self.context_menu.actionRemoveOption = QtWidgets.QAction(self)\n self.context_menu.actionRemoveOption.setText(\"Remove option\")\n self.context_menu.addAction(self.context_menu.actionRemoveOption)\n self.context_menu.actionRemoveOption.triggered.connect(self.remove_item)\n elif str(parent.text()) == \"Variables\":\n # get a list of existing entries\n existing_entries = self.get_existing_entries()\n # only put a QC check in the context menu if it is not already present\n if \"RangeCheck\" not in existing_entries:\n self.context_menu.actionAddRangeCheck = QtWidgets.QAction(self)\n self.context_menu.actionAddRangeCheck.setText(\"Add RangeCheck\")\n self.context_menu.addAction(self.context_menu.actionAddRangeCheck)\n self.context_menu.actionAddRangeCheck.triggered.connect(self.add_rangecheck)\n if \"DependencyCheck\" not in existing_entries:\n self.context_menu.actionAddDependencyCheck = QtWidgets.QAction(self)\n self.context_menu.actionAddDependencyCheck.setText(\"Add DependencyCheck\")\n self.context_menu.addAction(self.context_menu.actionAddDependencyCheck)\n self.context_menu.actionAddDependencyCheck.triggered.connect(self.add_dependencycheck)\n if \"DiurnalCheck\" not in existing_entries:\n self.context_menu.actionAddDiurnalCheck = QtWidgets.QAction(self)\n 
self.context_menu.actionAddDiurnalCheck.setText(\"Add DiurnalCheck\")\n self.context_menu.addAction(self.context_menu.actionAddDiurnalCheck)\n self.context_menu.actionAddDiurnalCheck.triggered.connect(self.add_diurnalcheck)\n if \"ExcludeDates\" not in existing_entries:\n self.context_menu.actionAddExcludeDates = QtWidgets.QAction(self)\n self.context_menu.actionAddExcludeDates.setText(\"Add ExcludeDates\")\n self.context_menu.addAction(self.context_menu.actionAddExcludeDates)\n self.context_menu.actionAddExcludeDates.triggered.connect(self.add_excludedates)\n if \"LowerCheck\" not in existing_entries:\n self.context_menu.actionAddLowerCheck = QtWidgets.QAction(self)\n self.context_menu.actionAddLowerCheck.setText(\"Add LowerCheck\")\n self.context_menu.addAction(self.context_menu.actionAddLowerCheck)\n self.context_menu.actionAddLowerCheck.triggered.connect(self.add_lowercheck)\n if \"UpperCheck\" not in existing_entries:\n self.context_menu.actionAddUpperCheck = QtWidgets.QAction(self)\n self.context_menu.actionAddUpperCheck.setText(\"Add UpperCheck\")\n self.context_menu.addAction(self.context_menu.actionAddUpperCheck)\n self.context_menu.actionAddUpperCheck.triggered.connect(self.add_uppercheck)\n if \"CorrectWindDirection\" not in existing_entries:\n self.context_menu.actionAddWindDirectionCorrection = QtWidgets.QAction(self)\n self.context_menu.actionAddWindDirectionCorrection.setText(\"Add CorrectWindDirection\")\n self.context_menu.addAction(self.context_menu.actionAddWindDirectionCorrection)\n self.context_menu.actionAddWindDirectionCorrection.triggered.connect(self.add_winddirectioncorrection)\n if \"Linear\" not in existing_entries:\n self.context_menu.actionAddLinear = QtWidgets.QAction(self)\n self.context_menu.actionAddLinear.setText(\"Add Linear\")\n self.context_menu.addAction(self.context_menu.actionAddLinear)\n self.context_menu.actionAddLinear.triggered.connect(self.add_linear)\n #self.context_menu.actionAddExcludeHours = QtWidgets.QAction(self)\n #self.context_menu.actionAddExcludeHours.setText(\"Add ExcludeHours\")\n #self.context_menu.addAction(self.context_menu.actionAddExcludeHours)\n #self.context_menu.actionAddExcludeHours.triggered.connect(self.add_excludehours)\n self.context_menu.addSeparator()\n self.context_menu.actionRemoveVariable = QtWidgets.QAction(self)\n self.context_menu.actionRemoveVariable.setText(\"Remove variable\")\n self.context_menu.addAction(self.context_menu.actionRemoveVariable)\n self.context_menu.actionRemoveVariable.triggered.connect(self.remove_item)\n elif str(parent.text()) == \"Plots\":\n self.context_menu.actionRemovePlot = QtWidgets.QAction(self)\n self.context_menu.actionRemovePlot.setText(\"Remove plot\")\n self.context_menu.addAction(self.context_menu.actionRemovePlot)\n self.context_menu.actionRemovePlot.triggered.connect(self.remove_item)\n elif level == 2:\n add_separator = False\n if str(idx.data()) in [\"ExcludeDates\"]:\n self.context_menu.actionAddExcludeDateRange = QtWidgets.QAction(self)\n self.context_menu.actionAddExcludeDateRange.setText(\"Add date range\")\n self.context_menu.addAction(self.context_menu.actionAddExcludeDateRange)\n self.context_menu.actionAddExcludeDateRange.triggered.connect(self.add_excludedaterange)\n add_separator = True\n if str(idx.data()) in [\"LowerCheck\"]:\n self.context_menu.actionAddLowerCheckRange = QtWidgets.QAction(self)\n self.context_menu.actionAddLowerCheckRange.setText(\"Add date range\")\n self.context_menu.addAction(self.context_menu.actionAddLowerCheckRange)\n 
self.context_menu.actionAddLowerCheckRange.triggered.connect(self.add_lowercheckrange)\n add_separator = True\n if str(idx.data()) in [\"UpperCheck\"]:\n self.context_menu.actionAddUpperCheckRange = QtWidgets.QAction(self)\n self.context_menu.actionAddUpperCheckRange.setText(\"Add date range\")\n self.context_menu.addAction(self.context_menu.actionAddUpperCheckRange)\n self.context_menu.actionAddUpperCheckRange.triggered.connect(self.add_uppercheckrange)\n add_separator = True\n if str(idx.data()) in [\"CorrectWindDirection\"]:\n self.context_menu.actionAddWindDirectionCorrectionRange = QtWidgets.QAction(self)\n self.context_menu.actionAddWindDirectionCorrectionRange.setText(\"Add date range\")\n self.context_menu.addAction(self.context_menu.actionAddWindDirectionCorrectionRange)\n self.context_menu.actionAddWindDirectionCorrectionRange.triggered.connect(self.add_winddirectioncorrectionrange)\n add_separator = True\n if str(idx.data()) in [\"Linear\"]:\n self.context_menu.actionAddLinearRange = QtWidgets.QAction(self)\n self.context_menu.actionAddLinearRange.setText(\"Add date range\")\n self.context_menu.addAction(self.context_menu.actionAddLinearRange)\n self.context_menu.actionAddLinearRange.triggered.connect(self.add_linearrange)\n add_separator = True\n if add_separator:\n self.context_menu.addSeparator()\n add_separator = False\n self.context_menu.actionRemoveQCCheck = QtWidgets.QAction(self)\n self.context_menu.actionRemoveQCCheck.setText(\"Remove QC check\")\n self.context_menu.addAction(self.context_menu.actionRemoveQCCheck)\n self.context_menu.actionRemoveQCCheck.triggered.connect(self.remove_item)\n elif level == 3:\n if (str(idx.parent().data()) in [\"ExcludeDates\", \"LowerCheck\", \"UpperCheck\", \"Linear\"] and\n str(idx.data()) != \"0\"):\n self.context_menu.actionRemoveExcludeDateRange = QtWidgets.QAction(self)\n self.context_menu.actionRemoveExcludeDateRange.setText(\"Remove date range\")\n self.context_menu.addAction(self.context_menu.actionRemoveExcludeDateRange)\n self.context_menu.actionRemoveExcludeDateRange.triggered.connect(self.remove_daterange)\n\n self.context_menu.exec_(self.view.viewport().mapToGlobal(position))", "title": "" }, { "docid": "6c991b6cf321b591578fc9371cd75281", "score": "0.56672907", "text": "def create_menu(self, root):\n menubar = Menu(root, type='menubar')\n root['menu'] = menubar\n\n menu_file = Menu(menubar)\n menu_options = Menu(menubar)\n menu_run = Menu(menubar)\n menu_folders = Menu(menubar)\n menu_links = Menu(menubar)\n menu_help = Menu(menubar)\n #menu_beta = Menu(menubar)\n menubar.add_cascade(menu=menu_file, label='File')\n menubar.add_cascade(menu=menu_options, label='Options')\n menubar.add_cascade(menu=menu_run, label='Run')\n menubar.add_cascade(menu=menu_folders, label='Folders')\n menubar.add_cascade(menu=menu_links, label='Links')\n menubar.add_cascade(menu=menu_help, label='Help')\n #menubar.add_cascade(menu=menu_beta, label='Testing')\n\n menu_file.add_command(\n label='Re-load param set', command=self.load_params,\n accelerator='Ctrl+L')\n menu_file.add_command(\n label='Re-save param set', command=self.save_params,\n accelerator='Ctrl+S')\n menu_file.add_command(\n label='Output log', command=lambda: LogWindow(self.root))\n\n menu_file.add_command(\n label='Restore default settings', command=self.restore_defaults)\n\n if sys.platform.startswith('linux'):\n menu_file.add_command(\n label=\"Configure terminal...\", command=self.configure_terminal)\n\n if len(lnp.folders) > 1:\n menu_file.add_command(\n label=\"Reload/Choose 
DF folder\", command=self.reload_program)\n\n menu_file.add_command(\n label='Import from previous install...',\n command=self.migrate_settings)\n\n if sys.platform != 'darwin':\n menu_file.add_command(\n label='Exit', command=self.exit_program, accelerator='Alt+F4')\n root.bind_all('<Control-l>', lambda e: self.load_params())\n root.bind_all('<Control-s>', lambda e: self.save_params())\n\n self.autoclose.set(lnp.userconfig.get_bool('autoClose'))\n menu_options.add_checkbutton(\n label='Close GUI on launch', onvalue=True, offvalue=False,\n variable=self.autoclose, command=self.set_autoclose)\n\n if update.updates_configured():\n menu_updates = menu_updates = Menu(menubar)\n menu_options.add_cascade(\n menu=menu_updates, label='Check for updates')\n options = [\n \"Every launch\", \"Every day\", \"Every 3 days\", \"Every 7 days\",\n \"Every 14 days\", \"Every 30 days\", \"Never\"]\n daylist = [0, 1, 3, 7, 14, 30, -1]\n self.updateDays.set(lnp.userconfig.get_number('updateDays'))\n for i, o in enumerate(options):\n menu_updates.add_radiobutton(\n label=o, value=daylist[i], variable=self.updateDays,\n command=lambda i=i: self.configure_updates(daylist[i]))\n self.downloadBaselines.set(lnp.userconfig.get_bool('downloadBaselines'))\n menu_options.add_checkbutton(\n label='Allow auto-download of baselines', onvalue=True,\n offvalue=False, variable=self.downloadBaselines,\n command=self.set_downloads)\n\n self.show_scrollbars.set(lnp.userconfig.get_bool('tkgui_show_scroll'))\n menu_options.add_checkbutton(\n label='Always show scrollbars (reloads program)', onvalue=True,\n offvalue=False, variable=self.show_scrollbars,\n command=self.set_show_scroll)\n\n menu_run.add_command(\n label='Dwarf Fortress', command=launcher.run_df,\n accelerator='Ctrl+R')\n menu_run.add_command(\n label='Init Editor', command=self.run_init, accelerator='Ctrl+I')\n root.bind_all('<Control-r>', lambda e: launcher.run_df())\n root.bind_all('<Control-i>', lambda e: self.run_init())\n\n self.populate_menu(\n lnp.config.get_list('folders'), menu_folders,\n launcher.open_folder_idx)\n self.populate_menu(\n lnp.config.get_list('links'), menu_links,\n launcher.open_link_idx)\n\n menu_help.add_command(\n label=\"Help\", command=self.show_help, accelerator='F1')\n menu_help.add_command(\n label=\"About\", command=self.show_about, accelerator='Alt+F1')\n menu_help.add_command(label=\"About DF...\", command=self.show_df_info)\n root.bind_all('<F1>', lambda e: self.show_help())\n root.bind_all('<Alt-F1>', lambda e: self.show_about())\n root.createcommand('tkAboutDialog', self.show_about)\n return menubar", "title": "" }, { "docid": "c9546a06a3e74c2a985ffae5be28af63", "score": "0.566376", "text": "def _createMenu(self):\n from .E5ModelMenu import E5ModelMenu\n return E5ModelMenu(self)", "title": "" }, { "docid": "14a0136c6a4caaecbe20b4ecba98aede", "score": "0.5661188", "text": "def start(self):\n\t\t\n\t\tself.insert(\"<openbox_pipe_menu>\")", "title": "" }, { "docid": "284a2aae787bce6124200730e02fe5a8", "score": "0.5660069", "text": "def createMenus(self):\n self.fileMenu = self.menuBar().addMenu(\"&File\")\n self.editMenu = self.menuBar().addMenu(\"&Edit\")\n self.formatMenu = self.menuBar().addMenu(\"&Format\")\n self.helpMenu = self.menuBar().addMenu(\"&Help\")", "title": "" }, { "docid": "8be14a60e6e5c2c27d2c8d79cfea2473", "score": "0.5660061", "text": "def _create_edit_menu(self):\n self._edit_menu_widget = BrEditMenu(self)\n\n # connect menu\n self._edit_menu_widget.del_action.triggered.connect(\n self._delete_selected\n )", 
"title": "" }, { "docid": "c2bdf6b585fba860d6c523977ecdd947", "score": "0.5651527", "text": "def select_handler(self):\n\n menu_model = MenuModel()\n menu_model()\n start_menu = menu_model().start_menu\n menu = MenuView(start_menu)\n choice = menu().choice\n if choice == '0':\n tournament = TournamentController()\n title = tournament()\n tour = TournamentModel.get_tournament(title)\n self.tour_info = tour\n menu = MenuController(self.tour_info)\n menu()\n elif choice == '1':\n tournaments_list = TournamentModel.get_all_tournaments()\n if len(tournaments_list) != 0:\n chosen_option = TournamentController.show_all()\n self.manage_list_choice(int(chosen_option))\n else:\n error = Error('No tournament created yet')\n error()\n menu = MenuController(self.tour_info)\n menu()\n else:\n error = Error('your choice is not in the list')\n error()", "title": "" }, { "docid": "95aa97db780aa136a9af794783831809", "score": "0.56491065", "text": "def initGui(self):\n\n icon_path = ':/plugins/busqueda_cordenadas/icon.png'\n self.add_action(\n icon_path,\n text=self.tr(u''),\n callback=self.run,\n parent=self.iface.mainWindow())\n\n # will be set False in run()\n self.first_start = True\n\n result = QObject.connect(self.clickTool, SIGNAL(\"canvasClicked(const QgsPoint &, Qt::MouseButton)\"), self.handleMouseDown)", "title": "" }, { "docid": "7551157cf580dadaf13ccbbf2566f6f6", "score": "0.5640211", "text": "def create_menus(self):\n menubar = tk.Menu(self.master)\n self.master.config(menu=menubar)\n action_menu = tk.Menu(menubar)\n\n open_stream_menu = tk.Menu(action_menu)\n\n # set command for each service\n for service in ['Twitch', 'MLG', 'Azubu']:\n open_stream_menu.add_command(label=service, command=lambda service=service: self.show_open_stream_dialog(service))\n\n # menu for opening a stream in another service (or a service you don't follow on Twitch)\n action_menu.add_cascade(label='Open Stream', menu=open_stream_menu)\n action_menu.add_separator()\n\n # menu for watching Twitch Past Broadcasts\n action_menu.add_command(label='Past Broadcast', command=lambda: self.show_open_stream_dialog('Twitch', True))\n action_menu.add_separator()\n\n # menu for refreshing the live streams\n action_menu.add_command(label='Refresh', command=self.refresh)\n\n # adds the menu to the menubar\n menubar.add_cascade(label='Actions', menu=action_menu)", "title": "" }, { "docid": "8c9cd24b4e7fb10c846ec81b8efc2a84", "score": "0.56386745", "text": "def main_menu(self):\r\n View.add_title_menu(\"CHESS MAIN MENU\")\r\n if self.actual_tournaments_list:\r\n if self.actual_tournaments_list[-1].in_progress:\r\n View.add_menu_line(\"Continue Tournament\")\r\n if self.actual_tournaments_list[-1].round_list:\r\n self.control = RoundsController()\r\n else:\r\n self.control = TournamentController()\r\n else:\r\n View.add_menu_line(\"New Tournament\")\r\n self.control = TournamentController()\r\n else:\r\n View.add_menu_line(\"New Tournament\")\r\n self.control = TournamentController()\r\n View.add_menu_line(\"Manage Players\")\r\n View.add_menu_line(\"Rapports\")\r\n View.add_menu_line(\"Quit\")\r\n choice = TestService.test_alpha(test_element=('1', '2', '3', '4'))\r\n if choice == '1':\r\n return self.control()\r\n if choice == '2':\r\n self.control = ManagePlayerController()\r\n if choice == '3':\r\n self.control = rapport_controller.RapportController()\r\n if choice == '4':\r\n return None\r\n return self.control()", "title": "" }, { "docid": "486cabd0cf2b2db105975df47dc83e05", "score": "0.5634848", "text": "def click_handler(self):\n 
self.setText(\"X\")\n self.setDisabled(True)\n self.choice_fn(self.x, self.y)\n\n # self.setText(\"O\")\n\n # if not self.revealed:\n # self.revealed = True\n # if self.mine:\n # self.game_over_fn()\n # self.setText('X')\n\n # else:\n # self.setDisabled(True)\n # count = self.reveal_fn(self.x, self.y)\n # if count > 0:\n # self.setText(str(count))\n # else:\n # self.setText('X')", "title": "" }, { "docid": "5f9b31614f8a95a179d7e233d8df7fd3", "score": "0.5632106", "text": "def launch(self):\n\t\tif self.sensitive:\n\t\t\tif pygame.mouse.get_pressed()[0] and not self.cursor_collide(self.cursor):\n\t\t\t\tself.click=True\n\t\t\telif self.press_cursor_collide(self.cursor):\n\t\t\t\tif not self.click:\n\t\t\t\t\tself.click = True\n\t\t\t\t\tself.callback()\n\t\t\telif not pygame.mouse.get_pressed()[0]:\n\t\t\t\tself.click = False\n\t\tif self._text:\n\t\t\tself.ajust_button()\n\t\t\tself._text.write()", "title": "" }, { "docid": "8f1f08bd0eb3f86169706e5412024a0b", "score": "0.5629456", "text": "def init_menu(self):\n self.menu = Menu(self.master)\n self.master.config(menu=self.menu, borderwidth=10)\n self.file = Menu(self.menu)\n self.edit_templates = Menu(self.menu)\n self.file.add_command(label=\"New\", command=open_blank_file)\n self.file.add_command(label=\"Open ARE Letter\", command=open_existing_ARE_file)\n self.file.add_command(label=\"Open PRL Letter\", command=open_existing_PRL_file)\n self.file.add_command(label=\"Open ATP Letter\", command=open_existing_ATP_file)\n self.file.add_command(label=\"Exit\", command=application_exit)\n self.template_list = []\n for item in self.db:\n self.edit_templates.add_command(label=item['name'], command=partial(open_doc, TEMPLATE_PATH + item['docpath']))\n self.menu.add_cascade(label=\"File\", menu=self.file)\n self.menu.add_cascade(label=\"Edit Templates\", menu=self.edit_templates)", "title": "" }, { "docid": "d2f045b4139b80b71d7826a789ebff44", "score": "0.56217337", "text": "def make_btn():\n # The functional part of the main window is the canvas.\n Dragable('B').attach(main.canvas)", "title": "" }, { "docid": "612f2f7a9257bf5366f8faa0c5bf056b", "score": "0.5618463", "text": "def context_menu(self, position):\n # get a menu\n self.context_menu = QtWidgets.QMenu()\n # get the index of the selected item\n idx = self.view.selectedIndexes()[0]\n # get the selected item text\n selected_text = str(idx.data())\n # get the selected item\n selected_item = idx.model().itemFromIndex(idx)\n # get the level of the selected item\n level = self.get_level_selected_item()\n # initialise logical for inserting a separator\n add_separator = False\n if level == 0:\n # sections with only 1 level\n # get a list of the section headings at the root level\n section_headings = []\n root = self.model.invisibleRootItem()\n for i in range(root.rowCount()):\n section_headings.append(str(root.child(i).text()))\n if \"SummaryPlots\" not in section_headings:\n self.context_menu.actionAddSummaryPlots = QtWidgets.QAction(self)\n self.context_menu.actionAddSummaryPlots.setText(\"Add summary plots section\")\n self.context_menu.addAction(self.context_menu.actionAddSummaryPlots)\n self.context_menu.actionAddSummaryPlots.triggered.connect(self.add_summary_plots_section)\n add_separator = True\n if \"ustar_threshold\" not in section_headings:\n self.context_menu.actionAddUstarThreshold = QtWidgets.QAction(self)\n self.context_menu.actionAddUstarThreshold.setText(\"Add u* threshold section\")\n self.context_menu.addAction(self.context_menu.actionAddUstarThreshold)\n 
self.context_menu.actionAddUstarThreshold.triggered.connect(self.add_ustar_threshold_section)\n add_separator = True\n if \"Imports\" not in section_headings:\n self.context_menu.actionAddImports = QtWidgets.QAction(self)\n self.context_menu.actionAddImports.setText(\"Add Imports section\")\n self.context_menu.addAction(self.context_menu.actionAddImports)\n self.context_menu.actionAddImports.triggered.connect(self.add_imports_section)\n add_separator = True\n if selected_text == \"Files\":\n self.context_menu.actionAddFileEntry = QtWidgets.QAction(self)\n self.context_menu.actionAddFileEntry.setText(\"Add item\")\n self.context_menu.addAction(self.context_menu.actionAddFileEntry)\n self.context_menu.actionAddFileEntry.triggered.connect(self.add_fileentry)\n elif selected_text == \"Output\":\n pass\n elif selected_text == \"Options\":\n if add_separator:\n self.context_menu.addSeparator()\n add_separator = False\n # get a list of existing entries in this section\n existing_entries = self.get_existing_entries()\n # only put an option in the context menu if it is not already present\n if \"MaxGapInterpolate\" not in existing_entries:\n self.context_menu.actionAddMaxGapInterpolate = QtWidgets.QAction(self)\n self.context_menu.actionAddMaxGapInterpolate.setText(\"MaxGapInterpolate\")\n self.context_menu.addAction(self.context_menu.actionAddMaxGapInterpolate)\n self.context_menu.actionAddMaxGapInterpolate.triggered.connect(self.add_maxgapinterpolate)\n if \"MaxShortGapDays\" not in existing_entries:\n self.context_menu.actionAddMaxShortGapDays = QtWidgets.QAction(self)\n self.context_menu.actionAddMaxShortGapDays.setText(\"MaxShortGapDays\")\n self.context_menu.addAction(self.context_menu.actionAddMaxShortGapDays)\n self.context_menu.actionAddMaxShortGapDays.triggered.connect(self.add_maxshortgapdays)\n if \"FilterList\" not in existing_entries:\n self.context_menu.actionAddFilterList = QtWidgets.QAction(self)\n self.context_menu.actionAddFilterList.setText(\"FilterList\")\n self.context_menu.addAction(self.context_menu.actionAddFilterList)\n self.context_menu.actionAddFilterList.triggered.connect(self.add_filterlist)\n if \"TurbulenceFilter\" not in existing_entries:\n self.context_menu.actionAddTurbulenceFilter = QtWidgets.QAction(self)\n self.context_menu.actionAddTurbulenceFilter.setText(\"TurbulenceFilter\")\n self.context_menu.addAction(self.context_menu.actionAddTurbulenceFilter)\n self.context_menu.actionAddTurbulenceFilter.triggered.connect(self.add_turbulencefilter)\n if \"DayNightFilter\" not in existing_entries:\n self.context_menu.actionAddDayNightFilter = QtWidgets.QAction(self)\n self.context_menu.actionAddDayNightFilter.setText(\"DayNightFilter\")\n self.context_menu.addAction(self.context_menu.actionAddDayNightFilter)\n self.context_menu.actionAddDayNightFilter.triggered.connect(self.add_daynightfilter)\n if \"UseFsdsyn_threshold\" not in existing_entries:\n self.context_menu.actionAddUseFsdsyn_threshold = QtWidgets.QAction(self)\n self.context_menu.actionAddUseFsdsyn_threshold.setText(\"UseFsdsyn_threshold\")\n self.context_menu.addAction(self.context_menu.actionAddUseFsdsyn_threshold)\n self.context_menu.actionAddUseFsdsyn_threshold.triggered.connect(self.add_usefsdsynthreshold)\n if \"AcceptDayTimes\" not in existing_entries:\n self.context_menu.actionAddAcceptDayTimes = QtWidgets.QAction(self)\n self.context_menu.actionAddAcceptDayTimes.setText(\"AcceptDayTimes\")\n self.context_menu.addAction(self.context_menu.actionAddAcceptDayTimes)\n 
self.context_menu.actionAddAcceptDayTimes.triggered.connect(self.add_acceptdaytimes)\n if \"UseEveningFilter\" not in existing_entries:\n self.context_menu.actionAddUseEveningFilter = QtWidgets.QAction(self)\n self.context_menu.actionAddUseEveningFilter.setText(\"UseEveningFilter\")\n self.context_menu.addAction(self.context_menu.actionAddUseEveningFilter)\n self.context_menu.actionAddUseEveningFilter.triggered.connect(self.add_useeveningfilter)\n if \"EveningFilterLength\" not in existing_entries:\n self.context_menu.actionAddEveningFilterLength = QtWidgets.QAction(self)\n self.context_menu.actionAddEveningFilterLength.setText(\"EveningFilterLength\")\n self.context_menu.addAction(self.context_menu.actionAddEveningFilterLength)\n self.context_menu.actionAddEveningFilterLength.triggered.connect(self.add_eveningfilterlength)\n if \"Fsd_threshold\" not in existing_entries:\n self.context_menu.actionAddFsd_threshold = QtWidgets.QAction(self)\n self.context_menu.actionAddFsd_threshold.setText(\"Fsd_threshold\")\n self.context_menu.addAction(self.context_menu.actionAddFsd_threshold)\n self.context_menu.actionAddFsd_threshold.triggered.connect(self.add_fsdthreshold)\n if \"sa_threshold\" not in existing_entries:\n self.context_menu.actionAddsa_threshold = QtWidgets.QAction(self)\n self.context_menu.actionAddsa_threshold.setText(\"sa_threshold\")\n self.context_menu.addAction(self.context_menu.actionAddsa_threshold)\n self.context_menu.actionAddsa_threshold.triggered.connect(self.add_sathreshold)\n if \"TruncateToImports\" not in existing_entries:\n self.context_menu.actionAddTruncateToImports = QtWidgets.QAction(self)\n self.context_menu.actionAddTruncateToImports.setText(\"TruncateToImports\")\n self.context_menu.addAction(self.context_menu.actionAddTruncateToImports)\n self.context_menu.actionAddTruncateToImports.triggered.connect(self.add_truncatetoimports)\n elif selected_text in [\"Fluxes\", \"Variables\"]:\n self.context_menu.actionAddVariable = QtWidgets.QAction(self)\n self.context_menu.actionAddVariable.setText(\"Add variable\")\n self.context_menu.addAction(self.context_menu.actionAddVariable)\n self.context_menu.actionAddVariable.triggered.connect(self.add_new_variable)\n elif selected_text in [\"ustar_threshold\"]:\n self.context_menu.actionAddUstarThreshold = QtWidgets.QAction(self)\n self.context_menu.actionAddUstarThreshold.setText(\"Add year\")\n self.context_menu.addAction(self.context_menu.actionAddUstarThreshold)\n self.context_menu.actionAddUstarThreshold.triggered.connect(self.add_ustar_threshold_daterange)\n self.context_menu.addSeparator()\n self.context_menu.actionRemoveUstarThreshold = QtWidgets.QAction(self)\n self.context_menu.actionRemoveUstarThreshold.setText(\"Remove section\")\n self.context_menu.addAction(self.context_menu.actionRemoveUstarThreshold)\n self.context_menu.actionRemoveUstarThreshold.triggered.connect(self.remove_section)\n elif selected_text in [\"Imports\"]:\n self.context_menu.actionAddImportsVariable = QtWidgets.QAction(self)\n self.context_menu.actionAddImportsVariable.setText(\"Add variable\")\n self.context_menu.addAction(self.context_menu.actionAddImportsVariable)\n self.context_menu.actionAddImportsVariable.triggered.connect(self.add_imports_variable)\n self.context_menu.addSeparator()\n self.context_menu.actionRemoveImportsSection = QtWidgets.QAction(self)\n self.context_menu.actionRemoveImportsSection.setText(\"Remove section\")\n self.context_menu.addAction(self.context_menu.actionRemoveImportsSection)\n 
self.context_menu.actionRemoveImportsSection.triggered.connect(self.remove_section)\n elif selected_text in [\"SummaryPlots\"]:\n self.context_menu.actionAddSummaryPlot = QtWidgets.QAction(self)\n self.context_menu.actionAddSummaryPlot.setText(\"Add summary plot\")\n self.context_menu.addAction(self.context_menu.actionAddSummaryPlot)\n self.context_menu.actionAddSummaryPlot.triggered.connect(self.add_summary_plot)\n self.context_menu.addSeparator()\n self.context_menu.actionRemoveSummaryPlotSection = QtWidgets.QAction(self)\n self.context_menu.actionRemoveSummaryPlotSection.setText(\"Remove section\")\n self.context_menu.addAction(self.context_menu.actionRemoveSummaryPlotSection)\n self.context_menu.actionRemoveSummaryPlotSection.triggered.connect(self.remove_section)\n elif level == 1:\n # sections with 2 levels\n # get the parent of the selected item\n parent = selected_item.parent()\n if (str(parent.text()) == \"Files\") and (selected_item.column() == 1):\n key = str(parent.child(selected_item.row(),0).text())\n if key in [\"file_path\", \"plot_path\"]:\n self.context_menu.actionBrowseFilePath = QtWidgets.QAction(self)\n self.context_menu.actionBrowseFilePath.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseFilePath)\n self.context_menu.actionBrowseFilePath.triggered.connect(self.browse_file_path)\n elif key in [\"in_filename\"]:\n self.context_menu.actionBrowseInputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseInputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseInputFile)\n self.context_menu.actionBrowseInputFile.triggered.connect(self.browse_input_file)\n elif key in [\"out_filename\"]:\n self.context_menu.actionBrowseOutputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseOutputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseOutputFile)\n self.context_menu.actionBrowseOutputFile.triggered.connect(self.browse_output_file)\n elif key in [\"cpd_filename\"]:\n self.context_menu.actionBrowseCPDFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseCPDFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseCPDFile)\n self.context_menu.actionBrowseCPDFile.triggered.connect(self.browse_cpd_file)\n else:\n self.context_menu.actionBrowseInputFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseInputFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseInputFile)\n self.context_menu.actionBrowseInputFile.triggered.connect(self.browse_input_file)\n elif (str(parent.text()) == \"Files\") and (selected_item.column() == 0):\n key = str(parent.child(selected_item.row(),0).text())\n if key not in [\"file_path\", \"plot_path\", \"in_filename\", \"out_filename\"]:\n self.context_menu.actionRemoveItem = QtWidgets.QAction(self)\n self.context_menu.actionRemoveItem.setText(\"Remove item\")\n self.context_menu.addAction(self.context_menu.actionRemoveItem)\n self.context_menu.actionRemoveItem.triggered.connect(self.remove_item)\n else:\n pass\n elif (str(parent.text()) == \"Options\"):\n key = str(parent.child(selected_item.row(),0).text())\n if (selected_item.column() == 0):\n self.context_menu.actionRemoveOption = QtWidgets.QAction(self)\n self.context_menu.actionRemoveOption.setText(\"Remove option\")\n self.context_menu.addAction(self.context_menu.actionRemoveOption)\n self.context_menu.actionRemoveOption.triggered.connect(self.remove_item)\n elif (selected_item.column() == 1) 
and (key == \"InterpolateType\"):\n if selected_text != \"linear\":\n self.context_menu.actionChangeInterpolateType = QtWidgets.QAction(self)\n self.context_menu.actionChangeInterpolateType.setText(\"linear\")\n self.context_menu.addAction(self.context_menu.actionChangeInterpolateType)\n self.context_menu.actionChangeInterpolateType.triggered.connect(lambda:self.change_selected_text(\"linear\"))\n if selected_text != \"Akima\":\n self.context_menu.actionChangeInterpolateType = QtWidgets.QAction(self)\n self.context_menu.actionChangeInterpolateType.setText(\"Akima\")\n self.context_menu.addAction(self.context_menu.actionChangeInterpolateType)\n self.context_menu.actionChangeInterpolateType.triggered.connect(lambda:self.change_selected_text(\"Akima\"))\n elif (str(parent.text()) in [\"Fluxes\", \"Variables\"]):\n # get a list of existing entries\n existing_entries = self.get_existing_entries()\n # only put a QC check in the context menu if it is not already present\n if \"GapFillUsingSOLO\" not in existing_entries:\n self.context_menu.actionAddSOLO = QtWidgets.QAction(self)\n self.context_menu.actionAddSOLO.setText(\"Add SOLO\")\n self.context_menu.addAction(self.context_menu.actionAddSOLO)\n self.context_menu.actionAddSOLO.triggered.connect(self.add_solo)\n add_separator = True\n if \"GapFillLongSOLO\" not in existing_entries:\n self.context_menu.actionAddLongSOLO = QtWidgets.QAction(self)\n self.context_menu.actionAddLongSOLO.setText(\"Add SOLO (long gaps)\")\n self.context_menu.addAction(self.context_menu.actionAddLongSOLO)\n self.context_menu.actionAddLongSOLO.triggered.connect(self.add_solo_long)\n add_separator = True\n if \"GapFillUsingMDS\" not in existing_entries:\n self.context_menu.actionAddMDS = QtWidgets.QAction(self)\n self.context_menu.actionAddMDS.setText(\"Add MDS\")\n self.context_menu.addAction(self.context_menu.actionAddMDS)\n self.context_menu.actionAddMDS.triggered.connect(self.add_MDS)\n add_separator = True\n if \"GapFillFromClimatology\" not in existing_entries:\n self.context_menu.actionAddClimatology = QtWidgets.QAction(self)\n self.context_menu.actionAddClimatology.setText(\"Add Climatology\")\n self.context_menu.addAction(self.context_menu.actionAddClimatology)\n self.context_menu.actionAddClimatology.triggered.connect(self.add_climatology)\n add_separator = True\n if add_separator:\n add_separator = False\n self.context_menu.addSeparator()\n if \"RangeCheck\" not in existing_entries:\n self.context_menu.actionAddRangeCheck = QtWidgets.QAction(self)\n self.context_menu.actionAddRangeCheck.setText(\"Add RangeCheck\")\n self.context_menu.addAction(self.context_menu.actionAddRangeCheck)\n self.context_menu.actionAddRangeCheck.triggered.connect(self.add_rangecheck)\n add_separator = True\n if \"DependencyCheck\" not in existing_entries:\n self.context_menu.actionAddDependencyCheck = QtWidgets.QAction(self)\n self.context_menu.actionAddDependencyCheck.setText(\"Add DependencyCheck\")\n self.context_menu.addAction(self.context_menu.actionAddDependencyCheck)\n self.context_menu.actionAddDependencyCheck.triggered.connect(self.add_dependencycheck)\n add_separator = True\n if \"DiurnalCheck\" not in existing_entries:\n self.context_menu.actionAddDiurnalCheck = QtWidgets.QAction(self)\n self.context_menu.actionAddDiurnalCheck.setText(\"Add DiurnalCheck\")\n self.context_menu.addAction(self.context_menu.actionAddDiurnalCheck)\n self.context_menu.actionAddDiurnalCheck.triggered.connect(self.add_diurnalcheck)\n add_separator = True\n if \"ExcludeDates\" not in 
existing_entries:\n self.context_menu.actionAddExcludeDates = QtWidgets.QAction(self)\n self.context_menu.actionAddExcludeDates.setText(\"Add ExcludeDates\")\n self.context_menu.addAction(self.context_menu.actionAddExcludeDates)\n self.context_menu.actionAddExcludeDates.triggered.connect(self.add_excludedates)\n add_separator = True\n if add_separator:\n add_separator = False\n self.context_menu.addSeparator()\n self.context_menu.actionRemoveOption = QtWidgets.QAction(self)\n self.context_menu.actionRemoveOption.setText(\"Remove variable\")\n self.context_menu.addAction(self.context_menu.actionRemoveOption)\n self.context_menu.actionRemoveOption.triggered.connect(self.remove_item)\n elif (str(parent.text()) == \"ustar_threshold\"):\n self.context_menu.actionRemoveDateRange = QtWidgets.QAction(self)\n self.context_menu.actionRemoveDateRange.setText(\"Remove date range\")\n self.context_menu.addAction(self.context_menu.actionRemoveDateRange)\n self.context_menu.actionRemoveDateRange.triggered.connect(self.remove_daterange)\n elif (str(parent.text()) == \"Imports\"):\n self.context_menu.actionRemoveImportsVariable = QtWidgets.QAction(self)\n self.context_menu.actionRemoveImportsVariable.setText(\"Remove variable\")\n self.context_menu.addAction(self.context_menu.actionRemoveImportsVariable)\n self.context_menu.actionRemoveImportsVariable.triggered.connect(self.remove_item)\n elif str(parent.text()) == \"SummaryPlots\":\n self.context_menu.actionRemoveSummaryPlot = QtWidgets.QAction(self)\n self.context_menu.actionRemoveSummaryPlot.setText(\"Remove summary plot\")\n self.context_menu.addAction(self.context_menu.actionRemoveSummaryPlot)\n self.context_menu.actionRemoveSummaryPlot.triggered.connect(self.remove_item)\n elif level == 2:\n # sections with 3 levels\n parent = selected_item.parent()\n grand_parent = selected_item.parent().parent()\n subsubsection_name = str(idx.data())\n if subsubsection_name in [\"RangeCheck\", \"DependencyCheck\", \"DiurnalCheck\"]:\n self.context_menu.actionRemoveQCCheck = QtWidgets.QAction(self)\n self.context_menu.actionRemoveQCCheck.setText(\"Remove QC check\")\n self.context_menu.addAction(self.context_menu.actionRemoveQCCheck)\n self.context_menu.actionRemoveQCCheck.triggered.connect(self.remove_item)\n elif subsubsection_name in [\"ExcludeDates\"]:\n self.context_menu.actionAddExcludeDateRange = QtWidgets.QAction(self)\n self.context_menu.actionAddExcludeDateRange.setText(\"Add date range\")\n self.context_menu.addAction(self.context_menu.actionAddExcludeDateRange)\n self.context_menu.actionAddExcludeDateRange.triggered.connect(self.add_excludedaterange)\n self.context_menu.addSeparator()\n self.context_menu.actionRemoveQCCheck = QtWidgets.QAction(self)\n self.context_menu.actionRemoveQCCheck.setText(\"Remove QC check\")\n self.context_menu.addAction(self.context_menu.actionRemoveQCCheck)\n self.context_menu.actionRemoveQCCheck.triggered.connect(self.remove_item)\n elif subsubsection_name in [\"GapFillUsingSOLO\", \"GapFillLongSOLO\", \"GapFillUsingMDS\", \"GapFillFromClimatology\"]:\n self.context_menu.actionRemoveGFMethod = QtWidgets.QAction(self)\n self.context_menu.actionRemoveGFMethod.setText(\"Remove method\")\n self.context_menu.addAction(self.context_menu.actionRemoveGFMethod)\n self.context_menu.actionRemoveGFMethod.triggered.connect(self.remove_GFMethod)\n if str(grand_parent.text() == \"Imports\"):\n key = str(parent.child(selected_item.row(),0).text())\n if (key == \"file_name\") and (selected_item.column() == 1):\n 
self.context_menu.actionBrowseImportsFile = QtWidgets.QAction(self)\n self.context_menu.actionBrowseImportsFile.setText(\"Browse...\")\n self.context_menu.addAction(self.context_menu.actionBrowseImportsFile)\n self.context_menu.actionBrowseImportsFile.triggered.connect(self.browse_imports_file)\n elif level == 3:\n existing_entries = self.get_existing_entries()\n # sections with 4 levels\n # get the parent text\n parent_text = str(idx.parent().data())\n if parent_text == \"ExcludeDates\":\n self.context_menu.actionRemoveExcludeDateRange = QtWidgets.QAction(self)\n self.context_menu.actionRemoveExcludeDateRange.setText(\"Remove date range\")\n self.context_menu.addAction(self.context_menu.actionRemoveExcludeDateRange)\n self.context_menu.actionRemoveExcludeDateRange.triggered.connect(self.remove_daterange)\n elif parent_text == \"GapFillUsingSOLO\":\n # get a list of existing entries\n existing_entries = self.get_existing_entries()\n if \"solo_settings\" not in existing_entries:\n self.context_menu.actionAddSOLOSettings = QtWidgets.QAction(self)\n self.context_menu.actionAddSOLOSettings.setText(\"Add SOLO settings\")\n self.context_menu.addAction(self.context_menu.actionAddSOLOSettings)\n self.context_menu.actionAddSOLOSettings.triggered.connect(self.add_solo_settings)\n add_separator = True\n if add_separator:\n add_separator = False\n self.context_menu.addSeparator()\n self.context_menu.actionRemoveGFMethodVariable = QtWidgets.QAction(self)\n self.context_menu.actionRemoveGFMethodVariable.setText(\"Remove variable\")\n self.context_menu.addAction(self.context_menu.actionRemoveGFMethodVariable)\n self.context_menu.actionRemoveGFMethodVariable.triggered.connect(self.remove_item)\n elif level == 4:\n selected_text = str(idx.data())\n if selected_text in [\"solo_settings\"]:\n self.context_menu.actionRemoveSOLOSettings = QtWidgets.QAction(self)\n self.context_menu.actionRemoveSOLOSettings.setText(\"Remove item\")\n self.context_menu.addAction(self.context_menu.actionRemoveSOLOSettings)\n self.context_menu.actionRemoveSOLOSettings.triggered.connect(self.remove_item)\n\n self.context_menu.exec_(self.view.viewport().mapToGlobal(position))", "title": "" }, { "docid": "bec8e86d977e734adde7e1e1b636806a", "score": "0.5611421", "text": "def __init__(menu):\n #_____________________VENTANA___________________________#\n tkinter.Tk.__init__(menu);\n\n \"\"\"Titulo\"\"\"\n menu.title(\"Hola! Por favor inserta tu nombre\");\n\n \"\"\"Tamaño\"\"\"\n menu.geometry(\"400x50\");\n\n \"\"\"Indica que la ventana no se puede ampliar\"\"\"\n menu.resizable(width=False, height=False)\n \n #______________________BOTONES__________________________#\n global nombreusuario\n nombreusuario = \"Secret\" \n\n menu.textnombre = crearTexto (menu, \"textnombre\", 100, 10, 15);\n menu.textnombre.configure (bg = \"paleturquoise1\")\n menu.labelnombre = crearEtiqueta(menu, \"labelnombre\", \"Nombre\", 10, 10)\n menu.labelnombre.configure (fg = \"black\")\n menu.botonsiguiente = crearBoton (menu, \"botonsiguiente\", \"Siguiente\", 250, 5, 10, menu.listonombre)\n menu.botonsiguiente.configure (bg = \"blue\")", "title": "" } ]
3c22b47464a2ef66e958ecb9f5d8c7f5
Computes mutual information between two image variates from a joint histogram.
[ { "docid": "559f2a37553f7fa9c6f0e143c7d09a98", "score": "0.60261965", "text": "def similarity_measure(image1,image2,norm=True,bin_rule=None,measure=\"MI\"):\n arr1 = image1.ravel()\n arr2 = image2.ravel()\n if bin_rule == None or bin_rule == \"sturges\":\n dx,Nbins = sturges_bin_width(arr1)\n elif bin_rule == \"scott\":\n dx,Nbins = scott_bin_width(arr1)\n elif bin_rule == \"freedman\":\n dx,Nbins = freedman_bin_width(arr1)\n elif bin_rule == 'auto':\n if len(arr1)<400:\n dx,Nbins = sturges_bin_width(arr1)\n else:\n dx,Nbins = scott_bin_width(arr1)\n else:\n raise ValueError(\"Unrecognised bin width rule: please use auto, scott, sturges or freedman\")\n\n # Convert bins counts to probability values\n hgram, x_edges, y_edges = np.histogram2d(arr1,arr2,Nbins)\n if measure == \"MI\":\n pxy = MutualInformation(renormalize=norm)\n elif measure == \"NMI\":\n pxy = NormalizedMutualInformation(renormalize=norm)\n elif measure == \"PMI\":\n pxy = ParzenMutualInformation(renormalize=norm)\n elif measure == \"DPMI\":\n pxy = DiscreteParzenMutualInformation(renormalize=norm)\n else:\n pxy = NormalizedMutualInformation(renormalize=norm)\n return pxy(hgram)", "title": "" } ]
[ { "docid": "44c5f9982842ac43a6b6ab9c7cab0b07", "score": "0.6591423", "text": "def mutual_information(X, Y):\n\n\n # compute limits\n X = X.compute()\n xmin = da.min(X).compute()\n xmax = da.max(X).compute()\n\n Y = Y.compute()\n ymin = da.min(Y).compute()\n ymax = da.max(Y).compute()\n\n # compute indices\n xi = X-xmin\n yi = Y-ymin\n\n # reshape for histogram\n XY = da.rechunk(da.stack([da.ravel(X),da.ravel(Y)],axis=1),chunks=(1e6,-1), balance=True)\n\n # joint probability distribution\n xbins = range(xmin, xmax+2)\n ybins = range(ymin, ymax+2)\n H, edges = da.histogramdd(XY, bins=[xbins, ybins], density = True)\n H = H.compute()\n\n # x marginal probability distribution\n p_x = H.sum(axis=1)\n # y marginal probability distribution\n p_y = H.sum(axis=0)\n\n\n H_ = da.from_array(H[xi,yi])\n x_ = da.from_array(p_x[xi])\n y_ = da.from_array(p_y[yi])\n\n # Mutual information I(X,Y)\n try:\n MI=da.sum(H_*da.log(H_/(x_*y_)))\n MI=MI.compute()\n except:\n print('H', H.shape)\n print('px', p_x.shape)\n print('py', p_y.shape)\n print('MI failed',xbins, ybins)\n MI = None\n\n return MI", "title": "" }, { "docid": "a9d19916ed889cb831dc7bdbe46ee4be", "score": "0.6504762", "text": "def mutual_information(arr1,arr2,norm=True,bin_rule=\"sturges\"):\n \n if bin_rule == None or bin_rule == \"sturges\":\n dx,Nbins = sturges_bin_width(arr1)\n elif bin_rule == \"scott\":\n dx,Nbins = scott_bin_width(arr1)\n elif bin_rule == \"freedman\":\n dx,Nbins = freedman_bin_width(arr1)\n else:\n raise ValueError(\"Unrecognised bin width rule: please use scott, sturges or freedman\")\n\n # Convert bins counts to probability values\n hgram, x_edges, y_edges = np.histogram2d(arr1,arr2,Nbins)\n\n pxy = hgram/ float(np.sum(hgram))\n px = np.sum(pxy, axis=1) # marginal for x over y\n py = np.sum(pxy, axis=0) # marginal for y over x\n px_py = px[:, None] * py[None, :] # Broadcast to multiply marginals\n # Now we can do the calculation using the pxy, px_py 2D arrays\n nzs = pxy > 0 # Only non-zero pxy values contribute to the sum\n \n if norm:\n nxzx = px > 0\n nxzy = py > 0\n h_x = -np.sum(px[nxzx]* np.log(px[nxzx]) )\n h_y = -np.sum(py[nxzy]* np.log(py[nxzy]) )\n norm = 1.0/(max(np.amax(h_x),np.amax(h_y)))\n else:\n norm = 1.0\n\n i_xy= norm*(np.sum(pxy[nzs] * np.log(pxy[nzs] / px_py[nzs])))\n \n return i_xy", "title": "" }, { "docid": "acd10144f0fd0fbc8e31da1043cb280f", "score": "0.6382095", "text": "def mutual_information(joint):\n # we can compute px and py from joint by applying the law of total probability\n px = joint.sum(axis=1)\n py = joint.sum(axis=0)\n independent = multiply.outer(px, py)\n assert joint.shape == independent.shape\n return kl_divergence(array(joint.flat), array(independent.flat))", "title": "" }, { "docid": "6f5509f3c0f19482fa4d5d9e2203e00d", "score": "0.63297844", "text": "def mutual_information(marginal_p_distrib_X, marginal_p_distrib_Y, joint_p_distrib):\n\n I = 0\n n = len(marginal_p_distrib_X)\n m = len(marginal_p_distrib_Y)\n for i in range(n):\n for j in range(m):\n if(joint_p_distrib[j][i] != 0\n and marginal_p_distrib_X[i] != 0\n and marginal_p_distrib_Y[j] != 0):\n arg = (joint_p_distrib[j][i])/(marginal_p_distrib_X[i] * marginal_p_distrib_Y[j])\n I += joint_p_distrib[j][i] * math.log(arg, 2)\n return I", "title": "" }, { "docid": "f15bb2adf3a241190de5e1049838f29a", "score": "0.59982514", "text": "def mutual_information_2d(x, y, sigma=1, normalized=False):\n bins = (256, 256)\n\n jh = np.histogram2d(x, y, bins=bins)[0]\n\n # smooth the jh with a gaussian filter of given sigma\n 
ndimage.gaussian_filter(jh, sigma=sigma, mode='constant',\n output=jh)\n\n # compute marginal histograms\n jh = jh + EPS\n sh = np.sum(jh)\n jh = jh / sh\n s1 = np.sum(jh, axis=0).reshape((-1, jh.shape[0]))\n s2 = np.sum(jh, axis=1).reshape((jh.shape[1], -1))\n\n # Normalised Mutual Information of:\n # Studholme, jhill & jhawkes (1998).\n # \"A normalized entropy measure of 3-D medical image alignment\".\n # in Proc. Medical Imaging 1998, vol. 3338, San Diego, CA, pp. 132-143.\n if normalized:\n mi = ((np.sum(s1 * np.log(s1)) + np.sum(s2 * np.log(s2)))\n / np.sum(jh * np.log(jh))) - 1\n else:\n mi = (np.sum(jh * np.log(jh)) - np.sum(s1 * np.log(s1))\n - np.sum(s2 * np.log(s2)))\n\n return mi", "title": "" }, { "docid": "c6ddd3950a7848eabd82501033e46d05", "score": "0.59371835", "text": "def mutual_information(p, j):\n return shannon_entropy(np.sum(p, axis=j)) - conditional_shannon_entropy(p, j)", "title": "" }, { "docid": "fa170f816ab000ebce0afdf8f2cc90c0", "score": "0.5864358", "text": "def cond_mutual_information(marginal_p_distrib_X, marginal_p_distrib_Z, joint_p_distrib_XZ, joint_p_distrib_YZ, joint_p_distrib_XYZ):\n\n # I_1 = I(X;Z)\n I_1 = mutual_information(marginal_p_distrib_X, marginal_p_distrib_Z, joint_p_distrib_XZ)\n # H_1 = H(X)\n H_1 = entropy(marginal_p_distrib_X)\n # Transpose of joint distribution to be coherent with function joint_cond_entropy\n joint_p_distrib_YXZ = np.transpose(joint_p_distrib_XYZ, (1, 0, 2))\n # H_2 = H(X|Y,Z)\n H_2 = joint_cond_entropy(joint_p_distrib_YXZ, joint_p_distrib_YZ)\n # I(X;Y|Z) = - I(X;Z) + H(X) - H(X|Y,Z)\n I = -I_1 + H_1 - H_2\n return I", "title": "" }, { "docid": "384a52e41e35d3950ae56df94eb9bc90", "score": "0.5847458", "text": "def mutual_information(x, y):\n H_y = entropy(y)\n feature_partition = partition(x)\n H_y_x = 0\n for k, v in feature_partition.items():\n weightage = len(v) / len(y)\n H_y_x += weightage * entropy([y[k] for k in v])\n I = H_y - H_y_x\n return I", "title": "" }, { "docid": "2b34fe66474f2a9c75c3b9687bd00789", "score": "0.57525307", "text": "def run(self, x_df, y_df, bins=None, logtype=DEFAULT_LOG_TYPE):\n\n assert check.argument_integer(bins, allow_none=True)\n assert check.indexes_align((x_df.columns, y_df.columns))\n assert x_df.shape[0] > 0\n assert x_df.shape[1] > 0\n assert y_df.shape[0] > 0\n assert y_df.shape[1] > 0\n\n if bins is not None:\n self.bins = bins\n\n mi = mutual_information(y_df, x_df, self.bins, temp_dir=self.temp_dir, logtype=logtype)\n mi_bg = mutual_information(x_df, x_df, self.bins, temp_dir=self.temp_dir, logtype=logtype)\n clr = calc_mixed_clr(utils.df_set_diag(mi, 0), utils.df_set_diag(mi_bg, 0))\n\n MPControl.sync_processes(pref=SYNC_CLR_KEY)\n\n return clr, mi", "title": "" }, { "docid": "634dc479ed7e8d2a81dead6cebe3ec03", "score": "0.55721515", "text": "def compare_images_hist(a_img, b_img, **kwargs):\n hists = [cv2.calcHist([im], [0, 1, 2], None, [8, 8, 8],\n [0, 256, 0, 256, 0, 256]) for im in (a_img, b_img)]\n return cv2.compareHist(*hists, method=kwargs.get('method', cv2.HISTCMP_CORREL))", "title": "" }, { "docid": "82bee2178c44f0895e616cce57a08647", "score": "0.5465447", "text": "def getvalue(self):\r\n h1 = Image.open(self.im1).histogram()\r\n h2 = Image.open(self.im2).histogram()\r\n rms = math.sqrt(reduce(operator.add,map(lambda a,b: (a-b)**2, h1, h2))/len(h1))\r\n print 'rms value is' , rms\r\n return rms", "title": "" }, { "docid": "a5c202b4cc28735a0d4837b51da72e64", "score": "0.54577726", "text": "def mutual_information(X, Y, bins, logtype=DEFAULT_LOG_TYPE, 
temp_dir=None):\n\n assert check.indexes_align((X.columns, Y.columns))\n\n # Create dense output matrix and copy the inputs\n mi_r = X.index\n mi_c = Y.index\n\n X = X.values\n Y = Y.values\n\n # Discretize the input matrixes\n X = _make_array_discrete(X.transpose(), bins, axis=0)\n Y = _make_array_discrete(Y.transpose(), bins, axis=0)\n\n # Build the MI matrix\n if MPControl.is_dask():\n from inferelator.distributed.dask_functions import build_mi_array_dask\n return pd.DataFrame(build_mi_array_dask(X, Y, bins, logtype=logtype), index=mi_r, columns=mi_c)\n else:\n return pd.DataFrame(build_mi_array(X, Y, bins, logtype=logtype, temp_dir=temp_dir), index=mi_r,\n columns=mi_c)", "title": "" }, { "docid": "d807c585a1b231d4f834a654aedf9e39", "score": "0.54230666", "text": "def mutual_info(data, norm=True, conf=None):\n if isinstance(data, pd.DataFrame):\n data = data.values\n\n # data smoothing\n data_smoothed = data.copy()\n data_smoothed += 1\n\n if data.shape[0] < 2 or data.shape[1] < 2:\n if conf is not None:\n return 0, 1, 1.0\n else:\n return 0\n\n # row/column sums\n sum_x = np.sum(data_smoothed, axis=1)\n sum_y = np.sum(data_smoothed, axis=0)\n\n # joint probabilities\n data_size = np.array(data_smoothed).sum()\n\n # entropies\n h_x = stats.entropy(sum_x)\n h_y = stats.entropy(sum_y)\n h_xy = stats.entropy(data_smoothed.flatten())\n\n mi = -h_xy + h_x + h_y\n\n # normalized mutual info\n if norm:\n if (h_x == 0) or (h_y == 0) or (mi == 0):\n mi = 0\n else:\n mi = mi/min(h_x, h_y)\n\n # no confidence levels, return single measure\n if conf is None:\n return mi\n\n gstat, pval, dof, _ = tests.g_test(data)\n\n ci_low, ci_high = intervals.ci_mi(gstat, dof, data_size, conf)\n\n if pval > 1-conf:\n ci_low = 0\n\n if norm:\n ci_low /= min(h_x, h_y)\n ci_high /= min(h_x, h_y)\n\n ci_low = max(ci_low, 0)\n ci_high = min(ci_high, 1)\n\n return ci_low, ci_high, pval", "title": "" }, { "docid": "d0fbb99ad218ef95a22ce08fee3bee8c", "score": "0.5387687", "text": "def mutual_info(rank_counts):\n logger.info(\"Start to compute Mutial Information\")\n rank_results = {}\n for c in rank_counts:\n terms = set(rank_counts[c][\"N1\"][\"terms\"].keys())\n rank_results[c] = []\n n0 = 1.0 * rank_counts[c][\"N0\"][\"value\"]\n n1 = 1.0 * rank_counts[c][\"N1\"][\"value\"]\n n = n0 + n1\n for t in terms:\n mui = 0.0\n n11 = 1.0 * rank_counts[c][\"N1\"][\"terms\"].get(t, 0)\n n10 = n1 - n11\n n01 = 1.0 * rank_counts[c][\"N0\"][\"terms\"].get(t, 0)\n n00 = n0 - n01\n t_n1 = n11 + n01\n t_n0 = n10 + n00\n mui += ((n11 + 1) / (n + 4)) * np.log2(((n + 2) *\n (n + 2) *\n (n11 + 1))\n /\n ((n1 + 1) *\n (t_n1 + 1) *\n (n + 4)))\n mui += ((n00 + 1) / (n + 4)) * np.log2(((n + 2) *\n (n + 2) *\n (n00 + 1))\n /\n ((n0 + 1) *\n (t_n0 + 1) *\n (n + 4)))\n mui += ((n10 + 1) / (n + 4)) * np.log2(((n + 2) *\n (n + 2) *\n (n10 + 1))\n /\n ((n1 + 1) *\n (t_n0 + 1) *\n (n + 4)))\n mui += ((n01 + 1) / (n + 4)) * np.log2(((n + 2) *\n (n + 2) *\n (n01 + 1))\n /\n ((n0 + 1) *\n (t_n1 + 1) *\n (n + 4)))\n\n rank_results[c].append((t, mui))\n rank_results[c] = sorted(rank_results[c],\n key=lambda x: x[1], reverse=True)\n return rank_results", "title": "" }, { "docid": "ac7134b60f313e2e05e7d5cb7454d1f9", "score": "0.5381406", "text": "def conditional_mutual_information(p, j, *conditional_indices):\n return conditional_shannon_entropy(np.sum(p, axis=j), *conditional_indices) - conditional_shannon_entropy(p, j,\n *conditional_indices)", "title": "" }, { "docid": "4be0516d562b3bc304b49a9003726ceb", "score": "0.5349631", "text": "def 
mutualInformation(label:np.ndarray, x:np.ndarray):\n\n\n if len(label.shape) == 2:\n label = label.flatten()\n if len(x.shape) == 2:\n x = x.flatten()\n\n return metrics.mutual_info_score(label,x)", "title": "" }, { "docid": "9c9f64216559b92070c7645a0885655e", "score": "0.5293185", "text": "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n ## YOUR CODE HERE\n \n # example of type of values: logJoint[\"SomeLabel\"] = math.log(1e-301) \n for y in self.legalLabels:\n logJoint[y] = math.log(self.labelDistribution[y])\n for fi in datum.keys():\n #print self.featuresGivenLabelsDistribution.has_key((y, fi, datum[fi]))\n #print self.featuresGivenLabelsDistribution.getCount((y, fi, datum[fi]))\n logJoint[y] += math.log(self.featuresGivenLabelsDistribution[y].getCount((fi, datum[fi])))\n \n return logJoint", "title": "" }, { "docid": "67fa6d8c8b435506f9493816ef24f7bd", "score": "0.5290776", "text": "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n\n \"*** YOUR CODE HERE ***\"\n for label in self.legalLabels:\n logJoint[label] = math.log(float(self.counter[label]) / self.N) # log P(label)\n for feature in self.features:\n numerator = self.counter[(label, feature)][datum[feature]] + self.k # add smoothing parameter k here\n denominator = self.counter[(label, feature)][0] + self.counter[(label, feature)][1] + 2 * self.k # we assumed binary feature {0,1}\n\n logJoint[label] += math.log(float(numerator) / denominator) # log P(x_i | label)\n \n return logJoint", "title": "" }, { "docid": "981ac353ff628c446558ddf1b43de876", "score": "0.52768767", "text": "def joints_pred_numpy(self, img, coord = 'hm', thresh = 0.2, sess = None):\r\n\t\tif sess is None:\r\n\t\t\thm = self.HG.Session.run(self.HG.pred_sigmoid , feed_dict = {self.HG.img: img})\r\n\t\telse:\r\n\t\t\thm = sess.run(self.HG.pred_sigmoid , feed_dict = {self.HG.img: img})\r\n\t\tjoints = -1*np.ones(shape = (self.params['num_joints'], 2))\r\n\t\tfor i in range(self.params['num_joints']):\r\n\t\t\tindex = np.unravel_index(hm[0,:,:,i].argmax(), (self.params['hm_size'],self.params['hm_size']))\r\n\t\t\tif hm[0,index[0], index[1],i] > thresh:\r\n\t\t\t\tif coord == 'hm':\r\n\t\t\t\t\tjoints[i] = np.array(index)\r\n\t\t\t\telif coord == 'img':\r\n\t\t\t\t\tjoints[i] = np.array(index) * self.params['img_size'] / self.params['hm_size']\r\n\t\treturn joints", "title": "" }, { "docid": "3264c9cc0b669e821e4c9f57d1e87cd0", "score": "0.5262502", "text": "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n for x in range(len(self.legalLabels)):\n num = self.legalLabels[x]\n logJoint[num] = math.log(self.prior[num]) #For every possible label, for its logJoint counter index, we are adding the prior probability for P(y) for the corresponding label\n for key, value in datum.items(): #We traverse through every pixel in the datum\n if value == 0:\n if self.datumProb[key,num][0] != 0: #Ensure the probability is not 0 since we cannot take the log of 0\n logJoint[num] = logJoint[num] + math.log(self.datumProb[key,num][0]) #Add the probability of that pixel being that feature value given a certain label to the logJoint index for the corresponding label\n elif value == 1:\n if self.datumProb[key,num][1] != 0:\n logJoint[num] = logJoint[num] + math.log(self.datumProb[key,num][1])\n else:\n if self.datumProb[key,num][2] != 0:\n logJoint[num] = logJoint[num] + math.log(self.datumProb[key,num][2])\n return logJoint", "title": "" }, { "docid": "13843e5e0ffb3aeff1c13bda9066d02d", "score": 
"0.524807", "text": "def hsitogramEqualize(imgOrig: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray):\n image = imgOrig\n if isRGB(imgOrig):\n image = cv2.normalize(image.astype('double'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n image = transformRGB2YIQ(image)\n imgEq = image[:, :, 0]\n imgEq = cv2.normalize(imgEq, None, 0, 255, cv2.NORM_MINMAX)\n imgEq = imgEq.astype('uint8')\n else:\n imgEq = image * 255\n imgEq = imgEq.astype('uint8')\n\n lut = np.zeros(256, dtype=imgEq.dtype) # Create an empty lookup table\n\n histOrg, bins = np.histogram(imgEq.flatten(), 256, [0, 256])\n cdf = histOrg.cumsum() # Calculate cumulative histogram\n cdf_normalized = cdf * histOrg.max() / cdf.max()\n cdf_normalized = cdf * histOrg.max() / cdf.max()\n plt.title('origin image histogram')\n plt.plot(cdf_normalized, color='b')\n plt.hist(imgEq.flatten(), 256, [0, 256], color='r')\n plt.xlim([0, 256])\n plt.show()\n cdf_m = np.ma.masked_equal(cdf, 0) # Remove the 0 value in the histogram\n cdf_m = (cdf_m - cdf_m.min()) * 255 / (\n cdf_m.max() - cdf_m.min()) # equal to the lut[i] = int(255.0 *p[i]) formula described earlier\n\n cdf = np.ma.filled(cdf_m, 0).astype('uint8') # Add the masked elements to 0\n linearImg = imgEq\n linearImg = cdf[linearImg]\n histEq, bins = np.histogram(linearImg.flatten(), 256, [0, 255])\n cdflin = histEq.cumsum()\n cdf_normalized_lin = cdflin * histEq.max() / cdflin.max()\n plt.title('linear CDF ')\n plt.plot(cdf_normalized_lin, color='b')\n plt.hist(linearImg.flatten(), 256, [0, 255], color='r')\n plt.xlim([0, 256])\n plt.legend(('cdf', 'histogram'), loc='upper left')\n plt.show()\n if isRGB(imgOrig):\n imgEq = cv2.normalize(imgEq.astype('double'), None, 0.0, 1.0, cv2.NORM_MINMAX)\n image[:, :, 0] = imgEq\n imgEq = transformYIQ2RGB(image)\n imgEq = imgEq * 255\n imgEq = imgEq.astype('uint8')\n\n imgEq = cv2.LUT(imgEq, cdf)\n\n return imgEq, histOrg, histEq\n pass", "title": "" }, { "docid": "bd35481a0ef121b3d58375459e7ecbd1", "score": "0.52401286", "text": "def make_marginal_histograms(self):\n \"\"\"\n fig, ax = plt.subplots()\n myhist_rt = plt.hist(self.intensity_3D_full[:,0])\n ofname = ('hist_rt.png')\n plt.savefig(ofname)\n \n fig, ax = plt.subplots()\n myhist_mz = plt.hist(self.intensity_3D_full[:,1])\n ofname = ('hist_mz.png')\n plt.savefig(ofname)\n \"\"\"\n fig, ax = plt.subplots()\n \"\"\"\n #only plot for intensities greater than 200,000 (takes 7 min.)\n high_intensities = np.zeros(self.intensity_3D_full.shape[0])\n counter = 0\n for intensity in self.intensity_3D_full[:,2]:\n if intensity > 200000:\n high_intensities[counter] = intensity\n counter +=1\n high_intensities.resize(counter)\n myhist_i = plt.hist(high_intensities)\n \"\"\"\n myhist_i = plt.hist(self.intensity_3D_full[:,2])\n ofname = ('hist_log_i.png')\n plt.savefig(ofname)\n \n return", "title": "" }, { "docid": "5d27f0a88740b7e9e20af8fc49c4c6f9", "score": "0.523833", "text": "def joint_p(param_1_values, param_2_values):\n param_1_unique, param_1_counts = np.unique(param_1_values, return_counts=True)\n param_2_unique, param_2_counts = np.unique(param_2_values, return_counts=True)\n n = len(param_1_unique)\n m = len(param_2_unique)\n joint_p_distrib = np.zeros([m,n]) # Lines~param_2, columns~param_1\n for i, mod_1 in enumerate(param_1_unique):\n for j, mod_2 in enumerate(param_2_unique):\n for k in range(len(param_1_values)):\n if param_1_values[k]==mod_1 and param_2_values[k]==mod_2:\n joint_p_distrib[j][i] += 1/len(param_1_values)\n return joint_p_distrib", "title": "" }, { "docid": 
"493ac269398151a1e33333bffd6aa623", "score": "0.52228665", "text": "def mutual_info(mean_prob, mc_prob):\n eps = 1e-5\n first_term = -1 * np.sum(mean_prob * np.log(mean_prob + eps), axis=1)\n second_term = np.sum(np.mean([prob * np.log(prob + eps) for prob in mc_prob], axis=0), axis=1)\n return first_term + second_term", "title": "" }, { "docid": "dc8646bbf4e24c225cac2ec363d853e1", "score": "0.5201841", "text": "def calcMutualInformation(Y, z, n_bins=50, distributions=False,\n correct_bias=False):\n\n if np.sum(Y > 0) == 0:\n if distributions:\n return np.nan, np.nan, np.nan\n else:\n return np.nan\n\n # Histogram of stimulus projections (all stimulus examples)\n hist_raw, edges_raw = np.histogram(z, n_bins, density=True)\n hist_raw /= np.sum(hist_raw)\n\n # Histogram of spike-triggered stimulus projections\n N = z.shape[0]\n n_trials = 1\n if Y.ndim > 1:\n n_trials = Y.shape[1]\n\n hist_spk = np.zeros((n_bins,))\n for trial in range(0, n_trials):\n if n_trials > 1:\n spikes = Y[:, trial] > 0\n proj_spk = z[spikes]\n else:\n spikes = (Y > 0).nonzero()\n proj_spk = z[spikes[0]]\n\n hist_spk += np.histogram(proj_spk, edges_raw)[0]\n\n hist_spk /= np.sum(hist_spk)\n\n # Kullback-Leibler divergence between the two distributions\n if not distributions:\n valid = hist_spk > 0\n mi = np.dot(hist_spk[valid].T, np.log2(np.divide(hist_spk[valid],\n hist_raw[valid])))\n if correct_bias:\n mi -= n_bins / (2. * np.log(2) * N * n_trials)\n return mi\n\n else:\n return hist_raw, hist_spk, edges_raw", "title": "" }, { "docid": "8d5affa9dcfe832584b2f63db5db815c", "score": "0.51940495", "text": "def joint_entropy(joint_p_distrib):\n\n H = 0\n n = np.shape(joint_p_distrib)[1]\n m = np.shape(joint_p_distrib)[0]\n for i in range(n):\n for j in range(m):\n arg = joint_p_distrib[j][i]\n if(arg !=0):\n H -= arg*math.log(arg, 2)\n return H", "title": "" }, { "docid": "738cf5083fc1e0b36e0e44b4b0d6d812", "score": "0.5190543", "text": "def calculateLogJointProbabilities(self, instance):\n log_joint = util.Counter()\n\n for label in self.legalLabels:\n posterior_probability = 0 # log(p_1) = log(1) = 0\n\n for feature in self.features:\n posterior_probability += math.log(self.conditionalProb[(feature, label, instance[feature])])\n\n log_joint[label] = posterior_probability + math.log(self.prior[label])\n\n return log_joint", "title": "" }, { "docid": "61852dbb54f6a541f73ceecafbba1e3e", "score": "0.51903015", "text": "def myOwnHybridIm(sigma1,im1,sigma2,im2):\n im1I = im1.astype('float')\n im2I = im2.astype('float')\n ker1 = calculate1DGaussian(sigma1)\n blurred1 = convoluteWithSeparableMask(ker1,ker1,im1I)\n ker2 = calculate1DGaussian(sigma2)\n blurred2 = convoluteWithSeparableMask(ker2,ker2,im2I)\n hifreq = im2I - blurred2\n ret = hifreq + blurred1\n blurred1 = correctOverFlows(blurred1)\n hifreq = correctOverFlows(hifreq)\n ret = correctOverFlows(ret)\n return blurred1.astype('uint8'), hifreq.astype('uint8'), ret.astype('uint8')", "title": "" }, { "docid": "50dea305ad6a7285091b6e3447aed752", "score": "0.51861054", "text": "def mutual_information(Xbin,q,npmi_norm=False):\n\n Xcsr = Xbin.transpose().tocsr()\n DF = np.array(Xcsr.sum(axis=1)).squeeze()\n\n (nw, nd) = Xcsr.shape\n\n nd = float(nd)\n eps = 1e-8\n #Convert that as Boolean array\n #Matrix Multiplication by Chunks of 100, or parrallize\n P_DF = DF / (float(nd))\n P_NDF = 1.0 - P_DF\n\n log_P_DF = np.log(eps + P_DF)\n log_P_NDF = np.log(eps+ P_NDF)\n\n\n Nij = safe_sparse_dot(Xcsr, q, dense_output=False) #Could speed up by using false\n 
nnzindx=np.nonzero(Nij)[0]\n\n #u == class q\n P_y = q.sum()/nd\n P_Ny= 1-P_y\n\n\n P_w_y = Nij[nnzindx] / nd #Fraction of docs containing u and v\n P_Nw_y = P_y - P_w_y #(DF[wid]- Nij)/nd # Fraction of doc containing of class y but do not contain w\n P_w_Ny = P_DF[nnzindx] - P_w_y #(DF - Nij)/nd\n\n #\n #Check Values here\n '''\n debug = (eps+P_Nw_y)<0.0\n found_index = np.nonzero(debug)[0]\n print('Debugging')\n #print(np.log(eps+P_Nw_y))\n print(list(P_Nw_y[found_index]))\n print(found_index)\n print P_DF[found_index]\n '''\n\n #P_Nv_NDu = (nd - DF - (DF[wid]-Nij)) /nd\n P_Nw_Ny = 1.0 - P_w_y -P_Nw_y - P_w_Ny\n\n\n DS1 =np.log(P_y+eps)*np.ones(nw)[nnzindx]\n DS2 =np.log(P_Ny+eps) *np.ones(nw)[nnzindx]\n\n DS3 =log_P_DF[nnzindx]\n DS4 =log_P_NDF[nnzindx]\n\n #gi =np.zeros(nw)\n gi =-np.inf*np.ones(nw)\n\n #TODO Check that all the stats are indeed positive\n\n gi[nnzindx] = P_w_y * (np.log(eps+P_w_y) - DS1 - DS3) +\\\n P_Nw_y * ( np.log(eps + P_Nw_y) - DS1 - DS4) + \\\n P_w_Ny * (np.log(eps + P_w_Ny) - DS2 - DS3 ) + \\\n P_Nw_Ny * ( np.log(eps + P_Nw_Ny) - DS2- DS4 )\n\n if npmi_norm:\n nmi_renorm = - (P_w_y*np.log(P_w_y) + P_Nw_y*np.log(eps + P_Nw_y) + P_w_Ny *np.log(eps + P_w_Ny) + P_Nw_Ny*np.log(eps + P_Nw_Ny) )\n gi[nnzindx]/= nmi_renorm\n\n return gi", "title": "" }, { "docid": "8feeb34a8259c60d26e3787ecdf2ac39", "score": "0.5180255", "text": "def dlt_homography(I1pts, I2pts):\n #--- FILL ME IN ---\n \n #Define array x and y containing the x and y coordinates of Image 1 respectively\n x = I1pts[0]\n y = I1pts[1]\n\n #Define array u and v containing the x and y coordinates of Image 2 respectively\n u = I2pts[0]\n v = I2pts[1]\n \n #Define an 8 by 9 with 0 values to populate later on \n A = np.zeros((8,9))\n \n\n # Creates Matrix: four 2 by 9 Ai matrices (one per point correspondence) \n # and stacks them on top of one another to create a single 8 by 9 matrix A\n for i in range(len(x)):\n Ai = np.array( [\n [-x[i], -y[i], -1, 0, 0, 0, u[i]*x[i], u[i]*y[i], u[i]],\n [0, 0, 0, -x[i], -y[i], -1, v[i]*x[i], v[i]*y[i], v[i]],\n ])\n A[2*i:2*i+2,:] = Ai\n \n #Use nullspace function to solve A*h = 0 \n h = null_space(A)\n \n #Create final 3by3 homography matrix and normalize by lower right entry\n H = h.reshape(3,3)\n H = H/H[2,2]\n \n #------------------\n\n return H, A", "title": "" }, { "docid": "464c51a5ed765bb757e89ab09d45d947", "score": "0.51730305", "text": "def mutual_info(mc_prob):\n eps = 1e-5\n mean_prob = mc_prob.mean(axis=0)\n first_term = -1 * np.sum(mean_prob * np.log(mean_prob + eps), axis=1)\n second_term = np.sum(np.mean([prob * np.log(prob + eps) for prob in mc_prob], axis=0), axis=1)\n return first_term + second_term", "title": "" }, { "docid": "731dd4f3f4bb084d61028a85a25ff186", "score": "0.5161857", "text": "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n #from what I can tell, the basic idea here is to calculate the probabilities for all labels and output a list with them.\n for label in self.legalLabels:\n condprob = 1\n counter = 0\n for feature in datum.keys():\n if feature == (-1,-1):\n continue\n condprob *= self.dataStats[label][counter][datum[feature]]\n #condprob *= self.GaussianPDF(feature,self.dataStats[label][counter][0],self.dataStats[label][counter][1])\n counter += 1\n #multiply by prior probability\n holder = self.dataStats[label][-1]\n logJoint[label] = condprob*holder #*self.dataStats[label][-1]\n\n return logJoint", "title": "" }, { "docid": "38b5997319d7d11f0653dda3daa2ea2e", "score": "0.5129865", "text": "def act(self, 
image, other) -> np.ndarray:\n ...", "title": "" }, { "docid": "5cc3cd5cfb81f51032296d51cb4435c7", "score": "0.5128277", "text": "def get_histograma(self,imagem):\r\n return cv2.calcHist([imagem], [0, 1, 2], None,self.numBins,[0, 256, 0, 256, 0, 256])", "title": "" }, { "docid": "baf3e47443d13ae796855452f932f799", "score": "0.5124308", "text": "def hsitogramEqualize(imgOrig: np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray):\r\n # ============ check if the color is RGB or Gray\r\n color = checkIfColor(imgOrig)\r\n if color:\r\n n, m, k = imgOrig.shape\r\n else:\r\n n, m = imgOrig.shape\r\n\r\n # convert to YIQ to take the Y\r\n if color:\r\n # ============ take the Y array to perform\r\n image = transformRGB2YIQ(imgOrig.copy())[:, :, 0]\r\n else:\r\n image = imgOrig.copy()\r\n\r\n image = cv.normalize(image, None, 0, 255, cv.NORM_MINMAX)\r\n image = image.astype('uint8')\r\n\r\n # ============ make histogram\r\n histOrg, old_bins = np.histogram(image.ravel(), 256)\r\n # ============ calculate the cumsum\r\n cumsum = np.cumsum(histOrg)\r\n # ============ create lookuptable\r\n cumsum = cumsum / np.max(cumsum)\r\n lookUpTable = cumsum * 255\r\n lookUpTable = lookUpTable.astype('uint8')\r\n # enter all parameter to the look up table\r\n imEq = lookUpTable[image.ravel()]\r\n if not color:\r\n histEQ, new_bins = np.histogram(imEq.ravel(), 256)\r\n return imEq.reshape(n, m), histOrg, histEQ\r\n else:\r\n histEQ, new_bins = np.histogram(imEq.ravel(), 256)\r\n newImage = transformRGB2YIQ(imgOrig.copy())\r\n newImage[:, :, 0] = imEq.reshape(n, m) / 255\r\n newImage = transformYIQ2RGB(newImage)\r\n return newImage, histOrg, histEQ", "title": "" }, { "docid": "9422533ab143552ca905ef9c09395510", "score": "0.51173943", "text": "def calculateJointProbabilities(self, instance):\n joint = util.Counter()\n\n for legal_label in self.legalLabels:\n posterior_probability = 1.0\n\n for feature in self.features:\n posterior_probability *= self.conditionalProb[(feature, legal_label, instance[feature])]\n\n joint[legal_label] = posterior_probability * self.prior[legal_label]\n\n return joint", "title": "" }, { "docid": "52800f8cd5ef1704841a10aa6edbe62d", "score": "0.5113604", "text": "def compute_link_statistics(\n interactions_graph,\n annotations_dict,\n annotation_term_i,\n annotation_term_j\n ):\n annotated_i_genes = annotations_dict[annotation_term_i]\n # Get neighbors of genes annotated with term i that are not\n # annotated with term i themselves\n neighbors_of_i = interactions_graph.get_neighbors_of_annotated(\n annotated_i_genes)\n annotated_j_genes = annotations_dict[annotation_term_j]\n intersection_size = len(neighbors_of_i.intersection(\n annotated_j_genes))\n union_size = len(neighbors_of_i.union(annotated_j_genes))\n jaccard = intersection_size / float(union_size)\n\n stats = {\n 'term1_size': len(annotated_i_genes),\n 'neighbors_of_term1': len(neighbors_of_i),\n 'term2_size': len(annotated_j_genes),\n 'intersection': intersection_size,\n 'union': union_size,\n 'jaccard': jaccard\n }\n return stats", "title": "" }, { "docid": "a66a10efdcb651d40d1acba48aa1b3db", "score": "0.510098", "text": "def compute_mutual_information(series, delay, num_bins):\n mutual_information = 0\n max_val = max(series)\n min_val = min(series)\n delayed_series = series[delay:len(series)]\n shortened_series = series[0:len(series) - delay]\n bin_size = abs(max_val - min_val) / num_bins\n prob_in_bin_dict = {}\n condition_to_be_in_bin = {}\n condition_delay_to_be_in_bin = {}\n\n for i in range(0, num_bins):\n memoize_prob(i, 
bin_size, condition_to_be_in_bin, min_val, prob_in_bin_dict, shortened_series)\n\n for j in range(0, num_bins):\n memoize_prob(j, bin_size, condition_to_be_in_bin, min_val, prob_in_bin_dict, shortened_series)\n\n if j not in condition_delay_to_be_in_bin:\n cond = compute_condition(j, bin_size, min_val, delayed_series)\n condition_delay_to_be_in_bin.update({j: cond})\n\n p_ij = calculate_joint_prob(condition_delay_to_be_in_bin, condition_to_be_in_bin, i, j, shortened_series)\n if p_ij != 0 and prob_in_bin_dict[i] != 0 and prob_in_bin_dict[j] != 0:\n mutual_information -= p_ij * math.log(p_ij / (prob_in_bin_dict[i] * prob_in_bin_dict[j]))\n\n return mutual_information", "title": "" }, { "docid": "99db755c9f930e7b3bcf2d708d06b864", "score": "0.50913095", "text": "def plot_histogram(self, cur_dist, next_dist, pid, a, i):\n action_map = {0: 'A', 1: 'B', 2: 'C'}\n if pid == 0:\n action = action_map[a[0]]\n else:\n action = action_map[a[1]]\n actions_map = {(0, 0): '(A, A)', (0, 1): '(A, B)', (0, 2): '(A, C)', (1, 0): '(B, A)', (1, 1): '(B, B)',\n (1, 2): '(B, C)', (2, 0): '(C, A)', (2, 1): '(C, B)', (2, 2): '(C, C)'}\n a_joint = actions_map[a]\n plt.figure()\n plt.rcParams.update({'font.size': 13.7})\n plt.subplots_adjust(left=0.15, bottom=0.11, right=0.9, top=0.87, wspace=0.22, hspace=0.35)\n plt.hist(cur_dist, bins=self.bins, label=f'current distribution of action {action}', alpha=0.9)\n plt.hist(next_dist, bins=self.bins, label=f\"target distribution after joint-action {a_joint}\"\n , alpha=0.6)\n plt.xlabel(\"Q-value\")\n plt.ylabel(\"Number of samples\")\n\n plt.title(f\"Current and target distribution\\n\"\n f\"Iteration {i}, agent {pid+1}, $l$ = {round(self.sim_metric, 2)}\")\n plt.legend()", "title": "" }, { "docid": "e7bd5c6f7d48b20ae082310fe793aecd", "score": "0.5089446", "text": "def joints_pred(self, img, coord = 'hm', debug = False, sess = None):\r\n\t\tif debug:\r\n\t\t\tt = time()\r\n\t\t\tif sess is None:\r\n\t\t\t\tj1 = self.HG.Session.run(self.HG.joint_tensor, feed_dict = {self.HG.img: img})\r\n\t\t\telse:\r\n\t\t\t\tj1 = sess.run(self.HG.joint_tensor, feed_dict = {self.HG.img: img})\r\n\t\t\tprint('JT:', time() - t)\r\n\t\t\tt = time()\r\n\t\t\tif sess is None:\r\n\t\t\t\tj2 = self.HG.Session.run(self.HG.joint_tensor_final, feed_dict = {self.HG.img: img})\r\n\t\t\telse:\r\n\t\t\t\tj2 = sess.run(self.HG.joint_tensor_final, feed_dict = {self.HG.img: img})\r\n\t\t\tprint('JTF:', time() - t)\r\n\t\t\tif coord == 'hm':\r\n\t\t\t\treturn j1, j2\r\n\t\t\telif coord == 'img':\r\n\t\t\t\treturn j1 * self.params['img_size'] / self.params['hm_size'], j2 *self.params['img_size'] / self.params['hm_size']\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Error: 'coord' argument different of ['hm','img']\")\r\n\t\telse:\r\n\t\t\tif sess is None:\r\n\t\t\t\tj = self.HG.Session.run(self.HG.joint_tensor_final, feed_dict = {self.HG.img: img})\r\n\t\t\telse:\r\n\t\t\t\tj = sess.run(self.HG.joint_tensor_final, feed_dict = {self.HG.img: img})\r\n\t\t\tif coord == 'hm':\r\n\t\t\t\treturn j\r\n\t\t\telif coord == 'img':\r\n\t\t\t\treturn j * self.params['img_size'] / self.params['hm_size']\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Error: 'coord' argument different of ['hm','img']\")", "title": "" }, { "docid": "952c8658e98a7874bce113163c537afb", "score": "0.5087664", "text": "def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor:\n if isinstance(cut, list) or isinstance(cut, tuple) or isinstance(cut, set):\n traceout = list(cut)\n else:\n traceout = [i for i in range(cut)]\n\n if len(s.shape) == 2 and 
s.shape[0] == s.shape[1]:\n # mixed state\n n = int(np.log2(backend.sizen(s)) / 2)\n hab = entropy(s)\n\n # subsystem a\n rhoa = reduced_density_matrix(s, traceout)\n ha = entropy(rhoa)\n\n # need subsystem b as well\n other = tuple(i for i in range(n) if i not in traceout)\n rhob = reduced_density_matrix(s, other) # type: ignore\n hb = entropy(rhob)\n\n # pure system\n else:\n hab = 0.0\n rhoa = reduced_density_matrix(s, traceout)\n ha = hb = entropy(rhoa)\n\n return ha + hb - hab", "title": "" }, { "docid": "b8c67bc69b92ef737c53067d5b90293f", "score": "0.5085419", "text": "def descrever(self,imagem):\r\n result=[]\r\n hist_r=cv2.calcHist([imagem], [2], None,self.numBins[2],[0, 256])\r\n hist_g=cv2.calcHist([imagem], [1], None,self.numBins[1],[0, 256])\r\n hist_b=cv2.calcHist([imagem], [0], None,self.numBins[0],[0, 256])\r\n #Dimensoes da Imagem\r\n N, M = imagem.shape[:2]\r\n #Media\r\n media_r=0\r\n media_g=0\r\n media_b=0\r\n\r\n for index, valor in enumerate(hist_r, start=0): # default is zero\r\n media_r+=index*self.densidade_probabilidade(valor,N,M)\r\n print media_r", "title": "" }, { "docid": "73e71101f62486c7e9c2de4e6b2e0149", "score": "0.5080443", "text": "def mutual_info(x: np.ndarray, y: np.ndarray, style: str = \"freedman\") -> float:\n def freedman(x: np.ndarray) -> int:\n iqr = np.quantile(x, [0.25, 0.75])\n return int((x.max() - x.min()) / (2 * (iqr[1] - iqr[0]) * x.size ** (-1 / 3))) + 1\n\n def scott(x: np.ndarray) -> int:\n return int((x.max() - x.min()) / (3.5 * np.std(x) * x.size ** (-1 / 3))) + 1\n\n def sturges(x: np.ndarray) -> int:\n return int(1 + np.log2(x.size)) + 1\n\n bin_funcs: Dict[str, Callable[[np.ndarray], float]] = {\"freedman\": freedman, \"scott\": scott, \"sturges\": sturges}\n if style in bin_funcs:\n bin_func = bin_funcs[style]\n bin_no = [bin_func(x), bin_func(y)]\n else:\n try:\n bin_no = [int(style), int(style)]\n except ValueError:\n raise ValueError(\"style can only be one of ['freedman', 'scott', 'sturges']\")\n dist = np.histogram2d(x, y, bin_no)[0]\n return mutual_info_score(None, None, contingency=dist)", "title": "" }, { "docid": "bc950f975557fd60d2bfde6e8cc704e7", "score": "0.50534797", "text": "def histogram_compare(histogram1, histogram2):\n result = cv2.compareHist(histogram1, histogram2, cv2.HISTCMP_CORREL)\n return result", "title": "" }, { "docid": "aaa7f9c3774c41358798a4195f19aaf6", "score": "0.5040308", "text": "def metrics(firstImage, secondImage):\n\n ssim = structural_similarity(\n firstImage, secondImage, data_range=firstImage.max() - firstImage.min(), multichannel=True\n )\n psnr = peak_signal_noise_ratio(firstImage, secondImage, data_range=firstImage.max() - firstImage.min())\n\n image_metrics = {\"SSIM\": ssim, \"PSNR\": psnr}\n\n return image_metrics", "title": "" }, { "docid": "8f98c807dea4dcb80fbd01a0388bda08", "score": "0.50382566", "text": "def joint_entropy_3(joint_p_distrib):\n\n H = 0\n n = np.shape(joint_p_distrib)[1]\n m = np.shape(joint_p_distrib)[0]\n p = np.shape(joint_p_distrib)[2]\n for i in range(n):\n for j in range(m):\n for k in range(p):\n arg = joint_p_distrib[j][i][k]\n if(arg !=0):\n H -= arg*math.log(arg, 2)\n return H", "title": "" }, { "docid": "93642979a7ef6ea0c004d42dc9bc2b5b", "score": "0.502434", "text": "def mutual_information_equation(args):\r\n\r\n h_x = args[0]\r\n h_yzw = args[1:]\r\n h_xyzw = args\r\n m = entropy(h_x) + entropy(h_yzw) - entropy(h_xyzw) \r\n\r\n return m", "title": "" }, { "docid": "4507afb8d19803387ca583c56b78d98d", "score": "0.5024232", "text": "def 
mi(xs,ys,correct=True):\n hx = entropy(xs,correct=correct)\n hy = entropy(ys,correct=correct)\n hxy = entropy(zip(xs,ys),correct=correct)\n return hx + hy - hxy", "title": "" }, { "docid": "86a01b581f54827886e5333ee1e0e941", "score": "0.5002903", "text": "def jointplot_samples(self):\n\n params = self.genob.inferable_params\n log = [p.plotlog for p in params]\n plt.clf()\n\n g = sns.jointplot(\n x=self.samples[0],\n y=self.samples[1],\n kind=\"hist\",\n bins=30,\n log_scale=log[:2],\n height=10,\n ratio=3,\n space=0,\n )\n g.plot_joint(sns.kdeplot, color=\"k\", zorder=1, levels=6, alpha=0.75)\n g.plot_marginals(sns.rugplot, color=\"r\", height=-0.1, clip_on=False, alpha=0.1)\n plt.xlabel(params[0].name)\n plt.ylabel(params[1].name)\n plt.title(f\"Jointplot at iteration {self.iter}\")\n plt.savefig(f\"./results/jointplot_{self.iter}.png\")\n plt.clf()", "title": "" }, { "docid": "149d64a84007452373cf4498d393bb77", "score": "0.4997504", "text": "def cond_joint_entropy(marginal_p_distrib_Z, joint_p_distrib_XYZ):\n # H_1 = H(Z)\n H_1 = entropy(marginal_p_distrib_Z)\n # H_2 = H(X,Y,Z)\n H_2 = joint_entropy_3(joint_p_distrib_XYZ)\n # H(X,Y|Z) = H(X,Y,Z) - H(Z)\n H = H_2 - H_1\n return H", "title": "" }, { "docid": "9eb7d545c66de6941e730d1b766ad049", "score": "0.49967444", "text": "def q_mutual_info(self, rho, nA, nB, indicesA, indicesB, reg):\n sa=ent_entropy(rho, nA, indicesA, reg)\n sb=ent_entropy(rho, nB, indicesB, reg)\n sab=vonNeumann_entropy(rho, reg)\n s=sa+sb-sab\n return s", "title": "" }, { "docid": "8cd13dca52b30b712d7121c59604210b", "score": "0.49883908", "text": "def maps(self, x, y):\r\n\r\n # check shapes\r\n tensor_shape_x = x.shape\r\n tensor_shape_y = y.shape\r\n assert tensor_shape_x == tensor_shape_y, 'volume shapes do not match'\r\n assert torch.min(x) >= 0, 'voxel values must be non-negative'\r\n assert torch.min(y) >= 0, 'voxel values must be non-negative'\r\n\r\n eps = 1e-6\r\n\r\n # reshape to [bs, V, B]\r\n if len(tensor_shape_x) != 3:\r\n x = torch.reshape(x, (tensor_shape_x[1], tensor_shape_x[2], tensor_shape_x[3])) # [bs, V, B1]\r\n y = torch.reshape(y, (tensor_shape_x[1], tensor_shape_x[2], tensor_shape_x[3])) # [bs, V, B2]\r\n\r\n # x probability for each batch entry\r\n px = torch.sum(x, 1, keepdim=True) # [bs, 1, B1]\r\n px = px / torch.sum(px, dim=2, keepdim=True)\r\n # y probability for each batch entry\r\n py = torch.sum(y, 1, keepdim=True) # [bs, 1, B2]\r\n py = py / torch.mean(py, dim=2, keepdim=True)\r\n\r\n # joint probability for each batch entry\r\n x_trans = x.permute(0, 2, 1) # [bs, B1, V]\r\n pxy = torch.bmm(x_trans, y) # [bs, B1, B2]\r\n pxy = pxy / (torch.sum(pxy, dim=[1, 2], keepdim=True) + eps) # [bs, B1, B2]\r\n\r\n # independent xy probability\r\n px_trans = px.permute(0, 2, 1) # [bs, B1, 1]\r\n pxpy = torch.bmm(px_trans, py) # [bs, B1, B2]\r\n pxpy_eps = pxpy + eps\r\n\r\n # mutual information\r\n log_term = torch.log(pxy / pxpy_eps + eps) # [bs, B1, B2]\r\n mi = torch.sum(pxy * log_term, dim=[1, 2]) # [bs]\r\n return mi", "title": "" }, { "docid": "fd75b96f829a13d11916e175d66cd94b", "score": "0.49658006", "text": "def showMyOwnHybridIm(sigma1,im1,sigma2,im2):\n imgs = myOwnHybridIm(sigma1,im1,sigma2,im2)\n pintaVarias(imgs)", "title": "" }, { "docid": "07850fe0c2addbda698616170f2d78b1", "score": "0.49640396", "text": "def plot_joint_dist(pmf1, pmf2, thresh=0.8):\n\n def clean(probability_mass_function):\n \"\"\"Removes values below thresh.\"\"\"\n vals = [val for val in probability_mass_function.values() if val < thresh]\n for val in vals:\n 
probability_mass_function.remove(val)\n\n clean(pmf1)\n clean(pmf2)\n pmf = thinkbayes.make_joint(pmf1, pmf2)\n\n thinkplot.underride_figure(figsize=(6, 6))\n thinkplot.contour_plot(pmf, contour_bool=False, pcolor_bool=True)\n\n thinkplot.plot_line([thresh, 1.0], [thresh, 1.0], color=\"gray\", alpha=0.2, linewidth=4)\n\n thinkplot.save_plot(\n root=\"sat_joint\",\n xlabel=\"p_correct Alice\",\n ylabel=\"p_correct Bob\",\n axis=[thresh, 1.0, thresh, 1.0],\n formats=[\"pdf\", \"eps\"],\n )", "title": "" }, { "docid": "95d764579dc80af38cbe86dbf4336afd", "score": "0.49627915", "text": "def histogramEqualization ():\n \n global currentImage, newImage\n \n ycbcr = currentImage.convert('YCbCr')\n\n array = imageToArray(ycbcr)\n\n bins = 256 # Number of bins in the histogram. Corresponds to the intensity range for each pixel.\n pixels = float(currentImage.width * currentImage.height)\n\n frequencies = np.bincount(array[:,:,0].flatten(), minlength=bins) # Returns the frequency of each intensity value occurring.\n equalizedFrequencies = np.zeros(bins)\n \n mapping = dict()\n \n for r in range(bins):\n \n # Creates a mapping of every intensity r in the current image to an intensity s in the new image.\n \n s = int(np.clip(round((bins/pixels) * np.sum(frequencies[0:r]) - 1), 0, 255))\n\n\n equalizedFrequencies[s] += frequencies[r]\n mapping[r] = s\n\n vfunc = np.vectorize(mapping.get)\n\n array[:,:,0] = vfunc(array[:,:,0])\n\n ycbcr = arrayToImage(array)\n newImage = ycbcr.convert('RGB')\n\n currentImage = newImage\n\n #draw = ImageDraw.Draw(currentImage)\n #draw.text((0, 0),\"Sample Text\",(255,255,255))", "title": "" }, { "docid": "fa3a7016a1e5c0bfb753dd24b9cafedc", "score": "0.49627694", "text": "def __prepare_histogram(h1, h2):\n h1 = h1 if scipy.ndarray == type(h1) else scipy.asarray(h1)\n h2 = h2 if scipy.ndarray == type(h2) else scipy.asarray(h2)\n if h1.shape != h2.shape or h1.size != h2.size:\n raise ValueError('h1 and h2 must be of same shape and size')\n return h1, h2", "title": "" }, { "docid": "9cb0b82d300678abb638e3789e52df04", "score": "0.49611765", "text": "def compute_joint_prob(event_count,i,count):\n print(\"-\"*31)\n print(\"for p = {} q1 = {} q2 = {}\".format(i[0],i[1],i[2]))\n print(\"-\"*31)\n if count:\n print(\"event count\")\n print(\"-\"*31)\n for key,value in event_count.items():\n print(key.ljust(10) + str(value).ljust(6))\n else: \n print(\"event joint prob\")\n print(\"-\"*31)\n for key,value in event_count.items():\n print((\"P(\"+key[0]+\",\"+key[1]+')').ljust(10) + str(value/1000).ljust(6))\n print()", "title": "" }, { "docid": "f0c7a68db902c17b767646d326a44a66", "score": "0.49475682", "text": "def collecting_similarity_values_for_plot(self, pid, actions, iteration):\n if actions == (0, 0):\n self.j_a_0_0[pid][0].append(self.sim_metric)\n self.j_a_0_0[pid][1].append(iteration)\n elif actions == (0, 1):\n self.j_a_0_1[pid][0].append(self.sim_metric)\n self.j_a_0_1[pid][1].append(iteration)\n elif actions == (0, 2):\n self.j_a_0_2[pid][0].append(self.sim_metric)\n self.j_a_0_2[pid][1].append(iteration)\n elif actions == (1, 0):\n self.j_a_1_0[pid][0].append(self.sim_metric)\n self.j_a_1_0[pid][1].append(iteration)\n elif actions == (1, 1):\n self.j_a_1_1[pid][0].append(self.sim_metric)\n self.j_a_1_1[pid][1].append(iteration)\n elif actions == (1, 2):\n self.j_a_1_2[pid][0].append(self.sim_metric)\n self.j_a_1_2[pid][1].append(iteration)\n elif actions == (2, 0):\n self.j_a_2_0[pid][0].append(self.sim_metric)\n self.j_a_2_0[pid][1].append(iteration)\n elif actions == 
(2, 1):\n self.j_a_2_1[pid][0].append(self.sim_metric)\n self.j_a_2_1[pid][1].append(iteration)\n elif actions == (2, 2):\n self.j_a_2_2[pid][0].append(self.sim_metric)\n self.j_a_2_2[pid][1].append(iteration)", "title": "" }, { "docid": "fea01a8f79350cc716ddf8964cd20e39", "score": "0.4934867", "text": "def compute_pmi(term_a, term_b, n, dictionary):\n p_a = 0\n p_b = 0\n tables_a = []\n tables_b = []\n if term_a in dictionary:\n tables_a = dictionary[term_a]\n p_a = len(tables_a) / n\n if term_b in dictionary:\n tables_b = dictionary[term_b]\n\n p_b = len(tables_b) / n\n p_a_b = len(set(tables_a).intersection(tables_b)) / n\n if p_a_b == 0.0:\n return 0.0\n return math.log(p_a_b / (p_a * p_b))", "title": "" }, { "docid": "ffc54c848ab20b6dfdaaff9d6aeecdfb", "score": "0.4920127", "text": "def __init__(self, hist1, hist2):\n self.__h1 = hist1 # Model Source, realH\n self.__h2 = hist2 # ndH, generated\n\n self.__h1f = hist1.values.flatten()\n self.__h2f = hist2.values.flatten()\n\n self.__diff = (self.__h2f - self.__h1f)", "title": "" }, { "docid": "478ac5d27bba2c2a8fb6d0ed606b2930", "score": "0.49180236", "text": "def Update( self, nbins=None, targetpc=None, p2p=None ):\r\n\t\tif nbins != None: self.nbins = nbins\r\n\t\tif targetpc != None: self.targetpc = targetpc\r\n\t\tif p2p != None: self.p2p = p2p\r\n\r\n\t\tdef ResponseStats( type ):\r\n\t\t\tx = self.overlay.ResponseMagnitudes( p2p=self.p2p, type=type )\r\n\t\t\tx = [ xi for xi, emphasis in zip( x, self.overlay.emphasis ) if emphasis >= 0 ]\r\n\t\t\txSorted = sorted( x )\r\n\t\t\tif len( x ) == 0: xMedian = xMean = 0.0\r\n\t\t\telse:\r\n\t\t\t\txMean = sum( x ) / float( len ( x ) )\r\n\t\t\t\txMedian = Quantile( xSorted, 0.5, alreadySorted=True )\r\n\t\t\treturn x, xSorted, xMean, xMedian\r\n\t\tself.nremoved = sum( [ emphasis < 0 for emphasis in self.overlay.emphasis ] )\r\n\t\tr, rSorted, rMean, rMedian = ResponseStats( 'response' )\r\n\t\tc, cSorted, cMean, cMedian = ResponseStats( 'comparison' )\r\n\t\tb, bSorted, bMean, bMedian = ResponseStats( 'prestimulus' )\r\n\t\tn = len( r )\r\n\t\tif n: targets = Quantile( rSorted, ( self.targetpc / 100.0, 1.0 - self.targetpc / 100.0 ), alreadySorted=True )\r\n\t\telse: targets = [ 0 ]\r\n\t\tdowntarget, uptarget = max( targets ), min( targets )\r\n\t\tmatplotlib.pyplot.figure( self.axes.figure.number ).sca( self.axes )\r\n\t\tmatplotlib.pyplot.cla()\r\n\t\t#print 'calculated = ' + repr( r )\r\n\t\tif len( r ) == 1: extra = dict( range=[ r[ 0 ] * 0.9, r[ 0 ] * 1.1 ] )\r\n\t\telse: extra = {}\r\n\t\tif len( r ): self.counts, self.binCenters, self.patches = matplotlib.pyplot.hist( r, bins=self.nbins, facecolor=self.overlay.lineprops[ 0 ].color, edgecolor='none', **extra )\r\n\t\telse: self.counts, self.binCenters, self.patches = [], [], []\r\n\t\tself.xController = AxisController( self.axes, 'x', units='V', fmt='%g', start=self.axes.get_xlim() )\r\n\t\tself.xController.Home()\r\n\t\tvals = [ downtarget, uptarget, rMean ]\r\n\t\tself.downline, self.upline, self.meanline = matplotlib.pyplot.plot( [ vals, vals ], [ [ 0 for v in vals ], [ 1 for v in vals ] ], color='#FF0000', linewidth=4, alpha=0.5, transform=self.axes.get_xaxis_transform() )\r\n\t\tself.panel.n.set( n )\r\n\t\tself.panel.prestimulus.set( [ bMedian, bMean ] )\r\n\t\tself.panel.comparison.set( [ cMedian, cMean ] )\r\n\t\tself.panel.response.set( [ rMedian, rMean ] )\r\n\t\tself.panel.uptarget.set( uptarget )\r\n\t\tself.panel.downtarget.set( downtarget )\r\n\t\tif self.nremoved: self.axes.set_title( '%d of %d trials removed' % ( 
self.nremoved, self.nremoved + n ) )\r\n\t\telse: self.axes.set_title( '' )", "title": "" }, { "docid": "3536c3aa2b25f33d10b9b1a64d1d4dbf", "score": "0.49159926", "text": "def prob_2_2(self):\n ###### START CODE HERE ######\n flat=self.A.flatten()\n fig=plt.hist(flat, bins=20, edgecolor='black', linewidth=1.2)\n plt.title('2.2 Intensity Histogram')\n plt.xlabel(\"Intensity Range\")\n plt.ylabel(\"Frequency\")\n plt.savefig(\"2_2.png\")\n plt.show()\n ###### END CODE HERE ######\n pass", "title": "" }, { "docid": "e27b4c4f8f36765a3ff693aa3ccc7803", "score": "0.4912891", "text": "def joint_marginal(self):\n jm_mu = None\n for i in range(self.mu.ndim):\n # step 1: compute the marginal\n sum_axes = tuple(j for j in range(self.mu.ndim) if j != i)\n ith_marginal = self.mu.sum(axis=sum_axes)\n\n # step 2: expand the new joint-marginal expectation tensor with\n # the marginal\n jm_mu = (jm_mu[..., np.newaxis] * ith_marginal[np.newaxis, ...]\n if jm_mu is not None\n else ith_marginal)\n return MultivariateBernoulli(jm_mu, list(self.variable_names))", "title": "" }, { "docid": "d5d3cc5cb26de04f14887bd384a182e5", "score": "0.4899892", "text": "def hist_photo():\n # Image test\n img = cv2.imread('photo2.jpg', 0)\n\n # Detection\n detection, centres = detect_eye(img, is_gray = True)\n\n # Affichage du resultat\n plt.imshow(detection)\n plt.show()", "title": "" }, { "docid": "5582c927e6b14b1747e539282afd0c9d", "score": "0.48907754", "text": "def algoHist( truthDic, Ncols = 2,bins=100, density=False, fixedJetP=False, jetLabels = None, variable = None, xLabel = None, yLabel=None, minx=None, maxx=None, miny=None, maxy=None):\n\n fig2, (axes) = plt.subplots(nrows=1, ncols=Ncols)\n fig2.set_size_inches(10, 5)\n plt.tight_layout(pad=0.4, w_pad=5, h_pad=1.0)\n\n\n for i in range(len(jetLabels)):\n\n\n axes[i].hist(truthDic[jetLabels[i]][variable],\n density=density,\n bins=bins,\n histtype=\"step\",\n fill=False,\n align='mid',\n label=\"Truth\",\n color=\"black\")\n\n \"\"\" If samples have all the jets with the same momentum\"\"\"\n if fixedJetP:\n root_id = truthDic[jetLabels[i]][\"jetsList\"][0][\"root_id\"]\n root_p = truthDic[jetLabels[i]][\"jetsList\"][0][\"content\"][root_id]\n jetAngle = np.arctan2(root_p[0], root_p[1])\n axes[i].axvline(jetAngle, color=\"black\", linestyle='--')\n\n # axes[i].set_xlabel(\" Jets constituents polar angle \", fontsize=15)\n # axes[i].set_ylabel(\" # of jets constituents\", fontsize=15)\n axes[i].set_xlabel(xLabel, fontsize=15)\n axes[i].set_ylabel(yLabel, fontsize=15)\n if maxx is not None:\n axes[i].set_xlim([minx,maxx])\n if miny is not None:\n axes[i].set_ylim([miny,maxy])\n\n axes[i].legend(loc='best', fontsize=15)\n axes[i].grid(which='both', axis='both', linestyle='--')\n axes[i].set_title(r\"\"+jetLabels[i][0:-4]+\" \"+jetLabels[i][-4::], fontsize = 20)", "title": "" }, { "docid": "48743c9c803cc217cd141b341ac311e3", "score": "0.48824608", "text": "def joints_to_gaussian_heatmap(joints, image_size,\n num_joints=17, stride=1,\n sigma=2):\n\n assert num_joints == joints.shape[0]\n\n tmp_size = sigma * 3\n\n height, width = image_size\n\n height //= stride\n width //= stride\n\n heatmap = np.zeros((height, width, num_joints), dtype=np.float32)\n\n for i in range(num_joints):\n\n if joints[i, 2] > 0:\n center_x = int(joints[i, 0]) // stride\n center_y = int(joints[i, 1]) // stride\n\n up_left = [center_x - tmp_size, center_y - tmp_size]\n bottom_right = [center_x + tmp_size + 1, center_y + tmp_size + 1]\n\n if center_x >= width or center_x < 0:\n continue\n\n if 
center_y >= height or center_y < 0:\n continue\n\n size = 2 * tmp_size + 1\n x = np.arange(0, size, 1, np.float32)\n y = x[:, np.newaxis]\n x0 = y0 = size // 2\n\n g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))\n\n # Usable gaussian range\n g_x = max(0, -up_left[0]), min(bottom_right[0], width) - up_left[0]\n g_y = max(0, -up_left[1]), min(bottom_right[1], height) - up_left[1]\n\n # Image range\n img_x = max(0, up_left[0]), min(bottom_right[0], width)\n img_y = max(0, up_left[1]), min(bottom_right[1], height)\n\n heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1], i] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]\n\n am = np.amax(heatmap)\n if am > 0:\n heatmap /= am\n\n # 10 is scaling factor of a ground-truth gaussian heatmap.\n heatmap *= 10\n\n return heatmap", "title": "" }, { "docid": "9187583c8768c30d3c4de50eaa6b1998", "score": "0.48790148", "text": "def caculate_normalized_scalar_product(hist1, hist2):\n score = 0\n ############################\n # TODO: Add your code here #\n ############################\n score = np.dot(hist1, hist2) / (np.linalg.norm(hist1) * np.linalg.norm(hist2))\n ############################\n # END OF YOUR CODE #\n ############################\n return score", "title": "" }, { "docid": "cece2e42750b56cf3adcfd0fe28367a5", "score": "0.48609903", "text": "def calculate_partial_mutual_information(n_ij, x_i, y_j, n):\n if n_ij == 0 or x_i == 0 or y_j == 0:\n return 0\n return n_ij * ((log2(n_ij) + log2(n)) -\n (log2(x_i) + log2(y_j)))", "title": "" }, { "docid": "48ca9d9cd5bc906505b4dafab4f0e110", "score": "0.4858772", "text": "def update(probabilities, one_gene, two_genes, have_trait, p):\n for person in probabilities:\n num = 1 * (person in one_gene) + 2 * (person in two_genes)\n have = (person in have_trait)\n probabilities[person][\"gene\"][num] += p\n probabilities[person][\"trait\"][have] += p\n \n return", "title": "" }, { "docid": "252eb68e50e3fce6bbfcce1471037f96", "score": "0.485754", "text": "def changeInteraction(G, i, j):\n return G.binattr[i] * G.binattr[j]", "title": "" }, { "docid": "3753bf7d8a564eb7a409102be08e86f9", "score": "0.48480484", "text": "def compute_homography(src, dst):\n h_matrix = np.eye(3, dtype=np.float64)\n\n ### YOUR CODE HERE\n n = len(src)\n \n ## Normalization\n ### Normalization for src, p\n src_mean = np.mean(src, axis=0)\n\n p_minus_mean = np.zeros([n,2])\n for i in range(n):\n p_minus_mean[i,:] = src[i,:] - src_mean\n \n # sd_y_x = np.std(p_minus_mean, axis=0)\n # s_y = sd_y_x[0] * 1/(math.sqrt(2))\n # s_x = sd_y_x[1] * 1/(math.sqrt(2))\n test_s = np.mean(np.sqrt(np.sum(np.square(p_minus_mean), axis=1)), axis=0) * math.sqrt(2) \n s = test_s\n\n t = np.zeros([3,3])\n t[0][0] = 1/s\n t[0][2] = -1/s * src_mean[1] # mean x coordinate\n t[1][1] = 1/s\n t[1][2] = -1/s * src_mean[0] # mean y coordinate\n t[2][2] = 1\n \n # p_normalized = p_minus_mean / s\n p_normalized = transform_homography(src, t)\n \n ### Normalization for dst, p_prime\n dst_mean = np.mean(dst, axis=0)\n \n p_prime_minus_mean = np.zeros([n,2])\n\n for i in range(n):\n p_prime_minus_mean[i,:] = dst[i,:] - dst_mean\n\n # sd_prime_y_x = np.std(dst, axis=0)\n # s_prime_y = sd_prime_y_x[0] * 1/(math.sqrt(2))\n # s_prime_x = sd_prime_y_x[1] * 1/(math.sqrt(2))\n s_prime = np.mean(np.sqrt(np.sum(np.square(p_prime_minus_mean), axis=1)), axis=0) * 1 / math.sqrt(2)\n \n t_prime = np.zeros([3,3])\n t_prime[0][0] = 1/s_prime\n t_prime[0][2] = -(1/s_prime) * dst_mean[1] # mean x coordinate\n t_prime[1][1] = 1/s_prime\n t_prime[1][2] = -(1/s_prime) * dst_mean[0] # mean y 
coordinate\n t_prime[2][2] = 1\n \n # p_prime_normalized = p_prime_minus_mean / s_prime\n p_prime_normalized = transform_homography(dst, t_prime)\n \n ## DLT\n a_matrix = []\n \n for row in range(n):\n x = p_normalized[row][1]\n y = p_normalized[row][0]\n x_prime = p_prime_normalized[row][1]\n y_prime = p_prime_normalized[row][0]\n \n a_i = np.zeros([2,9])\n a_i[0][0] = -1 * x\n a_i[0][1] = -1 * y\n a_i[0][2] = -1\n a_i[0][6] = x * x_prime\n a_i[0][7] = y * x_prime\n a_i[0][8] = x_prime\n a_i[1][3] = -1 * x\n a_i[1][4] = -1 * y\n a_i[1][5] = -1\n a_i[1][6] = x * y_prime\n a_i[1][7] = y * y_prime\n a_i[1][8] = y_prime\n a_matrix.append(a_i[0])\n a_matrix.append(a_i[1])\n\n a_matrix = np.array(a_matrix) \n u, s, v = np.linalg.svd(a_matrix, full_matrices=True)\n #v = np.matrix.transpose(v)\n h_norm_matrix = v[:,8].reshape(3,3)\n\n t_prime_inverse = np.linalg.inv(t_prime)\n h_matrix = np.matmul(np.matmul(t_prime_inverse, h_norm_matrix),t)\n\n ### END YOUR CODE\n\n return h_matrix", "title": "" }, { "docid": "85b6f5413a936d1b8228e82e91acef88", "score": "0.48465505", "text": "def joint_prob(self, val):\r\n # TODO: finish this\r\n log_N = -np.add(0.5*np.log(2*np.pi*self.variances),np.exp(np.subtract(np.log(np.square(np.subtract(val,self.means))),np.log(2*self.variances))))\r\n return logsumexp(log_N,b=self.mixing_coefficients.reshape(1,-1))", "title": "" }, { "docid": "725993998aef01e415a05d5f4c30a937", "score": "0.48307112", "text": "def _track_objects(self, contour_imgs):\n imm = self._track_material(contour_imgs[self.material_num_label])\n imf = self._track_finger(contour_imgs[self.finger_num_label])\n return imm, imf", "title": "" }, { "docid": "29b0bdbeb890712907fc7e0fb4945cec", "score": "0.48244503", "text": "def imgHist(img):\n\trows, cols = img.shape\n\tim_h = [0] * 256\n\tim_hn = im_h.copy()\n\tMN = rows * cols\n\n\tfor i in range(rows):\n\t\tfor j in range(cols):\n\t\t\tim_h[img[i, j]] += 1\n\t\t\tim_hn[img[i, j]] += 1 / MN\n\n\treturn im_h, im_hn", "title": "" }, { "docid": "79b9b873876d03589d999986b206d55b", "score": "0.48242104", "text": "def hsv_hists(img, plt):\n plt.figure(figsize=(20,10));\n img_h = img[:,:,0]\n img_s = img[:,:,1]\n img_v = img[:,:,2]\n histo_plot(img_h, \"r\",\"H\", plt);\n histo_plot(img_s, \"g\",\"S\", plt);\n histo_plot(img_v, \"b\",\"V\", plt);", "title": "" }, { "docid": "68a140003b20736fa79d1653b3b33b55", "score": "0.48183146", "text": "def jointLocalUpdateMc(bundles, revenue, bids, targetBid, samples, verbose = False):\n# bundleRevenueDict = {}\n# for bundle, r in zip(bundles,revenue):\n# bundleRevenueDict[tuple(bundle)] = r\n muj = numpy.float64(0.0)\n goodsWon = samples <= bids\n for w in goodsWon:\n# w = sample <= bids\n w[targetBid] = True\n posIdx = numpy.where((bundles == w).all(axis=1))[0][0]\n w[targetBid] = False\n negIdx = numpy.where((bundles == w).all(axis=1))[0][0]\n muj += revenue[posIdx]-revenue[negIdx]\n \n if verbose:\n print muj / samples.shape[0]\n \n return muj/samples.shape[0]", "title": "" }, { "docid": "90a6175c181ffd9a14845478a937b565", "score": "0.48133925", "text": "def make_envelope_hists(hist_list, nom_hist=None):\n new_axis=0 #don't play around with it\n new_hist_up = deepcopy(hist_list[0])\n new_hist_up.reset()\n new_hist_down = deepcopy(new_hist_up)\n \n values = []\n variances = []\n i = 0\n for i, h in enumerate(hist_list):\n values.append(h.values(flow=True))\n variances.append(h.variances(flow=True))\n if nom_hist:\n values.append(nom_hist.values(flow=True))\n variances.append(nom_hist.variances(flow=True))\n #Stack 
values and variances\n s_values = np.stack(values, axis=new_axis)\n s_variances = np.stack(variances, axis=new_axis)\n \n # Find the minima and maxima per bin, corresponding to new_axis\n s_val_argmax = np.argmax(s_values, axis=new_axis, keepdims=True)\n s_val_argmin = np.argmin(s_values, axis=new_axis, keepdims=True)\n \n # Get the corresponding minima and maxima and (for now) matching variances\n s_val_max = np.take_along_axis(s_values, s_val_argmax, axis=new_axis)[0]\n s_var_max = np.take_along_axis(s_variances, s_val_argmax, axis=new_axis)[0]\n s_val_min = np.take_along_axis(s_values, s_val_argmin, axis=new_axis)[0]\n s_var_min = np.take_along_axis(s_variances, s_val_argmin, axis=new_axis)[0]\n \n # Set the values and variances accordingly, this one doesn't use new_axis\n new_hist_up.view(flow=True)[...] = np.stack([s_val_max, s_var_max], axis=-1)\n new_hist_down.view(flow=True)[...] = np.stack([s_val_min, s_var_min], axis=-1)\n \n return new_hist_up, new_hist_down", "title": "" }, { "docid": "f64024340282719cdaf3047dc2eb0432", "score": "0.4795287", "text": "def extract_features(self,imgs): \n \n features = []\n \n if self.features == 'lbpror_h' :\n for img in imgs:\n features.append(self.spatial_hist(\n feature.local_binary_pattern(img,\n self.S,self.radius, method='ror'), self.grid_x, self.grid_y))\n \n elif self.features == 'tplbp_h' :\n for img in imgs:\n features.append(self.spatial_hist(\n self.tplbp(img,\n self.S,self.radius), self.grid_x, self.grid_y))\n \n elif self.features == 'tplbp_lbpror_h' :\n for img in imgs:\n hist1 = self.spatial_hist(feature.local_binary_pattern(img,\n self.S,self.radius, method='ror'), self.grid_x, self.grid_y)\n \n hist2 = self.spatial_hist(self.tplbp(img,\n self.S,self.radius), self.grid_x, self.grid_y)\n \n # combine histograms for two methods\n hist = np.hstack((hist1, hist2))\n features.append(hist)\n \n elif self.features == 'ltp_h' :\n for img in imgs:\n (low,high) = self.ltp(img)\n hist1 = self.spatial_hist(low, self.grid_x, self.grid_y)\n hist2 = self.spatial_hist(high, self.grid_x, self.grid_y)\n # combine histograms for two methods\n hist = np.hstack((hist1, hist2))\n features.append(hist)\n else:\n for img in imgs:\n features.append(self.spatial_hist(\n feature.local_binary_pattern(img,\n self.S,self.radius), self.grid_x, self.grid_y))\n \n \n return np.vstack(features)", "title": "" }, { "docid": "5ea1c36b8f82e7f4d5054112aa25bc08", "score": "0.47946426", "text": "def caculate_normalized_scalar_product(hist1, hist2):\n score = 0\n \n ############################\n # TODO: Add your code here #\n ############################\n norm1, norm2 = (np.linalg.norm(hist1), np.linalg.norm(hist2))\n if abs(norm1) < 1e-6 or abs(norm2) < 1e-6:\n return 0\n score = hist1.dot(hist2) / (norm1 * norm2)\n ############################\n # END OF YOUR CODE #\n ############################\n \n return score", "title": "" }, { "docid": "3aa2d1440337a2d912c1632e55356774", "score": "0.47931308", "text": "def update(probabilities, one_gene, two_genes, have_trait, p):\n\n for person in probabilities:\n\n if person in one_gene:\n probabilities[person][\"gene\"][1] += p\n elif person in two_genes:\n probabilities[person][\"gene\"][2] += p\n else:\n probabilities[person][\"gene\"][0] += p\n\n if person in have_trait:\n probabilities[person][\"trait\"][True] += p\n else:\n probabilities[person][\"trait\"][False] += p", "title": "" }, { "docid": "d7813037ea3b5ef65899c05a9be97ce9", "score": "0.4792296", "text": "def joint_prob(sentence, A, B):\n p = 0 # joint log prob. 
of words and tags\n\n last_tag = START\n for (word, tag) in sentence:\n p += A[last_tag][tag] + B[tag][word]\n last_tag = tag\n\n assert isfinite(p) and p < 0 # Should be negative. Think why!\n return p", "title": "" }, { "docid": "929cb852c5b175b52f0f11f306b1f61c", "score": "0.4788808", "text": "def material_distances(vectors1, vectors2):\n# from multiprocessing import Pool\n# from itertools import product\n# from functools import partial\n \n B,N,d = vectors1.shape\n D,M,d = vectors2.shape\n \n similarities = Variable(torch.zeros(B,D))\n \n# hungarian = Hungarian()\n \n for ind, mat1 in enumerate(vectors1):\n for ind2, mat2 in enumerate(vectors2):\n #pdb.set_trace()\n distance_matrix = cdist_torch(mat1, mat2)\n row_ind, col_ind = linear_sum_assignment(distance_matrix.detach().cpu().numpy())\n #indices = torch.tensor([list(i) for i in hungarian.get_results()])\n #similarities[ind, ind2] = torch.exp(-distance_matrix[indices[:,0], indices[:,1]]).sum()\n similarities[ind, ind2] = torch.exp(-distance_matrix[row_ind, col_ind]).sum()\n #distance_matrix[]\n \n# pool = Pool() #defaults to number of available CPU's\n# chunksize = 20 #this may take some guessing ... take a look at the docs to decide\n# for ind, res in enumerate(pool.imap(partial(par_func, vector1=vectors1, vector2=vectors2) , product(range(B), range(D))), chunksize):\n# similarities[ind] = res\n# \n# pool.close()\n# pool.join() \n \n return similarities/max(M,N)", "title": "" }, { "docid": "ad4b3c8cf1318e125e44bba698d0f724", "score": "0.4788398", "text": "def histeq(im,nbr_bins=256):\n\n # get image histogram\n imhist,bins = histogram(im.flatten(),nbr_bins,normed=True)\n print('bins=', bins)# 0~255사이의 값들이 \n cdf = imhist.cumsum() # cumulative distribution function\n print('cdf=', cdf)#0~1사이의 값들이255개 쭉 나온다\n cdf = 255 * cdf / cdf[-1] # normalize\n print('norm cdf=', cdf)\n\n # use linear interpolation of cdf to find new pixel values\n im2 = interp(im.flatten(),bins[:-1],cdf)\n\n return im2.reshape(im.shape), cdf", "title": "" }, { "docid": "bc3bf7e878db59535027257aaa706eca", "score": "0.4771958", "text": "def joint_prob(sentence, A, B):\n p = 0 # joint log prob. of words and tags\n\n prev_tag = START\n for word, tag in sentence:\n trans_p = A[prev_tag][tag]\n if word not in B[tag].keys():\n word = UNK\n emission_p = B[tag][word]\n p += trans_p + emission_p # log probs are added and not multiplied.\n prev_tag = tag\n # add the prob to get to end:\n trans_p = A[prev_tag][END]\n p += trans_p\n\n assert isfinite(p) and p < 0 # Should be negative. Think why!\n return p", "title": "" }, { "docid": "23c7d6cddc30df0ec21343b5e8364db6", "score": "0.47700366", "text": "def joint_probability(people, one_gene, two_genes, have_trait):\n # for clarity, compute the joint probability for parents and children separately\n # ergo, two sets of people are defined: parents and children\n parents = set()\n children = set()\n\n for person in people:\n \n # if a person has no mother, the person has no father\n # this is based on the dataset in 'data/'\n # the person who has no mother (and father) is a parent\n if people[person]['mother'] == None:\n parents.add(person)\n else:\n children.add(person)\n\n # 'antes' computes probabilities of antecedents (parents)\n # this is not strictly required for calculation\n # single variable may have sufficed\n antes = 1\n for person in parents:\n\n # neat trick to calculate no. 
of genes\n # any person can have one or two genes but not both\n # if the person is in 'one_gene', it evaluates to True i.e., 1 and yields 1\n # if the person is in 'two_gene', it evaluates to True i.e., 1 and yields 2\n # when 'one_gene' evaluates to True, the other evaluates to False i.e., 0\n # and vice-versa\n # have is simply the boolean representing whether the person\n # has the trait or not, based on his/her presence in 'have_trait'\n num = 1 * (person in one_gene) + 2 * (person in two_genes)\n have = (person in have_trait)\n\n # PROBS dictionary is used to evaluate probabilities for the parents\n antes *= PROBS[\"gene\"][num] * PROBS[\"trait\"][num][have]\n\n # the consequences are the children, and their joint probability is present in conses\n # this is not strictly necessary either\n conses = 1\n for person in children:\n \n # get the child (person) 's mom and dad from the people dictionary\n mom = people[person][\"mother\"]\n dad = people[person][\"father\"]\n\n # as before, evaluate the number of genes for the parents and the child\n # also evaluate whether the child has a trait\n num = 1 * (person in one_gene) + 2 * (person in two_genes)\n num_mom = 1 * (mom in one_gene) + 2 * (mom in two_genes)\n num_dad = 1 * (dad in one_gene) + 2 * (dad in two_genes)\n have = (person in have_trait)\n\n # get the value of probability of mutation for ease of use later\n mutation = PROBS[\"mutation\"]\n\n # if the child has no gene\n if num == 0:\n\n # if neither dad nor mom has the gene\n # then, neither must undergo mutation\n if num_dad == 0 and num_mom == 0:\n effect = (1 - mutation) * (1 - mutation)\n \n # if both dad and mom have 2 genes\n # both must undergo mutatino so that none is transmitted\n elif (num_dad == 2 and num_mom == 2):\n effect = mutation * mutation\n \n # if either one of the parents has 2 genes and the other has 0 genes\n # the one with 2 genes must undergo mutation and the other must not\n elif (num_dad == 2 and num_mom == 0) or \\\n (num_dad == 0 and num_mom == 2):\n effect = mutation * (1 - mutation)\n \n # if either one fo the parents has 1 gene and the other has 0 genes\n # the one with the 1 gene must not transmit with prob 0.5\n # the other must not undergo mutation\n elif (num_dad == 0 and num_mom == 1) or \\\n (num_dad == 1 and num_mom == 0):\n effect = (1 - mutation) * 0.5\n \n # if either one of the parents has 2 genes and the other has 1 gene\n # the one with 2 genes must undergo mutation\n # and other must not transmit with prob 0.5\n elif (num_dad == 2 and num_mom == 1) or \\\n (num_dad == 1 and num_mom == 2):\n effect = mutation * 0.5\n\n # else both have 1 gene each and\n # must each not transmit with prob 0.5\n else:\n effect = 0.5 * 0.5\n\n # if the child has one gene\n elif num == 1:\n\n # if both parents have no genes\n # one of them must go mutation to transmit 1 and the other must not\n # if both parents have 2 genes\n # one of them transmits naturally and the other mustn't by mutation\n if (num_dad == 0 and num_mom == 0) or \\\n (num_dad == 2 and num_mom == 2):\n effect = mutation * (1 - mutation)\n \n # if one of the parents has 2 genes and the other has no genes\n # the one with 2 can transmit and the other can't, naturally\n # or\n # the one with 2 can't transmit and the other can, by mutation\n elif (num_dad == 2 and num_mom == 0) or \\\n (num_dad == 0 and num_mom == 2):\n effect = (1 - mutation) * (1 - mutation) + mutation * mutation\n \n # if both the parents have 1 gene\n # one of them transmits with prob 0.5\n # the other doesn't with 
prob 0.5\n # and vice-versa\n elif (num_dad == 1 and num_mom == 1):\n effect = 2 * 0.5 * 0.5\n\n # in the rest of the cases\n # either transmit by mutation(0) or don't by mutation(2)\n # either transmit naturally(2) or don't naturally(0)\n # 50% chance of transmitting or not transmitting(1)\n # (0, 1) (1, 0) (2, 1) (1, 2)\n else:\n effect = mutation * 0.5 + (1 - mutation) * 0.5\n\n # if the child has 2 genes\n # same as for 0 genes\n # but mutation becomes (1 - mutation)\n # and vice-versa\n # because 0 and 2 are opposite cases\n # 0 never transmits and 2 always transmits\n else:\n\n if num_dad == 0 and num_mom == 0:\n effect = mutation * mutation\n \n elif (num_dad == 2 and num_mom == 2):\n effect = (1 - mutation) * (1 - mutation)\n \n elif (num_dad == 2 and num_mom == 0) or \\\n (num_dad == 0 and num_mom == 2):\n effect = mutation * (1 - mutation)\n \n elif (num_dad == 0 and num_mom == 1) or \\\n (num_dad == 1 and num_mom == 0):\n effect = mutation * 0.5\n \n elif (num_dad == 2 and num_mom == 1) or \\\n (num_dad == 1 and num_mom == 2):\n effect = (1 - mutation) * 0.5\n\n # (1, 1)\n else:\n effect = 0.5 * 0.5\n\n conses *= effect * PROBS[\"trait\"][num][have]\n\n return antes * conses", "title": "" }, { "docid": "60f2f79f48c0e253c9525794b2ebd4f0", "score": "0.47681382", "text": "def concatTwoHMMs(hmm1, hmm2):\n pi = np.zeros(hmm1['startprob'].shape[0] + hmm2['startprob'].shape[0] -1)\n pi[0:hmm1['startprob'].shape[0]] = hmm1['startprob']\n tmp = np.multiply(hmm2['startprob'], hmm1['startprob'][-1])\n pi[hmm1['startprob'].shape[0]-1:] = tmp\n\n h1len = hmm1['startprob'].shape[0]\n h2len = hmm2['startprob'].shape[0]\n \n\n dim = h1len+h2len-1\n trans = np.zeros((dim, dim))\n #first transmat\n trans[:h1len-1, : h1len-1] = hmm1['transmat'][:-1, :-1]\n\n #transition to second mat\n #print( hmm1['transmat'])\n column = np.matrix(hmm1['transmat'][:-1,-1]).T\n #print(column)\n tile = np.tile(column, (1, h2len) )\n #print(tile)\n secondPart = np.multiply(tile, np.matrix(hmm2['startprob'][:]) )\n #print(secondPart)\n secondPart = secondPart\n trans[:h1len-1, h1len-1:] = secondPart \n\n #second matrix\n trans[h1len-1:, h1len-1:] = hmm2['transmat']\n\n #means\n means = np.vstack((hmm1['means'], hmm2['means']))\n #covars\n covars = np.vstack((hmm1['covars'], hmm2['covars']))\n \n hmm3 = {}\n hmm3['startprob'] = pi\n hmm3['transmat'] = trans\n hmm3['means'] = means\n hmm3['covars'] = covars\n return hmm3", "title": "" }, { "docid": "a12e5c4e0133395d910348f468fca597", "score": "0.47561377", "text": "def compare_images(img1, img2):\n img1 = np.ravel(normalize(img1))\n img2 = np.ravel(normalize(img2))\n return np.corrcoef(img1,img2)[0,1] #this matrix is 2 by 2 ", "title": "" }, { "docid": "cd818ead0022ca2f02771fcf7430b252", "score": "0.47469017", "text": "def histeq(im,nbr_bins=256):\n # get image histogram\n imhist,bins = histogram(im.flatten(),nbr_bins,normed=True)\n cdf = imhist.cumsum() # cumulative distribution function\n cdf = 255 * cdf / cdf[-1] # normalize\n # use linear interpolation of cdf to find new pixel values\n im2 = interp(im.flatten(),bins[:-1],cdf)\n return im2.reshape(im.shape), cdf", "title": "" }, { "docid": "ff7e8fc609fd2912f430e7a249c8604f", "score": "0.47445917", "text": "def hw2_histeq(image):\n [Histogram, CDF] = hist_cdf(image)\n New_Intensity = [0]*257\n New_Intensity = [floor(cdf*255) for cdf in CDF]\n \n return New_Intensity", "title": "" }, { "docid": "ef15b0b05b94dcc64f7d3a804da25c1e", "score": "0.47443712", "text": "def compute_kernel_histogram(x1,x2):\n value= 
np.vdot(x1,x2)\n return value", "title": "" }, { "docid": "9e2a902b80236d30a8d0bf45ecb24288", "score": "0.47425234", "text": "def chamfer(images1, images2):\n # Convert in the opencv data format\n images1 = images1.astype(np.uint8)\n images1 = images1 * 255\n images2 = images2.astype(np.uint8)\n images2 = images2 * 255\n N = images1.shape[0]\n size = images1.shape[-1]\n\n D1 = np.zeros((N, size, size))\n E1 = np.zeros((N, size, size))\n\n D2 = np.zeros((N, size, size))\n E2 = np.zeros((N, size, size))\n summ1 = np.sum(images1, (1, 2))\n summ2 = np.sum(images2, (1, 2))\n\n # sum of completely filled image pixels\n filled_value = int(255 * size**2)\n defaulter_list = []\n for i in range(N):\n img1 = images1[i, :, :]\n img2 = images2[i, :, :]\n\n if (summ1[i] == 0) or (summ2[i] == 0) or (summ1[i] == filled_value) or (summ2[\\\n i] == filled_value):\n # just to check whether any image is blank or completely filled\n defaulter_list.append(i)\n continue\n edges1 = cv2.Canny(img1, 1, 3)\n sum_edges = np.sum(edges1)\n if (sum_edges == 0) or (sum_edges == size**2):\n defaulter_list.append(i)\n continue\n dst1 = cv2.distanceTransform(\n ~edges1, distanceType=cv2.DIST_L2, maskSize=3)\n\n edges2 = cv2.Canny(img2, 1, 3)\n sum_edges = np.sum(edges2)\n if (sum_edges == 0) or (sum_edges == size**2):\n defaulter_list.append(i)\n continue\n\n dst2 = cv2.distanceTransform(\n ~edges2, distanceType=cv2.DIST_L2, maskSize=3)\n D1[i, :, :] = dst1\n D2[i, :, :] = dst2\n E1[i, :, :] = edges1\n E2[i, :, :] = edges2\n distances = np.sum(D1 * E2, (1, 2)) / (\n np.sum(E2, (1, 2)) + 1) + np.sum(D2 * E1, (1, 2)) / (np.sum(E1, (1, 2)) + 1)\n # TODO make it simpler\n distances = distances / 2.0\n # This is a fixed penalty for wrong programs\n distances[defaulter_list] = 16\n return distances", "title": "" }, { "docid": "f0993f5fa7f09861411d468e495ef803", "score": "0.47382125", "text": "def join_image(mat1, mat2, joint_width, side, norm=True, total_width=None):\n (nrow1, ncol1) = mat1.shape\n (nrow2, ncol2) = mat2.shape\n joint_int = int(np.floor(joint_width))\n sub_pixel = joint_width - joint_int\n side = int(side)\n if sub_pixel > 0.0:\n if side == 1:\n mat1 = shift(mat1, (0, sub_pixel), mode='nearest')\n mat2 = shift(mat2, (0, -sub_pixel), mode='nearest')\n else:\n mat1 = shift(mat1, (0, -sub_pixel), mode='nearest')\n mat2 = shift(mat2, (0, sub_pixel), mode='nearest')\n if nrow1 != nrow2:\n raise ValueError(\"Two images are not at the same height!!!\")\n total_width0 = ncol1 + ncol2 + joint_int\n if (total_width is None) or (total_width < total_width0):\n total_width = total_width0\n mat_comb = np.zeros((nrow1, total_width0), dtype=np.float32)\n if side == 1:\n if norm is True:\n factor1 = np.mean(mat1[:, -3:])\n factor2 = np.mean(mat2[:, :3])\n mat2 = mat2 * factor1 / factor2\n mat_comb[:, 0:ncol1] = mat1\n mat_comb[:, (ncol1 + joint_int):total_width0] += mat2\n list_mask = np.zeros(total_width0, dtype=np.float32)\n list_mask[ncol1 - 2:ncol1 + joint_int + 3] = 1.0\n xlist = np.where(list_mask < 1.0)[0]\n ylist = np.arange(nrow1)\n finter = interpolate.RectBivariateSpline(ylist, xlist,\n mat_comb[:, xlist],\n kx=1, ky=1)\n xlist_miss = np.where(list_mask > 0.0)[0]\n if len(xlist_miss) > 0:\n x_mat_miss, y_mat = np.meshgrid(xlist_miss, ylist)\n output = finter.ev(np.ndarray.flatten(y_mat),\n np.ndarray.flatten(x_mat_miss))\n mat_comb[:, xlist_miss] = output.reshape(x_mat_miss.shape)\n else:\n if norm is True:\n factor2 = np.mean(mat2[:, -3:])\n factor1 = np.mean(mat1[:, :3])\n mat2 = mat2 * factor1 / factor2\n mat_comb[:, 
0:ncol2] = mat2\n mat_comb[:, (ncol2 + joint_int):total_width0] += mat1\n list_mask = np.zeros(total_width0, dtype=np.float32)\n list_mask[ncol2 - 2:ncol2 + joint_int + 3] = 1.0\n xlist = np.where(list_mask < 1.0)[0]\n ylist = np.arange(nrow1)\n finter = interpolate.RectBivariateSpline(ylist, xlist,\n mat_comb[:, xlist],\n kx=1, ky=1)\n xlist_miss = np.where(list_mask > 0.0)[0]\n if len(xlist_miss) > 0:\n x_mat_miss, y_mat = np.meshgrid(xlist_miss, ylist)\n output = finter.ev(np.ndarray.flatten(y_mat),\n np.ndarray.flatten(x_mat_miss))\n mat_comb[:, xlist_miss] = output.reshape(x_mat_miss.shape)\n if total_width > total_width0:\n mat_comb = np.pad(\n mat_comb, ((0, 0), (0, total_width - total_width0)), mode='edge')\n return mat_comb", "title": "" }, { "docid": "7ba2322c222a50892bd184a43fe298b1", "score": "0.4735509", "text": "def predict(self, a, b, **kwargs):\n binning_alg = kwargs.get('bins', 'fd')\n return metrics.adjusted_mutual_info_score(bin_variable(a, bins=binning_alg),\n bin_variable(b, bins=binning_alg))", "title": "" }, { "docid": "7ba2322c222a50892bd184a43fe298b1", "score": "0.4735509", "text": "def predict(self, a, b, **kwargs):\n binning_alg = kwargs.get('bins', 'fd')\n return metrics.adjusted_mutual_info_score(bin_variable(a, bins=binning_alg),\n bin_variable(b, bins=binning_alg))", "title": "" }, { "docid": "0a5fafac78e6ba9b09cce1da11510679", "score": "0.47281244", "text": "def find_motion_vector(img1, img2):\r\n # print \"in function\"\r\n\tif img1 == None or img2 == None or img1.shape[2] < 3 or img2.shape[2] < 3:\r\n\t\treturn numpy.array([0.0, 0.0])\r\n\r\n\tgray1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)\r\n\tfeatures1 = cv2.goodFeaturesToTrack(gray1, GF_MAX_CORNERS, \\\r\n\t\tGF_QUALITY_LEVEL, GF_MIN_DISTANCE, GF_BLOCK_SIZE)\r\n\r\n\tif features1 == None or len(features1) == 0:\r\n\t\treturn numpy.array([0.0, 0.0])\r\n\r\n\tgray2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)\r\n\tfeatures2, st, err = cv2.calcOpticalFlowPyrLK(gray1, gray2, features1, \\\r\n\t\tnextPts=None, winSize=LK_WINDOW_SIZE, maxLevel=LK_MAX_LEVEL, \\\r\n\t\tcriteria=LK_CRITERIA)\r\n\r\n\tgood_features2 = features2[st==1]\r\n\tgood_features1 = features1[st==1]\r\n\r\n \r\n\tdiff = good_features1 - good_features2\r\n\r\n\t# if no correspondences are found:\r\n\tif len(diff) == 0: \r\n\t\treturn numpy.array([0.0, 0.0])\r\n \r\n\treturn numpy.mean(diff, axis=0, dtype=numpy.float32)", "title": "" }, { "docid": "de1d4fff5ce889f8f7dd2b1573777609", "score": "0.47253188", "text": "def similarity(im0, im1):\n if im0.shape != im1.shape:\n raise ValueError(\"Images must have same shapes.\")\n elif len(im0.shape) != 2:\n raise ValueError(\"Images must be 2 dimensional.\")\n\n f0 = fftshift(abs(fft2(im0)))\n f1 = fftshift(abs(fft2(im1)))\n\n h = highpass(f0.shape)\n f0 *= h\n f1 *= h\n del h\n\n f0, log_base = logpolar(f0)\n f1, log_base = logpolar(f1)\n\n f0 = fft2(f0)\n f1 = fft2(f1)\n r0 = abs(f0) * abs(f1)\n ir = abs(ifft2((f0 * f1.conjugate()) / r0))\n i0, i1 = numpy.unravel_index(numpy.argmax(ir), ir.shape)\n angle = 180.0 * i0 / ir.shape[0]\n scale = log_base ** i1\n\n if scale > 1.8:\n ir = abs(ifft2((f1 * f0.conjugate()) / r0))\n i0, i1 = numpy.unravel_index(numpy.argmax(ir), ir.shape)\n angle = -180.0 * i0 / ir.shape[0]\n scale = 1.0 / (log_base ** i1)\n if scale > 1.8:\n raise ValueError(\"Images are not compatible. 
Scale change > 1.8\")\n\n if angle < -90.0:\n angle += 180.0\n elif angle > 90.0:\n angle -= 180.0\n\n im2 = ndii.zoom(im1, 1.0/scale)\n im2 = ndii.rotate(im2, angle)\n\n if im2.shape < im0.shape:\n t = numpy.zeros_like(im0)\n t[:im2.shape[0], :im2.shape[1]] = im2\n im2 = t\n elif im2.shape > im0.shape:\n im2 = im2[:im0.shape[0], :im0.shape[1]]\n\n f0 = fft2(im0)\n f1 = fft2(im2)\n ir = abs(ifft2((f0 * f1.conjugate()) / (abs(f0) * abs(f1))))\n t0, t1 = numpy.unravel_index(numpy.argmax(ir), ir.shape)\n\n if t0 > f0.shape[0] // 2:\n t0 -= f0.shape[0]\n if t1 > f0.shape[1] // 2:\n t1 -= f0.shape[1]\n\n im2 = ndii.shift(im2, [t0, t1])\n\n # correct parameters for ndimage's internal processing\n if angle > 0.0:\n d = int((int(im1.shape[1] / scale) * math.sin(math.radians(angle))))\n t0, t1 = t1, d+t0\n elif angle < 0.0:\n d = int((int(im1.shape[0] / scale) * math.sin(math.radians(angle))))\n t0, t1 = d+t1, d+t0\n scale = (im1.shape[1] - 1) / (int(im1.shape[1] / scale) - 1)\n\n return im2, scale, angle, [-t0, -t1]", "title": "" }, { "docid": "02f7022695adbb500fd831a80541a1bb", "score": "0.4708374", "text": "def mutual_information(self, approx_prob=False,\n excitation_method='logarithm',\n mutual_information_method='default',\n clip=True):\n if excitation_method =='logarithm':\n # use the expansion of the mutual information around the optimal\n # point to calculate an approximation of the mutual information\n \n # determine the probabilities of receptor activations \n q_n, q_nm = self.receptor_crosstalk(ret_receptor_activity=True,\n approx_prob=approx_prob)\n \n # calculate mutual information from this\n MI = self._estimate_MI_from_q_stats(\n q_n, q_nm, method=mutual_information_method)\n\n elif excitation_method == 'overlap':\n # calculate the MI assuming that receptors are independent.\n # This expression assumes that each receptor provides a fractional \n # information H_r/N_s. 
Some of the information will be overlapping\n # and the resulting MI is thus smaller than the naive estimate:\n # MI < N_r * H_r\n\n # determine the probabilities of receptor activation \n q_n = self.receptor_activity(approx_prob=approx_prob)\n \n # calculate mutual information from this, ignoring crosstalk\n MI = self._estimate_MI_from_q_stats(\n q_n, 0, method=mutual_information_method)\n\n # estimate the effect of crosstalk by calculating the expected\n # overlap between independent receptors \n H_r = MI / self.Nr\n MI = self.Ns - self.Ns*(1 - H_r/self.Ns)**self.Nr\n \n else:\n raise ValueError('Unknown method `%s`' % excitation_method)\n \n if clip:\n # limit the MI to the mixture entropy\n return np.clip(MI, 0, self.mixture_entropy())\n else:\n return MI", "title": "" }, { "docid": "deeaab570a88b01bfdebab09a0af7ed9", "score": "0.47065228", "text": "def combine_data(self, image1, image2, method = 'and'):\n if (isinstance(image1, str) & isinstance(image2, str)):\n image1 = nib.load(image1).get_data()\n image2 = nib.load(image2).get_data()\n labels = np.unique(np.concatenate((np.unique(image1), np.unique(image2))))[1:]\n outdata = np.empty((image1.shape[0], image1.shape[1], image2.shape[2], labels.size))\n for i in range(labels.size):\n tempimage1 = copy.copy(image1)\n tempimage2 = copy.copy(image2)\n tempimage1[tempimage1 != labels[i]] = 0\n tempimage1[tempimage1 == labels[i]] = 1\n tempimage2[tempimage2 != labels[i]] = 0\n tempimage2[tempimage2 == labels[i]] = 1\n tempimage1.astype('bool')\n tempimage2.astype('bool')\n if method == 'and':\n tempimage = tempimage1 * tempimage2\n elif method == 'or':\n tempimage = tempimage1 + tempimage2\n else:\n raise Exception('Method support and, or now')\n outdata[...,i] = labels[i]*tempimage\n return outdata", "title": "" }, { "docid": "ca755d7fcdaf6e4adf67d89ba111cbe0", "score": "0.47020787", "text": "def _compute_stereogram(self, im1, im2):\n assert False, \"implement in subclass\"", "title": "" }, { "docid": "0731328287dcb9b51c9c42b9c517f056", "score": "0.4699471", "text": "def CompareHists( histA, histB ):\n\n # First, determine if they are the same class\n classA = histA.IsA().ClassName()\n classB = histB.IsA().ClassName()\n\n if classA != classB:\n print \"Error: Incompatable histograms\"\n print \"Histogram %s has class %s and Histogram %s has class %s\" % (histA.GetName(), classA, histB.GetName(), classB)\n raise Exception(\"Incompatable Hists - Class\")\n return False\n\n # Check that their binning is the same\n nBinsA = histA.GetNbinsX()\n nBinsB = histB.GetNbinsX()\n\n if nBinsA != nBinsB:\n print \"Error: Incompatable histograms\"\n print \"Histogram %s has NbinsX %s and Histogram %s has NbinsX %s\" % (histA.GetName(), nBinsA, histB.GetName(), nBinsB)\n raise Exception(\"Incompatable Hists - NBins\")\n return False\n\n # Check that bins match\n for itr in range(nBinsA):\n bin = itr + 1\n labelA = histA.GetXaxis().GetBinLabel(bin)\n labelB = histB.GetXaxis().GetBinLabel(bin)\n if labelA!= labelB:\n print \"Error: Incompatable histograms\"\n print \"Histogram %s has lable %s and Histogram %s has label %s for bin %s\" % (histA.GetName(), labelA, histB.GetName(), labelB, bin)\n raise Exception(\"Incompatable Hists - Labels\")\n return False\n \n return True", "title": "" } ]
1a33aa71cb2299b7dac98ae50b67f1f6
r"""Runs XFuse on the given data
[ { "docid": "db94527214781b4480b669c00dd34c4f", "score": "0.0", "text": "def model(self, xs):\n\n def _go(experiment, x):\n zs = [\n p.sample(\n f\"z-{experiment.tag}-{i}\",\n (\n # pylint: disable=not-callable\n Normal(torch.tensor(0.0, device=find_device(x)), 1.0)\n .expand([1, 1, 1, 1])\n .to_event(3)\n ),\n )\n for i in range(experiment.num_z)\n ]\n experiment.model(x, zs)\n\n for experiment, x in xs.items():\n _go(self.get_experiment(experiment), x)", "title": "" } ]
[ { "docid": "1a1581ba904d4e719c5c6aa04a0b3511", "score": "0.61807793", "text": "def multiFuse(self):\n ...", "title": "" }, { "docid": "1837128fa80992267a139987a47e61af", "score": "0.5791556", "text": "def run(data):", "title": "" }, { "docid": "f9424006c706b7763b8be66cfe03b9c9", "score": "0.57103026", "text": "def main():\n path = str(input()).rstrip()\n modules.Fusion().run(path)", "title": "" }, { "docid": "91c745be2b8de9c3881480381d60e378", "score": "0.5674608", "text": "def main():\n # Initialize key variables\n additional_help = \"\"\"\\\nThis processes XML data from FXCM http:// feeds.\n\n\"\"\"\n\n # Process the CLI\n cli_object = cli.ProcessCli(additional_help=additional_help)\n cli_args = cli_object.get_cli()\n\n # Autoingest stuff\n if cli_args.mode == 'autoingest':\n idx_ingested_list = _autoingest()\n if bool(idx_ingested_list) is True:\n _process(idx_ingested_list)\n\n # Process data\n if cli_args.mode == 'process':\n _process()", "title": "" }, { "docid": "73273e1bf46b8ed13e60538254f22cdb", "score": "0.56554574", "text": "def main():\n import sys\n if len(sys.argv) == 1:\n data, filename = fetch_current()\n elif sys.argv[1].startswith('ftp:'):\n data, filename = fetch(filename)\n else:\n filename = sys.argv[1]\n data = load(filename)\n dataset = reduce(data, filename)\n summary(dataset)", "title": "" }, { "docid": "97e904a50dec8a90c5ce29295edcd2ae", "score": "0.544574", "text": "def run_helper_xenoGI(data_filepath):\n\n code_pipeline = ['parseGenbank.py', 'runBlast.py', 'calcScores.py', 'xenoGI.py',\n 'printAnalysis.py']\n param_fn = 'params.py'\n\n # copy the starter .txt files to current directory\n os.chdir(data_filepath) \n for step in code_pipeline:\n # create the python command to run\n xeno_gi_step = os.path.join(XENO_GI_DIRECTORY, step)\n python_step = \"python3 \" + xeno_gi_step + \" \" + param_fn\n os.system(python_step) \n \n # writing the bed files too \n make_beds = os.path.join(XENO_GI_DIRECTORY, \"misc\", \"createIslandBed.py\")\n BED_PARAM = str(100)\n python_step_bedfiles = \"python3 \" + make_beds + \" \" + param_fn + \" \" + BED_PARAM \n os.system(python_step_bedfiles) \n\n output_files = glob.glob('*.out')\n bed_files= (glob.glob(\"bed/*\"))\n\n service = OutputFilesService(data_filepath, output_files, bed_files)\n service.create_zip()", "title": "" }, { "docid": "db7b48517368546d96b4acba5343b82a", "score": "0.5409114", "text": "def run(self,data):\n # Setup feed indexing\n # self.feeds : feed horn ID (in array indexing, only chosen feeds)\n # self.feedlist : all feed IDs in data file (in lvl1 indexing)\n # self.feeddict : map between feed ID and feed array index in lvl1\n fname = data.filename.split('/')[-1]\n\n self.logger(f'{fname}:{self.name}: About to get feeds')\n self.feeds, self.feed_index, self.feed_dict = self.getFeeds(data,self.feeds_select)\n\n\n # Setup output file here, we will have to write in sections.\n self.logger(f'{fname}:{self.name}: About to create {self.output_dir}')\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # Opening file here to write out data bit by bit\n self.logger(f'{fname}:{self.name}: About to get tod shape')\n self.i_nFeeds, self.i_nBands, self.i_nChannels,self.i_nSamples = data['spectrometer/tod'].shape\n\n # Average the data and apply the gains\n self.logger(f'{fname}:{self.name}: About to run average_obs')\n self.average_obs(data.filename,data)\n self.logger(f'{fname}:{self.name}: average_obs done.')\n self.logger(f'{fname}:{self.name}: About to run calibrate_data')\n 
self.calibrate_data(data)\n self.logger(f'{fname}:{self.name}: calibrate_data done.')", "title": "" }, { "docid": "b88234386d28201a9d76a9524e5382d4", "score": "0.5258803", "text": "def run(self):\n\t\tself.__comprun_xml = self.__comp_xml\n\t\tself.__substitute_compvars()\n\t\tself.__substitute_vars(self.__intvars, self.__nodesrc)\n\t\tself.__substitute_vars(self.__extvars, self.__nodedst)\n\t\tself.__get_datafiles()\n\t\treturn RetVal.CODE_SUCCESS", "title": "" }, { "docid": "be861ddb8ed396d38e12f4fa7d74188a", "score": "0.52482104", "text": "def data_runner(self):\n while True:\n for flow_collector in self.config[\"fcs\"]:\n # print(\"Getting data set...\")\n new_fc_data = self.get_fc_file(\n flow_collector[\"fc_ip\"],\n flow_collector[\"fc_username\"],\n flow_collector[\"fc_password\"],\n )\n self.combine_fc_data(new_fc_data)\n\n # Process all the data collected from FC's\n if self.verbose:\n print(f\"Combined Data:\\n{self.total_fc_data_cycle_current}\")\n self.process_data()\n\n # Wait retry_interval\n time.sleep(self.retry)", "title": "" }, { "docid": "8567d63ff99f7a0293193a8e8ac83324", "score": "0.5245493", "text": "def fuse(ctx, mountpoint):\n client = ctx.obj[\"CLIENT\"]\n print(f\"Starting fuse filesystem at {mountpoint}\")\n clickup_fuse(mountpoint, client)", "title": "" }, { "docid": "aaf526cbc7fd3956e151f4bb6f5c7425", "score": "0.5239055", "text": "def run(self):\n source_dir = os.path.join('../source/', self.query['id'])\n print('loading data from {}'.format(source_dir))\n data_full = dd.from_delayed([dask.delayed(self.load_data)(f) for f in os.listdir(source_dir) if f.endswith('.zip')])\n\n md = self.metadata\n metric = md['metric'].set_index('id')['name'].to_dict()\n measure = md['measure'].set_index('id')['name'].to_dict()\n\n split_by = self.config['split_by']\n by = self.config['dimension_order']\n cols = by.copy()\n cols.append('val')\n\n all_measures = list()\n measure_metric_combinations = product(self.query['measure'], self.query['metric'])\n for g in measure_metric_combinations:\n name = measure[g[0]] + ' ' + metric[g[1]]\n print(f'creating dattpoints for {name}')\n concept = to_concept_id(name)\n all_measures.append((concept, name))\n\n df = data_full.loc[(data_full.measure == g[0]) & (data_full.metric == g[1]), cols].compute()\n cols_rename = by.copy()\n cols_rename.append(concept)\n df.columns = cols_rename\n serve_datapoint(df, by, concept, split_by)", "title": "" }, { "docid": "f1077a72d5096ff63e394ec7e459ec8b", "score": "0.52336353", "text": "def main():\n\n ProcessData()", "title": "" }, { "docid": "cbb4ec50bedb13d18840ff23e65a5501", "score": "0.5219508", "text": "def main():\n args = parse_arguments()\n axfrtest(args.domain)", "title": "" }, { "docid": "0aefd0fed83294227f1e94205de8fb42", "score": "0.52057624", "text": "def execute_feature_engineering(self):\n self.prepare_data()\n self.select_features()", "title": "" }, { "docid": "9524627e0463c409340f5a36c7e45158", "score": "0.5179578", "text": "def run():\n salaries = clean_salaries_dataset('data/raw', \"salaries2.pickle\")\n stats = clean_stats_dataset('data/raw', \"advstats2.pickle\", \"regstats2.pickle\")\n rookies = clean_rookies_dataset('data/raw','rookies2.pickle')\n freeagents = clean_fa_dataset('data/raw','freeagents2.pickle')\n \n save_dataset(salaries, \"data/interim/salaries2.pickle\")\n save_dataset(stats, \"data/interim/stats2.pickle\")\n save_dataset(rookies, \"data/interim/rookies2.pickle\")\n save_dataset(freeagents, \"data/interim/fa2.pickle\")\n \n full_data = build_dataset(salaries, stats, 
rookies, freeagents)\n \n save_features(full_data,'data/processed/data2.pickle')", "title": "" }, { "docid": "ef48aedef6053358d07e96b361080d7b", "score": "0.5161689", "text": "def data_seeker(start, stop, msid):\n#\n#--- check a dummy 'test' file exists. it also needs param directory\n#\n if not os.path.isfile('test'):\n fo = open('./test', 'w')\n fo.close()\n \n try:\n clean_dir('param')\n except:\n cmd = 'mkdir ./param 2> /dev/null'\n os.system(cmd)\n\n mcf.rm_file('./temp_out.fits')\n#\n#--- name must starts with \"_\"\n#\n mc = re.search('deahk', msid.lower())\n mc2 = re.search('oobthr', msid.lower())\n#\n#--- deahk cases\n#\n if mc is not None:\n atemp = re.split('deahk', msid)\n val = float(atemp[1])\n if val < 17:\n name = 'rdb..deahk_temp.' + msid.upper() + '_avg'\n else:\n name = 'rdb..deahk_elec.' + msid.upper() + '_avg'\n#\n#--- oobthr cases\n#\n elif mc2 is not None:\n name = 'mtatel..obaheaters_avg._' + msid.lower() + '_avg'\n#\n#--- special cases (see the list at the top)\n#\n elif msid.upper() in special_list:\n name = msid.upper() + '_AVG'\n\n else:\n name = '_' + msid.lower() + '_avg'\n#\n#--- create dataseeker command\n#\n cmd1 = '/usr/bin/env PERL5LIB=\"\" '\n\n cmd2 = ' source /home/mta/bin/reset_param; '\n cmd2 = ' ' \n cmd2 = cmd2 + ' /home/ascds/DS.release/bin/dataseeker.pl '\n cmd2 = cmd2 + ' infile=test outfile=temp_out.fits '\n cmd2 = cmd2 + ' search_crit=\"columns=' + name\n cmd2 = cmd2 + ' timestart=' + str(start)\n cmd2 = cmd2 + ' timestop=' + str(stop) \n cmd2 = cmd2 + '\" loginFile='+ house_keeping + 'loginfile '\n\n cmd = cmd1 + cmd2 \n bash(cmd, env=ascdsenv)\n\n cmd = 'rm /data/mta/dataseek* 2>/dev/null'\n os.system(cmd)", "title": "" }, { "docid": "056cfcb3c4c00a76d7c8273e260aca33", "score": "0.51616114", "text": "def main():\n # import all the data\n # TODO: call the load_data() function here and load data from file", "title": "" }, { "docid": "182a9946f5845002076c445789ebbe10", "score": "0.51263165", "text": "def run(self, x):\n pass", "title": "" }, { "docid": "3c70ea922a0524832a2f61383b8e8267", "score": "0.5119688", "text": "def run(self,data):\n # Setup feed indexing\n # self.feeds : feed horn ID (in array indexing, only chosen feeds)\n # self.feedlist : all feed IDs in data file (in lvl1 indexing)\n # self.feeddict : map between feed ID and feed array index in lvl1\n self.feeds, self.feed_index, self.feed_dict = self.getFeeds(data,self.feeds_select)\n\n # Opening file here to write out data bit by bit\n self.i_nFeeds, self.i_nBands, self.i_nChannels,self.i_nSamples = data['spectrometer/tod'].shape\n avg_tod_shape = (self.i_nFeeds, self.i_nBands, self.i_nChannels//self.average_width, self.i_nSamples)\n self.avg_tod = np.zeros(avg_tod_shape,dtype=data['spectrometer/tod'].dtype)\n self.avg_rms = np.zeros((avg_tod_shape[0],avg_tod_shape[1],avg_tod_shape[2]),dtype=data['spectrometer/tod'].dtype)\n\n # Average the data and apply the gains\n self.average_obs(data.filename,data, self.avg_tod)", "title": "" }, { "docid": "a98926cd189e988d638d75ab7f94bb07", "score": "0.5108324", "text": "def multipipe():\n # TODO : use parallel processing\n # look for flt/flc/flm files in directories called <rootname>.flt\n fltdirlist = glob.glob(\"*.flt\")\n if not len(fltdirlist):\n raise RuntimeError(\"There is no <rootname>.flt directory!\")\n for fltdir in fltdirlist:\n runpipe(fltdir)", "title": "" }, { "docid": "222b8319f9df30ea61af9671ae69fccd", "score": "0.51027066", "text": "def main(args=None):\n\n if args is None:\n args = sys.argv[1:]\n\n print()\n 
print('Warning: this version of fsl_apply_x5 is a development release.\\n'\n 'Interface, behaviour, and input/output formats of future versions\\n'\n 'may differ substantially from this version.')\n print()\n\n args = parseArgs(args)\n\n if x5.inferType(args.xform) == 'linear':\n result = applyLinear(args)\n else:\n result = applyNonlinear(args)\n\n result.save(args.output)", "title": "" }, { "docid": "705436a40517b9438b03f4b56f8949bb", "score": "0.50999886", "text": "def run():\n script_path = os.path.dirname(os.path.realpath(sys.argv[0]))\n mod_path = '/'.join(script_path.split('/')[:-2]) + '/'\n input_filepath = mod_path + 'data/raw/'\n output_filepath = mod_path + 'data/processed/'\n # Generate processed dataset\n main(input_filepath, output_filepath, clean=False)\n # Generate cleaned text processed dataset\n main(input_filepath, output_filepath, clean=True)", "title": "" }, { "docid": "47dbd68fdb4de173a4599e853d31b317", "score": "0.5084285", "text": "def xtest(infile=gama_data+'/jswml/auto/kcorrz01.fits', ran_dist='vol',\n Q=Qdef, P=Pdef, key='w_p', xlimits=(0.01, 100), run=1,\n pi_lim=100, rp_lim=100, onevol=0):\n\n if run > 0:\n Mlimits = (-21, -20, -19)\n if onevol:\n zlimits = util.vol_limits(infile, Q=Q, Mlims=(Mlimits[-1],))\n else:\n zlimits = util.vol_limits(infile, Q=Q, Mlims=Mlimits[1:3])\n for ilim in xrange(2):\n if onevol:\n z_range = [0.002, zlimits[0]]\n else:\n z_range = [0.002, zlimits[ilim]]\n Mrange = Mlimits[ilim:ilim+2]\n galout = 'gal_test_{}.dat'.format(ilim)\n ranout = 'ran_test_{}.dat'.format(ilim)\n xiout = 'xi_test_{}.dat'.format(ilim)\n xi_select(infile, galout, ranout, xiout,\n z_range=z_range, nz=20, app_range=(14, 19.8),\n abs_range=Mrange,\n Q=Q, P=P, ran_dist=ran_dist, ran_fac=5, run=run)\n\n # Cross counts\n if run == 1:\n cmd = '$BIN/xi {} {} {}'.format('gal_test_0.dat', 'gal_test_1.dat',\n 'gg_test_x.dat')\n subprocess.call(cmd, shell=True)\n cmd = '$BIN/xi {} {} {}'.format('gal_test_0.dat', 'ran_test_1.dat',\n 'gr_test_x.dat')\n subprocess.call(cmd, shell=True)\n if run == 2:\n cmd = qsub_xix_cmd.format('gal_test_0.dat', 'gal_test_1.dat',\n 'gg_test_x.dat')\n subprocess.call(cmd, shell=True)\n cmd = qsub_xix_cmd.format('gal_test_0.dat', 'ran_test_1.dat',\n 'gr_test_x.dat')\n subprocess.call(cmd, shell=True)\n\n # Plot the results\n panels = []\n comps = []\n label = 'Test'\n panels.append({'files': ('xi_test_0.dat', 'xi_test_1.dat'),\n 'comps': comps, 'label': label})\n xi_plot(key, panels, xlimits=xlimits)\n plt.show()\n# xi2d_plot(xiout, binning=0, mirror=0)\n# plt.show()\n# xi2d_plot(xiout, binning=1, mirror=0)\n# plt.show()\n# xi2d_plot(xiout, binning=2, mirror=0)\n# plt.show()\n Gg = PairCounts('gg_test_x.dat')\n Gr = PairCounts('gr_test_x.dat')\n gr = PairCounts('gr_test_1.dat')\n rr = PairCounts('rr_test_1.dat')\n counts = {'Gg': Gg, 'Gr': Gr, 'gr': gr, 'rr': rr}\n xi = Xi()\n w_p_dpx = xi.est(counts, dpx, key=key, pi_lim=pi_lim, rp_lim=rp_lim)\n w_p_lsx = xi.est(counts, lsx, key=key, pi_lim=pi_lim, rp_lim=rp_lim)\n plt.clf()\n ax = plt.subplot(111)\n w_p_dpx.plot(ax, label='DPX')\n w_p_lsx.plot(ax, label='LSX')\n ax.loglog(basex=10, basey=10, nonposy='clip')\n ax.set_xlabel(r'$r_p\\ [h^{-1}{\\rm Mpc}]$')\n ax.set_ylabel(r'$w_p(r_p)$')\n ax.legend()\n plt.show()", "title": "" }, { "docid": "7d8efec902f63a7308a1b13e90e7599e", "score": "0.50816053", "text": "def run(data, preview, ipyclient):\n\n ## checks on data before starting\n raws, longbar, cutters, optim = prechecks(data, ipyclient, preview)\n\n ## nested structure to prevent abandoned 
temp files\n try: \n ## Truncate the input fq so it'll run faster\n ## This function returns the file name of a truncated\n ## fq file. The file should be cleaned up at the end\n ## of at the end of run()\n ##\n ## All the lists and tuples monkey business is to account\n ## for the difference in expectation of format of run()\n ## and preview_truncate_fq. Could be fixed.\n sample_fastq = []\n if preview:\n warning = \"\"\"\n Running preview mode. Selecting subset ({}) of reads for demultiplexing - {}\n \"\"\".format(data._hackersonly[\"preview_truncate_length\"], raws)\n if data._headers:\n print(warning)\n LOGGER.warn(warning)\n sample_fastq = preview_truncate_fq(data, raws)\n raws = sample_fastq\n\n ## splits up all files into chunks, returns list of list \n ## of chunks names in tuples. If the number of input files is greater\n ## than the number of processors then do not chunk, but instead just \n ## start iterating over the raw files\n if len(raws) < len(ipyclient):\n datatuples = chunker(data, raws, optim)\n else:\n datatuples = raws\n\n LOGGER.info(\"Executing %s files, in %s chunks, across %s cpus\", \\\n len(raws), len(datatuples), len(ipyclient))\n\n filenum = 0 \n for rawfilename, chunks in datatuples:\n for cutter in cutters:\n if cutter: \n ## sort chunks for this list \n parallel_sorter(data, rawfilename, chunks, cutter,\n longbar, filenum, ipyclient)\n filenum += 1\n ## TODO: combine tmps when two resolutions of ambig cutters\n ## ...\n\n ## collate tmps back into one file. Disk limited, parallelizing does \n ## not increase speed.\n for name in data.barcodes:\n collate_tmps(data, name)\n\n ## make stats\n make_stats(data, raws)\n\n finally:\n ## cleans up chunk files and stats pickles\n tmpdirs = glob.glob(os.path.join(data.dirs.fastqs, \"tmp_*_R*\"))\n tmpdirs += glob.glob(os.path.join(data.dirs.working, \"tmpchunks\"))\n if tmpdirs:\n for tmpdir in tmpdirs:\n shutil.rmtree(tmpdir)\n ## cleans up pickle files and tmp files generated by preview\n tmpfiles = glob.glob(os.path.join(data.dirs.fastqs, \"*.pickle\"))\n if preview:\n tmpfiles += sample_fastq[0]\n if tmpfiles:\n for tmpfile in tmpfiles:\n os.remove(tmpfile)", "title": "" }, { "docid": "6f4d422aa7b4cd1152d8f58f738b0c0d", "score": "0.5079075", "text": "def call(self, ds):\n pass", "title": "" }, { "docid": "8431c42d2cc9e35e3481346b8178d76a", "score": "0.50765485", "text": "def run_express (designs_to_process, FASTQ_PREFIX, DATA_PREFIX, RESULTS_PREFIX, CLEAN_UP):\n\n\t# Results are stored in these directories\n\tfasta_out_dir = RESULTS_PREFIX+'fasta/'\n\trefs_out_dir = RESULTS_PREFIX+'refs/'\n\t#refs_gene_out_dir = RESULTS_PREFIX+'refs_gene/'\n\texpress_out_dir = RESULTS_PREFIX+'exp_transcript/'\n\texpress_gene_out_dir = RESULTS_PREFIX+'exp_gene/'\n\n\t# Location of mapping from design to raw read files\n\tFASTQ_MAP_FILENAME = DATA_PREFIX+'fastq_mapping.csv'\n\n\t# Load the FASTQ filename to design mapping \n\tfastq_mapping = load_fastq_file_mapping(FASTQ_MAP_FILENAME)\n\t\n\t# Create a location to save the Bowtie indexes and reference TUs\n\tcreate_dir_if_needed(refs_out_dir)\n\n\t# Create a location to save the Bowtie indexes and reference TUs\n\t#create_dir_if_needed(refs_gene_out_dir)\n\n\t# For each design process with eXpress\n\tfor design in designs_to_process:\n\n\t\t# -----------------------------------------------------------------------------\n\t\t# 1. 
BUILD BOWTIE INDEX\n\t\t# Requirements:\n\t\t# - Transcript FASTA file (tu_arch generated by GeneClusterLibrary)\n\t\t# - Genome FASTA file (all transcripts of the host, or ORFs as proxy)\n\t\t# -----------------------------------------------------------------------------\n\t\t\n\t\t# Check that we have the TU architecture for the design\n\t\tref_tu_arch_in = DATA_PREFIX+'tu_archs/tu_arch_'+design+'.fa'\n\t\tref_genome_in = DATA_PREFIX+'host_gene_seqs.fa'\n\t\tref_syn_genes_in = DATA_PREFIX+'syn_gene_seqs.fa'\n\t\tref_tus = ''\n\t\tclean_ref_tus = False\n\t\tif os.path.isfile(ref_tu_arch_in) == False:\n\t\t\tprint 'express_run.py ERROR:', ref_tu_arch_in, 'not found, skipping design', design, '.'\n\t\telse:\n\t\t\tif os.path.isfile(ref_genome_in) == False:\n\t\t\t\tref_tus = ref_tu_arch_in\n\t\t\telse:\n\t\t\t\t# Merge TUs for circuit and genome\n\t\t\t\tref_tus = refs_out_dir+'transcripts_'+design+'.fa'\n\t\t\t\tmerge_fasta_files(ref_tu_arch_in, ref_genome_in, ref_tus)\n\t\t\t\tclean_ref_tus = True\n\n\t\t\t# Build and run Bowtie command\n\t\t\tbowtie_ref_out_file = refs_out_dir+'transcripts_'+design\n\t\t\tcmd_bowtie_build_idx = 'bowtie2-build -offrate=1 -f ' + \\\n\t\t\t ref_tus + ' ' + \\\n\t\t\t bowtie_ref_out_file\n\t\t\tprint 'express-run.py RUNNING:', cmd_bowtie_build_idx\n\t\t\tsubprocess.call(cmd_bowtie_build_idx, shell=True)\n\n\t\t# -----------------------------------------------------------------------------\n\t\t# 2. PREFORM MAPPING AND STREAM TO EXPRESS FOR EXPRESSION QUANTIFICATION\n\t\t# Requirements:\n\t\t# - Bowtie index (generated in previous step)\n\t\t# - Raw reads in FASTQ format (assumes paired-end reads)\n\t\t# -----------------------------------------------------------------------------\n\n\t\t# Check design exists in mapping\n\t\tif design not in fastq_mapping.keys():\n\t\t\tprint 'express_run.py ERROR:', design, 'not found in fastq_mapping, skipping.'\n\t\telse:\n\t\t\t# Find the raw file names\n\t\t\tfastq_filenames = fastq_mapping[design]\n\n\t\t\t# Create somewhere to save eXpress expression estimate data\n\t\t\tcreate_dir_if_needed(express_out_dir+'/'+design)\n\n\t\t\t# Build and run eXpress command -p -k 1000 -p 8 --phred64\n\t\t\tcmd_express_exp_ests = 'bowtie2 -a -X 600 --no-discordant --no-mixed --rdg 6,5 --rfg 6,5 --score-min L,-.6,-.4 -x ' + \\\n\t\t\t\t\t\t\t\t bowtie_ref_out_file + ' ' + \\\n\t\t\t '-1 ' + FASTQ_PREFIX + fastq_filenames[0] + ' ' + \\\n\t\t\t '-2 ' + FASTQ_PREFIX + fastq_filenames[1] + ' | ' + \\\n\t\t\t 'express --no-bias-correct ' + ref_tus + ' ' + \\\n\t\t\t '-o ' + express_out_dir + design\n\t\t\tprint 'express_run.py RUNNING:', cmd_express_exp_ests\n\t\t\tsubprocess.call(cmd_express_exp_ests, shell=True)\n\n\t\t\t# Clean-up by removing Bowtie index for design\n\t\t\tif CLEAN_UP == True:\n\t\t\t\tcmd_clean_up = 'rm ' + bowtie_ref_out_file + '*.bt2'\n\t\t\t\tsubprocess.call(cmd_clean_up, shell=True)\n\t\t\t\tif clean_ref_tus == True:\n\t\t\t\t\tcmd_clean_up = 'rm ' + ref_tus\n\t\t\t\t\tsubprocess.call(cmd_clean_up, shell=True)\n\t\t\t\t#cmd_clean_up = 'rmdir ' + refs_out_dir\n\t\t\t\t#subprocess.call(cmd_clean_up, shell=True)\n\n\t\t# -----------------------------------------------------------------------------\n\t\t# 3. 
PERFORM MAPPING AND EXPRESSION QUANTIFICATION FOR HOST AND SYN GENE SEQS\n\t\t# Requirements:\n\t\t# - Genome FASTA file (ORFs as proxy)\n\t\t# - Synethic Genes FASTA file (ORFs as proxy)\n\t\t# - Raw reads in FASTQ format (assumes paired-end reads)\n\t\t# -----------------------------------------------------------------------------\n\n\t\tif design not in fastq_mapping.keys():\n\t\t\tprint 'express_run.py ERROR:', design, 'not found in fastq_mapping, skipping.'\n\t\telse:\n\t\t\tif os.path.isfile(ref_genome_in) == False or os.path.isfile(ref_syn_genes_in) == False:\n\t\t\t\tprint 'express_run.py ERROR: host and syn gene sequences not found, skipping step for design', design, '.'\n\t\t\telse:\n\n\t\t\t\t# MAPPING WITH BOWTIE\n\t\t\t\t# -------------------\n\n\t\t\t\t# Merge gene sequences for circuit and genome\n\t\t\t\tref_tus = refs_out_dir+'genes_'+design+'.fa'\n\t\t\t\tmerge_fasta_files (ref_genome_in, ref_syn_genes_in, ref_tus)\n\n\t\t\t\t# Build and run Bowtie command\n\t\t\t\t#bowtie_ref_out_file = bowtie_gene_out_dir+'transcripts_'+design\n\t\t\t\t#cmd_bowtie_build_idx = 'bowtie-build --offrate 1 ' + \\\n\t\t\t\t# ref_tus + ' ' + \\\n\t\t\t\t# bowtie_ref_out_file\n\t\t\t\t#print 'express-run.py RUNNING:', cmd_bowtie_build_idx\n\t\t\t\t#subprocess.call(cmd_bowtie_build_idx, shell=True)\n\n\t\t\t\tbwa_ref_out_file = refs_out_dir+'bwa_genes_'+design\t\t\t\t\n\t\t\t\tcmd_bwa_build_idx = 'bwa index -p ' + \\\n\t\t\t\t bwa_ref_out_file + ' ' + \\\n\t\t\t\t ref_tus\n\t\t\t\tprint 'express-run.py RUNNING:', cmd_bwa_build_idx\n\t\t\t\tsubprocess.call(cmd_bwa_build_idx, shell=True)\n\n\t\t\t\t# EXPRESSION ESTIMATION (BWA for gene-level)\n\t\t\t\t# ------------------------------------------\n\n\t\t\t\t# Find the raw file names\n\t\t\t\tfastq_filenames = fastq_mapping[design]\n\n\t\t\t\t# Create somewhere to save eXpress expression estimate data\n\t\t\t\tcreate_dir_if_needed(express_gene_out_dir+'/'+design)\n\n\t\t\t\t# Use BWA for gene mapping - we do not want multi-mapped reads here (better quality)\n\t\t\t\t#'bwa mem -t 4 ./hg19.fasta ./s1_1.fastq ./s1_2.fastq > s1.sam'\n\t\t\t\tcmd_express_exp_ests = 'bwa mem -t 4 ' + \\\n\t\t\t\t\t\t\t\t\t bwa_ref_out_file + ' ' + \\\n\t\t\t\t FASTQ_PREFIX + fastq_filenames[0] + ' ' + \\\n\t\t\t\t FASTQ_PREFIX + fastq_filenames[1] + ' | ' + \\\n\t\t\t\t 'express --no-bias-correct ' + ref_tus + ' ' + \\\n\t\t\t\t '-o ' + express_gene_out_dir + design\n\t\t\t\tprint 'express_run.py RUNNING:', cmd_express_exp_ests\n\t\t\t\tsubprocess.call(cmd_express_exp_ests, shell=True)\n\n\t\t\t\t# Build and run eXpress command\n\t\t\t\t#cmd_express_exp_ests = 'bowtie -aS -X 800 --offrate 1 ' + \\\n\t\t\t\t#\t\t\t\t\t bowtie_ref_out_file + ' ' + \\\n\t\t\t\t# '-1 ' + FASTQ_PREFIX + fastq_filenames[0] + ' ' + \\\n\t\t\t\t# '-2 ' + FASTQ_PREFIX + fastq_filenames[1] + ' | ' + \\\n\t\t\t\t# 'express ' + ref_tus + ' ' + \\\n\t\t\t\t# '-o ' + express_gene_out_dir + design\n\t\t\t\t#print 'express_run.py RUNNING:', cmd_express_exp_ests\n\t\t\t\t#subprocess.call(cmd_express_exp_ests, shell=True)\n\n\n\t\t\t\t# Clean-up by removing Bowtie index for design\n\t\t\t\tif CLEAN_UP == True:\n\t\t\t\t\tcmd_clean_up = 'rm ' + bwa_ref_out_file + '*.*'\n\t\t\t\t\tsubprocess.call(cmd_clean_up, shell=True)\n\t\t\t\t\tcmd_clean_up = 'rm ' + ref_tus\n\t\t\t\t\tsubprocess.call(cmd_clean_up, shell=True)\n\t\t\t\t\t#cmd_clean_up = 'rmdir ' + refs_gene_out_dir\n\t\t\t\t\t#subprocess.call(cmd_clean_up, shell=True)", "title": "" }, { "docid": "30b9d12415e376978e0812a2f12d18ce", "score": "0.5076372", 
"text": "def exec_usecase():\n #\n # DplySmplImg(\"F:/Remote/RemoteData/\")\n # tmp_db = SelfDefDb(\"F:/Remote/RemoteData/\")\n #\n # a, b = tmp_db.__getitem__(1000)\n\n a = SelfDefDb(\"F:/Remote/RemoteData/\", FolderEnum.TRAIN, 0)\n print(\"Preparaion of Data Is Finished!\")", "title": "" }, { "docid": "7a6558ffd91a7697f203c61bd1bf5467", "score": "0.50728095", "text": "def main():\n spark = create_spark_session()\n\n #only temporary solution, has to be dynamic when\n #all of the gdelt data is processed\n gdelt_date = '20201215'\n\n process_google_data(spark)\n process_gdelt_data(spark, gdelt_date)\n process_oxford_data(spark)\n\n validate(spark, gdelt_date)", "title": "" }, { "docid": "8289cad4dfc63a11a97b913087688cbb", "score": "0.5069704", "text": "def doProcessing(self, bxdFile, **kws):\n\t\tif not self.module:\n\t\t\tLogging.error(\"No module set\", \"No module was set for the dataunit to do processing with\")\t\t\n\t\tcallback = None\n\t\t\n\t\tsettings_only = kws.get(\"settings_only\", 0)\n\t\tcallback = kws.get(\"callback\", None)\n\t\ttimepoints = kws.get(\"timepoints\", range(self.getNumberOfTimepoints()))\n\t\twritebxd = kws.get(\"writebxd\", True)\n\t\t# We create the vtidatasource with the name of the dataunit file\n\t\t# so it knows where to store the vtkImageData objects\n\n\t\tnumberOfDatasets = self.module.getNumberOfOutputs()\n\t\tbxdWriters = []\n\t\tdataWriters = []\n\n\t\tif not settings_only:\n\t\t\tif numberOfDatasets > 1:\n\t\t\t\tfor i in range(numberOfDatasets):\n\t\t\t\t\tchannelBXDFile = bxdFile\n\t\t\t\t\tif bxdFile[-4:] == \".bxd\":\n\t\t\t\t\t\tchannelBXDFile = bxdFile[:-4] + \"_\" + self.sourceunits[i].getName() + \".bxd\"\n\t\t\t\t\tbxdwriter = BXDDataWriter(channelBXDFile)\n\t\t\t\t\tbxdWriters.append(bxdwriter)\n\t\t\t\t\tbxcFile = bxdwriter.getBXCFileName(channelBXDFile)\n\t\t\t\t\tdataWriter = BXCDataWriter(bxcFile)\n\t\t\t\t\tdataWriters.append(dataWriter)\n\t\t\t\t\tbxdwriter.addChannelWriter(dataWriter)\n\t\t\telse:\n\t\t\t\tbxdwriter = BXDDataWriter(bxdFile)\n\t\t\t\tbxcFile = bxdwriter.getBXCFileName(bxdFile)\n\t\t\t\tbxdWriters.append(bxdwriter)\n\t\t\t\tdataWriter = BXCDataWriter(bxcFile)\n\t\t\t\tdataWriters.append(dataWriter)\n\t\t\t\tbxdwriter.addChannelWriter(dataWriter)\n\n\t\telse:\n\t\t\tbxcFile = bxdFile\n\t\t\tbxdwriter = None\n\t\t\tbxcFile = bxcFile[:-1] + \"p\"\n\t\t\tbxdWriters.append(bxdwriter)\n\t\t\tdataWriter = BXCDataWriter(bxcFile)\n\t\t\tdataWriters.append(dataWriter)\n\t\t\t\n\t\tself.outputDirectory = os.path.dirname(bxcFile)\n\t\t#self.dataWriter = BXCDataWriter(bxcFile)\n\t\t\n\t\t#if bxdwriter:\n\t\t#\tbxdwriter.addChannelWriter(self.dataWriter)\n\n\t\tn = 1\n\t\tself.guicallback = callback\n\t\tself.module.setControlDataUnit(self)\n\n\t\tif not settings_only:\n\t\t\tfor timePoint in timepoints:\n\t\t\t\t# First we reset the module, so that we can start the operation\n\t\t\t\t# from clean slate\n\t\t\t\tscripting.processingTimepoint = timePoint\n\t\t\t\tself.module.reset()\n\t\t\t\t# We get the processed timepoint from each of the source data \n\t\t\t\t# units\n\t\t\t\t#self.module.setSettings(self.settings)\n\t\t\t\tself.module.setTimepoint(timePoint)\n\t\t\t\t\n\t\t\t\tfor dataunit in self.sourceunits:\n\t\t\t\t\timage = dataunit.getTimepoint(timePoint)\n\t\t\t\t\tself.module.addInput(dataunit, image)\n\t\t\t\t\n\t\t\t\t# Get the vtkImageData containing the results of the operation \n\t\t\t\t# for this time point\n\t\t\t\timageDatas = self.module.doOperation()\n\t\t\t\tpolydatas = 
self.module.getPolyDataOutput()\n\t\t\t\t# Convert output to tuples if aren't already\n\t\t\t\tif type(imageDatas) is not types.TupleType:\n\t\t\t\t\timageDatas = (imageDatas,)\n\t\t\t\tif type(polydatas) is not types.TupleType and polydatas is not None:\n\t\t\t\t\tpolydatas = (polydatas,)\n\t\t\t\t\n\t\t\t\tLogging.info(\"Executing with optimizations\",kw=\"processing\")\n\t\t\t\tfor i, imageData in enumerate(imageDatas):\n\t\t\t\t\timageData = optimize.optimize(image = imageData)\n\t\t\t\t\tLogging.info(\"Processing done\",kw=\"processing\")\n\t\t\t\t\tlib.messenger.send(None, \"update_processing_progress\", timePoint, n, len(timepoints) * len(imageDatas))\n\t\t\t\t\tn += 1\n\t\t\t\t\t# Write the image data to disk\n\t\t\t\t\tLogging.info(\"Writing timepoint %d\"%timePoint,kw=\"processing\")\n\t\t\t\t\tdataWriters[i].addImageData(imageData)\n\t\t\t\t\tif polydatas is not None and i < len(polydatas):\n\t\t\t\t\t\tdataWriters[i].addPolyData(polydatas[i])\n\t\t\t\t\tdataWriters[i].sync()\n\t\t\t\t\tdims = dataWriters[i].getOutputDimensions()\n\t\t\t\t\tself.settings.set(\"Dimensions\", str(dims))\n\t\t\t\t\t\n\t\tscripting.processingTimepoint = -1\n\t\tif settings_only:\n\t\t\tself.settings.set(\"SettingsOnly\", \"True\")\n\t\telse:\n\t\t\tself.settings.set(\"SettingsOnly\", \"False\")\n\n\t\t# Check if we have multiple outputs\n\t\t# If we do, make sure ctf is correct\n\t\tupdateCTF = 0\n\t\torigCTF = self.settings.get(\"ColorTransferFunction\")\n\t\tif len(dataWriters) > 1:\n\t\t\tupdateCTF = 1\n\n\t\tfor i, dataWriter in enumerate(dataWriters):\n\t\t\tif updateCTF:\n\t\t\t\tself.settings.set(\"ColorTransferFunction\", self.sourceunits[i].getColorTransferFunction())\n\t\t\tself.createDataUnitFile(dataWriter)\n\t\t\n\t\tself.settings.set(\"ColorTransferFunction\", origCTF)\n\n\t\tif not settings_only:\n\t\t\tfor bxdwriter in bxdWriters:\n\t\t\t\tbxdwriter.write()\n\n\t\t\t# Write references to channels in BXD file\n\t\t\tif writebxd:\n\t\t\t\ttry:\n\t\t\t\t\tfp = open(bxdFile, \"w\")\n\t\t\t\t\tprint \"Writing output to\",bxdFile\n\t\t\t\t\tfor bxdwriter in bxdWriters:\n\t\t\t\t\t\tchannelBXCFile = bxdwriter.getBXCFileName(bxdwriter.getFilename())\n\t\t\t\t\t\tpathParts = channelBXCFile.split(os.sep)\n\t\t\t\t\t\tchannelBXCFile = pathParts[-2] + os.sep + pathParts[-1]\n\t\t\t\t\t\tfp.write(\"%s\\n\"%channelBXCFile)\n\t\t\t\texcept IOError, ex:\n\t\t\t\t\tLogging.error(\"Failed to write settings\", \"CombinedDataUnit failed to open .bxd file %s for writing settings (%s)\"%(bxdFile, str(ex)))\n\t\t\t\tfp.close()\n\t\t\t\treturn bxdFile\n\t\t\telse:\n\t\t\t\treturn bxdWriters[0].getFilename()", "title": "" }, { "docid": "091904988abca0fdfb477de472035718", "score": "0.50599194", "text": "def main():\n queryFilename = \"../config/query.txt\"\n\n fb = FbData()\n fb.CollectData(queryFilename)", "title": "" }, { "docid": "40335ea138d285d21490e1e3cd0715fd", "score": "0.50533396", "text": "def data_fuser(\n df, clusters, boolean_method_single=\"provenance\", \n boolean_method_multiple=\"voting\", numeric_method_single=\"average\", \n numeric_method_multiple=\"average\", string_method_single=\"longest\",\n string_method_multiple=\"longest\", provenance_regex=\"http://dbpedia.org/\",\n progress=True):\n\n df = df.copy()\n clusters = clusters.copy()\n\n # assert that no wrong fusion methods are chosen for specific types and\n # group sizes\n assert boolean_method_single != \"voting\",\\\n \"Voting will not work for single matches\"\n assert numeric_method_single not in [\n \"voting\", \"median\"], \\\n 
\"Voting and median will not work for single matches\"\n assert string_method_single != \"voting\",\\\n \"Voting will not work for single matches\"\n\n function_lookup = fusion_function_lookup(\n boolean_method_single, boolean_method_multiple, numeric_method_single,\n numeric_method_multiple, string_method_single, string_method_multiple)\n\n # reattach full column prefixes to column names in cluster\n cat_cols = [col for col in df.columns if re.findall(\"http:\", col)]\n cat_cols_stripped = [re.sub(r\"^.*http://\", \"http://\", col)\n for col in cat_cols]\n cat_col_lookup_stripped_to_full = dict(zip(cat_cols_stripped, cat_cols))\n cat_col_lookup_full_to_stripped = dict(zip(cat_cols, cat_cols_stripped))\n\n for i, cluster in enumerate(clusters):\n clusters[i] = [cat_col_lookup_stripped_to_full[link] for link in cluster]\n clusters[i].sort()\n\n # fuse every cluster iteratively\n\n if progress:\n iterator = tqdm(\n enumerate(clusters), desc=\"Data Fuser - Fusing.\")\n else:\n iterator = enumerate(clusters)\n\n for i, cluster in iterator:\n # use names without prefices to generate name of fused column by\n # combining them\n \n cluster_in_df = df.loc[:, cluster]\n suffix_col_names = [cat_col_lookup_full_to_stripped[name]\n for name in cluster]\n suffix = \"_\".join(suffix_col_names)\n fused_name = \"fused_\" + suffix\n\n # detect type of columns to merge and use the appropriate function\n type_conditions = [cluster_in_df.applymap(lambda x: type(x) == bool).any().all(),\n cluster_in_df.applymap(lambda x: np.logical_or(\n type(x) == int, type(x) == float)).all().all(),\n cluster_in_df.applymap(lambda x: type(x) == str).any().all()]\n\n type_choices = [\"boolean\", \"numeric\", \"string\"]\n type_ = np.select(type_conditions, type_choices, default=None).item()\n\n # detect if single match or multiple matches\n if len(cluster) == 2:\n size = \"_single\"\n else:\n size = \"_multiple\"\n\n # look up function to use for fusion\n method = type_+size\n function = function_lookup[method]\n\n if not function:\n warnings.warn(\n \"No correct function for {method} was specified, the cluster {cluster} cannot be fused\".\n format(method=method, cluster=cluster))\n continue\n\n # create newly fused column and drop the old ones\n\n if function == provenance:\n # special case because it refers to provenance in column names not\n # to single values\n column_to_keep = provenance(\n cluster_in_df.columns, provenance_regex)\n df = df.rename(columns={column_to_keep: \"fused_\"+column_to_keep})\n columns_to_drop = [\n col for col in cluster_in_df.columns if col != column_to_keep]\n df = df.drop(columns_to_drop, axis=1)\n\n else:\n df[fused_name] = cluster_in_df.apply(function, axis=1)\n df = df.drop(cluster_in_df.columns, axis=1)\n\n return df", "title": "" }, { "docid": "cb212d359f1ddff7bc4881fe6500ca74", "score": "0.50285816", "text": "def main():\n spark = create_spark_session()\n\n #process dimensions\n df_dim_region = process_dim_region(spark)\n df_dim_time = process_dim_time(spark)\n #df_dim_symptoms = process_dim_symptoms(spark)\n\n #process facts\n #process_facts_searches(spark, df_dim_symptoms)\n process_facts_covid(spark, df_dim_region, df_dim_time)\n\n #process data validation\n validate(spark)", "title": "" }, { "docid": "1fe0f99b3fd39aed7858a56c77201f6b", "score": "0.5027978", "text": "def execute_main(): \n list_of_files = [\"../dataset/apacheicse212.cfr\", \"../dataset/berkeleydbqualityjournal.cfr\", \"../dataset/berkeleydbsplc2011.cfr\", \n \"../dataset/linkedlistsplc2011.cfr\", 
\"../dataset/pkjabsplc2011.cfr\", \"../dataset/prevaylersplc2011.cfr\", \"../dataset/sqlitesplc2011.cfr\",\n \"../dataset/violetsplc2011.cfr\", \"../dataset/zipmesplc2011.cfr\"]\n \n satisfiable_partial_instances = [ \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/apacheicse212_1.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/apacheicse212_2.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/apacheicse212_3.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/apacheicse212_7.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/apacheicse212_9.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/apacheicse212_10.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/apacheicse212_11.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/apacheicse212_12.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/apacheicse212_13.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/apacheicse212_14.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbqualityjournal_5.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbqualityjournal_16.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbqualityjournal_17.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbqualityjournal_19.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbqualityjournal_20.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbqualityjournal_25.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbqualityjournal_27.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbqualityjournal_30.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbqualityjournal_32.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbqualityjournal_40.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbsplc2011_1.als\"\n , 
\"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbsplc2011_2.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbsplc2011_4.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbsplc2011_5.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbsplc2011_6.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbsplc2011_8.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbsplc2011_11.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbsplc2011_13.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbsplc2011_14.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/berkeleydbsplc2011_15.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/pkjabsplc2011_14.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/pkjabsplc2011_18.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/pkjabsplc2011_19.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/pkjabsplc2011_24.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/pkjabsplc2011_33.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/pkjabsplc2011_37.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/pkjabsplc2011_38.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/pkjabsplc2011_40.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/pkjabsplc2011_46.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/pkjabsplc2011_50.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/prevaylersplc2011_1.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/prevaylersplc2011_3.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/prevaylersplc2011_4.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/prevaylersplc2011_5.als\", 
\"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/prevaylersplc2011_6.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/prevaylersplc2011_10.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/prevaylersplc2011_11.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/prevaylersplc2011_12.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/prevaylersplc2011_13.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/prevaylersplc2011_15.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/sqlitesplc2011_22.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/sqlitesplc2011_27.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/sqlitesplc2011_31.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/sqlitesplc2011_52.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/sqlitesplc2011_94.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/zipmesplc2011_1.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/zipmesplc2011_2.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/zipmesplc2011_6.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/zipmesplc2011_8.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/zipmesplc2011_11.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/zipmesplc2011_12.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/zipmesplc2011_13.als\", \n \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/zipmesplc2011_17.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/zipmesplc2011_31.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/zipmesplc2011_32.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/sqlitesplc2011_104.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/sqlitesplc2011_123.als\", 
\"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/sqlitesplc2011_129.als\", \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/sqlitesplc2011_148.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/sqlitesplc2011_192.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/linkedlistsplc2011_9.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/linkedlistsplc2011_16.als\"\n ,\"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/linkedlistsplc2011_42.als\"\n ,\"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/linkedlistsplc2011_46.als\"\n ,\"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/linkedlistsplc2011_98.als\" \n ,\"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/linkedlistsplc2011_137.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/linkedlistsplc2011_170.als\" \n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/linkedlistsplc2011_239.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/linkedlistsplc2011_276.als\"\n , \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/partial_configurations_dataset/linkedlistsplc2011_283.als\"]\n \n for filename in satisfiable_partial_instances:\n \n clafer_filename_satisfiable = filename[:-4] + \".cfr\"\n clafer_satisfiable_model = open(filename[:-4] + \".cfr\")\n source_file = \"/Users/rafaelolaechea/Documents/workspace/2012-models-clafermultiobjective-data-generator/dataset/\" + clafer_filename_satisfiable[clafer_filename_satisfiable.rfind(\"/\")+1:clafer_filename_satisfiable.rfind(\"_\")] + \".cfr\"\n\n# print \"SourceFile: \" + source_file\n dest_file = clafer_filename_satisfiable.replace(\"partial_configurations_dataset\", \"satisfiable_partial_configurations_dataset\")\n print \"\\\"\" + dest_file + \"\\\", \"\n \n \n# print \"DestFile\" + dest_file\n\n subprocess.check_output([\"cp\", source_file , dest_file]) \n source_fp = open(source_file, \"r\")\n dest_fp = open(dest_file, \"w\")\n \n for line_source in source_fp.readlines():\n dest_fp.write(line_source)\n if line_source.find('simpleConfig')!=-1:\n print_next = False\n for line in clafer_satisfiable_model.readlines():\n if line.find('simpleConfig')!=-1:\n print_next = True\n elif print_next == True:\n dest_fp.write(line)\n print_next = False\n dest_fp.write(\"\\n\")\n dest_fp.close()\n print clafer_filename_satisfiable", "title": "" }, { "docid": "8eac65850d0152bdca3a5a560971babc", "score": "0.5027392", "text": "def run(self, data_files, output_file, date_range, stream_data, settings):\n \n for i, data_file in enumerate(data_files):\n print(\"\\n({}/{}) Loading data from {}\".format(\n i + 1, \n len(data_files) + 2, \n data_file))\n if 
data_file.endswith(\".zip\"):\n self.loadZIPData(data_file, date_range)\n elif data_file.endswith(\".json\"):\n with open(data_file) as json_file:\n if stream_data:\n self.streamJSONData(json_file, date_range)\n else:\n self.loadJSONData(json_file, date_range)\n elif data_file.endswith(\".kml\"):\n self.loadKMLData(data_file, date_range)\n elif data_file.endswith(\".gpx\"):\n self.loadGPXData(data_file, date_range)\n else:\n raise NotImplementedError(\n \"Unsupported file extension for {!r}\".format(data_file))\n \n print(\"\\n({}/{}) generateMapGenerating heatmap\".format(\n len(data_files) + 1, \n len(data_files) + 2))\n m = self.generateMap(settings)\n print(\"\\n({}/{}) Saving map to {}\\n\".format(\n len(data_files) + 2,\n len(data_files) + 2,\n output_file))\n m.save(output_file)", "title": "" }, { "docid": "37ebe0a342609652c89e11eef5b731db", "score": "0.502447", "text": "def main():\n\n parser = argparse.ArgumentParser(description=\"List file size and chechksum. Needs a valid grid UI and proxy.\",\n epilog=\"Example: ./extract_file_data.py -d srm://gfe02.grid.hep.ph.ic.ac.uk/pnfs/hep.ph.ic.ac.uk/data/gridpp/gridpp/user/daniela.bauer -o myfiles.txt\")\n parser.add_argument(\"-d\", \"--directory\", help=\"full path (including storage element name) to a directory\")\n parser.add_argument(\"-f\", \"--filename\", help=\"full path (including storage element name) to a file\")\n req_grp = parser.add_argument_group(title='required arguments')\n req_grp.add_argument('-o', \"--output\", required=True, help=\"output file name\")\n args = parser.parse_args()\n # 1 is the program itself, how could I forget\n if len(sys.argv) != 5:\n print(\"Please specify [either a directory or a file] and the output file for the results.\")\n sys.exit(0)\n\n file_descriptor = open(args.output, \"w\")\n ctxt = gfal2.creat_context()\n\n if args.directory:\n # print(args.directory)\n list_dir(ctxt, args.directory, file_descriptor)\n elif args.filename:\n single_file(ctxt, args.filename, file_descriptor)\n else:\n print(\"Something went wrong.\")\n\n file_descriptor.close()", "title": "" }, { "docid": "94d3eccf354ce3afb97b2bca7c8e1919", "score": "0.50214756", "text": "def call_data():", "title": "" }, { "docid": "1d574b09cf2496b91bb232cb4d7aca12", "score": "0.5018844", "text": "def main():\n\toptions, files = process_arguments()\n\tfor filename in files:\n\t\tconvert_to_kml(filename, not options.no_zip)\n\t\tif options.gpx:\n\t\t\tconvert_to_gpx(filename)", "title": "" }, { "docid": "d81d64ef96fa82bcec968c43d9c12146", "score": "0.5018052", "text": "def main():\n # set up the program to take in arguments from the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"outTrain\",\n help=\"filename of the updated training data\")\n parser.add_argument(\"outTest\",\n help=\"filename of the updated test data\")\n parser.add_argument(\"--trainFile\",\n default=\"eng_xTrain.csv\",\n help=\"filename of the training data\")\n parser.add_argument(\"--testFile\",\n default=\"eng_xTest.csv\",\n help=\"filename of the test data\")\n args = parser.parse_args()\n # load the train and test data\n xTrain = pd.read_csv(args.trainFile)\n xTest = pd.read_csv(args.testFile)\n # extract the new features\n xNewTrain = extract_features(xTrain)\n xNewTest = extract_features(xTest)\n # select the features\n xNewTrain = select_features(xNewTrain)\n xNewTest = select_features(xNewTest)\n # preprocess the data\n xTrainTr, xTestTr = preprocess_data(xNewTrain, xNewTest)\n # save it to csv\n xTrainTr.to_csv(args.outTrain, 
index=False)\n xTestTr.to_csv(args.outTest, index=False)", "title": "" }, { "docid": "6c4583a6fcc2bcfb9925eb4f65509675", "score": "0.5016928", "text": "def _do_it(self, fqn2df, run_set, collector, forceRun, is_quick_run):\n self._assert_single_input()\n self.data = self.doRun(fqn2df)\n # output's doRun guarantees an action\n self._run_ancestor_and_me_postAction(run_set, collector)", "title": "" }, { "docid": "3ddc6c258b55abaa98726804fb12a800", "score": "0.50165766", "text": "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--skip_wrangling\", action='store_true', dest='skip_wrangling',\n help=\"Flag to skip wrangling when all intermediate files\"\n \"have already been generated and saved into data folder\")\n parser.add_argument(\"--skip_save_data\", action='store_true', dest='skip_save_data',\n help=\"Flag to skip saving intermediate data tables\")\n parser.add_argument(\"--run_find_chains\", action='store_true', dest='run_find_chains',\n help=\"Flag to run chain analysis step\")\n parser.add_argument(\"--skip_figures\", action='store_true', dest='skip_figures',\n help=\"Flag to skip generating and saving EDA figures\")\n parser.add_argument(\"--run_clustering\", action='store_true', dest='run_clustering',\n help=\"Flag to run clustering step\")\n parser.add_argument(\"--run_google_maps_api\", action='store_true', dest='run_google_maps_api',\n help=\"Flag to run the Google Maps API to create a distance matrix\")\n parser.add_argument(\"--run_simulators\", action='store_true', dest='run_simulators',\n help=\"Flag to run the protocol and historical simulators\")\n args = parser.parse_args()\n # use os.listdir(\"data\")\n data_files = os.listdir(os.path.join(os.getcwd(), \"data\", \"raw\"))\n\n\n # codes = os.path.join(os.getcwd(), \"data\", \"raw\", \"incidentdescriptions.csv\")\n stations = os.path.join(os.getcwd(), \"data\", \"raw\", \"HFD_stations.csv\")\n raw_bays = os.path.join(os.getcwd(), \"data\", \"raw\", \"raw_bays.csv\")\n territories_shp = os.path.join(os.getcwd(), \"data\", \"raw\", \"territories.shp\")\n fire_protocols = os.path.join(os.getcwd(), \"data\", \"raw\", \"fire_dispatching_protocol.csv\")\n fire_protocols = pd.read_csv(os.path.join(os.getcwd(), \"data\", fire_protocols))\n ems_protocols = os.path.join(os.getcwd(), \"data\", \"raw\", \"ems_dispatching_protocol.csv\")\n ems_protocols = pd.read_csv(os.path.join(os.getcwd(), \"data\", ems_protocols))\n current_allocation = os.path.join(os.getcwd(), \"data\", \"raw\", \"HFD_allocation.csv\")\n\n # separate incident, dispatch, time, and hospital files\n incident_files = []\n dispatch_files = []\n time_files = []\n hospital_files = []\n\n for file in data_files:\n name = file.split(os.sep)[-1]\n if name.startswith(\"incidents_\"):\n incident_files.append(os.path.join(os.getcwd(), \"data\", \"raw\", file))\n elif name.startswith(\"dispatches_\"):\n dispatch_files.append(os.path.join(os.getcwd(), \"data\", \"raw\", file))\n elif name.startswith(\"timereports_\"):\n time_files.append(os.path.join(os.getcwd(), \"data\", \"raw\", file))\n elif name.startswith(\"hospital_\"):\n hospital_files.append(os.path.join(os.getcwd(), \"data\", \"raw\", file))\n\n # call run_pipeline with lists\n run_pipeline(incident_files, dispatch_files, time_files, raw_bays, territories_shp, stations,\n fire_protocols, ems_protocols, current_allocation, args, save_data=(not args.skip_save_data),\n save_figs=(not args.skip_figures))", "title": "" }, { "docid": "551ddfff02267447d9999fe9d6b85207", "score": "0.50164676", "text": 
"def fuse(self):\n ...", "title": "" }, { "docid": "2d6e2c17780a9751e137e024a42d0f2c", "score": "0.5015961", "text": "def _execute(self, x, *args, **kwargs):\n\n # init\n self._labels = sp.zeros((len(self.crange) * self.repeats,\n x.shape[0]), dtype=int) - 1\n self._gof = sp.zeros(len(self.crange) * self.repeats,\n dtype=self.dtype)\n self._ll = sp.zeros(len(self.crange) * self.repeats,\n dtype=self.dtype)\n self._parameters = [None] * len(self.crange) * self.repeats\n\n # clustering\n fit_func = {\n 'kmeans': self._fit_kmeans,\n 'gmm': self._fit_gmm,\n #'vbgmm': self._fit_vbgmm,\n 'dpgmm': self._fit_dpgmm,\n 'spectral': self._fit_spectral,\n 'meanshift': self._fit_mean_shift,\n 'dbscan': self._fit_dbscan\n }[self.clus_type](x)\n\n self._winner = sp.nanargmin(self._gof)\n self.parameters = self._parameters[self._winner]\n self.labels = self._labels[self._winner]", "title": "" }, { "docid": "fd85e31d7168e7cc8567fb1ea314fb29", "score": "0.5010381", "text": "def main():\n args = _parser()\n\n data = fits.getdata(args.fitsname)\n hdr = fits.getheader(args.fitsname)\n print(data)\n print(type(data))\n print(data.shape)\n if isinstance(data, fits.fitsrec.FITS_rec):\n wave = data.field(0)\n flux = data.field(1)\n else:\n wave = np.arange(len(data))\n flux = data\n\n cf = pg.ContinuumInteractive(wave, flux)\n\n cf.plot([wave[0], wave[-1]], [1, 1], 'k--')\n\n # Opens the GUI and starts the interactive session.\n c = cf.findContinuum()\n norm_flux = c[\"normalizedData\"]\n\n if args.flux_errors:\n flux_error(wave, flux, c)\n\n if args.plot:\n plt.title(\"Normalized data\")\n plt.plot(wave, norm_flux, 'b.--')\n plt.xlabel(\"Wavelength\")\n plt.ylabel(\"Normalized Flux\")\n plt.show()\n\n\n\n fitssave = args.fitsname.replace(\".fits\", \".{!s}.fits\".format(args.suffix))\n if len(data) == 2:\n fits.writeto(fitssave, norm_flux, header=hdr)\n else:\n fits.writeto(fitssave, norm_flux, header=hdr)", "title": "" }, { "docid": "052e74b876836a76ef28e2ee45980a7e", "score": "0.4998573", "text": "def main():\r\n file_name_scraped_data = 'scraper_data_example.csv'\r\n file_name_tool_data = 'tool_data_example.csv'\r\n run_whole_analysis(file_name_scraped_data, file_name_tool_data)", "title": "" }, { "docid": "729a4ae2fabc12864540eca20be31493", "score": "0.49976248", "text": "def main():\r\n\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"-H\", \"--host\", help=\"Hostname of FTP server\")\r\n parser.add_argument(\"-u\", \"--user\", help=\"FTP Server user\")\r\n parser.add_argument(\"-P\", \"--passwd\", help=\"FTP Server password\")\r\n parser.add_argument(\"-f\", \"--fileinput\", help=\"List of files to process in JSON format optional\")\r\n parser.add_argument(\"-p\", \"--path\", help=\"Path for storage results\")\r\n parser.add_argument(\"-j\", \"--json\", help=\"JSON in string format\")\r\n parser.add_argument(\"-c\", \"--coord\", help=\"Coordinates and elevation in Json string format, LatLongElev\")\r\n parser.add_argument(\"files\", nargs='*', help=\"files to process\")\r\n args = parser.parse_args()\r\n\r\n files = []\r\n geos = []\r\n path = './data'\r\n host = 'ftp.ncdc.noaa.gov'\r\n user = ''\r\n passwd = ''\r\n coord = ''\r\n\r\n if args.host:\r\n host = args.host\r\n\r\n if args.path:\r\n path = args.path\r\n\r\n if args.user:\r\n user = args.user\r\n\r\n if args.passwd:\r\n passwd = args.passwd\r\n\r\n if args.fileinput:\r\n with open(args.fileinput, 'r') as jsonFile:\r\n jsonData = json.load(jsonFile)\r\n files = jsonData['Urls']\r\n else:\r\n files = args.files\r\n\r\n if 
args.json:\r\n jsonData = json.loads(args.json)\r\n files = jsonData[\"Urls\"]\r\n\r\n if args.coord:\r\n jsonD = json.loads(args.coord)\r\n geos = jsonD[\"Geolocs\"]\r\n\r\n #Download and Decompress files\r\n startTime = time.time()\r\n for file, geo in zip(files, geos):\r\n downloadedFilename = path + '/' + file.split('/')[-1].replace(\".op.gz\", geo + \".op.gz\")\r\n\r\n DownloadFileFTP(host, file, downloadedFilename, user=user, passw=passwd)\r\n \r\n endTime = time.time()\r\n print(\"Execution time {} sec\".format(endTime - startTime))", "title": "" }, { "docid": "0566671795b3e0ade10416733ab5b2b6", "score": "0.4993814", "text": "def lfo_run(data,l=-1,x=0,c=1,m='1,2',d=1,L=1000,k=30,r=-1, f= 1.2,O=0, V=1):\n\n if (l == -1):\n l = len(data)\n \n if (r == -1):\n r = len(data)/1000\n \n if (O == 0):\n acf, msg = tiseanio('lfo-run','-l',l,'-x',x,'-c',c,'-m',m,'-d',d,'-L',L,'-k',k,'-r',r,'-f',f,'-V',V,data=data)\n else: \n acf, msg = tiseanio('lfo-run','-l',l,'-x',x,'-c',c,'-m',m,'-d',d,'-L',L,'-k',k,'-r',r,'-f',f,'-O','-V',V,data=data)\n \n return acf", "title": "" }, { "docid": "18db7c3cb763283174533cd1dffe80d9", "score": "0.49925095", "text": "def process_data():\n data_processor.process_data()", "title": "" }, { "docid": "95a2806dc0a1d1ca918102e70d4ed5d3", "score": "0.49922103", "text": "def openfits_but_cmd_xpa(self):\n self.regiontemp = 'temp_ds9_forinspection.reg'\n idstr = str(\"%05d\" % self.currentobj)\n lockstr = self.lockds9string()\n ds9cmd = ' '\n\n if not self.ds9windowopen:\n ds9cmd = ds9cmd+'ds9 -geometry 1000x600 -scale zscale '+lockstr+' -tile grid layout 4 1'\n self.pds9 = subprocess.Popen(ds9cmd,shell=True,executable=os.environ[\"SHELL\"])\n time.sleep(1.1)# sleep to make sure ds9 appear in PIDlist\n self.ds9PID = vi.getPID('ds9',verbose=False) # get PID of DS9 process\n self.ds9windowopen = True\n time.sleep(1.0)\n out = commands.getoutput('xpaset -p ds9 frame new rgb')\n out = commands.getoutput('xpaset -p ds9 frame new')\n out = commands.getoutput('xpaset -p ds9 frame new')\n out = commands.getoutput('xpaset -p ds9 frame new')\n out = commands.getoutput('xpaset -p ds9 frame 1')\n out = commands.getoutput('xpaset -p ds9 frame hide')\n\n out = commands.getoutput('xpaset -p ds9 tile')\n\n Fstart = 2\n for pstamp in self.pstamplist:\n pstampname = '_'.join(pstamp.split('.')[0].split('_')[2:])\n fitsstamp = pstamp.replace('.png','.fits')\n\n if fitsstamp.endswith('_ha.fits'):\n pass\n else:\n out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))\n if 'rgb' in fitsstamp:\n out = commands.getoutput('xpaset -p ds9 rgb red')\n out = commands.getoutput('xpaset -p ds9 file '+fitsstamp.replace('rgb','rgb_r')+'[0]')\n out = commands.getoutput('xpaset -p ds9 rgb green')\n out = commands.getoutput('xpaset -p ds9 file '+fitsstamp.replace('rgb','rgb_g')+'[0]')\n out = commands.getoutput('xpaset -p ds9 rgb blue')\n out = commands.getoutput('xpaset -p ds9 file '+fitsstamp.replace('rgb','rgb_b')+'[0]')\n else:\n regionfile = self.regiontemp.replace('.reg',pstampname+'.reg')\n self.ds9textregion(pstampname,filename=regionfile)\n out = commands.getoutput('xpaset -p ds9 file '+fitsstamp+'[0]')\n out = commands.getoutput('xpaset -p ds9 regions '+regionfile)\n Fstart += 1\n\n out = commands.getoutput('xpaset -p ds9 zoom to fit')\n\n if self.showingHamaps: # sho the Halpha map fits file\n pstampname = 'Halpha'\n fitsstamp = self.Hamap.replace('.png','.fits')\n out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))\n regionfile = self.regiontemp.replace('.reg',pstampname+'.reg')\n 
self.ds9textregion(pstampname,filename=regionfile)\n out = commands.getoutput('xpaset -p ds9 file '+fitsstamp+'[0]')\n out = commands.getoutput('xpaset -p ds9 regions '+regionfile)\n Fstart += 1\n else:\n out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))\n out = commands.getoutput('xpaset -p ds9 frame clear')", "title": "" }, { "docid": "a23ba8d5601dde6d239288915481a236", "score": "0.4968561", "text": "def main():\n if len(sys.argv) < 1:\n print(\"\\nCommand line interface\\n\\tpython plot_mds.py <str: absolute file path of binary data set>\")\n\n try:\n X_mds = np.fromfile(sys.argv[1]).reshape(ROWS, COLS)\n for label in set(y):\n idx = np.where(y == label)[0]\n plt.scatter(X_mds[idx, 0], X_mds[idx, 1], label=\"Label = %d\" % label)\n plt.legend()\n plt.xlabel(\"Component 1\"); plt.ylabel(\"Component 2\")\n plt.title(\"MDS Visualization\")\n plt.show()\n except Exception as e:\n print(\"Error loading data set because %s\" % str(e))", "title": "" }, { "docid": "560b6f11a21d326aea3929954e40307b", "score": "0.49668896", "text": "def main():\n from sys import argv\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--disk-speed', type=int, default=80*1024*1024, help='disk speed')\n parser.add_argument('--nodes', type=int, default=20, help='number of datanodes')\n parser.add_argument('--files', type=int, default=30, help='number of generate files')\n args = parser.parse_args()\n print(args)\n\n hdfs = create_hdfs(number_of_datanodes=args.nodes, default_disk_speed=args.disk_speed,\n do_debug=True,\n )\n if True:\n hdfs.put_files(args.files, 64*1024*1024)\n else:\n hdfs.regenerate_blocks(args.files)", "title": "" }, { "docid": "91481220ad36500bcca1b7fb36f8a04c", "score": "0.49551007", "text": "def main():\r\n () = call_data()\r\n () = sankey_plot()", "title": "" }, { "docid": "9f59d6ac00189090b66655b25de78867", "score": "0.49544787", "text": "def main(analysis_dir, freesurfer_dir):\n\n for feat_path in find_in_bids(analysis_dir, generator=True, extension='.feat'):\n lg.debug(f'Reading {feat_path}')\n coreg_feat2freesurfer(feat_path, freesurfer_dir)", "title": "" }, { "docid": "dfbd0c8a834a253f10107e52568b2caa", "score": "0.49484447", "text": "def apply(self):\n logger.debug(f\"start {self.description} fusion...\")\n input_name_to_nodes = self.model.input_name_to_nodes()\n output_name_to_node = self.model.output_name_to_node()\n\n # This assumes that two search ops will not be fused at same time!\n for search_op_type in self.search_op_types:\n for node in self.model.get_nodes_by_op_type(search_op_type):\n graph = self.model.get_graph_by_node(node)\n if graph is None:\n raise Exception(\"Can not find node in any graph\")\n self.this_graph_name = graph.name\n self.fuse(node, input_name_to_nodes, output_name_to_node)\n\n op_list = [node.op_type for node in self.nodes_to_add]\n if self.fused_count:\n for key, value in self.fused_count.items():\n if value:\n logger.info(f\"Fused {key}: {value}\")\n else:\n count = op_list.count(self.fused_op_type)\n if count > 0:\n logger.info(f\"Fused {self.description}: {count}\")\n\n self.model.remove_nodes(self.nodes_to_remove)\n self.model.add_nodes(self.nodes_to_add, self.node_name_to_graph_name)\n\n if self.prune_graph:\n self.model.prune_graph()\n elif self.nodes_to_remove or self.nodes_to_add:\n self.model.update_graph()", "title": "" }, { "docid": "dd61b81d152512fe8dd384c6fe298314", "score": "0.4948203", "text": "def run(self, datasets):\n datasets = Datasets(datasets=datasets)\n\n if not 
datasets.energy_axes_are_aligned:\n raise ValueError(\"All datasets must have aligned energy axes.\")\n\n if \"TELESCOP\" in datasets.meta_table.colnames:\n telescopes = datasets.meta_table[\"TELESCOP\"]\n if not len(np.unique(telescopes)) == 1:\n raise ValueError(\n \"All datasets must use the same value of the\"\n \" 'TELESCOP' meta keyword.\"\n )\n\n rows = []\n\n meta = {\n \"n_sigma\": self.n_sigma,\n \"n_sigma_ul\": self.n_sigma_ul,\n \"sed_type_init\": \"likelihood\",\n }\n\n rows = parallel.run_multiprocessing(\n self.estimate_flux_point,\n zip(\n repeat(datasets),\n self.energy_edges[:-1],\n self.energy_edges[1:],\n ),\n backend=self.parallel_backend,\n pool_kwargs=dict(processes=self.n_jobs),\n task_name=\"Energy bins\",\n )\n\n table = Table(rows, meta=meta)\n model = datasets.models[self.source]\n return FluxPoints.from_table(\n table=table,\n reference_model=model.copy(),\n gti=datasets.gti,\n format=\"gadf-sed\",\n )", "title": "" }, { "docid": "f3f6b1c73c91ea3d4219e8e5f28cd61a", "score": "0.49430466", "text": "def main():\n # Initialize configurations\n config = PysentelConfig()\n\n # Initialize InfluxDB connection\n influxdb = InfluxDataIngest(url=config.influxdb['url'],\n org=config.influxdb['org'],\n bucket=config.influxdb['bucket'],\n token=config.influxdb['token'])\n\n # Run loop as long as this service is running\n while True:\n datapoints = []\n # Initialize 1-wire and get available sensors\n for sensor in W1ThermSensor.get_available_sensors([Sensor.DS18B20]):\n # Append all sensor datapoints to ingest-list with correct\n # informations\n datapoints.append({\n 'measurement': 'temperature',\n 'tags': {\n 'location': config.sensors[sensor.id],\n 'type': sensor.type.name,\n 'sensor-id': sensor.id},\n 'fields': {\n 'value': sensor.get_temperature()}\n })\n\n # Ingest data\n influxdb.write_points(datapoints)\n\n time.sleep(config.interval)", "title": "" }, { "docid": "3960905c0b4b4cc867134c124fb02b39", "score": "0.49412766", "text": "def writeXdmf(dims,dx,filename,h5_file):\n\n f = open(filename,'w')\n f.write('<?xml version=\"1.0\" ?>\\n')\n f.write('<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\\n')\n f.write('<Xdmf xmlns:xi=\"http://www.w3.org/2003/XInclude\" Version=\"2.1\">\\n')\n f.write('<Domain>\\n')\n\n f.write('<Grid Name=\"my_Grid\" GridType=\"Uniform\">\\n')\n f.write('<Topology TopologyType=\"3DCoRectMesh\" Dimensions=\"%d %d %d\">\\n'%(dims[0],dims[1],dims[2]))\n f.write('</Topology>\\n')\n\n f.write('<Geometry GeometryType=\"Origin_DxDyDz\">\\n')\n f.write('<DataItem Dimensions=\"3\" NumberType=\"Integer\" Format=\"XML\">\\n')\n f.write('0 0 0\\n') \n f.write('</DataItem>\\n')\n f.write('<DataItem Dimensions=\"3\" NumberType=\"Integer\" Format=\"XML\">\\n')\n f.write('%g %g %g\\n'%(dx,dx,dx))\n f.write('</DataItem>\\n')\n f.write('</Geometry>\\n')\n\n f.write('<Attribute Name=\"velocity\" AttributeType=\"Vector\" Center=\"Node\">\\n')\n f.write('<DataItem ItemType=\"Function\" Function=\"JOIN($0, $1, $2)\" Dimensions=\"%d %d %d 3\">\\n'%(dims[0],dims[1],dims[2]))\n f.write('<DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Format=\"HDF\">\\n'%(dims[0],dims[1],dims[2]))\n #f.write('out'+str(i)+'.h5:/velo_group/x_velo\\n')\n f.write('%s:/velo_group/x_velo\\n'%h5_file)\n f.write('</DataItem>\\n')\n f.write('<DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Format=\"HDF\">\\n'%(dims[0],dims[1],dims[2]))\n #f.write('out'+str(i)+'.h5:/velo_group/y_velo\\n')\n f.write('%s:/velo_group/y_velo\\n'%h5_file)\n f.write('</DataItem>\\n')\n f.write('<DataItem 
Dimensions=\"%d %d %d\" NumberType=\"Float\" Format=\"HDF\">\\n'%(dims[0],dims[1],dims[2]))\n #f.write('out'+str(i)+'.h5:/velo_group/z_velo\\n')\n f.write('%s:/velo_group/z_velo\\n'%h5_file)\n f.write('</DataItem>\\n')\n f.write('</DataItem>\\n')\n f.write('</Attribute>\\n')\n \n # -------------\n f.write('<Attribute Name=\"force\" AttributeType=\"Vector\" Center=\"Node\">\\n')\n f.write('<DataItem ItemType=\"Function\" Function=\"JOIN($0, $1, $2)\" Dimensions=\"%d %d %d 3\">\\n'%(dims[0],dims[1],dims[2]))\n f.write('<DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Format=\"HDF\">\\n'%(dims[0],dims[1],dims[2]))\n \n f.write('%s:/force_group/x_force\\n'%h5_file)\n f.write('</DataItem>\\n')\n f.write('<DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Format=\"HDF\">\\n'%(dims[0],dims[1],dims[2]))\n \n f.write('%s:/force_group/y_force\\n'%h5_file)\n f.write('</DataItem>\\n')\n f.write('<DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Format=\"HDF\">\\n'%(dims[0],dims[1],dims[2]))\n \n f.write('%s:/force_group/z_force\\n'%h5_file)\n f.write('</DataItem>\\n')\n f.write('</DataItem>\\n')\n f.write('</Attribute>\\n')\n \n # -------------\n\n f.write('<Attribute Name=\"pressure\" AttributeType=\"Scalar\" Center=\"Node\">\\n')\n f.write('<DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Format=\"HDF\">\\n'%(dims[0],dims[1],dims[2]))\n #f.write('out'+str(i)+'.h5:/pres_group/presmag\\n')\n f.write('%s:/pres_group/presmag\\n'%h5_file)\n f.write('</DataItem>\\n')\n f.write('</Attribute>\\n')\n\n f.write('<Attribute Name=\"velocityMagnitude\" AttributeType=\"Scalar\" Center=\"Node\">\\n')\n f.write('<DataItem Dimensions=\"%d %d %d\" NumberType=\"Float\" Format=\"HDF\">\\n'%(dims[0],dims[1],dims[2]))\n #f.write('out'+str(i)+'.h5:/velo_group/velmag\\n')\n f.write('%s:/velo_group/velmag\\n'%h5_file)\n f.write('</DataItem>\\n')\n f.write('</Attribute>\\n')\n\n f.write('</Grid>\\n')\n f.write('</Domain>\\n')\n f.write('</Xdmf>\\n')\n\n f.close()", "title": "" }, { "docid": "90dc6693e959b6c21ae68ef9166eabe2", "score": "0.49399224", "text": "def main(args):\n args = parse_args(args)\n\n data_instance = BenchmarkData(\n raw_path=args.input_folder,\n clean_path=args.output_folder,\n return_edgelist=args.return_edgelist,\n )\n\n print(\"Transforming data... This could take a while.\")\n data_instance.run()", "title": "" }, { "docid": "e32ff6db847f38a74f61d34966b09061", "score": "0.49342456", "text": "def run():\r\n # read gene expression data\r\n V = read()\r\n for rank in xrange(2, 4):\r\n run_one(V, rank)", "title": "" }, { "docid": "e32ff6db847f38a74f61d34966b09061", "score": "0.49342456", "text": "def run():\r\n # read gene expression data\r\n V = read()\r\n for rank in xrange(2, 4):\r\n run_one(V, rank)", "title": "" }, { "docid": "326448100c640c373191ec66cfc417d4", "score": "0.49325234", "text": "def xi_execution(xi_config, peak_files, fasta_files, memory=None, output_file=\"xi_results\",\n additional_parameters=list(),\n xi_path=\"XiSearch.jar\"):\n assert type(peak_files) == type(fasta_files) == type(additional_parameters) == list, \\\n \"\"\"type of the following files needs to be list. It is actually:\n peak_files: {}\n fasta_files: {}\n additional_parameters: {}\"\"\" \\\n .format(type(peak_files), type(fasta_files), type(additional_parameters))\n\n list_of_all_files = peak_files + fasta_files\n list_of_all_files.append(xi_path)\n for f in list_of_all_files:\n if not os.path.exists(f):\n raise IOError(\"Could not find Xi executable. Is the path correct? 
'{}'\"\n .format(os.path.abspath(f)))\n\n if not os.path.exists(os.path.split(output_file)[0]):\n os.makedirs(os.path.split(output_file)[0])\n output_file = os.path.splitext(output_file)[0] + \".csv\"\n\n # generate xi commands\n xi_cmd = XiWrapper.build_xi_arguments(xi_path=xi_path,\n xi_config=xi_config,\n peak_files=peak_files,\n fasta_files=fasta_files,\n memory=memory,\n output=output_file,\n additional_parameters=additional_parameters)\n\n # call xi\n starttime = time.time()\n logger.info(\"XiSearch cmd: {}\".format(\" \".join(map(str, xi_cmd))))\n process = subprocess.Popen(xi_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n # real time output of Xi messages\n while True:\n output = process.stdout.readline()\n exit_code = process.poll()\n if output == '' and exit_code is not None:\n break\n elif output:\n # print output.strip()\n logger.debug(\"XiSearch: \" + output.strip())\n if \"java.lang.OutOfMemoryError\" in output:\n process.kill()\n raise XiSearchOutOfMemoryException(returncode=1, cmd=xi_cmd, out_file=output_file, output=output)\n elif \"could not daemonise BufferedResultWriter_batchforward\" in output:\n process.kill()\n raise XiSearchDaemoniseFailureException(\n returncode=1, cmd=xi_cmd, out_file=output_file, output=output\n )\n\n if exit_code != 0: # if process exit code is non zero\n raise XiSearchException(exit_code, xi_cmd, output_file, 'XiSearch exited with error message!')\n logger.info(\"XiSearch execution took {} for cmd: {}\"\n .format(XiWrapper.calculate_elapsed_time(starttime), xi_cmd))\n return output_file", "title": "" }, { "docid": "683d84ca626fb5ba8959ea97b904905b", "score": "0.49269488", "text": "def run_dataset(data: DataSet) -> None:\n\n meta_data = MetaDataSet(data.data_path)\n\n meta_data.remove_submodels()\n data.init_reference()\n _create_image_list(data, meta_data)\n\n if meta_data.image_groups_exists():\n _read_image_groups(meta_data)\n else:\n _cluster_images(meta_data, data.config[\"submodel_size\"])\n\n _add_cluster_neighbors(meta_data, data.config[\"submodel_overlap\"])\n _save_clusters_geojson(meta_data)\n _save_cluster_neighbors_geojson(meta_data)\n\n meta_data.create_submodels(meta_data.load_clusters_with_neighbors())", "title": "" }, { "docid": "985256f6cda65e327a1d518142611035", "score": "0.4925508", "text": "def download_ftp():\n\n #ftp proxy required for work\n site = FTP(\"gw-ftp.dmz\")\n msg = site.login('[email protected]', 'essential')\n #for without proxy below\n #site = FTP(\"dissemination.ecmwf.int\")\n #msg = site.login('wmo', 'essential')\n \n #get a list of the folders\n files = []\n\n try:\n files = site.nlst()\n except ftplib.error_perm:\n if str(resp) == \"550 No files found\":\n print(\"No files in this directory\")\n else:\n raise\n\n #change directory to the latest model runs\n site.cwd(\"/\"+files[-1])\n \n #now list the files in that directory\n files = []\n\n try:\n files = site.nlst()\n except ftplib.error_perm:\n if str(resp) == \"550 No files found\":\n print(\"No files in this directory\")\n else:\n raise\n \n #grab MSL pressure Grib file names\n msl_files = []\n for file in files:\n if 'msl' in file:\n if 'em' in file:\n continue\n if 'es' in file:\n continue\n msl_files.append(file)\n \n \n # make temp files and make each one an xarray, then add to list\n \n xarrays = []\n \n filename = 'mytempgrib_'\n x = 0\n for run in msl_files[0:5]:\n newfilename = filename+str(x)+'.grib'\n localfile = open(newfilename, 'wb')\n x +=1\n try:\n site.retrbinary('RETR ' + run, localfile.write, 1024)\n except:\n 
print(\"Error\")\n localfile.close()\n \n ds = xr.open_dataset(os.getcwd()+'/'+newfilename, engine='cfgrib')\n xarrays.append(ds)\n site.close()\n return xarrays", "title": "" }, { "docid": "8bc7a6b502c2f86cf860e9ab2ac12d5f", "score": "0.4923862", "text": "def oldFuse(self):\n ...", "title": "" }, { "docid": "79cde82f6ffd38c9fcf349cba237725c", "score": "0.49172887", "text": "def run_process_with_input(self, data_that_is_list_of_lists):\n\t\t#self.start_process(True)\n\t\t# TODO : THIS FUNCTION!!!", "title": "" }, { "docid": "4ff30093ca0caee2810bd6c21ca9db45", "score": "0.4912886", "text": "def __call__(self, data):\n\n return self.run(data)", "title": "" }, { "docid": "024f88035ae4033561e1b9fc2c6e0eb3", "score": "0.48974118", "text": "def setup(self):\n\n self.data_dir = os.path.abspath(\"../data\")\n\n self.quick_data = {'min':5, 'mean':10, 'max':15}\n\n self.dummy_data = xr.Dataset({\n 'min': xr.DataArray(self.quick_data['min']),\n 'max': xr.DataArray(self.quick_data['max']),\n 'mean': xr.DataArray(self.quick_data['mean']),\n })\n\n self.filenames = {\n \"core\": [f\"{self.data_dir}/lai_A2017156.tif\",\n f\"{self.data_dir}/lai_A2017161.tif\",\n f\"{self.data_dir}/lai_A2017166.tif\",\n f\"{self.data_dir}/lai_A2017171.tif\",\n f\"{self.data_dir}/lai_A2017176.tif\",\n f\"{self.data_dir}/lai_A2017181.tif\"],\n \"unc\": [f\"{self.data_dir}/lai_A2017156_unc.tif\",\n f\"{self.data_dir}/lai_A2017161_unc.tif\",\n f\"{self.data_dir}/lai_A2017166_unc.tif\",\n f\"{self.data_dir}/lai_A2017171_unc.tif\",\n f\"{self.data_dir}/lai_A2017176_unc.tif\",\n f\"{self.data_dir}/lai_A2017181_unc.tif\"]\n }\n\n # Instantiate data handler for the data directory\n self.dh = DataHandling(self.data_dir)", "title": "" }, { "docid": "b7f33a4b16ff7c631989df36c32fa81b", "score": "0.48892856", "text": "def main():\n args = parse_arguments()\n\n res = resdk.Resolwe(url=SERVER_URL)\n res.login()\n collection = res.collection.get(name=args.collection)\n\n types = parse_types(args.types)\n for data in collection.data:\n if data.status != \"OK\":\n continue\n\n for type_ in types:\n # type is a tuple of size 1 or 2: (field_name) or (field_name, process_type)\n if len(type_) == 2:\n if not data.process.type.strip(\":\").endswith(type_[1]):\n continue\n\n field_name = type_[0]\n\n if field_name not in data.output:\n continue\n\n if isinstance(data.output[field_name], list):\n for item in data.output[field_name]:\n # Check if file name of the file to-be-downloaded will be\n # clashing with existing filenames in download direcory. 
If\n # so, rename existing file to unexisting name.\n original_name = os.path.basename(item[\"file\"])\n rename_if_clashing(original_name, args.directory)\n else:\n original_name = os.path.basename(data.output[field_name][\"file\"])\n rename_if_clashing(original_name, args.directory)\n\n print(\"Downloading {} output of data {} ...\".format(field_name, data.name))\n data.download(field_name=field_name, download_dir=args.directory)", "title": "" }, { "docid": "92fec57faebc687d64b2ff6d36a9b5e4", "score": "0.4888848", "text": "def main():\n # Data server location:\n dataset_uri = \"http://data.ordnancesurvey.co.uk/\"\n # Dataset identifier:\n dataset_id = \"http://data.ordnancesurvey.co.uk/id/data/50k-gazetteer\"\n\n desc = lodataset.LODatasetDescription(dataset_uri)\n\n # Prints the datasets described in the file:\n for d in desc.get_datasets():\n print(\"-----------------\")\n print(\"Dataset id: \", d.id)\n if d[\"dataDump\"] is None:\n print(\"Dumps not available.\")\n else:\n print(\"Dumps available at: \", d[\"dataDump\"])\n print(\"-----------------\")\n\n # Get reference to the dataset we look for:\n gz = desc[dataset_id]\n\n # Instantiate a IPFS publisher on default location:\n ipfs = IPFSLODPublisher(gz)\n\n # Loop forever updating and publishing:\n try:\n while True:\n ipfs.publish()\n time.sleep(60*60) # Suspend for an hour\n ipfs.update()\n except KeyboardInterrupt:\n print(\"\\n\")\n print(\"Last IPFS hash for the folder of the dataset:\" + ipfs.ipfs_addr)\n print(\"To retrieve the contents use: $ipfs ls <hash>\")", "title": "" }, { "docid": "26ebd7a8b9a3b71491e44ce50cbac891", "score": "0.4881117", "text": "def run(self):\r\n inputs = [ np.arange(var[0], var[1]+1, 1) for var in self.io_ranges]\r\n b = []\r\n for i in range(3) :\r\n b.append( [membership_f(self.mf_types[i], inputs[i], a) for a in self.f_ssets[i] ])\r\n\r\n # visualize.visualize_mf(b,inputs)\r\n # fuzzify Error and delta error to obtain their membership values for corr. 
fuzzy subsets\r\n muval_e = fuzzify(inputs[0], b[0], self.error)\r\n muval_de = fuzzify(inputs[1], b[1], self.delta_e) \r\n\r\n # print 'muval_e:', muval_e\r\n # print 'muval_de:', muval_de\r\n # Obtain the rule strength matrix\r\n f_mat = fuzzy_matrix(muval_e, muval_de)\r\n # obtian the y value clipped by output activation for output fuzzy subsets\r\n output = rule_base(b, f_mat)\r\n aggregated = np.fmax(output[0], np.fmax(output[1],np.fmax(output[2], np.fmax(output[3], output[4]))))\r\n out_final = fuzz.defuzz(inputs[2], aggregated, 'centroid')\r\n print \"output:\",out_final\r\n # plotting final output\r\n visualize.visualize_output(b, inputs, output, out_final, aggregated)\r\n plt.show()", "title": "" }, { "docid": "ea9e282d12a5e6f1b64f5b4baabf1cc9", "score": "0.48789155", "text": "def main():\n dat_path = \"/home/kedar/Focus/Experiment2\"\n list_of_files = glob(dat_path + \"/*.dat\")\n\n # XXX Remove absolute path\n schema_path = \"blip/database/dbxactions/training_data/training_schema.json\"\n keyspace_name = \"training_bt_kd\"\n\n # Table Type <-> Table Name mapping\n table_type_name_dict = {\n \"ssid\" : \"ssid_t\",\n \"cell\" : \"cell_t\",\n \"section\" : \"section_t\",\n \"floor\" : \"floor_t\",\n \"property\" : \"property_t\" }\n\n\n training_dbO = initDatabase([\"127.0.0.1\"], \"cassandra\", \"cassandra\", keyspace_name, schema_path, table_type_name_dict)\n\n dataParsing(list_of_files[0], training_dbO)", "title": "" }, { "docid": "73a82608a2f75e31a5b3f64db3c4bf1e", "score": "0.48654208", "text": "def execute(self, data: list):\n\n\t\tcuttedlayernr = 0\n\t\tx = 0.\n\t\ty = 0.\n\t\te = 0\n\t\tz = 0\n\t\tcutZ = 0\n\t\tfanonafter = 4\n\t\tfanonaftercounter = 0\n\t\tcurrent_z = 0.\n\t\tfromZ = self.getSettingValueByKey(\"fromz\")\n\n\t\t# T = ExtruderManager.getInstance().getActiveExtruderStack().getProperty(\"material_print_temperature\", \"value\")\n\t\t# with open(\"out.txt\", \"w\") as f:\n\t\t\t# f.write(T)\n\n\t\t# use offset to calculate the current height: <current_height> = <current_z> - <layer_0_z>\n\t\tlayer_0_z = 0.\n\n\t\tremovelines = 1\n\n\t\tresumegcode = \"\"\";+++++++++++++ resumegcode +++++++++++++++\n\t\tM104 S220 ;Uncomment to add your own temperature line\n\t\tM42 P3 S150\n\t\tG21\t\t ;metric values\n\t\tG90\t\t ;absolute positioning\n\t\tM82\t\t ;set extruder to absolute mode\n\t\tM107\t ;start with the fan off\n\t\tG28 X0 Y0 ;move X/Y to min endstops\n\t\tM109 S220 ;Uncomment to add your own temperature line\n\t\tG92 E0\t\t\t\t\t;zero the extruded length\n\t\tG1 F200 E3\t\t\t\t;extrude 3mm of feed stock\n\t\tG92 E0\t\t\t\t\t;zero the extruded length again\n\t\tG1 F9000\n\t\tM220 S30\n\t\tM221 S150\n\n\t\t;+++++++++++++ end resume +++++++++++++++\\n\\n\"\"\"\n\n\n\t\tgot_first_g_cmd_on_layer_0 = False\n\t\tfor layer_nr, layer in enumerate(data):\n\t\t\tlines = layer.split(\"\\n\")\n\t\t\t#print(\"/nlayer: {}\".format(layer))\n\t\t\tfor line_nr, line in enumerate(lines):\n\n\t\t\t\t#print(\"Getvalue: {}\".format(self.getValue(line, \"Z\", z)))\n\t\t\t\t#print(\"fromZ: {}\".format(fromZ))\n\t\t\t\t#print(\"cutZ: {}\".format(cutZ))\n\t\t\t\t#print(\"tada layer_nr: {}\".format(layer_nr))\n\n\t\t\t\tif self.getValue(line, \"E\", e) > 0:\n\t\t\t\t\te = self.getValue(line, \"E\", e)\n\n\t\t\t\tif cutZ == 1:\n\t\t\t\t\tprint(\"hola\")\n\t\t\t\t\tremovelines = 0\n\t\t\t\t\tlineinsert=(\"\\n;+++++++++++++ start resume code 
+++++++++++++++\")\n\t\t\t\t\tlineinsert+=(\"\\n\")\n\t\t\t\t\tlineinsert+=(resumegcode)\n\t\t\t\t\tlineinsert+=(\"\\n\")\n\t\t\t\t\tlineinsert+=(\";+++++++++++++ cutted from here +++++++++++++++\")\n\t\t\t\t\tlineinsert+=(\"\\n\")\n\t\t\t\t\tlineinsert+=(\";LAYER:\"+str(layer_nr-2))\t \n\t\t\t\t\tlineinsert+=(\"\\n\")\n\t\t\t\t\tlineinsert+=(\"G92 \")\n\t\t\t\t\tlineinsert+=(\"E%0.4f \" %(e))\n\t\t\t\t\tlineinsert+=(\"\\n\")\n\t\t\t\t\tlineinsert+=(\"\\n\")\n\t\t\t\t\tlineinsert+=(\"G92 \")\n\t\t\t\t\tlineinsert+=(\"Z%0.4f \" %(z))\n\t\t\t\t\tlineinsert+=(\"\\n\")\n\t\t\t\t\tlineinsert+=(\";+++++++++++++ end resume code +++++++++++++++\")\n\t\t\t\t\tlineinsert+=(\"\\n\")\n\t\t\t\t\tlineinsert+=(\"\\n\")\n\t\t\t\t\tlineinsert+=line\n\t\t\t\t\tline = lineinsert\n\t\t\t\t\tprint(lineinsert)\n\t\t\t\t\tcuttedlayernr = layer_nr\n\t\t\t\t\tcutZ = 2\n\t\t\t\t\t#continue\n\n\t\t\t\tif layer_nr == cuttedlayernr + fanonafter and cutZ == 2:\n\t\t\t\t\tlineinsert=(\";+++++++++++++ fan on +++++++++++++++\")\n\t\t\t\t\tlineinsert+=(\"\\nM106 S255\")\n\t\t\t\t\tlineinsert+=(\"\\n\")\n\t\t\t\t\tline+=lineinsert\n\t\t\t\t\tcutZ = 3\n\t\t\t\t\t#continue\n\n\t\t\t\tif self.getValue(line, \"Z\", z) > fromZ and cutZ == 0:# and layer_nr > 0:\n\t\t\t\t\tprint(\"hola2\")\n\t\t\t\t\tz = self.getValue(line, \"Z\", z)\n\t\t\t\t\tcutZ = 1\n\t\t\t\t\t#line = \";removed\\n\"\n\t\t\t\t\t#continue\n\n\t\t\t\t# have to actively empty the 'lines' for the things I want to through away\n\t\t\t\tif cutZ == 0 and layer_nr > 1:\n\t\t\t\t\tprint(\"hola3\")\n#\t\t\t\t lines.pop(line_nr)\n\t\t\t\t\t#print(\"should be removed lines, line_nr: {}\".format(lines[line_nr]))\n\t\t\t\t\t#line = \";999\"\n\t\t\t\t\t#continue\n\n\n\t\t\t\tlines[line_nr] = line\n\n\n\t\t\tprint(\"is this cutZ: {}\".format(cutZ))\n\t\t\tif cutZ == 0:\n\t\t\t\tprint(\"tada line_nr ++++: {}\".format(layer_nr))\n\t\t\t\tdata[layer_nr] = \"\\n;removed\"\n\t\t\t\t#print(\"tada lines that should be removed: {}\".format(lines))\n\t\t\t\t#lines.pop(line_nr)\n\t\t\t\t#del data[layer_nr] #this worked sort of... 
removes every other line\n\t\t\t\t#lines.remove(999)\n\t\t\t\t#continue\n\t\t\telse:\n\t\t\t\tprint(\"binnen: {}\".format(lines))\n\t\t\t\tdata[layer_nr] = \"\\n\".join(lines)\n#\t\t\tdata[layer_nr] = \"\\n\".join(lines)\n\n\t\tprint(data)\n\t\treturn data", "title": "" }, { "docid": "0c7f18ff0151f7a24b6494cf49d64129", "score": "0.4854642", "text": "def main():\n if len(sys.argv) <= 1:\n raise SystemExit(\"usage %s <datafile>\" % sys.argv[0])\n \n sock = socket.socket()\n try:\n sock.connect( (CARBON_SERVER, CARBON_PICKLE_PORT) )\n except socket.error:\n raise SystemExit(\"Couldn't connect to %(server)s on port %(port)d, is carbon-cache.py running?\" % { 'server':CARBON_SERVER, 'port':CARBON_PICKLE_PORT })\n \n try:\n run(sock, sys.argv[1])\n except KeyboardInterrupt:\n sys.stderr.write(\"\\nExiting on CTRL-c\\n\")\n sys.exit(0)", "title": "" }, { "docid": "55df6d8a5472d0a06cd8a53b97f4c220", "score": "0.48512918", "text": "def execute(self, kernel, data):\n raise NotImplementedError()", "title": "" }, { "docid": "0e902c3090f3a3826cfe7e0725dd3f41", "score": "0.48495668", "text": "def main(definitions, train, test, max_entries, batch_size): # noqa: C901\n logger = logging.getLogger(__name__)\n logger.info(\"making final data set from raw data\")\n\n with open(definitions) as yaml_file:\n # The FullLoader parameter handles the conversion from YAML\n # scalar values to Python the dictionary format\n defn = yaml.load(yaml_file, Loader=yaml.FullLoader)\n\n spectators = defn[\"spectators\"]\n labels = defn[\"labels\"]\n n_feature_sets = defn[\"n_feature_sets\"]\n if not batch_size:\n batch_size = defn[\"batch_size\"]\n if train:\n dataset = \"train\"\n elif test:\n dataset = \"test\"\n else:\n logger.info(\"You need to specify if they are training/testing dataset by setting --train or --test\")\n files = defn[f\"{dataset}_files\"]\n\n counter = -1\n total_entries = 0\n done = False\n for input_file in files:\n in_file = uproot.open(input_file)\n tree = in_file[defn[\"tree_name\"]]\n nentries = tree.num_entries\n logger.info(f\"opening {input_file} with {nentries} events\")\n for k in range(0, nentries, batch_size):\n counter += 1\n if os.path.isfile(f\"{project_dir}/data/processed/{dataset}/newdata_{counter}.h5\"):\n logger.info(f\"{project_dir}/data/processed/{dataset}/newdata_{counter}.h5 exists... 
skipping\")\n continue\n arrays = tree.arrays(spectators, library=\"np\", entry_start=k, entry_stop=k + batch_size)\n spec_array = np.expand_dims(np.stack([arrays[spec] for spec in spectators], axis=1), axis=1)\n real_batch_size = spec_array.shape[0]\n total_entries += real_batch_size\n\n feature_arrays = {}\n for j in range(n_feature_sets):\n feature_arrays[f\"features_{j}\"] = np.zeros(\n (real_batch_size, defn[f\"nobj_{j}\"], len(defn[f\"features_{j}\"])),\n dtype=float,\n )\n arrays = tree.arrays(\n defn[f\"features_{j}\"],\n entry_start=k,\n entry_stop=k + batch_size,\n library=\"ak\",\n )\n for i, feature in enumerate(defn[f\"features_{j}\"]):\n feat = to_np_array(arrays[feature], maxN=defn[f\"nobj_{j}\"])\n feature_arrays[f\"features_{j}\"][:, :, i] = feat\n # For PyTorch channels-first style networks\n feature_arrays[f\"features_{j}\"] = np.ascontiguousarray(np.swapaxes(feature_arrays[f\"features_{j}\"], 1, 2))\n\n arrays = tree.arrays(labels, library=\"np\", entry_start=k, entry_stop=k + batch_size)\n target_array = np.zeros((real_batch_size, 2), dtype=float)\n target_array[:, 0] = arrays[\"sample_isQCD\"] * arrays[\"fj_isQCD\"]\n target_array[:, 1] = arrays[\"fj_isH\"]\n\n os.makedirs(f\"{project_dir}/data/processed/{dataset}\", exist_ok=True)\n with h5py.File(f\"{project_dir}/data/processed/{dataset}/newdata_{counter}.h5\", \"w\") as h5:\n logger.info(f\"creating {h5.filename} h5 file with {real_batch_size} events\")\n feature_data = h5.create_group(f\"{dataset}ing_subgroup\")\n target_data = h5.create_group(\"target_subgroup\")\n spec_data = h5.create_group(\"spectator_subgroup\")\n for j in range(n_feature_sets):\n feature_data.create_dataset(\n f\"{dataset}ing_{j}\",\n data=feature_arrays[f\"features_{j}\"].astype(\"float32\"),\n )\n np.save(\n f\"{project_dir}/data/processed/{dataset}/{dataset}_{counter}_features_{j}.npy\",\n feature_arrays[f\"features_{j}\"].astype(\"float32\"),\n )\n target_data.create_dataset(\"target\", data=target_array.astype(\"float32\"))\n np.save(\n f\"{project_dir}/data/processed/{dataset}/{dataset}_{counter}_truth.npy\",\n target_array.astype(\"float32\"),\n )\n spec_data.create_dataset(\"spectators\", data=spec_array.astype(\"float32\"))\n np.save(\n f\"{project_dir}/data/processed/{dataset}/{dataset}_{counter}_spectators.npy\",\n spec_array.astype(\"float32\"),\n )\n h5.close()\n if max_entries and total_entries >= max_entries:\n done = True\n break\n if done:\n break", "title": "" }, { "docid": "2ce973fc3bde8d563f5b2caafda3a395", "score": "0.48478526", "text": "def main(inputDataset, outputFile, groundTruth):\n groundTruthFile = pickle.load(open(groundTruth, \"rb\"))\n with open(outputFile, 'w') as outFile:\n for file in os.listdir(inputDataset):\n if file.endswith(\".xml\"):\n with open(inputDataset + \"/\" + file) as inputRunFile:\n xml.sax.parse(inputRunFile, HyperpartisanNewsTFExtractor(outFile, groundTruthFile))\n\n outFile.close()", "title": "" }, { "docid": "5b0f066cb397c9492212357e5d7d15e8", "score": "0.48445225", "text": "def openfits_but_cmd_xpa(self):\n self.regiontemp = 'temp_ds9_forinspection.reg'\n idstr = str(\"%05d\" % self.currentobj)\n lockstr = self.lockds9string()\n ds9cmd = ' '\n\n if not self.ds9windowopen:\n ds9cmd = ds9cmd+'ds9 -geometry 1200x600 -scale zscale '+\\\n lockstr+' -tile grid layout 4 '+str(2*int(self.Npamax))\n self.pds9 = subprocess.Popen(ds9cmd,shell=True,executable=os.environ[\"SHELL\"])\n time.sleep(1.1)# sleep to make sure ds9 appear in PIDlist\n self.ds9PID = vi.getPID('ds9',verbose=False) # get PID of 
DS9 process\n self.ds9windowopen = True\n time.sleep(1.0)\n for ii in np.arange(1,17):\n out = commands.getoutput('xpaset -p ds9 frame new')\n out = commands.getoutput('xpaset -p ds9 tile')\n\n Fstart = 1\n for PA in self.PAs:\n PAstr = '-'+str(\"%03d\" % int(PA))+'-'\n if self.MASTfiles:\n searchexpression = self.dir+'*'+idstr+'*-pa'+PAstr[1:-1]+'_*2d.fits'\n else:\n searchexpression = self.dir+'*'+PAstr+'*'+idstr+'*2D.fits'\n fits_2D = glob.glob(searchexpression)\n\n for ii in xrange(len(fits_2D)):\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))\n regionfile = self.regiontemp.replace('.reg',PAstr+'DSCI.reg')\n self.ds9textregion('DSCI PA='+str(int(PA)),filename=regionfile)\n out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[DSCI]')\n out = commands.getoutput('xpaset -p ds9 regions '+regionfile)\n Fstart += 1\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))\n regionfile = self.regiontemp.replace('.reg',PAstr+'SCI.reg')\n self.ds9textregion('SCI PA='+str(int(PA)),filename=regionfile)\n out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[SCI]')\n out = commands.getoutput('xpaset -p ds9 regions '+regionfile)\n Fstart += 1\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))\n regionfile = self.regiontemp.replace('.reg',PAstr+'CONTAM.reg')\n self.ds9textregion('CONTAM PA='+str(int(PA)),filename=regionfile)\n out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[CONTAM]')\n out = commands.getoutput('xpaset -p ds9 regions '+regionfile)\n Fstart += 1\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))\n regionfile = self.regiontemp.replace('.reg',PAstr+'SCI-CONTAM.reg')\n self.ds9textregion('SCI-CONTAM PA='+str(int(PA)),filename=regionfile)\n contamsub = self.subtractcontam(fits_2D[ii]) # creating file with contam. 
subtracted spectrum\n out = commands.getoutput('xpaset -p ds9 file '+contamsub)\n out = commands.getoutput('xpaset -p ds9 regions '+regionfile)\n\n # If a sextractor region file for the SCI-CONTAM image exists, show it.\n sexregion = fits_2D[ii].split('.fit')[0]+'_SCI-CONTAM.reg'\n if os.path.exists(sexregion):\n out = commands.getoutput('xpaset -p ds9 regions '+sexregion)\n Fstart += 1\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -", "title": "" }, { "docid": "5b0f066cb397c9492212357e5d7d15e8", "score": "0.48445225", "text": "def openfits_but_cmd_xpa(self):\n self.regiontemp = 'temp_ds9_forinspection.reg'\n idstr = str(\"%05d\" % self.currentobj)\n lockstr = self.lockds9string()\n ds9cmd = ' '\n\n if not self.ds9windowopen:\n ds9cmd = ds9cmd+'ds9 -geometry 1200x600 -scale zscale '+\\\n lockstr+' -tile grid layout 4 '+str(2*int(self.Npamax))\n self.pds9 = subprocess.Popen(ds9cmd,shell=True,executable=os.environ[\"SHELL\"])\n time.sleep(1.1)# sleep to make sure ds9 appear in PIDlist\n self.ds9PID = vi.getPID('ds9',verbose=False) # get PID of DS9 process\n self.ds9windowopen = True\n time.sleep(1.0)\n for ii in np.arange(1,17):\n out = commands.getoutput('xpaset -p ds9 frame new')\n out = commands.getoutput('xpaset -p ds9 tile')\n\n Fstart = 1\n for PA in self.PAs:\n PAstr = '-'+str(\"%03d\" % int(PA))+'-'\n if self.MASTfiles:\n searchexpression = self.dir+'*'+idstr+'*-pa'+PAstr[1:-1]+'_*2d.fits'\n else:\n searchexpression = self.dir+'*'+PAstr+'*'+idstr+'*2D.fits'\n fits_2D = glob.glob(searchexpression)\n\n for ii in xrange(len(fits_2D)):\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))\n regionfile = self.regiontemp.replace('.reg',PAstr+'DSCI.reg')\n self.ds9textregion('DSCI PA='+str(int(PA)),filename=regionfile)\n out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[DSCI]')\n out = commands.getoutput('xpaset -p ds9 regions '+regionfile)\n Fstart += 1\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))\n regionfile = self.regiontemp.replace('.reg',PAstr+'SCI.reg')\n self.ds9textregion('SCI PA='+str(int(PA)),filename=regionfile)\n out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[SCI]')\n out = commands.getoutput('xpaset -p ds9 regions '+regionfile)\n Fstart += 1\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))\n regionfile = self.regiontemp.replace('.reg',PAstr+'CONTAM.reg')\n self.ds9textregion('CONTAM PA='+str(int(PA)),filename=regionfile)\n out = commands.getoutput('xpaset -p ds9 file '+fits_2D[ii]+'[CONTAM]')\n out = commands.getoutput('xpaset -p ds9 regions '+regionfile)\n Fstart += 1\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n out = commands.getoutput('xpaset -p ds9 frame '+str(Fstart))\n regionfile = self.regiontemp.replace('.reg',PAstr+'SCI-CONTAM.reg')\n self.ds9textregion('SCI-CONTAM PA='+str(int(PA)),filename=regionfile)\n contamsub = self.subtractcontam(fits_2D[ii]) # creating file with contam. 
subtracted spectrum\n out = commands.getoutput('xpaset -p ds9 file '+contamsub)\n out = commands.getoutput('xpaset -p ds9 regions '+regionfile)\n\n # If a sextractor region file for the SCI-CONTAM image exists, show it.\n sexregion = fits_2D[ii].split('.fit')[0]+'_SCI-CONTAM.reg'\n if os.path.exists(sexregion):\n out = commands.getoutput('xpaset -p ds9 regions '+sexregion)\n Fstart += 1\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -", "title": "" }, { "docid": "8cc93fdb2912ccc4d536edc3c439b6b9", "score": "0.4842645", "text": "def data_relaying_loop(self):\n # Print Welcome Massage\n # ---------------------\n print('\\n\\n'\n ' =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\\n'\n ' Start : XFFTS Data Relaying Loop \\n'\n ' =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-'\n '\\n\\n')\n\n # ROS setting\n # -----------\n pub = rospy.Publisher('XFFTS_SPEC', XFFTS_msg, queue_size=10)\n pub2 = rospy.Publisher('XFFTS_PM', XFFTS_pm_msg, queue_size=10) # PM = Power Meter\n XFFTS_SPEC = XFFTS_msg()\n XFFTS_PM = XFFTS_pm_msg()\n\n # data making loop\n # ------------------\n while True:\n\n if self._stop_loop: break\n\n # get data\n # --------\n header = data_header()\n timestamp = header.timestamp\n BE_num = header.BE_num\n \n #make data\n spec = np.random.normal(5000, 2000, (header.BE_num, 32768))\n pow = np.sum(spec, axis=1)\n timestamp = str(time.time())\n\n # ROS Data Trans\n # --------------\n # Spectru\n XFFTS_SPEC.timestamp = timestamp\n XFFTS_SPEC.BE_num = BE_num\n XFFTS_SPEC.SPEC_BE1 = spec[0]\n XFFTS_SPEC.SPEC_BE2 = spec[1]\n XFFTS_SPEC.SPEC_BE3 = spec[2]\n XFFTS_SPEC.SPEC_BE4 = spec[3]\n XFFTS_SPEC.SPEC_BE5 = spec[4]\n XFFTS_SPEC.SPEC_BE6 = spec[5]\n XFFTS_SPEC.SPEC_BE7 = spec[6]\n XFFTS_SPEC.SPEC_BE8 = spec[7]\n XFFTS_SPEC.SPEC_BE9 = spec[8]\n XFFTS_SPEC.SPEC_BE10 = spec[9]\n XFFTS_SPEC.SPEC_BE11 = spec[10]\n XFFTS_SPEC.SPEC_BE12 = spec[11]\n XFFTS_SPEC.SPEC_BE13 = spec[12]\n XFFTS_SPEC.SPEC_BE14 = spec[13]\n XFFTS_SPEC.SPEC_BE15 = spec[14]\n XFFTS_SPEC.SPEC_BE16 = spec[15]\n XFFTS_SPEC.SPEC_BE17 = [0] #spec[16]#\n XFFTS_SPEC.SPEC_BE18 = [0] #spec[17]#\n XFFTS_SPEC.SPEC_BE19 = [0] #spec[18]#\n XFFTS_SPEC.SPEC_BE20 = [0] #spec[19]#\n pub.publish(XFFTS_SPEC)\n\n # total power\n XFFTS_PM.timestamp = timestamp\n XFFTS_PM.BE_num = BE_num\n XFFTS_PM.POWER_BE1 = pow[0]\n XFFTS_PM.POWER_BE2 = pow[1]\n XFFTS_PM.POWER_BE3 = pow[2]\n XFFTS_PM.POWER_BE4 = pow[3]\n XFFTS_PM.POWER_BE5 = pow[4]\n XFFTS_PM.POWER_BE6 = pow[5]\n XFFTS_PM.POWER_BE7 = pow[6]\n XFFTS_PM.POWER_BE8 = pow[7]\n XFFTS_PM.POWER_BE9 = pow[8]\n XFFTS_PM.POWER_BE10 = pow[9]\n XFFTS_PM.POWER_BE11 = pow[10]\n XFFTS_PM.POWER_BE12 = pow[11]\n XFFTS_PM.POWER_BE13 = pow[12]\n XFFTS_PM.POWER_BE14 = pow[13]\n XFFTS_PM.POWER_BE15 = pow[14]\n XFFTS_PM.POWER_BE16 = pow[15]\n XFFTS_PM.POWER_BE17 = 0 # pow[16]\n XFFTS_PM.POWER_BE18 = 0 # pow[17]\n XFFTS_PM.POWER_BE19 = 0 # pow[18]\n XFFTS_PM.POWER_BE20 = 0 # pow[19]\n pub2.publish(XFFTS_PM)\n time.sleep(0.02)#shiotani added\n\n # Print Shut Down Massage\n # -----------------------\n print('\\n\\n'\n ' =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-\\n'\n ' Shut Down : XFFTS Data Relaying Loop \\n'\n ' =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-'\n '\\n\\n')\n return", "title": "" }, { "docid": "93b27de64a013b7a2198e78ac8efad10", "score": "0.48401418", "text": "def call(self, x, mask=None):\r\n # Extract atom_features\r\n atom_features = x[0]\r\n\r\n # Extract graph topology\r\n deg_slice, membership, deg_adj_lists = x[1], x[2], x[3:]\r\n\r\n # Perform the mol gather\r\n atom_features = graph_pool(atom_features, 
deg_adj_lists, deg_slice,\r\n self.max_deg, self.min_deg)\r\n\r\n return atom_features", "title": "" }, { "docid": "ebdb7bf53b79c2bd34891a969fe41610", "score": "0.48388705", "text": "def run(**kwargs):\n print(\"Args: {0}\".format(kwargs))\n print(\"Running disaster by felipepenha\")\n split(**kwargs) # generate train/valid/test sets\n process(**kwargs) # clean text for NLP tasks\n features(**kwargs) # generate dataset for training\n select(**kwargs) # feature selection based on training data only\n train(**kwargs) # training model and save to filesystem\n metadata(**kwargs) # performance report\n predict(**kwargs) # predictions on new data", "title": "" }, { "docid": "936d52919a5ce1d66469412b8a6f2b8a", "score": "0.4837951", "text": "def main():\n # Logging to the console.\n setup_logger_console()\n\n # Command line.\n args = parse_cl()\n\n # XPath expression.\n if isinstance(args.xpath_expr, bytes):\n # Python 2 Bytestring to Unicode.\n args.xpath_expr = args.xpath_expr.decode(\"utf-8\")\n if not build_xpath(args.xpath_expr):\n sys.exit(60)\n\n # XPath function and XML parser.\n (xpath_fn, xml_parser) = xp_prepare(args)\n\n # Use XPath on XML sources.\n extra_new_line = False\n for xml_s in args.xml_sources:\n if extra_new_line:\n print()\n elif not (args.files_with_hits or args.files_without_hits):\n extra_new_line = True\n xpath_on_xml(xml_s, xml_parser, xpath_fn, args)\n\n if not args.xml_sources:\n # Read from a pipe when no XML source is specified.\n if not sys.stdin.isatty():\n xpath_on_xml(sys.stdin, xml_parser, xpath_fn, args)\n else:\n sys.stderr.write(\"Error: no XML source specified\\n\")\n sys.exit(70)", "title": "" }, { "docid": "9a4d672adb1a340dfb7a31c36ee8e286", "score": "0.4835829", "text": "def test(infile=gama_data+'/jswml/auto/kcorrz01.fits', ran_dist='vol',\n Q=Qdef, P=Pdef, key='w_p', xlimits=(0.01, 100)):\n\n Mlimits = (-22, -21)\n zlimits = util.vol_limits(infile, Q=Q, Mlims=Mlimits)\n z_range = [0.002, zlimits[1]]\n galout = 'gal_test.dat'\n ranout = 'ran_test.dat'\n xiout = 'xi_test.dat'\n xi_select(infile, galout, ranout, xiout,\n z_range=z_range, nz=20, app_range=(14, 19.8),\n abs_range=Mlimits,\n Q=Q, P=P, ran_dist=ran_dist, ran_fac=1)\n\n # Run the clustering code executable in $BIN/xi, compiled from xi.c\n# cmd = '$BIN/xi {} {} {}'.format(galout, ranout, xiout)\n# subprocess.call(cmd, shell=True)\n cmd = '$BIN/xi {} {}'.format(galout, 'gg_test.dat')\n subprocess.call(cmd, shell=True)\n cmd = '$BIN/xi {} {} {}'.format(galout, ranout, 'gr_test.dat')\n subprocess.call(cmd, shell=True)\n cmd = '$BIN/xi {} {}'.format(ranout, 'rr_test.dat')\n subprocess.call(cmd, shell=True)\n\n # Plot the results\n panels = []\n comps = []\n label = 'Test'\n panels.append({'files': (xiout, ), 'comps': comps, 'label': label})\n xi_plot(key, panels, xlimits=xlimits)\n plt.show()\n xi2d_plot(xiout, binning=0, mirror=0)\n plt.show()\n xi2d_plot(xiout, binning=1, mirror=0)\n plt.show()\n xi2d_plot(xiout, binning=2, mirror=0)\n plt.show()", "title": "" }, { "docid": "864411532bd0a41c20d8de0f691d695a", "score": "0.48324317", "text": "def run():\n # init stream manager\n stream_manager_api = StreamManagerApi()\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(\"Failed to init Stream manager, ret=%s\" % str(ret))\n exit()\n\n # create streams by pipeline config file\n with open(os.path.realpath(args.pipeline), 'rb') as f:\n pipeline_str = f.read()\n ret = stream_manager_api.CreateMultipleStreams(pipeline_str)\n if ret != 0:\n print(\"Failed to create Stream, ret=%s\" % 
str(ret))\n exit()\n\n # prepare data\n data = np.load(os.path.join(args.data_dir, args.data_file.format(args.mode))).astype(np.float16)\n income = np.load(os.path.join(args.data_dir, args.income_file.format(args.mode))).astype(np.float16)\n married = np.load(os.path.join(args.data_dir, args.married_file.format(args.mode))).astype(np.float16)\n\n if(data.shape[0] != income.shape[0] or income.shape[0] != married.shape[0]):\n print(\"number of input data not completely equal\")\n exit()\n rows = data.shape[0]\n\n # statistical variable\n income_labels = []\n married_labels = []\n income_preds = []\n married_preds = []\n infer_total_time = 0\n\n # write predict results\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n for i in range(rows):\n # fetch data\n data_batch = data[i]\n income_batch = income[i]\n married_batch = married[i]\n\n # data shape\n data_shape = (-1, args.num_features)\n\n # data type\n data_type = np.float16\n\n # send data\n stream_name = b'MMoE'\n if not send_appsrc_data(0, 'data', data_batch, stream_name, stream_manager_api, data_shape, data_type):\n return\n\n # Obtain the inference result by specifying streamName and uniqueId.\n key_vec = StringVector()\n key_vec.push_back(b'mxpi_tensorinfer0')\n start_time = time.time()\n infer_result = stream_manager_api.GetProtobuf(stream_name, 0, key_vec)\n infer_total_time += time.time() - start_time\n if infer_result.size() == 0:\n print(\"inferResult is null\")\n return\n if infer_result[0].errorCode != 0:\n print(\"GetProtobuf error. errorCode=%d\" % (infer_result[0].errorCode))\n return\n\n # updata variable\n income_pred, married_pred = post_process(infer_result)\n income_preds.extend(income_pred)\n married_preds.extend(married_pred)\n income_labels.extend(income_batch)\n married_labels.extend(married_batch)\n\n income_preds = np.array(income_preds)\n married_preds = np.array(married_preds)\n income_labels = np.array(income_labels)\n married_labels = np.array(married_labels)\n np.save(os.path.join(args.output_dir, 'income_preds_{}.npy'.format(args.mode)), income_preds)\n np.save(os.path.join(args.output_dir, 'married_preds_{}.npy'.format(args.mode)), married_preds)\n np.save(os.path.join(args.output_dir, 'income_labels_{}.npy').format(args.mode), income_labels)\n np.save(os.path.join(args.output_dir, 'married_labels_{}.npy'.format(args.mode)), married_labels)\n income_auc = get_auc(income_labels, income_preds)\n married_auc = get_auc(married_labels, married_preds)\n print('<<======== Infer Metric ========>>')\n print('Mode: {}'.format(args.mode))\n print('Number of samples: {}'.format(rows))\n print('Total inference time: {}'.format(infer_total_time))\n print('Average inference time: {}'.format(infer_total_time/rows))\n print('Income auc: {}'.format(income_auc))\n print('Married auc: {}'.format(married_auc))\n print('<<===============================>>')\n stream_manager_api.DestroyAllStreams()", "title": "" }, { "docid": "bcabd31123651224bcfa152ef31dd601", "score": "0.48323286", "text": "def main():\n parser = argparse.ArgumentParser(\n description=\"Extracts a set of features from a given dataset \"\n \"or audio file and saves them into the 'features' folder of \"\n \"the dataset or the specified single file.\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"in_path\",\n action=\"store\",\n help=\"Input dataset dir or audio file\")\n parser.add_argument(\"-j\",\n action=\"store\",\n dest=\"n_jobs\",\n type=int,\n help=\"Number of jobs (only for collection mode)\",\n 
default=4)\n parser.add_argument(\"-o\",\n action=\"store\",\n dest=\"out_file\",\n type=str,\n help=\"Output file (only for single file mode)\",\n default=\"out.json\")\n parser.add_argument(\"-d\",\n action=\"store\",\n dest=\"ds_name\",\n default=\"*\",\n help=\"The prefix of the dataset to use \"\n \"(e.g. Isophonics, SALAMI)\")\n parser.add_argument(\"-fs\",\n action=\"store_true\",\n dest=\"framesync\",\n help=\"Use frame-synchronous features\",\n default=False)\n args = parser.parse_args()\n start_time = time.time()\n\n # Setup the logger\n logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',\n level=logging.INFO)\n\n # Run the main process\n process(args.in_path, out_file=args.out_file, n_jobs=args.n_jobs, framesync=args.framesync)\n\n # Done!\n logging.info(\"Done! Took %.2f seconds.\" % (time.time() - start_time))", "title": "" }, { "docid": "0d42aa96d2a62c822bb866a9a56917a3", "score": "0.48310605", "text": "def runAll(self):\n self.readData()\n self.tileCCD()\n self.writeFITSfile()", "title": "" }, { "docid": "a051bc50cc65cc605940704a603adc19", "score": "0.4821654", "text": "def main():\n spark = create_spark_session()\n \n ## Input and output paths\n output_data = \"s3a://ychang-output/\"\n input_data = \"\"\n \n process_demographics_data(spark, input_data, output_data)\n process_immigration_data(spark, input_data, output_data)\n process_countries_data(spark, input_data, output_data)", "title": "" }, { "docid": "1483975361b8122f50dde33c1fdde10e", "score": "0.4813013", "text": "def main():\n description = \"Retrieves feature from database and runs predictions\"\n parser = argparse.ArgumentParser(description=description)\n setup_arguments(parser)\n setup_options(parser)\n args = parser.parse_args()\n error = validate_arguments(args)\n if error != None:\n parser.error(error)\n\n ch = logging.StreamHandler()\n\n if args.debug:\n logger.setLevel(logging.DEBUG)\n ch.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.WARN)\n ch.setLevel(logging.WARN)\n logger.addHandler(ch)\n process(args.xval, args.outfile)", "title": "" }, { "docid": "0b78dbcff248e798c30d1ccdbe209a48", "score": "0.48008874", "text": "def run(self, grph, task, opts, comm=None):\n if comm is not None:\n if comm.size > 1:\n raise RuntimeError(\"Fluxcal worker should only be called with one process\")\n\n log = get_logger()\n\n node = grph[task]\n\n frm = []\n flat = []\n sky = []\n star = []\n for input in node[\"in\"]:\n inode = grph[input]\n if inode[\"type\"] == \"frame\":\n frm.append(input)\n elif inode[\"type\"] == \"fiberflat\":\n flat.append(input)\n elif inode[\"type\"] == \"sky\":\n sky.append(input)\n elif inode[\"type\"] == \"stdstars\":\n star.append(input)\n if len(frm) != 1:\n raise RuntimeError(\"fluxcal needs exactly one frame file\")\n if len(flat) != 1:\n raise RuntimeError(\"fluxcal needs exactly one fiberflat file\")\n if len(sky) != 1:\n raise RuntimeError(\"fluxcal needs exactly one sky file\")\n if len(star) != 1:\n raise RuntimeError(\"fluxcal needs exactly one star file\")\n\n framefile = graph_path(frm[0])\n flatfile = graph_path(flat[0])\n skyfile = graph_path(sky[0])\n starfile = graph_path(star[0])\n outfile = graph_path(task)\n\n #qafile, qafig = qa_path(outfile)\n\n options = {}\n options[\"infile\"] = framefile\n options[\"fiberflat\"] = flatfile\n #options[\"qafile\"] = qafile\n #options[\"qafig\"] = qafig\n options[\"sky\"] = skyfile\n options[\"models\"] = starfile\n options[\"outfile\"] = outfile\n options.update(opts)\n optarray = option_list(options)\n\n # 
at debug level, write out the equivalent commandline\n com = [\"RUN\", \"desi_compute_fluxcalibration\"]\n com.extend(optarray)\n log.info(\" \".join(com))\n\n args = fluxcal.parse(optarray)\n fluxcal.main(args)\n\n return", "title": "" }, { "docid": "33888796f8d6f11a180a20301f268f69", "score": "0.47944012", "text": "def colfaxtest( getflts=True, username=None, password=None, runpipeline=True):\n import urllib.request, urllib.error, urllib.parse\n import os\n import sys\n import time\n\n start = time.time()\n\n thisfile = sys.argv[0]\n if 'ipython' in thisfile :\n thisfile = __file__\n thisdir = os.path.dirname( os.path.abspath( thisfile ) )\n\n if getflts :\n tgzfile = os.path.join( thisdir, 'sndrizpipe/colfax_test.tgz' )\n os.system( 'tar -xvzf %s'%tgzfile )\n\n if False :\n # Preferred method for getting the flt files for the colfax test suite.\n # This is not yet working. Maybe due to the CADC reboot.\n # TODO : check if flt downloads work after April, 2014\n\n # create a password manager\n password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()\n\n # Add the username and password.\n # If we knew the realm, we could use it instead of None.\n top_level_url = \"HTTP://WWW.CADC-CCDA.HIA-IHA.NRC-CNRC.GC.CA/DATA/PUB/HSTCA/\"\n password_mgr.add_password(None, top_level_url, username, password)\n\n handler = urllib.request.HTTPBasicAuthHandler(password_mgr)\n\n # create \"opener\" (OpenerDirector instance)\n opener = urllib.request.build_opener(handler)\n\n # Install the opener.\n # Now all calls to urllib2.urlopen use our opener.\n urllib.request.install_opener(opener)\n\n # Go and fetch all the flt files:\n fltlist = ['IBTM7MLDQ_FLT','IBTM7MLGQ_FLT','IBTMADMJQ_FLT',\n 'IBTMADMNQ_FLT','IBOEABOSQ_FLT','IBOEABOWQ_FLT',\n 'IBWJCBD3Q_FLT','IBWJCBDBQ_FLT','IBOE3CJHQ_FLT',\n 'IBOE3CJKQ_FLT','IBOE3CJOQ_FLT','IBOE3CJRQ_FLT',\n ]\n\n for fltroot in fltlist :\n url = 'HTTP://WWW.CADC-CCDA.HIA-IHA.NRC-CNRC.GC.CA/DATA/PUB/HSTCA/'\n flt = urllib.request.urlopen(url+fltroot)\n\n\n if runpipeline :\n runpipe_cmdline.runpipe( 'colfax', onlyfilters=['F160W'], onlyepochs=[0,1,2],\n doall=True, refcat='goodsn_mosaic.cat',\n refepoch=1, reffilter='F160W',\n mjdmin=56010, mjdmax=56300, epochspan=5,\n ra=189.156538, dec=62.309147,\n clobber=False, verbose=True, debug=False )\n\n end = time.time()\n print(( \"SNDRIZPIPE : colfax test completed in %.2f min\"%((end-start)/60.) 
))\n return 0", "title": "" }, { "docid": "c46a63d71a111c33c80ce8efd9d534fb", "score": "0.47934473", "text": "def run(self, data: list):\n print(\"Here's your output data: {}\".format(data))", "title": "" }, { "docid": "3c65b6df18f6009ed7ce5ba5b9bb1008", "score": "0.47920525", "text": "def run(self):\n raw_map_components = self.load_raw_data()\n map_components = self.add_geometry_representative(raw_map_components)\n data_dfs = self.select_data_for_borough(map_components)\n\n # data_hand = data_handler.DataHandler()\n map_plotter.plot_raw_data(data_dfs)\n\n # include only data around central park for debugging\n # data_dfs = self.zoom_on_data(data_dfs, -73.97, 40.77, 0.01, False)\n # central park, small\n # data_dfs = self.zoom_on_data(data_dfs, -73.97, 40.77, 0.01, True)\n # central park, big\n # data_dfs = self.zoom_on_data(data_dfs, -73.97, 40.77, 0.02, True)\n\n # data_dfs = self.zoom_on_data(data_dfs, -73.994, 40.740, 0.01, False)\n # data_dfs = self.zoom_on_data(data_dfs, -73.994, 40.740, 0.01, True)\n # self.plot_data(data_dfs)\n\n # south manhattan???\n # data_dfs = self.zoom_on_data(data_dfs, -73.99, 40.73, 0.02, True)\n\n # need this to run to add features to the dfs\n # adds the rep_x_rad fields\n # data_dfs = self.zoom_on_data(data_dfs, -73.97, 40.77, 1., False)\n\n self.write_processed_data_to_file(data_dfs)\n\n print('Converting data to segments')\n data_dict = self.extract_segments_from_df(data_dfs)\n print('Getting connections from segments')\n conn_dict = self.convert_segments_to_vertex(data_dict)\n print('Adding weights')\n connections_dict = self.add_weights(conn_dict, data_dfs)\n print('writing connections to file')\n self.write_data_to_file(connections_dict)\n\n # checking for duplicates\n for i in range(len(conn_dict['vertex_start'])):\n for j in range(i+1, len(conn_dict['vertex_start'])):\n if conn_dict['vertex_start'][i] == conn_dict['vertex_start'][j]:\n if conn_dict['vertex_end'][i] == conn_dict['vertex_end'][j]:\n print(i, j)\n if conn_dict['vertex_start'][i] == conn_dict['vertex_end'][j]:\n if conn_dict['vertex_end'][i] == conn_dict['vertex_start'][j]:\n print(i, j)\n\n return", "title": "" }, { "docid": "c3b09a4b1cba2eb0097c7f4d0b9339e9", "score": "0.47895196", "text": "def run(args):\n if os.path.isfile(args.incat):\n # Basic information\n data = fits.open(args.incat)[1].data\n idx = (args.id)\n rerun = (args.rerun).strip()\n prefix = (args.prefix).strip()\n filt = (args.filter).strip().upper()\n\n # Bright star catalog\n if args.brightStar:\n starCat = hUtil.getStarCatalog()\n else:\n starCat = None\n\n # Keep a log\n if args.sample is not None:\n logPre = prefix + '_' + args.sample\n else:\n logPre = prefix\n logFile = logPre + '_prep_' + filt + '.log'\n if not os.path.isfile(logFile):\n os.system('touch ' + logFile)\n\n # Start the loop\n if args.verbose:\n print(\"\\n## Will deal with %d galaxies ! \" % len(data))\n\n for galaxy in data:\n # Galaxy ID and prefix\n galID = str(galaxy[idx]).strip()\n galPrefix = prefix + '_' + galID + '_' + filt + '_full'\n if args.verbose:\n print(\"\\n## Will Deal with %s now ! 
\" % galID)\n\n # Folder for the data\n galRoot = os.path.join(galID, filt)\n if not os.path.isdir(galRoot):\n warnings.warn('### Cannot find folder %s' % galRoot)\n with open(logFile, \"a\") as logMatch:\n logStr = \"%25s %10s NDIR \\n\"\n try:\n logMatch.write(logStr % (galPrefix, rerun))\n fcntl.flock(logMatch, fcntl.LOCK_UN)\n except IOError:\n pass\n continue\n\n # Image\n galImg = galPrefix + '_img.fits'\n if not os.path.isfile(os.path.join(galRoot, galImg)):\n warnings.warn('### Cannot find image %s' % galImg)\n with open(logFile, \"a\") as logMatch:\n logStr = \"%25s %10s NIMG \\n\"\n try:\n logMatch.write(logStr % (galPrefix, rerun))\n fcntl.flock(logMatch, fcntl.LOCK_UN)\n except IOError:\n pass\n continue\n\n try:\n if rerun == 'default':\n ccp.coaddCutoutPrepare(\n galPrefix,\n root=galRoot,\n rerun='default',\n bSizeH=10.0,\n bSizeC=60.0,\n thrH=2.5,\n thrC=1.5,\n galR1=1.6,\n galR2=3.1,\n galR3=6.5,\n growH=2.1,\n growW=4.0,\n growC=5.0,\n sigma=6.0,\n sigthr=0.02,\n minDetH=4,\n minDetC=8,\n debThrH=32,\n debThrC=64,\n debConH=0.0005,\n debConC=0.0001,\n kernel=4,\n central=1,\n maskMethod=1,\n growMethod=1,\n useSigArr=False,\n noBkgC=False,\n noBkgH=False,\n combBad=True,\n combDet=True,\n brightStar=starCat,\n multiMask=args.multiMask)\n with open(logFile, \"a\") as logMatch:\n logStr = \"%25s %10s DONE \\n\"\n try:\n logMatch.write(logStr % (galPrefix, rerun))\n fcntl.flock(logMatch, fcntl.LOCK_UN)\n except IOError:\n pass\n elif rerun == 'smallR1':\n ccp.coaddCutoutPrepare(\n galPrefix,\n root=galRoot,\n rerun='smallR1',\n bSizeH=10.0,\n bSizeC=40.0,\n thrH=2.5,\n thrC=1.1,\n growH=2.5,\n growW=5.5,\n growC=7.5,\n galR1=1.4,\n galR2=2.5,\n galR3=4.0,\n sigma=9.0,\n sigthr=0.01,\n kernel=4,\n central=1,\n maskMethod=1,\n growMethod=1,\n useSigArr=False,\n noBkgC=False,\n noBkgH=False,\n minDetH=5,\n minDetC=8,\n debThrH=16,\n debThrC=32,\n debConH=0.001,\n debConC=0.0025,\n combBad=True,\n combDet=True,\n brightStar=starCat,\n multiMask=False)\n with open(logFile, \"a\") as logMatch:\n logStr = \"%25s %10s DONE \\n\"\n try:\n logMatch.write(logStr % (galPrefix, rerun))\n fcntl.flock(logMatch, fcntl.LOCK_UN)\n except IOError:\n pass\n elif rerun == 'largeR1':\n ccp.coaddCutoutPrepare(\n galPrefix,\n root=galRoot,\n rerun='largeR1',\n bSizeH=10.0,\n bSizeC=40.0,\n thrH=3.0,\n thrC=1.5,\n growH=1.5,\n growW=3.0,\n growC=4.5,\n galR1=2.5,\n galR2=5.0,\n galR3=7.0,\n sigma=7.0,\n sigthr=0.02,\n kernel=4,\n central=1,\n maskMethod=1,\n growMethod=1,\n useSigArr=False,\n noBkgC=False,\n noBkgH=False,\n minDetH=5,\n minDetC=8,\n debThrH=16,\n debThrC=32,\n debConH=0.001,\n debConC=0.0025,\n combBad=True,\n combDet=True,\n brightStar=starCat,\n multiMask=False)\n with open(logFile, \"a\") as logMatch:\n logStr = \"%25s %10s DONE \\n\"\n try:\n logMatch.write(logStr % (galPrefix, rerun))\n fcntl.flock(logMatch, fcntl.LOCK_UN)\n except IOError:\n pass\n else:\n ccp.coaddCutoutPrepare(\n galPrefix,\n root=galRoot,\n rerun=rerun,\n bSizeH=args.bSizeH,\n bSizeC=args.bSizeC,\n thrH=args.thrH,\n thrC=args.thrC,\n growH=args.growH,\n growW=args.growW,\n growC=args.growC,\n kernel=args.kernel,\n central=args.central,\n maskMethod=args.mask,\n growMethod=args.grow,\n useSigArr=args.useSigArr,\n noBkgC=args.noBkgC,\n noBkgH=args.noBkgH,\n minDetH=args.minDetH,\n minDetC=args.minDetC,\n debThrH=args.debThrH,\n debThrC=args.debThrC,\n debConH=args.debConH,\n debConC=args.debConC,\n combBad=args.combBad,\n combDet=args.combDet,\n brightStar=starCat,\n multiMask=args.multiMask)\n with 
open(logFile, \"a\") as logMatch:\n logStr = \"%25s %10s DONE \\n\"\n try:\n logMatch.write(logStr % (galPrefix, rerun))\n fcntl.flock(logMatch, fcntl.LOCK_UN)\n except IOError:\n pass\n except Exception, errMsg:\n warnings.warn('\\n### The preparation is failed for %s in %s' %\n (galPrefix, filt))\n logging.warning('### The preparation is failed for %s in %s' %\n (galPrefix, filt))\n print(str(errMsg))\n with open(logFile, \"a\") as logMatch:\n logStr = \"%25s %10s FAIL \\n\"\n try:\n logMatch.write(logStr % (galPrefix, rerun))\n fcntl.flock(logMatch, fcntl.LOCK_UN)\n except IOError:\n pass\n else:\n raise Exception(\"\\n### Can not find the input catalog: %s\" % args.incat)", "title": "" }, { "docid": "0042adda551169d2a82d19d1576ee15d", "score": "0.4783602", "text": "def create_mxd(data_location, map_template, output_name):\n import arcpy\n shutil.copyfile(map_template, os.path.join(data_location, \"{0}.mxd\".format(output_name)))\n mxd = arcpy.mapping.MapDocument(os.path.join(data_location, \"{0}.mxd\".format(output_name)))\n if mxd.description == '':\n mxd.description = os.path.basename(mxd.filePath)\n df = arcpy.mapping.ListDataFrames(mxd)[0]\n\n types = ('*.shp', '*.gdb', '*.mxd', '*.lyr')\n all_data = []\n for files in types:\n all_data.extend(glob.glob(os.path.join(data_location, files)))\n for ds in all_data:\n if ds.endswith('.shp'):\n # Add all shapefiles to the mxd template.\n layer = arcpy.MakeFeatureLayer_management(ds, '{0}_'.format(os.path.basename(ds)[:-3]))\n arcpy.mapping.AddLayer(df, layer.getOutput(0))\n elif ds.endswith('.gdb'):\n # Add all feature classes to the mxd template.\n arcpy.env.workspace = ds\n feature_datasets = arcpy.ListDatasets('*', 'Feature')\n if feature_datasets:\n for fds in feature_datasets:\n arcpy.env.workspace = fds\n for fc in arcpy.ListFeatureClasses():\n layer = arcpy.MakeFeatureLayer_management(fc, '{0}_'.format(fc))\n arcpy.mapping.AddLayer(df, layer.getOutput(0))\n arcpy.env.workspace = ds\n for fc in arcpy.ListFeatureClasses():\n layer = arcpy.MakeFeatureLayer_management(fc, '{0}_'.format(fc))\n arcpy.mapping.AddLayer(df, layer.getOutput(0))\n for raster in arcpy.ListRasters():\n layer = arcpy.MakeRasterLayer_management(raster, '{0}_'.format(raster))\n arcpy.mapping.AddLayer(df, layer.getOutput(0))\n elif ds.endswith('.lyr'):\n # Add all layer files to the mxd template.\n arcpy.mapping.AddLayer(df, arcpy.mapping.Layer(ds))\n mxd.save()\n return mxd.filePath", "title": "" }, { "docid": "d863472ffa67731ab8c0079b55e5237c", "score": "0.47828564", "text": "def run_data(config):\n # the folders in which to save data\n data_path = make_absolute(config[\"data_path\"])\n raw_path = os.path.join(data_path, config[\"raw_folder\"])\n adj_close_path = os.path.join(raw_path, config[\"adj_close_folder\"])\n iv_path = os.path.join(raw_path, config[\"iv_folder\"])\n processed_path = os.path.join(data_path, config[\"processed_folder\"])\n\n # delete then recreate the data folders\n # this is to completly overwrite all data if it exists\n shutil.rmtree(data_path, ignore_errors=True)\n os.mkdir(data_path)\n os.mkdir(raw_path)\n os.mkdir(adj_close_path)\n os.mkdir(iv_path)\n os.mkdir(processed_path)\n\n # download raw data\n download_data(config)\n\n # process the data\n process_data(config)", "title": "" }, { "docid": "61a859982037861411d8b5ebff81e5ae", "score": "0.4777348", "text": "def main():\n args = arg_parser().parse_args()\n\n logging.basicConfig(filename=os.path.abspath(args.log_file), \n level=getattr(logging, args.log_level))\n log = 
logging.getLogger(__name__)\n log.debug(\"Got command args %(args)s\" % vars())\n \n gen = gen_metrics(args.hosts_per_thread, args.num_metrics, args.num_ticks, \n args.time_step)\n \n with CarbonClient(args.carbon_host, 2004) as client:\n data = []\n for row in gen:\n data.append(row)\n if len(data) == args.batch_size:\n #print \"SEND\", data\n client.send(data)\n del data[:]\n client.send(data)\n sys.exit(0)", "title": "" }, { "docid": "7cf7cc4d91673cc93b4cde65b5333251", "score": "0.47737035", "text": "def run(self,data):\n # Setup feed indexing\n # self.feeds : feed horn ID (in array indexing, only chosen feeds)\n # self.feedlist : all feed IDs in data file (in lvl1 indexing)\n # self.feeddict : map between feed ID and feed array index in lvl1\n self.feeds, self.feed_index, self.feed_dict = self.getFeeds(data,self.feeds_select)\n\n # Opening file here to write out data bit by bit\n self.nFeeds, self.nBands, self.nChannels,self.nSamples = data['spectrometer/tod'].shape\n\n frequency = data['spectrometer/frequency'][...]\n features = np.log(self.getFeatures(data))/np.log(2)\n gd = (features != 13) & np.isfinite(features)\n cname, gain, tsys,spikes = self.getcalibration_obs(data)\n\n self.mask = np.zeros((self.nFeeds,self.nBands, self.nChannels),dtype=bool)\n self.peak_freqs= np.empty((self.nFeeds, self.nBands),dtype=object)\n self.peak_amps = np.empty((self.nFeeds, self.nBands),dtype=object)\n for ifeed,feed in enumerate(self.feeds):\n for iband in range(self.nBands):\n self.mask[ifeed,iband], self.peak_amps[ifeed,iband], self.peak_freqs[ifeed,iband] = self.find_tsys_spikes(tsys[0,ifeed,iband,:],frequency[iband])\n \n tsamp = float(data['comap'].attrs['tsamp'])\n bw = 2e9/1024\n\n self.tsys_rms = np.zeros((self.nFeeds, self.nBands, self.nChannels))\n self.auto_rms = np.zeros((self.nFeeds, self.nBands, self.nChannels))\n for ifeed, feed in enumerate(tqdm(self.feeds,desc=self.name)):\n d = data['spectrometer/tod'][ifeed,...] 
\n d = d[...,gd]\n N = int(d.shape[-1]//2*2)\n \n d = d/gain[0,ifeed,...,None]\n self.tsys_rms[ifeed] = tsys[0,ifeed]/np.sqrt(bw*tsamp)\n self.auto_rms[ifeed] = np.nanstd(d[...,1:N:2]-d[...,0:N:2],axis=-1)/np.sqrt(2)", "title": "" }, { "docid": "58aef897cf94f1c770f34c6aad509ef7", "score": "0.47671825", "text": "def execute(self, synode_context):\n filename_retriever = filename_retriever_gui.FilenameRetriever(\n synode_context.parameters['directory'].value,\n synode_context.parameters['search_pattern'].value)\n\n selected_fq_filenames = filename_retriever.filenames(\n fully_qualified=True,\n recursive=synode_context.parameters['recursive'].value)\n\n for fq_filename in selected_fq_filenames:\n datasource = dsrc.File()\n datasource.encode_path(fq_filename)\n synode_context.output['port1'].append(datasource)", "title": "" }, { "docid": "2883e86558c539e1212bd305a06c0550", "score": "0.47671565", "text": "def process_data(gtf_data: list, data5_data: list, data6_data: list) -> (list, list, list):\n # Apply each data file to the correct processing function.\n return process_gtf_data(gtf_data), process_psl(data5_data), process_psl(data6_data)", "title": "" }, { "docid": "d6e3de5078e11597537221207e1b7907", "score": "0.47670442", "text": "def run(self, dataRef, dstype, tableName, host, db, port=3306, user=None, viewName=None):\n self.ingest(dataRef.get(dstype), tableName, host, db, port, user, viewName)", "title": "" }, { "docid": "951d121c01bb47eb118b382f70ec694b", "score": "0.47620532", "text": "def run(self, data):\n # The 'x' corresponds to name of input placeholder\n return self.sess.run(self.activation, feed_dict={\"x:0\": data})", "title": "" }, { "docid": "43003c2708ca4da01d48a0f132af3144", "score": "0.47619116", "text": "def run(self,data):\n\n fname = data.filename.split('/')[-1]\n\n # Read in data that is required:\n freq = data['spectrometer/frequency'][...]\n mjd = data['spectrometer/MJD'][:]\n el = data['pointing/elActual'][:]\n\n self.feeds,self.feed_inds,self.feed_dict = self.getFeeds(data,self.feeds_select)\n\n self.mjd = np.mean(mjd)\n self.elevation = np.nanmedian(el)\n\n # Keep TOD as a file object to avoid reading it all in\n tod = data['spectrometer/tod']\n el = data['spectrometer/pixel_pointing/pixel_el'][0,:]\n btod = data['spectrometer/band_average']\n nHorns, nSBs, nChan, nSamps = tod.shape\n\n\n # Setup for calculating the calibration factors, interpolate temperatures:\n if mjd[0] < Time(datetime(2019,3,1),format='datetime').mjd: # Early observations before antenna0\n tHot = np.nanmean(data['hk/env/ambientLoadTemp'][:])/100. + self.tHotOffset\n hkMJD = data['hk/env/MJD'][:]\n # Observations before vane change:\n elif (mjd[0] > Time(datetime(2019,3,1),format='datetime').mjd) & (mjd[0] < Time(datetime(2019,8,23),format='datetime').mjd):\n tHot = np.nanmean(data['hk/antenna0/env/ambientLoadTemp'][:])/100. + self.tHotOffset\n hkMJD = data['hk/antenna0/env/utc'][:]\n else:\n tHot = np.nanmean(data['hk/antenna0/vane/Tvane'][:])/100. 
+ self.tHotOffset\n hkMJD = data['hk/antenna0/vane/utc'][:]\n\n vanePositions, nVanes = self.FindVane(data)\n\n self.vane_samples = np.zeros((nVanes,2))\n # Create output containers\n self.Tsys = np.zeros((nVanes, nHorns, nSBs, nChan))\n self.Gain = np.zeros((nVanes, nHorns, nSBs, nChan))\n self.RMS = np.zeros((nVanes, nHorns, nSBs, nChan))\n self.Spikes = np.zeros((nVanes, nHorns, nSBs, nChan),dtype=bool)\n\n self.elevations = np.zeros((nVanes))\n for vane_event, (start,end) in enumerate(vanePositions):\n self.elevations[vane_event] = np.nanmean(el[start:end])\n\n # Now loop over each event:\n for horn, feedid in enumerate(tqdm(self.feeds,desc=self.name)):\n for vane_event, (start, end) in enumerate(vanePositions):\n # Get the mean time of the event\n if horn == 0:\n self.vane_samples[vane_event,:] = int(start), int(end)\n\n tod_slice = tod[horn,:,:,start:end]\n btod_slice = btod[horn,:,start:end]\n\n try:\n idHot, idCold = self.findHotCold(btod_slice[0,:])\n except (NoHotError,NoColdError) as e:\n self.logger(f'{fname}:{self.name}: No vane found in feed {feedid}.',error=e)\n break\n\n\n time= np.arange(tod_slice.shape[-1])\n for sb in range(nSBs):\n vHot = np.nanmedian(tod_slice[sb,:,idHot],axis=0)\n vCold= np.nanmedian(tod_slice[sb,:,idCold],axis=0)\n Y = vHot/vCold\n tsys = ((tHot - self.tCold)/(Y - 1.) ) - self.tCold\n self.Tsys[vane_event,horn,sb,:] = tsys\n self.Gain[vane_event,horn,sb,:] = ((vHot - vCold)/(tHot - self.tCold))\n\n if self.do_plots:\n pass\n\n chans = np.arange(tsys.size)\n peaks,properties = find_peaks(tsys,prominence=5,width=[0,60])\n widths = (properties['right_ips']-properties['left_ips'])*2\n bad = np.zeros(tsys.size,dtype=bool)\n bad[[0,512,len(bad)-1]] = True\n bad[np.arange(0,1024,64,dtype=int)] = True\n frequency = data['spectrometer/frequency'][sb,:]\n\n if self.do_plots:\n pass\n\n for peak,width in zip(peaks,widths):\n sel = np.abs(chans - peak) < width\n bad[sel] = True\n f0 = np.argmin((chans - (peak - width))**2)\n f1 = np.argmin((chans - (peak + width))**2)\n if self.do_plots:\n pyplot.fill_between([frequency[f0],frequency[f1]],[0,0],[200,200],color='grey',alpha=0.5,hatch='/')\n self.Spikes[vane_event,horn,sb,:] = bad\n\n tod_slice[sb,:,:] /= self.Gain[vane_event,horn,sb,:,np.newaxis]\n\n if self.do_plots:\n pyplot.ylim(0,200)\n pyplot.xlabel('Frequency (GHz)')\n pyplot.ylabel(r'$T_\\mathrm{sys}$')\n pyplot.xlim(26,30)\n pyplot.savefig('figures/Tsys_Spikes_Example.png')\n pyplot.show()", "title": "" } ]
0f103fc209bd989834da306461e14d99
Copy weights of Trainer's model to this model's weights.
[ { "docid": "91516ef8a36fe1c7703d319f56089274", "score": "0.6777867", "text": "def copyModel(self, model):\n self.model.set_weights(model.get_weights())", "title": "" } ]
[ { "docid": "f3a34e4992226f0c7fe056c772df87bc", "score": "0.72203034", "text": "def set_weights(self, weights):\n # Legacy support\n if legacy_models.needs_legacy_support(self):\n layers = legacy_models.legacy_sequential_layers(self)\n for layer in layers:\n nb_param = len(layer.weights)\n layer.set_weights(weights[:nb_param])\n weights = weights[nb_param:]\n\n if self.model is None:\n self.build()\n self.model.set_weights(weights)", "title": "" }, { "docid": "b184aeb83446a91657f2d0648026f7dc", "score": "0.7064983", "text": "def set_weights(self, *args, **kwargs):\n self.model.set_weights(*args, *kwargs)", "title": "" }, { "docid": "060e60a15d895798fa36da35744500cc", "score": "0.69995004", "text": "def set_weights(self, params):\n self.model.set_weights(params)", "title": "" }, { "docid": "31e4a58b94ee336c6306b3f408222495", "score": "0.69338673", "text": "def tie_weights(self):\n self._tie_or_clone_weights(self.pred_layer.proj, self.transformer.embeddings)", "title": "" }, { "docid": "b974bf2beeca026b362a8f0106a82c8a", "score": "0.6933844", "text": "def _update_weights(self):\n # Update the weights layer by layers\n for layer in self.layers:\n layer.updateWeights(self.learning_rate)", "title": "" }, { "docid": "0477cf6cea8eccf5c3b1d9b9014fcdde", "score": "0.68990934", "text": "def set_weights(self, weights):\n self.w = weights[\"w\"]", "title": "" }, { "docid": "b4e1fc583dcf136b978850a6474ff860", "score": "0.68729144", "text": "def set_weights(self, weights):\n self.weights = np.array(weights).flatten().copy()", "title": "" }, { "docid": "d0051e4b347494414f0ba6b01aac6a20", "score": "0.6865517", "text": "def set_model_weights(self, params):", "title": "" }, { "docid": "142ca9a348f4bd7f162df76a49e63740", "score": "0.6861835", "text": "def set_weights(self, weights):\n self._weights = weights\n self.normalize_weights()", "title": "" }, { "docid": "5811e3ba3e60902c890d72ab3904ed76", "score": "0.6856639", "text": "def set_weights(self, weights):\n pass", "title": "" }, { "docid": "3a583f96be8bba647e163c9a603fdcf7", "score": "0.6849612", "text": "def transfer_weights_pre(self):\n W, target_W = self.model.get_weights(), self.target_model.get_weights()\n for i in range(len(W)):\n noise = np.random.rand(1)[0]/10\n target_W[i] = W[i]+noise\n self.target_model.set_weights(target_W)", "title": "" }, { "docid": "4aa377ce700068cb12252aeb3c9e2726", "score": "0.6844025", "text": "def set_weights(self, weights):\n\n self.weights = weights\n return self", "title": "" }, { "docid": "f592b014944de696bd2d5f9e2016eaef", "score": "0.6824989", "text": "def tie_weights(self):\n self._tie_or_clone_weights(self.lm_head,\n self.transformer.wte)", "title": "" }, { "docid": "af077e874975027a38e7aee159ea50b6", "score": "0.6823507", "text": "def transfer_weights(self):\n W, target_W = self.model.get_weights(), self.target_model.get_weights()\n for i in range(len(W)):\n target_W[i] = self.tau * W[i] + (1 - self.tau)* target_W[i]\n self.target_model.set_weights(target_W)", "title": "" }, { "docid": "70d5db5f0b6a8bb110cb5688ec1cdec0", "score": "0.67944413", "text": "def tie_weights(self):\n self._tie_or_clone_weights(self.lm_loss, self.transformer.word_embedding)", "title": "" }, { "docid": "118246db94dfa17539e1093ed5b6115a", "score": "0.6762178", "text": "def _append_weights(self):\n if self.layer_names is None: # get all trainable weights\n weights = []\n for layer in self.model.layers:\n w_list = [keras.backend.get_value(w) for w in layer.trainable_weights]\n weights.append((layer, w_list))\n # 
self.weights.append(weights)\n else:\n weights = []\n for layer in self.layer_names:\n w_list = [keras.backend.get_value(w) for w in self.layer_dict[layer].trainable_weights]\n weights.append((layer, w_list))\n # self.weights.append(weights)\n self.weights.append(weights)", "title": "" }, { "docid": "e92ca950877fb0870ae7772645106c38", "score": "0.6731311", "text": "def set_weights(self, weights):\n self.actor_critic.load_state_dict(weights)\n\n # Update target networks by polyak averaging.\n self.iter += 1\n self.update_target_networks()\n self.update_epsilon()", "title": "" }, { "docid": "e3393ab51ed4878f02bebd973d7d336a", "score": "0.6716801", "text": "def set_weights(self):\n\n # Make train/val weights\n self.train_weights = self.data_weighter.weighting_function(self.prop_train)\n self.val_weights = self.data_weighter.weighting_function(self.prop_val)\n\n # Create samplers\n self.train_sampler = WeightedRandomSampler(\n self.train_weights, num_samples=len(self.train_weights), replacement=True\n )\n self.val_sampler = WeightedRandomSampler(\n self.val_weights, num_samples=len(self.val_weights), replacement=True\n )", "title": "" }, { "docid": "6228bdf7781104ace78f5b80ac0ceeb6", "score": "0.6698209", "text": "def network_weights(self, weights: typing.List[np.ndarray]) -> None:\n logging.debug(\"Assigning new network weights\")\n for parameter, sample in zip(self.model.parameters(), weights):\n parameter.copy_(torch.from_numpy(sample))", "title": "" }, { "docid": "16e61d449ea3aafa7da16d281449d3cc", "score": "0.669005", "text": "def presets_with_weights(cls):\n return copy.deepcopy(backbone_presets_with_weights)", "title": "" }, { "docid": "6969a4017bf665fc44cb0fc70fdf4f49", "score": "0.66759306", "text": "def update_weights(self, w):\n pass", "title": "" }, { "docid": "fd4781331e9656d5c6f01a065d0f364a", "score": "0.6641532", "text": "def init_weights(self):\n # Prune heads if needed\n if self.config.pruned_heads:\n self.prune_heads(self.config.pruned_heads)\n\n\n # Initialize weights\n self.apply(self._init_weights)\n\n # Tie weights should be skipped when not initializing all weights\n # since from_pretrained(...) 
calls tie weights anyways\n # self.tie_weights()", "title": "" }, { "docid": "c17d870cd7bdfd3666a22c40b3944be5", "score": "0.659062", "text": "def weights(self):\n return deepcopy(self._weights)", "title": "" }, { "docid": "b2cc8b0fc0bc3699516c0fd1e188b898", "score": "0.6557338", "text": "def tie_weights(self):\n self._tie_or_clone_weights(self.cls.predictions.decoder,\n self.bert.embeddings.word_embeddings)", "title": "" }, { "docid": "c72c4b0b1f4d2af7ea87b1cc8cfaac46", "score": "0.65496105", "text": "def assign_weights_to_keras_model(cls, reference_model, keras_model):\n if not isinstance(reference_model, ModelWeights):\n raise TypeError('Reference model must be an instance of '\n 'compression_process_adapter.ModelWeights.')\n\n def assign_weights(keras_weights, tff_weights):\n for k, w in zip(keras_weights, tff_weights):\n k.assign(w)\n\n assign_weights(keras_model.trainable_weights, reference_model.trainable)\n assign_weights(keras_model.non_trainable_weights,\n reference_model.non_trainable)", "title": "" }, { "docid": "f61757183595680a64b3b87d02bb7017", "score": "0.65462756", "text": "def copy_weights(self, other_model, early_stop=False):\n for this_layer, other_layer in zip(self._model.layers, other_model._model.layers):\n if this_layer.name != other_layer.name:\n if early_stop:\n break\n else:\n continue\n this_layer.set_weights(other_layer.get_weights())", "title": "" }, { "docid": "ea5e1dfb3205d51106afa84b6d7336cf", "score": "0.6494198", "text": "def update_target_weights(self):\n self.qnetwork_target.model.set_weights(self.qnetwork_local.model.get_weights())", "title": "" }, { "docid": "7bfb719775903caf0b8d6e9bcb7725b0", "score": "0.64844924", "text": "def init_weights(self, pretrained=None):\n self.backbone.init_weights(pretrained)\n if self.with_neck:\n self.neck.init_weights()\n if self.with_keypoint:\n self.keypoint_head.init_weights()", "title": "" }, { "docid": "c7de30b9f69d79cf5ba6182c1f9d78d8", "score": "0.6467431", "text": "def save_weights(self):\n self.model.save_weights(self.checkpoint_path)", "title": "" }, { "docid": "c4cca3ec9679263bf1255690dfb8162f", "score": "0.6399715", "text": "def _legacy_weights(model):\n return model.trainable_weights + model.non_trainable_weights", "title": "" }, { "docid": "b353c2b175e79eae1c063b3075360fe6", "score": "0.63961214", "text": "def set_weights(self, W):\r\n if W.shape != self.weights.shape:\r\n return -1\r\n self.weights = W", "title": "" }, { "docid": "019e0e45c2aa152e30e09472c2998dc1", "score": "0.63913494", "text": "def get_model_weights(self):", "title": "" }, { "docid": "a2e0fbf1e00cafeeea1bffca4d0a0c0c", "score": "0.638197", "text": "def save_weight(self, weights):\n self._weights.append(weights)", "title": "" }, { "docid": "0b9de33ba9e70353584f92e8ca8ee8cf", "score": "0.6371199", "text": "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights()) # only replace the weights of our new model with the existing model", "title": "" }, { "docid": "5273de62a16f10a1f88ab7f27cdc9676", "score": "0.6368048", "text": "def init_weights(self):\n self.apply(self._init_weights)", "title": "" }, { "docid": "a07fa49d6f11a39229c25e48c7981979", "score": "0.6344491", "text": "def update_weights(self, weights):\n self.conv1[0].weight += weights['module.conv1.0.weight']\n self.conv1[0].bias += weights['module.conv1.0.bias']\n self.conv2[0].weight += weights['module.conv2.0.weight']\n self.conv2[0].bias += weights['module.conv2.0.bias']\n self.conv3[0].weight += weights['module.conv3.0.weight']\n 
self.conv3[0].bias += weights['module.conv3.0.bias']\n self.conv4[0].weight += weights['module.conv4.0.weight']\n self.conv4[0].bias += weights['module.conv4.0.bias']\n self.conv5[0].weight += weights['module.conv5.0.weight']\n self.conv5[0].bias += weights['module.conv5.0.bias']\n self.conv6[0].weight += weights['module.conv6.0.weight']\n self.conv6[0].bias += weights['module.conv6.0.bias']\n self.fc1[0].weight += weights['module.fc1.0.weight']\n self.fc1[0].bias += weights['module.fc1.0.bias']\n self.fc2[0].weight += weights['module.fc2.0.weight']\n self.fc2[0].bias += weights['module.fc2.0.bias']\n self.fc3.weight += weights['module.fc3.weight']\n self.fc3.bias += weights['module.fc3.bias']", "title": "" }, { "docid": "908f8cf207689da01c909204087b85ea", "score": "0.63375413", "text": "def load_weights(self):\n assert self.model is not None\n # print(self.name)\n # for layer in self.model.layers:\n # print(layer.name, [x.shape for x in layer.weights])\n self.model.load_weights(self.checkpoint_path, by_name=True)", "title": "" }, { "docid": "23502c255d0f060c894ba2afed179694", "score": "0.63348955", "text": "def presets_with_weights(cls):\n return copy.deepcopy(\n {**backbone_presets_with_weights, **retinanet_presets}\n )", "title": "" }, { "docid": "ec4979a371625e43e4aeea5efbd7242c", "score": "0.6331651", "text": "def init_weights(self, pretrained=None):\n if pretrained is not None:\n print_log('load model from: {}'.format(pretrained), logger='root')\n self.online_net[0].init_weights(pretrained=pretrained) # backbone\n self.online_net[1].init_weights(init_linear='kaiming') # projection\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data.copy_(param_ol.data)\n # init the predictor in the head\n self.head.init_weights()", "title": "" }, { "docid": "03c6afd339ce9464e02b9de6484f7b93", "score": "0.6323746", "text": "def with_weights(self, weights):\n\t\tself.variables['weights'] = weights\n\t\treturn self", "title": "" }, { "docid": "31dd0b610d1fd34367593d42caa02002", "score": "0.6322041", "text": "def cfg_update_weights(self, path) -> None:\n self.cfg.MODEL.WEIGHTS = path", "title": "" }, { "docid": "5eb3c3fd5fdf0bd16c23046c24c7c578", "score": "0.6315637", "text": "def set_weights(self, weights):\n raise NotImplementedError(\n str(type(self)) + \" does not implement set_weights.\")", "title": "" }, { "docid": "2a40eb88924d543486db03fa53b272b3", "score": "0.63108295", "text": "def tie_weights(self):\n output_embeddings = self.get_output_embeddings()\n if output_embeddings is not None:\n self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())", "title": "" }, { "docid": "d1936956a29c3314e38d265a8aca7990", "score": "0.62963593", "text": "def init_weights(self, pretrained=None):\n self.backbone.init_weights(pretrained=pretrained)\n if self.with_neck:\n if isinstance(self.neck, nn.Sequential):\n for m in self.neck:\n m.init_weights()\n else:\n self.neck.init_weights()\n if self.with_rpn:\n self.rpn_head.init_weights()\n if self.with_roi_head:\n self.roi_head.init_weights(pretrained)", "title": "" }, { "docid": "598733fc7f6313e280e838744fce2c84", "score": "0.6264176", "text": "def initialize_weights(self):\n self.weights = np.random.random(self.num_train_inputs + 1).reshape(-1, 1)\n # self.weights = np.zeros(self.num_train_inputs + 1).reshape(-1, 1)\n\n return self.weights", "title": "" }, { "docid": "725bab86bd6d1501a555f2bcbf6947d9", "score": "0.626294", "text": "def init_weights(self) -> None:\n 
default_init_weights(self, 1)", "title": "" }, { "docid": "d84ad8c6b25fbcd7fb0043a9482f325d", "score": "0.62612516", "text": "def get_weights(self):\n return self.weights.copy()", "title": "" }, { "docid": "24a0c6b574945e9a31fb6c057a199dda", "score": "0.6250459", "text": "def initialize_weights(self):\n # TODO: it would be less error-prone to save the weights with\n # the regression params rather than as regression attributes.\n self.time_w = np.ones(self.y_obs_dim)\n self.obs_w = np.ones(self.num_obs)", "title": "" }, { "docid": "cbe1dd7e79897526105b81f5592d880d", "score": "0.6236472", "text": "def save_weights(self):\n\n # Directory creation\n if not os.path.exists(self.weights_directory):\n os.mkdir(self.weights_directory)\n\n # Save the model weights\n self.model.save_weights(self.model_file)", "title": "" }, { "docid": "62c59e72af905103407ba76376d528c2", "score": "0.6234076", "text": "def reset_weights(self):\n self.input_block.reset_weights()\n self.backbone.reset_weights()\n self.action_head.reset_weights()\n self.q_head.reset_weights()", "title": "" }, { "docid": "bcaba6ea3164d88d90eb175f4dbcfe85", "score": "0.62268066", "text": "def updateWeights(self, inputs):\n\t\tself.layers[0].updateWeights(inputs)", "title": "" }, { "docid": "532a2f715a669a7ff4f20c38e8c2a3d0", "score": "0.6226505", "text": "def init_weights(self, pretrained=None):\n self.generator.init_weights(pretrained)", "title": "" }, { "docid": "965aef43905d82f94697002ad4d60386", "score": "0.6202174", "text": "def broadcast_new_weights(self):\n raise NotImplementedError", "title": "" }, { "docid": "b3e921e6dfdf50ef6502efb076a671eb", "score": "0.62007093", "text": "def copy_para(from_model, to_model):\n for i, j in zip(from_model.trainable_weights, to_model.trainable_weights):\n j.assign(i)", "title": "" }, { "docid": "fcf727eebd74ddfeaf44e00797242f8c", "score": "0.61999947", "text": "def update_weights(self, model):\n for fct in self.objfcts:\n fct.update_weights(model)", "title": "" }, { "docid": "963c48f11cf491cd337320b1b55aa2af", "score": "0.61719406", "text": "def get_weights(self):\n\n return self._model.weights", "title": "" }, { "docid": "b1c8bd210a15d5e58d9ca61eb53c8caf", "score": "0.6144328", "text": "def tie_weights(self):\n output_embeddings = self.get_output_embeddings()\n if output_embeddings is not None and self.config.tie_word_embeddings:\n self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())\n\n if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:\n if hasattr(self, self.base_model_prefix):\n self = getattr(self, self.base_model_prefix)\n self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)", "title": "" }, { "docid": "d736d8b38a79160655be2476fc6df1fb", "score": "0.6136646", "text": "def restore_model_weights(self, model_weights):\n\n self._check_model_weights()\n self.model_weights = model_weights", "title": "" }, { "docid": "bdbd1a6cefc2a416c24a2d8b219aa95a", "score": "0.61289334", "text": "def get_weights(self):\n # Legacy support\n if legacy_models.needs_legacy_support(self):\n layers = legacy_models.legacy_sequential_layers(self)\n weights = []\n for layer in layers:\n weights.append(layer.get_weights())\n return weights\n\n if self.model is None:\n self.build()\n return self.model.get_weights()", "title": "" }, { "docid": "4a77ffe98a431a4c24410c1e1628203c", "score": "0.6119925", "text": "def set_weights(self, weights):\n if np.size(weights) == 1:\n weights = np.full(self.npoly, weights)\n self.mangle_.weights = weights", 
"title": "" }, { "docid": "22f76188e4faa60a9f6a9ab94e78d0fe", "score": "0.61197114", "text": "def reinitialize_all_weights(self):\n # TEMPORARY\n np.random.seed(5)\n for key, layer in list(self.layers.items())[1:]:\n self.layers[key][\"weights\"] = np.random.randn(self.layers[key][\"weights\"].shape[0], self.layers[key][\"weights\"].shape[1])\n self.layers[key][\"biases\"] = np.random.randn(self.layers[key][\"biases\"].shape[0], self.layers[key][\"biases\"].shape[1])", "title": "" }, { "docid": "8f02217d0327689054e3788b61d776e5", "score": "0.6108455", "text": "def tie_weights(self):\n output_embeddings = self.get_output_embeddings()\n input_embeddings = self.get_input_embeddings()\n\n if getattr(self.config, \"tie_word_embeddings\", True):\n output_embeddings.weight = input_embeddings.weight\n if input_embeddings.num_additional_embeddings > 0:\n assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings\n output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight\n\n if hasattr(output_embeddings, \"out_features\") and hasattr(input_embeddings, \"num_embeddings\"):\n output_embeddings.out_features = input_embeddings.num_embeddings\n if hasattr(output_embeddings, \"out_additional_features\") and hasattr(\n input_embeddings, \"num_additional_embeddings\"\n ):\n output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings", "title": "" }, { "docid": "871267a7becc1e63c2c4f5a22d573d42", "score": "0.610617", "text": "def set_weights(self, weights):\n params = self.weights\n if len(params) != len(weights):\n raise ValueError('Length of the specified weight list (' +\n str(len(weights)) +\n ') does not match the number of weights '\n 'of the optimizer (' + str(len(params)) + ')')\n weight_value_tuples = []\n param_values = backend.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError('Optimizer weight shape ' + str(pv.shape) +\n ' not compatible with '\n 'provided weight shape ' + str(w.shape))\n weight_value_tuples.append((p, w))\n backend.batch_set_value(weight_value_tuples)", "title": "" }, { "docid": "e75f1d59ac5e7ed95367319290b9c8c9", "score": "0.61008275", "text": "def load_weigths_into_target_network(self):\n logging.debug(\"Transfer Weight!\")\n logging.debug(\"Epsilon: {}\".format(self.epsilon))\n self.network.save_weights(self._save_path)\n self.target_network.load_weights(self._save_path)", "title": "" }, { "docid": "354fabcb67fee841153221c12d99a488", "score": "0.6096222", "text": "def reset_weights(self):\n self.weights = np.array([])\n for i in range(self.layers - 1):\n size = (self.sizes[i] + 1, self.sizes[i+1])\n self.weights = np.append(self.weights,\n np.random.normal(scale=0.1, size=size))", "title": "" }, { "docid": "917f89c6b3b527aa9fc89132dfba0b85", "score": "0.6095068", "text": "def sync_weights(self) -> None:\n if self.remote_workers():\n weights = ray.put(self.local_worker().get_weights())\n for e in self.remote_workers():\n e.set_weights.remote(weights)", "title": "" }, { "docid": "a5bdf4ef613ca9d6f6bd0b61e1b131a0", "score": "0.6093802", "text": "def get_weights(self):\n return self.model.get_weights()", "title": "" }, { "docid": "a5bdf4ef613ca9d6f6bd0b61e1b131a0", "score": "0.6093802", "text": "def get_weights(self):\n return self.model.get_weights()", "title": "" }, { "docid": "bf69e45f8293510f8eecfae5266737b1", "score": "0.6093731", "text": "def set_weights(self, weights):\n self.method.mpo.load_weights(weights)\n 
self.method.vqvae.load_state_dict(weights['vae_state_dict'])", "title": "" }, { "docid": "3040e7d5fb29f542acf3ab8ae1c2f461", "score": "0.6090987", "text": "def normalize_weights(self) -> 'Graph':\n new_edges = self.edges / self.out_degree[self.senders]\n return self.update(edges=new_edges)", "title": "" }, { "docid": "c7eca49d5bb1f259b7ca4a0e00ffbfa0", "score": "0.60894984", "text": "def target_train(self):\n weights_model = self.model.get_weights()\n weights_target = self.target_model.get_weights()\n for i in range(len(weights_model)):\n weights_target[i] = self.TAU * weights_model[i] + (\n (1 - self.TAU) * (weights_target[i]))\n self.target_model.set_weights(weights_target)", "title": "" }, { "docid": "5d1c1b21910e201a21012669a9ff2c2a", "score": "0.6065806", "text": "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n\n self.fc_h_to_t.bias.data.fill_(0)\n self.fc_h_to_t.weight.data.uniform_(-0.1, 0.1)\n self.fc_ctx_to_t.bias.data.fill_(0)\n self.fc_ctx_to_t.weight.data.uniform_(-0.1, 0.1)", "title": "" }, { "docid": "98cf4c6cfad98c278c9a81e33ec1387b", "score": "0.60575414", "text": "def init_weights(self):\r\n\r\n for name, param in self.rnn.named_parameters():\r\n if 'weight' in name:\r\n torch.nn.init.xavier_uniform_(param.data)\r\n if 'bias' in name:\r\n param.data.fill_(0)\r\n nn.init.kaiming_uniform_(self.embed.weight)", "title": "" }, { "docid": "d700431479908d5c57c96d54ad1a3669", "score": "0.60550326", "text": "def set_tied(self):\r\n self.lm_head.set_embeddings_weights(self.transformer.wte.weight)", "title": "" }, { "docid": "71790ac9d5d13ce0238da63f72741c54", "score": "0.6054073", "text": "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.init_h_s.weight.data.uniform_(-0.1, 0.1)\n self.init_h_s.bias.data.fill_(0)\n self.init_c_s.weight.data.uniform_(-0.1, 0.1)\n self.init_c_s.bias.data.fill_(0)\n self.unbind.weight.data.uniform_(-0.1, 0.1)\n self.unbind.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)", "title": "" }, { "docid": "f1372f3ba1bc3a9670e23568b78526e6", "score": "0.60433716", "text": "def train_all(self):\n for key, layer in list(self.layers.items())[1:]:\n self.layers[key][\"weights_trainable\"] = np.ones_like(\n self.layers[key][\"weights_trainable\"]\n )\n self.layers[key][\"biases_trainable\"] = np.ones_like(\n self.layers[key][\"biases_trainable\"]\n )", "title": "" }, { "docid": "3a12e6cc4f913af0f0655135ecc8cea3", "score": "0.6034738", "text": "def load_weights(self, weights):\n pass", "title": "" }, { "docid": "0f98c022ff0efd92bae0caca35440158", "score": "0.6008206", "text": "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n self.fcStop.bias.data.fill_(0)\n self.fcStop.weight.data.uniform_(-0.1, 0.1)", "title": "" }, { "docid": "5043b677e406d8beea78669bc04b619c", "score": "0.60081965", "text": "def load_weights(self, precomp):\n\t\tfor n in range(len(self.layers[0])):\n\t\t\tneuron = self.layers[0].neurons[n]\n\t\t\tneuron.weights = precomp[0]", "title": "" }, { "docid": "a08e166c0f838f7467ae98d6b5dabd82", "score": "0.6003627", "text": "def _update_weights_current_algorithm(self):\n for idx in range(len(self.layers)):\n fullyconnectedlayer = self.layers[idx].FullyConnectedLayer\n weights = fullyconnectedlayer.weights\n delta_update = self.weight_deltas[idx]\n fullyconnectedlayer.weights = weights - 
delta_update * self.learning_rate", "title": "" }, { "docid": "e0f169ed16dfb66cdc0fd0c93bd79ab1", "score": "0.6002468", "text": "def _load_weights(self):\n if self.last_epoch is not None and self.last_epoch >= 0:\n self.params = t2v.load_params(\n (\n self.model_path / \"model_{}.npz\".format(self.last_epoch)\n ).as_posix()\n )\n else:\n self.params = t2v.load_params(self._get_last_model().as_posix())", "title": "" }, { "docid": "7801466a367dc0643d419749597edbe1", "score": "0.60016996", "text": "def weights(self):\n # Select vectors for vocab words.\n weights = torch.stack([\n self.loader.vectors[self.loader.stoi[s]]\n for s in self.vocab\n ])\n\n # Padding + UNK zeros rows.\n return torch.cat([\n torch.zeros((2, self.loader.dim)),\n weights,\n ])", "title": "" }, { "docid": "1503a6a5130eb7f19c3fd77d6838601b", "score": "0.5996078", "text": "def presets_with_weights(cls):\n return copy.deepcopy(\n {**backbone_presets_with_weights, **yolo_v8_detector_presets}\n )", "title": "" }, { "docid": "4b4e6d9e90d4f8d0e6df2c51cfb26318", "score": "0.5994921", "text": "def init_weights(self):\n self.mlc.bias.data.fill_(0)\n self.mlc.weight.data.uniform_(-0.1, 0.1)\n self.embedding.weight.data.uniform_(-0.1, 0.1)", "title": "" }, { "docid": "148906802f7a9e5ffba7d3066457a83b", "score": "0.5994814", "text": "def save_weights(self):\n self.__encoder.save_weights(os.path.join(self._serialization_path, self._model_name + \"_weights.h5\"))", "title": "" }, { "docid": "ab51f688f63e8c60ab96740024592896", "score": "0.59885156", "text": "def _copy_weights(self, source_network, target_network):\n for target_param, source_param in zip(\n target_network.parameters(), source_network.parameters()\n ):\n target_param.data.copy_(source_param.data)", "title": "" }, { "docid": "335939716fa8a89ac8d9da37ec3f3b4e", "score": "0.598822", "text": "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight.data)", "title": "" }, { "docid": "99cb857dc28306233f3b681efa940eb1", "score": "0.598797", "text": "def _copy_weights(self, source_network, target_network):\n for target_param, source_param in zip(target_network.parameters(), source_network.parameters()):\n target_param.data.copy_(source_param.data)", "title": "" }, { "docid": "cdf1d81df27215d232f67dc3e1339af8", "score": "0.5973659", "text": "def extract_weights(model):\n trainable_weights = []\n for layer in model.layers:\n trainable_weights += collect_trainable_weights(layer)\n return trainable_weights", "title": "" }, { "docid": "6b76d13388ff4afbb79c5dfa7e62971f", "score": "0.59698224", "text": "def initialize_weights(self, weights_initializer, bias_initializer):\n wshapes = [[self.input_size, self.hidden_size[0]]]\n layer_indices = np.arange(1, len(self.hidden_size))\n for i in layer_indices:\n wshapes.append([self.hidden_size[i-1], self.hidden_size[i]])\n wshapes.append([self.hidden_size[-1], self.output_size])\n\n bshapes = []\n bias_indices = np.arange(0, len(self.hidden_size))\n for i in bias_indices:\n bshapes.append([1, self.hidden_size[i]])\n bshapes.append([1, self.output_size])\n\n self.weights = [init_weights(s, weights_initializer) for s in wshapes]\n self.biases = [init_weights(s, bias_initializer) for s in bshapes]\n\n self.trainable_variables = self.weights + self.biases", "title": "" }, { "docid": "0c4bea206f05a8e4c56489e89acffeab", "score": "0.5964016", "text": "def receive_weights(self, model_params):\n self.model.load_state_dict(copy.deepcopy(model_params))", "title": "" }, { "docid": 
"fbe066b7ba4549970e9aff70c52519f5", "score": "0.59633505", "text": "def init_weights(self):\n for m in self.modules():\n if type(m) is torch.nn.Linear:\n torch.nn.init.normal_(m.weight)\n torch.nn.init.zeros_(m.bias)", "title": "" }, { "docid": "fbe066b7ba4549970e9aff70c52519f5", "score": "0.59633505", "text": "def init_weights(self):\n for m in self.modules():\n if type(m) is torch.nn.Linear:\n torch.nn.init.normal_(m.weight)\n torch.nn.init.zeros_(m.bias)", "title": "" }, { "docid": "fbe066b7ba4549970e9aff70c52519f5", "score": "0.59633505", "text": "def init_weights(self):\n for m in self.modules():\n if type(m) is torch.nn.Linear:\n torch.nn.init.normal_(m.weight)\n torch.nn.init.zeros_(m.bias)", "title": "" }, { "docid": "fbe066b7ba4549970e9aff70c52519f5", "score": "0.59633505", "text": "def init_weights(self):\n for m in self.modules():\n if type(m) is torch.nn.Linear:\n torch.nn.init.normal_(m.weight)\n torch.nn.init.zeros_(m.bias)", "title": "" }, { "docid": "76a647fcbb8075236af0ce084f9ac104", "score": "0.5962387", "text": "def _init_weights(self):\r\n nn.init.xavier_normal_(self.out.weight)", "title": "" }, { "docid": "9cd47ea04de72a8c82a274c40f8f9882", "score": "0.59586823", "text": "def set_weights(self, weights):\n\n if isinstance(weights, type(self._weights)) == False:\n self._weights = np.array(shape=(len(weights), 1), dtype='float')\n\n self._weights = weights", "title": "" }, { "docid": "6cac75d67b597cca5fe3089884942c80", "score": "0.5947711", "text": "def set_weights(model: torch.nn.ModuleList, weights: fl.common.Weights) -> None:\n state_dict = OrderedDict(\n {\n k: torch.tensor(np.atleast_1d(v))\n for k, v in zip(model.state_dict().keys(), weights)\n }\n )\n model.load_state_dict(state_dict, strict=True)", "title": "" }, { "docid": "c5379ac79f3de98dd90b851974632b17", "score": "0.5947521", "text": "def save(self):\n self.model.save_weights(self.path)", "title": "" }, { "docid": "e0a1517c0a712c1aee1bc980fa53359a", "score": "0.5940898", "text": "def on_train_begin(self, logs={}):\n self.weights = []\n self.weights_diff = None\n self.scale_stats = None\n super(Weights_each_batch, self).__init__()\n\n if self.layer_names is not None:\n self.layer_dict = {layer.name: layer for layer in self.layers}\n if not self.store_all:\n self._scale_stats_list = []\n self.weights_diff = []\n self._append_weights()", "title": "" } ]
0cb0fb633d76572e5b881ae74abbc422
Update the hash by removing the previous item previtm and adding the next item nextitm. Return the updated hash value.
[ { "docid": "213175198ac4dd069bc90b5e87438ad7", "score": "0.71016806", "text": "def slide(self, previtm, nextitm):\n self.curhash = (self.curhash * self.HASH_BASE) + ord(nextitm)\n self.curhash -= ord(previtm) * (self.HASH_BASE ** self.seqlen)\n return self.curhash", "title": "" } ]
[ { "docid": "7bcef7fa2b1c6e9d48abb1a27dfdd9d4", "score": "0.59157664", "text": "def __re_cal_rhash(self):\n time_stp = datetime.datetime.now()\n self._prev_hashes.append(Hash(self.root_hash, time_stmp))", "title": "" }, { "docid": "304a18c014e428671494e225b163d128", "score": "0.56726354", "text": "def add_hash(self, hashval, item):\n\n self._hash[hashval] = item", "title": "" }, { "docid": "0e48f64bc775b210119f041966e0e734", "score": "0.55551034", "text": "def rehash(self,oldhash,size):\n return ((oldhash + 1) % size)", "title": "" }, { "docid": "3efcba3142bc84afb9998487fa8ef8e0", "score": "0.553378", "text": "def add_hash(self, hashval, item):\n\n self._hash.setdefault(hashval, []).append(item)", "title": "" }, { "docid": "8921757055818c8fe740bc8c7f7b0bb2", "score": "0.5471714", "text": "def rehash(self, old_hash: int, size: int) -> int:\n return (old_hash + 1) % size", "title": "" }, { "docid": "d98652e2ae18e56b1a186652cbd021b0", "score": "0.5465609", "text": "def rehash(self, oldhash, size):\n return (oldhash + 1) % size", "title": "" }, { "docid": "a6012be213e9e9a2b910151e3e1907b3", "score": "0.54593873", "text": "def hash(self, item):\n return item % len(self.keys)", "title": "" }, { "docid": "f7aca20edc50dbbc52a5b0edf6de2a96", "score": "0.54004574", "text": "def rehash(self, hashVal):\n return (hashVal + 1) % len(self.keys)", "title": "" }, { "docid": "38cc8b6a71ea2dbf2ab40ce1bc304884", "score": "0.5331807", "text": "def add(self, item):\n if self.size == int(len(self.table) * LOAD_FACTOR):\n # Rehash whole table\n table = [SENTINEL] * (GROW * len(self.table))\n for x in self.table:\n if x is not SENTINEL:\n Hash.__add(table, x)\n\n self.table = table\n\n self.size += Hash.__add(self.table, item)", "title": "" }, { "docid": "0d444c00a094a5512e08851b72d66429", "score": "0.5314634", "text": "def slide(self, old, new):\n self.hash = (self.hash*self.base - ord(old)*self.magic + ord(new)) % self.prime", "title": "" }, { "docid": "7d6309d0cee6a97e243852a807474592", "score": "0.5304252", "text": "def update(self, item):\n # Remove LRU node from current LFU node\n item['lfu'].val[1].remove(item['lru'])\n\n # If next LFU does not exist or is not +1 freq, create a new node\n # with +1 freq and insert as next LFU node.\n if (not item['lfu']._next or\n item['lfu']._next.val[0] != item['lfu'].val[0] + 1):\n\n # Create new node\n newLFU = Node((item['lfu'].val[0] + 1, DLL()))\n\n # Update tail if necessary\n if not item['lfu']._next:\n self.lfu.tail = newLFU\n\n # Update next\n newLFU._next = item['lfu']._next\n try:\n item['lfu']._next._prev = newLFU\n except AttributeError:\n pass\n\n # Update previous\n newLFU._prev = item['lfu']\n item['lfu']._next = newLFU\n\n self.lfu.length += 1\n\n # Remove LFU node if contains no LRU nodes\n if len(item['lfu'].val[1]) == 0:\n self.lfu.remove(item['lfu'])\n\n # Update LFU to next\n item['lfu'] = item['lfu']._next\n\n # Insert LRU node into new LRU DLL\n item['lfu'].val[1].insert(item['lru'].val)\n item['lru'] = item['lfu'].val[1].head", "title": "" }, { "docid": "d8da856845b961643acd38d9bd05b21a", "score": "0.5230364", "text": "def hashItem(self):\n\n return _hash(str(self.itsPath))", "title": "" }, { "docid": "5cefc8348e52f04abb95c3929e20a1ed", "score": "0.5212471", "text": "def remove(self, key):\n pos = key % self.size\n if self.hash[pos] == None:\n return\n head = self.hash[pos]\n if head.key == key:\n self.hash[pos] = head.next\n return\n prev, head = head, head.next\n while head:\n if head.key == key:\n prev.next = head.next\n return\n prev, head = 
head, head.next", "title": "" }, { "docid": "973ac92402130801669da0a7899aca08", "score": "0.5202224", "text": "def calcHash(self, startHash: int, inst: ghidra.program.model.listing.Instruction) -> int:\n ...", "title": "" }, { "docid": "e36d788d1d84e2b9856767b42a33b33a", "score": "0.5197462", "text": "def insertItem(self, item):\n if item.key in hashdict:\n if(item in item_list):\n print(item_list)\n # Move the existing item to the head of item_list.\n item_index = item_list.index(item)\n item_list[:] = item_list[:item_index] + item_list[item_index+1:]\n item_list.insert(0, item)\n else:\n # Remove the last item if the length of cache exceeds the upper bound.\n if len(item_list) >= self.length:\n self.removeItem(item_list[-1]) #delete\n # If this is a new item, just append it to\n # the front of item_list.\n hashdict[item.key] = item\n item_list.insert(0, item) #put\n #print(f\"Cache-->Number of Users={self.item_list}\\nNumber of Users Cached={len(hash)}\")", "title": "" }, { "docid": "7e0ff65a828b517008c2aec646fe3603", "score": "0.51785254", "text": "def add_item(self, key, value):\n if self.head is None:\n self.head = Node(key, value)\n self.head.index = self.hash(key)\n return\n\n curr_node = self.head\n while curr_node.next is not None:\n if curr_node.next.index == self.hash(key):\n curr_node.next.value = Node(key, value).value\n return\n else:\n curr_node = curr_node.next\n\n curr_node.next = Node(key, value)\n curr_node.next.index = self.hash(key)", "title": "" }, { "docid": "934aea3622f9eee26d6a901ca41d01c0", "score": "0.51781267", "text": "def rehash(self, newlen):\n old_hash_table = self.hash_table[:]\n self.len = newlen\n self.hash_table = [set() for _ in range(self.len)]\n for chain in old_hash_table:\n for hash_number in chain:\n self.hash_table[hash_number % self.len].add(hash_number)\n print(self.hash_table)", "title": "" }, { "docid": "8cdd84873a4ada8d3f6e7a5271d49378", "score": "0.513518", "text": "def add(self, item):\n digests = []\n\n for i in range(self.hash_count):\n digest = mmh3.hash(item, i) % self.size\n digests.append(digest)\n\n self.bit_array[digest] = True", "title": "" }, { "docid": "2844118225648bed7f7ed3690b97b0a2", "score": "0.5130361", "text": "def calculateHash(self):\n value = str(self.index) + str(self.prevHash) + str(self.timestamp) + str(self.nonce)\n for hash in self.transactions:\n value += self.transactions[hash].calculateHash()\n sha = hashlib.sha256(value.encode('utf-8'))\n return str(sha.hexdigest())", "title": "" }, { "docid": "c9e1457ec1c7eff27dfddf2b87177bd7", "score": "0.51262856", "text": "def add(self, item):\n # Get hash values tuple from get_hash function\n # To improve performance avoid multiple hash computes. 
Compute hash only once and use it in linear equations.\n hash_val1, hash_val2 = self.get_hash(str(item), self.case_sensitive)\n\n # Use above hash values in linear family of equation to simulate multiple hash functions.\n # Performance is significantly improved by skipping need to compute hash for every hash function iteration.\n # Add item by setting bit to 1 for every position generated by each hash function.\n try:\n current_hash = hash_val1\n for i in range(self.hash_count):\n position = current_hash % self.filter_size\n self.bloom_bitarray[position] = 1\n current_hash += hash_val2\n self.counter += 1\n except Exception as e:\n # ToDo: Implement granular exception handling\n logger.log(str(e))\n sys.exit(1)", "title": "" }, { "docid": "61e383d16f2cd4610e7f8bf6dbf8920a", "score": "0.5093433", "text": "def prev_hashes(self) -> Iterable[Hash32]:\n ...", "title": "" }, { "docid": "9819ff5c64f57f2a9623d5f4490f5d29", "score": "0.5089689", "text": "def pop(self):\n # TODO: Write function to pop here\n result = self.head\n self.head = self.head.next\n if self.head is not None:\n self.head.previous_hash = \"0\"\n return result.data", "title": "" }, { "docid": "9d5012d361f18b8adf3fbb7ca033f9fd", "score": "0.5082364", "text": "def previous_hash(self):\n return self._previous_hash", "title": "" }, { "docid": "e607db1d5f1bf3081fda4c4305f22765", "score": "0.5053349", "text": "def item_hash(self):\n self_copy = self.copy()\n self_copy.pop('keycomments', None)\n try:\n return hash(frozenset(self_copy.items()))\n except Exception:\n return", "title": "" }, { "docid": "5eb615db93dbd5a56c7e991b309130b5", "score": "0.50518763", "text": "def __updAttr(self):\n self.len = (1 if self.__head else 0)\n self.min = (self.__head.data if self.__head else None)\n self.max = (self.__head.data if self.__head else None)\n self.sum = (self.__head.data if self.__head else None)\n\n temp = (self.__head.next if self.__head else None)\n while (temp != None):\n self.len += 1\n self.min = (temp.data if (temp.data < self.min) else self.min)\n self.max = (temp.data if (temp.data > self.max) else self.max)\n self.sum += temp.data\n temp = temp.next", "title": "" }, { "docid": "e1ad43fd68204b18ff520e22524eacba", "score": "0.5034893", "text": "def calculate_hash(self):\n\n to_hash = (w[\"key\"] for w in self.widgets)\n self.hash = hash(frozenset(to_hash))", "title": "" }, { "docid": "343430b4875f8049743802f759d60efa", "score": "0.5027678", "text": "def put(self, key, value):\n # Your code here\n## automatic rehashing\n\n print('load factor ', self.get_load_factor())\n if self.get_load_factor() >= 0.7:\n print('warning load factor is: ', self.get_load_factor())\n self.resize(self.capacity*2)\n print('resizing to: ', self.get_load_factor())\n\n## get the index for the key\n index = self.hash_index(key)\n # create a new entry\n new_entry = HashTableEntry(key, value)\n## search the linked list at the index for the key\n # reference the item or linked list at the index\n slot = self.storage[index]\n # if there is nothing at the index create a LL and insert the entry at head\n## insert the key and value at the head of the list at that index\n if slot is None:\n ll = LinkedList()\n ll.insert_at_head(new_entry)\n # assign the LL at the right index\n self.storage[index] = ll\n self.number_of_elements += 1\n # else if there is already something at the hash_index\n else: \n## if the key is found, overwrite the value stored there\n if slot.find_by_key(key) is not None:\n slot.find_by_key(key).value = value\n else:\n # if not then add entry 
to head\n slot.insert_at_head(new_entry)\n self.number_of_elements += 1", "title": "" }, { "docid": "852cd9e7589a0f4f5f084989df3ead67", "score": "0.5022441", "text": "def removeItem(self, item):\n del hashdict[item.key]\n del item_list[item_list.index(item)]\n #print(f\"Cache-->Number of Users={len(self.item_list)}\\nNumber of Users Cached={len(hash)}\")", "title": "" }, { "docid": "96cc68745af39fca658a2957bad144cc", "score": "0.50210166", "text": "def _set_hash(self):\n self._hash = sum(map(hash,self.cart))\n for key, value in self.__class__._properties.iteritems():\n try:\n self._hash += hash(value)\n except TypeError:\n pass", "title": "" }, { "docid": "36ed7a8a7afbb0c04e2394c04d777acb", "score": "0.50102854", "text": "def link(self, block):\n\t\tself.prev_hash = block.hash", "title": "" }, { "docid": "2e66a85350b257923129513bb78260bf", "score": "0.49871993", "text": "def _update_head(self, item):\n if item is self._head:\n return\n self._unlink(item)\n item.next = self._head\n if self._head is not None:\n self._head.prev = item\n self._head = item\n if self._tail is None:\n self._tail = item", "title": "" }, { "docid": "eee175d35030b6bf790839262e1fe196", "score": "0.49870154", "text": "def hash_list(self, inlist):\n h = 0\n i = 1\n for v in inlist:\n h = h + v*i\n i = i + 1\n return h", "title": "" }, { "docid": "7e29e5cc74db832c095183222cd3c703", "score": "0.49813887", "text": "def update_inv_rem(item: str) -> str:\n with open('inventory.p', 'rb') as fin:\n inv_dict = pickle.load(fin)\n needed_list = inv_dict[item]['ids']\n inv_dict[item]['num'] -= 1\n removed_item_id = needed_list.pop()\n # updates the list in the dictionary with the new list\n # (the list with the item_id removed)\n inv_dict[item]['ids'] = needed_list\n with open(\"inventory.p\", 'wb') as fin:\n pickle.dump(inv_dict, fin)\n return removed_item_id", "title": "" }, { "docid": "c349defa7120101755ec1346dae9ed54", "score": "0.49811608", "text": "def update(self, data):\n for algo in self.hashers:\n self.hashers[algo].update(data)", "title": "" }, { "docid": "8b3edc3450a58a7836f89bc807dd1c6d", "score": "0.4961823", "text": "def update_hash(self):\n genh = str(self.nn_param_choices['p_values']) + str(self.nn_param_choices['d_values']) + str(self.nn_param_choices['q_values']) +str(self.nn_param_choices['sp_values']) + str(self.nn_param_choices['sd_values']) + str(self.nn_param_choices['sq_values'])\n self.hash = hashlib.md5(genh.encode(\"UTF-8\")).hexdigest()\n self.accuracy = 0.0", "title": "" }, { "docid": "c965b2cd9086bbf3ef38cfeec296454d", "score": "0.4959743", "text": "def __next_hash(self):\n self.__idx += self.__idx_hashkey\n self.__idx %= self.__capacity\n return self.__idx", "title": "" }, { "docid": "9fe462fe54c6b97e263ff3d33c013703", "score": "0.4952077", "text": "def add(self,obj):\n flag = 0\n #i.e. 
Table is empty\n #Note: We require the below \"if\" so that we can assign the \"front\" a reference\n if self.size == 0: # [None for _ in range(self.initial_num_buckets)]\n index = self.hash_function(obj,self.initial_num_buckets)\n self.table[index] = ChainNode(obj)\n self.size += 1\n self.front = self.table[index]\n self.back = self.front\n else:\n #REHASH IF THE SIZE LIMIT HAS BEEN REACHED\n if (self.size // self.initial_num_buckets) > 0.75:\n recordObj = [None for _ in range(self.size)]\n count = 0\n itr = self.front\n while itr != None:\n recordObj[count] = itr\n count += 1\n itr = itr.link\n self.initial_num_buckets *= 2\n newLinkedHashTable = LinkedHashTable( self.initial_num_buckets )\n self = newLinkedHashTable\n for index in recordObj:\n self.add(index.obj)\n\n index = self.hash_function(obj,self.initial_num_buckets)\n\n #We also need to identify whether the incoming element is being added to the hash table\n #or being linked. Bcoz it is only then that we will be able to decide whether the \"chain\" or \"link\"\n #or \"prev\" is to be assigned a reference\n\n #Since it is a hashTable entry, it will be next in order; So,we link and not chain\n\n if(self.table[index] == None):\n self.table[index] = ChainNode(obj)\n self.table[index].prev = self.back\n self.size += 1\n\n #***Note: If something is to be added in the chain, then it will be added at some point, else we\n # should keep it \"none\"\n self.back.link = self.table[index]\n self.back = self.table[index]\n else:\n #Chaining the elements in a slot of the hash table\n #We iterate till the point we get none\n forIter = self.table[index]\n while forIter.chain != None:\n #Note: We do not add an object if it has already been added once\n if(forIter.chain.obj == obj):\n flag = 1\n break\n if flag == 0 and not self.contains(obj):\n temp = forIter\n forIter.chain = ChainNode(obj)\n self.size += 1\n forIter.chain.prev = temp\n self.back.link = forIter.chain\n self.back = forIter.chain", "title": "" }, { "docid": "f59c4975bef938448253d8152cf15ba5", "score": "0.49472588", "text": "def push(self, key, value):\n\n if key in self.hash:\n # Move the existing item to the head of item_list.\n delnode=self.hash[key]\n self.linked_list.remove(delnode)\n delnode.data=value\n self.linked_list.push(delnode)\n self.hash[key]=delnode\n \n else:\n\n # If this is a new item, just append it to\n # the front of item_list.\n # If Cache is full then First Pop least recent item from the list\n if len(self.hash)>=self.length:\n delnode=self.linked_list.pop() \n self.hash.pop(delnode.key)\n addnode=Node(data=value,key=key)\n self.linked_list.push(addnode)\n \n self.hash[key] = addnode", "title": "" }, { "docid": "f402d879334e848949ef5db9851557ef", "score": "0.4935453", "text": "def remove(self,obj):\n #Resizing...\n if self.size < ( (1 - self.load_limit) * self.initial_num_buckets ):\n recordObj = [None for _ in range(self.size)]\n if self.size > 0:\n count = 0\n itr = self.front\n\n #Now we will never get the error \"list out of range\" for recordObj[]\n while itr != None and count < self.size:\n recordObj[count] = itr\n count += 1\n itr = itr.link\n self.initial_num_buckets //= 2\n self.table = [None for _ in range(self.initial_num_buckets)]\n self.size = 0\n for index in recordObj:\n self.add(index.obj)\n\n index = self.hash_function(obj,self.initial_num_buckets)\n if self.table[index] == None:\n print(\"Element doesn't exist in the table\")\n\n elif self.table[index].obj == obj:\n #If the obj is not the first element of the linkedHashTable\n if 
self.table[index].prev != None:\n self.table[index].prev.link = self.table[index].link\n self.table[index] = self.table[index].chain\n self.size -= 1\n #If the obj is the first element of the linkedHashTable\n else:\n if self.table[index].prev == None and self.size == 1:\n print(\"Hash table has been emptied!\")\n self.table[index] = None\n self.front = None\n self.size = 0\n elif self.table[index].prev == None:\n self.front = self.table[index].link\n self.table[index] = self.table[index].chain\n #i.e. the next \"front\" is in the same chain\n if self.table[index] == self.front:\n self.table[index].prev = None #Making it the \"front\"\n else:\n self.front.prev = None\n #We will have a new front now\n\n self.size -= 1\n else:\n #Now iterate through the hash chain to find the perfect match\n forIter = self.table[index]\n while forIter != None:\n if forIter.chain != None and forIter.chain.obj == obj:\n break\n forIter = forIter.chain\n if forIter != None:\n forIter.chain = forIter.chain.chain\n self.size -= 1\n else:\n print(\"Element doesn't exist in the table\")", "title": "" }, { "docid": "d16aa9a18119de6d3b2aefafdc04e6e3", "score": "0.49213415", "text": "def __add(table, item):\n for i in Hash.__index_from_item(item, table):\n if table[i] is SENTINEL:\n table[i] = item\n return 1\n elif table[i] == item:\n break\n\n # Never reached without break\n return 0", "title": "" }, { "docid": "f5d9d7bb3c085db577a8984fdb70f3af", "score": "0.4915619", "text": "def insertItem(self, item):\n\n if item.key in self.hash:\n # Move the existing item to the head of item_list.\n item_index = self.item_list.index(item)\n self.item_list[:] = self.item_list[:item_index] + self.item_list[item_index+1:]\n self.item_list.insert(0, item)\n else:\n # Remove the last item if the length of cache exceeds the upper bound.\n if len(self.item_list) > self.length:\n self.removeItem(self.item_list[-1])\n\n # If this is a new item, just append it to\n # the front of item_list.\n self.hash[item.key] = item\n self.item_list.insert(0, item)", "title": "" }, { "docid": "df246d170bb91c284b96b36172aabee4", "score": "0.49060935", "text": "def _hashes(self, item):\r\n m = hashlib.sha1()\r\n m.update(item)\r\n digits = m.hexdigest()\r\n\r\n # Add another 160 bits for every 8 (20-bit long) hashes we need\r\n for i in range(self.num_hashes / 8):\r\n m.update(str(i))\r\n digits += m.hexdigest()\r\n\r\n hashes = [int(digits[i*5:i*5+5], 16) % self.hashbits for i in range(self.num_hashes)]\r\n return hashes", "title": "" }, { "docid": "cefb28d9cae829997289134007bd9a27", "score": "0.48714116", "text": "def hash(self):\n return self.curhash", "title": "" }, { "docid": "acb2a3cd59006508cb5dfa84ba9080a8", "score": "0.486815", "text": "def add(self, item):\n if item not in self:\n self.map[item] = [item, self.end[self.PREV], self.end]\n self.end[self.PREV][self.NEXT] = self.map[item]\n self.end[self.PREV] = self.map[item]", "title": "" }, { "docid": "25b1fd065d0444fb1d679b5fc3c355be", "score": "0.48639905", "text": "def remove(self, old):\n self.magic = (self.magic*self.base_inverse) % self.prime\n self.hash = (self.hash - ord(old)*self.magic) % self.prime", "title": "" }, { "docid": "ab0d0c35bcbd80f8d8570f91783b2be6", "score": "0.48615184", "text": "def insert(self, item):\n\n item = str(item)\n h = hashes(item, self.d)\n for di in range(self.d):\n self.store[di][h[di] % self.w] += 1", "title": "" }, { "docid": "60880dc78a65bcc84ed02ebf2ef2b9c6", "score": "0.4850893", "text": "def create_hash(self):\n data = 
f\"{self.index}{self.timestamp}{self.previous_hash}{self.transaction}\"\n new_hash = sha256(data.encode()).hexdigest()\n return new_hash", "title": "" }, { "docid": "9f4163131497817b1c8bef2f0054bdea", "score": "0.48474437", "text": "def rehash(self):\n new_table = [None]*self.capacity\n temp_table = self.table\n self.table = new_table\n self.size = 0\n for i in range(len(temp_table)):\n nodel = temp_table[i]\n if nodel is not None:\n self.insert(nodel.key, nodel.value)", "title": "" }, { "docid": "cb6495742b460693ebee97e24c4e980a", "score": "0.4846568", "text": "def add(self, item):\n if isinstance(self.last, int):\n self.last = (self.last + 1) % self.size\n else:\n self.last = 0\n self.first = 0\n self.buffer[self.last] = item\n return\n self.buffer[self.last] = item\n if self.first == self.last:\n self.first = (self.first + 1) % self.size", "title": "" }, { "docid": "4167c21edc1718ff8063b15391ececab", "score": "0.48456827", "text": "def put(self, key, value):\n pos = key % self.size\n if self.hash[pos] == None:\n self.hash[pos] = Node(key, value)\n return\n head = prev = self.hash[pos]\n while head:\n if head.key == key:\n head.value = value\n return\n prev = head\n head = head.next\n prev.next = Node(key, value)", "title": "" }, { "docid": "c15c439ee55a06e350c2a1132c8bb3db", "score": "0.48339352", "text": "def _rehash(self):\n # print \"Rehashing to:\", self._size * 2\n\n self._size *= 2\n new_hash = HashMap(self._size)\n\n for bucket in self._array:\n if bucket:\n for entry in bucket:\n for val in entry[VAL]:\n # Needs to be put value per value or python freaks out\n # Some perf is lost but whatchu' gon' do\n new_hash.put(entry[KEY], val)\n\n self._array = new_hash.get_array()", "title": "" }, { "docid": "5f34435781cc34d5228c2378fd0e1b60", "score": "0.48333487", "text": "def add(self, item):\n\t\tdigests = []\n\t\t\n\t\tfor i in range(self.hash_count):\n\n\t\t\t# create digest for given item.\n\t\t\t# i work as seed to mmh3.hash() function\n\t\t\t# With different seed, digest created is different\n\t\t\tdigest = mmh3.hash(item, i) % self.size\n\t\t\tdigests.append(digest)\n\n\t\t\t# set the bit True in bit_array\n\t\t\tself.bit_array[digest] = True", "title": "" }, { "docid": "12c3cf8a82c3305d07c2770b3bd1d604", "score": "0.48111567", "text": "def add_hash(d):\n if isinstance(d, dict):\n if \"_id\" in d:\n d[\"hash\"] = self.hash(serialization=json.dumps(d))\n for k, v in d.items():\n d[k] = add_hash(v)\n elif isinstance(d, list):\n for i, x in enumerate(d):\n d[i] = add_hash(x)\n return d", "title": "" }, { "docid": "70ff30a1da03d39e5c768477db7a0e23", "score": "0.48082453", "text": "def put(self, key, value):\n index = self.hash_index(key)\n\n current_entry = self.storage[index]\n\n while current_entry is not None and current_entry != key:\n current_entry = current_entry.next\n\n if current_entry is not None:\n current_entry.value = value\n else:\n new_entry = HashTableEntry(key, value)\n new_entry.next = self = self,storage[index]\n self.storage[index] = new_entry", "title": "" }, { "docid": "826711511afe5a77905dbadaaded755b", "score": "0.48004076", "text": "def insert(self, e):\n self.current_items = self.current_items + 1\n if self.current_items > self.len - ((self.len // 4) + 1):\n self.rehash(self.len * 2)\n self.hash_table[e % self.len].add(e)", "title": "" }, { "docid": "1dd64f41437cf07a9c1c435b726f215f", "score": "0.47901466", "text": "def remove(self, key):\n rem_item = self.find(key)\n prev_node = None\n curr_node = self.head\n next_node = curr_node.next\n count = 0\n while 
curr_node.next is not None:\n if rem_item == curr_node.value:\n break\n prev_node = curr_node\n curr_node = prev_node.next\n next_node = curr_node.next\n count += 1\n\n prev_node.next = next_node\n next_node = prev_node.next", "title": "" }, { "docid": "01039bb0101a022c4754cbd118736c44", "score": "0.47891334", "text": "def append(self, new):\n self.hash = (self.hash*self.base + ord(new)) % self.prime\n self.magic = (self.magic*self.base) % self.prime", "title": "" }, { "docid": "3ad1e5bdac06332bbce7ed931cbe2ca0", "score": "0.47849432", "text": "def add(self, item):\r\n for i in range(self.hash_count): \r\n # create digest for given item. \r\n # use i as random seed for mmh3 hash function\r\n array_index = mmh3.hash(item, i) % self.size \r\n \r\n # set the bit True in bit_array \r\n self.bit_array[array_index] = True", "title": "" }, { "docid": "d3a3ea5031ab565313c9763b8415aade", "score": "0.47673973", "text": "def __hash__(self) -> int:\n if getattr(self, '_hash', None) is None:\n self._hash = _t.ItemsView(self)._hash() # type: ignore\n return self._hash # type: ignore", "title": "" }, { "docid": "7c9fa095bfe9c55cbd05adf6878d29ab", "score": "0.47424194", "text": "def inc(self, key: str) -> None:\n if key in self.dict:\n cur_block = self.dict[key]\n cur_block.keys.remove(key)\n else:\n cur_block = self.head\n \n if cur_block.val + 1!= cur_block.after.val:\n new_block = Block(cur_block.val + 1)\n cur_block.insert_after(new_block) \n \n cur_block.after.keys.add(key)\n \n self.dict[key] = cur_block.after\n if cur_block.val != 0 and not cur_block.keys:\n cur_block.remove()", "title": "" }, { "docid": "c2db19c947ee351166afce513b743914", "score": "0.47102684", "text": "def update_adjacent(self, new_addr, new_hash, flag):\n if (flag == 0):\n self.pred_hash = new_hash\n self.pred_addr = new_addr\n else:\n self.succ_hash = new_hash\n self.succ_addr = new_addr", "title": "" }, { "docid": "2fd8e9e74beb6aaa7d8a5e2a94eefeb4", "score": "0.47089422", "text": "def _update_hist_tracker( self):\n self.update_history.append( self.prev_vals)", "title": "" }, { "docid": "58f26d8e09247e35438d8f768357045a", "score": "0.47042957", "text": "def update_hashes(self):\n for algo in (\"MD5\", \"SHA-1\", \"SHA-256\"):\n self.helper.log_debug(\n f'[VirusTotal] updating hash {algo}: {self.attributes[algo.lower().replace(\"-\", \"\")]}'\n )\n self.helper.api.stix_cyber_observable.update_field(\n id=self.observable[\"id\"],\n input={\n \"key\": f\"hashes.{algo}\",\n \"value\": self.attributes[algo.lower().replace(\"-\", \"\")],\n },\n )", "title": "" }, { "docid": "86872f4a8e49407ce7e6414ef939367e", "score": "0.46866283", "text": "def update(self, data):\n tss_lib.Tspi_Hash_UpdateHashValue(self.get_handle(), len(data),\n _c_byte_array(data))", "title": "" }, { "docid": "ca241ecb8cdc867fd641376b10ab30e8", "score": "0.46748185", "text": "def remove(self, key):\n\n previous = self.head\n \n if self.head is None:\n return None\n \n if self.head.data == key:\n self.head = previous.next\n return str(key) + \" was removed successfully\"\n \n else:\n while previous:\n if previous.next.data == key:\n previous.next = previous.next.next\n return str(key) + \" was removed successfully\"\n else:\n previous = previous.next", "title": "" }, { "docid": "36e7ec9d5c0e74aaf82405a9508384cb", "score": "0.4673151", "text": "def put(self, key, value):\n i = self.hash_index(key)\n if self.__table[i] is None:\n self.__table[i] = HashTableEntry(key, value)\n self.__size += 1\n else:\n curr = self.__table[i]\n # iterate through the linked list until 
we either find the existing\n # item with the given key or reach the end of the list\n while curr.key != key and curr.next is not None:\n curr = curr.next\n # key exists, overwrite\n if curr.key == key:\n curr.value = value\n # append the value to the linked list\n else:\n curr.next = HashTableEntry(key, value)\n self.__size += 1", "title": "" }, { "docid": "6fb7b0979e6d88f1a49870d3c1288ece", "score": "0.46584907", "text": "def remove(self, key):\n index = self.hash(key)\n item = prev = self.map[index] # Keep track of current and previous items\n if not item: return\n if item.pair[0] == key:\n self.map[index] = item.next\n else:\n item = item.next\n while item: # Move through the list, if there's an item that matches what we want to remove, delete the pointers referencing it\n if item.pair[0] == key:\n prev.next = item.next\n break\n else:\n item, prev = item.next, prev.next", "title": "" }, { "docid": "a1eddd032132316a22211fbf5d5dd7a7", "score": "0.46568635", "text": "def __hash__(self):\n # only use last 5 steps...because that's probably the most relevant\n # for what to do next\n return hash(self.current_subgoal)", "title": "" }, { "docid": "d5378362a66be64a7b5ddb60e7a05f71", "score": "0.46539837", "text": "def update_hash(self):\n genh = str(self.geneparam['optimizer'])\n for layer in self.geneparam['layers']:\n genh += str(layer['nb_neurons']) + layer['activation']\n\n self.hash = hashlib.md5(genh.encode(\"UTF-8\")).hexdigest()\n\n self.total_score = 0", "title": "" }, { "docid": "5a9568a470877e471f526c7e28e5520a", "score": "0.4652417", "text": "def _memoize(old_memoize: defaultdict) -> defaultdict:\n new_memoize = defaultdict(dict)\n for key_sum, nums_dict in old_memoize.items():\n for nums_key, indices in nums_dict.items():\n for index, element in enumerate(self.nums):\n new_key_sum = key_sum + element\n\n if index not in indices:\n new_set = indices | set([index])\n new_nums_key = self._add_element_to_str(nums_key, element)\n new_memoize[new_key_sum][new_nums_key] = new_set.copy()\n\n return new_memoize", "title": "" }, { "docid": "e590715781db9f067d2b6ec378fb7726", "score": "0.46515068", "text": "def remove(self, value):\n # TODO: Write function to remove here\n if self.head is not None and self.head.data == value:\n self.head = self.head.next\n return\n current_block = self.head\n while current_block.next:\n if current_block.next.data == value:\n current_block.next = current_block.next.next\n if current_block.next.next is not None:\n #set previous hash of the next block to the one before the removed block\n current_block.next.next.previous_hash = current_block.hash\n break\n current_block = current_block.next", "title": "" }, { "docid": "3c010fafb30962b08ca6bbeab870cb15", "score": "0.4647725", "text": "def remove(self, key: int) -> None:\n _hash = key%self.arr_size\n if self.arr[_hash] == -1:\n return\n node = self.arr[_hash]\n if node.key == key:\n self.arr[_hash] = node.next\n if not self.arr[_hash]:\n self.arr[_hash] = -1\n return\n prev = node\n node = node.next\n while node:\n if node.key == key:\n prev.next = node.next\n return\n prev = node\n node = node.next\n return", "title": "" }, { "docid": "fae8401f34eca5ab52fee1c299180075", "score": "0.46450135", "text": "def __gen_Hash(self, lst):\r\n m = hashlib.md5()\r\n for elem in lst:\r\n m.update(elem.encode('utf-8'))\r\n return m.hexdigest()", "title": "" }, { "docid": "5623a6b9478bf434c92769249a761e83", "score": "0.46389222", "text": "def link(self, message):\n\t\tself.prev_hash = message.hash", "title": "" }, { "docid": 
"140e5178c8d273960c906a7021d48d6e", "score": "0.46350497", "text": "def remove(self, item):\n \"\"\"\n :type item: Node() \n \"\"\" \n prev = self.head\n curr = self.head\n \n temp = Node(item)\n \n while curr:\n if self.head.getData() == temp.getData():\n self.head = curr.getNext()\n if self.end.getData() == temp.getData():\n self.end = None\n break\n elif self.end.getData() == temp.getData():\n self.end = prev\n break\n elif curr.getData() == temp.getData():\n prev.setNext(curr.getNext())\n curr = None\n break\n prev = curr\n curr = curr.getNext()", "title": "" }, { "docid": "831edbaa4fd3d7e73d00e396de00e0fd", "score": "0.4628475", "text": "def updateEdge(self, oldKey, newKey, flag):\n if flag == 0 and oldKey in self.last.keys():\n weight = self.last.pop(oldKey)\n self.last[newKey] = weight\n return 0\n elif flag == 1 and oldKey in self.next.keys():\n weight = self.next.pop(oldKey)\n self.next[newKey] = weight\n return 0\n else:\n return -1", "title": "" }, { "docid": "ee7f5749b130ce8ddac3a0a105a3c791", "score": "0.46278387", "text": "def move_to_end(self, key):\n link_prev, link_next, _ = self._OrderedDict__map[key]\n link_prev[1] = link_next # update link_prev[NEXT]\n link_next[0] = link_prev # update link_next[PREV]\n root = self._OrderedDict__root\n last = root[0]\n last[1] = root[0] = self._OrderedDict__map[key] = [last, root, key]", "title": "" }, { "docid": "31214d9fe30e0031422cd48ec6b4b348", "score": "0.46245888", "text": "def set_hash(self):\n\t\tself._hash = self.hash()\n\t\treturn self._hash", "title": "" }, { "docid": "fea9991c45891111774f37493085a98a", "score": "0.46050325", "text": "def shift(self):\n oldTail = self.tail\n try:\n self.tail = self.tail.prev_node\n try:\n self.tail.next_node = None\n except AttributeError:\n self.head = None\n except AttributeError:\n raise ValueError(\"The list is empty\")\n\n return oldTail.val", "title": "" }, { "docid": "3ff17c75d1af0854b2a017ac417728f1", "score": "0.45918462", "text": "def update_hash(hash, card, deck, event):\n if event == 'draw':\n hash -= card * 10 ** (2 * len(deck))\n elif event == 'win':\n hash *= 100\n hash += card\n\n return hash", "title": "" }, { "docid": "78cd417600a6da09e5e58951db89401c", "score": "0.45872104", "text": "async def update_sig_chain_block_hash(\n self,\n Action=None,\n Error=None,\n Desc=None,\n Result=None,\n Version=None\n ):\n self._latest_hash = Result", "title": "" }, { "docid": "975aaf46b982a7caa39a98cd8b7f9442", "score": "0.45683762", "text": "def _expand(self):\n old_pairs = list(iter(self))\n self._size_factor += 1\n self._num_buckets = 2 ** self._size_factor - 1\n self._buckets = [None for _ in range(self._num_buckets)]\n self._length = 0\n\n for key, value in old_pairs:\n self.set(key, value)", "title": "" }, { "docid": "39b88c1fe7897d387ba25dc69391d843", "score": "0.45636407", "text": "def item_hash(\n item,\n): # assumption -> input is only json comparable type (dict/list/scalar)\n dhash = hashlib.md5() # nosec\n if isinstance(item, dict):\n item = {k: item_hash(v) for k, v in item.items()}\n if isinstance(item, list):\n item = [item_hash(i) for i in item].sort()\n encoded = json.dumps(item, sort_keys=True).encode()\n dhash.update(encoded)\n return dhash.hexdigest()", "title": "" }, { "docid": "e3a6d5bdf83f1f117e0524e3c389a5fa", "score": "0.45623457", "text": "def update(self, key, increment):\n for row, hash_function in enumerate(self.hash_functions):\n column = hash_function(abs(hash(key)))\n if self.count[row, column] < 0:\n\t\t\t\t\t\t\tself.count[row, column] = 0\n 
self.count[row, column] += increment", "title": "" }, { "docid": "36ccd6e1872d2f26e6d90f962239fa11", "score": "0.4559118", "text": "def put(self, key, value):\n # going to need the hash of the key\n k = self.fnv1(key)\n\n # if there is already an entire then tack on another entry on the chain\n # otherwise start a new chain\n if self.data[k] is not None:\n # add the kv pair to the head of the root node in the linked list\n self.data[k].next = HashTableEntry(key, value)\n else:\n # create a new linked list at the index in the hashtable, this will\n # cache collisions\n self.data[k] = HashTableEntry(key, value)\n self.occupied += 1\n return", "title": "" }, { "docid": "6686fa66c66a04187c19d334373f2f38", "score": "0.45587656", "text": "def inc(self, key):\n if key not in self.bucket_of_key:\n self.bucket_of_key[key] = self.buckets.insert(self.buckets.begin(), Node(0, set([key])))\n\n bucket, next_bucket = self.bucket_of_key[key], self.bucket_of_key[key].next\n if next_bucket is self.buckets.end() or next_bucket.value > bucket.value+1:\n next_bucket = self.buckets.insert(next_bucket, Node(bucket.value+1, set()))\n next_bucket.keys.add(key)\n self.bucket_of_key[key] = next_bucket\n\n bucket.keys.remove(key)\n if not bucket.keys:\n self.buckets.erase(bucket)", "title": "" }, { "docid": "4e526eb89f1b747ac676af79c2072c9f", "score": "0.4554509", "text": "def updateShoppingList(shoppingList, itemBought):\n\n shoppingList.remove(itemBought) #removes the item\n\n #itemsBoughtList.append(itemBought)\n return shoppingList#, itemsBoughtList", "title": "" }, { "docid": "3f7855a5e050a9c747c653ca103574f7", "score": "0.45541424", "text": "def _unlink(self, item):\n if item is self._head:\n self._head = item.next\n if item is self._tail:\n self._tail = item.prev\n if item.prev is not None:\n item.prev.next = item.next\n if item.next is not None:\n item.next.prev = item.prev\n item.next = None\n item.prev = None", "title": "" }, { "docid": "5fc3ac5327dbf6b0c7a50dec6cb61868", "score": "0.45461363", "text": "def new_up_next(self, nxt):\n \n self.up_next = nxt", "title": "" }, { "docid": "bb03057ce16e710cb6bbcb263e2a9f52", "score": "0.45440397", "text": "def move_up(self, item):\n while item // 2 > 0:\n if self.heap_list[item] < self.heap_list[item // 2]:\n temp = self.heap_list[item // 2]\n self.heap_list[item // 2] = self.heap_list[item]\n self.heap_list[item] = temp\n item = item // 2", "title": "" }, { "docid": "f173ddf837a2c161b698f84e76c97238", "score": "0.45428094", "text": "def hash(self, val):\n if val is not None:\n for m in self:\n setattr(m, 'hash', val)", "title": "" }, { "docid": "aba94617af2afc977519181e90f5d9dc", "score": "0.45319173", "text": "def return_prev_block_hash(b):\n prev_block_hash = b[4:36]\n return reverse(str(prev_block_hash.hex()))", "title": "" }, { "docid": "2f73ca86fcf8b60fe1e9d03eb96f42e8", "score": "0.45299017", "text": "def previous_hashes(self) -> Optional[Iterable[Hash32]]:\n ...", "title": "" }, { "docid": "a85a8d06053098b0dce9576e917085d4", "score": "0.4524526", "text": "def update_hash(self, path: pathlib.Path) -> None:\n hash_dir = self._hash_dir(path=path)\n hash_dir.mkdir(exist_ok=True, parents=True)\n\n new_hsh = compute_hash(text=path.read_text())\n\n pth = hash_dir / new_hsh\n pth.write_text('passed')", "title": "" }, { "docid": "60871fb51796af899557aaf78edb338d", "score": "0.4521043", "text": "def update_stats(iface: str, rx_bytes: int, tx_bytes: int) -> None:\n storage = shelve.open(__PREV_DATA__)\n rx_total, tx_total = storage.get(__total__.format(iface), (0, 
0))\n storage[__total__.format(iface)] = (rx_total + rx_bytes, tx_total + tx_bytes)\n storage[__old__.format(iface)] = (rx_bytes, tx_bytes)\n storage.close()\n return", "title": "" }, { "docid": "2c9140a99ea6e7c8938f9e026aa49ff5", "score": "0.45158276", "text": "def advance(prev, cur):\n save_cur = cur\n save_next = cur.next\n cur.next = prev\n return (save_cur, save_next)", "title": "" }, { "docid": "8725952b0586cb05eb292a00e4765bc0", "score": "0.45146078", "text": "def update(self, arg):\n self.__init_state()\n self.__list.add(Node(arg, None))\n self.__hash(self.__list.toString())", "title": "" }, { "docid": "6a26bf0c4fc9c71143aa520f02c6b5d2", "score": "0.45144868", "text": "def remove_dups_hashable(linkedlist):\n unique_data = {}\n\n n = linkedlist.head\n while n is not None:\n try:\n unique_data[n.data]\n except KeyError:\n unique_data[n.data] = None\n else:\n prev.next = n.next\n n = prev # move n back 1 link so the duplicate is ignored\n prev = n\n n = n.next", "title": "" }, { "docid": "ac03bca29d5feec3dcf87468cbae9987", "score": "0.45135555", "text": "def popitem(self):\n if self.used == 0:\n raise KeyError(\"empty dictionary\")\n entry0 = self.table[0][0]\n entry = entry0\n if entry0.value is None:\n i = entry0.hash\n if i >= self.size or i < i:\n i = 1\n entry = self.table[i][0]\n while entry.value is None:\n i += 1\n if i >= self.size:\n i = 1\n entry = self.table[i][0]\n res = entry.key, entry.value\n self._del(entry)\n entry0.hash = i + 1\n return res", "title": "" }, { "docid": "3b0373ff19834a889cbf39bfc5be8bba", "score": "0.4507812", "text": "def update(self, new_state):\n assert new_state.key == self.key\n assert len(self._lr_items) == len(new_state._lr_items)\n\n def item_key(item):\n return item.prod_index, item.offset, item.lookahead\n\n new_followed_by = {\n item_key(item): item.followed_by\n for item in new_state._lr_items\n }\n\n # If none of the new items adds any new followed_by symbols,\n # then there's nothing to update.\n if not any(new_followed_by[item_key(item)] - item.followed_by\n for item in self._lr_items):\n return False\n\n # Really do the work of merging the two states.\n self._lr_items = OrderedFrozenSet(\n LRItem(*item_key(item),\n item.followed_by | new_followed_by[item_key(item)])\n for item in self._lr_items\n )\n return True", "title": "" } ]
c3ebe951af73157175644fe9992ba196
gives a list of the faces
[ { "docid": "36e3fe5aabc13febf76929dbb5a3bb45", "score": "0.8191274", "text": "def list_faces(self):\n\t\tface = sorted(self.faces.items(), key=itemgetter(1), reverse =True);f =[]\n\t\tfor each in face: f.append(each[0])\n\t\treturn f", "title": "" } ]
[ { "docid": "8b5285d9316ec1c056307da87a87dc2c", "score": "0.76194733", "text": "def faces(img):\n return detector(img, 1)", "title": "" }, { "docid": "f65231c2483b76edc38e022adfab5345", "score": "0.7515052", "text": "def detect_faces(self):\n try:\n response = self.rekognition_client.detect_faces(\n Image=self.image, Attributes=['ALL'])\n faces = [RekognitionFace(face) for face in response['FaceDetails']]\n print(f\"Detected {len(faces)} faces.\" )\n except ClientError:\n print(\"Couldn't detect faces in %s.\", self.image_name)\n raise\n else:\n return faces", "title": "" }, { "docid": "83c8868b937ef8abdd9050a51119366a", "score": "0.75014144", "text": "def faces(self):\n return self._get_pass_thru_attr_(PassThruAttr.Faces)", "title": "" }, { "docid": "8273bee6bcdf9c5e181d89fcf8ec44ea", "score": "0.74958295", "text": "def define_faces(self):\n faces = []\n for edge in self.edges:\n linked_faces = list(edge.link_faces)\n faces += linked_faces\n faces = list(set(faces))\n\n return sorted(faces, key=lambda x: x.index)", "title": "" }, { "docid": "1f4c9746ae58f71b5e019f1d4f737f3c", "score": "0.7438012", "text": "def detect_faces(self):\n try:\n response = self.rekognition_client.detect_faces(\n Image=self.image, Attributes=['ALL'])\n faces = [RekognitionFace(face) for face in response['FaceDetails']]\n logger.info(\"Detected %s faces.\", len(faces))\n except ClientError:\n logger.exception(\"Couldn't detect faces in %s.\", self.image_name)\n raise\n else:\n return faces", "title": "" }, { "docid": "c24b8d68048175590713669e20232cce", "score": "0.7390304", "text": "def findFaces(self, image):\n foundFaces = []\n # The following is called as recommended, will get rid of magic numbers\n detected = cv.HaarDetectObjects(image, self.cascade, self.storage,\n 1.2, 3, cv.CV_HAAR_DO_CANNY_PRUNING, (100,100))\n if detected: # Yay it can find multiple faces!\n for (x,y,w,h),n in detected:\n foundFaces.append((x,y,w,h))\n\n self.faces = foundFaces", "title": "" }, { "docid": "c24b8d68048175590713669e20232cce", "score": "0.7390304", "text": "def findFaces(self, image):\n foundFaces = []\n # The following is called as recommended, will get rid of magic numbers\n detected = cv.HaarDetectObjects(image, self.cascade, self.storage,\n 1.2, 3, cv.CV_HAAR_DO_CANNY_PRUNING, (100,100))\n if detected: # Yay it can find multiple faces!\n for (x,y,w,h),n in detected:\n foundFaces.append((x,y,w,h))\n\n self.faces = foundFaces", "title": "" }, { "docid": "b50503dafe0b48931ba814a0aa13706c", "score": "0.7328871", "text": "def get_faces(self, filename):\n face_cascade = cv2.CascadeClassifier('../haarcascades/haarcascade_frontalface_default.xml')\n eye_cascade = cv2.CascadeClassifier('../haarcascades/haarcascade_eye.xml')\n img = cv2.imread(filename)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.5, 5) # Originally 1.1\n\n face_images = []\n\n for (x, y, w, h) in faces:\n roi_gray = gray[y:y + h, x:x + w]\n roi_color = img[y:y + h, x:x + w]\n\n eyes = eye_cascade.detectMultiScale(roi_gray)\n if len(eyes) >= 1:\n face_images.append(roi_color)\n\n return face_images", "title": "" }, { "docid": "e127370f79882a2fe874436519677a1f", "score": "0.7230001", "text": "def vertex_faces(self, vertex: BRepVertex) -> List[BRepFace]:\n map = TopTools_IndexedDataMapOfShapeListOfShape()\n topexp_MapShapesAndUniqueAncestors(\n self.native_brep, TopAbs_VERTEX, TopAbs_FACE, map\n )\n results = map.FindFromKey(vertex.occ_vertex)\n iterator = TopTools_ListIteratorOfListOfShape(results)\n faces = []\n 
while iterator.More():\n face = topods_Face(iterator.Value())\n faces.append(BRepFace(face))\n iterator.Next()\n return faces", "title": "" }, { "docid": "b5fbeab92efeec24b3fba174ca499658", "score": "0.7220828", "text": "def get_faces_in_frame(self, frame):\n logger.trace(\"Getting faces for frame: '%s'\", frame)\n return self.data.get(frame, list())", "title": "" }, { "docid": "ee6bc8195c9ec0104dff3ce0714b76e8", "score": "0.720979", "text": "def getElemFaces(self):\n pass", "title": "" }, { "docid": "d36c4b75c37a6328959039b5a54a1a16", "score": "0.7167989", "text": "def GetMultipleInstanceFaces(self,surface_points,pointtolerance):\n return self.DM.globals[\"GetMultipleFace\"](self.fe_inst.faces,surface_points,pointtolerance)", "title": "" }, { "docid": "43a71ac932244530366bd23b3d3b715f", "score": "0.7128547", "text": "def image_faces(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = FACE_CASCADE.detectMultiScale(gray, 1.3, 5)\n return faces", "title": "" }, { "docid": "f2abafc70c6390c9822d18a6de1e4c4e", "score": "0.71206224", "text": "def faces(self, embedding = None):\n # Which embedding should we use ?\n if embedding is None:\n # Is self._embedding available ?\n if self._check_embedding_validity():\n embedding = self._embedding\n else:\n if self.is_planar(set_embedding=True):\n embedding = self._embedding\n self._embedding = None\n else:\n raise ValueError(\"No embedding is provided and the graph is not planar.\")\n\n from sage.sets.set import Set\n\n # Establish set of possible edges\n edgeset = Set([])\n for edge in self.to_undirected().edges():\n edgeset = edgeset.union(Set([(edge[0],edge[1]),(edge[1],edge[0])]))\n\n # Storage for face paths\n faces = []\n path = []\n for edge in edgeset:\n path.append(edge)\n edgeset -= Set([edge])\n break # (Only one iteration)\n\n # Trace faces\n while (len(edgeset) > 0):\n neighbors = embedding[path[-1][-1]]\n next_node = neighbors[(neighbors.index(path[-1][-2])+1)%(len(neighbors))]\n tup = (path[-1][-1],next_node)\n if tup == path[0]:\n faces.append(path)\n path = []\n for edge in edgeset:\n path.append(edge)\n edgeset -= Set([edge])\n break # (Only one iteration)\n else:\n path.append(tup)\n edgeset -= Set([tup])\n if (len(path) != 0): faces.append(path)\n return faces", "title": "" }, { "docid": "e9ffcc0d8aa30d1a544662fcf60b4daf", "score": "0.71180165", "text": "def h3_get_faces(h):\n pass", "title": "" }, { "docid": "8e5a71b34463ca24d51f4bb22f6b82c5", "score": "0.7114189", "text": "def findFaces(self, image):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.equalizeHist(gray)\n faces = self.faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.15,\n minNeighbors=5,\n minSize=(5,5),\n flags = cv2.CASCADE_SCALE_IMAGE\n )\n \n names = []\n for face in faces:\n (x,y,w,h) = face\n cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)\n names.append(self.__recognize(gray, face)) \n return (image, gray, faces, names)", "title": "" }, { "docid": "7e41fe776426c21c241ae7f0c8d241e4", "score": "0.7072871", "text": "def faces_so(F):\n return [ f.edge.idx for f in F ]", "title": "" }, { "docid": "e97b86507bf4c0d7001ace4946ba20d0", "score": "0.7029804", "text": "def edge_faces(self, edge: BRepEdge) -> List[BRepFace]:\n map = TopTools_IndexedDataMapOfShapeListOfShape()\n topexp_MapShapesAndUniqueAncestors(\n self.native_brep, TopAbs_EDGE, TopAbs_FACE, map\n )\n results = map.FindFromKey(edge.occ_edge)\n iterator = TopTools_ListIteratorOfListOfShape(results)\n faces = []\n while iterator.More():\n face = topods_Face(iterator.Value())\n 
faces.append(BRepFace(face))\n iterator.Next()\n return faces", "title": "" }, { "docid": "25fad594e8a4825371307e6abc5dd37f", "score": "0.70042056", "text": "def detect_faces(path):\n vision_client = vision.Client()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision_client.image(content=content)\n\n faces = image.detect_faces()\n\n print('Faces:')\n for face in faces:\n print('anger: {}'.format(face.emotions.anger))\n print('joy: {}'.format(face.emotions.joy))\n print('surprise: {}'.format(face.emotions.surprise))\n print vars(face)", "title": "" }, { "docid": "3892169b673515632e99263a423fe0a9", "score": "0.7003379", "text": "def get_face_verts(self, f_ix: int) -> List[mn.Vector3]:\n verts: List[mn.Vector3] = []\n for ix in range(3):\n index = int(f_ix * 3 + ix)\n v_ix = self.mesh_data.indices[index]\n verts.append(\n self.mesh_data.attribute(mn.trade.MeshAttribute.POSITION)[v_ix]\n )\n return verts", "title": "" }, { "docid": "4c3577823a5bb138b05105612112eeaf", "score": "0.6950026", "text": "def faces_edge(self):\n for n, face in enumerate(self.faces_v):\n faces = []\n # get the key of the edge from its vertices\n combis = combinations(face, 2) # [('a','b'), ('a', 'c'), …]\n for edge in combis:\n edge_ = tuple(sorted(edge))\n faces.extend([k for k, v in self.edges_v.items()\n if v == edge_])\n # update the faces dict\n self.faces_e[n] = sorted(faces)", "title": "" }, { "docid": "dfb4444f947ee773f9049d4c87ece8b0", "score": "0.6911607", "text": "def list_faces(CollectionId=None, NextToken=None, MaxResults=None):\n pass", "title": "" }, { "docid": "e77f5e70e1267ded80f95b6965ca82f3", "score": "0.69044214", "text": "def _find_faces(self, image):\n\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n found_faces = self._detector(gray, 1)\n shapes = []\n\n for face_rect in found_faces:\n shapes.append(utils.dlib.shape_to_np(self._predictor(gray, face_rect)))\n\n return found_faces, shapes", "title": "" }, { "docid": "1eaa04e1647b9f512717f490596d5261", "score": "0.6889507", "text": "def faces(self) -> set:\n self._face_set = set()\n\n for simplex in self._simplices:\n _num_nodes = len(simplex)\n\n for r in range(_num_nodes, 0, -1):\n for face in combinations(simplex, r):\n self._face_set.add(face)\n\n return self._face_set", "title": "" }, { "docid": "4cd7f35b22c0a0831517684fe380d493", "score": "0.68200845", "text": "def get_boundary_faces(self):\n return self.indices(self.has_face_tag(FaceTag.BOUNDARY))", "title": "" }, { "docid": "da1d12ea9239636fbd50d2110751e33e", "score": "0.68180704", "text": "def get_all_face(self):\n queryset = Person.objects.all()\n\n all_face = list()\n for person in queryset:\n face = dict()\n face['id'] = person.id\n face['name'] = person.name\n face['image_link'] = person.image_link\n face['face_encoding'] = [np.fromstring(person.face_encoding, dtype=float, sep=',')]\n all_face.append(face)\n return all_face", "title": "" }, { "docid": "2f054bf07ece4bf37f8e2b08135c5b04", "score": "0.6801", "text": "def detect_faces(self,frame):\n original = frame\n # for dlib detector\n frame = frame.convert('RGB')\n width, height = frame.size\n\n frame = np.copy(np.asarray(frame))\n frame.setflags(write=True)\n\n dets = self.detector(frame, 1)\n windows = []\n faces = []\n for i, d in enumerate(dets):\n window = self._pad_box([d.left(), d.top(), d.right(), d.bottom()],width, height)\n windows.append(window)\n #cv2.rectangle(frame, (d.left(), d.top()), (d.right(), d.bottom()), (255, 0, 255), 2)\n faces.append(frame[d.top():d.bottom(), 
d.left():d.right()])\n\n faces = [original.crop(window) for window in windows]\n for face,window in zip(faces,windows):\n face.window = window\n\n return faces", "title": "" }, { "docid": "d71c12427ab0751d71bb031922ddfb3a", "score": "0.6799738", "text": "def find_faces_at_vertices(faces, npoints):\n faces_at_vertices = [[] for i in range(npoints)]\n for face_id, face in enumerate(faces):\n for vertex in face:\n faces_at_vertices[vertex].append(face_id)\n\n return faces_at_vertices", "title": "" }, { "docid": "6782be859fc556279c7f73072124538a", "score": "0.679176", "text": "def GetMultiplePartFaces(self,surface_points,pointtolerance):\n return self.DM.globals[\"GetMultipleFaces\"](self.fe_part.faces,\n surface_points,\n pointtolerance)", "title": "" }, { "docid": "8636265fdcddfbfef0abc1a743f05ae6", "score": "0.67687035", "text": "def find_faces(img, model):\n h, w = img.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0,\n\t(300, 300), (104.0, 177.0, 123.0))\n model.setInput(blob)\n res = model.forward()\n faces = []\n for i in range(res.shape[2]):\n confidence = res[0, 0, i, 2]\n if confidence > 0.5:\n box = res[0, 0, i, 3:7] * np.array([w, h, w, h])\n (x, y, x1, y1) = box.astype(\"int\")\n faces.append([x, y, x1, y1])\n return faces", "title": "" }, { "docid": "19cda92f68bceabdb582fb9855253bd5", "score": "0.67674935", "text": "def toQuads(self):\n faces = []\n for key, voxel in self.voxels.items():\n self._getObjFaces(voxel, faces)\n return faces", "title": "" }, { "docid": "0a874d88fac80013eba9a7ca088bd8ce", "score": "0.67642564", "text": "def faces_to_element_facelist(faces: Faces, node0: int) -> vtk.vtkIdList:\n face_idlist = vtk.vtkIdList()\n\n nfaces = len(faces)\n face_idlist.InsertNextId(nfaces) # Number faces that make up the cell.\n for face in faces: # Loop over all the faces\n #print(face)\n face_idlist.InsertNextId(len(face)) # Number of points in face\n\n # Insert the pointIds for the face\n #for i in face:\n #face_idlist.InsertNextId(i + node0)\n [face_idlist.InsertNextId(i + node0) for i in face]\n return face_idlist", "title": "" }, { "docid": "58b9ffa94c9c1f4fcb1622df2526a813", "score": "0.6717889", "text": "def draw_faces(self, faces=None, color=None):\n self.face_color = color\n faces = faces or self.faces\n vertex_xyz = self.vertex_xyz\n facets = []\n for face in faces:\n facets.append(\n {\n \"points\": [vertex_xyz[vertex] for vertex in self.volmesh.halfface_vertices(face)],\n \"name\": \"{}.face.{}\".format(self.volmesh.name, face),\n \"color\": self.face_color[face].rgb255,\n }\n )\n return compas_rhino.draw_faces(facets, layer=self.layer, clear=False, redraw=False)", "title": "" }, { "docid": "c08635d66813300a919223d94929fa66", "score": "0.67145306", "text": "def faces(self):\n arr1d = vtk2numpy(self._data.GetPolys().GetData())\n if arr1d is None:\n return []\n\n #Get cell connettivity ids as a 1D array. vtk format is:\n #[nids1, id0 ... idn, niids2, id0 ... 
idm, etc].\n if len(arr1d) == 0:\n arr1d = vtk2numpy(self._data.GetStrips().GetData())\n if arr1d is None:\n return []\n\n i = 0\n conn = []\n n = len(arr1d)\n if n:\n while True:\n cell = [arr1d[i+k] for k in range(1, arr1d[i]+1)]\n conn.append(cell)\n i += arr1d[i]+1\n if i >= n:\n break\n return conn # cannot always make a numpy array of it!", "title": "" }, { "docid": "689325fee1d6985dcc5054d41a95189e", "score": "0.6700543", "text": "def get_internal_faces(self):\n return self.indices(self.has_not_face_tag(FaceTag.BOUNDARY))", "title": "" }, { "docid": "5e5720c7cde83ff238c48f40a4edd248", "score": "0.6695563", "text": "def findFaces(self):\n\t\trects = self.detectAll()\n\t\tif len(rects)==0:\n\t\t\trects = []\n\t\telse:\n\t\t\trects[:, 2:] += rects[:, :2]\n\t\tself.analyzeFrame(rects)", "title": "" }, { "docid": "9487b6ef45c9a1b99c1de7d7821e4818", "score": "0.66625476", "text": "def detect_faces(self, frame):\n rows, cols = frame.shape[:2]\n\n self.all_faces = self.face_cascade.detectMultiScale(\n frame, scaleFactor=1.1, minNeighbors=3, flags=0,\n minSize=(int(rows / 5), int(rows / 5)),\n maxSize=(int(rows * 2 / 3), int(rows * 2 / 3)))\n\n if len(self.all_faces) == 0:\n return []\n\n self.all_faces[:,2:] += self.all_faces[:,:2]\n return self.all_faces", "title": "" }, { "docid": "eea1c0f5d5e1e030b2b68f11bf096867", "score": "0.66571057", "text": "def extract_and_adjust_faces(self, image):\n\n found_faces, shapes = self._find_faces(image)\n grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # create list of face images from gray\n ret_list = []\n for face_rect in found_faces:\n x, y, w, h = utils.dlib.rect_to_bb(face_rect)\n grey_face = HogImgProcessor._resize_face(grey[y:y + h, x:x + w])\n\n ret_list.append(grey_face)\n\n return ret_list", "title": "" }, { "docid": "2b2d51979c0f45df80edc7de9e12ef39", "score": "0.6632576", "text": "def find_faces(img_array):\n face_detect = models[\"face detect\"]\n\n # Number of times to upscale image before detecting faces.\n # When would you want to increase this number?\n upscale = 1 \n\n detections = face_detect(img_array, upscale) # returns sequence of face-detections\n detections = list(detections)\n if len(detections) > 0:\n det = detections[0] # first detected face in image\n\n # bounding box dimensions for detection\n l, r, t, b = det.left(), det.right(), det.top(), det.bottom()\n return detections", "title": "" }, { "docid": "14fb5bd74f381fbe3dc0f135b697f56a", "score": "0.6603479", "text": "def find_faces(image: Image) -> Iterable[CropData]:\n detector = cv2.CascadeClassifier(\n str(\n MODELS_DIR / \"haarcascades\" / \"haarcascade_frontalface_default.xml\"\n )\n )\n grayscale_image: Image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n return detector.detectMultiScale(\n grayscale_image,\n scaleFactor=1.3,\n minNeighbors=5\n )", "title": "" }, { "docid": "b4807ee57bc1abed1b5d67b919fad73a", "score": "0.65786636", "text": "def make_faces():\n\n\t# make the differents flags consistent\n\tfor e in me.edges:\n\t\tif e.flag & E_selected :\n\t\t\te.v1.sel = 1\n\t\t\te.v2.sel = 1\n\t\n\tNF =[]\t\t\t # NF : New faces\n\tfor f in me.faces:\n\t\tV = f.v\n\t\tnV = len(V)\n\t\tenumV = range(nV)\n\t\tE = [me.findEdge(V[i],V[(i+1) % nV]) for i in enumV]\n\t\tEsel = [x.flag & E_selected for x in E]\n\t\t\n\t\t# look for selected vertices and creates a list containing the new vertices\n\t\tnewV = V[:] \n\t\tchanges = False\n\t\tfor (i,v) in enumerate(V):\n\t\t\tif v.sel :\n\t\t\t\tchanges = True\n\t\t\t\tif Esel[i-1] == 0 and Esel[i] == 1 : newV[i] = 
get_v(v,V[i-1])\n\t\t\t\telif Esel[i-1] == 1 and Esel[i] == 0 : newV[i] = get_v(v,V[(i+1) % nV])\n\t\t\t\telif Esel[i-1] == 1 and Esel[i] == 1 : newV[i] = get_v(v,V[i-1],V[(i+1) % nV])\n\t\t\t\telse :\t\t\t\t\t\t\t\t newV[i] = [get_v(v,V[i-1]),get_v(v,V[(i+1) % nV])]\n\t\t\n\t\tif changes:\n\t\t\t# determine and store the face to be created\n\n\t\t\tlenV = [len(x) for x in newV]\n\t\t\tif 2 not in lenV :\t\t\t \n\t\t\t\tnew_f = NMesh.Face(newV)\n\t\t\t\tif sum(Esel) == nV : new_f.sel = 1\n\t\t\t\tNF.append(new_f)\n\t\t\t\t\n\t\t\telse :\n\t\t\t\tnb2 = lenV.count(2)\n\t\t\t\t\n\t\t\t\tif nV == 4 :\t\t\t\t# f is a quad\n\t\t\t\t\tif nb2 == 1 :\n\t\t\t\t\t\tind2 = lenV.index(2)\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-1],newV[ind2][0],newV[ind2][1],newV[ind2-3]]))\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-1],newV[ind2-2],newV[ind2-3]]))\n\t\t\t\t\t\n\t\t\t\t\telif nb2 == 2 :\n\t\t\t\t\t\t# We must know if the tuples are neighbours\n\t\t\t\t\t\tind2 = ''.join([str(x) for x in lenV+lenV[:1]]).find('22')\n\t\t\t\t\t\t\n\t\t\t\t\t\tif ind2 != -1 :\t # They are \n\t\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2][0],newV[ind2][1],newV[ind2-3][0],newV[ind2-3][1]]))\n\t\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2][0],newV[ind2-1],newV[ind2-2],newV[ind2-3][1]]))\n\t\t\t\t\t\t\n\t\t\t\t\t\telse:\t\t\t # They aren't\n\t\t\t\t\t\t\tind2 = lenV.index(2)\n\t\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2][0],newV[ind2][1],newV[ind2-2][0],newV[ind2-2][1]]))\n\t\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2][1],newV[ind2-3],newV[ind2-2][0]]))\n\t\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2][0],newV[ind2-1],newV[ind2-2][1]]))\n\t\t\t\t\t\n\t\t\t\t\telif nb2 == 3 :\n\t\t\t\t\t\tind2 = lenV.index(3)\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-1][1],newV[ind2],newV[ind2-3][0]]))\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-1][0],newV[ind2-1][1],newV[ind2-3][0],newV[ind2-3][1]]))\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-3][1],newV[ind2-2][0],newV[ind2-2][1],newV[ind2-1][0]]))\n\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tif\t(newV[0][1].co-newV[3][0].co).length + (newV[1][0].co-newV[2][1].co).length \\\n\t\t\t\t\t\t\t< (newV[0][0].co-newV[1][1].co).length + (newV[2][0].co-newV[3][1].co).length :\n\t\t\t\t\t\t\tind2 = 0\n\t\t\t\t\t\telse :\n\t\t\t\t\t\t\tind2 = 1\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-1][0],newV[ind2-1][1],newV[ind2][0],newV[ind2][1]]))\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2][1],newV[ind2-3][0],newV[ind2-2][1],newV[ind2-1][0]]))\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-3][0],newV[ind2-3][1],newV[ind2-2][0],newV[ind2-2][1]]))\n\t\t\t\t\n\t\t\t\telse :\t\t\t\t\t # f is a tri\n\t\t\t\t\tif nb2 == 1:\n\t\t\t\t\t\tind2 = lenV.index(2)\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-2],newV[ind2-1],newV[ind2][0],newV[ind2][1]]))\n\t\t\t\t\t\n\t\t\t\t\telif nb2 == 2:\n\t\t\t\t\t\tind2 = lenV.index(3)\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-1][1],newV[ind2],newV[ind2-2][0]]))\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-2][0],newV[ind2-2][1],newV[ind2-1][0],newV[ind2-1][1]]))\n\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tind2 = min( [((newV[i][1].co-newV[i-1][0].co).length, i) for i in enumV] )[1]\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-1][1],newV[ind2][0],newV[ind2][1],newV[ind2-2][0]]))\n\t\t\t\t\t\tNF.append(NMesh.Face([newV[ind2-2][0],newV[ind2-2][1],newV[ind2-1][0],newV[ind2-1][1]]))\n\t\t\t\t\n\t\t\t\t# Preparing the corners\n\t\t\t\tfor i in enumV:\n\t\t\t\t\tif lenV[i] == 2 :\t NC.setdefault(V[i],[]).append(newV[i])\n\t\t\t\t\n\t\t\t\n\t\t\told_faces.append(f)\n\t\t\t\n\t\t\t# 
Preparing the Edges\n\t\t\tfor i in enumV:\n\t\t\t\tif Esel[i]:\n\t\t\t\t\tverts = [newV[i],newV[(i+1) % nV]]\n\t\t\t\t\tif V[i].index > V[(i+1) % nV].index : verts.reverse()\n\t\t\t\t\tNE.setdefault(E[i],[]).append(verts)\n\t\n\t# Create the faces\n\tfor f in NF: me.addFace(f)", "title": "" }, { "docid": "ef94975a981115c86dad65b2af0d782c", "score": "0.6564459", "text": "def getFacesFromSubelement(self):\n ...", "title": "" }, { "docid": "3ac4d24600b0d53b00a3d4d6edc548fb", "score": "0.6556624", "text": "def face_detection(image):\n\n face_cascade = cv2.CascadeClassifier(PATH_XML)\n faces = face_cascade.detectMultiScale(image, 1.3, 5)\n\n images = []\n\n for face in faces:\n x_beginning, y_beginning, face_width, face_height = face\n roi_img = image[y_beginning:y_beginning + face_height, x_beginning:x_beginning + face_width]\n\n images.append(roi_img)\n\n return faces, images", "title": "" }, { "docid": "4100cd97c921cd56d6eb7a536b1bf52b", "score": "0.6536479", "text": "def getfacecoordinates(self, frame=None):\n if frame is None:\n frame = self.getpicfromCamera()\n\n scale = .6\n frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_AREA)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n frameSize = gray.shape\n #print(frameSize)\n faces = self.face_Cascade.detectMultiScale(gray, 1.05, 5)\n\n if len(faces) == 0: #If no face is found\n return faces\n\n face_sizes = faces[:,2] * faces[:, 3]\n biggestFaceIndex = np.argmax(face_sizes)\n faces = faces[biggestFaceIndex].reshape(1, 4) #Prunes the face list down to the biggest face\n\n faces = faces.astype(dtype = np.float)\n for x in range(0, len(faces)): #TODO : Change the float creation into numpy array operation\n faces[x][0] /= frameSize[1]\n faces[x][1] /= frameSize[0]\n faces[x][2] /= frameSize[1]\n faces[x][3] /= frameSize[0]\n return faces", "title": "" }, { "docid": "9b348d8dfb30a7e1fa29ae9764b9ce00", "score": "0.65155965", "text": "def detect_faces(path):\n from google.cloud import vision\n import io\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.face_detection(image=image)\n faces = response.face_annotations\n\n # Names of likelihood from google.cloud.vision.enums\n likelihood_name = (0, 1, 2, 3,\n 4, 5)\n print('Faces:')\n anger = 0\n joy = 0\n for face in faces:\n anger += likelihood_name[face.anger_likelihood]\n joy += likelihood_name[face.joy_likelihood]\n\n if response.error.message:\n raise Exception(\n '{}\\nFor more info on error messages, check: '\n 'https://cloud.google.com/apis/design/errors'.format(\n response.error.message))\n return [anger, joy]", "title": "" }, { "docid": "0705ead5c3fe3bec015cac94b1484330", "score": "0.65117836", "text": "def find_faces_with_vertex(index, faces):\n faces_with_vertex = [x for x in faces if index in x]\n\n return faces_with_vertex", "title": "" }, { "docid": "11026d25219c478bca669913584760b7", "score": "0.65076095", "text": "def face_detector(image_path):\n # load BGR image\n # for this opencv model, we need to convert color images to grayscale\n image = cv2.imread(image_path)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray)\n return faces", "title": "" }, { "docid": "effaaeacea080b680daf4d159c571477", "score": "0.6479351", "text": "def faces(self, faces: np.ndarray):\n\n self.faces = faces\n\n self.refresh()\n self.events.data()", "title": "" }, { "docid": 
"499b4120a52fa03cb0201dac4cc9c484", "score": "0.64527345", "text": "def get_identical_faces(face):\t\n\tidentical_faces = []\n\tvertices = obj.data.vertices\n\tfaces = obj.data.polygons\n\tface_idx = list(faces).index(face)\n\tface_coords = []\n\tfor v in face.vertices:\n\t\tface_coords.append(vertices[v].co)\n\tfor f in faces:\n\t\titer_coords = []\n\t\tsum_matches = 0\n\t\tfor v in f.vertices:\n\t\t\tif not vertices[v].co in face_coords:\n\t\t\t\tbreak\n\t\t\tsum_matches += 1\n\t\tif sum_matches == 3:\n\t\t\tidentical_faces.append(f)\n\t\t\t\n\treturn identical_faces", "title": "" }, { "docid": "cf22fabf3445a3a29b953d13bc7dde3d", "score": "0.6448472", "text": "def _make_faces_from_pb(faces):\n return [Face.from_pb(face) for face in faces]", "title": "" }, { "docid": "9563bfb3055e678c088425fb74a702bd", "score": "0.64218134", "text": "def get_edges_from_face(f: torch.Tensor):\n _assert_tensor(f)\n n = f.numel()\n edges = []\n for i in range(n):\n if f[i] < f[(i + 1) % n]:\n edges.append((f[i].item(), f[(i + 1) % n].item()))\n else:\n edges.append((f[(i + 1) % n].item(), f[i].item()))\n return edges", "title": "" }, { "docid": "e5ae0512a22333788097a0165858c648", "score": "0.63998765", "text": "def get_node_faces(self, node):\n mesh_data = self.meshes.get(node.mesh_key)\n if mesh_data is None:\n return None\n return mesh_data.faces", "title": "" }, { "docid": "8760208f5da22ba539ce6353b1a291be", "score": "0.638048", "text": "def unpack_faces(faces):\n return np.reshape(faces, (-1, 4))[:, 1:]", "title": "" }, { "docid": "06d7d82b887534c9584314dbb89b86b8", "score": "0.6345097", "text": "def draw_faces(self, fkeys=None, color=None):\n fkeys = fkeys or list(self.mesh.faces())\n colordict = to_valuedict(fkeys, color, self.defaults['face.color'])\n faces = []\n for fkey in fkeys:\n faces.append({\n 'points': self.mesh.face_coordinates(fkey),\n 'name' : \"{}.face.{}\".format(self.mesh.attributes['name'], fkey),\n 'color' : colordict[fkey],\n })\n return compas_rhino.xdraw_faces(faces, layer=self.layer, clear=False, redraw=False)", "title": "" }, { "docid": "544aebff32f87eb82c01a436b0b30474", "score": "0.6338192", "text": "def detect_faces_cloud_storage(uri):\n vision_client = vision.Client()\n image = vision_client.image(source_uri=uri)\n\n faces = image.detect_faces()\n\n print('Faces:')\n for face in faces:\n print('anger: {}'.format(face.emotions.anger))\n print('joy: {}'.format(face.emotions.joy))\n print('surprise: {}'.format(face.emotions.surprise))", "title": "" }, { "docid": "05fda12f1250f07111f84e4990bc37bc", "score": "0.6335745", "text": "def load_faces(directory):\n faces = list()\n for filename in listdir(directory):\n path = directory + filename\n face = extract_face(path)\n faces.append(face)\n return faces", "title": "" }, { "docid": "7b4b4fe23fbfeea95f7d8fc74c52474c", "score": "0.6317729", "text": "def assemble_face_from_faces(self, faces):\n\n # Generate a local copy of this list.\n other_faces = list(faces)\n if self in other_faces:\n other_faces.remove(self)\n\n # Find all edges.\n available_edges = []\n for other_face in other_faces:\n try:\n edge = VoronoiEdge(edge_face=self,\n intersecting_face=other_face)\n except Exception:\n continue\n\n # Store the data.\n available_edges.append(edge)\n\n # Error check.\n if len(available_edges) < 3:\n raise Exception(\"Not enough edges.\")\n\n # available_edges.sort(cmp=self.edge_comp)\n available_edges.sort(key=functools.cmp_to_key(self.edge_comp))\n\n return self.assemble_face_from_edges(available_edges)", "title": "" }, { "docid": 
"ff3924edce9704354915a6bdff930e34", "score": "0.63153315", "text": "def draw_faces(img, faces):\n for x, y, x1, y1 in faces:\n cv2.rectangle(img, (x, y), (x1, y1), (0, 0, 255), 3)", "title": "" }, { "docid": "bb84cf99a6a7b4f72290cc95fd4399e3", "score": "0.6310943", "text": "def faces_from_sphere_vertices(vertices):\n hull = ConvexHull(vertices, qhull_options='Qbb Qc')\n faces = np.ascontiguousarray(hull.simplices)\n if len(vertices) < 2**16:\n return np.asarray(faces, np.uint16)\n else:\n return faces", "title": "" }, { "docid": "75e8dd1b7936a07b739f308a0ffdf418", "score": "0.6304157", "text": "def poly_faces(poly):\n facets = []\n for vertex in poly.vertices():\n # We first make all points integers\n point = vertex.vector()\n denom = point.denominator()\n point *= denom\n facets.append([-denom] + list(point))\n n = len(facets[0]) - 1\n for i in range(n):\n row = [0]*(n+1)\n row[i+1] = 1\n facets.append(row)\n\n return facets", "title": "" }, { "docid": "f7ae978f8ec123091315899bdcb5e69a", "score": "0.62998235", "text": "def draw_faces(frame, faces):\n for (x, y, w, h) in faces:\n cv.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)", "title": "" }, { "docid": "74d8c9ced2b72444170b3d588b5c0a11", "score": "0.6290078", "text": "def GetInstanceFace(self,surface_points,pointtolerance):\n return self.DM.globals[\"GetFace\"](self.fe_inst.faces,surface_points,pointtolerance)", "title": "" }, { "docid": "4ab5dfc98437f4a1b3f1a116d478cab9", "score": "0.62887275", "text": "def detect_faces(Image=None, Attributes=None):\n pass", "title": "" }, { "docid": "5f8a24978a34b34d02375d48d04afd82", "score": "0.6287379", "text": "def vertex_faces_from_face_verts(faces):\n n_vertices = np.amax(faces) + 1\n vertex_faces = [ [] for _ in range(n_vertices) ]\n\n for face_idx, face in enumerate(faces):\n vertex_faces[face[0]].append(face_idx)\n vertex_faces[face[1]].append(face_idx)\n vertex_faces[face[2]].append(face_idx)\n\n vertex_orders = list(map(len, vertex_faces))\n max_vertex_order = max(vertex_orders)\n # Pad the adjacency list with out of bounds values\n vertex_faces = np.array([vf + [n_vertices] * (max_vertex_order - vo)\n for vf, vo in zip(vertex_faces, vertex_orders)])\n return vertex_faces", "title": "" }, { "docid": "f71fda0acf35da286ed916511d8d25cb", "score": "0.6284581", "text": "def get_known_faces(pic_path, image_names):\n N = len(image_names) # Number of uploaded images (known people)\n\n known_face_encodings = []\n known_face_names = [0 for i in range(N)]\n\n for i in range(N):\n path_to_image = os.path.join(pic_path, image_names[i])\n image = face_recognition.load_image_file(path_to_image)\n # Get encodings\n known_face_encodings.append(face_recognition.face_encodings(image)[0])\n # Get names\n name = os.path.basename(path_to_image)\n known_face_names[i] = os.path.splitext(name)[0]\n\n return (known_face_names, known_face_encodings)", "title": "" }, { "docid": "4ba04a077f53ba9c01a59f38ce92222b", "score": "0.62808734", "text": "def detect_faces_uri(uri):\n image = vision.types.Image()\n image.source.image_uri = uri\n\n\n #with io.open(image_path, 'rb') as image_file:\n #content = image_file.read()\n\n #image = vision.types.Image(content=content)\n response = client.face_detection(image=image)\n faceAnnotations = response.face_annotations\n \n likehood = (0, 1, 2, 3, 4, 5)\n\n #Return variables\n level = 0\n isAngry = ''\n isJoy = ''\n isSad = ''\n # confidence = 0\n\n for face in faceAnnotations:\n \n level = face.detection_confidence\n isAngry = likehood[face.anger_likelihood]\n 
isJoy = likehood[face.joy_likelihood]\n isSad = likehood[face.sorrow_likelihood]\n\n list=[isAngry,isJoy,isSad]\n \n #print(level)\n # print(max(list))\n if(isAngry == max(list)):\n print('Angry',level,isAngry)\n \n elif(isJoy == max(list)):\n print('Joy',level,isJoy)\n \n elif(isSad == max(list)):\n print('Sad',level,isSad)", "title": "" }, { "docid": "f5e2bf99599e0562b46720c194b5896c", "score": "0.6260476", "text": "def FindFaces(images: List[str]):\n raise NotImplementedError(\"Batch face detection not yet implemented\")\n pass", "title": "" }, { "docid": "cbf192fc594fc4ef0d66aae214453829", "score": "0.62589306", "text": "def detection_faces(imagem_url: str) -> int:\n carregaAlgoritmo = cv2.CascadeClassifier(\"haarcascade/haarcascade_frontalface_default.xml\")\n imagem = url_to_image_array(imagem_url)\n\n #imagem = cv2.imread('caminho')\n imagem = cv2.resize(imagem, (0,0), fx=0.7, fy=0.7) \n imagemCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)\n\n #DETECÇÃO DAS FACES\n faces = carregaAlgoritmo.detectMultiScale(\n imagemCinza, \n scaleFactor=1.1, \n minNeighbors=3, #abordagem de vizihança, (^) = + perder os verdadeiros positivos, (v) = + falsos positivos\n minSize=(20,20) #tamanho da detecção de uma face\n )\n\n #Pequena regra de negócio\n count = 0\n try:\n for faces_x in faces:\n print(faces[count])\n count += 1\n #print(\"Quat. de pessoas: \", count)\n return (count)\n\n except Exception as erro:\n print(\"Erro: \", EOFError)\n return (count)", "title": "" }, { "docid": "81634d1876a689f6973a26498e62622f", "score": "0.62336576", "text": "def get_faces():\n for file in os.listdir(folders):\n img = cv2.imread(os.path.join(folders, file))\n data = {}\n for file in os.listdir(folders):\n img = cv2.imread(os.path.join(folders, file))\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_casc().detectMultiScale(gray, 1.1, 4)\n data[file] = faces.tolist()\n for (x, y, w, h) in faces: # ROI - region of interest\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n with open('faces_dump.pickle', 'wb') as fl:\n \"\"\"Sending dictionary to the pickle.dump\n \"\"\"\n pickle.dump(data, fl)\n return data", "title": "" }, { "docid": "e9d5a625790823bb2f4192d713d39e75", "score": "0.6228901", "text": "def find_edges(faces):\n edges = [ ]\n for face in faces:\n for edge in [face[0:2], face[1:3], [face[0], face[2]] ]:\n if not edge in edges: # I know that this is costly\n edges.append(edge)\n\n return edges", "title": "" }, { "docid": "daf010332af94ba7de41be2a811cdcec", "score": "0.61874306", "text": "def find_faces_at_edges(faces):\n\n faces_at_edges = {}\n for face_id, face in enumerate(faces):\n for edge in [face[0:2], face[1:3], [face[0], face[2]] ]:\n faces_at_edges.setdefault((edge[0], edge[1]), []).append(face_id)\n faces_at_edges.setdefault((edge[1], edge[0]), []).append(face_id) # make it symmetric\n\n return faces_at_edges", "title": "" }, { "docid": "035c2a64827837b1c8baa6a5e33a779f", "score": "0.6164131", "text": "def get_vf_list(vertices,facet):\n\tvf_list = [[] for k in range(len(vertices))]\n\tfor k in range(len(facet)):\n\t\tfor j in range(3):\n\t\t\tvf_list[facet[k,j]].append(k)\n\treturn vf_list", "title": "" }, { "docid": "ac2aff43bdd792ee871f543d6a4ea653", "score": "0.61577845", "text": "def detect_faces(self, image):\n minsize = 50\n threshold = [self.pnet_threshold, self.rnet_threshold, self.onet_threshold]\n factor = 0.709\n bounding_boxes, _ = align.detect_face.detect_face(image, minsize, self.pnet, self.rnet, self.onet, threshold,\n factor)\n # Filter out poor 
detections\n good_bboxes = []\n for bounding_box in bounding_boxes:\n xmin, ymin, xmax, ymax, acc = bounding_box\n if acc > self.detect_acc_thresh:\n good_bboxes.append([int(xmin), int(ymin), int(xmax), int(ymax)])\n return good_bboxes", "title": "" }, { "docid": "23b1de249488ef17a185fe1a201c12f7", "score": "0.61557513", "text": "def _find_faces(self, image: Image):\n try:\n im = self._compress(image)\n except Exception as e:\n im = np.array(image)\n # Magic with exposure\n try:\n p2, p98 = np.percentile(im, (2, 98))\n im = exposure.rescale_intensity(im, in_range=(p2, p98))\n except Exception as e:\n pass\n\n # Let's try simple angles first\n r_im = im\n r_angles = [0, 90, -90, 180]\n right_angle = 0\n right_faces = []\n max_square = 0\n\n for a in r_angles:\n max_square, right_angle, right_faces = self._rotate_and_get_faces(a, im, max_square, right_angle,\n right_faces)\n # If there were nothing found\n # we can try with other angles\n if len(right_faces) == 0:\n r_angles1 = r_angles\n r_angles = [i for i in range(0, 360, 10) if i not in r_angles1]\n for a in r_angles:\n max_square, right_angle, right_faces = self._rotate_and_get_faces(a, im, max_square, right_angle,\n right_faces)\n\n if right_faces is not None:\n r_im = imutils.rotate_bound(im, right_angle)\n return r_im, right_faces", "title": "" }, { "docid": "75c4e247c7c6c33fd7b2bf61f10e6193", "score": "0.61474687", "text": "def detect_face(self, img):\n #convert the test image to gray image as opencv face detector expects gray images\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n #let's detect multiscale (some images may be closer to camera than others) images\n #result is a list of faces\n faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5);\n\n #if no faces are detected then return None\n if (len(faces) == 0):\n return None, None\n\n #under the assumption that there will be only one face,\n #extract the face area\n (x, y, w, h) = faces[0]\n\n #return only the face part of the image\n return gray[y:y+w, x:x+h], faces[0]", "title": "" }, { "docid": "68811aa07a22d652a12c73bb5425fd8b", "score": "0.61314344", "text": "def GetPartFace(self,surface_points,pointtolerance):\n return self.DM.globals[\"GetFace\"](self.fe_part.faces,\n surface_points,\n pointtolerance)", "title": "" }, { "docid": "454eaef24b2c375fadbfdcdd6c214fd1", "score": "0.6128512", "text": "def detect_face(image, method=\"hog\"):\n if method not in [\"cnn\", \"hog\"]:\n print(\"Method not identified\")\n exit()\n bounding_boxes = face_recognition.face_locations(image, model=method)\n return bounding_boxes", "title": "" }, { "docid": "28f158d8c8aa29ebc219966fd6249624", "score": "0.6125886", "text": "def highlight_faces(self, faces, output_filename):\n im = Image.open(self.__filename)\n draw = ImageDraw.Draw(im)\n\n for face in faces:\n box = [(v['x'], v['y']) for v in face['fdBoundingPoly']['vertices']]\n draw.line(box + [box[0]], width=5, fill='#00ff00')\n\n del draw\n im.save(output_filename)", "title": "" }, { "docid": "2c0d14f8786a6a572349bd65b1081480", "score": "0.6106247", "text": "def detect_face(img_path):\n clf_path = get_path('haarcascade_frontalface_alt.xml')\n face_cascade = cv2.CascadeClassifier(clf_path)\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray)\n return len(faces) > 0", "title": "" }, { "docid": "66affd4b861bdad3708d779551d2d706", "score": "0.6096899", "text": "def GetMultipleInstanceFacesRegion(self,surface_points,pointtolerance):\n return 
self.DM.regionToolset.Region(faces=self.GetMultipleInstanceFace(surface_points,pointtolerance))", "title": "" }, { "docid": "82e93bcddfe7c3f9f13a373986b40a05", "score": "0.60966754", "text": "def detectFacesInWebcam():\n\tif cnn.loadModel():\n\t\tcap = cv2.VideoCapture(0)\n\t\twhile(cap.isOpened()):\n\t\t\tret, frame = cap.read()\n\t\t\tcoordinates,gray = d.detectFace(frame,True)\n\t\t\tfor (x,y,w,h) in coordinates:\n\t\t\t\tface = cv2.resize(gray[y:y+h,x:x+w],d.FACE_DIMENSIONS)\n\t\t\t\toutputProcessing(frame,x,y,w,h,cnn.predictImageLables(face)[0])\n\t\t\tcv2.imshow('frame',frame)\n\t\t\tif cv2.waitKey(25) & 0xFF == ord('q'):\n\t\t\t\tbreak\n\t\tcap.release()\n\t\tcv2.destroyAllWindows()\n\telse:\n\t\tprint(\"Invalid Model path \"+cnn.MODEL_PATH)", "title": "" }, { "docid": "4f321a6b1badb54fc880323b5806800f", "score": "0.60963327", "text": "def mesh_select_faces(mesh, message='Select mesh faces.'):\n return FaceSelector.select_faces(mesh)", "title": "" }, { "docid": "9dcc20f880bd574cd0d13f9117224ce5", "score": "0.60936487", "text": "def rawFeatureExtractorFace(datum):\r\n features = []\r\n\r\n for x in range(FACE_DATUM_WIDTH):\r\n for y in range(FACE_DATUM_HEIGHT):\r\n if datum.getPixel(x, y) > 0:\r\n cell_value = 1\r\n else:\r\n cell_value = 0\r\n features.append(cell_value)\r\n\r\n return features", "title": "" }, { "docid": "3ad395af05c1d4e58e68652e090d7ded", "score": "0.6093019", "text": "def __detectFaces_dlib(self, img):\n rectangles = self.__detector(img)\n return [imgproc.rectangle2Rect(rectangle) for rectangle in rectangles]", "title": "" }, { "docid": "b5acad3b78a9f2b2e331a43d0edd0adc", "score": "0.6086844", "text": "def clear_faces(self):\n guids = compas_rhino.get_objects(name=\"{}.face.*\".format(self.volmesh.name))\n compas_rhino.delete_objects(guids, purge=True)", "title": "" }, { "docid": "9e9e4327f7918ba67f5a5406ec4530eb", "score": "0.6061214", "text": "def detectFace(image):\r\n\r\n gray = convertToGRAY(image) # detection is works well on gray-scale image.\r\n # load the face classifier\r\n faceClassifier = cv2.CascadeClassifier('haarcascades\\\\haarcascade_frontalface_alt.xml')\r\n\r\n if faceClassifier.empty(): # handling the classifier empty error.\r\n print(\"Your cascade is empty.\")\r\n exit()\r\n\r\n minSize = widthHeightDividedBy(gray, 8) # getting min size for detection.\r\n faces = faceClassifier.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, flags=cv2.CASCADE_SCALE_IMAGE,\r\n minSize=minSize)\r\n if len(faces) == 0: # checking if faces found or not.\r\n return None, None\r\n\r\n x, y, w, h = faces[0] # considering that there will be only one face in the image.\r\n return gray[x:x+w, y:y+h], faces[0]", "title": "" }, { "docid": "990c2eecdd93438c27632affdecacefc", "score": "0.6049446", "text": "def get_selected_uv_faces(bm, uv_layer):\n\tfaces = []\n\tfor face in bm.faces:\n\t\tif face.select:\n\t\t\tcount = 0\n\t\t\tfor loop in face.loops:\n\t\t\t\tif loop[uv_layer].select:\n\t\t\t\t\tcount+=1\n\t\t\tif count == len(face.loops):\n\t\t\t\tfaces.append(face)\n\treturn faces", "title": "" }, { "docid": "113ef8e615fc3887cba91bfe11ee083a", "score": "0.6027842", "text": "def get_flavors(self):\n return self._calib_dict.keys()", "title": "" }, { "docid": "62543595da0062a2b1c835d7399202f1", "score": "0.60129714", "text": "async def faceset(self, ctx):\n\t\tpass", "title": "" }, { "docid": "bb07f9c2342405767a564280385067c2", "score": "0.6007834", "text": "def get_face(X,i):\n return X[face_index[i],:]", "title": "" }, { "docid": 
"88ebd949af79137a71fffa40ed095c4d", "score": "0.5999757", "text": "def draw_faces(self, faces=None, color=None, join_faces=False):\n self.face_color = color\n faces = faces or self.faces\n vertex_xyz = self.vertex_xyz\n facets = []\n for face in faces:\n facets.append({\n 'points': [vertex_xyz[vertex] for vertex in self.mesh.face_vertices(face)],\n 'name': \"{}.face.{}\".format(self.mesh.name, face),\n 'color': self.face_color.get(face, self.default_facecolor)\n })\n guids = compas_rhino.draw_faces(facets, layer=self.layer, clear=False, redraw=False)\n if join_faces:\n guid = compas_rhino.rs.JoinMeshes(guids, delete_input=True)\n compas_rhino.rs.ObjectLayer(guid, self.layer)\n compas_rhino.rs.ObjectName(guid, '{}.mesh'.format(self.mesh.name))\n compas_rhino.rs.ObjectColor(guid, color)\n guids = [guid]\n return guids", "title": "" }, { "docid": "0677226f456974ba07270d87abff1816", "score": "0.59959793", "text": "def detect_face(img, multi=False):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n face_cascade = cv2.CascadeClassifier('opencv-files/lbpcascade_frontalface.xml')\n faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5);\n if len(faces) == 0:\n return None, None\n if multi:\n fcs = []\n for face in faces:\n (x, y, w, h) = face\n fcs.append((gray[y:y + w, x:x + h], face))\n return fcs\n (x, y, w, h) = faces[0]\n return gray[y:y + w, x:x + h], faces[0]", "title": "" }, { "docid": "d9ef00ee0a3fcb9b49a152b9fe56abfc", "score": "0.5987272", "text": "def export_angular_faces(self):\n texcoords = np.hstack((self.texcoords, np.zeros((len(self.texcoords), 1), dtype=np.float32)))\n return self.vertices, texcoords, self.normals, self.faces[:, (0, 2, 1)] + 1", "title": "" }, { "docid": "a9ed4579b1487fa8dd26695368a9545d", "score": "0.5976077", "text": "def _filter_faces(self, faces):\n return faces[np.all(np.any(faces[:, (0, 1, 2), :]-faces[:, (2, 0, 1), :], axis=2), axis=1)]", "title": "" }, { "docid": "4a2f9ebe46e1e5a9e307c9e43646de4a", "score": "0.59751296", "text": "def face_detector(img_path):\n face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\n img = cv2.imread(img_path)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray)\n return len(faces) > 0", "title": "" }, { "docid": "21ed4d8ebdd3ed6e3afaf951f60b2b8f", "score": "0.5955273", "text": "def detect_faces(path):\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.Image(content=content)\n\n response = client.face_detection(image=image)\n if response.error.message:\n raise Exception(\n '{}\\nFor more info on error messages, check: '\n 'https://cloud.google.com/apis/design/errors'.format(\n response.error.message))\n\n face = response.face_annotations[0]\n emotions = {\n ANGRY: face.anger_likelihood,\n HAPPY: face.joy_likelihood,\n SURPRISED: face.surprise_likelihood,\n SAD: face.sorrow_likelihood\n }\n print(emotions)\n if sum(emotions.values()) == 4:\n res = NEUTRAL\n else:\n res = max(emotions, key=emotions.get)\n\n return res", "title": "" }, { "docid": "aaa767f2d54b0c611fda9e39b763ec26", "score": "0.59522825", "text": "def find_face(frame):\n img = np.array(frame)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n face = detector(img, 0)\n return face, img", "title": "" }, { "docid": "aaba81623e637076925f667ca6588a6b", "score": "0.59417963", "text": "def detect_faces(frame, annotate=True, verbose=True):\n if 'FACEDETECTOR' not in globals():\n global 
FACEDETECTOR\n from car.models import FaceDetector\n FACEDETECTOR = FaceDetector()\n print_all(\"Instantiated a FaceDetector object!\")\n\n faces = FACEDETECTOR.detect(frame, annotate=annotate)\n n = len(faces)\n if verbose:\n print_all(\"Found {} face{}.\".format(n, 's' if n != 1 else ''))\n return faces", "title": "" }, { "docid": "0e0f958bf7203e7b44cfc06a1c057c4a", "score": "0.5932602", "text": "def image_faces_rectangle(img, faces):\n for (x, y, w, h) in faces:\n image_rectangle(img, x, y, w, h)\n return img", "title": "" }, { "docid": "e3954d4981a7b778caa555b7086f4b2c", "score": "0.5928436", "text": "def getSurfaceFromEdge(self, edge):\n # Its not efficient but it works - scales with Nface not constant\n surfaces = []\n for isurf in range(self.nFace):\n for iedge in range(4):\n if self.edgeLink[isurf][iedge] == edge:\n surfaces.append([isurf, iedge])\n\n return surfaces", "title": "" } ]
0c052b6fe1ad9e414a31d295b27d6d14
Return the postprocessed estimated halfwidth of the confidence interval.
[ { "docid": "c3ef6a9f70af4c303d73ee4b2f351256", "score": "0.0", "text": "def epsilon_estimated_processed(self) -> float:\n return self._epsilon_estimated_processed", "title": "" } ]
[ { "docid": "122d794a68ecb72583b6485cbec6a157", "score": "0.5895502", "text": "def band_width(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"band_width\")", "title": "" }, { "docid": "480a4bb0243d51ce844d30275d4c3e80", "score": "0.58554256", "text": "def get_unscaled_capsule_half_height(self):\n return 0.000000", "title": "" }, { "docid": "96d43f9c662ec0b8b938d7b1cae8306f", "score": "0.5818376", "text": "def find_full_width_at_half_max(self, index, mz_array, intensity_array, signal_to_noise):\n try:\n left = find_left_width(\n mz_array, intensity_array, index, signal_to_noise)\n except np.linalg.LinAlgError:\n left = 1e-7\n try:\n right = find_right_width(\n mz_array, intensity_array, index, signal_to_noise)\n except np.linalg.LinAlgError:\n right = 1e-7\n\n if left < 1e-6:\n left = right\n elif right < 1e-6:\n right = left\n if right < 1e-6 and left < 1e-6:\n left = right = 0.15\n fwhm = left + right\n self.partial_fit_state.left_width = left\n self.partial_fit_state.right_width = right\n self.partial_fit_state.full_width_at_half_max = fwhm\n\n return fwhm", "title": "" }, { "docid": "d197d4a87ee3dca34b8146a63a60f200", "score": "0.57852274", "text": "def get_height_width(prediction: np.array) -> (float, float):\n height, width = inertia_tensor_eigvals(prediction)\n return height, width", "title": "" }, { "docid": "c366f275cbe7bd31c61a4874bf062e2d", "score": "0.5777361", "text": "def width(x):\n return (upper_bound(x) - lower_bound(x)) / 2", "title": "" }, { "docid": "c366f275cbe7bd31c61a4874bf062e2d", "score": "0.5777361", "text": "def width(x):\n return (upper_bound(x) - lower_bound(x)) / 2", "title": "" }, { "docid": "c366f275cbe7bd31c61a4874bf062e2d", "score": "0.5777361", "text": "def width(x):\n return (upper_bound(x) - lower_bound(x)) / 2", "title": "" }, { "docid": "c366f275cbe7bd31c61a4874bf062e2d", "score": "0.5777361", "text": "def width(x):\n return (upper_bound(x) - lower_bound(x)) / 2", "title": "" }, { "docid": "868ad3dabaa2b6962b5c7604fb42a234", "score": "0.57576126", "text": "def width(x):\r\n return (upper_bound(x) - lower_bound(x)) / 2", "title": "" }, { "docid": "71e292f44489b967d94227a49f0a0550", "score": "0.5753664", "text": "def get_scaled_capsule_half_height(self):\n return 0.000000", "title": "" }, { "docid": "7935ce1df52ea86c1b2783648438cf0c", "score": "0.56893736", "text": "def get_strip_width(self):\n width, height = self._image.size\n\n # calculate distances between all adjacent vertical lines\n distances = []\n for i in xrange(width-1):\n distances.append(self._line_distance(i, i+1))\n\n # break the lines up into intervals and score them\n scores = []\n for i in xrange(2, width/2+1):\n if width % i == 0:\n scores.append((i, interval_score(distances, i)))\n\n # strip width is the interval with the highest score\n strip_width = max(scores, key=lambda x: x[1])[0]\n\n return strip_width", "title": "" }, { "docid": "f1cec52ded4b325fc93bc582a553a6b6", "score": "0.55807453", "text": "def s2_kernel_widths(self):\n return self.s2_kwidth", "title": "" }, { "docid": "fdd1b9a4f8ed6811a502b7eb089369a9", "score": "0.5563341", "text": "def get_scaled_capsule_half_height_without_hemisphere(self):\n return 0.000000", "title": "" }, { "docid": "285fb734833a28b9fdec625a99fd9a05", "score": "0.5563327", "text": "def get_width(self):\r\n df = self.df[self.j]\r\n width = np.ones((self.Nkmax,1)) * df[np.newaxis,:]\r\n width[self.maskslice]=0\r\n return width", "title": "" }, { "docid": "e12efa0831b4ebd084f51548d4580f73", "score": "0.55468947", "text": "def 
width(self) -> float:\n\n if self._lines:\n return float(\n max((line.real.max() for line in self._lines))\n - min((line.real.min() for line in self._lines))\n )\n else:\n return 0.0", "title": "" }, { "docid": "a1c315c0a7cbdf0a17cb74fcb04aec9f", "score": "0.55377305", "text": "def gp_interp_linewidth(self):\n return self._gp_interp_linewidth", "title": "" }, { "docid": "49855e6375be1f084e968ea40d5d50e6", "score": "0.55272204", "text": "def half(self):\n if not self._half:\n self._half = _get_font_max_size_info(self.font, 0x23, 0x7e)\n return self._half", "title": "" }, { "docid": "3f2a8ff179501444cef28d39e9c47650", "score": "0.5523549", "text": "def GetCurrentTrackWidth(self):\n return _pcbnew.BOARD_GetCurrentTrackWidth(self)", "title": "" }, { "docid": "d40c68f699236f78dec5863d623dd775", "score": "0.5508838", "text": "def get_unscaled_capsule_half_height_without_hemisphere(self):\n return 0.000000", "title": "" }, { "docid": "53c1da9b3c81138fba1887ac241eb4b5", "score": "0.549955", "text": "def get_custom_width_of_class_interval(self, start_point, end_point, number_of_intervals):\n self.width_of_class_interval = round((end_point - start_point)/number_of_intervals)\n return self.width_of_class_interval", "title": "" }, { "docid": "e5be6c49c23efa47b183883edd45fe6c", "score": "0.5487525", "text": "def get_width_of_class_interval(self, number_of_intervals):\n self.number_of_intervals = number_of_intervals\n self.width_of_class_interval = round(((math.ceil(max(self.dataset))-min(self.dataset))/(self.number_of_intervals)), ndigits=1)\n if self.width_of_class_interval < 0.2:\n self.get_width_of_class_interval(number_of_intervals-1)\n else:\n return self.width_of_class_interval", "title": "" }, { "docid": "7d5ada12afab73e30047639d007ad736", "score": "0.5463927", "text": "def _get_width(self):\n return simplify_width(self._width_type, self._width_amount)", "title": "" }, { "docid": "af286675dcd83491fc6e5d60bade1441", "score": "0.5453948", "text": "def width_f(self):\n return self._size[1]", "title": "" }, { "docid": "154ef53d204295f476470a1d4ca55db0", "score": "0.54501116", "text": "def band_width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"band_width\")", "title": "" }, { "docid": "154ef53d204295f476470a1d4ca55db0", "score": "0.54501116", "text": "def band_width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"band_width\")", "title": "" }, { "docid": "169c3e9f2115fc0fc953b075172be6b9", "score": "0.544398", "text": "def get_width(self):\n return abs(abs(self.btr.x) - abs(self.btl.x))", "title": "" }, { "docid": "4ecb873fceddbf3df7f68b065ab2d7a3", "score": "0.54419065", "text": "def width(self):\n return (self._bbox[2] - self._bbox[0]) + 1 if self.include_margin else 0", "title": "" }, { "docid": "ece54385fec3e0906f4bd313a89ac81d", "score": "0.5435265", "text": "def fwidth(self):\n return self.getimageinfo()[1]", "title": "" }, { "docid": "d5de992244dce5e1b238de10bc3069b0", "score": "0.5405288", "text": "def post_width(post, conv):\n return depth_dist(conv)[post_depth(post, conv)]", "title": "" }, { "docid": "89337ffa1167ddbfad5d248ced4ea5ee", "score": "0.5399566", "text": "def cg_w(self):\n return (self.tlf + self.h - self.tuf)/2", "title": "" }, { "docid": "8a87eb6f12d8efc1b1113ea0810cece4", "score": "0.5396075", "text": "def getWidth(self):\n return _pymaxwell5.SpectralImage_getWidth(self)", "title": "" }, { "docid": "47c7cfbb77fefe5437cd40c0a194342d", "score": "0.5394775", "text": "def width(self):\n return self._fwhm", "title": "" }, { "docid": 
"b49f13576a1169affbd430da859534c0", "score": "0.5382249", "text": "def get_normalized_width(partition, index):\n if partition.middle[index] == '*':\n return 1.0\n if IS_CAT[index] is False:\n low = partition.width[index][0]\n high = partition.width[index][1]\n width = float(ATT_TREES[index].sort_value[high]) - float(ATT_TREES[index].sort_value[low])\n else:\n width = partition.width[index]\n return width * 1.0 / QI_RANGE[index]", "title": "" }, { "docid": "45ac7be860c61a42db430474507aaf2b", "score": "0.5377323", "text": "def GetWidth(self):\n return _pcbnew.DRAWSEGMENT_GetWidth(self)", "title": "" }, { "docid": "b1b42830eea3134b196a1d748c1b34e1", "score": "0.5368179", "text": "def database_cut_half():\n cut_rows = classes.Promoters.e_value_normalizer(25.3866, 200, 4)\n cut_cols = classes.Promoters.e_value_normalizer(117.1372, 200, 4)\n self.fifty_percent = (cut_rows, cut_cols)", "title": "" }, { "docid": "10d7fc9de444054ef8c664ab508cc48a", "score": "0.53638446", "text": "def GetThreshold(self) -> \"double\":\n return _itkIsolatedWatershedImageFilterPython.itkIsolatedWatershedImageFilterIF2IF2_GetThreshold(self)", "title": "" }, { "docid": "8e085cec9d04f3d51743bce80bdebcbd", "score": "0.53581405", "text": "def GetThreshold(self) -> \"double\":\n return _itkIsolatedWatershedImageFilterPython.itkIsolatedWatershedImageFilterISS2ISS2_GetThreshold(self)", "title": "" }, { "docid": "229fc489d9d96925467188abd0ed39be", "score": "0.5338217", "text": "def get_spline_length(self):\n return 0.000000", "title": "" }, { "docid": "80d24cd01c5362b01d48b0b114cd1f8c", "score": "0.53327227", "text": "def get_smooth_length(self, part_type, segment):\n #There is a different kernel definition, as in gadget the kernel goes from 0 to 2,\n #whereas I put it between zero and 1.\n return self.get_data(part_type, \"SmoothingLength\",segment=segment)/2", "title": "" }, { "docid": "97dd66803588775108ae6fc88ff3d538", "score": "0.53274256", "text": "def _hoeffding_bound(range_val, confidence, n):\n return math.sqrt((range_val * range_val * math.log(1.0 / confidence)) / (2.0 * n))", "title": "" }, { "docid": "8d586d6cf114b46a38c0968376d7a878", "score": "0.5322988", "text": "def width(self) -> Natural:\n return self.current_width", "title": "" }, { "docid": "aef699d7b095a6fefbfb2c2ead44f404", "score": "0.532286", "text": "def width(self):\n return self.x_br - self.x_tl", "title": "" }, { "docid": "63b0ac86c7c224656d112b546b028883", "score": "0.5320194", "text": "def crop_width(self):\r\n\r\n return self.__faceIdentification.crop_width()", "title": "" }, { "docid": "520cc08d4bbea770f82153c5414c0e7c", "score": "0.53201157", "text": "def GetThreshold(self) -> \"double\":\n return _itkIsolatedWatershedImageFilterPython.itkIsolatedWatershedImageFilterIUC2IUC2_GetThreshold(self)", "title": "" }, { "docid": "fe5c55d12fb309bbfd72ed444e6c0bce", "score": "0.5316973", "text": "def border_width(self) -> float:\n return self._border_width", "title": "" }, { "docid": "d514ea392290b3bc3a651916d3165774", "score": "0.52937675", "text": "def width(self) -> int:\n return self._bounding_box[2] - self._bounding_box[0]", "title": "" }, { "docid": "b14f52a993ce8c4e3a12fb103d3adb63", "score": "0.52873707", "text": "def spike_broadening(spike_width_list):\n\n first_spike = spike_width_list[0]\n mean_following_spikes = numpy.mean(spike_width_list[1:])\n broadening = first_spike / mean_following_spikes\n\n return broadening", "title": "" }, { "docid": "cc54383b71dedc974a7387c9422cae80", "score": "0.5285341", "text": "def GetWidthIU(self):\n return 
_pcbnew.PAGE_INFO_GetWidthIU(self)", "title": "" }, { "docid": "b6dacc84243cac8652ab5dc1b2b1f791", "score": "0.52742845", "text": "def _getImageWidth(self):\n return self.device.Width.Value", "title": "" }, { "docid": "b3c322a4d34208741acf88e83fedd0af", "score": "0.5266727", "text": "def _get_crop_width(self):\r\n return self._crop_size[0]", "title": "" }, { "docid": "1acc91eb0b960fd07805d139dac1b0e0", "score": "0.52501255", "text": "def len_width(self):\n width, _ = self.osh.level_dimensions[self.level]\n # width // self.patch_size --> if stride == patch_size\n return SlideSet.shape_helper(width, self.patch_size, self.stride)", "title": "" }, { "docid": "b2178273da7fc3b55f1b35aae299367e", "score": "0.5238503", "text": "def calc_base_barywidth(self):\n \"\"\"ASSUMPTION: spacing_factor = 2\"\"\"\n if not self.init_update:\n raise AttributeError(\"Must execute p.update() before passing the SyncParams class to this function\")\n if self.CFO != 0:\n warnings.warn(\"calc_base_barywidth() was called with non-zero CFO in argument parameters\")\n\n # Finding base barywidth\n full_sim_tmp = self.full_sim\n CFO_tmp = self.CFO\n self.full_sim = False\n self.update()\n barypos, baryneg, _, _ = self.estimate_bary()\n self.add(basewidth=barypos-baryneg)\n\n\n \n # Finding barywidth slope\n loc = 0.05\n \n self.CFO = -1*loc*self.f_symb\n self.update()\n barypos, baryneg, _, _ = self.estimate_bary()\n lowidth = barypos-baryneg\n \n self.CFO = loc*self.f_symb\n self.update()\n barypos, baryneg, _, _ = self.estimate_bary()\n hiwidth = barypos-baryneg\n \n slope = (hiwidth - lowidth)/(self.CFO*2)\n\n # putting state back to proper values\n self.CFO = CFO_tmp\n self.full_sim = full_sim_tmp\n self.update()\n self.add(baryslope=slope)\n self.init_basewidth = True", "title": "" }, { "docid": "3b8e91aeeb2327b442af8bc5bbcec7a1", "score": "0.5237209", "text": "def compute_width_p(self):\n self.width_p = self.partial_widths()[\"total\"]", "title": "" }, { "docid": "58d667403041d1b8bbebba2bec171dc6", "score": "0.523703", "text": "def cheeger_halfway_approx(self) -> float:\n return 0.5 * (self.cheeger_lower_bound + self.cheeger_upper_bound)", "title": "" }, { "docid": "26ddedece8ba18268a0415b60b9496cb", "score": "0.5236843", "text": "def mid(self):\n return per(self.nums(), 0.5)", "title": "" }, { "docid": "6dbd3cbad29a3a6bb79c05692cf489a5", "score": "0.5216061", "text": "def n_steps_w(self):\n return int((self.src_cols - self.window_size_w) / (self.step_size_w + epsilon)) + 2", "title": "" }, { "docid": "ad247d65324e98f0ad1f04d423abf29c", "score": "0.521147", "text": "def confInt(x):\n\n correct = stats.t.ppf(0.975, len(x)-1 )\n\n lower = np.mean(x) - correct * np.std(x)/np.sqrt(len(x))\n upper = np.mean(x) + correct * np.std(x)/np.sqrt(len(x))\n\n return (lower, upper)", "title": "" }, { "docid": "4be80fce0f90741ba3e27b85d4845b0c", "score": "0.521101", "text": "def calcWidth(x,y):\n width = np.zeros( len(y[:,0]) )\n f = positiveCheck(y)\n if f == True:\n flag_first = True\n flag_second = False\n else:\n flag_first = False\n flag_second = True\n\n for i, event in enumerate(y):\n if f:\n m = max(event)\n else:\n m = min(event)\n m_index = np.where(event == m)[0][0]\n thresh = m*0.5\n try:\n first = interpolateThreshold(x[:m_index+1], event[:m_index+1], thresh, rise=flag_first)\n second = interpolateThreshold(x[m_index-1:], event[m_index-1:], thresh, rise=flag_second)\n except:\n continue\n width[i] = second - first\n return np.mean(width), rms(width), width", "title": "" }, { "docid": 
"bdfa57fb448c95c7378c4c13341674c3", "score": "0.52103007", "text": "def get_current_round(h):\n return len(h)//2", "title": "" }, { "docid": "3ce97ec805628f408cc636d5f4d284c1", "score": "0.5208377", "text": "def width(self) -> int:\n return self._ffprobe[\"width\"]", "title": "" }, { "docid": "7519dc04cc38259027d0546a72560a1c", "score": "0.51953626", "text": "def get_height(side_length: float) -> float:\n return side_length * sqrt(3) / 2", "title": "" }, { "docid": "f0497947d80e586b6afe935a5832558f", "score": "0.5190227", "text": "def get_bandwidth(width, threshold):\n\n best = width\n min_err = float(\"inf\")\n for rows_per_band in range(1, width + 1):\n try:\n num_bands = 1. / (threshold ** rows_per_band)\n except ZeroDivisionError:\n return best\n err = abs(width - num_bands * rows_per_band)\n if err < min_err:\n best = rows_per_band\n min_err = err\n return best", "title": "" }, { "docid": "fa2fde5983ff1fb58c2489f25911b871", "score": "0.51852006", "text": "def _width(self):\n return sum([img.shape[1] for img in self.imgs])", "title": "" }, { "docid": "a88f3bf03fbbda7a8470df632a188b79", "score": "0.51847214", "text": "def window_size(self):\n p = self.params\n return p.left_context + p.right_context + 1", "title": "" }, { "docid": "7deb949d38f5cdd9d1b9dab8b5e9714d", "score": "0.51833946", "text": "def _get_width(self):\r\n if self._transform_image:\r\n return self._transform_image.get_width()", "title": "" }, { "docid": "fdb0e51530c5d3521c886eedfde2f7af", "score": "0.5167285", "text": "def equivalent_width(spectrum):\n # Continuum is always assumed to be 1.0\n avg_cont = np.median(spectrum.flux)\n\n # Average dispersion in the line region\n avg_dx = np.mean(spectrum.wavelength[1:] - spectrum.wavelength[:-1])\n\n # Calculate equivalent width\n ew = ((avg_cont - spectrum.flux) * (avg_dx / avg_cont)).sum()\n\n return ew", "title": "" }, { "docid": "e1b2f0abad02cd1a56caa8677ffb30c6", "score": "0.5166617", "text": "def figureWidth(wa=0.7, rb=0.9, wp=210):\n return mm2inch(wa*rb*wp)", "title": "" }, { "docid": "989d9dc28ef5b47c6affcd7114506bfd", "score": "0.51638126", "text": "def fx(self):\n return _get_full_grid_size(self.nx, self.halo_width, self.cx,\n self.num_boundary_points)", "title": "" }, { "docid": "2a5a9bbda4ff5a8e72d9217f1c989a70", "score": "0.516228", "text": "def _get_output_width(self):\r\n return self._output_size[0]", "title": "" }, { "docid": "7eab775f5b7f6fb9a25736b29b81e16b", "score": "0.515825", "text": "def get_aspect_ratio(self):\n return 0.000000", "title": "" }, { "docid": "bde43678268daed6592ce7d98d4d5f8b", "score": "0.5155725", "text": "def calc_sample_thickness(self):\n with xr.open_dataset(self.file_path, group=\"Beam\") as ds_beam:\n sth = self.sound_speed * ds_beam.sample_interval / 2\n return sth.mean(dim='ping_time') # use mean over all ping_time", "title": "" }, { "docid": "3e47a3b9c86fcca6317fbbcb6d216f00", "score": "0.5152453", "text": "def width(self):\n return denormalize(self._width)", "title": "" }, { "docid": "5aecce60a27112e072cd61ad2ef0f2e7", "score": "0.5150197", "text": "def GetWidth(self):\n return _pcbnew.TRACK_GetWidth(self)", "title": "" }, { "docid": "a1744d756f1ce791bb26d013ed6c2e73", "score": "0.5149212", "text": "def delta_widths(self):\n \n if hasattr( self, '_delta_widths'):\n return self._delta_widths\n else:\n if len(self.well_channel_metrics) < 2:\n return None\n elif ( self.well_channel_metrics[0].width_mean_hi is None \\\n or self.well_channel_metrics[1].width_mean_hi is None ):\n return None\n\n self._delta_widths = 
self.well_channel_metrics[0].width_mean_hi \\\n - self.well_channel_metrics[1].width_mean_hi\n return self._delta_widths", "title": "" }, { "docid": "24318cdc8835c41758f487a1a6f79161", "score": "0.51461315", "text": "def GetLowerBoundaryCropSize(self) -> \"itkSize2\":\n return _itkCropLabelMapFilterPython.itkCropLabelMapFilterLM2_GetLowerBoundaryCropSize(self)", "title": "" }, { "docid": "d03256b70563651dc5dcbbd6d4f3e0e8", "score": "0.51364875", "text": "def GetTrackWidth(self):\n return _pcbnew.NETCLASS_GetTrackWidth(self)", "title": "" }, { "docid": "e137441f3a6502395a45b22e8a6a4cb0", "score": "0.51273507", "text": "def getWidth(self):\r\n return width", "title": "" }, { "docid": "3744d89deac4d58240f96f152faeb30f", "score": "0.5116105", "text": "def width(self):\n return self.x2 - self.x1", "title": "" }, { "docid": "0ef43cd6bf81a00f21f8ece4ca307958", "score": "0.5114268", "text": "def ywidth(self) -> float:\n return self._ywidth", "title": "" }, { "docid": "c5d814776e0acf0170ca1924ad160478", "score": "0.5112251", "text": "def border_width(self) -> PageProperty[float]:\n return self._border_width", "title": "" }, { "docid": "564e73a9093218162e11da7530c05b69", "score": "0.51121515", "text": "def width(self):\n return self.shape[1]", "title": "" }, { "docid": "2f8492839f7dbbea5c63a0905918019c", "score": "0.5103929", "text": "def xwidth(self) -> float:\n return self._xwidth", "title": "" }, { "docid": "334aa5124cda86ba4fd72fd143209ea0", "score": "0.50978976", "text": "def getDesiredStepWidth(self):\r\n try:\r\n return wx.GetApp().getController(0).getBehaviour().getCoronalStepWidth()\r\n except Exception:\r\n return 0", "title": "" }, { "docid": "e1e98579bc2c668aec0cb42167359fa2", "score": "0.5096474", "text": "def get_width(self):\n return self.width", "title": "" }, { "docid": "57b8ed400261433ef03319021a4dbe44", "score": "0.5086654", "text": "def pulse_width(self):\n return self.header.get(\"PULSEON\", None)", "title": "" }, { "docid": "1845dc555d6756d2ad3483b31b598b57", "score": "0.50849116", "text": "def _get_width(self, duration: ParameterValueType) -> ParameterValueType:\n sigma_sec = self.experiment_options.sigma * self._get_dt()\n\n return duration - 2 * sigma_sec * self.experiment_options.risefall", "title": "" }, { "docid": "1f4bc53cebfd6ee58795c3fd9e37f8fa", "score": "0.5080901", "text": "def _get_width(self):\r\n return self._get_size()[0]", "title": "" }, { "docid": "9e2c44547dd11c8b6f43fc271da8e1d0", "score": "0.50787896", "text": "def get_width(self):\r\n return self._width", "title": "" }, { "docid": "9e2c44547dd11c8b6f43fc271da8e1d0", "score": "0.50787896", "text": "def get_width(self):\r\n return self._width", "title": "" }, { "docid": "2871d47cf174abc0755fb81eee32c878", "score": "0.507721", "text": "def __len__(self):\n if self.is_conti:\n return 1\n else:\n if self.is_emb and (self.is_y==False):\n return self.hidden_size\n else:\n width = len(self.top_freq)+1\n width =1 if width==2 else width\n return width", "title": "" }, { "docid": "269d48ffcdd23c3efb7f0c9dbd39a716", "score": "0.5076164", "text": "def _upper_level(self):\n return (1 + self.confidence_level) / 2.", "title": "" }, { "docid": "b812fa0b91bc773a616212ce5da57d98", "score": "0.50714105", "text": "def Width(self):\n return self.Scale", "title": "" }, { "docid": "36bbaa79d14065c94cacc52a4f409ce7", "score": "0.50691015", "text": "def binWidth(self):\n return self.__binWidth", "title": "" }, { "docid": "25fe9db5365dcc22e65986e2b69e46e1", "score": "0.50670606", "text": "def get_widths():", "title": "" }, { 
"docid": "9a5e375cd8a30a930d89535bbae400af", "score": "0.5066007", "text": "def get_width(self):\n return self.__width", "title": "" }, { "docid": "f7fe3b3d049a4247631c3dafadbf0be1", "score": "0.5065866", "text": "def width(self):\n return _to_int(self.width_f)", "title": "" }, { "docid": "745edd1f5bd3827baedfd5e2106e669a", "score": "0.5060965", "text": "def midpoint(self):\r\n\r\n return (self.lower + self.upper) / 2", "title": "" }, { "docid": "7f3450d28f3a4a063351c5f4471e77c6", "score": "0.5058949", "text": "def get_binary_rf_area(self):\n\n if self.thr is None:\n raise LookupError('To get the area, the receptive field should be thresholded!!')\n\n alt_step = abs(np.mean(np.diff(self.altPos).astype(np.float)))\n azi_step = abs(np.mean(np.diff(self.aziPos).astype(np.float)))\n\n return len(self.weights) * alt_step * azi_step", "title": "" }, { "docid": "84b6502e82352d6d8e86ae6ab4ab9fee", "score": "0.5048333", "text": "def halfpi() -> float:\n return libspice.halfpi_c()", "title": "" }, { "docid": "74ed28fd263700daff29705c1e1583f6", "score": "0.5048285", "text": "def GetThreshold(self) -> \"double\":\n return _itkIsolatedWatershedImageFilterPython.itkIsolatedWatershedImageFilterISS3ISS3_GetThreshold(self)", "title": "" }, { "docid": "d8eccbc120e09c76e13379643ead8c9e", "score": "0.5047498", "text": "def GetWidth(self):\n return _pcbnew.DIMENSION_GetWidth(self)", "title": "" } ]
8c455e029d6f65c2169ccaf702a3d6e0
Generating data from homogenized diffusion problem.
[ { "docid": "05e6111125de46140914ec76980f55c1", "score": "0.0", "text": "def GenerateData(n_traj: int, length_traj: float, filename: str):\n\n U,U_fd,U_ref = [],[],[]\n\n # epss=np.logspace(-6,-3,21)\n eps = 1e-5\n\n for _ in range(n_traj):\n ttin = timeit.default_timer()\n print('traj #'+str(_))\n initial_profile = get_random_IC(same_BC=True)\n u, u_fd, u_ref, T, x, x_fd, x_ref = diffusion_solver(initial_profile,length_traj,eps=eps)\n\n U.append(u)\n U_fd.append(u_fd)\n U_ref.append(u_ref)\n print('This trajectory took {} seconds'.format(timeit.default_timer() - ttin))\n\n \n U = np.stack(U).squeeze()\n U_fd = np.stack(U_fd).squeeze()\n U_ref = np.stack(U_ref).squeeze()\n\n np.savez(filename, U=U, U_fd=U_fd, U_ref=U_ref, T=T, x=x, x_fd=x_fd, x_ref=x_ref,eps=eps)", "title": "" } ]
[ { "docid": "c5fe77eb85d7111315011d81b34c315c", "score": "0.6364072", "text": "def diffusion():", "title": "" }, { "docid": "e78cfd5e581ba3e0c686a8f133343e1c", "score": "0.60929483", "text": "def g():\r\n\t# use only genes that are correlated with liver disease\r\n\tgene_cor_sorted_indices = correlation_argsort(GE, LIVER_DISEASE)\r\n\tgenes = gene_cor_sorted_indices[:400]\r\n\tge_improved = GE[:,genes]\r\n\r\n\t# split to train and test sets\r\n\tge_train, ge_test, liver_train, liver_test = train_test_choice(ge_improved, LIVER_DISEASE, TRAIN_PERCENTAGE)\r\n\t# run knn\r\n\tprediction_lables = [knn(ge_train, liver_train, sample, K) for sample in ge_test] \r\n\t# clac accuracy\r\n\taccuracy = check_accuracy_knn(liver_test, prediction_lables)\r\n\treturn accuracy", "title": "" }, { "docid": "3d114ca3b5584ea99bf66cdb2d6aa953", "score": "0.58094424", "text": "def expert_to_gates(self):\n # split nonzero gates for each expert\n return torch.split(self._nonzero_gates, self._part_sizes, dim=0)", "title": "" }, { "docid": "a4402b19bb8f10e15efac34182ca88a6", "score": "0.5728645", "text": "def Dis_train(g_model,d_model,data,lines,batchsize,patch_shape,log_dir,n_epochs=30):\r\n np.random.shuffle(lines)\r\n iter_per_epo=1000\r\n n_steps=iter_per_epo*n_epochs\r\n num_epo=0\r\n index=-batchsize/2\r\n print(index)\r\n for i in range(n_steps):\r\n #prepare real and fake samples\r\n index=int(index+2)\r\n X_real,y_real=generate_real_samples(data,lines,int(batchsize/2),patch_shape,index) \r\n X_fake,y_fake=generate_fake_samples(g_model,data,lines,int(batchsize/2),patch_shape,index) \r\n Xdata=np.concatenate((X_real,X_fake),axis=0)\r\n Ydata=np.concatenate((y_real,y_fake),axis=0)\r\n # updata discriminator model\r\n d_loss=d_model.train_on_batch(Xdata,Ydata)\r\n # summarize loss on this batch\r\n print('>%d, d1=%.3f' % (i+1, d_loss))\r\n # record history\r\n if (i+1) % (iter_per_epo)==0:\r\n index=-batchsize/2\r\n np.random.shuffle(lines)\r\n num_epo=num_epo+1\r\n d_model.save(log_dir+str(num_epo)+'_'+str(d_loss)+'.h5')", "title": "" }, { "docid": "8de6a51eb853f7418e4ec514e35ba17c", "score": "0.5663148", "text": "def HoG(self):\n # these parameters are for a 45x45 image\n features = image_feature.hog(\n self.img,\n orientations=9,\n pixels_per_cell=(5, 5),\n cells_per_block=(2, 2),\n visualise=False,\n feature_vector=True,\n block_norm=\"L2-Hys\"\n )\n return features", "title": "" }, { "docid": "942cbfd7c70ddec56a87ca92fe086691", "score": "0.5577317", "text": "def prepare_data(interaction, drug_fea,cell_fea):\r\n \r\n # define necessary variables\r\n feature = []\r\n label = []\r\n\r\n for i in range(0, interaction.shape[0]):\r\n for j in range(0, interaction.shape[1]):\r\n tmp_fea=[]\r\n # concatenating cell line and drug similarities for each pair\r\n for k in range(len(cell_fea[i])):\r\n tmp_fea.append(cell_fea[i][k])\r\n for k in range(len(drug_fea[j])):\r\n tmp_fea.append(drug_fea[j][k])\r\n label.append(interaction[i,j])\r\n feature.append(tmp_fea)\r\n \r\n label=np.array(label)\r\n feature=np.array(feature)\r\n return feature, label", "title": "" }, { "docid": "1f639ed4b7df5e5b922cd02d685fb733", "score": "0.5529071", "text": "def expert_to_gates(self):\n return tf.split(\n self._nonzero_gates, self._part_sizes_tensor, 0, num=self._num_experts)", "title": "" }, { "docid": "0b093e2bae847b26ba1fd727160adfeb", "score": "0.55290467", "text": "def giant_adherent(self):\n \n look_up = self.look_up\n Gactivity_table = self.activity\n \n H = self.Gcc\n g = 
open(str(self.dir)+\"/csv/adherent_giant.csv\",\"w\")\n g1 = open(str(self.dir)+\"/csv/adherent_giant_ck_ids.csv\",\"w\")\n print>>g, \",\".join(map(str,self.header))\n \n for n in H.nodes():\n if self.G.node[n][\"weigh_ins\"] < 5:\n H.remove_node(n)\n \n nx.write_gml(H,str(self.dir)+\"/networks/method3_adherent_giant.gml\")\n\n\n nodes = map(int,H.nodes())\n\n print \"nodes\", nodes\n\n for n in nodes:\n print look_up[\"weigh_ins\"][n]\n\n\n print \"length of adherent giant component\", len(H)\n print \"length of the giant of the adherent component giant\",len(nx.connected_component_subgraphs(H)[0])\n\n result = [(n, look_up[\"ck_id\"][n],look_up[\"initial_weight\"][n],look_up[\"weigh_ins\"][n],\\\n activity_table[\"activity\"][n], look_up[\"weight_change\"][n],look_up[\"percentage_weight_change\"][n],\\\n look_up[\"time_in_system\"][n],look_up[\"age\"][n],look_up[\"height\"][n],look_up[\"initial_bmi\"][n],\\\n look_up[\"final_bmi\"][n]) for n in nodes]\n \n print \"result\", result\n\n\n norm = filter(lambda x: float(x[10]) < 25.0,result)\n over = filter(lambda x: 25< float(x[10]) < 30, result)\n ob = filter(lambda x: float(x[10]) > 30.0,result)\n \n x = open(str(self.dir)+\"/csv/adherent_giant_normal.csv\",\"w\")\n y = open(str(self.dir)+\"/csv/adherent_giant_overweight.csv\",\"w\")\n z = open(str(self.dir)+\"/csv/adherent_giant_obese.csv\",\"w\")\n \n #list of ck_ids for each group \n xx = open(str(self.dir)+\"/csv/adherent_giant_normal_ck_ids.csv\",\"w\")\n yy = open(str(self.dir)+\"/csv/adherent_giant_overweight_ck_ids.csv\",\"w\")\n zz = open(str(self.dir)+\"/csv/adherent_giant_obese_ck_ids.csv\",\"w\")\n \n self.over_ad = over\n self.obese_ad = ob\n \n bmi_data = 2*[norm,over,ob]\n files = [x,y,z,xx,yy,zz]\n \n #headers\n for ii in range(len(bmi_data)):\n if ii<3:\n print>>files[ii],\",\".join(map(str,self.header))\n \n #write the csv files to for each set\n for ii in range(len(bmi_data)):\n for n in bmi_data[ii]:\n if ii<3:\n print>>files[ii],\",\".join(map(str,n))\n else:\n print>>files[ii],n[1]\n \n for x in files:\n x.close()\n \n for r in result: \n print>>g, \",\".join(map(str,r))\n print>>g1, r[1]\n g.close()\n g1.close()\n\n return H, result", "title": "" }, { "docid": "84bb2ffbc3a290053380af0b0f160acd", "score": "0.5508421", "text": "def data_gr():\n pass", "title": "" }, { "docid": "e473b466532c746f4c80c3e1ea679f86", "score": "0.54654235", "text": "def diffusion(self, x):\n ...", "title": "" }, { "docid": "d38ad5ef44f9721ee6634a9dad1a1b0c", "score": "0.54622257", "text": "def test_diffusion_hotnet(network_file: \"network file, use a network with weights\",\n geneset_file: \"csv geneset file\",\n rwr_matrix_filename: \"hdf5 RWR matrix obtained with pygna \",\n output_table: \"output results table, use .csv extension\",\n name_column: \"Column to use as name (default is deseq2)\" = \"gene_name\",\n weight_column: \"Column to use as weight (default is deseq2)\" = \"stat\",\n filter_column: \"Column used to define the significant genes (default is deseq2)\" = \"padj\",\n filter_condition: \"Condition for significance\" = \"less\",\n filter_threshold: \"threshold for significance\" = 0.01,\n normalise: 'pass this flag for using only positive values in the analysis' = False,\n size_cut: \"removes all genesets with a mapped length < size_cut\" = 20,\n number_of_permutations: \"number of permutations for computing the empirical pvalue\" = 500,\n cores: \"Number of cores for the multiprocessing\" = 1,\n in_memory: \"set if you want the large matrix to be read in memory\" = 
False,\n ):\n\n # Reading network file\n network = rc.ReadTsv(network_file).get_network()\n network = nx.Graph(network.subgraph(max(nx.connected_components(network), key=len)))\n\n # Read geneset\n table = rc.ReadCsv(geneset_file, column_to_fill=name_column).get_data()\n if len(table.columns) < 2:\n logging.error(\"Error: the function takes a csv file as input, the read file has less than 2 columns, \"\n \"check that the table is comma separated\")\n\n # Filter table for significant genes\n table[name_column] = table[name_column].fillna(0).apply(str)\n table = pe.TableElaboration.clean_table(table=table, stat_col=weight_column)\n geneset = utils.filter_table(table, filter_column=filter_column, alternative=filter_condition,\n threshold=filter_threshold)[name_column]\n if normalise:\n table[weight_column] = np.abs(table[weight_column].values)\n\n if len(geneset) < size_cut:\n logging.error('The number of significant genes is lower than %d. \\\n \\n Change size_cut if necessary' % size_cut)\n\n # Read RWR matrix\n rw_dict = {\"nodes\": read_distance_matrix(rwr_matrix_filename, in_memory=in_memory)[0],\n \"matrix\": read_distance_matrix(rwr_matrix_filename, in_memory=in_memory)[1]}\n # setting output\n output1 = out.Output(network_file, output_table, \"diffusion\", geneset_file, geneset_file)\n output1.create_st_table_empirical()\n logging.info(\"Results file = \" + output1.output_table_results)\n\n # initialising test\n st_test = sd.DiffusionTest(sd.hotnet_diffusion_statistic, rw_dict[\"nodes\"], rw_dict[\"matrix\"], table,\n names_col=name_column, weights_col=weight_column)\n\n observed, pvalue, null_d, n_mapped, n_geneset = st_test.empirical_pvalue(geneset, max_iter=number_of_permutations,\n alternative=\"greater\", cores=cores)\n if n_mapped < size_cut:\n logging.info(\"Results removed, since nodes mapped are < %d\" % size_cut)\n else:\n logging.info(\"Observed: %g p-value: %g\" % (observed, pvalue))\n output1.update_st_table_empirical(geneset_file, n_mapped, n_geneset, number_of_permutations, observed, pvalue,\n np.mean(null_d), np.var(null_d))\n output1.close_temporary_table()\n\n # if results_figure:\n # paint.paint_diffusion_matrix(output1.output_table_results, results_figure, alternative='greater',)", "title": "" }, { "docid": "c9e63a54e404aec6c1f4d9a627303990", "score": "0.54609364", "text": "def dti_tracking_analysis():", "title": "" }, { "docid": "9462b102d5e4ff29ef9007d0056585d5", "score": "0.54367065", "text": "def get_aged_g2_from_g12(g12, age_edge, age_center):\n\n m, n, noqs = g12.shape\n g2_aged = {}\n for q in range(noqs):\n g12q = g12[:, :, q]\n g2q_aged = get_aged_g2_from_g12q(g12q, age_edge, age_center)\n if q == 0:\n keys = list(g2q_aged.keys())\n for key in keys:\n if q == 0:\n g2_aged[key] = np.zeros([len(g2q_aged[key]), noqs])\n g2_aged[key][:, q] = g2q_aged[key]\n # print( q, key )\n\n return g2_aged", "title": "" }, { "docid": "45085c476e124b05707c5e10a34fe293", "score": "0.54299843", "text": "def _HOG(images):\n\n WIN_SIZE = (32, 32)\n BLOCK_SIZE = (8, 8)\n BLOCK_STRIDE = (4, 4)\n CELL_SIZE = (4, 4)\n NBINS = 9\n\n hog_desriptor = cv2.HOGDescriptor(WIN_SIZE, BLOCK_SIZE, BLOCK_STRIDE, CELL_SIZE, NBINS)\n hog_features = [np.squeeze(hog_desriptor.compute(images[idx])) for idx in range(images.shape[0])]\n return np.stack(hog_features, axis=0)", "title": "" }, { "docid": "d6704a922cc344dc21a9cc69fc878190", "score": "0.54216605", "text": "def giant_nonadherent(self):\n \n H = self.G\n Hcc = nx.connected_component_subgraphs(H)[0]\n look_up = self.look_up\n activity_table 
= self.activity\n \n g = open(str(self.dir)+\"/csv/nonadherent_giant.csv\",\"w\")\n g1 = open(str(self.dir)+\"/csv/nonadherent_giant_ck_ids.csv\",\"w\")\n print>>g, \",\".join(map(str,self.header))\n \n for n in Hcc.nodes():\n if self.G.node[n][\"weigh_ins\"] >=5:\n Hcc.remove_node(n)\n\n print \"giant nonadherent\", len(Hcc)\n \n nx.write_gml(Hcc,str(self.dir)+\"/networks/method3_nonadherent_giant.gml\")\n\n result = [(n, look_up[\"ck_id\"][n],look_up[\"initial_weight\"][n],look_up[\"weigh_ins\"][n],\\\n activity_table[\"activity\"][n], look_up[\"weight_change\"][n],look_up[\"percentage_weight_change\"][n],\\\n look_up[\"time_in_system\"][n],look_up[\"age\"][n],look_up[\"height\"][n],look_up[\"initial_bmi\"][n],\\\n look_up[\"final_bmi\"][n]) for n in list(map(int,Hcc.nodes()))]\n \n norm = filter(lambda x: float(x[10]) < 25.0,result)\n over = filter(lambda x: 25< float(x[10]) < 30, result)\n ob = filter(lambda x: float(x[10]) > 30.0,result)\n \n x = open(str(self.dir)+\"/csv/nonadherent_giant_normal.csv\",\"w\")\n y = open(str(self.dir)+\"/csv/nonadherent_giant_overweight.csv\",\"w\")\n z = open(str(self.dir)+\"/csv/nonadherent_giant_obese.csv\",\"w\")\n \n #list of ck_ids for each group \n xx = open(str(self.dir)+\"/csv/nonadherent_giant_normal_ck_ids.csv\",\"w\")\n yy = open(str(self.dir)+\"/csv/nonadherent_giant_overweight_ck_ids.csv\",\"w\")\n zz = open(str(self.dir)+\"/csv/nonadherent_giant_obese_ck_ids.csv\",\"w\")\n \n bmi_data = 2*[norm,over,ob]\n files = [x,y,z,xx,yy,zz]\n \n #headers\n for ii in range(len(bmi_data)):\n if ii<3:\n print>>files[ii],\",\".join(map(str,self.header))\n \n #write the csv files to for each set\n for ii in range(len(bmi_data)):\n for n in bmi_data[ii]:\n if ii<3:\n print>>files[ii],\",\".join(map(str,n))\n else:\n print>>files[ii],n[1]\n \n for x in files:\n x.close()\n\n for r in result:\n print>>g, \",\".join(map(str,r))\n print>>g1, r[1]\n\n print \"length of the nonadherent giant\", len(Hcc)\n \n g.close()\n g1.close()\n \n return H, result", "title": "" }, { "docid": "b910d2c0554808de3106246e31f65161", "score": "0.5418618", "text": "def buildDiffusionModel(self):\n\t\tdm=lm.DiffusionModel()\n\t\n\t\tnumReactions=0\n\t\tfor r in self.regions:\n\t\t\tnumReactions += self.regions[r].getReactionCount()\n\t\tLMLogger.info(\"number of reactions = %d\", numReactions)\n\t\tdm.set_number_reactions(numReactions)\n\n\t\tnumSpecies=len(self.particleMap)\n\t\tLMLogger.info(\"number of species = %d\", numSpecies)\n\t\tdm.set_number_species(numSpecies)\n\n\t\tnumSiteTypes=len(self.siteTypes)\n\t\tLMLogger.info(\"number of sitetypes = %d\", numSiteTypes)\n\t\tdm.set_number_site_types(numSiteTypes)\n\t\t\n\t\tlx,ly,lz = [int(round(cDim / self.latticeSpacing)) for cDim in self.continousDimensions]\n\t\tparticlesPerSite = lm.getCompiledLatticeMaxOccupancy();\n\t\tLMLogger.info(\"Lattice is %d x %d x %d with %g nm spacing and %d particles per site\", lx, ly, lz, self.latticeSpacing*1e9, particlesPerSite)\n\t\tdm.set_lattice_x_size(lx)\n\t\tdm.set_lattice_y_size(ly)\n\t\tdm.set_lattice_z_size(lz)\n\t\tdm.set_lattice_spacing(self.latticeSpacing)\n\t\tdm.set_particles_per_site(particlesPerSite)\n\t\tdm.set_bytes_per_particle(self.bytesPerParticle)\n\n\t\tD=[0] * numSpecies * numSiteTypes * numSiteTypes\n\t\tRL=[0] * numSiteTypes * numReactions\n\t\trxnum=0\n\n\t\t# This is about to get messy fast. 
Use a function to determine the \n\t\t# proper offset in the diffusion matrix given the site types and species\n\t\tdix=lambda _t1,_t2,_p: _t1*(numSiteTypes*numSpecies)+_t2*(numSpecies)+_p\n\n\t\t# Get unique reactions and the locations where they occur\n\t\tuniqueReactions = []\n\t\tuniqueReactionsLoc = []\n\t\tfor region in self.regions:\n\t\t\tfor rx in self.regions[region].reactions:\n\t\t\t\trxnHash = (rx[0],rx[1],rx[2])\n\t\t\t\tif rxnHash in uniqueReactions:\n\t\t\t\t\tidx = uniqueReactions.index(rxnHash)\n\t\t\t\t\tuniqueReactionsLoc[idx].append(region)\n\t\t\t\telse:\n\t\t\t\t\tuniqueReactions.append(rxnHash)\n\t\t\t\t\tuniqueReactionsLoc.append( [region] )\n\n\t\tfor k in self.regions:\n\t\t\tr=self.regions[k]\n\t\t\tLMLogger.debug(\"D for %s (%d)\",k,self.siteTypes[k])\n\t\t\tfor s in range(numSpecies):\n\t\t\t\tLMLogger.debug(\"\\tD[%d][%d][%d] ([%d]) = %g\", self.siteTypes[k],self.siteTypes[k],s, dix(self.siteTypes[k],self.siteTypes[k],s), r.defaultDiffusionRate)\n\t\t\t\t#D[offset+s]=r.defaultDiffusionRate\n\t\t\t\tD[dix(self.siteTypes[k],self.siteTypes[k],s)]=r.defaultDiffusionRate\n\t\t\tfor s in r.diffusionRate:\n\t\t\t\tLMLogger.debug(\"\\tD[%d][%d][%d] ([%d]) = %g\", self.siteTypes[k],self.siteTypes[k],self.particleMap[s]-1, dix(self.siteTypes[k],self.siteTypes[k],self.particleMap[s]-1), r.diffusionRate[s])\n\t\t\t\tD[dix(self.siteTypes[k], self.siteTypes[k], (self.particleMap[s]-1))]=r.diffusionRate[s]\n\n\t\tfor i, rx in enumerate(uniqueReactions):\n\t\t\tfor region in uniqueReactionsLoc[i]:\n\t\t\t\tloc = self.siteTypes[region]\n\t\t\t\tLMLogger.debug(\"RL[%d][%d] is being set cell %d in %s\",rxnum, loc, rxnum * numSiteTypes + loc, region)\n\t\t\t\tRL[rxnum * numSiteTypes + loc]=1\n\t\t\trxnum+=1\n\n\t\tLMLogger.debug(\"Transitions:\")\n\t\tfor t in self.transitionRates:\n\t\t\ts=t[0]\n\t\t\tvia=t[1]\n\t\t\tto=t[2]\n\t\t\trate=t[3]\n\t\t\tLMLogger.debug(\"\\tD[%d][%d][%d] ([%d]) = %g\", self.siteTypes[via],self.siteTypes[to],self.particleMap[s]-1, dix(self.siteTypes[via],self.siteTypes[to],self.particleMap[s]-1), rate)\n\t\t\tD[dix(self.siteTypes[via],self.siteTypes[to],self.particleMap[s]-1)]=rate\n\n\t\tLMLogger.debug(\"D: %s\", D)\n\t\tLMLogger.debug(\"RL: %s\", RL)\n\t\tfor d in range(len(D)):\n\t\t\tdm.add_diffusion_matrix(D[d])\n\n\t\tfor rx in range(len(RL)):\n\t\t\tdm.add_reaction_location_matrix(RL[rx])\n\n\t\treturn dm", "title": "" }, { "docid": "9c36dd33583046df7c91801efe619410", "score": "0.54175425", "text": "def recreate_grpha_data(graph_dict, n_feats, target_id):\n print('------------------ Convert to DLG Graph -------------------')\n # --- Step 1: collect all types of nodes together\n rel_list = []\n node_id_list = {}\n for can_etype, src_dst_tuple in graph_dict.items():\n\n src_type, dst_type = can_etype.split('<>')\n src_origin, dst_origin = np.array(src_dst_tuple[0]), np.array(src_dst_tuple[1])\n\n rel_list.append(((src_type, dst_type), (src_origin, dst_origin)))\n # rel_list.append(((dst_type, dst_type + '<>' + src_type, src_type), (dst_origin, src_origin)))\n\n if node_id_list.get(src_type) is not None:\n node_id_list[src_type] = np.append(node_id_list.get(src_type), src_origin)\n else:\n node_id_list[src_type] = src_origin\n\n if node_id_list.get(dst_type) is not None:\n node_id_list[dst_type] = np.append(node_id_list.get(dst_type), dst_origin)\n else:\n node_id_list[dst_type] = dst_origin\n\n # --- Step 2: for each type of node, unique their IDs and store\n node_new_list = {}\n for ntype, nid_list in node_id_list.items():\n # get new id\n 
nid_old, nid_new = np.unique(nid_list, return_inverse=True)\n node_new_list[ntype] = (nid_old, nid_new)\n\n # --- Step 3: map new node IDs to old node IDs\n rel_dict = {}\n node_type_idx = {}\n for rel in rel_list:\n src_type, dst_type = rel[0]\n src, dst = rel[1]\n\n _, nid_new = node_new_list[src_type]\n if node_type_idx.get(src_type) is not None:\n src_new = nid_new[node_type_idx.get(src_type):node_type_idx.get(src_type) + src.size]\n node_type_idx[src_type] = node_type_idx.get(src_type) + src.size\n else:\n src_new = nid_new[0: 0 + src.size]\n node_type_idx[src_type] = 0 + src.size\n\n _, nid_new = node_new_list[dst_type]\n if node_type_idx.get(dst_type) is not None:\n dst_new = nid_new[node_type_idx.get(dst_type):node_type_idx.get(dst_type) + dst.size]\n node_type_idx[dst_type] = node_type_idx.get(dst_type) + dst.size\n else:\n dst_new = nid_new[0: 0 + dst.size]\n node_type_idx[dst_type] = 0 + dst.size\n\n rel_dict[(src_type, src_type + '<>' + dst_type, dst_type)] = (th.from_numpy(src_new), th.from_numpy(dst_new))\n rel_dict[(dst_type, dst_type + '<>' + src_type, src_type)] = (th.from_numpy(dst_new), th.from_numpy(src_new))\n\n # Add target self-loop\n target_nid_old = node_new_list['target'][0]\n target_nid_new = np.arange(target_nid_old.shape[0])\n rel_dict[('target', 'self_relation', 'target')] = (th.from_numpy(target_nid_new),\n th.from_numpy(target_nid_new))\n\n # Extract the new target node id\n new_pred_target_id = th.tensor(np.searchsorted(target_nid_old, target_id)).long()\n\n print(\"New target node id: {}\".format(new_pred_target_id))\n\n # --- Step 4: process n_feats dictionary to get feature tensor\n new_n_feats = {}\n for in_ntype, in_feat_dict in n_feats.items():\n old_ids, _ = node_new_list[in_ntype]\n\n feats = []\n for old_id in old_ids:\n feats.append(in_feat_dict[str(old_id)])\n\n if in_ntype == 'target':\n global TARGET_FEAT_MEAN, TARGET_FEAT_STD\n np_feats = np.array(feats).astype(np.float32)\n th_feat = th.from_numpy(np_feats)\n norm_feat = (th_feat - TARGET_FEAT_MEAN) / TARGET_FEAT_STD\n\n new_n_feats[in_ntype] = norm_feat\n else:\n new_n_feats[in_ntype] = th.Tensor(feats)\n\n # --- Step 5: build DGL graph\n graph = dgl.heterograph(rel_dict)\n print(graph)\n\n return graph, new_n_feats, new_pred_target_id", "title": "" }, { "docid": "2611dce17f464d3266678b409e66a0c2", "score": "0.536765", "text": "def create_GAN_data(N, class_ratio=0.5, random_state=None):\n if random_state is not None: np.random.seed(random_state)\n # Group indicator\n #group = sp.binom.rvs(p=0.25, n=1, size=N)\n group = np.concatenate([np.zeros([int(N*(1-class_ratio))]), np.ones([int(N*class_ratio)])])\n\n # Continuous variables\n x0 = sp.poisson.rvs(mu=np.where(group==1,1.,2.))\n x1 = sp.norm.rvs(loc=np.where(group==1,-2,2),\n scale=1)\n x2 = sp.norm.rvs(loc=x1,\n scale=1)\n x3 = (x0**2 + x1**2)\n x456 = sp.multivariate_normal.rvs(mean=[0,0,0], cov= np.stack(([1,0.8,0.2], [0.8,1.,0.], [0.2,0.,1.]),axis=0), size=N)\n\n # Discrete variables\n # Binary\n x7 = sp.binom.rvs(p=np.where(group==1,0.6,0.3),n=1)\n # Three class\n x890_0 = sp.multinomial.rvs(p=[0.7,0.2,0.1],n=1,size=N)\n x890_1 = sp.multinomial.rvs(p=[0.2,0.7,0.1],n=1,size=N)\n x890 = np.zeros([N,3])\n for i in range(N):\n if group[i]==1:\n x890[i,] = x890_1[i,]\n else:\n x890[i,] = x890_0[i,]\n\n\n data = pd.DataFrame(np.column_stack([x0,x1,x2,x3,x456,group,x7,x890]))\n data.rename({7:\"group\"}, axis=\"columns\", inplace=True)\n return data", "title": "" }, { "docid": "e50d90f4d4afbb8c746ce98d101d7221", "score": "0.5356715", "text": 
"def get_data():\n no_features = 30\n redundant_features = int(0.1*no_features)\n informative_features = int(0.6*no_features)\n repeated_features = int(0.1*no_features)\n print no_features,redundant_features,informative_features,repeated_features\n x,y = make_classification(n_samples=500,n_features=no_features,flip_y=0.03,\\\n n_informative = informative_features, n_redundant = redundant_features \\\n ,n_repeated = repeated_features,random_state=7)\n return x,y", "title": "" }, { "docid": "ed8ff75a373da930601749f1ee23ae15", "score": "0.53511864", "text": "def data(g):\n g.dataset = LoadDataset(rflow.FSResource(\n \"dataset/CVPPP2017_LCC_training/training\"))\n\n g.dataset_split = SplitDataset(\n rflow.FSResource(\"dataset/dataset-split.pkl\"))\n with g.dataset_split as args:\n args.dataset = g.dataset\n args.train_size = 0.9\n args.val_size = 0.05\n args.test_size = 0.05\n\n g.train_dataset_view = ViewSegmentationDataset()\n g.train_dataset_view.args.dataset = g.dataset_split[0]\n\n g.test_dataset_view = ViewSegmentationDataset()\n g.test_dataset_view.args.dataset = g.dataset_split[2]", "title": "" }, { "docid": "271b0bc5e5361f5c78c65755730a0826", "score": "0.53467304", "text": "def gather_info(self):\n # diversity = np.ones(len(self.agents))\n exploration = np.ones(len(self.agents))\n foraging = np.ones(len(self.agents))\n fittest = np.ones(len(self.agents))\n diversity = np.ones(len(self.agents))\n postcondition = np.ones(len(self.agents))\n constraints = np.ones(len(self.agents))\n selector = np.ones(len(self.agents))\n for id in range(len(self.agents)):\n exploration[id] = self.agents[id].exploration_fitness()\n foraging[id] = self.agents[id].food_collected\n fittest[id] = self.agents[id].individual[0].fitness\n diversity[id] = self.agents[id].diversity_fitness\n postcondition[id] = self.agents[id].postcond_reward\n constraints[id] = self.agents[id].constraints_reward\n selector[id] = self.agents[id].selectors_reward\n\n beta = self.agents[-1].beta\n\n mean = Best(\n self.pname, self.connect, self.sn, 1, 'MEAN', self.stepcnt,\n beta, np.mean(fittest), np.mean(diversity), np.mean(exploration),\n np.mean(foraging), np.mean(postcondition), np.mean(constraints),\n np.mean(selector), \"None\", \"None\", db=False\n )\n mean.save()\n\n std = Best(\n self.pname, self.connect, self.sn, 1, 'STD', self.stepcnt, beta,\n np.std(fittest), np.std(diversity), np.std(exploration),\n np.std(foraging), np.mean(postcondition), np.mean(constraints),\n np.mean(selector), \"None\", \"None\", db=False\n )\n std.save()\n\n # Compute best agent for each fitness\n self.best_agents(diversity, beta, \"DIVERSE\")\n self.best_agents(exploration, beta, \"EXPLORE\")\n self.best_agents(foraging, beta, \"FORGE\")\n self.best_agents(postcondition, beta, \"PCOND\")\n self.best_agents(constraints, beta, \"CNSTR\")\n self.best_agents(selector, beta, \"SELECT\")\n self.best_agents(fittest, beta, \"OVERALL\")\n return np.argmax(foraging)", "title": "" }, { "docid": "a9f4fb86ce79ea7da2c873cfc41e8a00", "score": "0.5342994", "text": "def geizer():\n w = 2*pi/6\n vertices = []\n faces = []\n for i in range(6):\n vertices.append(np.array([cos(i*w), 0, sin(i*w)]))\n faces.append(np.array([i, (i+1)%6, 6+i]))\n faces.append(np.array([(i+1)%6, 6+i, 6+((i+1)%6)]))\n for i in range(6):\n vertices.append(np.array([cos(i*w), 0.2 , sin(i*w)]))\n faces.append(np.array([6+i, (i+1)%6+6, 12+i]))\n faces.append(np.array([(i+1)%6+6, 12+i, 12+((i+1)%6)]))\n for i in range(6):\n vertices.append(np.array([0.7*cos(i*w), 0.6 , 0.7*sin(i*w)]))\n 
faces.append(np.array([12+i, (i+1)%6+12, 12+18]))\n vertices.append(np.array([0,0.4,0]))\n vertices = 0.25*np.array(vertices, dtype=np.float)\n normals = vertices.copy() # approximation\n return vertices, normals, faces", "title": "" }, { "docid": "bd8bd7d45bfe4b51fdfc13d154806b33", "score": "0.5326409", "text": "def cell_differentiate(self):\n for index in range(self.number_agents):\n # if the cell is GATA6 high and pluripotent\n if self.GATA6[index] > self.NANOG[index] and self.states[index] == 0:\n # increase the differentiation counter by 0 or 1\n self.diff_counters[index] += r.randint(0, 1)\n\n # if the differentiation counter is greater than or equal to the threshold, differentiate\n if self.diff_counters[index] >= self.pluri_to_diff:\n self.states[index] = 1 # set state to differentiated\n self.NANOG[index] = 0 # set NANOG to low", "title": "" }, { "docid": "1cded1b53309b7ff993a00974614d88d", "score": "0.5323887", "text": "def hpffre(self):\n # Load #\n df = gain_loss_net_data.hpffre.copy()\n # Join the biomass conversion and expansion factors bcef #\n index = ['country', 'year']\n df = df.left_join(self.bcef, on=index)\n # Join the root to shoot ratio #\n df = df.left_join(self.root_ratio, on=index)\n # Convert the losses to tons of carbon #\n df['loss_per_ha'] *= df['bcefr'] * (1 + df['root_ratio']) * self.carbon_fraction\n # Remove unnecessary columns #\n columns_to_keep = ['country', 'year', 'loss_per_ha']\n df = df[columns_to_keep]\n # Add source #\n df.insert(0, 'source', \"hpffre\")\n # Return #\n return df", "title": "" }, { "docid": "96007615ded8f049d44cef4f052c5d18", "score": "0.5321801", "text": "def _get_hog_features(img, orient, pix_per_cell, cell_per_block):\n features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), block_norm='L2-Hys',\n transform_sqrt=True,\n feature_vector=True)\n return features", "title": "" }, { "docid": "99fd2cec3b7fa5b6e2bf44ae7dd98cc0", "score": "0.53180474", "text": "def gethists():\n histdict = {}\n\n histdict[\"h_uncert_energy\"] = TH1F('h_uncert_energy', 'Energy difference between reco and gen lepton; E_gen - E_reco [GeV]', 100, -1, 1)\n\n\n histdict[\"h_uncert_rescaling\"] = TH1F('h_uncert_rescaling', 'Energy difference between parton energy and jet energy; E_{jet} / E_{parton} - 1 [GeV]', 100, -0.2, 0.2)\n histdict[\"h_uncert_theta\"] = TH1F('h_uncert_theta', 'Theta difference between reco and gen jet;theta_gen - theta_reco [rad]', 200, -0.1, 0.1)\n histdict[\"h_uncert_phi\"] = TH1F('h_uncert_phi', 'Phi difference between reco and gen jet;phi_gen - phi_reco [rad]', 100, -0.1, 0.1)\n histdict[\"h_uncert_log_boost\"] = TH1F('h_uncert_log_boost', 'log(boost) difference between gen and reco jet', 100, -6, 0)\n\n histdict[\"h_corr_theta_alpha\"] = TH2F('h_corr_alpha_theta', 'Correlation between theta and the rescaling coefficient;theta[rad];alpha', 120, -3, 3, 200, -1, 3)\n histdict[\"h_corr_phi_alpha\"] = TH2F('h_corr_alpha_phi', 'Correlation between phi and the rescaling coefficient;phi[rad];alpha', 60, -3, 3, 200, -1, 3)\n histdict[\"h_corr_theta_phi\"] = TH2F('h_corr_phi_theta', 'Correlation between theta and phi jet;theta[rad];phi[rad]', 60, -3, 3, 120, -6, 6)\n histdict[\"h_corr_log_boost_alpha\"] = TH2F('h_corr_alpha_log_boost', 'Correlation between the logarithm of the boost and the rescaling coefficient;log(boost);alpha', 200, -2, 2, 200, -2, 3)\n histdict[\"h_corr_log_boost_theta\"] = TH2F('h_corr_theta_log_boost', 'Correlation between the logarithm of 
the boost and theta;log(boost);theta', 200, -2, 2, 200, -2, 2)\n histdict[\"h_corr_log_boost_phi\"] = TH2F('h_corr_phi_log_boost', 'Correlation between the logarithm of the boost and phi;log(boost);phi', 200, -2, 2, 200, -10, 10)\n\n return histdict", "title": "" }, { "docid": "ae31c79d71ea7bc9ea65609a339e26bf", "score": "0.53117156", "text": "def _read_genel(self, data: bytes, n: int) -> int:\n self.op2.log.info('geom skipping GENEL in GEOM2')\n #op2.log.info(f'geom skipping GENEL in GEOM2; len(data)={len(data)-12}')\n #print(n)\n ints = np.frombuffer(data[n:], dtype='int32').copy()\n #floats = np.frombuffer(data[n:], dtype='float32').copy()\n i = 0\n while i < len(ints):\n #1 EID I Element identification number\n eid = ints[i]\n\n iminus1 = np.where(ints[i+1:] == -1)[0]\n #print('iminus1', iminus1)\n idelta0 = iminus1[0]\n idelta1 = iminus1[1]\n # print('idelta0', idelta0)\n uc = ints[i+1:i+1+idelta0].reshape(idelta0//2, 2)\n print(uc)\n j = i + 1 + idelta0 + 1\n #2 UI I Independent grid point identification number\n #3 CI I Component number\n #Words 2 through 3 repeat until End of Record\n\n nrows = ints[j]\n print('nrows=', nrows)\n mucd = ints[j:i+1+idelta1]\n mc = mucd[0]\n nucd = len(mucd) - 1\n ucd = mucd[1:].reshape(nucd//2, 2)\n print(f'M(c) = {mc}')\n print(ucd)\n i = i + 1 + idelta1 + 1\n #4 M(C) I Number of rows and columns in K or Z and rows in S\n #5 UD I Dependent grid point identification number\n #6 CD I Component number\n #Words 5 through 6 repeat until End of Record\n\n # ---------------\n print('-----------------')\n\n #7 N(C) I Number of columns in S (4)\n #8 F I 1 means Z, 2 means K -> Z\n\n #9 KZIJ RS Lower triangular terms of the K or Z matrix. See Notes.\n #Word 9 repeats MM times\n # 10 NZERO(C) I\n # NZERO =1 Actually \" 0\"\n # 11 SIJ RS Terms of the S matrix\n # Word 11 repeats M times\n # Word 11 repeats N times\n # NZERO =0\n #End NZERO\n #12 UNDEF none\n #Word 12 repeats until End of Record\n\n nc = ints[i]\n f = ints[i+1]\n i += 2\n print(f'nc={nc} f={f}')\n #print(ints[i:].min())\n #print(ints[i+55])\n #print(floats[i:i+55].tolist())\n #print(ints[i+55:].tolist())\n #print(ints[i:].tolist())\n #print(floats[i:])\n #print(len(floats[i:]))\n break\n #self.show_data(data[12:])\n return len(data)", "title": "" }, { "docid": "7429ddd85dbd3428fbaf471c8e1f50aa", "score": "0.5309542", "text": "def hhh():\n\n\n\n\tfilein1 = 'raw_data/AtRegNet.txt'\n\tfileout = 'enrichment_results/AtRegNet_results.txt'\n\n\tAtReg_data = read_data(filein1)\n\tDbase = parse_data(AtReg_data)\n\t#write_data(fileout, Dbase)\n\n\n\tfilein2 = 'enrichment_results/TF_location_Ligterink_2014.txt'\n\tresults_data = read_data(filein2)\n\tparse_res = parse_results(results_data)\n\n\n\n\n\t#----------------------------------------------------------------------\n\t#draw a network!\n\t#----------------------------------------------------------------------\n\n\tprint \"Initializing a graph, standby...\"\n\tG = nx.Graph()\n\n\n\tprint \"Retrieving nodes, edges, labels and positions, standby...\"\n\n\tedges, nodes = filter_data(Dbase, \"AG\")\n\te_nodes = enumerate(nodes)\n\n\t#The labels \n\tlabels = {}\n\tfor i in range(0, len(nodes)):\n\t\tlabels[i] = nodes[i]\n\t\t\n\t#inverted labels (inverted dictionary)\n\tinv_labels = {v:k for k,v in labels.iteritems()}\n\n\te_nodes = []\n\tfor node in nodes:\n\t\te_nodes.append(inv_labels[node])\n\n\te_edges = []\n\tfor edge in edges:\n\t\ttemp_edge1 = inv_labels[edge[0]]\n\t\ttemp_edge2 = inv_labels[edge[1]]\n\t\tprint (temp_edge1, 
temp_edge2)\n\t\te_edges.append((temp_edge1, temp_edge2))\n\t\t#G.add_edge(temp_edge1, temp_edge2)\n\n\n\tpos=nx.spectral_layout(G)\n\tprint \"----------------------------------------\"\n\tprint \"The positions:\"\n\tfor k, v in pos.iteritems():\n\t\tprint k, v\n\tprint \"----------------------------------------\"\n\tprint \"The labels:\"\n\tfor k, v in labels.iteritems():\n\t\tprint k, v\n\tprint \"----------------------------------------\"\n\n\n\t# nodes\n\tnx.draw_networkx_nodes(G,pos, nodelist=e_nodes, node_color='b', node_size=100, alpha=0.8)\n\n\t# edges\n\tnx.draw_networkx_edges(G,pos, edgelist=e_edges, width=0.1, alpha=0.5, edge_color='r')\n\t\t\t\t\t\t \n\tprint \"added %s edges to the graph\"%len(edges)\n\tprint \"added %s nodes to the graph\"%len(nodes)\n\tprint \"added %s labels to the graph\"%len(labels)\n\n\n\tprint \"Commencing the buildup of the graph\"\n\n\n\tnx.draw(G)\n\tnx.draw_networkx_labels(G, pos, labels,font_size=8)\n\tplt.show()\n\n\tprint \"Saving graph...\"\n\tplt.savefig(\"enrichment_results/result_network.png\")", "title": "" }, { "docid": "17fb994c87d8c0dda8c88edfdbbebab8", "score": "0.53068805", "text": "def expert_to_gates(self):\n return self._ep(\n tf.concat,\n transpose_list_of_lists(\n self._dp(lambda d: d.expert_to_gates(), self._dispatchers)), 0)", "title": "" }, { "docid": "3cd7c044493764fb203cde237add4565", "score": "0.52912223", "text": "def getInfectiousfamilydata(InfectiousAgent, Infectedstate, HouseAgents, TotalHouse):\r\n AgentID, Agent_ind, Agent_indloc = np.intersect1d(HouseAgents[:,0], InfectiousAgent, return_indices=True) # get the active infectious individuals \r\n AsagentID = np.intersect1d(HouseAgents[:,0], InfectiousAgent[Infectedstate == 2]) # get the asy infectious individuals\r\n ActiveworkerID = np.intersect1d(HouseAgents[HouseAgents[:,7]==1,0], InfectiousAgent) # get the infectious outworkers\r\n ActivestudentID = np.intersect1d(HouseAgents[HouseAgents[:,8]==1,0], InfectiousAgent) # get the infectious students\r\n ActivedhelperID = np.intersect1d(HouseAgents[HouseAgents[:,9]==1,0], InfectiousAgent) # get the infectious students\r\n AsyactiveworkerID = np.intersect1d(HouseAgents[HouseAgents[:,7]==1,0], InfectiousAgent[Infectedstate == 2]) # get the asy_infectious outworkers\r\n AsyactivestudentID = np.intersect1d(HouseAgents[HouseAgents[:,8]==1,0], InfectiousAgent[Infectedstate == 2]) # get the asy_infectious students\r\n AsyactivedhelperID = np.intersect1d(HouseAgents[HouseAgents[:,9]==1,0], InfectiousAgent[Infectedstate == 2]) # get the asy_infectious students\r\n Temp_HouseID, Temp_familyactivenumber = np.unique(HouseAgents[Agent_ind, 6].astype(int), return_counts=True) # get the house ID and infectious number\r\n HouseID = np.c_[Temp_HouseID, Temp_familyactivenumber]\r\n HouseID = HouseID[HouseID[:,0].argsort()] \r\n Familysize = TotalHouse[HouseID[:,0], 1]\r\n Familyoutworkersize = TotalHouse[HouseID[:,0], 2]\r\n Familyoutstudentsize = TotalHouse[HouseID[:,0], 3]\r\n Familydomestic = TotalHouse[HouseID[:,0], 4]\r\n Familyhomeagents = TotalHouse[HouseID[:,0], 5] # total - 1,2,3,4\r\n Familyasyactivenumber = insertunbalancedfamilycount(HouseID, AsagentID, HouseAgents)\r\n Familyactiveworkernumber = insertunbalancedfamilycount(HouseID, ActiveworkerID, HouseAgents)\r\n Familyactivestudentnumber = insertunbalancedfamilycount(HouseID, ActivestudentID, HouseAgents)\r\n Familyactivedhelpernumber = insertunbalancedfamilycount(HouseID, ActivedhelperID, HouseAgents)\r\n Familyasyactiveworkernumber = insertunbalancedfamilycount(HouseID, 
AsyactiveworkerID, HouseAgents)\r\n Familyasyactivestudentnumber = insertunbalancedfamilycount(HouseID, AsyactivestudentID, HouseAgents)\r\n Familyasyactivedhelpernumber = insertunbalancedfamilycount(HouseID, AsyactivedhelperID, HouseAgents)\r\n UsedFamilydata = np.c_[HouseID,Familyasyactivenumber,Familyactiveworkernumber,Familyactivestudentnumber,Familyactivedhelpernumber,Familyasyactiveworkernumber,Familyasyactivestudentnumber,Familyasyactivedhelpernumber,Familysize,Familyoutworkersize,Familyoutstudentsize,Familydomestic,Familyhomeagents].astype(int) \r\n return UsedFamilydata", "title": "" }, { "docid": "2b1273f1f2e1ee0f54e8626a404b79c8", "score": "0.52681965", "text": "def model_gr():", "title": "" }, { "docid": "d8d69e1a0a265bd061de3fbd0512993c", "score": "0.5252605", "text": "def ghk_calculator(sim, cells, p):\n\n\n # FIXME the Goldman calculator must be altered to account for network pumps and channels!!\n # begin by initializing all summation arrays for the cell network:\n sum_PmAnion_out = []\n sum_PmAnion_in = []\n sum_PmCation_out = []\n sum_PmCation_in = []\n\n for i, z in enumerate(sim.zs):\n\n # tag as anion or cation\n ion_type = np.sign(z)\n\n # average values from membranes or environment to cell centres:\n Dm = np.dot(cells.M_sum_mems, sim.Dm_cells[i]) / cells.num_mems\n conc_cells = sim.cc_cells[i]\n\n if p.is_ecm is True:\n # average entities from membranes to the cell centres:\n conc_env = np.dot(cells.M_sum_mems, sim.cc_env[i][cells.map_mem2ecm]) / cells.num_mems\n\n else:\n\n conc_env = np.dot(cells.M_sum_mems, sim.cc_env[i]) / cells.num_mems\n\n if ion_type == -1:\n\n sum_PmAnion_in.append(Dm * conc_cells * (1 / p.tm))\n sum_PmAnion_out.append(Dm * conc_env * (1 / p.tm))\n\n\n if ion_type == 1:\n\n sum_PmCation_in.append(Dm * conc_cells * (1 / p.tm))\n sum_PmCation_out.append( Dm * conc_env * (1 / p.tm))\n\n if p.molecules_enabled:\n\n for name in sim.molecules.core.channels:\n obj = sim.molecules.core.channels[name]\n\n for ii, relP in zip(obj.channel_core.ions, obj.channel_core.rel_perm):\n\n ion_i = sim.get_ion(ii)\n zi = sim.zs[ion_i]\n conc_cells = sim.cc_cells[ion_i]\n\n # tag as anion or cation\n ion_type = np.sign(zi)\n\n if p.is_ecm is True:\n # average entities from membranes to the cell centres:\n conc_env = np.dot(cells.M_sum_mems, sim.cc_env[ion_i][cells.map_mem2ecm]) / cells.num_mems\n\n else:\n\n conc_env = np.dot(cells.M_sum_mems, sim.cc_env[ion_i]) / cells.num_mems\n\n if obj.channel_core.DChan is not None:\n Dmo = obj.channel_core.DChan*relP\n Dm = np.dot(cells.M_sum_mems, Dmo) / cells.num_mems\n\n else:\n Dm = 0.0\n\n if ion_type == -1:\n sum_PmAnion_in.append(Dm * conc_cells * (1 / p.tm))\n sum_PmAnion_out.append(Dm * conc_env * (1 / p.tm))\n\n if ion_type == 1:\n sum_PmCation_in.append( Dm * conc_cells * (1 / p.tm))\n sum_PmCation_out.append(Dm * conc_env * (1 / p.tm))\n\n if p.grn_enabled:\n\n for name in sim.grn.core.channels:\n obj = sim.grn.core.channels[name]\n\n for ii, relP in zip(obj.channel_core.ions, obj.channel_core.rel_perm):\n\n ion_i = sim.get_ion(ii)\n zi = sim.zs[ion_i]\n conc_cells = sim.cc_cells[ion_i]\n\n # tag as anion or cation\n ion_type = np.sign(zi)\n\n if p.is_ecm is True:\n # average entities from membranes to the cell centres:\n conc_env = np.dot(cells.M_sum_mems,\n sim.cc_env[ion_i][cells.map_mem2ecm]) / cells.num_mems\n\n else:\n\n conc_env = np.dot(cells.M_sum_mems, sim.cc_env[ion_i]) / cells.num_mems\n\n if obj.channel_core.DChan is not None:\n Dmo = obj.channel_core.DChan * relP\n Dm = np.dot(cells.M_sum_mems, 
Dmo) / cells.num_mems\n\n else:\n Dm = 0.0\n\n if ion_type == -1:\n sum_PmAnion_in.append(Dm * conc_cells * (1 / p.tm))\n sum_PmAnion_out.append(Dm * conc_env * (1 / p.tm))\n\n if ion_type == 1:\n sum_PmCation_in.append(Dm * conc_cells * (1 / p.tm))\n sum_PmCation_out.append(Dm * conc_env * (1 / p.tm))\n\n\n # NaKrate = (np.dot(cells.M_sum_mems, sim.rate_NaKATP)/cells.num_mems)\n\n # sum together contributions for Na and K flux across the membrane:\n # NaKflux = NaKrate - (2/3)*NaKrate\n\n sum_PmAnion_in_i = np.sum(sum_PmAnion_in, axis = 0)\n sum_PmAnion_out_i = np.sum(sum_PmAnion_out, axis=0)\n sum_PmCation_in_i = np.sum(sum_PmCation_in, axis=0)\n sum_PmCation_out_i = np.sum(sum_PmCation_out, axis=0)\n\n sim.vm_GHK = ((p.R * sim.T) / p.F) * np.log(\n (sum_PmCation_out_i + sum_PmAnion_in_i) / (sum_PmCation_in_i + sum_PmAnion_out_i))", "title": "" }, { "docid": "6f943ec097d74f57019f0461fee1bc8a", "score": "0.5248689", "text": "def HMM_distributions(self, data):\n # to add smoothing \n tag_counts = dict.fromkeys(self.tag_set,0)\n start_tag_counts = dict.fromkeys(self.tag_set,0)\n word_tag_counts = defaultdict(dict)\n tag_tag_counts = defaultdict(dict)\n for row_tag in self.tag_set:\n for col_tag in self.tag_set:\n tag_tag_counts[row_tag][col_tag] = 0\n\n for sentences, tags in zip(data['sentences'], data['tags']):\n # print(sentences, \"\\n\", tags)\n for i,_ in enumerate(tags):\n if i == 0:\n start_tag_counts[tags[i]] += 1\n else:\n tag_tag_counts[tags[i]][tags[i-1]] += 1\n for word, tag in zip (sentences, tags):\n tag_counts[tag] += 1\n if tag not in word_tag_counts[word]:\n word_tag_counts[word] = {key:0 for key in list(self.tag_set)}\n word_tag_counts[word][tag] += 1\n\n self.S = len(data['sentences'])\n self.T = len(tag_tag_counts)\n self.V = len(word_tag_counts)\n # print(self.S, self.T, self.V)\n\n # calculate Initial probability deistribution\n self.log_init_dist = self.Laplace_smoothing(start_tag_counts)\n # print(self.log_init_dist['IN'])\n\n # calculate Transition probability distribution\n self.log_tran_dist= self.Laplace_smoothing(tag_tag_counts)\n # print(self.log_emit_dist['1990']['CD'])\n\n # calculate Emission probability distribution\n self.log_emit_dist = self.Laplace_smoothing(word_tag_counts)\n # print(self.log_emit_dist['1990']['CD'])\n\n return", "title": "" }, { "docid": "8a8c425c4b014578014aea1900b997b2", "score": "0.5243114", "text": "def get_training_data(df=None):\n if df is None:\n df = load_dataset()\n\n # 5 input features (X) and target (y, Eseg)\n X = df[['diff_CEb/cn',\n 'gordy_eneg_host',\n 'diff_EA',\n 'r_dopant',\n 'IP_dopant']].values\n y = df['Eseg'].values\n\n return X, y", "title": "" }, { "docid": "88cbcd0fb312c9318ea1417830359d8d", "score": "0.52412456", "text": "def d_ghg(self, tmin=None, tmax=None):\n return (self.q_ghg(tmin=tmin, tmax=tmax, key='simulated') -\n self.q_ghg(tmin=tmin, tmax=tmax, key='observations'))", "title": "" }, { "docid": "9572bfbd3798188dbc53d2ff9814ce9c", "score": "0.52404517", "text": "def gen_gating():\n par['gating'] = []\n par['val_gating'] = []\n\n for t in range(par['n_tasks']):\n gating_task = np.zeros(par['n_hidden'], dtype=np.float32)\n val_gating_task = np.zeros(par['n_val_hidden'], dtype=np.float32)\n for i in range(par['n_val_hidden']):\n if par['gating_type'] == 'XdG':\n if np.random.rand() < 1-par['gate_pct']:\n val_gating_task[i] = 1\n elif par['gating_type'] is None:\n val_gating_task[i] = 1\n\n for i in range(par['n_hidden']):\n\n if par['gating_type'] == 'XdG':\n if np.random.rand() < 1-par['gate_pct']:\n 
gating_task[i] = 1\n\n elif par['gating_type'] == 'split':\n if t%par['n_subnetworks'] == i%par['n_subnetworks']:\n if np.random.rand() < 1-par['gate_pct']:\n gating_task[i] = 0.5\n else:\n gating_task[i] = 1\n\n elif par['gating_type'] == 'partial':\n if np.random.rand() < 1-par['gate_pct']:\n gating_task[i] = 0.5\n else:\n gating_task[i] = 1\n\n elif par['gating_type'] is None:\n gating_task[i] = 1\n\n par['gating'].append(gating_task)\n par['val_gating'].append(val_gating_task)", "title": "" }, { "docid": "5ead50896afd9fdaa6ebe3775e09cb3a", "score": "0.523582", "text": "def get_g_fetches(self,synth_placeholder,real_placeholder,model):\n graph_vars = model.gan(synth_placeholder,real_placeholder,FLAGS.channels,FLAGS.L2gan,FLAGS.wgan)\n g_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope=\"G\")\n\n g_train_op = self.get_train_op(graph_vars[1],FLAGS.dlr,g_vars,\"G\")\n return [g_train_op] + graph_vars[1:3]", "title": "" }, { "docid": "718f1c27da08f10d39beda2d73aaa773", "score": "0.52354825", "text": "def test(self, data):\n\n self.mask = data['green_mask'].float().to(self.device).unsqueeze(0)\n\n if self.which_model_G == 'Pluralistic':\n img_inverted = self.var_L * (1-self.mask)\n\n self.var_L = self.var_L * self.mask\n\n self.netG.eval()\n with torch.no_grad():\n if self.is_train:\n # normal\n if self.which_model_G == 'AdaFill' or self.which_model_G == 'MEDFE' or self.which_model_G == 'RFR' or self.which_model_G == 'LBAM' or self.which_model_G == 'DMFN' or self.which_model_G == 'partial' or self.which_model_G == 'Adaptive' or self.which_model_G == 'DFNet' or self.which_model_G == 'RN':\n self.fake_H = self.netG(self.var_L, self.mask)\n # 2 rgb images\n elif self.which_model_G == 'CRA' or self.which_model_G == 'pennet' or self.which_model_G == 'deepfillv1' or self.which_model_G == 'deepfillv2' or self.which_model_G == 'Global' or self.which_model_G == 'crfill' or self.which_model_G == 'DeepDFNet':\n self.fake_H, _ = self.netG(self.var_L, self.mask)\n\n # special\n elif self.which_model_G == 'Pluralistic':\n self.fake_H, _, _ = self.netG(self.var_L, img_inverted, self.mask)\n\n elif self.which_model_G == 'EdgeConnect':\n self.fake_H, _ = self.netG(self.var_L, self.canny_data, self.grayscale_data, self.mask)\n\n elif self.which_model_G == 'FRRN':\n self.fake_H, _, _ = self.netG(self.var_L, self.mask)\n\n elif self.which_model_G == 'PRVS':\n self.fake_H, _, _, _ = self.netG(self.var_L, self.mask, self.canny_data)\n\n elif self.which_model_G == 'CSA':\n _, self.fake_H , _, _ = self.netG(self.var_L, self.mask)\n\n elif self.which_model_G == 'atrous':\n self.fake_H = self.netG(self.var_L)\n else:\n print(\"Selected model is not implemented.\")\n\n # Merge inpainted data with original data in masked region\n self.fake_H = self.var_L * self.mask + self.fake_H * (1-self.mask)\n self.netG.train()", "title": "" }, { "docid": "6ffd92685402a9fdecb202b5ce78dffc", "score": "0.52292854", "text": "def data():\n titanic_df = h2o.import_file(path=\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/titanic.csv\")\n\n # Basic preprocessing\n # columns_to_be_used - List of columns which are used in the training/test\n # data\n # columns_to_factorize - List of columns with categorical variables\n columns_to_be_used = ['pclass', 'age', 'sex', 'sibsp', 'parch', 'ticket',\n 'embarked', 'fare', 'survived']\n columns_to_factorize = ['pclass', 'sex', 'sibsp', 'embarked', 'survived']\n # Factorizing the columns in the columns_to_factorize list\n for col in columns_to_factorize:\n 
titanic_df[col] = titanic_df[col].asfactor()\n # Selecting only the columns we need\n titanic_frame = titanic_df[columns_to_be_used]\n trainFr, testFr, validFr = titanic_frame.split_frame([0.6, 0.2],\n seed=1234)\n predictors = trainFr.names[:]\n # Removing the response column from the list of predictors\n predictors.remove('survived')\n response = 'survived'\n return trainFr, testFr, validFr, predictors, response", "title": "" }, { "docid": "bee13a9bd017b25b4be31ef308f93dd9", "score": "0.5206011", "text": "def gdata(ndata=100,mux=.5,muy=0.5):\n\n \"\"\"Instead of getting values from URL, try from request object\"\"\"\n mux = request.args.get('mux', '')\n muy = request.args.get('muy', '')\n\n x = np.random.normal(mux,.5,ndata)\n y = np.random.normal(muy,.5,ndata)\n A = 10. ** np.random.rand(ndata)\n c = np.random.rand(ndata)\n return json.dumps([{\"_id\": i, \"x\": x[i], \"y\": y[i], \"area\": A[i],\n \"color\": c[i]}\n for i in range(ndata)])", "title": "" }, { "docid": "e9b2a815f44b81a42e2bac0e427f4021", "score": "0.5205641", "text": "def read_data_geom(geom_t):\n \n ga = geom_t.read()\n G = pd.Series([ga[0][0][0],ga[0][0][1],ga[0][1][0],ga[0][1][1],\n ga[0][2][0],ga[0][2][1],ga[0][3]],\n index=['xdet_min','xdet_max','ydet_min','ydet_max',\n 'zdet_min','zdet_max','R'])\n return G", "title": "" }, { "docid": "71643d4bd1b1eff09e788df01c004fea", "score": "0.51990235", "text": "def discriminants(self,data):\n return self.posteriors(data)", "title": "" }, { "docid": "127d0a448575b03f33e88cb9318952e0", "score": "0.5183058", "text": "def inflection_data(self) -> dict:\n # get max/min of the gradient, which is basically the same as the 2nd deriv 0-crossing\n if self._edge_method == Edge.FWHM:\n raise ValueError(\n \"FWHM edge method does not have inflection points. Use a different edge detection method\"\n )\n d1 = np.gradient(\n gaussian_filter1d(\n self.values, sigma=self._edge_smoothing_ratio * len(self.values)\n )\n )\n (peak_idxs, _) = MultiProfile(d1).find_peaks(threshold=0.8)\n (valley_idxs, _) = MultiProfile(d1).find_valleys(threshold=0.8)\n left_idx = self._x_interp_to_original(peak_idxs[0]) # left-most index\n right_idx = self._x_interp_to_original(valley_idxs[-1]) # right-most index\n if self._edge_method == Edge.INFLECTION_DERIVATIVE:\n data = {\n \"left index (rounded)\": int(round(left_idx)),\n \"left index (exact)\": left_idx,\n \"right index (rounded)\": int(round(right_idx)),\n \"right index (exact)\": right_idx,\n \"left value (@rounded)\": self._y_original_to_interp(\n int(round(left_idx))\n ),\n \"left value (@exact)\": self._y_original_to_interp(left_idx),\n \"right value (@rounded)\": self._y_original_to_interp(\n int(round(right_idx))\n ),\n \"right value (@exact)\": self._y_original_to_interp(right_idx),\n }\n return data\n else: # Hill\n # the 2nd deriv is a good approximation for the inflection point. 
Start there and fit Hill about it\n # penum_half_window = self.field_data()['width (exact)'] * self._hill_window_ratio / 2\n penum_half_window = int(\n round(self._hill_window_ratio * abs(right_idx - left_idx) / 2)\n )\n\n # left side\n x_data = np.array(\n [\n x\n for x in np.arange(\n left_idx - penum_half_window, left_idx + penum_half_window\n )\n if x >= 0\n ]\n )\n y_data = self._y_original_to_interp(x_data)\n # y_data = self.values[left_idx - penum_half_window: left_idx + penum_half_window]\n left_hill = Hill.fit(x_data, y_data)\n left_infl = left_hill.inflection_idx()\n\n # right side\n x_data = np.array(\n [\n x\n for x in np.arange(\n right_idx - penum_half_window, right_idx + penum_half_window\n )\n if x < len(d1)\n ]\n )\n y_data = self._y_original_to_interp(x_data)\n right_hill = Hill.fit(x_data, y_data)\n right_infl = right_hill.inflection_idx()\n\n data = {\n \"left index (rounded)\": left_infl[\"index (rounded)\"],\n \"left index (exact)\": left_infl[\"index (exact)\"],\n \"right index (rounded)\": right_infl[\"index (rounded)\"],\n \"right index (exact)\": right_infl[\"index (exact)\"],\n \"left value (@exact)\": left_hill.y(left_infl[\"index (exact)\"]),\n \"right value (@exact)\": right_hill.y(right_infl[\"index (exact)\"]),\n \"left Hill params\": left_hill.params,\n \"right Hill params\": right_hill.params,\n }\n return data", "title": "" }, { "docid": "71c8addb2ddeff7f5929139275150e6e", "score": "0.5180216", "text": "def differential_evolution(generations, data):\n result_data = {\n 'generations': [],\n 'pareto_frontier': [],\n 'best': None,\n }\n current_data = data\n\n pareto_frontier = []\n result_data['generations'].append(fitness_vector(current_data))\n for generation in range(generations):\n print(generation)\n next_generation_data = []\n for index, item in enumerate(current_data):\n crossover_vector = get_new_item(index, item, current_data)\n dominance_data = dominance(item, crossover_vector)\n while not dominance_data['state']:\n crossover_vector = get_new_item(index, crossover_vector, current_data)\n dominance_data = dominance(item, crossover_vector)\n winner_vector = dominance_data['vector']\n next_generation_data.append(winner_vector)\n pareto_frontier = update_pareto_frontier(winner_vector, pareto_frontier)\n current_data = next_generation_data\n# dominating = dominate(current_data, crossover_data)\n# mutate_data = get_mutate_data(current_data)\n# crossover_data = get_crossover_data(current_data, mutate_data)\n# next_generation_data, next_generation_fitness, best_item, best = evaluate_fitness(current_data, crossover_data)\n \n \n# generation_data = []\n# for index, item in enumerate(next_generation_data):\n# generation_data.append({\n# 'item': next_generation_data[index],\n# 'fitness': next_generation_fitness[index],\n# })\n#\n# result_data['generations'].append(generation_data)\n# # result_data['generations_fitness'].append(next_generation_fitness)\n# result_data['generations_best_item'].append(best_item)\n# result_data['generations_best_fitness'].append(best)\n# if not result_data['best'] or best > result_data['best']:\n# result_data['best'] = best\n # print(generation)\n # pprint.pprint(current_data)\n# current_data = next_generation_data\n result_data['generations'].append(fitness_vector(pareto_frontier))\n import pprint\n pprint.pprint(result_data)\n return result_data", "title": "" }, { "docid": "860332feff0cc5a7b1a0304116a6e7d8", "score": "0.5176235", "text": "def gc_only(data):\n gc0 = data[\"DiffusionMark\"]\n return np.array(gc0)", "title": "" }, 
{ "docid": "126cbeff20aed54c76f48d541dfa8f15", "score": "0.51645476", "text": "def extract_training_data():\n # global values so we can build up a single X and y matrix/feature vector\n roadname_list = list()\n X_other_features = list()\n y = list()\n\n # go through each file and extract tab-separated data\n for i in range(TRAIN_FROM, TRAIN_TO + 1):\n filename = DATA_FOLDER + 'data.%s.gold.csv' % i\n with open(filename, 'r') as f:\n for line in f:\n data = [item.strip() for item in line.split(\"\\t\")]\n [roadname, malay_road_tag, average_word_length,\n all_words_in_dictionary, classification] = data[:5]\n roadname_list.append(roadname.strip())\n\n # glue together the rest of the data\n X_other_features.append([int(malay_road_tag),\n float(average_word_length),\n int(all_words_in_dictionary)])\n\n # build up the gold standard vector\n y.append(int(classification))\n\n # finally, get the output of the n-gram vectorizer\n # and \"glue\" it to the other features\n X_ngram_features = extract_ngrams(roadname_list, type='train')\n\n X = hstack((X_ngram_features, array(X_other_features)))\n\n return roadname_list, X, y", "title": "" }, { "docid": "b73514c838eb14ce455f987ecfaaaed5", "score": "0.5155958", "text": "def feature_extraction1(data):\n data = pd.DataFrame(data)\n\n column_mean = data.mean(axis=0)\n column_mean = column_mean[:6] # mean for megnetometer data was high corrolated\n\n column_sd = pd.DataFrame(data).std(axis=0)\n\n column_varience = data.var(axis=0)\n\n column_min = data.min(axis=0)\n column_min = column_min[:3] # min for gyro and megnetometer data was high corrolated\n\n column_max = data.max(axis=0)\n column_max = column_max[:3] # max for megnetometer data was high corrolated\n\n # column_mean_absolute_deviation = data.mad(axis=0) #high coor\n\n # column_iqr = iqr(data, axis=0) # high coor\n\n column_ara = average_resultant_acceleration(data) # 3 columns\n column_ara = column_ara[:1] # ara for gyro and megnetometer data was high corrolated\n\n column_skewness = data.skew(axis=0)\n\n column_kurtosis = kurtosis(data, axis=0)\n\n column_sma = sma(data) # Signal magnitude area, 3 columns\n column_sma = column_sma[:1] # sma for gyro and megnetometer data was high corrolated\n\n column_energy = energy(data) # high coor\n column_energy = column_energy[:3] # energy for gyro and megnetometer data was high corrolated\n\n column_zrc = zero_crossing_rate(data) # currently reduce accuracy\n\n column_no_peaks = no_peaks(data)\n\n features = np.concatenate(\n (column_mean, column_sd, column_varience, column_min, column_max,\n column_ara, column_skewness, column_kurtosis, column_sma, column_energy, column_zrc, column_no_peaks))\n\n return features", "title": "" }, { "docid": "f134c310d83e90330e26a8f3208473eb", "score": "0.51489633", "text": "def correlate_features_for_training():\n training = pd.read_csv(FLAGS.training_data, delimiter=\"|\",\n encoding=const.ENCODING)\n features = training.columns[7:]\n clades = collections.defaultdict(\n lambda:\n collections.defaultdict(\n lambda:\n collections.defaultdict(\n lambda:\n collections.defaultdict(int))))\n implicational = collections.defaultdict(\n lambda:\n collections.defaultdict(\n lambda:\n collections.defaultdict(int)))\n # Whereas the implicationals collect the conditional probability of v2, given\n # f1,v1 and f2, this just collects the conditional probability of v2 given\n # v1. 
If the latter is also high, then the fact that the former is high is\n # probably of less interest.\n implicational_prior = collections.defaultdict(\n lambda:\n collections.defaultdict(int))\n neighborhoods = collections.defaultdict(\n lambda:\n collections.defaultdict(\n lambda:\n collections.defaultdict(int)))\n\n feature_frequency = collections.defaultdict(int)\n distance_cache = {}\n training_list = training.to_dict(orient=\"row\")\n for language_df in training_list:\n genus = language_df[\"genus\"]\n family = language_df[\"family\"]\n for f1 in features:\n v1 = language_df[f1]\n if pd.isnull(v1):\n continue\n clades[\"genus\"][genus][f1][v1] += 1\n clades[\"family\"][family][f1][v1] += 1\n feature_frequency[f1, v1] += 1\n for f2 in features:\n if f1 == f2:\n continue\n v2 = language_df[f2]\n if pd.isnull(v2):\n continue\n implicational[f1, v1][f2][v2] += 1\n for f2 in features:\n v2 = language_df[f2]\n if pd.isnull(v2):\n continue\n implicational_prior[f2][v2] += 1\n # Find nearby languages\n lat1 = language_df[\"latitude\"]\n lng1 = language_df[\"longitude\"]\n close_language_indices = find_close_languages(\n lat1, lng1, training_list, distance_cache)\n if len(close_language_indices) == 1:\n continue\n for f1 in features:\n for k in close_language_indices:\n v1 = training_list[k][f1]\n if pd.isnull(v1):\n continue\n neighborhoods[lat1, lng1][f1][v1] += 1\n\n if FLAGS.dev_data:\n # If we are also processing the development data, make sure that we also\n # provide neighborhoods for the lat,lng for each language in the development\n # data --- of course only actually using data from training.\n development = pd.read_csv(FLAGS.dev_data, delimiter=\"|\",\n encoding=const.ENCODING)\n development_list = development.to_dict(orient=\"row\")\n for language_df in development_list:\n lat1 = language_df[\"latitude\"]\n lng1 = language_df[\"longitude\"]\n close_language_indices = find_close_languages(\n lat1, lng1, training_list, distance_cache)\n if len(close_language_indices) == 1:\n continue\n for f1 in features:\n for k in close_language_indices:\n v1 = training_list[k][f1]\n if pd.isnull(v1):\n continue\n neighborhoods[lat1, lng1][f1][v1] += 1\n\n clade_types = [(\"genus\", FLAGS.genus_filename),\n (\"family\", FLAGS.family_filename)]\n for clade_type, clade_filename in clade_types:\n write_clades(os.path.join(FLAGS.association_dir, clade_filename),\n clades[clade_type])\n write_neighborhoods(os.path.join(\n FLAGS.association_dir, FLAGS.neighborhood_filename), neighborhoods)\n write_implicational(os.path.join(\n FLAGS.association_dir, FLAGS.implicational_filename),\n implicational, implicational_prior)", "title": "" }, { "docid": "6c6be1ff92037f84b47ba512940f9f95", "score": "0.5146489", "text": "def get_g(self, h_stack, x_t):\n\t\tx_t_ft = self.get_x_ft(x_t)\n\t\t# x_t_ft.shape = (n_particles, batch_size, Dx_1)\n\t\twith tf.variable_scope(self.variable_scope + '/get_g'):\n\t\t\th_x_concat = tf.concat((h_stack, x_t_ft), axis = 2, name = 'h_x_concat')\n\t\t\tmu = fully_connected(h_x_concat, self.Dy, \n\t\t\t\t\t\t\t\t\tweights_initializer=xavier_initializer(uniform=False), \n\t\t\t\t\t\t\t\t\tactivation_fn = None, \n\t\t\t\t\t\t\t\t\treuse = tf.AUTO_REUSE, scope = \"mu\")\n\t\t\t# mu.shape \t\t\t\t= (n_paticles, batch_size, Dx)\n\t\t\tsigma = fully_connected(h_x_concat, self.Dy,\n\t\t\t\t\t\t\t\t\tweights_initializer=xavier_initializer(uniform=False),\n\t\t\t\t\t\t\t\t\tbiases_initializer=tf.constant_initializer(0.6),\n\t\t\t\t\t\t\t\t\tactivation_fn = tf.nn.softplus, \n\t\t\t\t\t\t\t\t\treuse 
= tf.AUTO_REUSE, scope = \"sigma\") + self.sigma_cons\n\t\t\t# sigma.shape \t\t\t= (n_paticles, batch_size, Dx)\n\t\t\tg = tfd.MultivariateNormalFullCovariance(loc = mu, covariance_matrix = tf.matrix_diag(sigma), \n\t\t\t\t\t\t\t\t\t\t\t\t\t name = \"g\")\n\t\t\treturn g", "title": "" }, { "docid": "cb16593e850d0e0dc3f25280ab9548c8", "score": "0.51448536", "text": "def get_latent_rep(self):\n self.x_skip_compound = None\n self.x_negative_compound = None\n self.x_skip_gene = None\n self.x_negative_gene = None\n \"\"\"\n get center node representation, case where center node is compound\n \"\"\"\n idx_origin = tf.constant([0])\n self.x_origin =tf.gather(self.Dense_final,idx_origin,axis=1)\n \"\"\"\n total data case\n \"\"\"\n idx_skip = tf.constant([i+1 for i in range(self.pos_compound_size+self.pos_gene_size-1)])\n idx_negative = \\\n tf.constant([i+self.pos_compound_size+self.pos_gene_size for i in range(self.negative_sample_size)])\n self.x_skip = tf.gather(self.Dense_final,idx_skip,axis=1)\n self.x_negative = tf.gather(self.Dense_final,idx_negative,axis=1)", "title": "" }, { "docid": "6ac7688a72003b443c25b1de40e21200", "score": "0.51286495", "text": "def _statistic(self):\n return self.G", "title": "" }, { "docid": "8b80c995557907a45b8f251cc86a10f3", "score": "0.5128069", "text": "def get_features(data, label,\n smiles_feature_path = 'data/joined_paccmann_data/smiles_atom_tokens.npy',\n gene_feature_path = 'data/joined_paccmann_data/selected_genes_20.npy'):\n \n annotations = pd.read_csv(\"data/joined_paccmann_data/annotations.csv\")\n all_drugs = annotations.inchi_key.unique()\n\n all_cells = annotations.cosmic_id.unique()\n\n # remove drugs and cells which are queried, but we do not have data for\n\n cells_in_query = data.cosmic_id.unique()\n drugs_in_query = data.inchi_key.unique()\n\n cells_not_in_data = set(cells_in_query)-set(all_cells)\n drugs_not_in_data = set(drugs_in_query)-set(all_drugs)\n if(cells_not_in_data):\n print(\"Removing \" + str(len(cells_not_in_data)) + \" of the queried cells, because missing data. \")\n\n keep_row = np.array([True]*len(data))\n # find all rows of the data which relate to cells, for which we have no kernel data\n for cell in cells_not_in_data:\n keep_row = keep_row&(data.cosmic_id!= cell)\n data = data.loc[keep_row, :]\n label = label.loc[keep_row]\n\n if(drugs_not_in_data):\n print(\"Removing \" + str(len(drugs_not_in_data)) + \" of the queried drugs, because missing data. 
\")\n\n keep_row = np.array([True]*len(data))\n # find all rows of the data which relate to drugs, for which we have no kernel data\n for drug in drugs_not_in_data:\n keep_row = keep_row&(data.inchi_key!= drug)\n data = data.loc[keep_row, :]\n label = label.loc[keep_row]\n\n\n cells_response = data.cosmic_id\n\n drugs_response = data.inchi_key\n\n # to filter out rows which are not queried\n mask_indices = []\n annotations_filtered =[]\n counter=0\n label = label.reset_index(drop=True)\n for drug, cell in tqdm(zip(drugs_response, cells_response), total=len(drugs_response)):\n\n is_experiment = (annotations.inchi_key==drug )& (annotations.cosmic_id == cell)\n if (np.any(is_experiment)):\n mask_indices.append(annotations[is_experiment].index[0])\n annotations_filtered.append(annotations[is_experiment].loc[:,[\"cosmic_id\", \"drug_names\", \"inchi_key\"]].values[0])\n else: \n label = label.drop(counter, axis=0)\n counter = counter+1\n\n smiles_atom_tokens = np.load(smiles_feature_path)[mask_indices,:]\n selected_genes_20 = np.load(gene_feature_path)[mask_indices,:]\n label = label.values.reshape(len(label) ,1) # DataFrame to reshaped numpy array\n \n\n annotations_filtered = np.stack(annotations_filtered)\n\n data_dict = {\"selected_genes_20\": selected_genes_20,\n \"smiles_atom_tokens\": smiles_atom_tokens,\n \"label\": label}\n return data_dict, annotations_filtered", "title": "" }, { "docid": "859bcc2b55a81092008e49cb3614ff7b", "score": "0.512519", "text": "def _compute_features(self, observation):\n n_geese = len(observation['geese'])\n features = np.zeros(2 + 2*n_geese - 1, dtype=np.float32)\n features[0] = get_steps_to_end(observation['step'], self.configuration['episodeSteps'])\n features[1] = get_steps_to_shrink(\n observation['step'], self.configuration['hunger_rate'])\n features[2] = get_steps_to_die(\n observation['step'], self.configuration['hunger_rate'],\n len(observation['geese'][observation['index']]))\n features[3:2 + n_geese] = [get_steps_to_die(\n observation['step'], self.configuration['hunger_rate'],\n len(goose)) for idx, goose in enumerate(observation['geese']) if idx != observation['index']]\n features[2 + n_geese:1 + 2*n_geese] = [len(observation['geese'][observation['index']]) - \\\n len(goose) for idx, goose in enumerate(observation['geese']) if idx != observation['index']]\n if self.normalize_features:\n features[0] /= self.configuration['episodeSteps']\n features[1] /= self.configuration['hunger_rate']\n features[2:2+n_geese] /= self.configuration['episodeSteps']\n return features", "title": "" }, { "docid": "fcbf651ec25a93b67d12f0b7a9a5e8f1", "score": "0.512391", "text": "def example(self, G, preff, choice):\n\n print(\"Grobner Bases wrt term order 1)\")\n print(\"G = (g1, g2, g3)\")\n for i in range(3):\n G[i].repr_module(G[i])\n\n g = []\n for i in range(len(G) - 1):\n for j in range(i + 1, len(F)):\n x = [G[i], G[j]]\n lt_gi, index1 = G[i].lt_in_module(preff, choice)\n lt_gj, index2 = G[j].lt_in_module(preff, choice)\n if index1 == index2:\n g.append(x)\n\n while len(g) != 0:\n random_num = r.randint(0, len(g))\n y = Module()\n y.module = []\n y.s_polynomial_for_modules(g[random_num - 1][0], g[random_num - 1][1], preff, choice)\n h = Module()\n h.module = []\n h = y.reduced_grobner_sev_terms(G, 3)\n g.remove(g[random_num - 1])\n\n if h.isNotEmpty():\n for j in range(len(G)):\n x = [h, G[j]]\n lt_h, index1 = h.lt_in_module(preff, choice)\n lt_gj, index2 = G[j].lt_in_module(preff, choice)\n if index1 == index2:\n g.append(x)\n G.append(h)\n\n print(\"Grobner 
Bases wrt term order (1, 2)\")\n print(\"G = (g1, g2, g3, g4)\")\n for i in range(4):\n G[i].repr_module(G[i])", "title": "" }, { "docid": "5b5926b78a4d2eb50856dc424c4129ff", "score": "0.51066816", "text": "def non_edge_feature_dataframe(g_train, g_test, g_parent, g_train_static, g_test_static, time, freq=5):\n train_data = {}\n total_pos = set()\n total_neg = set()\n total_edge = set()\n ts = time[1]\n te = time[2]\n it_index = time[4]\n parent_data = classification_train_data_static(g_parent, g_test_static)\n # print_attributes(parent_data, 'parent')\n train_data_static = classification_train_data_static(g_train_static, g_test_static)\n print_attributes(train_data_static[(train_data_static['label']==1)], 'pos')\n print_attributes(train_data_static[(train_data_static['label'] == 0)], 'neg')\n train_data_static = ut.shuffling(train_data_static, freq)\n # print_attributes(train_data_static, 'train')\n static_edge_set = set(train_data_static['row_name'])\n for t in range(ts, te, it_index):\n train_data[t] = classification_train_data_dynamic(g_train[t], g_test_static, static_edge_set)\n # print_attributes(train_data[t], 'train')\n total_pos.update(set(train_data[t][(train_data[t]['label'] == 1)]['row_name']))\n total_neg.update(set(train_data[t][(train_data[t]['label'] == 0)]['row_name']))\n total_edge.update(train_data[t]['row_name'])\n test_data_static = classification_test_data_static(g_test_static)\n # print_attributes(test_data_static, 'test')\n print(\"pos in time series:\", len(total_pos), \"neg in time series:\", len(total_neg),\n \"pos-neg ratio:\", len(total_pos) / len(total_neg), \"total:\", len(total_edge))\n return train_data, train_data_static, parent_data, test_data_static, list(total_edge)", "title": "" }, { "docid": "1e6351c11fee50a17d98136cc72741d3", "score": "0.51053894", "text": "def build_graph(self, features: dict):\n with tf.variable_scope(\"Behler\"):\n descriptors = self.get_g2_op(features)\n if self.angular:\n descriptors += self.get_g4_op(features)\n return self.split_descriptors(descriptors, features)", "title": "" }, { "docid": "422fbeabf6b5f1d7c500a02c048cffc8", "score": "0.50973964", "text": "def _read_gmbnds(self, data: bytes, n: int) -> int:\n op2 = self.op2\n op2.log.info('geom skipping GMBNDS in GEOM2')\n #(1, 0, 0, 0, 0, 'FEFACE ', 31, -1)\n ints = np.frombuffer(data[n:], dtype=op2.idtype).copy()\n iminus1 = np.where(ints == -1)[0]\n i0 = 0\n for iminus1i in iminus1:\n bid, n1, n2, n3, n4 = ints[i0:i0+5]\n s0 = n + (i0 + 5) * 4\n s1 = s0 + 8\n entity = data[s0:s1].decode('latin1').rstrip()\n assert entity in ['FEFACE', 'GMSURF', 'GRID'], (bid, n1, n2, n3, n4, entity)\n assert bid >= 0, (bid, n1, n2, n3, n4, entity)\n eids = ints[i0+7:iminus1i]\n #print(bid, n1, n2, n3, n4)\n #print('entity = %r' % entity)\n #print(eid)\n #print('-----')\n i0 = iminus1i + 1\n return len(data)", "title": "" }, { "docid": "21ae97fd77664c5a2009b7586c51b325", "score": "0.5095299", "text": "def train(dat,hyper,cv,gs):\n if gs != {}:", "title": "" }, { "docid": "5cd8d8dffed247f4d66f5df1b5425dc9", "score": "0.508784", "text": "def sc_nonadherent(self):\n H = self.sc_graph\n g = open(str(self.dir)+\"/csv/nonadherent_sc.csv\",\"w\")\n g1 = open(str(self.dir)+\"/csv/nonadherent_sc_ck_ids.csv\",\"w\")\n print>>g, \",\".join(map(str,self.header))\n \n look_up = self.look_up\n activity_table = self.activity\n \n for n in H.nodes():\n if self.G.node[n][\"weigh_ins\"] >=5:\n H.remove_node(n)\n \n nx.write_gml(H,str(self.dir)+\"/networks/method3_nonadherent_sc.gml\")\n\n print \"the number 
of small components that are nonadherent\", len(H)\n\n result = [(n, look_up[\"ck_id\"][n],look_up[\"initial_weight\"][n],look_up[\"weigh_ins\"][n],\\\n activity_table[\"activity\"][n], look_up[\"weight_change\"][n],look_up[\"percentage_weight_change\"][n],\\\n look_up[\"time_in_system\"][n],look_up[\"age\"][n],look_up[\"height\"][n],look_up[\"initial_bmi\"][n],\\\n look_up[\"final_bmi\"][n]) for n in list(map(int,H.nodes()))]\n \n norm = filter(lambda x: float(x[10]) < 25.0,result)\n over = filter(lambda x: 25< float(x[10]) < 30, result)\n ob = filter(lambda x: float(x[10]) > 30.0,result)\n \n x = open(str(self.dir)+\"/csv/nonadherent_sc_normal.csv\",\"w\")\n y = open(str(self.dir)+\"/csv/adherent_sc_overweight.csv\",\"w\")\n z = open(str(self.dir)+\"/csv/adherent_sc_obese.csv\",\"w\")\n \n #list of ck_ids for each group \n xx = open(str(self.dir)+\"/csv/adherent_sc_normal_ck_ids.csv\",\"w\")\n yy = open(str(self.dir)+\"/csv/adherent_sc_overweight_ck_ids.csv\",\"w\")\n zz = open(str(self.dir)+\"/csv/adherent_sc_obese_ck_ids.csv\",\"w\")\n \n bmi_data = 2*[norm,over,ob]\n files = [x,y,z,xx,yy,zz]\n \n #headers\n for ii in range(len(bmi_data)):\n if ii<3:\n print>>files[ii],\",\".join(map(str,self.header))\n \n #write the csv files to for each set\n for ii in range(len(bmi_data)):\n for n in bmi_data[ii]:\n if ii<3:\n print>>files[ii],\",\".join(map(str,n))\n else:\n print>>files[ii],n[1]\n \n for x in files:\n x.close()\n \n for r in result:\n print>>g, \",\".join(map(str,r)) \n print>>g1, r[1]\n \n g.close() \n g1.close()\n \n return H, result", "title": "" }, { "docid": "30aa920d39dbc477081ef816c332297e", "score": "0.5086622", "text": "def emKickoff(self):\n\t\tparentDataList= []\n\t\tnewDataSet = []\n\t\t# This function looks at each line of data {aka an event} in the original originalDataSet\n\t\tfor event in self.originalDataSet:\n\t\t\t# Make a dictionary that maps nodeName -> realNodeData it recievied in this event\n\t\t\tfor i, realNodeData in enumerate(event): \n\t\t\t nodeName = self.nodePositions[i].name\n\t\t\t if realNodeData != '-': # or if it is not in the set of values that mean the data is missing\n\t\t\t \tparentDataList.append((nodeName, realNodeData)) # [nodeName] = realNodeData\n\t\t\t # otherwise nothing is know about the data so don't place it into the disctionary, since that will mess up function that try to look\n\t\t\t # for probabities of various events as that event will be the event that the node = '-' as opposed to summing over all its values\n\t\t\teventList =[] # this holds lists that represent different events which occur with different probabilities they occured\n\t\t\t# it will hold one event provided that all the data is there, but it will hold multiple events if the data is missing and it uses a probability\n\t\t\t# distribution to fill in the data\n\t\t\t\"\"\" \n\t\t\t\tIf there is no missing data the event occured with a probability of one, \n\t\t\t\tIf there is missing data, I break the events down into multiple events each having a < 1 probability of occuring, that sums to one. 
\n\t\t\t\tThe way it is stored is that each eventList contains tuples of \n\t\t\t\"\"\"\n\t\t\t# Go through each piece of data and create a new dataset that includes the probability of each event and doesn't have any '-' spaces or missing data\n\t\t\tfor i, realNodeData in enumerate(event):\n\t\t\t\tnode = self.nodePositions[i]\n\t\t\t\tnodeName = node.name\n\t\t\t\tif realNodeData == '-':\n\t\t\t\t\ttemp_event_lists =[]\n\t\t\t\t\tdistribution = self.randomProbDistrubtionGivenNetwork(node, parentDataList) # returns a tuple of the form ( (value, %), (value, %))\n\t\t\t\t\tfor (distrubtedValue, percentage) in distribution: # MAY NEED TO DO ENUMERATE or something\n\t\t\t\t\t\tif not eventList: # if eventList is empty the next chunk of code won't run so\n\t\t\t\t\t\t\tcompleteEvent = []\n\t\t\t\t\t\t\tcompleteEvent.append( ( (nodeName, distrubtedValue), percentage) )\n\t\t\t\t\t\t\ttemp_event_lists.append(completeEvent)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor incompleteEvent in eventList:\n\t\t\t\t\t\t\t\tcompleteEvent = copy.deepcopy(incompleteEvent)\n\t\t\t\t\t\t\t\tcompleteEvent.append( ( (nodeName, distrubtedValue), percentage) )\n\t\t\t\t\t\t\t\ttemp_event_lists.append(completeEvent)\n\t\t\t\t\teventList = temp_event_lists\n\t\t\t\telse:\n\t\t\t\t\t# just append a tuple of (realNodeData, 1) to each value in the eventList\n\t\t\t\t\tif not eventList:\n\t\t\t\t\t\teventList.append([( (nodeName, realNodeData), 1)] )\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor incompleteEvent in eventList:\n\t\t\t\t\t\t\tincompleteEvent.append( ( (nodeName, realNodeData), 1) )\n\t\t\t # [ ( ( ('name', value),('name', value) ) , %)]\n\t\t\tfor event in eventList:\n\t\t\t\ttl = []\n\t\t\t\ttotalp = 1.0\n\t\t\t\tfor (partial_event, percentage) in event:\n\t\t\t\t\ttotalp *= percentage\n\t\t\t\t\ttl.append(partial_event)\n\t\t\t\tnewDataSet.append( (tl, totalp) ) \n\t\treturn newDataSet", "title": "" }, { "docid": "ad6ef048e55b4577132aa1cdbc04e5e3", "score": "0.5086163", "text": "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--input-gtf\",\n \"-i\",\n dest=\"input_gtf\",\n default=None,\n required=True,\n help=\"input GTF\",\n )\n parser.add_argument(\n \"--output-gtf\", \"-o\", dest=\"output_gtf\", default=None, help=\"output GTF\"\n )\n args = parser.parse_args()\n\n intron_cands = {}\n exons = {}\n exon_ids = {}\n gene_locs = {}\n gene_locations = {}\n # gather the location of each genes and exons; and exon_ids to avoid duplication\n with gzip.open(args.input_gtf, \"rt\") if args.input_gtf.endswith(\".gz\") else open(\n args.input_gtf, \"r\"\n ) as input_file:\n for line in input_file:\n if not line.startswith(\"#\"):\n fields = [x.strip() for x in line.strip().split(\"\\t\")]\n if fields[2] == \"exon\":\n gene_id = get_feature(line.strip(), \"gene_id\")\n exon_id = get_feature(line.strip(), \"exon_id\")\n contig_id = fields[0]\n locpair = (int(fields[3]), int(fields[4]))\n if contig_id not in exons:\n exons[contig_id] = []\n if exon_id not in exon_ids:\n exons[contig_id].append(locpair)\n exon_ids[exon_id] = True\n elif fields[2] == \"gene\":\n gene_id = get_feature(line.strip(), \"gene_id\")\n contig_id = fields[0]\n locpair = (int(fields[3]), int(fields[4]), gene_id)\n if gene_id is not None:\n if contig_id not in gene_locs:\n gene_locs[contig_id] = []\n gene_locs[contig_id].append(locpair)\n\n gene_locations[gene_id] = locpair\n\n # sorted the gene locs by start\n for contig_id in gene_locs:\n gene_locs[contig_id].sort(key=lambda x: x[0], reverse=False)\n # print(contig_id, 
len(gene_locs[contig_id]), gene_locs[contig_id][:3], gene_locs[contig_id][-3:])\n\n # keep sort the exons by start by contig\n for contig_id in exons:\n exons[contig_id].sort(key=lambda x: x[0], reverse=False)\n\n # compute the intron candidates for each contig\n # where any bp that is not an exon is an candidate intron whithout\n # worrying about the inclusiveness of that base pair within the range\n # of a gene\n for contig_id in exons:\n intron_cands[contig_id] = []\n last_exon_end = 0\n for exon_coor in exons[contig_id]:\n if exon_coor[0] > last_exon_end:\n pair = (last_exon_end, exon_coor[0])\n intron_cands[contig_id].append(pair)\n\n last_exon_end = max(last_exon_end, exon_coor[1])\n\n # add the remaining last\n pair = (last_exon_end, 30000000000)\n intron_cands[contig_id].append(pair)\n\n # global ordered (ascending) array of intronic start or end points\n introns = {}\n for contig_id in gene_locs:\n\n introns[contig_id] = []\n intronic_points = []\n for coor in intron_cands[contig_id]:\n intronic_points.append(coor[0])\n intronic_points.append(coor[1])\n\n for gene_loc in gene_locs[contig_id]:\n i = bisect_right(intronic_points, gene_loc[0], 0, len(intronic_points))\n j = bisect_left(intronic_points, gene_loc[1], 0, len(intronic_points))\n\n if i % 2 == 1: # it is a start location on i\n intron_start = gene_loc[0]\n intron_end = intronic_points[i]\n # introns[contig_id].append( (intron_start, intron_end, gene_loc[2]) )\n\n for k in range(i, j, 2):\n introns[contig_id].append(\n (intronic_points[k], intronic_points[k + 1], gene_loc[2])\n )\n\n if j % 2 == 1:\n intron_start = intronic_points[j]\n intron_end = gene_loc[1]\n introns[contig_id].append((intron_start, intron_end, gene_loc[2]))\n\n genewise_introns = {}\n for contig_id in introns:\n genewise_introns[contig_id] = {}\n for intron in introns[contig_id]:\n if intron[2] not in genewise_introns[contig_id]:\n genewise_introns[contig_id][intron[2]] = []\n genewise_introns[contig_id][intron[2]].append((intron[0], intron[1]))\n\n # print(contig_id, len(introns[contig_id]), introns[contig_id][:5])\n intron_no = 1\n with gzip.open(args.input_gtf, \"rt\") if args.input_gtf.endswith(\".gz\") else open(\n args.input_gtf, \"r\"\n ) as input_file:\n with gzip.open(args.output_gtf, \"wb\") if args.output_gtf.endswith(\n \".gz\"\n ) else open(args.output_gtf, \"w\") as output_gtf:\n\n for line in input_file:\n if line.startswith(\"#\"):\n if args.output_gtf.endswith(\".gz\"):\n output_gtf.write(\"{}\".format(line.strip() + \"\\n\").encode())\n else:\n output_gtf.write(line.strip() + \"\\n\")\n else:\n fields = [x.strip() for x in line.strip().split(\"\\t\")]\n if fields[2] == \"exon\":\n if args.output_gtf.endswith(\".gz\"):\n output_gtf.write(\"{}\".format(line.strip() + \"\\n\").encode())\n else:\n output_gtf.write(line.strip() + \"\\n\")\n\n elif fields[2] == \"gene\":\n if args.output_gtf.endswith(\".gz\"):\n output_gtf.write(\"{}\".format(line.strip() + \"\\n\").encode())\n else:\n output_gtf.write(line.strip() + \"\\n\")\n\n gene_id = get_feature(line.strip(), \"gene_id\")\n contig_id = fields[0]\n if gene_id in genewise_introns[contig_id]:\n for intron in genewise_introns[contig_id][gene_id]:\n mod_fields = fields.copy()\n mod_fields[2] = \"intron\"\n mod_fields[3] = str(intron[0])\n mod_fields[4] = str(intron[1])\n mod_fields[8] = mod_fields[\n 8\n ] + ' intron_id \"{}\"'.format(str(intron_no))\n intron_no += 1\n if args.output_gtf.endswith(\".gz\"):\n output_gtf.write(\n \"{}\".format(\n \"\\t\".join(mod_fields) + \"\\n\"\n 
).encode()\n )\n else:\n output_gtf.write(\"\\t\".join(mod_fields) + \"\\n\")\n else:\n if args.output_gtf.endswith(\".gz\"):\n output_gtf.write(\"{}\".format(line.strip() + \"\\n\").encode())\n else:\n output_gtf.write(line.strip() + \"\\n\")", "title": "" }, { "docid": "878a51621c7b31d0a0b404c233b48a2e", "score": "0.50851667", "text": "def calculate_normalized_ged(data):\n norm_ged = data[\"ged\"]/(0.5*(len(data[\"labels_1\"])+len(data[\"labels_2\"])))\n return norm_ged", "title": "" }, { "docid": "188d05558d02d1368ed1958406838ed8", "score": "0.5084039", "text": "def data(self):\n\n\t\ttmax = len(self.interaction[-1].c.Nf)\n\t\tout = []\n\t\tfor ii in range(0,len(self.interaction)):\n\t\t\toffset = tmax-len(self.interaction[ii].c.Nf)\n\t\t\tfor jj in range(0,len(self.interaction[ii].c.Nf)):\n\t\t\t\t# print ii,jj\n\t\t\t\t#out.append([offset+jj,self.interaction[ii].c.Nf[jj],self.interaction[ii].c.Nv[jj],ii,\"C\",self.interaction[ii].c.func[jj]])\n\t\t\t\t# if ii == 3 :\n\t\t\t\t\t# pdb.set_trace()\n\t\t\t\tout.append([offset+jj,self.interaction[ii].c.Nf[jj],ii,\"C\",self.interaction[ii].c.func[jj]])\n\t\tout = np.array(out)\n\t\tdf1 = pd.DataFrame(out, columns=[\"t\",\"Nf\",\"id\",\"type\",\"func\"])#.to_csv(\"./save_cell.csv\",sep=\";\")\n\n\t\tout = []\n\t\tfor ii in range(0,len(self.interaction)):\n\t\t\toffset = tmax-len(self.interaction[ii].e.Nf)\n\t\t\t# if offset < 0 :\n\t\t\t\t# pdb.set_trace()\n\t\t\tfor jj in range(0,len(self.interaction[ii].e.Nf)):\n\t\t\t\tout.append([offset+jj,self.interaction[ii].e.Nf[jj],ii,\"E\",self.interaction[ii].e.func[jj]])\n\t\tout = np.array(out)\n\t\tdf2 = pd.DataFrame(out, columns=[\"t\",\"Nf\",\"id\",\"type\",\"func\"])#.to_csv(\"./save_env.csv\",sep=\";\")\n\t\tdf1 = df1.append(df2)\n\t\tdf1.to_csv(\"./save_83_46.csv\",sep=\";\")\n\n\t\t# plt.plot(out[:,0],out[:,1])\n\t\t# plt.show()", "title": "" }, { "docid": "8172dce51a0a7fe53ac9cc140fd39a48", "score": "0.50733227", "text": "def refactor_data(self):\n emotion_labels = self.labels.reshape(-1,)\n ind = np.arange(len(emotion_labels))\n \n # sort the data according to emotion categories \n emo_indices = []\n for emo in self.emotion_categories:\n emo_ind_i = ind[emotion_labels == emo]\n random.shuffle(emo_ind_i)\n emo_indices.append(emo_ind_i)\n emo_indices = np.array(emo_indices)\n \n self.features = self.features[emo_indices]\n self.embeds = self.embeds[emo_indices]\n self.speaker_id = self.speaker_id[emo_indices]\n self.labels = self.labels[emo_indices]\n\n self.n_utterances = self.features.shape[1]\n self.n_bins = int(np.ceil(self.n_emotions / self.emotions_per_batch))", "title": "" }, { "docid": "8b78a09d71375c728dd1d638c0c7f9a4", "score": "0.50671315", "text": "def get_known_formation_entang(dm):\n assert dm.num_rows == 4\n conc = TwoQubitState.get_concurrence(dm)\n u = (1+np.sqrt(1-conc**2))/2\n return ut.get_entropy_from_probs(np.array([u, 1-u]))", "title": "" }, { "docid": "7f8b42c98796ca4d85d4d80a769face2", "score": "0.5054577", "text": "def gendatareg():\n numsamples = 1000\n randmat = np.random.random((numsamples,5))*100\n labels = np.random.randint(100, size=numsamples)\n randmat[:,1] = labels*2 + np.random.randn() * 0.33\n randmat[:,2] = labels/2 + np.random.randn() * 0.33\n data = randmat\n traindataset = data[:(numsamples/2),:]\n testdataset = data[(numsamples/2):,:]\n trainlabels = labels[:(numsamples/2)]\n testlabels = labels[(numsamples/2):]\n print traindataset.shape\n print testdataset.shape\n print trainlabels.shape\n print testlabels.shape\n return 
traindataset,testdataset,trainlabels,testlabels", "title": "" }, { "docid": "99183a009cfea6ef9dbd930ee0d69710", "score": "0.5052974", "text": "def build_GTE_graph(gtf): \n \n if type(gtf) == str:\n # input is a file path\n gtf = gtf_2_json(gtf)\n \n gene_dict = {\"_START_\":{\"location\":[0]}}\n gene = \"_START_\"\n transcript_num = 0\n for i,anno in tqdm(enumerate(gtf)):\n if anno['hierarchy'] == 'gene':\n # previous gene_id\n gene_dict[gene][\"location\"].append(i) \n # renew gene_id\n gene = anno[\"gene_id\"]\n gene_dict[gene] = {'n_transcript':0,\"location\":[i]} # initiate a dict for gene\n transcript_num = 0\n\n elif anno['hierarchy'] == 'transcript':\n gene_dict[gene]['n_transcript'] += 1\n transcript = anno['transcript_id']\n gene_dict[gene][transcript] = []\n\n elif anno['hierarchy'] == 'exon':\n gene_dict[gene][transcript].append(anno['exon_id'])\n\n gene_dict[gene][\"location\"].append(i) \n gene_dict.pop(\"_START_\");\n\n return gene_dict", "title": "" }, { "docid": "1be7bbf60dc33707fc3fc7edeb073eed", "score": "0.5049469", "text": "def extract_HOG(generator, train, verbose=True):\n samples = generator.size(train=train)\n for images, labels in generator.generate_batch(train=train, batch_size=samples):\n start_time = time.time()\n features = _HOG(images)\n hog_time = time.time()\n if verbose:\n print \"Features calculated in \", hog_time - start_time, \" seconds\"\n return features, labels", "title": "" }, { "docid": "2044b561df86dd83a8d845c245cb5d06", "score": "0.5047067", "text": "def ent_stats(gm):\n entropy_stats = {}\n pij = gm.f/np.sum(gm.f)\n phatij = gm.ests/np.sum(gm.f)\n max_ent = round(np.log(len(gm.dt)), 4)\n entropy_stats['maximum_entropy'] = max_ent\n pred_ent = round(-np.sum(phatij*np.log(phatij)), 4)\n entropy_stats['predicted_entropy'] = pred_ent\n obs_ent = round(-np.sum(pij*np.log(pij)), 4)\n entropy_stats['observed_entropy'] = obs_ent\n diff_pred_ent = round(max_ent - pred_ent, 4)\n entropy_stats['max_pred_deviance'] = diff_pred_ent\n diff_obs_ent = round(max_ent - obs_ent, 4)\n entropy_stats['max_obs_deviance'] = diff_obs_ent\n diff_ent = round(pred_ent - obs_ent, 4)\n entropy_stats['pred_obs_deviance'] = diff_ent\n ent_rs = round(diff_pred_ent/diff_obs_ent, 4)\n entropy_stats['entropy_ratio'] = ent_rs\n obs_flows = np.sum(gm.f)\n var_pred_ent = round(((np.sum(phatij*(np.log(phatij)**2))-pred_ent**2)/obs_flows) + ((len(gm.dt)-1)/(2*obs_flows**2)), 11)\n entropy_stats['variance_pred_entropy'] = var_pred_ent\n var_obs_ent = round(((np.sum(pij*np.log(pij)**2)-obs_ent**2)/obs_flows) + ((len(gm.dt)-1)/(2*obs_flows**2)), 11)\n entropy_stats['variance_obs_entropy'] = var_obs_ent\n t_stat_ent = round((pred_ent-obs_ent)/((var_pred_ent+var_obs_ent)**.5), 4)\n entropy_stats['t_stat_entropy'] = t_stat_ent\n return entropy_stats", "title": "" }, { "docid": "4e9b3438937919c63fb9a79dc08e05e5", "score": "0.50346786", "text": "def init_GMM(self):\n\n print('Initializing GMM:')\n # Get image size with first image\n img_size = np.shape(cv2.imread(self.img_list[0], cv2.IMREAD_GRAYSCALE))\n w,h = img_size[0:2]\n\n #Allocate space for image intensities array\n init_data = np.zeros((self.N_train,w,h))\n\n #Prepare data to fit the GMM\n print('Initializing GMM from frames 0-' + str(self.N_train) + ':')\n i=0\n for filename in tqdm(self.img_list[0:self.N_train]):\n img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)\n init_data[i,:,:] = img\n i+=1\n init_data = init_data.reshape((self.N_train*w*h, 1)) #Flatten input data array\n print(np.shape(init_data))\n\n #Fit GMM \n gm = 
GaussianMixture(n_components=self.n_components, covariance_type='spherical', max_iter=50).fit(init_data)\n\n order = np.flip(np.argsort(gm.weights_))\n self.GMM_weights = np.take_along_axis(gm.weights_, order, axis=0)\n self.GMM_means = np.take_along_axis(gm.means_.reshape(self.n_components), order, axis=0)\n self.GMM_dev = np.take_along_axis(np.sqrt(gm.covariances_), order, axis=0)\n\n return self.GMM_weights, self.GMM_means, self.GMM_dev", "title": "" }, { "docid": "e1a99ce241a361d3b63dac73902a3d07", "score": "0.503409", "text": "def evaluate_model(self, prev_data, new_data):\r\n prev_data = np.array(prev_data)\r\n data = np.array(np.append(prev_data, [new_data], axis=0))\r\n breath_data = data[:, 6]\r\n\r\n # print(\"All data: \", data)\r\n if self.look_for_gups != True:\r\n breath_result = self.detect_breath(breath_data)\r\n # print(\"Returned result: \", breath_result)\r\n\r\n if breath_result[0] == 1: # if inhale detected\r\n # print(\"Breath in detected\")\r\n return [1, 0, 0, 0, 0] # return that we are still inhaling\r\n\r\n elif breath_result[1] == 1:\r\n # print(\"Breathe out detected\")\r\n return [0, 1, 0, 0, 0]\r\n else: # a local maxima or minima\r\n x_array = np.array(range(0, len(breath_data[-20:-5])))\r\n slope = best_fit_slope(x_array, breath_data[-20:-5])\r\n print(slope)\r\n\r\n self.look_for_gups = True\r\n\r\n\r\n else:\r\n is_gup = self.detect_gup(data[:, 3]) # send in only gyro data\r\n if is_gup:\r\n print(\"Gup detected\")\r\n return [0, 0, 0, 0, 1]\r\n else:\r\n breath_result = self.detect_breath(breath_data)\r\n if breath_result[1] == 1: # detected that we are now breathing out\r\n self.look_for_gups = False # TODO this will probably trigger too often (need to show a trend of exhale)\r\n # print(\"Exhale after gup predicted\")\r\n return [0, 1, 0, 0, 0]\r\n\r\n elif breath_result[0] == 1: # detected that we are now breathing out\r\n self.look_for_gups = False # TODO this will probably trigger too often (need to show a trend of exhale)\r\n # print(\"Inhale after gup predicted\")\r\n return [1, 0, 0, 0, 0]\r\n\r\n # returns all zeros if nothing is detected\r\n result = [0, 0, 0, 0, 0]\r\n return result", "title": "" }, { "docid": "c53c1ec429c65486707e47a850ba2c8c", "score": "0.5030899", "text": "def gauss_data_gen(nndb, info=None, merge=False):\n if info is None: info = {}\n if not ('samples_per_class' in info): info['samples_per_class'] = 2\n if not ('center_cls_mean' in info): info['center_cls_mean'] = False\n if not ('noise_ratio' in info): info['noise_ratio'] = 0.2\n if not ('std' in info): info['std'] = None # Force standard deviation\n # to avoid the calculation of the std over the samples in the class.\n # 'noise_ratio' will be ignored\n\n # Initialize necessary variables\n max_samples_per_class = info['samples_per_class']\n use_cls_mean = info['center_cls_mean'] \n noise_ratio = info['noise_ratio']\n force_std = info['std']\n fsize = nndb.h * nndb.w * nndb.ch\n\n # Create a new database to save the generated data\n nndb_aug = NNdb('augmented', db_format=nndb.db_format)\n\n for i in range(0, nndb.cls_n):\n \n # Fetch samples per class i\n n_per_class = nndb.n_per_class[i]\n \n st = nndb.cls_st[i]\n en = st + n_per_class\n \n # Fetch input database for class i\n I = nndb.features[:, st:en]\n\n # By default, gauss distributions are centered at each image\n M = I\n\n # Gauss distributions are centered at the mean of each class\n if (use_cls_mean): M = np.mean(I, 1, keepdims=True)\n \n # Initialize a 2D matrix to store augmented data for class\n I_aug = 
np.zeros((fsize, np.int32(np.ceil(max_samples_per_class/n_per_class))*n_per_class), dtype=nndb.db.dtype)\n \n # Initialize the index variable for above matrix\n j = 0\n \n # Keep track of current sample count per class\n cur_samples_per_class = 0\n while (cur_samples_per_class < max_samples_per_class):\n r = np.random.normal(size=(fsize, n_per_class)) # r is from gaussian (m=0, std=1) \n \n if (force_std is None): # Caculate the std from the samples in the class i\n I_aug[:, j*n_per_class: (j+1)*n_per_class] = np.asarray(M + noise_ratio * np.std(I, 1, keepdims=True) * r, dtype=nndb.db.dtype)\n else:\n I_aug[:, j*n_per_class: (j+1)*n_per_class] = np.asarray(M + force_std * r, dtype=nndb.db.dtype)\n \n cur_samples_per_class = cur_samples_per_class + n_per_class\n j = j + 1\n \n I_aug = nndb_aug.features_to_data(I_aug[:, 0:max_samples_per_class], nndb.h, nndb.w, nndb.ch, nndb.db.dtype)\n nndb_aug.add_data(I_aug)\n nndb_aug.update_attr(True, max_samples_per_class)\n\n nndb_aug.finalize()\n\n if (merge):\n nndb_aug = nndb.merge(nndb_aug)\n\n return nndb_aug", "title": "" }, { "docid": "a80e260fed073017caf4285eb9a26255", "score": "0.50294566", "text": "def challenger_damage(filename=\"challenger.npy\", guess=np.array([20., -1.])):\n #Get data\n data = np.load(filename)\n x = data[:,0]\n y = data[:,1]\n #Get domain to fit on\n domain = np.linspace(30,100,71)\n #Fit and store prediction values using LogisticRegression class\n launch = LogisticRegression1D()\n launch.fit(x,y,guess)\n vals = [launch.predict(i) for i in domain]\n\n #Plot\n plt.plot(x, y, marker = 'o', linestyle = '', label = 'Previous damage')\n plt.plot(domain, vals, label = 'Probability')\n plt.plot(31, launch.predict(31), marker='o', label = 'P(Damage) of Launch')\n plt.ylabel('O-Ring Damage')\n plt.xlabel('Temperature')\n plt.title(\"Probability of O-Ring Damage\")\n plt.legend()\n plt.show()\n return launch.predict(31)", "title": "" }, { "docid": "cb0f956f06b676f1ce823777742094c1", "score": "0.50276756", "text": "def FeedbackData(self):\n data_vech = []\n data_target = []\n data_traffic = []\n\n data_vech = [self._output_vech_timestep,self._output_vech_speed_x,self._output_vech_speed_y,self._output_vech_speed_z,\n self._output_vech_angular_x,self._output_vech_angular_y,self._output_vech_angular_z,\n self._output_vech_pose_x,self._output_vech_pose_y,self._output_vech_pose_z,\n self._output_vech_quaternion_x,self._output_vech_quaternion_y,self._output_vech_quaternion_z,\n self._output_vech_quaternion_w]\n\n data_target = [self._output_target_timestep,self._output_target_speed_x,self._output_target_speed_y,self._output_target_speed_z,\n self._output_target_angular_x,self._output_target_angular_y,self._output_target_angular_z,\n self._output_target_pose_x,self._output_target_pose_y,self._output_target_pose_z,\n self._output_target_quaternion_x,self._output_target_quaternion_y,self._output_target_quaternion_z,\n self._output_target_quaternion_w,self._output_target_frenet_s, self._output_target_frenet_d]\n\n data_traffic = [self._output_traffic_timestep, self._output_traffic_light_id, self._output_traffic_recognition_result,\n self._output_traffic_reamining_time, self._output_traffic_light_name, self._output_traffic_pose_x,\n self._output_traffic_pose_y, self._output_traffic_pose_z]\n\n type_vech = self.find_msg_type(self._vech)\n type_target = self.find_msg_type(self._state)\n type_traffic = self.find_msg_type(self._traffic)\n\n return data_vech, data_target, data_traffic, type_vech, type_target, type_traffic", "title": "" }, { 
"docid": "b950042e81497565a6e9ef72e8f31ff7", "score": "0.50235045", "text": "def get_training_data():\n print(\"Getting Training Data\")\n df = clean_data(pd.read_csv(RAW_DATA))\n df = engineer_features(df)\n df = create_training_data(df)\n return df.to_numpy()", "title": "" }, { "docid": "d3c7fa9ffa88d237b62e53ebb28ccfcc", "score": "0.5022151", "text": "def _generate_G_from_H(H, variable_weight=False):\n H = np.array(H)\n n_edge = H.shape[1]\n # the weight of the hyperedge\n W = np.ones(n_edge)\n # the degree of the node\n DV = np.sum(H * W, axis=1)\n # the degree of the hyperedge\n DE = np.sum(H, axis=0)\n\n invDE = np.mat(np.diag(np.power(DE, -1)))\n DV2 = np.mat(np.diag(np.power(DV, -0.5)))\n W = np.mat(np.diag(W))\n H = np.mat(H)\n HT = H.T\n\n if variable_weight:\n DV2_H = DV2 * H\n invDE_HT_DV2 = invDE * HT * DV2\n return DV2_H, W, invDE_HT_DV2\n else:\n G = DV2 * H * W * invDE * HT * DV2\n return G", "title": "" }, { "docid": "790ff3ae2efea6990891622eb1958df6", "score": "0.5016419", "text": "def getGathers():\n global s1,so,s2\n n1,no,n2 = 1151,100,3599#12901\n d1,do,d2 = 0.008,0.1,0.00625 # s,km,km\n f1,fo,f2 = 0.0,0.0,0.0 # s,km,km\n s1,so,s2 = Sampling(n1,d1,f1),Sampling(no,do,fo),Sampling(n2,d2,f2)\n anidir = _datdir+\"ani/\"\n image = zerofloat(n1,no,n2)\n g = readImage(image,anidir+\"cmps\")\n return g", "title": "" }, { "docid": "d8a962295dcb723ec9d72fdeda2e43c6", "score": "0.5016145", "text": "def getLigData(lig, IND, device=\"cpu\"):\n II = []\n JJ = []\n XE = []\n XN = []\n NL = []\n score = []\n cnt = 0\n for index in IND:\n II.extend([lig['atom_connect'][index][:, 0].long() - 1 + cnt])\n JJ.extend([lig['atom_connect'][index][:, 1].long() - 1 + cnt])\n XE.append(lig['bond_type'][index].t().float())\n\n # feats = []\n # a_feats_idx = torch.nonzero(lig['atom_types'][index])[:, 1]\n # for idx in a_feats_idx:\n # tmp = torch.tensor(atom_feats[idx.item()])\n # tmp[0] = tmp[0] * 0.1\n # tmp[2] = tmp[2] * 0.01\n # tmp[3] = tmp[3] * 100\n # feats.append(tmp)\n # feats = torch.vstack(feats)\n\n XN.append(torch.vstack((lig['atom_types'][index].t().float(),\n 5 * lig['charges'][index].unsqueeze(0))))\n NL.append(XN[-1].shape[1])\n cnt += NL[-1]\n score.append(lig['scores'][index])\n\n II = torch.hstack(II).to(device)\n JJ = torch.hstack(JJ).to(device)\n XE = torch.hstack(XE).unsqueeze(0).to(device)\n XN = torch.hstack(XN).unsqueeze(0).to(device)\n NL = torch.tensor(NL, device=device)\n score = torch.vstack(score).to(device)\n return II, JJ, XN, XE, NL, score", "title": "" }, { "docid": "cfc08d0609ba630e37b4e90e05360133", "score": "0.5015979", "text": "def to_homo(G):\n num_nodes_per_ntype = [G.number_of_nodes(ntype) for ntype in G.ntypes]\n offset_per_ntype = np.insert(np.cumsum(num_nodes_per_ntype), 0, 0)\n srcs = []\n dsts = []\n etype_ids = []\n eids = []\n ntype_ids = []\n nids = []\n total_num_nodes = 0\n\n for ntype_id, ntype in enumerate(G.ntypes):\n num_nodes = G.number_of_nodes(ntype)\n total_num_nodes += num_nodes\n ntype_ids.append(F.full_1d(num_nodes, ntype_id, F.int64, F.cpu()))\n nids.append(F.arange(0, num_nodes))\n\n for etype_id, etype in enumerate(G.canonical_etypes):\n srctype, _, dsttype = etype\n src, dst = G.all_edges(etype=etype, order='eid')\n num_edges = len(src)\n srcs.append(src + int(offset_per_ntype[G.get_ntype_id(srctype)]))\n dsts.append(dst + int(offset_per_ntype[G.get_ntype_id(dsttype)]))\n etype_ids.append(F.full_1d(num_edges, etype_id, F.int64, F.cpu()))\n eids.append(F.arange(0, num_edges))\n\n retg = graph((F.cat(srcs, 0), F.cat(dsts, 0)), 
card=total_num_nodes, validate=False)\n retg.ndata[NTYPE] = F.cat(ntype_ids, 0)\n retg.ndata[NID] = F.cat(nids, 0)\n retg.edata[ETYPE] = F.cat(etype_ids, 0)\n retg.edata[EID] = F.cat(eids, 0)\n\n # features\n comb_nf = combine_frames(G._node_frames, range(len(G.ntypes)))\n comb_ef = combine_frames(G._edge_frames, range(len(G.etypes)))\n if comb_nf is not None:\n retg.ndata.update(comb_nf)\n if comb_ef is not None:\n retg.edata.update(comb_ef)\n\n return retg", "title": "" }, { "docid": "c5de72da0f97bf792ae9e1fc7041a59a", "score": "0.50100386", "text": "def compute_HOG(image: np.array):\n\n\t# Calcular los descriptores de la imagen\n\t#hog = HOGDescriptor()\n\thist = __hog.compute(image)\n\n\treturn hist", "title": "" }, { "docid": "b28a98cee84f565b45ff6e59e09d8b9c", "score": "0.50085086", "text": "def linear_regression(self):\n \n cid_list = sorted(self.cid2pmap_dict.keys())\n cid2species = {}\n for cid in cid_list:\n comp = self.kegg.cid2compound(cid)\n cid2species[cid] = (comp.get_nH(), comp.get_charge())\n \n N = len(self.train_rowids)\n y = zeros((N, 1))\n X = zeros((N, len(cid_list)))\n for r in range(N):\n row_data = self.data[self.train_rowids[r]]\n dG0_r = row_data.dG0_r\n for (cid, coeff) in row_data.sparse.iteritems():\n c = cid_list.index(cid)\n X[r, c] = coeff\n (nH, z) = cid2species[cid]\n dG0_r -= coeff * (nH*R*row_data.T*log(10)*row_data.pH - 2.91482*(z**2 - nH)*sqrt(row_data.I) / (1 + 1.6*sqrt(row_data.I)))\n y[r, 0] = dG0_r\n\n inv_corr_mat = pinv(dot(X.T, X))\n dG0_f_vec = dot(dot(inv_corr_mat, X.T), y)\n \n # add the formation energies to the CID dictionary\n for c in range(len(cid_list)):\n cid = cid_list[c]\n (nH, z) = cid2species[cid]\n cid2species[cid] = (dG0_f_vec[c], nH, z)\n return cid2species", "title": "" }, { "docid": "fb6c443299bd2906789ab279d0e79414", "score": "0.50072455", "text": "def get_grads(hgn, batch_size, dtype):\n register_hooks(hgn)\n rand_in = torch.rand((batch_size, hgn.seq_len, hgn.channels, 32, 32)).type(dtype)\n hgn.fit(rand_in)\n\n names = GRADIENTS.keys()\n max_grads = [np.abs((GRADIENTS[k][1] / GRADIENTS[k][0])).max() for k in names]\n mean_grads = [np.abs((GRADIENTS[k][1] / GRADIENTS[k][0])).mean() for k in names]\n\n return names, max_grads, mean_grads", "title": "" }, { "docid": "00e63af0843d496474d66c90c64188ef", "score": "0.5005885", "text": "def get_all_g(self, words):\n\n N = len(words)\n tags = list(self.tags)\n M = len(tags)\n\n g0 = np.empty(M)\n g = np.empty((N-1, M, M))\n # Initialize first layer\n for i, tag in enumerate(tags):\n g0[i] = sum([self.feature_weights[f] * count for f, count in self.get_features(words[0], tag, \"START\").items()]) \n\n for i in range(1, N):\n for j, curr_tag in enumerate(tags):\n for k, prev_tag in enumerate(tags):\n g[i-1, k, j] = sum([self.feature_weights[f] * count for f, count in self.get_features(words[i], curr_tag, prev_tag).items()]) \n return (g0, g)", "title": "" }, { "docid": "e257766a9751f2f0c0f74e4ab2bf7298", "score": "0.50032353", "text": "def preprocess(self):\n \n #Dropping Id (Id on tet data is required to create submisson file)\n self.train_data_org.drop(['Id'], axis=1, inplace=True)\n #self.test_data_org.drop(['Id'], axis=1, inplace=True)\n \n train = self.train_data_org.copy()\n test = self.test_data_org.copy()\n \n #Dropping features which have more than 50% missing values\n train.drop(['Fence', 'Alley','MiscFeature', 'PoolQC' ], axis=1, inplace=True)\n test.drop(['Fence', 'Alley','MiscFeature', 'PoolQC' ], axis=1, inplace=True)\n \n #New feature created 'FirePlacePresent'. 
Many houses may not have a fire place.\n train['FirePlacePresent'] = train['FireplaceQu'].apply(lambda x: 0 if pd.isna(x) else 1)\n test['FirePlacePresent'] = test['FireplaceQu'].apply(lambda x: 0 if pd.isna(x) else 1)\n \n #Imputing values for missing data\n train['FireplaceQu'] = train['FireplaceQu'].fillna('None') \n test['FireplaceQu'] = test['FireplaceQu'].fillna('None')\n \n #Imputing values for LotFrontage based on the Neighborhood. \n #Range LotFrontage values depends on the Neighborhood.\n train['LotFrontage'] = train.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median()))\n test['LotFrontage'] = test.groupby('Neighborhood')['LotFrontage'].transform(lambda x: x.fillna(x.median()))\n \n #All values are NaN for those houses which doesn't have garage.\n #GarageArea and GarageCars are zero.\n train['GarageYrBlt'] = train['GarageYrBlt'].fillna(0)\n train['GarageCond'] = train['GarageCond'].fillna('None')\n train['GarageFinish'] = train['GarageFinish'].fillna('None')\n train['GarageQual'] = train['GarageQual'].fillna('None')\n train['GarageType'] = train['GarageType'].fillna('None')\n #new feature 'GaragePresent' is created. There are houses without garage\n train['GaragePresent'] = train['GarageArea'].apply(lambda x: 1 if x > 0 else 0)\n test['GarageYrBlt'] = test['GarageYrBlt'].fillna(0)\n test['GarageCond'] = test['GarageCond'].fillna('None')\n test['GarageFinish'] = test['GarageFinish'].fillna('None')\n test['GarageQual'] = test['GarageQual'].fillna('None')\n test['GarageType'] = test['GarageType'].fillna('None')\n #new feature 'GaragePresent' is created. There are houses without garage\n test['GaragePresent'] = test['GarageArea'].apply(lambda x: 1 if x > 0 else 0)\n \n #Here we can create one extra feature GaragePresent\n# train['GaragePresent'] = train['GarageArea'].apply(lambda x: 0 if x is 'None' else 1)\n# test['GaragePresent'] = test['GarageArea'].apply(lambda x: 0 if x is 'None' else 1)\n ##No improvement on model\n \n #impute 'None' for missing values\n train['BsmtFinType1'] = train['BsmtFinType1'].fillna('None')\n train['BsmtFinType2'] = train['BsmtFinType2'].fillna('None')\n train['BsmtQual'] = train['BsmtQual'].fillna('None')\n train['BsmtExposure'] = train['BsmtExposure'].fillna('None')\n train['BsmtCond'] = train['BsmtCond'].fillna('None')\n #new feature 'BsmtPresent' is created. There are houses without basement\n train['BsmtPresent'] = train['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0)\n test['BsmtFinType1'] = test['BsmtFinType1'].fillna('None')\n test['BsmtFinType2'] = test['BsmtFinType2'].fillna('None')\n test['BsmtQual'] = test['BsmtQual'].fillna('None')\n test['BsmtExposure'] = test['BsmtExposure'].fillna('None')\n test['BsmtCond'] = test['BsmtCond'].fillna('None')\n #new feature 'BsmtPresent' is created. 
There are houses without basement.\n test['BsmtPresent'] = test['TotalBsmtSF'].apply(lambda x: 1 if x > 0 else 0)\n \n #MasVnrArea, MasVnrType\n #impute zero and 'None'\n train['MasVnrArea'] = train['MasVnrArea'].fillna(0)\n train['MasVnrType'] = train['MasVnrType'].fillna('None')\n test['MasVnrArea'] = test['MasVnrArea'].fillna(0)\n test['MasVnrType'] = test['MasVnrType'].fillna('None')\n \n #Imputing missing 'MSZoning' values based on 'MSSubClass'.\n #Range of 'MSZoning' values depends on 'MSSubClass'\n test['MSZoning'] = test.groupby('MSSubClass')['MSZoning'].transform(lambda x: x.fillna(x.mode()[0]))\n\n #Creating new features 'BuildingAge and 'BuildingGarageAge'\n train['BuildingAge'] = train['YearBuilt'].apply(lambda x:2019-x) \n test['BuildingAge'] = test['YearBuilt'].apply(lambda x:2019-x)\n \n train['BuildingGarageAge'] = train['GarageYrBlt'].apply(lambda x: (2019-x) if x>0 else x)\n test['BuildingGarageAge'] = test['GarageYrBlt'].apply(lambda x: (2019-x) if x>0 else x)\n\n\n# ##No improvement on model bu using below new features.\n \n# train['BuildingRemodAddAge'] = train['YearRemodAdd'].apply(lambda x:2019-x)\n# test['BuildingRemodAddAge'] = test['YearRemodAdd'].apply(lambda x:2019-x) \n#\n# train['BuildingSoldAge'] = train['YrSold'].apply(lambda x:2019-x)\n# test['BuildingSoldAge'] = test['YrSold'].apply(lambda x:2019-x)\n\n# train.drop(['YrSold', 'YearRemodAdd', 'GarageYrBlt', 'YearBuilt'], axis=1, inplace=True)\n# test.drop(['YrSold', 'YearRemodAdd', 'GarageYrBlt', 'YearBuilt'], axis=1, inplace=True) \n \n #Electrical\n #train['Electrical'].mode() is 'SBrkr'\n train['Electrical'] = train['Electrical'].fillna('SBrkr')\n \n #Updating predictor list\n self.update_predictor_list(train) \n \n #impute values for remaining missing data\n for i in self.catVariable:\n train[i] = train[i].fillna('None')\n test[i] = test[i].fillna('None')\n for i in self.numVariable:\n train[i] = train[i].fillna(0)\n test[i] = test[i].fillna(0)\n \n \n return train, test", "title": "" }, { "docid": "54e3e6284d3440944f5ebe316f52fec2", "score": "0.49980274", "text": "def main():\n p = 0.3\n## dnn = SimGNNTrainer(100, 0, 5, 10000)\n## result = []\n## for r in range(10):\n## dnn.train(p, r / 10)\n## ac = 1 - dnn.test(p, r/10)\n## result.append(ac)\n## print(r, ac)\n## print(result)\n## plt.plot(result, color=\"red\")\n \n dnn = GNNTrainer(100, 5, 1000)\n result = []\n for r in range(10):\n dnn.train(p, r / 10)\n ac = 1 - dnn.test(p, r/10)\n result.append(ac)\n print(r, ac)\n print(result)\n plt.plot(np.linspace(0, 0.9, 10), result, color=\"green\", label=\"GCN with 5 features\")\n dnn = GNNTrainer(100, 10, 1000)\n result = []\n for r in range(10):\n dnn.train(p, r / 10)\n ac = 1 - dnn.test(p, r/10)\n result.append(ac)\n print(r, ac)\n print(result)\n plt.plot(np.linspace(0, 0.9, 10), result, color=\"red\", label=\"GCN with 10 features\")\n dnn = GNNTrainer(100, 50, 1000)\n result = []\n for r in range(10):\n dnn.train(p, r / 10)\n ac = 1 - dnn.test(p, r/10)\n result.append(ac)\n print(r, ac)\n print(result)\n plt.plot(np.linspace(0, 0.9, 10), result, color=\"blue\", label=\"GCN with 50 features\")\n## dnn = SimGNNTrainer(100, 5, 0, 1000)\n## result = []\n## for r in range(10):\n## dnn.train(p, r / 10)\n## ac = 1 - dnn.test(p, r/10)\n## result.append(ac)\n## print(r, ac)\n## print(result)\n## plt.plot(result, color=\"blue\")\n plt.plot(np.linspace(0, 0.9, 10), subgraphPrecision(100, 0.3, 10, 1000, 5), color=\"black\", label=\"Subgraph counts with 5 features\")\n plt.legend()\n plt.xlabel(\"noise\")\n 
plt.ylabel(\"accuracy\")\n plt.savefig(\"gcn.pdf\")", "title": "" }, { "docid": "ad9a3c196f4061fba8327370d4d337a6", "score": "0.49940363", "text": "def getGeometricFactor(self):\r\n k_list = []\r\n num_rdg = len(self.readings)\r\n for k in range(num_rdg):\r\n num_dipole = len(self.readings[k].Vdp)\r\n for j in range(num_dipole):\r\n if (self.readings[k].Vdp[j].flagRho == \"Accept\"):\r\n # k_a = self.readings[k].Vdp[j].K\r\n k_a = self.readings[k].Vdp[j].calcGeoFactor(self.readings[k].Idp)\r\n k_list.append(1 / k_a)\r\n return np.asarray(k_list)", "title": "" }, { "docid": "774d911951cd4db5f1476de1230e2cbd", "score": "0.49842182", "text": "def gf0(self, hartree=False) -> xr.DataArray:\n e_onsite = self.e_onsite - (0 if hartree is False else hartree*self.U)\n gf_0 = 1./(e_onsite + self.z - self.hybrid_fct)\n return gf_0", "title": "" }, { "docid": "bf333e886ebfa86632c1a3ecbdbd2a57", "score": "0.49828964", "text": "def get_Hom_Het(self, vcf):\r\n\r\n\t\tHet_num = 0\r\n\t\tHom_num = 0\t\r\n\t\tfor line in vcf:\r\n\t\t\thehe = line.split('\\t')[-2].split(':')\r\n\t\t\txixi = line.split('\\t')[-1].split(':')\r\n\t\t\tformat = dict(zip(hehe,xixi))\r\n\t\t\tif not format.has_key('GT'):\r\n\t\t\t\tcontinue\r\n\t\t\tif format['GT'] == '1/1' or format['GT'] == '0/0':\r\n\t\t\t\tHom_num += 1\r\n\t\t\telse:\r\n\t\t\t\tHet_num += 1\r\n\t\treturn Hom_num, Het_num", "title": "" }, { "docid": "4977e636ecbe1a0f5ffe3b7d4b4e7a0a", "score": "0.49827123", "text": "def get_greynoise_data(self, ip_address):\n # Malicious sample\n # {\n # \"ip\": \"222.187.238.136\",\n # \"noise\": true,\n # \"riot\": false,\n # \"classification\": \"malicious\",\n # \"name\": \"unknown\",\n # \"link\": \"https://viz.greynoise.io/ip/222.187.238.136\",\n # \"last_seen\": \"2021-06-23\",\n # \"message\": \"Success\"\n # }\n #\n # Benign sample\n # {\n # \"ip\": \"8.8.8.8\",\n # \"noise\": false,\n # \"riot\": true,\n # \"classification\": \"benign\",\n # \"name\": \"Google Public DNS\",\n # \"link\": \"https://viz.greynoise.io/riot/8.8.8.8\",\n # \"last_seen\": \"2021-06-23\",\n # \"message\": \"Success\"\n # }\n #\n # Unknown sample\n # {\n # \"ip\": \"123.123.115.117\",\n # \"noise\": false,\n # \"riot\": false,\n # \"message\": \"IP not observed scanning the internet or contained in RIOT data set.\"\n # }\n try:\n gn_headers = {\n \"key\": self.api_key,\n \"User-Agent\": \"greynoise-redelk-enrichment\",\n }\n gn_data = requests.get(\n f\"{self.greynoise_url}{ip_address}\", headers=gn_headers\n )\n json_result = gn_data.json()\n result = {\n \"ip\": ip_address,\n \"noise\": get_value(\"noise\", json_result, False),\n \"riot\": get_value(\"riot\", json_result, False),\n \"classification\": get_value(\"classification\", json_result, \"unknown\"),\n \"name\": get_value(\"name\", json_result, \"unknown\"),\n \"link\": get_value(\"link\", json_result, \"unknown\"),\n \"last_seen\": get_value(\"last_seen\", json_result, None),\n \"message\": get_value(\"message\", json_result, \"unknown\"),\n \"query_timestamp\": int(time()),\n }\n return result\n # pylint: disable=broad-except\n except Exception as error:\n self.logger.error(\"Error getting greynoise IP %s\", ip_address)\n self.logger.exception(error)\n return False", "title": "" }, { "docid": "34787e172acbccfa66bc1fcac8cdcd4a", "score": "0.4981036", "text": "def get_aged_g2_from_g12q(g12q, age_edge, age_center=None, timeperframe=1, time_sampling=\"log\", num_bufs=8):\n\n arr = rotate_g12q_to_rectangle(g12q)\n m, n = arr.shape # m should be 2*n-1\n # age_edge, age_center = get_qedge( 
qstart=slice_start,qend= slice_end,\n # qwidth = slice_width, noqs =slice_num )\n # print(arr.shape)\n age_edge = np.int_(age_edge)\n if age_center is None:\n age_center = (age_edge[:, 0] + age_edge[:, 1]) // 2\n\n age_edge_ = age_edge * 2\n age_center_ = age_center * timeperframe\n g2_aged = {}\n lag_dict = {}\n # print( age_edge, age_center)\n for i, age in enumerate(age_center_):\n age_edges_0, age_edges_1 = age_edge_[i][0], age_edge_[i][1]\n # print(i, age, age_edges_0, age_edges_1)\n g2i = arr[age_edges_0:age_edges_1].mean(axis=0)\n # print('here')\n g2i_ = np.array(g2i)\n g2_aged[age] = g2i_[np.nonzero(g2i_)[0]]\n N = len(g2_aged[age])\n lag_dict[age] = np.arange(N) * 1.0\n if time_sampling == \"log\":\n num_levels = int(np.log(N / (num_bufs - 1)) / np.log(2) + 1) + 1\n tot_channels, lag_steps, dict_lag = multi_tau_lags(num_levels, num_bufs)\n # max_taus= lag_steps[age].max()\n lag_steps_ = lag_steps[lag_steps < N]\n # print(i, age, lag_steps, N, lag_steps_, len(g2_aged[age]))\n g2_aged[age] = g2_aged[age][lag_steps_]\n lag_dict[age] = lag_steps_ * 1.0\n # print( lag_dict[age] )\n lag_dict[age] *= timeperframe\n\n return lag_dict, g2_aged", "title": "" }, { "docid": "41697f0b12e3af9d6b5fd38e811132f9", "score": "0.4979514", "text": "def mel_gff_list():\n\tmod_gff3 = sys.argv[3]\n\twith open(mod_gff3, 'r') as f:\n\t\tgff = [line.strip().split('\\t') for line in f]\n\t\tf.close()\n\treturn gff\n\t#gff_list ex/:\n\t#['2L', 'FlyBase', 'gene', '7529', '9484', '.', '+', '.', 'ID=FBgn0031208;Name=CG11023;Ontology_term=SO:0000010,SO:0000087,GO:0016929,GO:0016926;Dbxref=FlyBase:FBan0011023,FlyBase_Annotation_IDs:CG11023,GB_protein:ACZ94128,GB_protein:AAO41164,GB:AI944728,GB:AJ564667,GB_protein:CAD92822,GB:BF495604,UniProt/TrEMBL:Q86BM6,INTERPRO:IPR003653,GB_protein:AGB92323,UniProt/TrEMBL:M9PAY1,OrthoDB7_Drosophila:EOG796K1P,OrthoDB7_Diptera:EOG7X1604,EntrezGene:33155,UniProt/TrEMBL:E1JHP8,UniProt/TrEMBL:Q6KEV3,OrthoDB7_Insecta:EOG7Q8QM7,OrthoDB7_Arthropoda:EOG7R5K68,OrthoDB7_Metazoa:EOG7D59MP,InterologFinder:33155,BIOGRID:59420,FlyAtlas:CG11023-RA,GenomeRNAi:33155;gbunit=AE014134;derived_computed_cyto=21A5-21A5'], ['2L', 'FlyBase', 'gene', '9839', '21376', '.', '-', '.', 'ID=FBgn0002121;Name=l(2)gl;fullname=lethal (2) giant larvae;Alias=Lgl,lgl,lethal giant larvae,lethal giant larve,lethal giant larva,lethal(2)giant larvae,Complementation group 2.1,Lethal Giant Larvae,dlgl,p127l(2)gl,LGL,l(2) giant larva,CG2671,L(2)GL,p127,l(2)giant larvae,D-LGL,l(2),gl,l[[2]]gl,l-gl,lethal-giant-larvae,Lethal giant larvae,Lethal (2) giant larvae,L(2)gl,Lethal (2) giant larva,Lethal-giant-larvae,MENE (2L)-B,lethal(2) giant larvae,p127[l(2)gl],lethal(2)-giant larvae,lethal-2-giant larvae,l(2) giant larvae,lethal- giant-larvae,Lethal(2)giant larvae,Lethal-2-giant 
larvae;Ontology_term=SO:0000010,SO:0000087,GO:0005578,GO:0005886,GO:0007269,GO:0016082,GO:0008021,GO:0008283,GO:0016334,GO:0016336,GO:0016333,GO:0016335,GO:0016327,GO:0005829,GO:0045175,GO:0016332,GO:0045184,GO:0007399,GO:0005938,GO:0005737,GO:0007179,GO:0045197,GO:0045196,GO:0002009,GO:0005918,GO:0008105,GO:0045167,GO:0008104,GO:0045746,GO:0007423,GO:0008285,GO:0001738,GO:0016323,GO:0007391,GO:0005856,GO:0030154,GO:0042127,GO:0005614,GO:0045159,GO:0035072,GO:0007559,GO:0045200,GO:0008360,GO:0019991,GO:0007406,GO:0051726,GO:0051668,GO:0007314,GO:0016325,GO:0030036,GO:0030863,GO:0035070,GO:0055059,GO:0035212,GO:0035293,GO:0090163,GO:0048730,GO:0000132,GO:0098725,GO:0060429,GO:0007293,GO:0045176,GO:0072697,GO:0000149,SO:0000548,GO:0005920,GO:0017022,GO:0004860,GO:0006469;Dbxref=FlyBase:FBan0002671,FlyBase_Annotation_IDs:CG2671,INTERPRO:IPR015943,GB_protein:AAN10503,GB_protein:AAG22256,GB_protein:AAN10502,GB_protein:AAN10501,GB_protein:AAF51570,GB_protein:AAG22255,INTERPRO:IPR017986,GB:AA246243,GB:AW942062,GB:AY051654,GB_protein:AAK93078,GB:BH809482,GB:CZ471313,GB:CZ482024,GB:CZ484691,GB:M17022,GB_protein:AAA28671,GB_protein:AAA28672,GB:X05426,GB_protein:CAA29007,UniProt/Swiss-Prot:P08111,INTERPRO:IPR000664,INTERPRO:IPR001680,INTERPRO:IPR013577,GB_protein:AGB92324,UniProt/TrEMBL:M9NCX1,UniProt/TrEMBL:M9PBJ2,OrthoDB7_Drosophila:EOG7CW2GT,OrthoDB7_Diptera:EOG7DRVK2,GB_protein:AFH03479,GB_protein:AFH03478,GB_protein:AFH03481,GB_protein:AFH03480,EntrezGene:33156,INTERPRO:IPR013905,BDGP_clone:PC00404,OrthoDB7_Insecta:EOG7SRGKH,OrthoDB7_Arthropoda:EOG7ZDD82,OrthoDB7_Metazoa:EOG79W94C,InterologFinder:33156,FlyAtlas:CG2671-RB,BIOGRID:59421,Fly-FISH:CG2671,GenomeRNAi:33156,INTERACTIVEFLY:/cytoskel/lethl2g1.htm;gbunit=AE014134;derived_computed_cyto=21A5-21A5'],\n\t# ['2L', 'FlyBase', 'ncRNA', '286383', '288292', '.', '+', '.', 'ID=FBtr0347595;Name=CR46263-RA;Parent=FBgn0267996;Dbxref=FlyBase_Annotation_IDs:CR46263-RA;score_text=Weakly Supported;score=0'], ['2L', 'FlyBase', 'gene', '287252', '289144', '.', '-', '.', 'ID=FBgn0025686;Name=Amnionless;fullname=Amnionless ortholog;Alias=FBgn0031246,CG11592,CK02467,BEST:CK02467,dAMN,Amnionless;Ontology_term=SO:0000010,SO:0000087,GO:0046331,GO:0097206,GO:0016021,GO:0097017;Dbxref=FlyBase:FBan0011592,FlyBase_Annotation_IDs:CG11592,GB_protein:AAF51514,GB:AA141784,GB:CZ468687,UniProt/TrEMBL:Q9VPN2,GB_protein:AGB92350,OrthoDB7_Drosophila:EOG7CGKJK,EntrezGene:33199,BDGP_clone:IP03221,OrthoDB7_Diptera:EOG774804,INTERPRO:IPR026112,OrthoDB7_Insecta:EOG7G266G,OrthoDB7_Arthropoda:EOG7P65FW,OrthoDB7_Metazoa:EOG7ZGX2W,InterologFinder:33199,FlyAtlas:CG11592-RA,GenomeRNAi:33199;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292419', '293222', '.', '+', '.', 'ID=FBgn0031247;Name=CG11562;Alias=FBgn0063011,BcDNA:RE44650;Ontology_term=SO:0000010,SO:0000087,GO:0005739,GO:0003674,GO:0008150;Dbxref=FlyBase:FBan0011562,FlyBase_Annotation_IDs:CG11562,GB_protein:AAF51513,GB:AI520524,GB:AI945841,GB:AY119645,GB_protein:AAM50299,GB:BE662187,GB:BI358003,UniProt/TrEMBL:Q9VPN3,OrthoDB7_Drosophila:EOG7HTW3H,OrthoDB7_Diptera:EOG7200K9,EntrezGene:33200,BDGP_clone:RE44650,OrthoDB7_Insecta:EOG7B9454,OrthoDB7_Arthropoda:EOG7RK278,OrthoDB7_Metazoa:EOG78H3X3,FlyAtlas:CG11562-RA,INTERPRO:IPR031568,Fly-FISH:CG11562,GenomeRNAi:33200;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292959', '294681', '.', '-', '.', 'ID=FBgn0017457;Name=U2af38;fullname=U2 small nuclear riboprotein auxiliary factor 
38;Alias=FBgn0010626,U2AF38,U2AF,dU2AF38,DU2AF38,CG3582,dU2AF[38],l(2)06751,u2af38,U2AF 38;Ontology_term=GO:0089701,SO:0000010,SO:0000087,GO:0000398,GO:0008187,GO:0005681,GO:0005686,GO:0000381,GO:0005634,GO:0003729,GO:0007052,GO:0071011,GO:0008380,GO:0000166,GO:0046872;Dbxref=FlyBase:FBan0003582,FlyBase_Annotation_IDs:CG3582,GB_protein:AAF51512,GB:AA264081,GB:AA820431,GB:AC004115,GB:AC008371,GB:AI061776,GB:AI455418,GB:AI944553,GB:AQ026079,GB:AY058537,GB_protein:AAL13766,GB:U67066,GB_protein:AAB17271,UniProt/Swiss-Prot:Q94535,INTERPRO:IPR000504,INTERPRO:IPR000571,INTERPRO:IPR009145,INTERPRO:IPR012677,GB_protein:AGB92351,UniProt/TrEMBL:M9PBM1,OrthoDB7_Drosophila:EOG7FRM2M,OrthoDB7_Diptera:EOG700KS6,EntrezGene:33201,BDGP_clone:LD24048,OrthoDB7_Insecta:EOG76QSHP,OrthoDB7_Arthropoda:EOG7KMJ7T,OrthoDB7_Metazoa:EOG70089G,apodroso:10448-U2af38[k14504],InterologFinder:33201,FlyAtlas:CG3582-RA,BIOGRID:59457,Fly-FISH:CG3582,GenomeRNAi:33201;gbunit=AE014134;derived_computed_cyto=21B7-21B8']]", "title": "" }, { "docid": "5c64ab36c50faefc4502fb6a7c2c3989", "score": "0.4975732", "text": "def extract_DJS_data(self):\n super().complete_data()\n if self.exclude_names is not None:\n self.exclude_features()\n self.angles_ankle = self.angles_df.loc[self.features[0]]\n self.GRF_vertical = self.GRF_df.loc[self.features[1]]\n self.moment_ankle = self.moments_df.loc[self.features[2]]\n self.power_ankle = self.power_df.loc[self.features[3]]\n self.all_dfs_ankle = pd.concat([self.angles_ankle, self.GRF_vertical, \n self.moment_ankle, self.power_ankle], axis=0)\n #Changing the features and adding units\n for i in range(4):\n self.features[i] += ' '+self.units[i] #Ankle angle\n self.index_ankle = pd.MultiIndex.from_product([self.features,\n self.angles_ankle.index], \n names=['Feature', 'Gait cycle %'])\n self.all_dfs_ankle.index = self.index_ankle\n \n return self.all_dfs_ankle", "title": "" }, { "docid": "c50c0ac8413fde68a6d9e35906f5194b", "score": "0.4969445", "text": "def get_gini_index(data):\n # Skip first target class column\n col_data = data.iloc[:, 1:]\n best_mix_gini = math.inf\n best_threshold = math.inf\n best_attribute = \"\"\n # Iterate over all attributes\n for attr in col_data.columns:\n threshold_unique = []\n # Get unique threshold values\n for val in data[attr]:\n if val not in threshold_unique:\n threshold_unique.append(float(val))\n\n # Iterate over all thresholds\n for threshold in threshold_unique:\n list_lowerthreshold = []\n list_higherthreshold = []\n target_lowerthreshold = []\n target_higherthreshold = []\n\n for idx in range(0, len(data[attr])):\n val = data[attr].tolist()[idx]\n if val < threshold:\n # Append lower than threshold values\n list_lowerthreshold.append(float(val))\n # Append target class values for less than threshold\n target_lowerthreshold.append(data[\"Type\"].tolist()[idx])\n elif val >= threshold:\n # Append higher than threshold values\n list_higherthreshold.append(float(val))\n # Append target class values higher than threshold\n target_higherthreshold.append(data[\"Type\"].tolist()[idx])\n\n\n # Calculate gini for lower values than threshold\n if(len(list_lowerthreshold) !=0):\n try:\n\n list_zero_lower = [list_lowerthreshold[idx] for idx in range(0,len(list_lowerthreshold)) if target_lowerthreshold[idx] == \"Cupcake\"]\n prob_zero_lower = len(list_zero_lower) / len(list_lowerthreshold)\n\n list_one_lower = [list_lowerthreshold[idx] for idx in range(0,len(list_lowerthreshold)) if target_lowerthreshold[idx] == \"Muffin\"]\n prob_one_lower = len(list_one_lower) / 
len(list_lowerthreshold)\n\n ginival_lower = 1 - prob_zero_lower ** 2 - prob_one_lower ** 2\n except:\n ginival_lower = 0\n else:\n ginival_lower = 0\n\n # Calculate gini for higher values than threshold\n if (len(list_higherthreshold) != 0):\n\n try:\n list_zero_higher = [list_higherthreshold[idx] for idx in range(0,len(list_higherthreshold)) if target_higherthreshold[idx] == \"Cupcake\"]\n\n prob_zero_higher = len(list_zero_higher) / len(list_higherthreshold)\n\n list_one_higher = [list_higherthreshold[idx] for idx in range(0,len(list_higherthreshold)) if target_higherthreshold[idx] == \"Muffin\"]\n\n prob_one_higher = len(list_one_higher) / len(list_higherthreshold)\n ginival_higher = 1 - prob_zero_higher ** 2 - prob_one_higher ** 2\n except:\n ginival_higher = 0\n else:\n ginival_higher = 0\n\n\n\n\n\n total_val = len(list_lowerthreshold)+len(list_higherthreshold)\n\n\n if total_val!=0:\n # Calclulate weighted gini\n mix_gini_low = len(list_lowerthreshold) * ginival_lower\n mix_gini_high = len(list_higherthreshold) * ginival_higher\n mix_gini = (mix_gini_low + mix_gini_high)/total_val\n\n if mix_gini < best_mix_gini:\n # Set best gini, threshold and attribute\n best_mix_gini = mix_gini\n best_threshold = threshold\n best_attribute = attr\n\n return best_mix_gini, best_threshold, best_attribute", "title": "" }, { "docid": "15d9957f62c5c3d177669d8906df5321", "score": "0.49669987", "text": "def train_HMM(train_file_name):\r\n\r\n pos_data = read_data_train(train_file_name+'.txt')\r\n sent_inds = read_data_ind(train_file_name+'.ind')\r\n\r\n ####################\r\n # STUDENT CODE HERE\r\n sent_data = []\r\n for start, end in zip(sent_inds, sent_inds[1:]):\r\n sent_data.append(pos_data[start:end])\r\n sent_data.append(pos_data[sent_inds[-1]:]) # don't forget the last sentence!\r\n\r\n prior = np.zeros(N_tags)\r\n transition = np.zeros((N_tags, N_tags))\r\n emission = dict()\r\n\r\n for sent in sent_data:\r\n prior[UNIVERSAL_TAGS.index(sent[0][1])] += 1\r\n prior = np.log(prior / prior.sum())\r\n\r\n for sent in sent_data:\r\n for first, second in zip(sent, sent[1:]):\r\n transition[UNIVERSAL_TAGS.index(first[1])][UNIVERSAL_TAGS.index(second[1])] += 1\r\n for i, row in enumerate(transition):\r\n transition[i] = [np.log(float(i)/sum(row)) for i in row]\r\n transition = np.asarray(transition)\r\n\r\n tag_count = {t: 0 for t in UNIVERSAL_TAGS}\r\n for pos in pos_data:\r\n emission.setdefault((pos[1], pos[0]), 0)\r\n emission[(pos[1], pos[0])] += 1\r\n tag_count[pos[1]] += 1\r\n for key in emission:\r\n emission[key] = np.log(float(emission[key])/tag_count[key[0]])\r\n ####################\r\n\r\n return prior, transition, emission", "title": "" }, { "docid": "a5fac4d87c0a7f6314900e45192fb285", "score": "0.49575096", "text": "def __init__(self,x_cords,y_cords,data,distance_type = 'euclidean'):\r\n self.all_points = self.np.array([[x,y] for x,y in zip(x_cords,y_cords)])\r\n self.data = self.np.array(data).reshape((len(data),1))\r\n if distance_type == 'euclidean':\r\n self.distance_function = self.euclideanDistance\r\n elif distance_type == 'haversine':\r\n self.distance_function = self.haversineDistance\r\n\r\n\r\n\r\n # calculate all distances, and the individual semivarance\r\n self.X_distance_data,self.Y_semivariance_data = self.calcPointPairs()\r\n self.num_pairs = self.X_distance_data.shape[0]\r\n\r\n self.distance_from_all_points = self.calculated_distance_pairs()\r\n\r\n # we will hold universall values for the variogram, but will reset them\r\n # in function calls.\r\n self.X_mean = 
[]\r\n self.Y_mean = []\r\n\r\n # This is the h (distance) vector that will be used to fit a model.\r\n self.h = self.np.linspace(min(self.X_distance_data),max(self.X_distance_data),100)", "title": "" }, { "docid": "6c2977bf73ccdd430be7c337a2584944", "score": "0.49549648", "text": "def _read_ged(self) -> None:\n id = \"\"\n fam_id = \"\"\n indi_flag = False\n\n for tag, arg in self._validate_gedcom():\n if tag == \"INDI\" and not indi_flag:\n id = arg\n if arg not in self._individual.keys():\n self._individual[arg] = Individual(arg)\n elif tag == \"NAME\" and not indi_flag:\n self._individual[id].set_name(arg)\n elif tag == \"SEX\" and not indi_flag:\n self._individual[id].set_gender(arg)\n elif tag == \"DATE\" and not indi_flag:\n self._individual[id].set_birth_death_date(arg)\n elif tag == \"FAMC\" and not indi_flag:\n self._individual[id].set_child(arg)\n elif tag == \"FAMS\" and not indi_flag:\n self._individual[id].set_spouse(arg)\n elif tag == \"FAM\":\n indi_flag = True\n fam_id = arg\n if arg not in self._family.keys():\n self._family[fam_id] = Family(fam_id)\n elif tag == \"HUSB\":\n self._family[fam_id].set_husband_id(arg)\n self._family[fam_id].set_husband_name(self._individual[arg].get_name())\n elif tag == \"WIFE\":\n self._family[fam_id].set_wife_id(arg)\n self._family[fam_id].set_wife_name(self._individual[arg].get_name())\n elif tag == \"CHIL\":\n self._family[fam_id].set_child(arg)\n elif tag == \"DATE\":\n self._family[fam_id].set_marriage_divorce_date(arg)", "title": "" }, { "docid": "0e13ef2bc000182dd57dff9d9390ec7d", "score": "0.49530223", "text": "def gather_EEG(self, flags):\n machine_id = 0\n machines = {}\n edf_file = []\n table = []\n for file in glob.glob(os.path.join(flags.data_path, 'physionet.org/files/capslpdb/1.0.0/*.edf')):\n\n # Fetch all data from file\n edf_file.append(file)\n try:\n data = pyedflib.EdfReader(file)\n except OSError:\n print(\"Crashed\")\n continue\n\n ch_freq = data.getSampleFrequencies()\n data = mne.io.read_raw_edf(file)\n ch = [c.lower() for c in data.ch_names]\n\n # Create state Dict (ID)\n state_dict = {}\n for n, f in zip(ch, ch_freq):\n state_dict[n] = f\n state_set = set(state_dict.items())\n\n # Create or assign ID\n if state_set not in table:\n id = copy.deepcopy(machine_id)\n machine_id +=1\n table.append(state_set)\n else:\n id = table.index(state_set)\n\n # Add of update the dictionnary\n if id not in machines.keys():\n machines[id] = {}\n machines[id]['state'] = state_set\n machines[id]['amount'] = 1\n machines[id]['dates'] = [data.info['meas_date']]\n machines[id]['names'] = [file]\n else:\n machines[id]['amount'] += 1 \n machines[id]['dates'].append(data.info['meas_date'])\n machines[id]['names'].append(file)\n \n _table = []\n for id, machine in machines.items():\n if machine['amount'] > 4:\n ch = [c[0] for c in machine['state']]\n freq = [c[1] for c in machine['state']]\n\n _table.append(set(ch))\n print(\"___________________________________________________\")\n print(\"Machine ID: \", id)\n print(\"Recording amount: \", machine['amount'])\n print(\"Channels: \", ch)\n print('Freqs: ', freq)\n print(\"Dates:\")\n for d in machine['dates']:\n print(d)\n print(\"Files:\")\n for f in machine['names']:\n print(f)\n\n return list(set.intersection(*_table))", "title": "" }, { "docid": "ea5172218a6bc2c3ea2e2dd6fc92435c", "score": "0.4952294", "text": "def gru_cell(self, Xt, h_t_minus_1,variable_scope):\n with tf.variable_scope(variable_scope):\n # 1.update gate: decides how much past information is kept and how much new 
information is added.\n print(Xt)\n print(self.W_z)\n print(h_t_minus_1)\n print(self.U_z)\n z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z) + tf.matmul(h_t_minus_1,self.U_z) + self.b_z) # z_t:[batch_size,self.hidden_size]\n # 2.reset gate: controls how much the past state contributes to the candidate state.\n r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r) + tf.matmul(h_t_minus_1,self.U_r) + self.b_r) # r_t:[batch_size,self.hidden_size]\n # 3.compute candiate state h_t~\n h_t_candiate = tf.nn.tanh(tf.matmul(Xt, self.W_h) +r_t * (tf.matmul(h_t_minus_1, self.U_h)) + self.b_h) # h_t_candiate:[batch_size,self.hidden_size]\n # 4.compute new state: a linear combine of pervious hidden state and the current new state h_t~\n h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candiate # h_t:[batch_size,hidden_size]\n return h_t", "title": "" }, { "docid": "d6c64d1f41864c7bbdc568dbe3bbe200", "score": "0.49498013", "text": "def load_distiller_mgf(self):\n data = {}\n\n alm = [i for i in gen_allowed_mass_diff_with_sign(n=4, z=1)]\n\n with mgf.read(self.path) as spectra:\n for spectrum in spectra:\n mass = spectrum['params']['pepmass'][0]\n precursor_chrg = int(spectrum['params']['charge'][0])\n mass = calculatePrecursor(mass, precursor_chrg)\n\n scanid = int(parse_scan_id(spectrum['params']['title']))\n\n if scanid in self.ids_to_be_referenced:\n if self.ids_to_be_referenced[scanid] in data:\n mass1 = data[self.ids_to_be_referenced[scanid]]['params']['pepmass'][0]\n precursor_chrg1 = int(data[self.ids_to_be_referenced[scanid]]['params']['charge'][0])\n mass1 = calculatePrecursor(mass1, precursor_chrg1)\n diff = abs(mass1 - mass)\n diff2 = [abs(diff - abs(i)) for i in alm]\n pos = diff2.index(min(diff2))\n p = \"mass1:\\t {0}\\n\"\n p += \"mass:\\t {1}\\n\"\n p += \"scanid:\\t {2}\\n\"\n p += \"charge:\\t {3}\\n\"\n p += \"charge2:\\t {4}\\n\"\n p += \"scanid2:\\t {5}\\n\"\n if diff > 21: # distiller changes precursor charge therefore precurosr mass calculation is wrong\n print(p.format(mass1, mass, scanid, precursor_chrg1, spectrum['params']['charge'][0], self.ids_to_be_referenced[scanid]))\n print(diff)\n print(diff2)\n print(\"----------------\")\n else:\n self.references.add(Reference(ppm=self.ppm,\n id_2=scanid,\n id_1=self.ids_to_be_referenced[scanid], # also scanid\n peak_list_2=spectrum['m/z array'],\n peak_list_1=data[self.ids_to_be_referenced[scanid]]['m/z array'],\n mass_2=mass,\n mass_1=mass1,\n charge=spectrum['params']['charge'][0],\n extra_mass=alm[pos],\n int_list_2=spectrum['intensity array'],\n int_list_1=data[self.ids_to_be_referenced[scanid]]['intensity array'],\n params2=spectrum['params'],\n params1=data[self.ids_to_be_referenced[scanid]]['params']))\n del(data[self.ids_to_be_referenced[scanid]])\n del(self.ids_to_be_referenced[scanid])\n else:\n data[scanid] = spectrum", "title": "" }, { "docid": "35f2c8b47ed3ece6e9794559fa2de105", "score": "0.49489737", "text": "def eval_h(self):\n h_indices = {}\n for node in self.G:\n forward_cites = [self.G.nodes[child]['forward_cites'] for child in self.G.successors(node)]\n h_indices[node] = CitationNetwork.h_index(forward_cites)\n nx.set_node_attributes(self.G, h_indices, 'h_index')", "title": "" }, { "docid": "7fc4af6e5a9fc96108802bc2894324ee", "score": "0.49463764", "text": "def Glafic_source(filename, dataformat):\n data = open(filename, 'r')\n lines = data.readlines()\n iid = []\n AE = []\n ME = []\n for k in range(len(lines))[1:]:\n [ids, einstein_angle, mass_in_RE] = lines[k].split()\n iid.append(int(ids))\n AE.append(float(einstein_angle))\n 
ME.append(float(mass_in_RE)) # [Msun/h]\n iid = np.asarray(iid)\n AE = np.asarray(AE)\n ME = np.asarray(ME)\n if dataformat == 'dictionary':\n Glafic = {'ID' : iid,\n 'AE' : AE,\n 'ME' : ME}\n return Glafic\n else:\n return iid, AE, ME", "title": "" } ]
e00d4854127bcc72d1cd6d58082bb8aa
Reads a YAML file and returns a dictionary of its contents.
[ { "docid": "befca0db1c00c8ff4b3347f5daa50d39", "score": "0.6859964", "text": "def read_yaml_config(path):\n with open(path) as input_file:\n data = input_file.read()\n try:\n return yaml.load(data)\n except yaml.YAMLError, e:\n raise ValueError('Error parsing YAML file %s: %s' % (path, e))", "title": "" } ]
[ { "docid": "8790fe5b5c4502b493d535daebb90b16", "score": "0.82360923", "text": "def read_yaml() -> dict:\n with open(CONFIG_FILE) as config_file:\n return yaml.load(config_file, Loader=yaml.FullLoader)", "title": "" }, { "docid": "f2b97221e12faf0a13d9506dc59f0d55", "score": "0.81965977", "text": "def read_yaml(path: Path) -> t.Dict[str, t.Any]: # type: ignore[misc]\n\n with open(path) as f:\n return dict(yaml.load(f, Loader=yaml.FullLoader))", "title": "" }, { "docid": "9d184fcae1a63cec81f609f7f4190d57", "score": "0.81817836", "text": "def read_yaml(path: str) -> dict:\n with open(path, mode='r', encoding='utf-8') as file:\n content = yaml.load(file, Loader=yaml.SafeLoader)\n return content", "title": "" }, { "docid": "4eb2b5cf720b4915cec034c392501a9a", "score": "0.79615206", "text": "def read_yaml(yaml_file):\n with open(yaml_file, 'r', encoding='utf-8') as yaml_fh:\n ss_dict = yaml.load(yaml_fh)\n return ss_dict", "title": "" }, { "docid": "aa684a96cfc2a65a7f8f48f4881c9309", "score": "0.7874171", "text": "def read_yaml_file(path: Path) -> dict:\n with open(path) as stream:\n try:\n return yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n raise exc", "title": "" }, { "docid": "546cf121741a26acdb1b39ec7adeb272", "score": "0.7868081", "text": "def _read_file(yaml_file_path):\n obj = {}\n with open(yaml_file_path, \"r\") as stream:\n try:\n obj = yaml.load(stream, Loader=yaml.FullLoader)\n except yaml.YAMLError as ex:\n print(ex)\n return obj", "title": "" }, { "docid": "41d967324933ed1016fc5d719eb78707", "score": "0.78496516", "text": "def read_yaml_file(filename: str) -> dict:\n with open(filename, 'r') as stream:\n LOGGER.info('Chargement du fichier de configuration %s', filename)\n return ConfigLoader.read_yaml_stream(stream)", "title": "" }, { "docid": "1f0c8e84f462a7e9b459311db81d900c", "score": "0.7770052", "text": "def yaml_to_dict(file_name):\n file_path = os.path.abspath(file_name)\n with open(file_path, \"r\") as conf_:\n content = yaml.safe_load(conf_)\n\n return content", "title": "" }, { "docid": "5e49645affd343157f6fc9a343cd2d3d", "score": "0.7649594", "text": "def _read_file(path):\n try:\n with salt.utils.files.fopen(path, \"rb\") as contents:\n return salt.utils.yaml.safe_load(contents)\n except OSError:\n return {}", "title": "" }, { "docid": "5a6b195724f394ff4876dc45ee278ce2", "score": "0.75858617", "text": "def get_yaml_dict(yaml_file):\n\ttry:\n\t\twith open(yaml_file, \"r\") as file_:\n\t\t\tyaml_dict = yaml.safe_load(file_.read()) or {}\n\t\treturn yaml_dict\n\texcept FileNotFoundError:\n\t\treturn {}", "title": "" }, { "docid": "d90c9c4d07932f64582049aa1e11244d", "score": "0.75544655", "text": "def load_from_file(file_path: str) -> dict:\n return ConfigLoader.read_yaml_file(file_path)", "title": "" }, { "docid": "d244ca7629b0aca6f3b479f17ee63d8d", "score": "0.7523064", "text": "def load_yaml_as_dict(filepath):\n with open(filepath) as open_file:\n yaml_dict = yaml.safe_load(open_file)\n if yaml_dict:\n return yaml_dict\n return {}", "title": "" }, { "docid": "ddc2bfac497b5809ed22279adb440c64", "score": "0.75091386", "text": "def load_yaml(yaml_path: Path) -> dict:\n with yaml_path.open(\"r\") as file_open:\n yaml_dict = load_string(file_open)\n return yaml_dict", "title": "" }, { "docid": "da293a24b72636bcc89588e97c08b3c4", "score": "0.74767953", "text": "def read_config(config_path: str) -> Dict:\n fin = open(config_path, encoding=\"utf-8\")\n data = yaml.load(fin, Loader=yaml.FullLoader)\n return data", "title": "" }, { "docid": 
"0d9cfc5d9691ff8cb27f59ee6bd550de", "score": "0.7476096", "text": "def read_yaml(filepath):\n with open(filepath, encoding=\"utf-8\") as stream:\n data = yaml.safe_load(stream)\n return data", "title": "" }, { "docid": "4f9012f524e8d976051cc1ae6e401702", "score": "0.7447727", "text": "def get_dict_from_yml(self, filename):\n\n fid = open(os.path.join(RESOURCE_PATH, filename), 'r')\n result = yaml.load(fid)\n fid.close()\n\n if result is None:\n raise SampleException('dict is None')\n\n return result", "title": "" }, { "docid": "10ff90d4e22722a8dc0ad6d38b3156ba", "score": "0.7424501", "text": "def read_yaml(self, filename):\n with open(filename, 'r') as f:\n self.__dict.update(yaml.load(f, Loader=yaml.SafeLoader))", "title": "" }, { "docid": "8489a232c50ac8573bf7977bddc788e9", "score": "0.7371884", "text": "def _load_yaml_file(yaml_file: Text) -> Dict:\n with open(yaml_file, mode='rb') as stream:\n try:\n yaml_content = yaml.load(stream)\n except yaml.YAMLError as ex:\n err_msg = f\"YAMLError:\\nfile:{yaml_file}\\nerror:{ex}\"\n logger.error(err_msg)\n raise FileNotFoundError\n\n return yaml_content", "title": "" }, { "docid": "28a2694ce54b98561b94c2485500060d", "score": "0.7356315", "text": "def read(self):\n import yaml\n from os.path import exists\n if exists(self.filename):\n with lock:\n with open(self.filename) as handle:\n try:\n data = yaml.safe_load(handle.read()) # (2)\n return data\n except yaml.YAMLError:\n return {} # (3)\n else:\n return {}", "title": "" }, { "docid": "0e90d619ce45de415664844151580d34", "score": "0.7290625", "text": "def read_config_file(path: str) -> dict:\n if not os.path.isfile(path):\n raise FileNotFoundError('The config file is missing.')\n\n with open(path, 'r') as config_file:\n return yaml.safe_load(config_file)", "title": "" }, { "docid": "790760830fd21d58d1c8608f2e0b429c", "score": "0.7272207", "text": "def yaml2dict(yaml_path: str):\n assert yaml_path.endswith('.yml') or yaml_path.endswith('.yaml')\n\n with open(yaml_path) as f:\n doc = yaml.full_load(f)\n return doc", "title": "" }, { "docid": "602cea88b2a2a8ea12f0a60c69981925", "score": "0.72694516", "text": "def read_yaml(yaml_file):\n try:\n # Read the package yaml file\n yaml = YAML()\n yaml.explicit_start = True\n yaml.indent(mapping=3)\n yaml.preserve_quotes = True # not necessary for your current input\n\n with open(yaml_file) as fp:\n data = yaml.load(fp)\n yaml.dump(data, sys.stdout)\n fp.close()\n except (IOError, ValueError):\n # Log error and raise exception if package yaml can't be read.\n error_msg = '{1}\\nError loading {0}\\n'.format(yaml_file, traceback.format_exc())\n LOG.error(\"Reading the vars.yml file failed with \" + error_msg)\n raise ValueError(error_msg)\n\n return data", "title": "" }, { "docid": "4dc5bb9b77032e9b6517e1ec671d4b2c", "score": "0.7246916", "text": "def read_configs(infile):\n cfg = {}\n with open(infile) as hdl:\n cfg = yaml.safe_load(hdl)\n return cfg", "title": "" }, { "docid": "dfb628323b2fd268185eac64e8c49254", "score": "0.71985924", "text": "def yaml_to_dict(file_path):\r\n loaded_file = {}\r\n try:\r\n with open(file_path, encoding=\"utf-8\") as file:\r\n loaded_file = yaml.safe_load(file.read())\r\n except Exception as error:\r\n loaded_file = {'erreur': error}\r\n\r\n return loaded_file", "title": "" }, { "docid": "13b0d01cdc3c125eb9b9bf1dcee9a648", "score": "0.7184562", "text": "def load():\n\n path = pathlib.Path(__file__).parent.absolute()\n file = f\"{path}/config.yml\"\n\n config = open(file)\n\n data = yaml.load(config, 
Loader=yaml.FullLoader)\n\n return data", "title": "" }, { "docid": "70376f3d523515e59415ce81a1321969", "score": "0.71795964", "text": "def load_yaml_file(filename):\n with open(filename) as f:\n contents = yaml.load(f, Loader=yaml.FullLoader)\n return contents", "title": "" }, { "docid": "332087c78a3ca5842f472f20be81c50a", "score": "0.7174574", "text": "def yml_to_dict(yml_file_path):\n yml_path = Path(yml_file_path).resolve()\n\n if not yml_path.is_file():\n return None\n\n yml_dict = None\n with open(yml_path, encoding='utf-8') as file:\n yml_dict = yaml.load(file, Loader=yaml.BaseLoader)\n\n return yml_dict", "title": "" }, { "docid": "c3b9b5c844f40921f195f94f1d278401", "score": "0.71663755", "text": "def load_yaml_file(self, yaml_file):\r\n self.called_tools.append('load_yaml_file')\r\n\r\n try:\r\n with open(yaml_file, 'r') as file:\r\n dict_result = yaml.safe_load(file.read())\r\n except FileNotFoundError as error:\r\n raise RbkcliException.ToolsError(error)\r\n\r\n return dict_result", "title": "" }, { "docid": "ebe27a2e3edec8b10295bea54ef0a149", "score": "0.7148037", "text": "def load_yaml(filename):\n try:\n with open(filename, 'r') as stream:\n data = yaml.load(stream)\n except:\n print(\"ERROR: Supplied configuration file {} is not a properly-formatted YAML file.\".format(filename))\n sys.exit(1)\n\n return data", "title": "" }, { "docid": "4b32003d2fa390b8636f3bf1dfefba9c", "score": "0.7131626", "text": "def load_yaml(file_name: str = None, resource: str = None, package: str = None) -> dict:\n #\n if file_name is not None:\n with open(file_name, 'r', encoding='utf-8') as f:\n yaml_string = f.read()\n else:\n with importlib.resources.open_text(package, resource) as r:\n yaml_string = r.read()\n\n the_map = yaml.load(yaml_string)\n\n if not isinstance(the_map, dict):\n raise YcError('Yaml source should map to a dictionary {}'.format(the_map))\n\n return the_map", "title": "" }, { "docid": "8fcdf6416ddd6fa3b6673c7db21c1009", "score": "0.7130792", "text": "def read_config():\n\n with open(\"config.yml\", 'r') as ymlfile:\n return yaml.load(ymlfile)", "title": "" }, { "docid": "3f420f1f42421f860e2269cba4cb47eb", "score": "0.7118985", "text": "def load_yaml(file_path):\n with open(file_path) as fin:\n content = yaml.load(fin, Loader=yaml.FullLoader)\n return content", "title": "" }, { "docid": "20168970028563355ad391b7947f981c", "score": "0.70954406", "text": "def read_yaml_file(file_name, validate) -> Dict:\n print(cyan(\"Reading\"),\n cyan(file_name, bold=True))\n\n with open(file_name, 'r', encoding='utf-8') as f:\n content = f.read()\n\n if not validate:\n return yaml.safe_load(content)\n\n if \"REDACTED\" in content or \"DATA+OMITTED\" in content:\n print(red(\"REDACTED\", bold=True),\n red(\"or\"),\n red(\"DATA+OMITTED\", bold=True),\n red(\"found in\"),\n red(file_name, bold=True))\n print(red(\"Ensure the context was dumped with\"),\n red(\"--raw\", bold=True))\n print(red(\"i.e.\"),\n red(\"kubectl config view --raw\", bold=True))\n sys.exit(3)\n\n return yaml.safe_load(content)", "title": "" }, { "docid": "4f783468dc43536058f6abc776bcbc61", "score": "0.70877725", "text": "def read_yaml(file_path):\n\n with open(os.path.join(DATA_DIRECTORY, file_path), 'r') as yaml_file:\n try:\n return yaml.safe_load(yaml_file)\n except yaml.YAMLError as err:\n print(err)", "title": "" }, { "docid": "008677bef962fc7204fbfd587509f2fa", "score": "0.7087709", "text": "def read_config():\n with open('config.yaml') as file:\n config = yaml.load(file, Loader=yaml.FullLoader)\n return config", 
"title": "" }, { "docid": "f29fd9318abaacd45e347387ef722e61", "score": "0.7084344", "text": "def read_yaml_file(filepath):\n data = None\n if os.path.exists(filepath):\n with open(filepath, 'r') as f:\n data = yaml.safe_load(f)\n return data", "title": "" }, { "docid": "a395000be05284e9e246b423c37dd1df", "score": "0.7077685", "text": "def load_yaml(yaml_file):\n f = open(yaml_file)\n y = f.read()\n return yaml.load(y)", "title": "" }, { "docid": "71520d6baa40d99825c26fc4bd4fd1ee", "score": "0.707119", "text": "def load_yaml_vars(infile):\n with open(infile, 'r') as f:\n var_dict = yaml.safe_load(f)\n\n return(var_dict)", "title": "" }, { "docid": "dba063d815957a627e73de5c5f2abed6", "score": "0.7046259", "text": "def load_yaml(file_path):\n content = None\n with open(file_path, 'r') as f:\n content = yaml.safe_load(f)\n return content", "title": "" }, { "docid": "a4503e32485f48f52b481caceb38932a", "score": "0.7043907", "text": "def deserialize(self, file):\n\t\treturn yaml.load(file)", "title": "" }, { "docid": "89efa095ddf3aa6a2aa7b6439b1fdb19", "score": "0.70262045", "text": "def load_yaml(filepath):\n\n with open(filepath) as f:\n data = yaml.load(f, Loader=Loader)\n\n return data", "title": "" }, { "docid": "9b8cb5e9c2c36f4344a274829d7fc9c6", "score": "0.7002157", "text": "def parse_config(config_file):\n if config_file is None:\n return {}\n return yaml.load(open(config_file, 'r').read())", "title": "" }, { "docid": "639c73f9caf25350e6628de452e105fe", "score": "0.70004666", "text": "def yaml_to_dict(inp_yaml):\n with open(inp_yaml, 'r') as stream:\n try:\n out = yaml.load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return out", "title": "" }, { "docid": "81dae1b4002d25dcf7af7b9b614a1962", "score": "0.6971694", "text": "def loads_yaml(file_content):\n return load_yaml(file_content)", "title": "" }, { "docid": "dca90614ada944cc98130b9625d66842", "score": "0.6969649", "text": "def load_yaml(filepath):\n with open(filepath) as fp:\n r = yaml.load(fp)\n return r", "title": "" }, { "docid": "2a9aaa315a5bf203d4b50a8c83f49cb9", "score": "0.69647294", "text": "def _read_logging_config() -> dict:\n with open(constants.FILE_YAML, 'rt') as f:\n return yaml.safe_load(f.read())", "title": "" }, { "docid": "8f082bbc1d2f47367bd58412c68ec132", "score": "0.69617224", "text": "def read_from_configuration(file_name):\n\n with open(file_name, 'r') as yaml_fobj:\n yaml_content = yaml.load(yaml_fobj)\n\n return yaml_content", "title": "" }, { "docid": "d060f9a090f6cad754bbf1db160e5616", "score": "0.69449025", "text": "def read_yaml_file(self, file_name):\n with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as yamlfile:\n return yaml.load(yamlfile)", "title": "" }, { "docid": "d58db7f999e21813cdc074493029eefd", "score": "0.69419944", "text": "def read_file(filename):\n try:\n with open(filename, 'r') as stream:\n data_loaded = yaml.load(stream)\n\n return data_loaded\n except IOError:\n print(\"Could not read file:\", filename)\n except TypeError:\n print('something went wrong')", "title": "" }, { "docid": "462a4884fe26e838ab1ab35048376fe3", "score": "0.69201773", "text": "def load_yaml(filename):\n with open(filename) as file:\n return yaml.load(file, Loader=yaml.FullLoader)", "title": "" }, { "docid": "6fd659a560ec55f78f86a4fd7d73ab3a", "score": "0.6917695", "text": "def LoadYaml(filepath):\n with open(filepath, 'r') as f:\n doc = yaml.load(f)\n logger.debug(doc)\n return doc", "title": "" }, { "docid": "73153175692f5e710eb384683634c388", "score": "0.69048584", "text": "def 
_read_yaml_file(path_yaml: Path) -> Any:\n # TODO: modify so that mkdocs.yml can be read, but Python won't be executed...\n\n # Based on: https://github.com/yaml/pyyaml/issues/86#issuecomment-380252434\n yaml.add_multi_constructor('', lambda _loader, _suffix, _node: None)\n yaml.add_multi_constructor('!', lambda _loader, _suffix, _node: None)\n yaml.add_multi_constructor('!!', lambda _loader, _suffix, _node: None)\n try:\n return yaml.unsafe_load(path_yaml.read_text())\n except (FileNotFoundError, KeyError) as err: # pragma: no cover\n logger.warning(f'Unexpected error reading the {path_yaml.name} file ({path_yaml}): {err}')\n return {}\n except yaml.constructor.ConstructorError:\n logger.exception('Warning: burying poorly handled yaml error')\n return {}", "title": "" }, { "docid": "26218e3ed4270379570ce64b9c93c2de", "score": "0.68852204", "text": "def _convert_yaml_to_dict(self, inputfilename):\n yamldict = {}\n\n try:\n with open(inputfilename, \"r\", encoding=\"utf-8\") as yaml_file:\n yamldict = yaml.load(yaml_file)\n except yaml.YAMLError as exc:\n if hasattr(exc, 'problem_mark'):\n mark = exc.problem_mark\n print(\"Error position: (%s:%s)\" % (mark.line+1, mark.column+1))\n else:\n errmsg = \"Unexpected error while parsing the YAML:\", sys.exc_info()[1]\n logging.error(errmsg)\n raise Exception(errmsg)\n\n return yamldict", "title": "" }, { "docid": "4d1c93a280b3a8a9e7bb7bf4678cf8dd", "score": "0.68692064", "text": "def read_config(file_name):\n with open(file_name) as fin:\n config = yaml.safe_load(fin.read())\n return config", "title": "" }, { "docid": "4d1c93a280b3a8a9e7bb7bf4678cf8dd", "score": "0.68692064", "text": "def read_config(file_name):\n with open(file_name) as fin:\n config = yaml.safe_load(fin.read())\n return config", "title": "" }, { "docid": "574e7ebce2cb9c06587fbf0d373cafbf", "score": "0.6854796", "text": "def parse(self):\n with open(self.file_path, \"r\") as f:\n self.content = yaml.safe_load(f)\n return self.content", "title": "" }, { "docid": "fe9db23695d46d3b035ab00c45f23bad", "score": "0.6844314", "text": "def load_config(config_path: str) -> dict:\n with open(config_path, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n return config", "title": "" }, { "docid": "2c85e70119dfcb0a8873619009951afc", "score": "0.6834853", "text": "def parse_config(config_file):\n return yaml.load(open(config_file), Loader=yaml.FullLoader)", "title": "" }, { "docid": "39fdd5c3e73bdb01524d08f4bd6c817f", "score": "0.6834625", "text": "def load_yaml(self, name):\n y = open(name, 'r')\n yml = yaml.safe_load(y)\n y.close()\n return yml", "title": "" }, { "docid": "6772370f77b55905d765893698ef7963", "score": "0.6832744", "text": "def read_yaml_file(pfilename: str) -> dict:\n # read the yaml file \"profiles.yaml\" for the list of profiles using pyyaml\n with open(pfilename, 'r') as stream:\n # try to load the yaml file\n try:\n profiles = yaml.safe_load(stream)\n # deal with a yaml error (for now just raise the error)\n except yaml.YAMLError as exc:\n raise exc\n # return the profiles\n return profiles", "title": "" }, { "docid": "dab0120f5a73ccb19df4c8b1387206e4", "score": "0.678199", "text": "def file_loads_yaml(file_path):\n file = None\n try:\n file = open(file_path)\n return loads_yaml(file)\n finally:\n if file:\n file.close()", "title": "" }, { "docid": "96e132a48c6060c76bc9aeba20b44c1f", "score": "0.6781286", "text": "def load_yaml(file_path):\n try:\n with open(file_path, 'r') as F:\n return yaml.load(F)\n except ImportError:\n print(\"import yaml is required\")\n 
exit(1)", "title": "" }, { "docid": "cf751a783676e439579be6e6addda0ab", "score": "0.67756873", "text": "def read(self):\n with open(self.file_name) as f:\n d = yaml.load(f)\n if d:\n self.update(d)\n f.close()", "title": "" }, { "docid": "c3f9b6600fdda61f8fe47d47a1ca1770", "score": "0.6756012", "text": "def load_yaml(path):\n stream = load_file(path)\n return yaml.load(stream)", "title": "" }, { "docid": "71d7df142abd8c46cfc36f595d346428", "score": "0.6753374", "text": "def __load_data(self):\n try:\n fh = open(self.ifile)\n data = yaml.load(fh)\n fh.close()\n return data\n except Exception, e:\n LOGGER.debug(str(e))\n LOGGER.error(_(\"You need to specify an existing file\"))\n sys.exit(1)", "title": "" }, { "docid": "aef4e3d6673014bbc9a3b60ded604dd5", "score": "0.6726021", "text": "def read_config(filename):\n \n with open(filename, 'r') as stream:\n try:\n config = yaml.load(stream)\n return config\n except yaml.YAMLError as exc:\n print(exc)", "title": "" }, { "docid": "7042dbf0f7c1c4c75f0975a0cfb561a2", "score": "0.6705862", "text": "def read_config_file(self):\n\n with open(self.file_path) as stream:\n LOG.info(\"Parsing pod file: %s\", self.file_path)\n cfg = yaml.load(stream)\n return cfg", "title": "" }, { "docid": "4740d435f78765f04a09030303fa7493", "score": "0.6697818", "text": "def parse_settings():\n\n with open(\"settings.yaml\", \"r\") as data:\n settings = yaml.safe_load(data)\n return settings", "title": "" }, { "docid": "24fd0ede51d674cad7853f1bdcf7a634", "score": "0.6696523", "text": "def config(self) -> Dict[str, Any]:\n try:\n with open(self.directory.joinpath(\"wordgoal.yml\"), \"r\") as stream:\n return cast(Dict[str, Any], safe_load(stream))\n except FileNotFoundError:\n return {}", "title": "" }, { "docid": "35eca6d438cdfaf4e2d09aea7f04913e", "score": "0.66908723", "text": "def load_config_data() -> map:\n\n cfg_file = get_config_file()\n\n with open(cfg_file, 'r') as cfg:\n data = yaml.safe_load(cfg)\n\n return data", "title": "" }, { "docid": "31d2fe01c6db0321e4f11df232b52967", "score": "0.6690024", "text": "def read_from_yaml(yml_file):\n logging.info(\"reading yaml file: %s\" % yml_file)\n sg_dict = {}\n yaml_stream = open(yml_file, \"r\")\n security_groups = yaml.load_all(yaml_stream)\n for security_group in security_groups:\n sg_dict = {sg_name: sg_attributes for (sg_name, sg_attributes) in security_group.items()}\n yaml_stream.close()\n logging.debug(\"sg_dict: %s\" % sg_dict)\n return sg_dict", "title": "" }, { "docid": "2aabb8906047d24d35e80eea24831fd7", "score": "0.6687082", "text": "def _read_yaml(path):\n with open(path, \"r\") as f:\n try:\n hist = yaml.safe_load(f)\n except yaml.YAMLError as exc:\n print(exc)\n return hist", "title": "" }, { "docid": "3ccc026172ef9e45953c715dab5b4f4c", "score": "0.66453356", "text": "def load_yaml_file(file_path: str):\n with codecs.open(file_path, \"r\") as f:\n return yaml.safe_load(f)", "title": "" }, { "docid": "3595b2f0fc1fd5cd01d4ebe90d31ed84", "score": "0.6639965", "text": "def parse_config_file(self, filename):\n self.logger.info(f'Loading config file: {filename}')\n with open(filename, 'r') as f:\n return yaml.load(f, Loader=yaml.FullLoader)", "title": "" }, { "docid": "56e06741d1c1425c53bbb9df46dd1d64", "score": "0.6638672", "text": "def get_config_yaml():\n return yaml.load(open('test/test_config.yaml', 'r')) or {}", "title": "" }, { "docid": "c17bb0ed03c314efc6cda793b66976f9", "score": "0.6632735", "text": "def get_config() -> Dict[str, str]:\n with open(\"default_conf.yaml\") as file:\n config = 
yaml.safe_load(file)\n return config", "title": "" }, { "docid": "eed8fb9d0b5ea423d2e66c861c3677e6", "score": "0.66229504", "text": "def read_config_file(\n filename: Union[Path, Text], reader_type: Union[Text, List[Text]] = \"safe\"\n) -> Dict[Text, Any]:\n return read_validated_yaml(filename, CONFIG_SCHEMA_FILE, reader_type)", "title": "" }, { "docid": "cb6c2a851cdb4851dc0770d4b962abdd", "score": "0.66061336", "text": "def parse_config(self, path: str) -> dict:\n try:\n with Path(path).open(\"r\") as file:\n return yaml.safe_load(file)\n except (FileNotFoundError, ParserError) as error:\n raise argparse.ArgumentError(self, error)", "title": "" }, { "docid": "495888e4ded497bed181229b7a4d1daf", "score": "0.66040313", "text": "def read_config(config_path):\n with open(config_path, 'r') as ymlfile:\n return yaml.load(ymlfile)", "title": "" }, { "docid": "375864ae5e8a3c862bb1bde33d3fb72d", "score": "0.65909624", "text": "def _load_file(self, file: TextIO) -> Dict[str, Any]:\n import toml\n\n config_dict = toml.load(file)\n return cast(Dict[str, Any], config_dict)", "title": "" }, { "docid": "5421484bebffc1d256fd8192b65cd4aa", "score": "0.6590941", "text": "def load(cls, filename):\n f = open(filename)\n result = cls._from_yaml_dict(yaml.load(f))\n f.close()\n return result", "title": "" }, { "docid": "f4b3f7918824d693e8199d799b9586d1", "score": "0.6587401", "text": "def get_config(conf_file):\n from easydict import EasyDict as edict\n\n with open(conf_file, \"r\") as file_descriptor:\n data = yaml.load(file_descriptor)\n\n # convert the data into an easyDictionary\n return edict(data)", "title": "" }, { "docid": "cc35c89e53387117a975f2555fb84275", "score": "0.6587145", "text": "def read_yaml_args_file(args_file_path: Path) -> Dict[str, Union[str, int, bool, List, Dict, datetime]]:\r\n with open(args_file_path, 'r') as f:\r\n return remove_empty_values(yaml.safe_load(f))", "title": "" }, { "docid": "6ea3fa74329ace5701188428b286b852", "score": "0.65742385", "text": "def load(self, path):\n context = {}\n with codecs.open(self.source + path, 'r', encoding='utf-8') as f:\n content = f.read()\n result = re.search(r'^(---\\s*\\n.*?\\n?)^(---\\s*$\\n?)', content, re.DOTALL|re.MULTILINE)\n if result:\n front_matter = result.group(1)\n context = yaml.load(front_matter)\n content = content[result.end(0):len(content)]\n return content, context", "title": "" }, { "docid": "a7fb17e84bc1aa081e48b145f3352070", "score": "0.65531045", "text": "def load_config(config_path: str) -> dict:\n with fsspec.open(config_path, mode=\"r\") as f:\n config = yaml.safe_load(f)\n return config", "title": "" }, { "docid": "02423169ca9a96fafd9e3223897bafbc", "score": "0.65272075", "text": "def load(name):\n if not os.path.isfile(name):\n raise ValueError(f\"File {name} does not exists.\")\n return yaml.load(open(name), Loader=yaml.Loader)", "title": "" }, { "docid": "ccf763e99b8b88c99e5f38c6382cd189", "score": "0.6517302", "text": "def load_config(filepath: str) -> Dict[str, Any]:\n with open(filepath, mode=\"r\", encoding=\"utf-8\") as file:\n return parse(file.read())", "title": "" }, { "docid": "8316c3da86f0e10483bb2849fceeccf2", "score": "0.6498666", "text": "def load_yaml(fname):\n with open(fname, 'r') as outfile:\n yaml_file = yaml.load(outfile) # Loader=yaml.FullLoader\n return yaml_file", "title": "" }, { "docid": "7202b02674f170a804d644131cc81801", "score": "0.6497122", "text": "def load_yaml(file_path):\n with open(file_path, 'r') as usernames_yaml:\n try:\n data = yaml.safe_load(usernames_yaml)\n except 
yaml.YAMLError as exc:\n print(exc)\n return data", "title": "" }, { "docid": "676d4183402646b22e9dfadaab3616bd", "score": "0.6489465", "text": "def load_yaml(path):\n with open(path, 'r', encoding='utf-8') as stream:\n try:\n res = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return res", "title": "" }, { "docid": "5805cda0e77846a17d904332f60f18f6", "score": "0.64880127", "text": "def read_dict_file(file):\n with open(file) as data:\n data = yaml.load(data, Loader=yaml.FullLoader)\n email_list = []\n server_list = []\n subject_list = []\n for email in data['emails']:\n email_list.append(email)\n for server in data['servers']:\n server_list.append(server)\n for subject in data['subjects']:\n subject_list.append(subject)\n return email_list, server_list, subject_list", "title": "" }, { "docid": "ae0404260be401cf93a6ae26e36d48a8", "score": "0.6484009", "text": "def get_config():\n with open('config.yaml', 'r') as file:\n config = yaml.load(file, Loader=yaml.FullLoader)\n return config", "title": "" }, { "docid": "86a4ec82c0b22b0a1a19ce4d6b0c9e39", "score": "0.64752114", "text": "def load_yaml_config(config_file):\n if type(config_file) is file:\n CONFIG.update(yaml.load(config_file) or {})\n return CONFIG\n else:\n try:\n with open(config_file, 'r') as f:\n content = yaml.load(f)\n CONFIG.update(content)\n return content\n except IOError as e:\n e.message = \"Could not open configuration file \\\"{}\\\".\".format(config_file)\n raise e", "title": "" }, { "docid": "2c110977a05353c45aeaa6c4ffaf0838", "score": "0.64426583", "text": "def read_yaml_file(\n filename: Union[Text, Path], reader_type: Union[Text, List[Text]] = \"safe\"\n) -> Union[List[Any], Dict[Text, Any]]:\n try:\n return read_yaml(read_file(filename, DEFAULT_ENCODING), reader_type)\n except (YAMLError, DuplicateKeyError) as e:\n raise YamlSyntaxException(filename, e)", "title": "" }, { "docid": "e8bc7e7ecaca290f80b302ed99cc16cc", "score": "0.6427357", "text": "def _parse(filename):\n with open(filename, \"r\") as fp:\n config_str = jinja2.Template(fp.read()).render()\n config = yaml.load(config_str, Loader=yaml.Loader)\n\n return config", "title": "" }, { "docid": "e9c3c89cf36bd2cb45f22fc6e2efb06b", "score": "0.64152056", "text": "def read_model_configuration(filename: Union[Path, Text]) -> Dict[Text, Any]:\n return read_validated_yaml(filename, MODEL_CONFIG_SCHEMA_FILE)", "title": "" }, { "docid": "ee2ca8dbb8bff7743c63d0fe6b161bd6", "score": "0.6381088", "text": "def readYML():\n with open(ROAinfoFile, 'r') as infile:\n ROAinfo = yaml.load(infile)\n return ROAinfo", "title": "" }, { "docid": "c4750262a580fb7555a91999cf8126cf", "score": "0.63790244", "text": "def load_config(path):\r\n return yaml.load(open('config.yaml', 'r'), Loader=yaml.SafeLoader)", "title": "" }, { "docid": "8ae2b4043978441b19ed6a43cb5833c7", "score": "0.6373652", "text": "def load(filename):\n try:\n with open(filename, 'r') as f:\n return yaml.safe_load(f)\n except yaml.scanner.ScannerError as e:\t# For errors parsing schema.yaml\n logging.error(\"YAML errror: %s \", e)\n raise", "title": "" }, { "docid": "7cb56df60fa1252478b6cdfb33bd6191", "score": "0.63606393", "text": "def _load_file(path):\n with open(path) as f:\n return yaml.safe_load(f)", "title": "" } ]
2c0be3bdb13395e5faec089c68c32cc0
Loop for receiving messages from the server and calling the given handler.
[ { "docid": "3457228a51d88c8edcc897239a313816", "score": "0.64994836", "text": "async def handle(self):\n\n # wait until the socket object is created\n while self.__socket is None:\n await asyncio.sleep(0)\n\n # wait until the socket is ready\n await self.__socket.ensure_open()\n\n # loop while socket is open\n while not self.__socket.closed:\n try:\n # wait to receive data from the socket\n data = await self.__socket.recv()\n\n # if data is None exit loop\n if data is None:\n break\n\n # decode packet into event and message\n event, message = self.__encoder.decode(data)\n\n # check for parsing error\n if event is not None:\n try:\n # run the event's handler and get the event and message for the response\n event, response = await self.__call_event_handler(event, message)\n\n # if the response is not none send it\n if response is not None:\n await self.send(event, message)\n except Exception as e:\n self.logger.error(\"Exception when executing the handler for the '\" + event\n + \"' event. Error: \" + str(e))\n except websockets.ConnectionClosedError as e:\n self.logger.error(\"Connection closed: \" + str(e))\n self.logger.info(\"Disconnected.\")", "title": "" } ]
[ { "docid": "1f6eb6286e8f5b57133d53eb6f7141b0", "score": "0.73473716", "text": "def handle(self):\n sock = self.request\n response = \"\"\n\n while True:\n try:\n objects = _read_objects(sock)\n response = server._callback(objects)\n except ConnectionClosed:\n return\n except ConnectionResetError:\n sock.close()\n except Exception:\n sock.close()\n\n _write_objects(sock, response)", "title": "" }, { "docid": "095dffc9db44d375b4a219112a0950f3", "score": "0.7273748", "text": "def run(self):\n\n while(True):\n message = Replyer.receive()\n method, params = Marshaller.unmarshall(message)\n response = getattr(self.server, method)(*params)\n Replyer.send(Marshaller.marshall(response))", "title": "" }, { "docid": "b6619dd6e3df7946362a387d72331bfa", "score": "0.7205196", "text": "def run_loop(self):\n\n while self.pending_calls:\n args = self.pending_calls.dequeue()\n\n method = args.get('method')\n peer_id = args.get('peer_id')\n raw_cmd = args.get('raw_cmd')\n fn = args.get('fn')\n\n if method == 'send':\n self._send_message(peer_id, raw_cmd, fn)\n elif method in ['response', 'call']:\n fn()\n else:\n raise ValueError('method %s undefined' % (method))", "title": "" }, { "docid": "e18f2e8211d16ba1b9f8204e458c701f", "score": "0.72048277", "text": "def receive_forever(self):\n\n while True:\n body, addr = self.receive()\n if body == None and addr == None:\n # There was an error with the received message\n continue\n\n if body['type'] not in self.handlers:\n # Provided type is not a registered handler\n logging.debug('Invalid message type', body)\n self.send(Error.json(Error.BAD_REQ, 'invalid message type'), addr)\n continue\n\n try:\n self.handlers[body['type']](self, body, addr)\n except CloseServer:\n self.close()\n return\n except Exception as e:\n self.send(Error.json(Error.SERVER_ERR, str(e)), addr)", "title": "" }, { "docid": "be4be43c27814c8c9e804ad8362c3dcf", "score": "0.71238244", "text": "def run(self):\n self.connect()\n self.action_helo()\n i = 200\n while True:\n try:\n message = self.receive_message()\n except socket.timeout:\n # no data from server\n log.debug(\"timeout waiting for message\")\n if self.__terminate:\n return\n else:\n continue\n self.handle_message(message)\n i = i - 1\n if i < 0:\n break\n self.action_bye()", "title": "" }, { "docid": "5ca1b4f646aad3a2d2ff3da422d50635", "score": "0.70701617", "text": "def run_loop(self):\n\n while(1):\n try:\n payload = self._recv()\n except UnicodeError as e:\n logging.warning(f\"Failed to decode : {e.reason}\")\n continue\n except KeyboardInterrupt:\n if self._closing:\n logging.warning(\"Forced shutting down now.\")\n os._exit(-1)\n else:\n logging.info(\"Received KeyboardInterrupt. 
Ctrl+C again to force shutting down.\")\n self._closing = True\n continue\n\n send_id, recv_id, msg = self._parse_msg(payload)\n if msg is None:\n continue\n else:\n result, cont = self._execute_cmd(msg.cmd, msg.data)\n if not cont:\n logging.info(\"Shutting down.\")\n break\n\n # Currently not expecting to invoke any callback on ModelServer\n # side, so second parameter 0\n self._send_msg(0, send_id, result)", "title": "" }, { "docid": "7a5823264048390dfb119e082e5e01a8", "score": "0.7060128", "text": "def run(self):\n channels = self.channel_backend.registry.all_channel_names()\n while True:\n channel, message = self.channel_backend.receive_many_blocking(channels)\n # Handle the message\n consumer = self.channel_backend.registry.consumer_for_channel(channel)\n if self.callback:\n self.callback(channel, message)\n consumer(channel=channel, **message)", "title": "" }, { "docid": "ad809a85e7fe5fdf9cc756ed66e03a2b", "score": "0.7050459", "text": "async def read(ws: websockets.WebSocketClientProtocol, handler: Callable[[AnyStr], Coroutine]):\n async for message in ws:\n await handler(message)", "title": "" }, { "docid": "87c83765aef7e6278fc86a7d6545460b", "score": "0.7024942", "text": "def run(self):\n for data in self.__iter_data():\n self.on_message(data)", "title": "" }, { "docid": "01e31593ed2d9f13a55251cbf141d500", "score": "0.6971264", "text": "def _process_loop(self):\n for msg in self._event_queue:\n self._process_incoming_message(msg)", "title": "" }, { "docid": "6f96f5742b2606027a53eab8f1987ba5", "score": "0.6967414", "text": "def __callbackHandler(self):\n while True:\n callback, msg_obj = SessionManager.callback_queue.get()\n callback(msg_obj)", "title": "" }, { "docid": "2b0678be59c9ec58a97fe2f4b6b4a03c", "score": "0.6963102", "text": "def run(self):\r\n while True:\r\n soc, address = self.queue.get()\r\n self.handle_client(soc, address)", "title": "" }, { "docid": "6e8520162fe27e75d7d25d3a012960bd", "score": "0.6959939", "text": "def __message_loop(self):\n\n # loop until we're told to stop\n while self.__parsing:\n # receive message data from the server and pass it along to the\n # world model as-is. the world model parses it and stores it within\n # itself for perusal at our leisure.\n raw_msg = self.__sock.recv()\n\n msg_type = self.msg_handler.handle_message(raw_msg)\n if msg_type is not None:\n # we send commands all at once every cycle, ie. 
whenever a\n # 'sense_body' command is received\n if msg_type == handler.ActionHandler.CommandType.SENSE_BODY:\n self.__send_commands = True", "title": "" }, { "docid": "34813cd5814096560b6b405420741d07", "score": "0.69063914", "text": "def read_commands_loop(self):\n while True:\n msg = self.autopilot.recv_msg()\n if msg:\n if self.debug: print(f\"Receive: {msg}\")\n if msg.id in self.handlers:\n handle_thread = threading.Thread(\n target=self.handlers[msg.id],\n args=(msg,)\n )\n handle_thread.start()\n time.sleep(0.1)", "title": "" }, { "docid": "ef9902643a343dfa8108fa417f34828a", "score": "0.69027877", "text": "def _run(self):\n self.soc.settimeout(1)\n while self.connected and not self.shutdown:\n try:\n data = self.soc.recv(2**16)\n data_list = self._check_for_multiple(data)\n for item in data_list:\n try:\n jdata = json.loads(item.decode('utf-8'))\n func = self.handler[jdata['topic']]\n handler_thread = Thread(target=func, args=[jdata])\n handler_thread.start()\n except json.JSONDecodeError as e:\n print('\\n\\n', e)\n print('Caused by:', item)\n except socket.timeout:\n pass\n except ConnectionResetError:\n print(self, ':: Master has shut down. Stopping subscriber')\n self.shutdown = True\n self.soc.close()\n self.connected = False", "title": "" }, { "docid": "31320fa0916e3afe50f3c0701962d3ed", "score": "0.67917025", "text": "def main_loop(self):\n self.log.info(\"Beginning to scan for new inbox messages...\")\n while True:\n messages = self.r.get_unread()\n for message in messages:\n if message.subject == \"username mention\":\n self.respond_to_username_mention(message)\n elif message.subject == \"edit request\":\n self.respond_to_edit_request(message)\n elif message.subject == \"delete request\":\n self.respond_to_delete_request(message)\n elif message.subject == \"user translation default request\":\n self.respond_to_user_translation_request(message)\n elif message.subject == \"subreddit translation default request\":\n self.respond_to_subreddit_translation_request(message)\n message.mark_as_read()\n sleep(30)", "title": "" }, { "docid": "cf4f07e74bb70cb1fb46a81ed9d76c1f", "score": "0.6763541", "text": "def _get_received_messages(self, response_handler):\n for recvmsg in _utils.getsome(self._channel.poll_received_message):\n _TRACE(\"ECHO_RECV_MSG msgtype: %s msgbody: %s\",\n recvmsg.msgtype, recvmsg.message)\n if response_handler:\n response_handler(recvmsg.msgtype, recvmsg.message)", "title": "" }, { "docid": "449e4b9a3ead12396edcf84b20f414f2", "score": "0.67121315", "text": "def run(self):\n\n from twisted.internet import reactor\n\n while True:\n item = self.queue.get()\n if item.get('json', None) is not None:\n for json in item['json']:\n if json['cmd'] == 'refresh':\n continue\n client = self.session.get(item['uid'])\n if client is None:\n continue\n if json['cmd'] == 'keepalive':\n client.updateLastAction()\n continue\n if item.get('type', None) is not None:\n client.type = item['type']\n client.addResponse(self.protocol.parse(client, json))\n if callable(item.get('callback', None)):\n reactor.callFromThread(item['callback'],\n client.getResponse())\n self.queue.task_done()", "title": "" }, { "docid": "14942e32f6d81c388ad8dd588906da53", "score": "0.6702382", "text": "def run(self):\n self.setup_connection()\n while True:\n self.send_command()\n if self.socket_got_data():\n self.get_next_message()\n print('received', msg_parser.get_command_name(self.buffer))\n self.process_message()\n\n # This method for debug purpose only\n # it stop executing when all known blocks 
processed\n self.finish()", "title": "" }, { "docid": "5a9b09c3ecf9efa7dda2f230ce4e63f1", "score": "0.6667263", "text": "def incoming_message_handler(self, running):\n while running:\n try:\n self.unicast_receive()\n except (socket.timeout, BlockingIOError):\n pass", "title": "" }, { "docid": "4bb9672c224b43a28260ea1adb976ab2", "score": "0.663664", "text": "async def handle_messages(self, /) -> NoReturn:\n while True:\n message = await self.read_queue.get()\n try:\n await asyncio.to_thread(self.buffer.update, message)\n async for message in self._emit_responses(message):\n await self.write_queue.put(message)\n except (DeviceError, MessageError) as exc:\n await self.logger.error('Message handling error', exc_info=exc)", "title": "" }, { "docid": "c83e840862c5f55eccfa0eed9019fa88", "score": "0.66135174", "text": "async def _receiver(self, websocket):\n async for msg in websocket:\n LOGGER.debug(F\"Received message: {msg}\")\n self.handle_message(msg)", "title": "" }, { "docid": "cc9d143c418d7785434439c36290a3c2", "score": "0.660933", "text": "def run(self):\n try:\n while True:\n self.eventHandler()\n self.poller()\n except Exception as e:\n print('Error: %s' % e)\n finally:\n sys.exit(0)", "title": "" }, { "docid": "4309b71dac67ebb1411d1fdbb9228c06", "score": "0.6609241", "text": "def run(self):\r\n self.serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.serverSocket.bind((self.hostname, self.server_port))\r\n self.serverSocket.listen(1)\r\n while not self.done:\r\n conn_socket, addr = self.serverSocket.accept()\r\n handler = ConnectionHandler(conn_socket, addr, self.timeout)\r\n handler.run()", "title": "" }, { "docid": "de9d4db5ac2136b1e3e46e2a60fee168", "score": "0.6603911", "text": "def __recv_worker(self):\n while True:\n msg = self.atomic_b.wait_for_msg(None)\n self.msg_handler(msg)", "title": "" }, { "docid": "298497e9d6b1e3fc5ad3707f984ad8f9", "score": "0.660326", "text": "def handler_thread(self):\n while not self.done:\n try:\n message = self.message_queue.get(timeout=5)\n self.handle_message(message)\n except Queue.Empty:\n logger.debug(\"timeout getting message\")", "title": "" }, { "docid": "7383fe923d38aa11f8b9032277e7096d", "score": "0.6590043", "text": "def run(self):\n\n inputSocketList = []\n inputSocketList.append(self._clientSocket)\n\n while not self.shutdownEvent.is_set():\n readyToRead, readyToWrite, inputError = select.select(inputSocketList, [], [], self._selectTimeout)\n\n for sock in readyToRead:\n # Read a message off of the socket\n msgData = MessageHandler.recvMsg(sock)\n\n # Process the message\n if msgData is not None:\n self.__processMsg(msgData)\n else:\n break\n\n # Cleanup\n self.__shutdown()", "title": "" }, { "docid": "b3334f97149eb2abfa7b7a794850272f", "score": "0.65867937", "text": "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n self.user = User()\n\n # Loop that listens for messages from the client\n while True:\n\n received_string = self.connection.recv(4096)\n \n message = json.loads(received_string)\n\n request = message['request']\n content = message['content']\n\n print 'The request is: ' + request\n\n if request == 'login':\n self.handle_login(content)\n elif request == 'logout':\n self.handle_logout()\n elif request == 'msg':\n self.handle_msg(content)\n elif request == 'names':\n self.handle_names()\n elif request == 'help':\n self.handle_help()", "title": "" }, { "docid": "75ad583343fcfaa2137aba8336b8a5b9", "score": "0.6577251", "text": 
"def _receiver(self):\n\n while self._run_receive:\n try:\n if self._something_to_receive():\n header = self._receive_header()\n if Response.is_msg_response(header):\n body = self._receive_data(header[Response.DLEN])\n self._handle_msg_response(body, header)\n\n elif AsyncMsg.is_async_msg(header):\n header = AsyncMsg.convert_to_async_header(header)\n body = self._receive_data(header[AsyncMsg.DLEN])\n self._handle_async_msg(body, header)\n else:\n raise SpheroError(\"Unknown data received from sphero. Header: {}\".format(header))\n except SpheroError:\n # TODO release all blocked threads waiting to receive data\n print \"RECEIVER CRASHED\"\n self._receiver_crashed = True\n #time.sleep(0.01)", "title": "" }, { "docid": "0ac361cd89300750dc1cc55a1216d3c7", "score": "0.6574794", "text": "def listen(self):\n try:\n while 1:\n print \"Loop...\"\n # Process incoming XMPP messages.\n self.cl.Process(5)\n\n # Process shoutbox messages.\n self.process_shoutbox_messages()\n\n # Sleep before next loop iteration.\n #time.sleep(1)\n\n # Reconnect to XMPP if necessary.\n if not self.cl.isConnected():\n self.cl.reconnectAndReauth()\n\n except KeyboardInterrupt:\n print \"Exiting...\"", "title": "" }, { "docid": "cb355fe0a9dc0d205dd475e307982807", "score": "0.65730107", "text": "def main_loop(irc: ssl.SSLSocket):\n while True:\n data_ch = irc.recv(1024)\n raw_message = data_ch.decode('UTF-8')\n\n for line in raw_message.splitlines():\n if line.startswith('PING :tmi.twitch.tv'):\n send_pong(irc)\n else:\n components = line.split()\n command = components[1]\n\n if command == 'PRIVMSG':\n parse_chat(irc, line)\n elif command == 'NOTICE':\n print(\"NOTICE: {}\".format(line[line.find(\":\", 1, -1):]))", "title": "" }, { "docid": "9144766313286dc144f85b816c626ea4", "score": "0.6531432", "text": "def recv_loop(self):\n while True:\n recv_msg = self.s.recv(SEND_BUFFER_SIZE).decode(\"ISO-8859-1\")\n if recv_msg:\n message = self.process_received_message(recv_msg)\n sys.stdout.write(\"\\t\" + message + \"\\n\")\n sys.stdout.flush()\n else:\n os._exit(0)", "title": "" }, { "docid": "ee538df655a759f839d5e644bbfc248c", "score": "0.6527932", "text": "def handle(ws):\n if ws.path == '/echo':\n sem = Semaphore()\n while True:\n logger.debug(\"loop\")\n data = ws.wait()\n logger.info(\"data {}\".format(data))\n pool.apply_async(process, args=(data, ws,sem), callback=log_result)\n\n elif ws.path == '/data':\n for i in xrange(10000):\n ws.send(\"0 %s %s\\n\" % (i, random.random()))\n gevent.sleep(0.1)\n while True:\n logger.info (\"loop\")\n data = ws.recvmsg()\n pool.apply_async(process, args=(data, ws,sem), callback=log_result)", "title": "" }, { "docid": "87ef517931eafa99aa4220ad0ce40d23", "score": "0.6526114", "text": "def serverloop():\n # Create a regular internet socket (TCP/IP):\n server_socket = socket.socket()\n # Bind the socket to listen on a specific port on our computer:\n server_socket.bind((IP, PORT))\n # Begin listening on the socket, with a particular queue size:\n server_socket.listen(MAXIMUM_QUEUE_SIZE)\n\n # Do this forever (until server process is killed):\n while True:\n # Accept a connection from next client:\n # for each connection we get the socket and connection details\n (client_socket, client_ip_and_port) = server_socket.accept()\n # Process the client's request:\n respond(client_socket, client_ip_and_port)\n # Close the client connection:\n client_socket.close()", "title": "" }, { "docid": "f5f2c9870f4ab471c1739a2915f7431a", "score": "0.65247244", "text": "def process_receivedMessages():\n 
process_receivedMessagesAlertDb=alertsdb(\"process_receivedMessages\")\n #print(\"Processing the input received from client\")\n #return \"received message from clinent:{} \\n\".format(input_str)\n while True:\n input=ClientsToServerMessages.get(block=True)\n try:\n inputJson = json.loads(input)\n functionname=inputJson[\"method\"]\n args=inputJson[\"params\"]\n FunctionsDictionary[functionname](process_receivedMessagesAlertDb,**args)\n except Exception as ex:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n log.error(\"Exception in process_receivedMessages:{} {} {} {}\".format(exc_type, fname, exc_tb.tb_lineno,ex))\n time.sleep(2)\n continue", "title": "" }, { "docid": "7c1014430b5ed764c332922c7edb315e", "score": "0.652419", "text": "def run(self):\n\n while True:\n self.wait_on_message()", "title": "" }, { "docid": "37898ff6e9bf47450f963867ad4ec8d4", "score": "0.65201706", "text": "def listen(self):\n\n while True:\n self.client, address = self.socket.accept()\n print(('CONNECTED: @ ' + str(address)))\n\n try:\n ciphertext = self.request_handler(\n self.client.recv(4096).decode('utf-8'))\n self.client.sendall(ciphertext[0])\n self.client.sendall(ciphertext[1])\n print('file uploaded, awaiting next client')\n except ValueError:\n pass\n print(('DISCONNECTED: @ ' + str(address)))\n self.socket.close()", "title": "" }, { "docid": "c395d6d04edc0d2b19fe840916c55b16", "score": "0.6504693", "text": "def processMessages(conn, addr):\n while True:\n try:\n # --- receive request --- #\n data = conn.recv(CHUNK_SIZE) # Receive data\n if not data:\n conn.close()\n msg = data.decode(FORMAT) # Decode incoming data\n print(\"Received: \", msg)\n # ----------------------- #\n\n # --- send response --- #\n time.sleep(SLEEP_INTERVAL) # Intervals \n print(\"sending response\")\n conn.sendall(bytes('Server here.', FORMAT)) # Send data\n # --------------------- #\n\n except:\n conn.close() # Close connection\n print(\"Connection closed by\", addr)\n sys.exit() # Quit the thread.", "title": "" }, { "docid": "50a082e5b3af211aebc78cc686f36737", "score": "0.6502024", "text": "def run(self):\n while True:\n data, addr = self.sock.recvfrom(1024)\n self.lock.acquire()\n try:\n self.client.server_message.append(data)\n finally:\n self.lock.release()", "title": "" }, { "docid": "40b55b76e483a8d749792712b9c50208", "score": "0.64911175", "text": "async def dispatch_queue(self):\n\n while True:\n try:\n await self.process_one()\n except Exception:\n self.log.exception(\"Error in message handler\")", "title": "" }, { "docid": "7349b5acf1678d84d94d2fb418125a40", "score": "0.6489239", "text": "async def event_loop(self, *args: EventHandler):\n while True:\n await self.get_event(*args)", "title": "" }, { "docid": "9430a1a896924cd29bc32b4783f269d0", "score": "0.64866155", "text": "def listen(self, message_consumer):\n while not self._rfile.closed:\n try:\n request_str = self._read_message()\n except ValueError:\n if self._rfile.closed:\n return\n else:\n log.exception(\"Failed to read from rfile\")\n\n if request_str is None:\n break\n\n try:\n message_consumer(json.loads(request_str.decode('utf-8')))\n except ValueError:\n log.exception(\"Failed to parse JSON message %s\", request_str)\n continue", "title": "" }, { "docid": "f7a4513afc2b0c70d2f7b4bcacde296c", "score": "0.6466923", "text": "def run(self):\n while self.running:\n for msg in self.sub.listen():\n if msg['type'] == 'message':\n message = msg['data'].decode(\"utf-8\")\n event = message.split(';')[0]\n 
value = message.split(';')[1]\n uid = message.split(';')[2]\n\n if event == \"SonarLeftDetected\" or event == \"SonarRightDetected\":\n dir = \"left\" if event == \"SonarLeftDetected\" else \"right\"\n self.obstacle_detected(eval(value), dir)\n if event == \"SonarLeftNothingDetected\" or event == \"SonarRightNothingDetected\":\n self.obstacle_detected(0, None)\n if event == \"robotIsFalling\" or event == \"robotHasFallen\":\n self.robot_fallen()", "title": "" }, { "docid": "885e51046069dd35403b1f2e4a03c726", "score": "0.64516777", "text": "def run(self):\n self.poller = select.epoll()\n self.pollmask = select.EPOLLIN | select.EPOLLHUP | select.EPOLLERR\n self.poller.register(self.server,self.pollmask)\n while True:\n # poll sockets\n try:\n fds = self.poller.poll(timeout=1)\n except:\n return\n for (fd,event) in fds:\n # handle errors\n if event & (select.POLLHUP | select.POLLERR):\n self.handleError(fd)\n continue\n # handle the server socket\n if fd == self.server.fileno():\n self.handleServer()\n continue\n # handle client socket\n self.setTime(fd)\n self.handleClient(fd)\n self.markSweep()", "title": "" }, { "docid": "5de3dc91b368b6ca6812d12fa5e749ea", "score": "0.64459133", "text": "def run(self):\n\n try:\n while True:\n connection, address = self.serverSock.accept()\n serverRunThread = threading.Thread(target=self.handler, args=(connection, address))\n serverRunThread.daemon = True\n serverRunThread.start()\n\n self.users.append((connection, address))\n print_safe(' [*] {0}:{1} connected'.format(address[0], str(address[1])))\n\n except KeyboardInterrupt:\n self.serverSock.close()\n sys.exit(0)", "title": "" }, { "docid": "d8cf2ce2c67feda3c3da88e8692947ce", "score": "0.64447606", "text": "def execute(self):\n self.running = True\n\n self.log_info(\"Starting...\")\n while self.running:\n # self.log(\"Running...\")\n try:\n message = self.inbox.get_nowait()\n self.handle(message)\n\n # yield to event loop after processing the message\n self.tick()\n\n except Empty:\n # Empty signifies that the queue is empty, so yield to another node\n self.tick()\n # pass\n\n # self.tick()\n # self.stop()\n self.log_info(\"Exiting...\")", "title": "" }, { "docid": "a7687aefa0f91d13a48afd3b09a36519", "score": "0.6433072", "text": "def message_loop(self):\r\n\r\n\t\treturn", "title": "" }, { "docid": "b3785f961604497f7f0684ba96150a91", "score": "0.6401567", "text": "def execute(self):\n sock = self.sock\n # Look for responses from all recipients\n while True:\n print(sys.stderr, \"waiting to receive\")\n try:\n data, server = sock.recvfrom(16)\n self.on_receive(data, server)\n except socket.timeout:\n print(sys.stderr, \"timed out, no more responses\")\n break\n else:\n print(sys.stderr, 'received \"%s\" from %s' % (data, server))", "title": "" }, { "docid": "f62a599bc9b65d7df0e06aaf861f7241", "score": "0.6397384", "text": "def __callback(self):\n while self.is_connected:\n data = self._connection.next()\n json_data = json.loads(data)\n\n event = json_data['ev']\n\n if event == 'update':\n self.on_update(json_data['data'])\n\n else:\n print ('Unknown event: %s' % event)", "title": "" }, { "docid": "08779d35b580179262609f3925eebd2a", "score": "0.63903886", "text": "async def _run_inbox_handler(self, inbox, handler_name):\n # Run the handler in a threadpool, so that it cannot block other handlers (from a different task),\n # or the main client thread. 
The number of worker threads forms an upper bound on how many instances\n # of the same handler can be running simultaneously.\n tp = ThreadPoolExecutor(max_workers=4)\n while True:\n handler_arg = await inbox.get()\n # NOTE: we MUST use getattr here using the handler name, as opposed to directly passing\n # the handler in order for the handler to be able to be updated without cancelling\n # the running task created for this coroutine\n handler = getattr(self, handler_name)\n if inspect.iscoroutinefunction(handler):\n # Wrap the coroutine in a function so it can be run in ThreadPool\n def coro_wrapper(coro, arg):\n asyncio_compat.run(coro(arg))\n\n # TODO: get exceptions to propogate\n tp.submit(coro_wrapper, handler, handler_arg)\n else:\n # Run function directly in ThreadPool\n # TODO: get exceptions to propogate\n tp.submit(handler, handler_arg)", "title": "" }, { "docid": "a658766653444a4f21f3c3ac6053762f", "score": "0.63893706", "text": "def runner(self):\n try:\n print(\"\\nWhen a thread is not connected, press Ctrl C \" +\n \"with no arguments to shutdown the server\\n\")\n while True:\n print(\"Waiting for connection...\")\n client, address = self.server.accept()\n handler = ClientHandler(client, self.server_name)\n handler.start()\n print(\"\\n... connected from:\", address)\n print(\"On IP, Port:\", client.getsockname())\n print(\"\")\n except KeyboardInterrupt:\n self.server.close()\n sys.exit(0)\n except OSError:\n print(\"Attempted to get message, however, either a keyboard interrupt caused a stop,\\n\" +\n \"or arguments given caused an error, server shutting down. Original Error: OSError\")\n self.server.close()\n except BrokenPipeError:\n print(\"Interrupts caused failed execution, server shutting down. Original Error: BrokenPipeError\")\n self.server.close()", "title": "" }, { "docid": "081cc5dd00998705f785e23d67b30373", "score": "0.6371375", "text": "def loop_forever(self):\r\n while True:\r\n self.client.wait_msg()", "title": "" }, { "docid": "9c7c1d7fc9378a402a684e2ed66b9cd9", "score": "0.63677037", "text": "def run(self):\n\n while not self._stop.is_set():\n sleep(INTERVAL)\n try:\n messages = self.socket.recv(BUFSIZE).split('\\n')\n except socket.error as error_:\n if error_[0] == errno.EWOULDBLOCK:\n continue\n elif error_[0] == errno.WSAECONNRESET:\n print('I tried to recv something after connection closed, like the dumb computer I am.')\n break\n else:\n raise error_\n for message in messages:\n if message == '':\n continue \n# print message\n message_dict = loads(message)\n self.command(message_dict)", "title": "" }, { "docid": "71f107fa73d5f89984d8ce7c9b2a2654", "score": "0.63586986", "text": "def run(self):\n try:\n while True:\n self.log('Loop start', level=logging.DEBUG)\n \n connection = None\n channel = None\n \n try:\n connection = self.fresh_connection()\n if connection.connection_open:\n self.log('Got connection', level=logging.DEBUG)\n channel = connection.channel()\n \n channel.queue_declare(\n queue=self.__queueName,\n durable=True,\n exclusive=False,\n auto_delete=False)\n \n channel.basic_consume(\n consumer=self.__handle_delivery,\n queue=self.__queueName)\n \n pika.asyncore_loop()\n \n self.log('Finished with connection', level=logging.DEBUG)\n else:\n self.log('No connection', level=logging.DEBUG)\n finally:\n if not channel is None:\n self.log('Closing channel', level=logging.DEBUG)\n channel.close()\n channel = None\n \n if not connection is None:\n self.log('Closing connection', level=logging.DEBUG)\n connection.close()\n connection = None\n \n 
time.sleep(self.__loopTimeout)\n except QuitApplication as e:\n self.log('Quitting application ({0})'.format(e.signalName))\n finally:\n self.log('Log Server stopped')", "title": "" }, { "docid": "159c6bfe518dcb2fc6cd713ea7916e36", "score": "0.6350875", "text": "def on_loop(self):\n data = self.last_read_buffer + self.read()\n if data:\n packets = data.split(MSG_END_HEADER)\n self.last_read_buffer = packets[-1]\n packets.pop()\n for packet in packets:\n self.handle_packet(packet)", "title": "" }, { "docid": "f0bad153c7451bb88dba7b94183ac2d0", "score": "0.63482934", "text": "def run(self):\n while True:\n self.handle_ready()\n p = self.polling_set()\n # Poll.\n events = p.poll()\n\n self.handle_events(events)\n self.remove_finished()", "title": "" }, { "docid": "84fb38004a13c51a6f205c8450e34df4", "score": "0.6304422", "text": "def loop(server):\n\n # create logger\n rootLogger = create_logger(server.logfolder)\n if rootLogger is not None:\n server.logging_enabled = True\n\n try:\n # This is the main loop\n while not server.break_loop:\n try:\n server.poll()\n if server.serving:\n if not len(server):\n # The server is serving, but there are no\n # connections so put in a sleep\n time.sleep(0.1)\n else:\n # if the server is not serving, put a sleep in the loop\n time.sleep(0.25)\n except NoService, e:\n server.log_exception(e)\n # Unable to bind\n # GUI will handle this, so loop continues\n except Exception, e:\n # log the exception and exit the main loop\n server.log_exception(e)\n return 1\n except KeyboardInterrupt:\n return 0\n finally:\n # shutdown the server\n server.shutdown()\n return 0", "title": "" }, { "docid": "b50fdf448e9ab6994d3e4ea34410f13c", "score": "0.6301584", "text": "async def process_events(self):\n try:\n while True:\n j = await self.recv()\n await self.process_packet(j)\n except websockets.ConnectionClosed as err:\n log.exception('Connection failed')\n await self.reconnect(err)", "title": "" }, { "docid": "a2ff189b215ddc2d3ee84e559a78a6fd", "score": "0.62858033", "text": "def handle(self):\n port = self.request.getpeername()[1]\n self.session = self.server.mainserver.create_session(session_id=port)\n self.session.connect(self)\n\n while True:\n try:\n messages = self.receive_message()\n if messages:\n for message in messages:\n self.handle_message(message)\n except EOFError:\n break\n except IOError:\n logger.exception(\"IOError in CoreAuxRequestHandler\")\n break", "title": "" }, { "docid": "2af06f5a9882abc2646f200a80bfaf53", "score": "0.625085", "text": "def loop(self, filename, handler):\n tailer = FileTailer(filename)\n while True:\n entry, comment = tailer.poll()\n if not entry:\n time.sleep(1.0)\n continue\n kos, not_kos, error = self.koscheck_logentry(entry)\n handler(comment, kos, not_kos, error)", "title": "" }, { "docid": "fc4359c2aee0074b225c6e123d386eae", "score": "0.6238515", "text": "def _listen(self):\n # start server\n self._socket.bind('tcp://{}:{}'.format(self.zmqconfig.get('host', ZMQ_DEFAULT_HOST), self.zmqconfig.get('port', ZMQ_DEFAULT_PORT)))\n while True:\n query = self._socket.recv_pyobj()\n self._socket.send_pyobj('QUERY_RECEIVED')\n callback_string = self._socket.recv_pyobj()\n callback = pickle.loads(callback_string)\n self._socket.send_pyobj('CALLBACK_RECEIVED')\n self._execute(query, callback)", "title": "" }, { "docid": "9509b008ee461576c678a379a44dce23", "score": "0.6231346", "text": "def _receive_loop(self, ws_url):\n\n if ws_url not in self._conns:\n self._logr.warning(\"<{}> is not an active connection\".format(ws_url))\n return\n\n if 
ws_url not in self._messages:\n self._messages[ws_url] = {}\n\n while not self._receive_stop_events[ws_url].is_set():\n try:\n raw_res = yield self._conns[ws_url].read_message()\n\n self._logr.debug(\"Read message: {}\".format(raw_res))\n\n if raw_res is None:\n self._logr.debug(\"Cannot read message: Closed WS connection\")\n yield tornado.gen.sleep(self.SLEEP_AFTER_ERR_SECS)\n continue\n\n msg_res = self._parse_msg_response(raw_res)\n\n if msg_res:\n self._messages[ws_url][msg_res.id] = msg_res\n conditions = self._msg_conditions.get(ws_url, None)\n\n if conditions and msg_res.id in conditions:\n self._logr.debug(\"Notifying: {}\".format(msg_res.id))\n conditions[msg_res.id].notify_all()\n except Exception as ex:\n self._logr.warning(\"Error in read loop: {}\".format(ex), exc_info=True)\n yield tornado.gen.sleep(self.SLEEP_AFTER_ERR_SECS)\n\n self._receive_stop_events[ws_url].clear()", "title": "" }, { "docid": "50aefc1f5b8e70679c98785f6f90b75f", "score": "0.6230563", "text": "def incoming(self):\n with ThreadPoolExecutor(max_workers=self.worker_limit) as executor:\n with socket(AF_INET, SOCK_STREAM) as sock:\n sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n sock.bind((\"0.0.0.0\", self.socket_port))\n sock.listen(10)\n self.logger.info(\"Server listening on local TCP port %s\",\n self.socket_port)\n while True:\n conn, addr = sock.accept()\n executor.submit(self.handler, conn, addr)\n self.logger.info(\"Server shutting down\")", "title": "" }, { "docid": "bdddb1a1c5739a3e99331cace2e216ba", "score": "0.623004", "text": "def loop(self):\r\n self.client.check_msg()\r\n return;", "title": "" }, { "docid": "8564f11c70394715d85e581628f00514", "score": "0.62184477", "text": "def run_main_loop(self):\n result = self.parse_input()\n while result:\n result = self.parse_input()\n if result == 2:\n self.respond(\"? 
unknown command\\n\\n\")", "title": "" }, { "docid": "439177dc2306ff1e4df6810bf2c22722", "score": "0.62142485", "text": "def run(self):\r\n self.handle_connection()", "title": "" }, { "docid": "62daf70752f3e8a10360fecc09b51b78", "score": "0.6211222", "text": "def loop(self):\n host, port = self._server_init()\n self._logger.info(\"Listener on: {}:{}\".format(host, port))\n downstream_addr = (\n self._config.get(\"Downstream\", \"Host\"),\n int(self._config.get(\"Downstream\", \"Port\")),\n )\n self._logger.info(\n \"Connected to downstream: {}:{}\".format(\n downstream_addr[0], downstream_addr[1]\n )\n )\n downstream_cli = socket.create_connection(downstream_addr)\n self._logger.info(\"Converter started.\")\n upstream_conn, client_address = self._server.accept()\n self._logger.info(\"Client connected: {}\".format(client_address))\n self._loop(upstream_conn, downstream_cli, downstream_addr)", "title": "" }, { "docid": "d75fe90ea52757d96af39dd1725be59d", "score": "0.62013793", "text": "def handler ( self ) :\r\n\r\n\r\n # counter\r\n \r\n self.count = self.count + 1\r\n\r\n # updates number of events to send\r\n \r\n if self.number > 0 : self.number = self.number - 1\r\n \r\n # stops and deletes timer\r\n \r\n self.stop()\r\n \r\n## if not self.timer is None : del( self.timer )\r\n\r\n # next event\r\n \r\n if self.nextTimeMs is None : self.nextTimeMs = self.clockMs()\r\n \r\n self.nextTimeMs = self.nextTimeMs + self.periodMs\r\n \r\n # triggers one shot timer for next event\r\n \r\n self.trigger()\r\n \r\n # external call back\r\n \r\n if not self.command is None :\r\n \r\n # already busy executing command\r\n \r\n if self.busy : return\r\n \r\n self.busy = True\r\n \r\n self.command()\r\n \r\n self.busy = False", "title": "" }, { "docid": "cd4fe24cdb425449c42e39abd239be6e", "score": "0.619992", "text": "def _dispatcher(self):\n while 1:\n try:\n response = self._incoming_messages.get(block=True)\n self._format_incoming(response)\n now = time.time()\n # match the event to the best command\n for command in self.commands[:]:\n if command.deadline is not False and command.deadline < now:\n self.unregister_command(command) # silently remove him\n continue\n match = command.matches(self, response)\n if match:\n command(_SlackBotWrapper(self, response), response, *match.groups())\n if command.activations is not False and command.activations <= 0:\n self.unregister_command(command)\n except Exception:\n self.debug(u'\\n'.join([traceback.format_exc(), str(response)]), 'red')\n else:\n try:\n if u'_logged' not in response:\n self.debug(response, 'gray')\n except:\n print(\"Wow something went REAL wrong.\")", "title": "" }, { "docid": "5bdb2843e4ed84aa0a52aee8db3262fc", "score": "0.6189986", "text": "def _receive_thread(self):\r\n while 1:\r\n try:\r\n message = json.loads(self.ws.recv())\r\n if 'id' in message:\r\n message_id = message['id']\r\n event = self.requests.pop(message_id, None)\r\n if event is not None:\r\n self.results[message_id] = message\r\n event.set()\r\n except Exception:\r\n break", "title": "" }, { "docid": "7cfea8ae99426dd404437a1783fa08c2", "score": "0.6182838", "text": "async def receive(self):\n\n await self.create_connection()\n\n while True:\n data = json.loads(await self.ws.recv())\n #print(data)\n\n self.sequence_id = data['s'] # Set the sequence id to the one sent by the gateway in case a reconnect is needed\n\n if data['op'] != 0:\n await self.handle_op(data['op'], data)\n\n await self.dispatcher.dispatch(data)", "title": "" }, { "docid": 
"dbdca3f01cee59c6f4f3e36cf7a15053", "score": "0.6154406", "text": "def read_loop(self, event): # !TODO timeout to signal\n\n while True:\n try:\n server_ans = recvTlv(self.sock)\n\n if TLV_OK_TAG in server_ans or TLV_FAIL_TAG in server_ans: # ACK / NACK response - main thread is awaiting for it\n # print(\"Message {} has ACK/NACK tag => saving to pipeline for future processing\".format(server_ans))\n self.pipeline.put(server_ans)\n continue\n\n if TLV_NEWTURN_TAG in server_ans:\n client_logger.debug(\"Put TLV_NEWTURN_TAG msg into the pipeline\")\n self.pipeline.put(server_ans)\n continue\n \n # print(\"Message {} is a request from the server\".format(server_ans))\n self.handle_answer(server_ans) # received msg is not a control msg\n except (OSError, EOFError) as e:\n print(\"\\nServer error\\n\")\n client_logger.error(\"Error while reading data: \" + str(e))\n os.kill(os.getpid(), signal.SIGINT) # should raise Keyboard interrupt in the main thread\n event.set() # alarm main thread that program should exit\n return\n except Exception as e:\n client_logger.error(\"Other error while reading data: \" + str(e))\n os.kill(os.getpid(), signal.SIGINT)\n event.set() # alarm main thread that program should exit\n return", "title": "" }, { "docid": "fc5a032b017db13c53f70e791d6eda14", "score": "0.61470777", "text": "def async_handle(self):\n\n self.raw_requestline = yield from self.rfile.readline()\n if len(self.raw_requestline) > 65536:\n self.requestline = ''\n self.request_version = ''\n self.command = ''\n self.send_error(414)\n return\n\n if not (yield from self.parse_request()): # An error code has been sent, just exit\n return\n\n handler = ServerHandler(\n self.rfile, self.wfile, self.get_stderr(), self.get_environ()\n )\n handler.request_handler = self # backpointer for logging\n yield from handler.run(self.server.get_app())", "title": "" }, { "docid": "2696f3fe24bd9f41cadddc8f6f8dea70", "score": "0.614598", "text": "def loop(self):\n print(f'Running on {socket.gethostname()}')\n self.mgr.run(self.base_components)\n with cli_connection.server(paths.beeflow_socket()) as server:\n while not self.quit:\n # Handle a message from the client, if there is one\n self.handle_client(server)\n # Poll the components\n self.mgr.poll()\n time.sleep(1)\n # Kill everything, if possible\n self.mgr.kill()", "title": "" }, { "docid": "12ac7d7020d9fad93912e68c129fad75", "score": "0.61454654", "text": "def handle(self):\n try:\n data = self.request.recv(1024).decode('UTF-8').strip()\n # process the data, i.e. 
print it:\n\n test = json.loads(data)\n\n # send some 'ok' back\n\n #channels_raw = server.send_command(\"channellist\").data\n #channels = [Channel(**channel) for channel in channels_raw]\n\n #clients_raw = server.clientlist()\n #clients = [Client(**clients_raw[client]) for client in clients_raw]\n #test = Client.clients_json(clients)\n self.request.sendall(bytes(json.dumps(handler.command(test)), 'UTF-8'))\n except Exception as e:\n print(\"Exception wile receiving message: \", e)", "title": "" }, { "docid": "590e650ecd121ef11b701dbd35e058f8", "score": "0.61379915", "text": "def execute(self):\n self.running = True\n\n # self.log_info(\"Starting...\")\n # while self.running:\n # # self.log(\"Running...\")\n # try:\n # message = self.inbox.get_nowait()\n # self.handle(message)\n\n # # yield to event loop after processing the message\n # self.tick()\n\n # except Empty:\n # # Empty signifies that the queue is empty, so yield to another node\n # self.tick()\n # # pass\n\n # self.tick()\n # self.stop()\n # self.log_info(\"Exiting...\")", "title": "" }, { "docid": "74a95340be1f187d9c069261cfb0bb3b", "score": "0.6122714", "text": "async def _dispatch_loop(self):\n last_msg_time = time.time()\n async with websockets.connect(self._uri) as ws:\n self._ws = ws\n while self._running:\n msg = await ws.recv()\n msg = self.decode_message(msg)\n\n if msg is None or msg.get_type() == 'BAD_DATA':\n continue\n\n # send a heartbeat message back, since this needs to be\n # constantly sent so the autopilot knows this exists\n if msg.get_type() == 'HEARTBEAT':\n # send -> type, autopilot, base mode, custom mode, system status\n outmsg = self._mav.heartbeat_encode(mavutil.mavlink.MAV_TYPE_GCS,\n mavutil.mavlink.MAV_AUTOPILOT_INVALID, 0, 0,\n mavutil.mavlink.MAV_STATE_ACTIVE)\n await self.send_message(outmsg)\n\n # print('Message received', msg)\n current_time = time.time()\n\n # print(\"Time between messages\", current_time - last_msg_time)\n\n # If we haven't heard a message in a given amount of time\n # terminate connection and event loop.\n if current_time - last_msg_time > self._timeout:\n self.notify_message_listeners(MsgID.CONNECTION_CLOSED, 0)\n self.stop()\n\n # update the time of the last message\n last_msg_time = current_time\n\n dispatch_message(self, msg)\n\n await self._shutdown_event_loop()", "title": "" }, { "docid": "79a20d67e0045810507e29c2fdf46671", "score": "0.61191785", "text": "def run(self):\n\t\t\n\t\twhile self.running or len(self.outbox) > 0:\n\t\t\tif len(self.outbox) > 0:\n\t\t\t\tif self.debug:\n\t\t\t\t\tprint \"[Notifier] There are\", len(self.outbox), \"messages to send\"\n\t\t\t\tself.obLock.acquire()\n\t\t\t\twhile len(self.outbox) > 0:\n\t\t\t\t\tmessage = self.outbox.pop()\n\t\t\t\t\tif self.debug:\n\t\t\t\t\t\tprint \"[Notifier] Sending queued message to\", message.getTo()\n\t\t\t\t\tself.client.send(message)\n\t\t\t\tself.obLock.release()\n\t\t\tself.client.process(2)\n\t\tif self.debug:\n\t\t\tprint \"[Notifier] Shutting down\"\n\t\tself.client.disconnect()", "title": "" }, { "docid": "4c6df57c5860bd78334c3dfdf7df9dd4", "score": "0.6119011", "text": "def run(self) -> None:\n\n self.logger.info('Starting Socket consumer.')\n\n while True:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n asyncio.get_event_loop().run_until_complete(self.get())", "title": "" }, { "docid": "9fd8e7b0e5b2ee64fe2f43061b9d1f21", "score": "0.61128116", "text": "def __runner(self):\n \n conn = pika.BlockingConnection(self.__connection_params)\n chan = conn.channel()\n \n 
chan.add_on_return_callback(self.__on_returned_msg)\n \n while not self.__shutdown_event.is_set():\n try:\n req = self.__pending_requests.get(True, 5)\n \n self._logger.debug(\"invoking %r\", req)\n \n response = {}\n \n try:\n response['result'] = req.callback(*req.args)\n except:\n self._logger.warn(\"exception invoking %r\", req, exc_info = True)\n \n # the exception value\n response['exception'] = sys.exc_info()[1]\n \n resp_body = serializer_utils.serialize(response, req.content_type)\n\n chan.basic_publish(\n exchange='',\n routing_key = req.reply_to,\n properties = pika.BasicProperties(\n correlation_id = req.correlation_id,\n content_type = req.content_type,\n ),\n body = resp_body\n )\n except Queue.Empty:\n pass\n \n \n conn.close()", "title": "" }, { "docid": "527fd2f96bffa349a49d1479846874bf", "score": "0.61126834", "text": "def Run(self):\r\n \r\n if (self._server is None): self.PrepareServer()\r\n while not self.quit:\r\n logging.debug('Waiting for request')\r\n self._server.handle_request()\r\n logging.debug('Finished processing requests')", "title": "" }, { "docid": "eef1c0cbee2e2d206f1337c5c8878d6a", "score": "0.61029685", "text": "def handle(self):\n while True:\n try:\n command = self.fetch_command()\n if not command:\n raise ControlException(\"No command found.\")\n\n for module in self.modules:\n foundWords = []\n for word in module.commandWords:\n if str(word) in command:\n foundWords.append(str(word))\n if len(foundWords) == len(module.commandWords):\n #A command has been said correctly so we can execute the message\n response = module.execute(command)\n #if the command was formatted wrong\n if response == \"ERROR\":\n print(\"Invalid command. Try again.\")\\\n #if we were setting up the token\n if response == \"Setup\":\n print(\"Setup complete.\")\n #otherwise we have the song JSON object returned and we need to parse it and send to MQTT\n else:\n index = response.find('#$#');\n beats = json.loads(response[index+3:])\n index2 = response.find('#%#')\n artist = response[0:index2]\n song = response[index2+3:index]\n print(\"Playing \" + song.upper() + \" by \" + artist.upper())\n self.clientRP.publish(\"raspberrypi/lcd\", response[0:index])\n self.clientRP.publish(\"raspberrypi/led\", response[index+3:])\n except (TypeError, ControlException):\n pass\n except Exception as exc:\n print(\"The request is not included in the API. 
Try Again.\")\n time.sleep(1)", "title": "" }, { "docid": "4e0b2fd1c100bbe798d7028370db4c15", "score": "0.60967106", "text": "def run(self):\n while True:\n msg = self.reader.read()\n if msg: # Set event if have new message\n self.log += \"%s\\n\" % msg.decode(DEFAULT_ENCODE)\n self.event.set()", "title": "" }, { "docid": "2da5692ebf649d4b31143786ee31ddb5", "score": "0.6090398", "text": "def run(self):\n while True:\n socks = select.select(self.sockets.values(), [], [], 0.1)[0]\n for conn in socks:\n try:\n k = conn.recv(65535)\n except:\n # either died on a connection reset, or was SIGTERM's by parent\n return\n if k:\n for sock in self.sockets:\n if self.sockets[sock] == conn:\n srcif = sock\n msg = json.loads(k)\n if not self.handle_packet(srcif, msg):\n self.send_error(conn, msg, srcif)\n else:\n return", "title": "" }, { "docid": "c56b93b51623a2714afc22f43d37e900", "score": "0.608456", "text": "async def _recv_loop(self):\n try:\n while self._connected:\n try:\n data = await self._recv()\n except asyncio.CancelledError:\n break\n except (IOError, asyncio.IncompleteReadError) as e:\n self._log.warning('Server closed the connection: %s', e)\n await self._recv_queue.put((None, e))\n await self.disconnect()\n except InvalidChecksumError as e:\n self._log.warning('Server response had invalid checksum: %s', e)\n await self._recv_queue.put((None, e))\n except InvalidBufferError as e:\n self._log.warning('Server response had invalid buffer: %s', e)\n await self._recv_queue.put((None, e))\n except Exception as e:\n self._log.exception('Unexpected exception in the receive loop')\n await self._recv_queue.put((None, e))\n await self.disconnect()\n else:\n await self._recv_queue.put((data, None))\n finally:\n await self.disconnect()", "title": "" }, { "docid": "8466187dc60ef8490ce4f06d5a6d4624", "score": "0.6083547", "text": "def on_msg(self,sock: socket.socket):\n\t\tlogging.info(\"Listening for server messages.\")\n\t\tself.run = True\n\t\tself.on_connect()\n\t\twhile self.run:\n\t\t\ttry:\n\t\t\t\tdata = sock.recv(LEN_OF_HEADER)\n\t\t\t\tif data == b'':\n\t\t\t\t\tself.run = False\n\t\t\t\t\tcontinue\n\t\t\t\tbody_len = get_size_of_msg(data)\n\t\t\t\tbody = b''\n\t\t\t\tif body_len != 0:\n\t\t\t\t\twhile body_len > 0:\n\t\t\t\t\t\tif body_len > MAX_LEN_OF_RECV:\n\t\t\t\t\t\t\tbody += sock.recv(MAX_LEN_OF_RECV)\n\t\t\t\t\t\t\tbody_len -= MAX_LEN_OF_RECV\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tbody += sock.recv(body_len)\n\t\t\t\t\t\t\tbody_len = 0\n\t\t\t\tmsg = Message()\n\t\t\t\tmsg.unpack(data + body)\n\t\t\t\tif msg.type in self.type_callbacks.keys():\n\t\t\t\t\tself.type_callbacks[msg.type](msg)\n\t\t\t\telse:\n\t\t\t\t\tself.run = False\n\t\t\t\t\traise Exception(\"Type: %s isnt defined in the callbacks\" % msg.type)\n\t\t\texcept timeout:\n\t\t\t\tcontinue\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.info(e)\n\t\t\t\tself.run = False\n\t\t\t\tif self.raise_exceptions:\n\t\t\t\t\traise e\n\t\t\t\tbreak\n\t\tself.run = False\n\t\tsock.close()\n\t\tself.on_disconnect()", "title": "" }, { "docid": "33eee1aa590f9e8f422988a42f281ee1", "score": "0.60823125", "text": "def receiver(conn):\r\n while 1:\r\n msg = conn.recv()\r\n if 'END' in msg:\r\n break\r\n print(F\"Receive the messages: {msg}\")", "title": "" }, { "docid": "0a1c9715fb5a4a91fb083acbb2388d89", "score": "0.60811144", "text": "def handler(self, timeout=0):\n while True:\n try:\n yield self.recv(timeout=timeout)\n except TimeoutError:\n yield None", "title": "" }, { "docid": "cb2eef4f1256de6bc40ebc26f08c8183", "score": "0.6077736", 
"text": "def run(self):\n while True:\n for client in self.clients:\n gevent.spawn(self.send, client)\n gevent.sleep(1)", "title": "" }, { "docid": "bf24b622306ffab9d9a8e6807c386a7b", "score": "0.6076663", "text": "def catch_data(self):\n while True:\n buf = self.irc.recv(4096)\n lines = buf.split('\\n')\n for data in lines:\n data = str(data).strip()\n \n if len(data) < 2:\n continue\n \n # server ping/pong? \"PING :FFFFFFFF1155571C\"\n if data.startswith('PING'):\n self.send('PONG :' + data.split(':')[1])\n if self.connected == False:\n self.connected = True\n \n if len(data.split()) < 2:\n continue\n \n try:\n if len(data.split()) >= 4:\n line = data.split()\n sender = line[0][1::].split('!',1)[0] # :[email protected]\n location = line[2] # Can be channel or user\n text = ' '.join(line[3::])[1::] # Remove the first : then re-join the text\n func = getattr(self, 'trigger_%s' % data.split()[1])\n self.exec_buffer.append({'function': func, 'args': [data, sender, location, text]})\n else:\n func = getattr(self, 'trigger_%s' % data.split()[1])\n self.exec_buffer.append({'function': func, 'args': [data]})\n except AttributeError:\n continue\n except:\n continue", "title": "" }, { "docid": "b1defdd8c5c1c015c4088ed1a8c085f5", "score": "0.60731703", "text": "def messagesHandler(self):\n try:\n messageArr = self._component.recv()\n for message in messageArr:\n if(message is not None):\n t, m = Parser.splitMessage(message)\n self._container.append(self._component.getName(), message)\n\n except socket.error as error: # Error that happends when TCP client disconect.\n pass", "title": "" }, { "docid": "de4a87699e43cb704c6a3062d7bcb525", "score": "0.60722226", "text": "def run(self):\n while True:\n try:\n for item in self.reddit.inbox.unread(limit=None):\n if isinstance(item, praw.models.Message):\n try:\n self.process_message(item)\n item.mark_read()\n except Exception as e:\n print(\"Error processing message: \" + str(item))\n traceback.print_exc()\n time.sleep(1)\n except Exception as e:\n print(\"Error reading inbox: \" + str(e))\n traceback.print_exc()", "title": "" }, { "docid": "ecec4930dad9af2a479b0dcdd1fa7416", "score": "0.6071832", "text": "def __listen_almost_forever(\n self,\n handler: Callable,\n preprocess: Callable = lambda x: x,\n poll_timeout: int = 500,\n ):\n while True:\n with self.__lock:\n if self.__done:\n break\n\n # Don't allow pausing halfway through a message\n with self.__background_lock:\n msg = self.__listener.recv(poll_timeout)\n if msg is None:\n continue\n self.__message_flow(msg, handler, preprocess)\n\n self.__listener.stop()", "title": "" }, { "docid": "1f094aa170b5a803cca8b3b39d929937", "score": "0.606992", "text": "def run(self) -> None:\n messages = self.server.server_receive_message()\n for message in messages:\n self.progress.emit(message)", "title": "" }, { "docid": "5c267cc4366675389e3706d784d230e0", "score": "0.6067176", "text": "def handle(self):\n while True:\n chunk = self.connection.recv(4)\n if len(chunk) < 4: break\n slen = struct.unpack(\">L\", chunk)[0]\n chunk = self.connection.recv(slen)\n while len(chunk) < slen:\n chunk = chunk + self.connection.recv(slen - len(chunk))\n obj = pickle.loads(chunk)\n record = logging.makeLogRecord(obj)\n self.handleLogRecord(record)", "title": "" }, { "docid": "bf981b99c960273b6519f5b005ad38cc", "score": "0.6050623", "text": "async def handle_incoming(self):\n # TODO Deal with other types of messages using flag (currently _)\n\n while True:\n stream_id, flag, message = await self.read_message()\n\n if stream_id is 
not None and flag is not None and message is not None:\n if stream_id not in self.buffers:\n self.buffers[stream_id] = asyncio.Queue()\n await self.stream_queue.put(stream_id)\n\n if flag is get_flag(True, \"NEW_STREAM\"):\n # new stream detected on connection\n await self.accept_stream()\n\n if message:\n await self.buffers[stream_id].put(message)\n\n # Force context switch\n await asyncio.sleep(0)", "title": "" }, { "docid": "359103b85430f54b77eff232ecec9e02", "score": "0.60503155", "text": "def handle(self):\n self.ws_addr = \"%s:%s\" % (\n self.ws.environ[\"REMOTE_ADDR\"],\n self.ws.environ[\"REMOTE_PORT\"])\n logger.info(\"%s connected\" % self.ws_addr)\n\n if \"connected\" in actions:\n Handler.currents.append(self.ws)\n actions[\"connected\"]()\n Handler.currents.pop()\n\n go = True\n while go:\n try:\n m = self.ws.receive()\n except:\n m = None\n\n if m is None:\n go = False\n else:\n self.do_message(m)\n\n if \"disconnected\" in actions:\n Handler.currents.append(self.ws)\n actions[\"disconnected\"]()\n Handler.currents.pop()\n\n # remove websocket from connected socket list\n Handler.s.remove(self.ws)\n # check if no more sockets remain and call\n # disconnect functions if so.\n if len(Handler.s) is 0:\n for f in alldisconnected.funcs:\n f()\n logger.info(\"%s disconnected\" % self.ws_addr)", "title": "" }, { "docid": "452b97be6e1bc24b5a229729216a9500", "score": "0.60486776", "text": "def mainloop(self):\n lverb = True\n is_rfid_scanner = (self.comm_link is not None)\n\n print(\"mainloop begin: is_rfid_server: {}\".format(is_rfid_scanner))\n self.logger.info(\"mainloop begin\")\n # start a random generator thread for testing....\n # self.randTM = Taskmeister.RandomGenerator(self.msgQ, self.logger)\n # self.randTM.set_active(True)\n\n if is_rfid_scanner:\n # send the RFID status to the webclient\n rfid_stat = self.comm_link.get_rfid_state()\n self.send_ws_msg(CommonMSG(CommonMSG.MSG_SV_RFID_STATREP, rfid_stat))\n else:\n # send the stocky server config data\n self.send_server_config()\n # send the QAI update status to the webclient\n self.send_qai_status(None)\n\n do_loop = True\n while do_loop:\n if lverb:\n print(\"YO: before get\")\n msg: CommonMSG = self.msgQ.get()\n self.logger.debug(\"handling msgtype '{}'\".format(msg.msg))\n if lverb:\n print(\"YO: handling msgtype '{}'\".format(msg.msg))\n # handle a EOF separately\n if msg.msg == CommonMSG.MSG_WC_EOF:\n if lverb:\n print(\"mainloop detected WS_EOF... 
quitting\")\n self.ws = None\n do_loop = False\n print(\"step 2\")\n if msg.is_from_rfid_reader():\n self.logger.debug(\"GOT RFID {}\".format(msg.as_dict()))\n self.activate_rfid_spinner()\n\n is_handled = False\n if self.ws is not None and msg.msg in CommonStockyServer.MSG_FOR_WC_SET:\n is_handled = True\n # print(\"sending to WS...\")\n self.send_ws_msg(msg)\n # print(\"...OK send\")\n if self.tls is not None and msg.msg in CommonStockyServer.MSG_FOR_RFID_SET:\n is_handled = True\n self.tls.send_rfid_msg(msg)\n if msg.msg in CommonStockyServer.MSG_FOR_ME_SET:\n print(\"msg for me: {}\".format(msg.msg))\n is_handled = True\n self.server_handle_msg(msg)\n print(\"done handling\")\n if not is_handled:\n mmm = \"mainloop DID NOT handle msgtype '{}'\".format(msg.msg)\n self.logger.error(mmm)\n print(mmm)\n print(\"end of ML.while\")\n print(\"OUT OF LOOP\")", "title": "" }, { "docid": "46609d36d2efed72778e8638997a9184", "score": "0.6048616", "text": "def receiver():\n while True:\n msg, future = incoming.get(True)\n if msg == TerminateReceiver:\n break\n else:\n method, args, kw = msg\n future.target = getattr(obj, method)(*args, **kw)\n future.event.set()", "title": "" }, { "docid": "cc8ad9ede0abe26241b991d85f347a2f", "score": "0.60392517", "text": "def run(self):\n while True:\n recv_data = self.socket.recv(self.buffer_size)\n print(\"Server received data: {}\".format(recv_data))\n packet_info = recv_data.decode('utf-8').strip().split(\",\")\n\n if packet_info[0] == \"rput\" and len(packet_info[1:]) == 2:\n self.__read_file(*packet_info[1:])\n elif packet_info[0] == \"rget\" and len(packet_info[1:]) == 1:\n self.__send_file(*packet_info[1:])\n else:\n if packet_info[0] is not '':\n print(\"Server does not support that check sent command\")\n self.socket.sendall(\"Invalid\".encode('utf-8'))\n\n if not recv_data:\n print(\"Disconnecting from client {}:{}\".format(self.ip, self.port))\n self.socket.shutdown(socket.SHUT_RDWR)\n self.socket.close()\n break", "title": "" } ]
36afa34acf326a9c7053b818cecc01ff
Convert list of tiles as returned by SH WFS into list of time ranges whose time deltas are not greater than max_timedelta.
[ { "docid": "c32b8a492c9b5dc658dfb7c466e6b815", "score": "0.7689309", "text": "def tile_features_to_time_ranges(cls, tile_features, max_timedelta: Union[str, pd.Timedelta] = '1H') \\\n -> List[Tuple[pd.Timestamp, pd.Timestamp]]:\n max_timedelta = pd.to_timedelta(max_timedelta) if isinstance(max_timedelta, str) else max_timedelta\n feature_properties = [feature[\"properties\"] for feature in tile_features]\n timestamps = [pd.to_datetime(f'{properties[\"date\"]}T{properties[\"time\"]}', utc=True)\n for properties in feature_properties]\n timestamps.sort()\n num_timestamps = len(timestamps)\n time_ranges = []\n i = 0\n while i < num_timestamps:\n timestamp1 = timestamp2 = timestamps[i]\n while i < num_timestamps:\n timestamp = timestamps[i]\n if timestamp - timestamp1 >= max_timedelta:\n break\n timestamp2 = timestamp\n i += 1\n time_ranges.append((timestamp1, timestamp2))\n return time_ranges", "title": "" } ]
[ { "docid": "322f66d335834d6e7191aa041611eaff", "score": "0.6455336", "text": "def split(self, max_delta):\n assert isinstance(max_delta, timedelta)\n chunks = []\n chunk_since = self.since\n while chunk_since < self.until:\n chunk_delta = min(max_delta, self.until - chunk_since)\n chunks.append(TimeWindow.from_timedelta(chunk_since, chunk_delta))\n chunk_since += chunk_delta\n return chunks", "title": "" }, { "docid": "177ea67f424b310a56547c9177c0aa70", "score": "0.59138876", "text": "def expand_time_windows(periods: List[TimeWindow]) -> List[pd.Timestamp]:\n days_exclude = []\n for window in periods:\n days_exclude += pd.date_range(window.first_day, window.last_day, freq='D')\n\n return list(set(days_exclude))", "title": "" }, { "docid": "712dcf88b0eb24498d5f1c2fdca179a0", "score": "0.5832873", "text": "def _tiff_timestamp_ranges(self) -> list:\n return self.frame_timestamp_ranges()", "title": "" }, { "docid": "0ab84714f060edc3c064b7b1e57131ed", "score": "0.58066446", "text": "def _getTimeGrid(self, tMin, tMax, nPoi):\n return [tMin + float(i) / (nPoi - 1) * (tMax - tMin) for i in range(nPoi)]", "title": "" }, { "docid": "d4caf76c7e1f162971066e91d002be57", "score": "0.57903236", "text": "def getNonEntries(self):\n entryList = self.entryList\n invertedEntryList = [\n (entryList[i][1], entryList[i + 1][0], \"\")\n for i in range(len(entryList) - 1)\n ]\n\n # Remove entries that have no duration (ie lie between two entries\n # that share a border)\n invertedEntryList = [\n entry for entry in invertedEntryList if entry[0] < entry[1]\n ]\n\n if entryList[0][0] > 0:\n invertedEntryList.insert(0, (0, entryList[0][0], \"\"))\n\n if entryList[-1][1] < self.maxTimestamp:\n invertedEntryList.append((entryList[-1][1], self.maxTimestamp, \"\"))\n\n invertedEntryList = [\n entry if isinstance(entry, Interval) else Interval(*entry)\n for entry in invertedEntryList\n ]\n\n return invertedEntryList", "title": "" }, { "docid": "81862dbc17a81dd4152c5180c4e037e1", "score": "0.5698723", "text": "def window_time_range(incident_time, threshold=5, unit=\"second\"):\n\n if unit == \"second\":\n second_index = incident_time.rfind(\":\")\n seconds = int(incident_time[second_index + 1: second_index + 3])\n min_sec, max_sec = check_time(str(max(seconds - threshold, 0))), check_time(str(min(seconds + threshold, 59)))\n min_time = incident_time[:second_index + 1] + min_sec\n max_time = incident_time[:second_index + 1] + max_sec\n return [min_time, max_time]\n\n elif unit == \"minute\":\n minute_index = incident_time.find(\":\")\n minutes = int(incident_time[minute_index + 1: minute_index + 3])\n min_min, max_min = check_time(str(max(minutes - threshold, 0))), check_time(str(min(minutes + threshold, 59)))\n min_time = incident_time[:minute_index + 1] + min_min + incident_time[minute_index + 3:]\n max_time = incident_time[:minute_index + 1] + max_min + incident_time[minute_index + 3:]\n return [min_time, max_time]\n\n elif unit == \"hour\":\n hour_index = incident_time.find(\"T\")\n hour = int(incident_time[hour_index + 1: hour_index + 3])\n min_hour, max_hour = check_time(str(max(hour - threshold, 0))), check_time(str(min(hour + threshold, 23)))\n min_time = incident_time[:hour_index + 1] + min_hour + incident_time[hour_index + 3:]\n max_time = incident_time[:hour_index + 1] + max_hour + incident_time[hour_index + 3:]\n return [min_time, max_time]\n\n return None", "title": "" }, { "docid": "655bda73fa96acb31cc7faac9093f9be", "score": "0.5638426", "text": "def get_tilearea_range(self):\n return [x ** 2 for x in 
xrange(1, self.tilearea_limit + 1)]", "title": "" }, { "docid": "cfade30e1a645e36d80b3c17c01479c2", "score": "0.5628009", "text": "def tiles(self) -> List[HipsTile]:\n return [self.tile(ipix) for ipix in range(self.n_tiles)]", "title": "" }, { "docid": "7dbc7431fc8c5d7464c86b21301a5041", "score": "0.56226414", "text": "def _convert_range_to_time_units(time_windows):\n start, end = time_windows\n tw_range = (end - start) * 60\n return (0, tw_range)", "title": "" }, { "docid": "b854e1e39e165df7e3aaccb3e3303a7c", "score": "0.5593613", "text": "def __getTimeRange_ut(self): \n for f in self.__fileList:\n regex = ( r'^' + self.runName + '_mhd_' +\n r'(\\d{4})\\-(\\d{2})\\-(\\d{2})' +\n r'T' +\n r'(\\d{2})\\-(\\d{2})-(\\d{2})' +\n r'Z'+\n self.runExt + r'$' )\n \n r = re.match(regex, os.path.basename(f))\n \n assert( len(r.groups()) == 6 )\n t = [ int(match) for match in r.groups() ]\n self.__timeRange.append( datetime.datetime(year=t[0],\n month=t[1],\n day=t[2],\n hour=t[3],\n minute=t[4],\n second=t[5]) )\n \n return self.__timeRange", "title": "" }, { "docid": "3bb5f3b385b7cfece8820710af72ecf7", "score": "0.5544131", "text": "def MaxToMin(min_lev, max_lev, datetime_maxes):\r\n y_res_mins = []\r\n deltaT_ranges = []\r\n for row in range(len(max_lev)-1):\r\n range_length = (datetime_maxes.iloc[row+1]-datetime_maxes.iloc[row]).seconds/3600 # hours\r\n if ~np.isnan(max_lev.iloc[row]) & (range_length>=3.5) & (range_length<=7.5): # peak to trough should take 6.2 hours in ocean\r\n y_res_mins.append(min_lev.iloc[row+1])\r\n deltaT_ranges.append(datetime_maxes.iloc[row+1]-datetime_maxes.iloc[row])\r\n elif ~np.isnan(max_lev.iloc[row]): # if peak lag is more than 4 hours, erroneous data\r\n y_res_mins.append(np.nan)\r\n deltaT_ranges.append(np.nan)\r\n # if last value in oceanside array is not nan, append another nan on the return arrays\r\n if (datetime_maxes.iloc[row+1]==datetime_maxes.iloc[-1]) & ~np.isnan(max_lev.iloc[row+1]): \r\n y_res_mins.append(np.nan)\r\n deltaT_ranges.append(np.nan)\r\n y_res_mins = np.array(y_res_mins)\r\n deltaT_ranges = np.array(deltaT_ranges)\r\n return deltaT_ranges, y_res_mins", "title": "" }, { "docid": "41b1dbe4f8cd9d2568d9d3386c4a4859", "score": "0.5513535", "text": "def resamplify_ranges(ranges, scored_hz, target_hz=200):\n\n ranges_copy = []\n\n for start, stop in ranges:\n corrected_start = (start * scored_hz) / target_hz\n corrected_stop = (stop * scored_hz) / target_hz\n\n ranges_copy.append((corrected_start, corrected_stop))\n return ranges_copy", "title": "" }, { "docid": "1215f220a238d9e91fcf0eb5d6bc0b77", "score": "0.5413744", "text": "def get_bb_max_times(self):\n return [scipy.optimize.fminbound(\n (lambda x: -f(x)), self.t_initial, self.t_final) for f in self.fps]", "title": "" }, { "docid": "d5b44bb030acc7b8a7b386e7bc2a4cce", "score": "0.5357053", "text": "def time_slicer(time_coordinate, query):\n ref_time = time_coordinate.values[0]\n # next step is parsing out the times\n max_hours = max(query['hours'])\n assert int(max_hours) == max_hours\n max_time = ref_time + np.timedelta64(int(max_hours), 'h')\n max_ind = np.max(np.nonzero(time_coordinate.values <= max_time)[0])\n assert max_ind > 0\n return slice(0, max_ind + 1)", "title": "" }, { "docid": "5e561d6f32e5841cd68875f60c76edc2", "score": "0.5344265", "text": "def _get_breaks(n_items: int, n_workers: int) -> List[Tuple[int, int]]:\n div, remainder = divmod(n_items, n_workers)\n slices = []\n bottom = 0\n for i in range(n_workers):\n top = bottom + div\n top += int(i < remainder)\n 
slices.append((bottom, top))\n bottom = top\n return slices", "title": "" }, { "docid": "a9fb2b396b170ebe52fbe5b2edba0a75", "score": "0.5329466", "text": "def _trim(self): \n trimmed = []\n for start, end in self.stamps:\n if not (end < self.tStart or start > self.tEnd):\n trimmed.append((max(start, self.tStart), min(end, self.tEnd)))\n self.stamps = trimmed", "title": "" }, { "docid": "be7a8dcd65fb033ac172b5d1663b3e95", "score": "0.531007", "text": "def calculate_deltas(self, start: datetime, end: datetime) -> List[datetime]:\n deltas = []\n curr = start\n while curr < end:\n deltas.append(curr)\n curr += self._time_granularity_\n return deltas", "title": "" }, { "docid": "4ea10c59d602c9340d024ca922c1324c", "score": "0.5305902", "text": "def ConvertRanges2ExpandedRanges( ranges, max_length ):\n \n new_ranges = []\n \n for r in ranges:\n new_ranges.append( ( max(int(r[0] * param_resolution) + 1, 1),\n min(int(r[1] * param_resolution) + 1, max_length)) )\n return new_ranges", "title": "" }, { "docid": "9fc78a21d79b5448f6c15ae4793876ac", "score": "0.5299879", "text": "def timeseries_to_batch(x: npt.NDArray[T], len_t: int = 3) -> list[npt.NDArray[T]]:\n subviews = np.moveaxis(\n np.lib.stride_tricks.sliding_window_view( # type: ignore [no-untyped-call]\n x, len_t, axis=0\n ),\n -1,\n 1,\n )\n\n list_subviews = np.split( # type: ignore [no-untyped-call]\n subviews, subviews.shape[0], axis=0\n )\n return [np.take(subview, 0, axis=0) for subview in list_subviews]", "title": "" }, { "docid": "91a8c7658c29c3f725b15cf24411adda", "score": "0.5297275", "text": "def build_climatology_bounds(da: xr.DataArray) -> list[str]:\n n = len(da.time)\n return da.time[0 :: n - 1].dt.strftime(\"%Y-%m-%d\").values.tolist()", "title": "" }, { "docid": "614df040cb216430b45604215397a9fc", "score": "0.5269239", "text": "def absolute_timings(self) -> List[Tuple[int, int, int, int]]:\n schedules = []\n for schedule in self.absolute_schedules.all():\n event_time = schedule.event_time\n num_seconds = event_time.minute * 60 + event_time.hour * 3600\n schedules.append([event_time.year, event_time.month, event_time.day, num_seconds])\n return schedules", "title": "" }, { "docid": "577e53c3a47159674c2908c7a2b3ec3a", "score": "0.526138", "text": "def get_unstable_range(\n self, limit: int = None, range_threshold: float = None, **kwargs\n ) -> typing.List[VideoCutRange]:\n change_range_list = sorted(\n [i for i in self.range_list if not i.is_stable(**kwargs)],\n key=lambda x: x.start,\n )\n\n # video can be totally stable ( nothing changed )\n # or only one unstable range\n if len(change_range_list) <= 1:\n return change_range_list\n\n # merge\n i = 0\n merged_change_range_list = list()\n while i < len(change_range_list) - 1:\n cur = change_range_list[i]\n while cur.can_merge(change_range_list[i + 1], **kwargs):\n # can be merged\n i += 1\n cur = cur.merge(change_range_list[i], **kwargs)\n\n # out of range\n if i + 1 >= len(change_range_list):\n break\n merged_change_range_list.append(cur)\n i += 1\n if change_range_list[-1].start > merged_change_range_list[-1].end:\n merged_change_range_list.append(change_range_list[-1])\n\n if limit:\n merged_change_range_list = self._length_filter(\n merged_change_range_list, limit\n )\n # merged range check\n if range_threshold:\n merged_change_range_list = [\n i for i in merged_change_range_list if not i.is_loop(range_threshold)\n ]\n logger.debug(\n f\"unstable range of [{self.video.path}]: {merged_change_range_list}\"\n )\n return merged_change_range_list", "title": "" }, { "docid": 
"5d55876f5627898e2b71c7df9ffda943", "score": "0.5261111", "text": "def compressed(self):\n time_windows = self.time_windows_sorted_by_since\n if not time_windows:\n return TimeWindowsCollection([])\n stack = []\n\n latest = {\n 'since': None,\n 'until': None\n }\n\n for current in time_windows:\n if latest['until'] is None:\n latest['since'] = current.since\n latest['until'] = current.until\n elif latest['until'] >= current.since:\n latest['until'] = max(latest['until'], current.until)\n else:\n stack.append(TimeWindow(latest['since'], latest['until']))\n latest['since'] = current.since\n latest['until'] = current.until\n\n stack.append(TimeWindow(latest['since'], latest['until']))\n return TimeWindowsCollection(stack, sorted_since=True)", "title": "" }, { "docid": "857893fc52595e1667f57e57d035f309", "score": "0.5246155", "text": "def test_global_tiles_clamped():\n tiles = list(mercantile.tiles(-180, -90, 180, 90, [1]))\n assert len(tiles) == 4\n assert min(t.y for t in tiles) == 0\n assert max(t.y for t in tiles) == 1", "title": "" }, { "docid": "2adf7bdc91bddc71de0cd1901d0d236e", "score": "0.5245297", "text": "def filter_out_non_working_hours(times): # TODO: what's this?\n for start, end in times:\n pass", "title": "" }, { "docid": "4f313cc3fae33ef1cf4351c6a838e65a", "score": "0.5245202", "text": "def adjust_list(ticks: list, ttask: int) -> list:\n for _ in range(ttask - 1):\n ticks.append(0)\n return ticks", "title": "" }, { "docid": "8d0feb1999602273e6c9cfd37b24189c", "score": "0.52408475", "text": "def get_gtopo30_tile_list(self):\n\n logger = logging.getLogger(__name__)\n\n # GTOPO30 tile information\n NORTH_LON_LOCATIONS = np.array([-180.0, -140.0, -100.0, -60.0, -20.0,\n 20.0, 60.0, 100.0, 140.0])\n\n SOUTH_LON_LOCATIONS = np.array([-180.0, -120.0, -60.0,\n 0.0, 60.0, 120.0])\n\n LAT_LOCATIONS = np.array([90.0, 40.0, -10.0, -60.0])\n\n GTOPO30_TILE_SET_CUTOFF_LATITUDE = -60.0\n\n '''\n The tiles do not necessarily go all the way to the boundary. For\n example, the w100n90 tile only goes to -99.995833333334 longitude.\n Thus, if our UL longitude falls between -99.995833333334 and -100.0\n then we will need to use w140n90 to get the far left edge. To\n account for this we'll artificially pad the East and West boundaries\n by 1 degree.\n\n Note: this does not seem to cause problems on the North and South\n borders, so we will no worry about them.\n '''\n ul_lon = Math.longitude_norm(self.bounding_west_longitude -\n self.gtopo30_padding)\n lr_lon = Math.longitude_norm(self.bounding_east_longitude +\n self.gtopo30_padding)\n logger.debug('ul_lon = {0}'.format(ul_lon))\n logger.debug('lr_lon = {0}'.format(lr_lon))\n\n '''\n There is a condition which could cause problems on the North and South\n boundaries as well. This is if a scene comes very near the edge, but\n does not quite get there. It could be that adding 1.5 to nrows before\n taking the ceiling (see calculation of nrows in get_gtopo30_data.c)\n causes the nrows to be large enough that we need data from the tile to\n the South, even though the scene stopped just before that tile. To\n address this we will grow North and South by 1 degree as we did in the\n East and West directions above. 
Note, the same problem can occur in\n the East and West directions, but was already addressed when adding\n one to the longitudes above.\n '''\n ul_lat = min((self.bounding_north_latitude + self.gtopo30_padding),\n self.north_latitude_limit)\n lr_lat = max((self.bounding_south_latitude - self.gtopo30_padding),\n self.south_latitude_limit)\n logger.debug('ul_lat = {0}'.format(ul_lat))\n logger.debug('lr_lat = {0}'.format(lr_lat))\n\n # Determine the unique latitudes where the input data resides\n lat_list = list(set([min(LAT_LOCATIONS[\n np.where(ul_lat < LAT_LOCATIONS)]),\n min(LAT_LOCATIONS[\n np.where(lr_lat < LAT_LOCATIONS)])]))\n logger.debug('lat_list = {0}'.format(lat_list))\n\n tile_list = list()\n # Determine the unique longitudes where the input data resides\n for lat in lat_list:\n lon_list = list()\n if lat > GTOPO30_TILE_SET_CUTOFF_LATITUDE:\n lon_list.append(\n max(NORTH_LON_LOCATIONS[\n np.where(ul_lon > NORTH_LON_LOCATIONS)]))\n lon_list.append(\n max(NORTH_LON_LOCATIONS[\n np.where(lr_lon > NORTH_LON_LOCATIONS)]))\n else:\n lon_list.append(\n max(SOUTH_LON_LOCATIONS[\n np.where(ul_lon > SOUTH_LON_LOCATIONS)]))\n lon_list.append(\n max(SOUTH_LON_LOCATIONS[\n np.where(lr_lon > SOUTH_LON_LOCATIONS)]))\n lon_list = list(set(lon_list))\n logger.debug('lon_list = {0}'.format(lon_list))\n\n n_s = 'n'\n if lat < 0:\n n_s = 's'\n\n for lon in lon_list:\n e_w = 'e'\n if lon <= 0:\n e_w = 'w'\n\n # Already know if north/south, east/west so we just need\n # the positive value for the filename\n abs_lat = abs(lat)\n abs_lon = abs(lon)\n\n tile_list.append('{0}{1:03}{2}{3:02}'\n .format(e_w, abs_lon, n_s, abs_lat))\n\n return tile_list", "title": "" }, { "docid": "9015767027dbd353a22eee05789ed134", "score": "0.5227308", "text": "def _removeUltrashortIntervals(tier, minLength, minTimestamp):\n\n # First, remove tiny intervals\n newEntryList = []\n j = 0 # index to newEntryList\n for start, stop, label in tier.entryList:\n\n if stop - start < minLength:\n # Correct ultra-short entries\n if len(newEntryList) > 0:\n lastStart, _, lastLabel = newEntryList[j - 1]\n newEntryList[j - 1] = (lastStart, stop, lastLabel)\n else:\n # Special case: the first entry in oldEntryList was ultra-short\n if len(newEntryList) == 0 and start != minTimestamp:\n newEntryList.append((minTimestamp, stop, label))\n # Normal case\n else:\n newEntryList.append((start, stop, label))\n j += 1\n\n # Next, shift near equivalent tiny boundaries\n # This will link intervals that were connected by an interval\n # that was shorter than minLength\n j = 0\n while j < len(newEntryList) - 1:\n diff = abs(newEntryList[j][1] - newEntryList[j + 1][0])\n if diff > 0 and diff < minLength:\n newEntryList[j] = (\n newEntryList[j][0],\n newEntryList[j + 1][0],\n newEntryList[j][2],\n )\n j += 1\n\n return tier.new(entryList=newEntryList)", "title": "" }, { "docid": "181fc0a6659862eed9e9293c98c5cfd1", "score": "0.5219579", "text": "def cut(times, dt=1):\n time_list = times.values\n if not time_list.size:\n return []\n\n jumps = time_list[1:] != time_list[:-1]+dt\n nb_jumps = sum(jumps) # numbers of jumps\n\n dates = np.zeros((nb_jumps+1, 2))\n # Fist time value\n dates[0, 0] = time_list[0]\n # Last time value\n dates[-1, -1] = time_list[-1]\n # Add all other jumps\n dates[1:, 0] = time_list[1:][jumps]\n dates[:-1, 1] = time_list[:-1][jumps]\n return dates.tolist()", "title": "" }, { "docid": "c5b164778c0d2897adea2c63719902f8", "score": "0.52050847", "text": "def time_range(self):\n if len(self._time_range) == 0:\n for band in 
self.bands_from_grib_file:\n self._time_range.append(self.timestep_for_band(band))\n return self._time_range", "title": "" }, { "docid": "d9e912e74144156504e701b78250b167", "score": "0.5189701", "text": "def range_maps(self) -> List[RangeMap]:\n range_maps_list = []\n for dest in self._state.attributes[SERVICE_RANGEMAP][\"rangemaps\"]:\n range_maps_list.append(RangeMap(dest))\n return range_maps_list", "title": "" }, { "docid": "db126b5d56db778a58e6dd0fcf370c93", "score": "0.5189438", "text": "def tile_limits(x_min, x_max, y_min, y_max, x_tile, y_tile):\n # modify the bounding box to be a multiple of the tile dimension\n x_pad = (x_max - x_min) % x_tile\n x_min += math.ceil(x_pad/2)\n x_max += math.floor(x_pad/2) \n x_num_tiles = int((x_max - x_min) // x_tile)\n\n y_pad = (y_max - y_min) % y_tile\n y_min += math.ceil(y_pad/2)\n y_max += math.floor(y_pad/2) \n y_num_tiles = int((y_max - y_min) // y_tile)\n\n # generate tile bboxes\n tiles = []\n for ii in range(x_num_tiles):\n for jj in range(y_num_tiles):\n tile = {}\n tile['x_min'] = x_min + ii*x_tile\n tile['x_max'] = tile['x_min'] + x_tile\n tile['y_min'] = y_min + jj*y_tile\n tile['y_max'] = tile['y_min'] + y_tile\n tiles.append(tile)\n\n return tiles", "title": "" }, { "docid": "6200c7c574041bc8ed3f27af2e7e8923", "score": "0.5184637", "text": "def get_bb_max_times(self):\n return [sympyutils.poly_fminbound(\n -poly, self.t_initial, self.t_final) for poly in self.polys]", "title": "" }, { "docid": "9f72df2508cbe1b32761abaf6284d332", "score": "0.51813865", "text": "def scale_timings(timelist, input_units, output_units, time_repetition):\r\n if input_units==output_units:\r\n _scalefactor = 1.\r\n if (input_units == 'scans') and (output_units == 'secs'):\r\n _scalefactor = time_repetition\r\n if (input_units == 'secs') and (output_units == 'scans'):\r\n _scalefactor = 1./time_repetition\r\n timelist = [np.max([0., _scalefactor*t]) for t in timelist]\r\n return timelist", "title": "" }, { "docid": "93c02940a180c73e6c46faf4dbb625a2", "score": "0.517908", "text": "def _get_empty_timeslots(list_of_booked_timings, date):\n day = datetime.strptime((details[\"date\"]), '%d-%b-%Y').strftime('%A')\n \n #weekday 8.30 to 10.30\n #weekend 8.30 to 5\n \n list_of_available_timings = []\n \n list_of_booked_timings.sort()\n \n for i in range(len(list_of_booked_timings)-1):\n time_range1 = list_of_booked_timings[i].split('-')\n start_time1 = int(time_range1[0].replace(':', \"\"))\n end_time1 = int(time_range1[1].replace(':',\"\"))\n \n \n time_range2 = list_of_booked_timings[i+1].split('-')\n start_time2 = int(time_range2[0].replace(':', \"\"))\n end_time2 = int(time_range2[1].replace(':',\"\"))\n \n if i == 0:\n check = start_time1 - 800\n if check > 0:\n first = '08:30' + '-' + time_range1[0]\n list_of_available_timings.append(first)\n \n \n if i == len(list_of_booked_timings)-2: \n check = 2230 - end_time2\n if check > 0: \n if day == 'Saturday' or day == 'Sunday':\n last = time_range2[1] + '-' + '17:00'\n list_of_available_timings.append(last)\n else:\n last = time_range2[1] + '-' + '22:30'\n list_of_available_timings.append(last)\n \n if start_time2 - end_time1 > 0:\n add = time_range1[1] + '-' + time_range2[0]\n list_of_available_timings.append(add)\n \n list_of_available_timings.sort()\n \n return list_of_available_timings", "title": "" }, { "docid": "a7f63a31a2f5b2ff8e8b3be377aa448a", "score": "0.51764965", "text": "def get_filenames_by_time_range(\n self, start: datetime, stop: datetime\n ) -> List[Path]:\n filenames = []\n time = start\n while 
time <= stop:\n filenames.append(self.get_filename_by_time(time))\n time += timedelta(hours=1)\n\n # Cast to set then back to list to remove duplicates.\n return sorted(list(set(filenames)))", "title": "" }, { "docid": "1c7a7d3d459bbda361500bda8065fe5c", "score": "0.517434", "text": "def _calculate_min_max(self):\n\n min_max_list = []\n\n for band in self.bands:\n\n file_path = ('%s/%s_B%s.TIF' % (self.warp_path,\n self.image, band))\n\n if os.path.exists(file_path):\n print ('Starting the Min/Max process with designated -percent '\n 'cut- for band %s of %s' % (band, self.image))\n print '...'\n\n # Open images in the warp folder\n ds = gdal.Open(file_path)\n\n # converting raster to numpy array\n values = numpy.array(ds.GetRasterBand(1).ReadAsArray())\n to_list = values.tolist()\n\n full_list = [item for sublist in to_list for item in sublist]\n\n # removing zeros\n value_list = filter(lambda x: x != 0, full_list)\n list_len = len(value_list)\n\n value_list.sort()\n\n # determining number of integers to cut from bottom of list\n cut_value_bottom = int(float(self.btm_prct) /\n float(100) * float(list_len))\n\n # determining number of integers to cut from top of list\n cut_value_top = int(float(self.top_prct) /\n float(100) * float(list_len))\n\n # establishing new min and max with percent cut\n cut_list = value_list[\n (cut_value_bottom + 1):(list_len - cut_value_top)]\n\n # adding min and max with percent cut values to list\n min_max_list.extend([cut_list[0], cut_list[-1]])\n\n print 'Finished processing band %s of %s' % (band, self.image)\n\n return [min(min_max_list), max(min_max_list)]", "title": "" }, { "docid": "4ee63c0439a4476e10a117915e35af8f", "score": "0.51733375", "text": "def create_list(dig):\n max_range = pow(10, dig) - 1\n min_range = pow(10, dig-1) - 1\n unfiltered_list = list(range(max_range, min_range, -1))\n return unfiltered_list", "title": "" }, { "docid": "ceb2b2c28adc79a6cde5bb05107097ce", "score": "0.5172149", "text": "def timedelta_with_itself(time_list: List) -> List:\n temp_list = []\n for index , time in enumerate(time_list):\n try:\n if index==0:\n temp_list.append((0.0))\n else:\n temp_list.append(time_list[index]-time_list[index-1])\n except IndexError:\n pass\n return temp_list", "title": "" }, { "docid": "a182e850796228a52ff5605a5b9ac531", "score": "0.5169623", "text": "def get_time_range_from_relayoutData(relayoutData):\n time_range = [df_start, df_end]\n if relayoutData is None:\n pass\n else:\n if 'xaxis.range[0]' in relayoutData:\n time_range = [pd.to_datetime(relayoutData['xaxis.range[0]']),\n pd.to_datetime(relayoutData['xaxis.range[1]'])]\n return time_range", "title": "" }, { "docid": "4944d0d6bcc8c043ed50f3349dfe72aa", "score": "0.5168822", "text": "def list_of_length_scales(bottom_bound, top_bound, steps):\n length_scales = []\n for i in range(bottom_bound, top_bound, steps):\n length_scales.append(i)\n return length_scales", "title": "" }, { "docid": "4d857517b86e9b30fd371be8f3085f93", "score": "0.5161929", "text": "def _generate_time_windows(od_dist, time_windows):\n # initialize time matrix as empty list\n time_matrix = []\n # if time_windows is set in database this means it is a list of lits.\n # so iterate each list of lists and add it to time matrix.\n # else, do it from slider.\n if len(time_windows) > 2:\n for ti_wi in time_windows:\n tw = _convert_range_to_time_units(ti_wi)\n time_matrix.append(tw)\n else:\n tw = _convert_range_to_time_units(time_windows)\n time_matrix = [tw for elem in range(len(od_dist))]\n return time_matrix", "title": 
"" }, { "docid": "5440cbfc84a53cd8db90e087315b083b", "score": "0.51561314", "text": "def make_consistent(orig_list, delta, max_value=None, target='datetimefield'):\n if not orig_list:\n return\n\n prev = orig_list[0]\n\n for i in orig_list:\n while abs(prev[target]-i[target]).total_seconds() / delta > 1:\n if prev[target] > i[target]:\n prev[target] -= dt.timedelta(seconds=delta)\n else:\n prev[target] += dt.timedelta(seconds=delta)\n\n yield prev, True\n\n prev = i\n yield i, False\n\n # append new elements at the end until reach max value\n while max_value and abs(prev[target] - max_value).total_seconds()/delta > 1:\n if prev[target] > max_value:\n prev[target] -= dt.timedelta(seconds=delta)\n else:\n prev[target] += dt.timedelta(seconds=delta)\n\n yield prev, True", "title": "" }, { "docid": "1657797e8e149e49f2e0ac34a224b3e1", "score": "0.5152248", "text": "def maxvals4timeslice(self, name, start, stop):\n return self.reduce_lonlat_slice(name, np.nanmax, start, stop)", "title": "" }, { "docid": "f4ce06af851daedef0460dddba5849bf", "score": "0.51467496", "text": "def date_range_groups(data):\n\tranges = []\n\tfor k, g in groupby(enumerate(data), lambda i_x: i_x[1]+relativedelta(days=-i_x[0])):\n\t\tgroup = list(map(itemgetter(1), g))\n\t\tranges.append((group[0], group[-1]))\n\n\treturn ranges", "title": "" }, { "docid": "e7cf2433b1c582744f45361d622915e2", "score": "0.513196", "text": "def split_tranges(tranges, width, tres):\n newtranges = []\n for trange in tranges:\n t0, t1 = trange\n assert width < (t1 - t0)\n # calculate left and right edges of subtranges that fall within trange:\n # This is tricky: find maximum left edge such that the corresponding maximum right\n # edge goes as close as possible to t1 without exceeding it:\n tend = (t1-width+tres) // tres*tres # there might be a nicer way, but this works\n ledges = np.arange(t0, tend, tres)\n redges = ledges + width\n subtranges = [ (le, re) for le, re in zip(ledges, redges) ]\n newtranges.append(subtranges)\n return np.vstack(newtranges)", "title": "" }, { "docid": "76ff79eef98757a88f01cd7b9a35eb2e", "score": "0.5129836", "text": "def bounds_to_tiles(self, bounds):\n grid_ys, grid_xs = self._frame_bounds(bounds)\n return self._yield_tiles(grid_ys, grid_xs, bounds)", "title": "" }, { "docid": "8c3013f098873b4f6568da362e0ac566", "score": "0.5124893", "text": "def make_last_days_y_list(time_tuple_lst):\n temp_list = []\n for tpl in time_tuple_lst:\n temp_list.append(tpl[1]) #do not use \"extend\"...says \"numpy.float64 object is not iterable\" \n return temp_list", "title": "" }, { "docid": "0aed682abebd6d1b897d1aefd011477e", "score": "0.51233643", "text": "def filter_annots_using_minimum_timedelta(ibs, aid_list, min_timedelta):\n import vtool as vt\n #min_timedelta = 60 * 60 * 24\n #min_timedelta = 60 * 10\n grouped_aids = ibs.group_annots_by_name(aid_list)[0]\n unixtimes_list = ibs.unflat_map(ibs.get_annot_image_unixtimes_asfloat, grouped_aids)\n # Find the maximum size subset such that all timedeltas are less than a given value\n chosen_idxs_list = [\n ut.maximin_distance_subset1d(unixtimes, min_thresh=min_timedelta)[0]\n for unixtimes in unixtimes_list]\n filtered_groups = vt.ziptake(grouped_aids, chosen_idxs_list)\n filtered_aids = ut.flatten(filtered_groups)\n if ut.DEBUG2:\n timedeltas = ibs.get_unflat_annots_timedelta_list(filtered_groups)\n min_timedeltas = np.array([np.nan if dists is None else\n np.nanmin(dists) for dists in timedeltas])\n min_name_timedelta_stats = ut.get_stats(min_timedeltas, use_nan=True)\n 
print('min_name_timedelta_stats = %s' % (ut.dict_str(min_name_timedelta_stats),))\n return filtered_aids", "title": "" }, { "docid": "a50af269ca2843fabd52d605ffea8f36", "score": "0.5115767", "text": "def get_pools(all_trips, total_trips, pool_size):\n pools_per_day = (24 * 60) / pool_size\n pools_per_hour = 60 / pool_size\n pools = [ [] for i in range(pools_per_day)]\n\n pool_start_time = 0\n prev_hour = 0\n trips_pooled = 0\n for i in range(pools_per_day):\n if pool_start_time >= 60:\n pool_start_time = 0\n\n for j in range(trips_pooled, total_trips):\n trip_start_minutes = int(all_trips[j][5].split(\":\")[1])\n trip_start_hour = int(all_trips[j][5].split(\":\")[0])\n\n if prev_hour == trip_start_hour:\n if trip_start_minutes >= pool_start_time and trip_start_minutes < pool_start_time + pool_size:\n pools[i].append(all_trips[j][0])\n trips_pooled += 1\n\n pool_start_time += pool_size\n if ((i + 1) % pools_per_hour) == 0:\n prev_hour += 1\n\n return pools", "title": "" }, { "docid": "3e9ecde636bc49bcaf64c556581184ee", "score": "0.5112423", "text": "def prepare_time_list_according_to_format(self):\n final_time_list = []\n actual_data = list(self.get_demand_data()[\"Time\"])\n for each in actual_data:\n if \"pm\" in each and \"am\" not in each:\n sub_str = []\n res = re.sub('pm', '', each)\n res = res.split(\"-\")\n res = list(map(int, res))\n for each_split in res:\n if each_split < 12:\n each_split += 12\n sub_str.append(str(each_split))\n final_time_list.append(\"-\".join(sub_str))\n if \"am\" in each and \"pm\" not in each:\n res = re.sub('am', '', each)\n final_time_list.append(res)\n if \"am\" in each and \"pm\" in each:\n res = re.sub('am|pm', '', each)\n final_time_list.append(res)\n return final_time_list", "title": "" }, { "docid": "66a54225e72ced6c14df1220027ae310", "score": "0.51037484", "text": "def time_window_bins(self, t0, tend):\n indexes = []\n for level in self.dmd_tree.levels:\n for leaf in self.dmd_tree.index_leaves(level):\n local_times = self.partial_time_interval(level, leaf)\n if (\n local_times[\"t0\"] <= t0 < local_times[\"tend\"]\n or local_times[\"t0\"] < tend <= local_times[\"tend\"]\n or (t0 <= local_times[\"t0\"] and tend >= local_times[\"tend\"])\n ):\n indexes.append((level, leaf))\n\n indexes = np.unique(indexes, axis=0)\n return indexes", "title": "" }, { "docid": "b86808c5107e7c11acca27c250317eb4", "score": "0.5089789", "text": "def build_tiles():\n return [[ Tile(y)\n for y in range(MAP_HEIGHT) ]\n for x in range(MAP_WIDTH) ]", "title": "" }, { "docid": "319ecf77e84f33a9f80f6c301e08d815", "score": "0.5086945", "text": "def time_window_bins(self, t0, tend):\n indexes = []\n for level in self.dmd_tree.levels:\n for leaf in self.dmd_tree.index_leaves(level):\n\n local_times = self.partial_time_interval(level, leaf)\n if (local_times[\"t0\"] <= t0 < local_times[\"tend\"] or\n local_times[\"t0\"] < tend <= local_times[\"tend\"] or\n (\n t0 <= local_times[\"t0\"] and\n tend >= local_times[\"tend\"]\n )):\n indexes.append((level, leaf))\n\n indexes = np.unique(indexes, axis=0)\n return indexes", "title": "" }, { "docid": "be5290842e10ad069370a00909f50151", "score": "0.50859946", "text": "def my_time_slice(self, t_start, t_stop):\n \n #new_VmList = VmList(self.raw_data(), self.id_list(), self.dt, t_start, t_stop, self.dimensions)\n #for id in self.id_list():\n # new_VmList[id]=self.analog_signals[id].time_slice(t_start, t_stop)\n \n #return new_AnalogSignalList\n \n \n new_VmList=super( MyVmList, self ).time_slice(t_start, t_stop)\n \n\n 
warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n new_MyVmList=convert_super_to_sub_class(new_VmList, MyVmList)\n \n return new_MyVmList", "title": "" }, { "docid": "aec236a2203f6d6ab8547420ca854a24", "score": "0.5085477", "text": "def cut_tim(tim,tmin=None,tmax=None,output=None):\n tmin = tmin or -np.inf\n tmax = tmax or np.inf\n tim_strings = deque()\n lines = file(tim).readlines()\n if 'FORMAT 1' not in lines[0]:\n raise ValueError('Cannot parse non-tempo2 style TOA files.')\n for line in lines:\n tim_strings.append(line)\n if id_toa_line(line):\n mjd = float(line.split()[2])\n if not((mjd >= tmin) and (mjd < tmax)):\n tim_strings.pop()\n if output is not None:\n file(output,'w').write(''.join(tim_strings))\n return tim_strings", "title": "" }, { "docid": "d1e06ce12d5a516c54c8252f9696ac70", "score": "0.5085181", "text": "def _list_from_ranges(data):\n if not isinstance(data, list):\n data = [data]\n result = []\n for ranges in data:\n if (isinstance(ranges, int)):\n ranges = str(ranges)\n num_start = 0\n num_end = 0\n if '-' in ranges:\n split_start, split_end = ranges.split('-')\n if (split_start.isdigit()) and (split_end.isdigit()):\n # Regular numeric ranges\n num_start, num_end = int(split_start), int(split_end)\n else:\n # X10 device ranges, presumably\n num_start = X10.housecode_to_int(split_start)\n num_end = X10.housecode_to_int(split_end)\n if (num_start is None) or (num_end is None):\n continue\n range_start = num_start - 1\n range_end = num_end - 1\n result.extend(list(range(range_start, range_end + 1)))\n else:\n range_start = None\n num_start = 0\n if ranges.isdigit():\n num_start = int(ranges)\n else:\n num_start = X10.housecode_to_int(ranges)\n range_start = num_start - 1\n result.append(num_start)\n return result", "title": "" }, { "docid": "56caadf17b3fc3f6d8f29ac6c56eadd9", "score": "0.5083477", "text": "def filter_time_points2(tot_incidlist, incl_min, incl_max):\n\n\t# calculate cumulative number of cases at each time step\n\tcum_ct = np.cumsum(tot_incidlist)\n\t# generate list of time steps where cumulative number of cases is between incl_min and incl_max proportion of total cases in epidemic\n\tincl_tsteps = [i for i, ct in enumerate(cum_ct) if ct/float(max(cum_ct)) > incl_min and ct/float(max(cum_ct)) < incl_max]\n\t\n\tif incl_tsteps:\n\t\treturn min(incl_tsteps), max(incl_tsteps)\n\telse:\n\t\treturn 0, 0", "title": "" }, { "docid": "12e033cdd1ee70b7be6d941672a0fd79", "score": "0.5078448", "text": "def list_time_range(self, limit: int = DEFAULT_LIMIT, offset: int = None) -> dict[str, Any]:\n return self._http_request(\n method='GET',\n url_suffix='api/objects/timeranges',\n params=assign_params(limit=limit, offset=offset),\n )", "title": "" }, { "docid": "a2d13d1a16d44b4ff22c9b351a1a1d51", "score": "0.50747746", "text": "def filter_temps_by_monitoring(temp_range, start_time, end_time):\n monitored_temps = []\n for row in temp_range:\n try:\n # Check if the datetime in the row's first column is in between\n # the start and end monitoring time\n temp_date = datetime.strptime(row[0].value, '%Y-%m-%d %H:%M:%S')\n except TypeError:\n # We're parsing a sheet that has fewer than 1440 rows of temp data\n break\n if start_time - timedelta(minutes=15) <= temp_date <= end_time + timedelta(minutes=15):\n monitored_temps.append(row[1].value)\n\n return monitored_temps", "title": "" }, { "docid": "ec7ae7e98fcee6a804922e53460278f3", "score": "0.506944", "text": "def _getTimeVsPotObjsForEachTimeHillAdded(inpObj, timeRange=None, timeTol=1e-4, 
minTimeDiff=1e-3):\n\tuseObj = copy.deepcopy(inpObj)\n\tuseObj.sortTimes()\n\n\t#Step 0: Figure out the time range\n\tif timeRange is None:\n\t\ttimeRange = [-1, np.inf]\n\n\t#Step 1: We need to figure out the time ranges for each\n\tminTime, maxTime = timeRange\n\tspawnTimes = [t for t in useObj.times if t>minTime and t<maxTime] #this will be in order\n\n\n\n\t#Find the INDICES corresponding to times where hills were essentially spawned\n\tspawnIndices, outTimeVals = [0], [spawnTimes[0]]\n\tfor idx,t in enumerate(spawnTimes[1:],1):\n\t\tif t>outTimeVals[-1]+minTimeDiff:\n\t\t\tspawnIndices.append(idx)\n\t\t\toutTimeVals.append(spawnTimes[idx])\n\n\t#Now use minTime to get timeRanges (we need to do a load of -1 things tho)\n\toutTimeRanges = list()\n\tfor idx in spawnIndices[1:]:\n\t\toutTimeRanges.append( [minTime,spawnTimes[idx-1]] )\n\n\t#We need to figure out the last one now; which just runs from minTime to maxTime\n\toutTimeRanges.append([minTime, maxTime])\n\n\t#Step 2: get potential objs from these times\n\toutObjs = list()\n\tfor tRange in outTimeRanges:\n\t\tcurrObj = useObj.createGroupedHills(timeRange=tRange, timeTol=timeTol)\n\t\toutObjs.append(currObj)\n\n\treturn [ [t,obj] for t,obj in it.zip_longest(outTimeVals,outObjs)]", "title": "" }, { "docid": "cd3c50f892de2f31e880c579a7ca1a40", "score": "0.5066422", "text": "def extent(lst):\n vecs = zip(*lst)\n return [max(v) - min(v) for v in vecs]", "title": "" }, { "docid": "efa36aacda44aadc979f32710257ddef", "score": "0.5055729", "text": "def partition(self, max_size):\n if max_size <= 0:\n raise ValueError('max_size must be > 0: {}'.format(max_size))\n\n for interval in self:\n refname = interval.reference_name\n for pos in range(interval.start, interval.end, max_size):\n yield make_range(refname, pos, min(interval.end, pos + max_size))", "title": "" }, { "docid": "f39f0f1464966f351e73d2179b201f21", "score": "0.50527614", "text": "def get_table_sizes(task_list: list) -> list:\n x = 0\n time_slot = 0\n for ele in task_list:\n if len(ele[1]) > x:\n x = len(ele[1])\n if len(ele[0]) > time_slot:\n time_slot = len(ele[0])\n if x < 5:\n x = 5\n return [x + 15, time_slot - 7]", "title": "" }, { "docid": "0415c966cb35f35eb0f7e00f868d8c41", "score": "0.5042304", "text": "def _partition_daterange(self, start, end, step: datetime.timedelta):\n get_start_time = operator.itemgetter(self.partition_field_start.eval(self.config))\n get_end_time = operator.itemgetter(self.partition_field_end.eval(self.config))\n date_range = [\n dr\n for dr in super(DatetimeIncrementalSyncComponent, self)._partition_daterange(start, end, step)\n if get_start_time(dr) < get_end_time(dr)\n ]\n for i, _slice in enumerate(date_range):\n start_time = self._parser.parse(get_start_time(_slice), self.start_datetime.datetime_format, self._timezone)\n end_time = self._parser.parse(get_end_time(_slice), self.end_datetime.datetime_format, self._timezone)\n _slice[self.stream_slice_field_step.eval(self.config)] = (end_time + datetime.timedelta(days=int(bool(i))) - start_time).days\n return date_range", "title": "" }, { "docid": "e683c73106742273faa2edf2ba559dce", "score": "0.5034739", "text": "def space_timesteps(num_timesteps, section_counts):\n if isinstance(section_counts, str):\n if section_counts.startswith(\"ddim\"):\n desired_count = int(section_counts[len(\"ddim\") :])\n for i in range(1, num_timesteps):\n if len(range(0, num_timesteps, i)) == desired_count:\n return set(range(0, num_timesteps, i))\n raise ValueError(\n f\"cannot create exactly {num_timesteps} steps 
with an integer stride\"\n )\n elif section_counts == \"fast27\":\n steps = space_timesteps(num_timesteps, \"10,10,3,2,2\")\n # Help reduce DDIM artifacts from noisiest timesteps.\n steps.remove(num_timesteps - 1)\n steps.add(num_timesteps - 3)\n return steps\n section_counts = [int(x) for x in section_counts.split(\",\")]\n size_per = num_timesteps // len(section_counts)\n extra = num_timesteps % len(section_counts)\n start_idx = 0\n all_steps = []\n for i, section_count in enumerate(section_counts):\n size = size_per + (1 if i < extra else 0)\n if size < section_count:\n raise ValueError(\n f\"cannot divide section of {size} steps into {section_count}\"\n )\n if section_count <= 1:\n frac_stride = 1\n else:\n frac_stride = (size - 1) / (section_count - 1)\n cur_idx = 0.0\n taken_steps = []\n for _ in range(section_count):\n taken_steps.append(start_idx + round(cur_idx))\n cur_idx += frac_stride\n all_steps += taken_steps\n start_idx += size\n return set(all_steps)", "title": "" }, { "docid": "82cb93efc92f41642cefa81f0ac5569c", "score": "0.5032564", "text": "def get_tile_tups(self):\n return [(t.left, t.right) for t in self.tiles]", "title": "" }, { "docid": "025c5c185d042648a990f4b6809785d9", "score": "0.5026414", "text": "def candidates_for_task_timeslots(t, ul):\r\n \r\n lt = []\r\n ls = []\r\n \r\n ts = t[4] / t[6] * 60\r\n \r\n # time\r\n for i in ul:\r\n if(i[3] >= t[3] and i[3] <= (t[3] + ts)):\r\n lt.append(i)\r\n \r\n # space\r\n for j in lt:\r\n t1 = (t[1], t[2])\r\n t2 = (j[1], j[2])\r\n distance = hv(t1, t2, 'm')\r\n # distance factor of user-i for task-j\r\n dMax = 1 - (distance / t[5])\r\n if(distance >= 0):\r\n ls.append(j)\r\n \r\n return ls", "title": "" }, { "docid": "58eb9de2d75206bffdf50be740d057ed", "score": "0.5026111", "text": "def filter_list(lst):\n return [t for t in lst if -1 < t[0] < 7 and -1 < t[1] < 6]", "title": "" }, { "docid": "748545a69cd4bafaacc1d4c482043ac3", "score": "0.5024284", "text": "def getByTime(self):\n width, height, nChannels, nSlices, nFrames = self.image.getDimensions() \n dendritesByTime = [[] for x in range(0, nFrames)] \n for element in self.elements.values():\n t = element.getFrame()\n dendritesByTime[t-1].append(element)\n return dendritesByTime", "title": "" }, { "docid": "2f84812e91adadc09dd84f03caf72ff6", "score": "0.5023871", "text": "def from_to(tstart, tend, tdelta):\r\n if not type(tstart) == dt.datetime:\r\n tstart = dt.datetime.strptime(tstart, \"%Y-%m-%d %H:%M:%S\")\r\n if not type(tend) == dt.datetime:\r\n tend = dt.datetime.strptime(tend, \"%Y-%m-%d %H:%M:%S\")\r\n tdelta = dt.timedelta(seconds=tdelta)\r\n tsteps = [tstart, ]\r\n tmptime = tstart\r\n while True:\r\n tmptime = tmptime + tdelta\r\n if tmptime > tend:\r\n break\r\n else:\r\n tsteps.append(tmptime)\r\n return tsteps", "title": "" }, { "docid": "c7c3b5ff40b7b42776ad469c126ba892", "score": "0.50130343", "text": "def convertToTiles(surface, width, height):\n \n ret = []\n \n sz = surface.get_size()\n if sz[0] == width and sz[1] == height: return [surface]\n \n xstep = int(ceil(float(sz[0]) / float(width)))\n ystep = int(ceil(float(sz[1]) / float(height)))\n \n for dy in xrange(ystep):\n for dx in xrange(xstep):\n surf = pygame.Surface((width, height)).convert_alpha()\n surf.fill((0,0,0,0))\n surf.blit(surface, (0,0), (dx*width, dy*height, width, height))\n ret.append(surf)\n \n print \"Converted surface with dimensions (%d x %d) into %d tiles of dimension (%d x %d)\" % \\\n (surface.get_width(), surface.get_height(), len(ret), width, height)\n \n return ret", 
"title": "" }, { "docid": "62d0bf21aee6fffc967a7297ce23c102", "score": "0.50098825", "text": "def _group_ranges(self, units):\n for _, group in itertools.groupby(enumerate(units), lambda q: (q[1] - q[0])):\n group = list(group)\n yield group[0][1], group[-1][1]", "title": "" }, { "docid": "b4204692b566a58a0f716553571839d8", "score": "0.5003235", "text": "def ranges(self) -> typing.List[ezcoach.range.Range]:", "title": "" }, { "docid": "d0f500fb1cfb8a4f438c9e4eee38f754", "score": "0.50002885", "text": "def _concatenate_timestreams(cls, ts_lst, ts_rounding_error=0.6, ts_interp_threshold=0):\n #check for contiguous timestreams\n for i in range(1, len(ts_lst)):\n ts_sep = (ts_lst[i].start.time - ts_lst[i-1].stop.time) * ts_lst[i].sample_rate\n if numpy.abs(ts_sep - 1) > ts_rounding_error:\n if (ts_interp_threshold > 0) and ((ts_sep - 1) < ts_interp_threshold):\n log_warn(\"Timestreams are not contiguous: timestreams %d and %d \"\n \"separated by %f samples (%s). Interpolating.\" %\n (i, i-1, ts_sep - 1, str(ts_lst[i].start)))\n v = numpy.linspace(ts_lst[i-1][-1], ts_lst[i][0], ts_sep + 1)[1:-1]\n ts_interp = cls(v)\n ts_interp.units = ts_lst[0].units\n ts_interp.start = ts_lst[i-1].stop + 1. / ts_lst[i].sample_rate\n ts_interp.stop = ts_lst[i].start - 1. / ts_lst[i].sample_rate\n ts_lst = ts_lst[:i] + [ts_interp] + ts_lst[i:]\n else:\n log_fatal(\"Timestreams are not contiguous: timestreams %d and %d \"\n \"separated by %f samples (%s)\" % (i, i-1, ts_sep - 1, str(ts_lst[i].start)))\n if (ts_lst[i].units != ts_lst[0].units):\n log_fatal(\"Timestreams are not the same units\")\n out_ts = cls(numpy.concatenate(ts_lst))\n out_ts.units = ts_lst[0].units\n out_ts.start = ts_lst[0].start\n out_ts.stop = ts_lst[-1].stop\n return out_ts", "title": "" }, { "docid": "c05187abbe42bb87e7bc8e06136bbeb3", "score": "0.49992543", "text": "def copy_tiles(self):\n new_tiles = []\n for l in self.tiles:\n new_row = l[:]\n new_tiles.append(new_row)\n \n return new_tiles", "title": "" }, { "docid": "6598aea487edac0f3e68cda27c5ce7cc", "score": "0.49947765", "text": "def range_mapping(pattern, data):\n\n squished_data = []\n\n for entry in data:\n min_diff = entry - min(data)\n normalized_diff = min_diff / (max(data) - min(data))\n squished_entry = min(pattern) + normalized_diff * (max(pattern) - min(pattern))\n\n squished_data.append(squished_entry)\n\n return squished_data", "title": "" }, { "docid": "8b1d3e03c8c1ce3e8cf00cc5523aabf5", "score": "0.49899983", "text": "def set_limit_list(msid):\n \n [udict, ddict] = read_unit_list()\n tchk = 0\n try:\n unit = udict[msid.lower()]\n if unit.lower() in ['k', 'degc']:\n tchk = 1\n elif unit.lower() == 'degf':\n tchk = 2\n elif unit.lower() == 'psia':\n tchk = 3\n except:\n pass\n \n l_list = gsr.read_glimmon(msid, tchk)\n \n if len(l_list) == 0:\n try:\n l_list = mta_db[msid]\n except:\n l_list = []\n#\n#--- if there is no limt given, set dummy limits\n#\n if len(l_list) == 0:\n tstart = 31536000 #---- 1999:001:00:00:00\n tstop = find_current_stime()\n l_list = [[tstart, tstop, -998, 998, -999, 999]]\n\n return l_list\n\n cleaned = []\n for alist in l_list:\n if alist[0] == alist[1]:\n continue\n else:\n cleaned.append(alist)\n \n cleaned2 = []\n alist = cleaned[0]\n for k in range(1, len(cleaned)):\n blist = cleaned[k]\n if (alist[2] == blist[2]) and (alist[3] == blist[3]) and (alist[4] == blist[4]) and (alist[5] == blist[5]):\n alist[1] = blist[1]\n else:\n cleaned2.append(alist)\n alist = blist\n\n cleaned2.append(alist)\n \n return cleaned2", "title": "" }, { 
"docid": "6528631120c2513d135bffc2dc03a111", "score": "0.49865663", "text": "def _slice_time_series(time_series, start, end):\n # start must be greater than 1, this is asserted in genex_databse._process_loi\n rtn = list()\n\n for ts in time_series:\n ts_id = ts[0]\n ts_data = ts[1]\n # we take min because min can be math.inf\n for i in range(start, min(end, len(ts_data))):\n rtn += _get_sublist_as_sequences(single_time_series=ts_data, data_id=ts_id, length=i)\n return rtn", "title": "" }, { "docid": "49417de99f92c728d2bc00c1c9e0afb1", "score": "0.49861628", "text": "def pruneList(list_):\r\n i = 0\r\n while i < len(list_):\r\n while True:\r\n if len(list_[i][1]) == 0:\r\n list_.pop(i)\r\n i -= 1\r\n break\r\n if list_[i][1][0] + numHours - 300 < time(): # 5 minute leeway\r\n break\r\n list_[i][1].pop(0)\r\n i += 1", "title": "" }, { "docid": "f1845541e8dd065f7ab58b8bbeaa633c", "score": "0.49857557", "text": "def sanitizeMultiStopLongFlights(flightList, maxDuration=21600):\n ## TODO: add type checking; this method should only accept dictionaries\n\n modifiedLists = []\n\n #Remove flights with more than 1 stop and with a total duration of 5 or more hours\n logging.debug('sanitizeMultiStopLongFlights: ' + 'Sanitizing list to remove multi-stop and long duration flights...')\n sanitizedList = {k:v for k,v in flightList.items() if v['count'] <= 2 and v['duration'] <= maxDuration }\n logging.debug('sanitizeMultiStopLongFlights: ' + 'Size of result set reduced from ' + str(len(flightList)) + ' to ' + str(len(sanitizedList)))\n\n #We need to use this list to filter the separate outbound/inbound lists to make sure we arent pulling them in any future queries\n discardedList = {k:v for k,v in flightList.items() if v['count'] > 2 or v['duration'] > maxDuration }\n logging.debug('sanitizeMultistopLongFlights: ' + 'Size of discarded list is ' + str(len(discardedList)))\n\n modifiedLists.append(sanitizedList)\n modifiedLists.append(discardedList)\n return modifiedLists", "title": "" }, { "docid": "8772e634c002e4023b663ab2dae854e7", "score": "0.49856314", "text": "def _apply_gti_to_live_time(self):\n\n # First negate all FT2 entries\n\n filter_idx = np.zeros_like(self._livetime, dtype=bool)\n\n # now loop through each GTI interval\n\n for start, stop in zip(self._gti_start, self._gti_stop):\n\n # create an index of all the FT2 bins falling within this interval\n\n tmp_idx = np.logical_and(start <= self._ft2_tstart, self._ft2_tstop <= stop)\n\n # add them to the already selected idx\n filter_idx = np.logical_or(filter_idx, tmp_idx)\n\n # Now filter the whole list\n self._ft2_tstart = self._ft2_tstart[filter_idx]\n self._ft2_tstop = self._ft2_tstop[filter_idx]\n self._livetime = self._livetime[filter_idx]", "title": "" }, { "docid": "b16f9b726498665a74bb821945f66fd4", "score": "0.4980329", "text": "def split_ranges(start, stop, count=None, step=None):\n if count is not None and count < 1:\n return []\n\n if count is None:\n if isinstance(step, timedelta):\n time_range = stop - start\n time_in_ms = time_range.microseconds + 1e6 * (\n time_range.seconds + 86400 * time_range.days\n )\n step_in_ms = step.microseconds + 1e6 * (step.seconds + 86400 * step.days)\n count = time_in_ms / step_in_ms\n else:\n count = (stop - start) / step\n if count < 1:\n count = 1\n\n if step is None:\n step = (stop - start) / count\n\n return [(start + i * step, start + (i + 1) * step) for i in range(int(count))]", "title": "" }, { "docid": "d626e409424faea89c7be257685c4818", "score": "0.49720186", "text": "def 
build_timestamp_list(start, end, time_step_second=15*60):\n timestamp_list = [start]\n\n start = libdt.datetime.strptime(start, \"%Y-%m-%d %H:%M:%S\")\n end = libdt.datetime.strptime(end, \"%Y-%m-%d %H:%M:%S\") #+ libdt.timedelta(minutes=time_step_second)\n\n while start < end:\n start = start + libdt.timedelta(0, time_step_second)\n timestamp_list.append(libdt.datetime.strftime(start, \"%Y-%m-%d %H:%M:%S\"))\n return timestamp_list", "title": "" }, { "docid": "48314a638d8b3df4d261317a3813ed40", "score": "0.49701375", "text": "def build_streched_workloads(workloads: list):\n assert isinstance(workloads[0], pd.DataFrame)\n target_workloads = []\n\n for workload in workloads:\n w = np.array(workload.as_matrix(), dtype=\"float32\")\n\n streched_workload = np.zeros((2 * w.shape[0], w.shape[1]))\n n_time_steps = w.shape[0]\n \n for t in range(n_time_steps):\n k = t*2\n streched_workload[k:k + 2, :] = w[[t], :]\n\n target_workloads.append(streched_workload)\n\n return target_workloads", "title": "" }, { "docid": "c42a655a7ba60730f33a7ce555a993c1", "score": "0.49635893", "text": "def to_ranges(self):\n\t\tranges = []\n\t\tfor di in range(self.ndet):\n\t\t\ts1, s2 = self.stack_bounds[di:di+2]\n\t\t\tinds = self.index_stack[s1:s2]\n\t\t\tmask = np.any(self.flag_stack[s1:s2],1)\n\t\t\tmask = np.concatenate([[False],mask]).astype(int)\n\t\t\tedges = mask[1:]-mask[:-1]\n\t\t\tstarts = np.where(edges>0)[0]\n\t\t\tends = np.where(edges<0)[0]\n\t\t\tif len(starts) == 0:\n\t\t\t\tranges.append(np.zeros([0,2],int))\n\t\t\telse:\n\t\t\t\tr = np.zeros([len(starts),2],int)\n\t\t\t\tr[:,0] = inds[starts]\n\t\t\t\tr[:len(ends),1] = inds[ends]\n\t\t\t\tif len(ends) < len(starts): r[-1,1] = self.nsamp\n\t\t\t\tranges.append(r)\n\t\treturn ranges", "title": "" }, { "docid": "ed648d55d34ecc869551f1376e5e2b63", "score": "0.49619672", "text": "def potentialRange(self):\n values = []\n for candidate in self.root.output():\n values.append(dateutil.parser.parse(candidate.get(self.attribute)).time())\n return (min(values), max(values))", "title": "" }, { "docid": "98279742d758d892ea1f40b1a161ed50", "score": "0.4961269", "text": "def relative_timings(self) -> List[Tuple[int, int, int]]:\n schedules = []\n for schedule in self.relative_schedules.all():\n num_seconds = schedule.minute * 60 + schedule.hour * 3600\n schedules.append([schedule.intervention.id, schedule.days_after, num_seconds])\n return schedules", "title": "" }, { "docid": "906152c6823f82fe93d6b99d52c0b8e8", "score": "0.49586782", "text": "def image_range_to_tiles(start_ind, end_ind, display=False, save_summary=True, save_data=True, save_top_tiles=True):\n image_num_list = list()\n tile_summaries_dict = dict()\n for slide_num in range(start_ind, end_ind + 1):\n tile_summary = summary_and_tiles(slide_num, display, save_summary, save_data, save_top_tiles)\n image_num_list.append(slide_num)\n tile_summaries_dict[slide_num] = tile_summary\n return image_num_list, tile_summaries_dict", "title": "" }, { "docid": "ee240255ce46e4d6b8e4dcc37ffba8d6", "score": "0.49579102", "text": "def calc_date_chunks(start, end):\n chunks = []\n\n start = datetime.datetime.strptime(start, '%m-%d-%Y-%H:%M')\n prev = start\n end = datetime.datetime.strptime(end, '%m-%d-%Y-%H:%M')\n\n while (prev <= end):\n # Increment 6 hours\n curr = prev + datetime.timedelta(seconds=21600)\n\n chunks.append((prev.strftime('%m-%d-%Y-%H:%M'), curr.strftime('%m-%d-%Y-%H:%M')))\n prev = curr\n\n \"\"\"\n Adjust the last tuple incase the time period doesnt divide evenly into\n 6-hr periods\n\n Ex\n ---\n 
Unadjusted:\n in: calc_date_chunks('09-01-2019-15:00', '09-06-2019-13:00')[-1]\n out: ('09-06-2019-09:00', '09-06-2019-15:00')\n\n Adjusted:\n in: calc_date_chunks('09-01-2019-15:00', '09-06-2019-13:00')[-1]\n out: ('09-06-2019-09:00', '09-06-2019-13:00')\n\n \"\"\"\n if (datetime.datetime.strptime(chunks[-1][1], '%m-%d-%Y-%H:%M') > end):\n chunks = chunks[:-1]\n prev = prev - datetime.timedelta(seconds=21600)\n chunks.append((prev.strftime('%m-%d-%Y-%H:%M'), end.strftime('%m-%d-%Y-%H:%M')))\n\n return chunks", "title": "" }, { "docid": "c21b7d1a6531aaae04bbf366e5f55498", "score": "0.4957181", "text": "def intervals(\n nms #list of indices\n ):\n return list(zip(nms[:],nms[1:]))", "title": "" }, { "docid": "c0d25e19bfcbfb65a188aefb80f6ea9c", "score": "0.49563673", "text": "def range_and_interval_to_list(start_time_unix_sec=None, end_time_unix_sec=None,\n time_interval_sec=None, include_endpoint=True):\n\n error_checking.assert_is_integer(start_time_unix_sec)\n error_checking.assert_is_not_nan(start_time_unix_sec)\n error_checking.assert_is_integer(end_time_unix_sec)\n error_checking.assert_is_not_nan(end_time_unix_sec)\n error_checking.assert_is_integer(time_interval_sec)\n error_checking.assert_is_boolean(include_endpoint)\n\n if include_endpoint:\n error_checking.assert_is_geq(end_time_unix_sec, start_time_unix_sec)\n else:\n error_checking.assert_is_greater(end_time_unix_sec, start_time_unix_sec)\n\n start_time_unix_sec = int(rounder.floor_to_nearest(\n float(start_time_unix_sec), time_interval_sec\n ))\n end_time_unix_sec = int(rounder.ceiling_to_nearest(\n float(end_time_unix_sec), time_interval_sec\n ))\n\n if not include_endpoint:\n end_time_unix_sec -= time_interval_sec\n\n num_time_steps = 1 + int(numpy.round(\n (end_time_unix_sec - start_time_unix_sec) / time_interval_sec\n ))\n\n return numpy.linspace(start_time_unix_sec, end_time_unix_sec,\n num=num_time_steps, dtype=int)", "title": "" }, { "docid": "adde6045a88d6db8a6b268355a4c0a71", "score": "0.49558872", "text": "def get_time_slot_set(lecture: Lecture):\n time_slot_set = [[], [], [], [], [], []]\n for time_slot in lecture.time_slots.all():\n day_index = day_to_index[time_slot.day_of_week]\n start_int = time_to_int(time_slot.start_time)\n end_int = time_to_int(time_slot.end_time)\n time_slot_set[day_index].append((start_int, end_int))\n for i, time_slots in enumerate(time_slot_set):\n if not time_slots:\n continue\n time_slots.sort()\n new_time_slots = []\n\n start, end = time_slots[0]\n for time_slot in time_slots[1:]:\n if end < time_slot[0]:\n new_time_slots.append((start, end))\n start, end = time_slot\n else:\n end = max(end, time_slot[1])\n new_time_slots.append((start, end))\n time_slot_set[i] = new_time_slots\n return time_slot_set", "title": "" }, { "docid": "7cadecc119e73dfdba63313974c2802c", "score": "0.49555343", "text": "def subtract(self, other_time_range):\n output = []\n if (self.t1 < other_time_range.t1):\n output.append(TimeRange(self.t1, other_time_range.t1))\n \n if (self.t2 > other_time_range.t2):\n output.append(TimeRange(other_time_range.t2, self.t2))\n\n return output", "title": "" }, { "docid": "a63625b0bb5de267eaada4d345153519", "score": "0.49435526", "text": "def reshape_spiking_times(my_spikemon:SpikeMonitor, spiking_index:int=0, lower_threshold:float=0, upper_threshold=np.infty) -> Tuple[list, int, int]:\n result = []\n min_ = inf\n max_ = -1.\n\n argmin_ = 0\n argmax_ = 0\n\n n = len(my_spikemon.values('t'))\n for i in range(n):\n if len(my_spikemon.values('t')[i] / ms) <= spiking_index:\n 
result.append(nan)\n else:\n spike_time = my_spikemon.values('t')[i][spiking_index] / ms\n if spike_time >= lower_threshold and spike_time <= upper_threshold:\n result.append(spike_time)\n if spike_time <= min_:\n argmin_ = i\n min_ = spike_time\n elif spike_time >= max_:\n argmax_ = i\n max_ = spike_time\n else:\n result.append(nan)\n\n if len(result) == 0:\n raise Warning(\"No spikes\")\n\n return result, argmin_, argmax_", "title": "" }, { "docid": "99334b47854fa95a9d27ab178a23171e", "score": "0.4935655", "text": "def roi_to_tiles(self, roi):\n bounds = BoundingBox(*roi.bounds)\n grid_ys, grid_xs = self._frame_bounds(bounds)\n return self._yield_tiles(grid_ys, grid_xs, roi)", "title": "" }, { "docid": "e79b61abf5cef6b6cfe8e677f517c093", "score": "0.4922719", "text": "def extract_intervals(request_history):\n if not request_history:\n return []\n\n # first - sort by request start time and end time, this allows us to order the\n # requests on the actual timeline.\n timings = sorted(\n [req.timing for req in request_history],\n key=lambda timing: (timing[0], timing[1]),\n )\n\n distinct_intervals = []\n min_start, max_end = timings[0][:2]\n\n for timing in timings:\n start, end = timing[0], timing[1]\n if start <= max_end and end > max_end:\n max_end = timing[1]\n\n if start > max_end:\n distinct_intervals.append((min_start, max_end))\n min_start, max_end = timing[:2]\n\n # last loop iteration doesn't append distinct interval\n distinct_intervals.append((min_start, max_end))\n\n return distinct_intervals", "title": "" }, { "docid": "9f9835b199461d6318d7395f49526ff7", "score": "0.4919626", "text": "def tiles(filepath, tile_ids):\n generated_tiles = []\n\n for tile_id in tile_ids:\n tile_id_parts = tile_id.split(\".\")\n tile_position = list(map(int, tile_id_parts[1:3]))\n\n (dense, mins, maxs) = get_data(\n h5py.File(filepath), tile_position[0], tile_position[1]\n )\n\n \"\"\"\n if len(dense):\n max_dense = max(dense)\n min_dense = min(dense)\n else:\n max_dense = 0\n min_dense = 0\n has_nan = len([d for d in dense if np.isnan(d)]) > 0\n if (\n not has_nan and\n max_dense > min_f16 and max_dense < max_f16 and\n min_dense > min_f16 and min_dense < max_f16\n ):\n tile_value = {\n 'dense': base64.b64encode(dense.astype('float16')).decode('utf-8'),\n 'mins': base64.b64encode(mins.astype('float16')).decode('utf-8'),\n 'maxs': base64.b64encode(mins.astype('float16')).decode('utf-8'),\n 'dtype': 'float16'\n }\n else:\n \"\"\"\n tile_value = {\n \"dense\": base64.b64encode(dense.astype(\"float32\")).decode(\"utf-8\"),\n \"mins\": base64.b64encode(mins.astype(\"float32\")).decode(\"utf-8\"),\n \"maxs\": base64.b64encode(maxs.astype(\"float32\")).decode(\"utf-8\"),\n \"dtype\": \"float32\",\n }\n\n generated_tiles += [(tile_id, tile_value)]\n\n return generated_tiles", "title": "" }, { "docid": "e85acb35406628678d0ba05818c7abd2", "score": "0.49142137", "text": "def frame_timestamp_ranges(self, *, include_dead_time=False):\n\n ts_min = self._timestamps(reduce=np.min)\n ts_max = self._timestamps(reduce=np.max)\n delta_ts = int(1e9 / self.infowave.sample_rate) # We want the sample beyond the end\n if ts_min.ndim == 2:\n return [(np.min(ts_min), np.max(ts_max) + delta_ts)]\n else:\n if include_dead_time:\n frame_time = ts_min[1, 0, 0] - ts_min[0, 0, 0]\n return [(t, t + frame_time) for t in ts_min[:, 0, 0]]\n else:\n maximum_timestamp = np.max(ts_max, axis=tuple(range(1, ts_max.ndim)))\n return [(t1, t2) for t1, t2 in zip(ts_min[:, 0, 0], maximum_timestamp + delta_ts)]", "title": "" }, { "docid": 
"9bd3bfbe9d726c83df5a6325b34b8300", "score": "0.4913405", "text": "def take_all_remaining_tiles(self):\r\n result = self._my_tiles\r\n self._my_tiles = []\r\n return result", "title": "" } ]
a7c90f3bc19198d782a98754c8220f05
calculate a and b of y=a.x+b before including in the point p
[ { "docid": "d6e7219911aa3e8d7f7c2c10c2fa4687", "score": "0.6427087", "text": "def evalPoint(self, p):\n if not self._points:\n raise EvalPointError(\"\"\"Cannot calculate slope\n and intercept with a single point.\n \"\"\")\n x = p.x\n y = p.y\n w = float(p.w)\n\n wixiyi = self._wixiyi + w*x*y\n wixi = self._wixi + w*x\n wiyi = self._wiyi + w*y\n wixi2 = self._wixi2 + w*x*x\n wi = self._wi + w\n\n if wixi2 == wixi**2/wi:\n a = float(\"inf\")\n b = float(\"inf\")\n return (a, b)\n a = (wixiyi - wixi*wiyi/wi)/(wixi2 - wixi**2/wi)\n b = (wiyi - a * wixi)/wi\n return (a, b)", "title": "" } ]
[ { "docid": "3420fc09cc7a6c8ccb3050887f7e8572", "score": "0.7422088", "text": "def point_add(a, b, p, x0, y0, x1, y1):\n\n # ADD YOUR CODE BELOW\n assert isinstance(a, Bn)\n assert isinstance(b, Bn)\n assert isinstance(p, Bn) and p > 0\n assert (isinstance(x0, Bn) and isinstance(y0, Bn)) or (x0 == None and y0 == None)\n assert (isinstance(x1, Bn) and isinstance(y1, Bn)) or (x1 == None and y1 == None)\n\n if (x0 is not None and y0 is not None) and (x1 is None and y1 is None):\n return (x0, y0)\n if (x0 is None and y0 is None) and (x1 is not None and y1 is not None):\n return (x1, y1)\n if (x0 is x1) and (y0 is not y1):\n return (None, None)\n if (x0 is not x1) and (y0 is not y1):\n lam = (y1.mod_sub(y0, p)).mod_mul(((x1.mod_sub(x0, p)).mod_inverse(p)), p)\n xr = ((lam.mod_pow(2, p)).mod_sub(x0, p)).mod_sub(x1, p)\n yr = (lam.mod_mul(x0.mod_sub(xr, p), p)).mod_sub(y0, p)\n return (xr, yr) \n raise ArithmeticError(\"EC Points must not be equal\")", "title": "" }, { "docid": "6e9f565833cf303d8390ec92753e1af3", "score": "0.74085206", "text": "def point_add(a, b, p, x0, y0, x1, y1):\n\n if ((x0, y0) == (x1, y1)):\n raise Exception('EC Points must not be equal')\n\n # if x1 == x0 then there is no inverse, also check both points are on curve\n if ((x0 == x1) or\n (not is_point_on_curve(a, b, p, x0, y0)) or\n (not is_point_on_curve(a, b, p, x1, y1))):\n return(None, None)\n\n if (x0, y0) == (None, None):\n return (x1, y1)\n\n if (x1, y1) == (None, None):\n return (x0, y0)\n\n # calculate lam in stages using Bn methods\n xqminxp = x1.mod_sub(x0, p)\n yqminyp = y1.mod_sub(y0, p)\n\n xqminxpmodinv = xqminxp.mod_inverse(m=p)\n lam = xqminxpmodinv.mod_mul(yqminyp, p)\n\n # calculate xr\n lamsq = lam.mod_mul(lam, p)\n lamsqmin = lamsq.mod_sub(x0, p)\n xr = lamsqmin.mod_sub(x1, p)\n\n # calculate yr\n xpminxr = x0.mod_sub(xr, p)\n lamxpxr = lam.mod_mul(xpminxr, p)\n yr = lamxpxr.mod_sub(y0, p)\n\n return (xr, yr)", "title": "" }, { "docid": "b83fd6662528e49c276dd1d0280731f5", "score": "0.7015354", "text": "def __add__(self,b):\n return Punto(self.x + b.x, self.y + b.y)", "title": "" }, { "docid": "2412afaf4905a0427016faa6e08c0927", "score": "0.6985416", "text": "def add(self, p1, p2):\n x1, y1 = p1\n x2, y2 = p2\n a, p = self.a, self.p\n\n if p1 == self.POINT_AT_INFINITY:\n return p2\n elif p2 == self.POINT_AT_INFINITY:\n return p1\n elif x1 == x2 and y1 != y2:\n # We can rewrite this condition as\n # x1 == x2 and find_inverse(-y1, p) == y2\n # but, y1 != y2 is more efficient.\n # This is correct because vertical line intersects\n # the elliptic curve in one or two points.\n return self.POINT_AT_INFINITY\n\n if p1 == p2:\n m = (3 * x1**2 + a) * find_inverse(2 * y1, p) % p\n else:\n m = (y2 - y1) * find_inverse(x2 - x1, p) % p\n\n x3 = (m**2 - x1 - x2) % p\n y3 = (m * (x1 - x3) - y1) % p\n result = (x3, y3)\n\n return result", "title": "" }, { "docid": "72ce35e9e7c0dae0a8178ffa222c2fed", "score": "0.6642534", "text": "def point_double(a, b, p, x, y): \n\n # ADD YOUR CODE BELOW\n assert isinstance(a, Bn)\n assert isinstance(b, Bn)\n assert isinstance(p, Bn) and p > 0\n assert (isinstance(x, Bn) and isinstance(y, Bn)) or (x == None and y == None)\n\n if (x is not None) and (y is not None):\n lam = (((x.mod_pow(2, p)).mod_mul(3, p)).mod_add(a, p)).mod_mul((y.mod_mul(2, p)).mod_inverse(p), p)\n xr = (lam.mod_pow(2, p)).mod_sub(x.mod_mul(2, p), p)\n yr = ((x.mod_sub(xr, p)).mod_mul(lam, p)).mod_sub(y, p)\n return (xr, yr)\n return (None, None)", "title": "" }, { "docid": "43fee64b56075c2f93ca88f5101ce039", 
"score": "0.66143286", "text": "def __add__(self, other):\n if self == PAI:\n return other\n if other == PAI:\n return self\n if self.x == other.x and self.y != other.y:\n return PAI\n\n # Compute slope y = mx + v\n if self.x == other.x and self.y == other.y:\n # Two Points are the same -> Compute tangent on curve\n m = (3 * self.x**2) * inv(2 * self.y, self.curve.p)\n else:\n # Compute slope between two points\n m = (other.y - self.y) * inv(other.x - self.x, self.curve.p)\n\n v = self.y - m * self.x\n\n # New Point is at the intersection of the slop and the curve\n new_x = (m**2 - self.x - other.x) % self.curve.p\n new_y = (-(m * new_x + v)) % self.curve.p\n\n return Point(self.curve, new_x, new_y)", "title": "" }, { "docid": "202c705f07d067dd507f1814053a60a7", "score": "0.65900207", "text": "def makeLine(p1,p2):\n x1 = p1[0]\n y1 = p1[1]\n\n x2 = p2[0]\n y2 = p2[1]\n\n a = (y1 - y2) / (x1 - x2)\n b = (x1 * y2 - x2 * y1) / (x1 - x2)\n\n return a,b", "title": "" }, { "docid": "a8012eb38404f0ab4ab915d8ed550ad0", "score": "0.6575684", "text": "def point_double(a, b, p, x, y):\n\n if x is None and y is None:\n return None, None\n\n xsq = x.mod_mul(x, p)\n xsq3 = Bn(3).mod_mul(xsq, p)\n num = xsq3.mod_add(a, p)\n y2 = Bn(2).mod_mul(y, p)\n y2inv = y2.mod_inverse(m=p)\n lam = num.mod_mul(y2inv, p)\n\n xr = lam.mod_mul(lam, p)\n xr = xr.mod_sub(x, p)\n xr = xr.mod_sub(x, p)\n\n yr = lam.mod_mul(x.mod_sub(xr, p), p)\n yr = yr.mod_sub(y, p)\n\n return (xr, yr)", "title": "" }, { "docid": "fddf12737ff87b09db1618354dad3456", "score": "0.656293", "text": "def yp(self,a,b):\n \n y_prime = np.array([-np.sin(a),np.cos(a),0])\n return y_prime", "title": "" }, { "docid": "95ae92515c68d0ee85f5dbf718f817a6", "score": "0.65425587", "text": "def source(self, p):\n x = p[..., 0]\n y = p[..., 1]\n rhs = 2 * (y - y ** 2) + 2 * (x - x ** 2)\n return rhs", "title": "" }, { "docid": "d146142695eac0628804c7b6d3d7a175", "score": "0.6453014", "text": "def point(self, p, x):\n return self.plec(x, p)", "title": "" }, { "docid": "244fae6563ee6f0b919e4df35fabe410", "score": "0.63918453", "text": "def restar(self, b):\n return Punto(self.x - b.x, self.y - b.y)", "title": "" }, { "docid": "244fae6563ee6f0b919e4df35fabe410", "score": "0.63918453", "text": "def restar(self, b):\n return Punto(self.x - b.x, self.y - b.y)", "title": "" }, { "docid": "8ef3cf169cab513e5ae413a8d4477610", "score": "0.63806015", "text": "def source(self, p):\n x = p[..., 0]\n y = p[..., 1]\n a = 10000 * (x ** 2 + y ** 2) ** 2\n rhs = -100 * (4 - 4 * a) / (a + 1) ** 2\n return rhs", "title": "" }, { "docid": "158fbd965428d8f1d088bf80536f91ae", "score": "0.6314499", "text": "def __add__(self, other):\r\n return Point2d(self.x + other.x, self.y + other.y)", "title": "" }, { "docid": "5ed7d1da2004cf73c5ec2aeece598215", "score": "0.6312248", "text": "def __add__(self, otherPoint) :\n\t\treturn point(self.x + otherPoint.x, self.y + otherPoint.y, self.z + otherPoint.z)", "title": "" }, { "docid": "6dcafd5403f3b44cefd7d0411b47b349", "score": "0.6309287", "text": "def bresa(p1, p2):\n xi, yi = p1\n x2, y2 = p2\n cells = [(xi, yi)]\n\n # Slope is calculated once only if it's possible\n if xi != x2:\n m = (y2 - yi) / (x2 - xi)\n delta_y = m\n x1 = xi\n y1 = yi + 0.5 - m * 0.5\n\n if x1 < x2 and yi <= y2:\n x = x1 + 1\n y = yi\n inc_y = y1 + delta_y\n while x < x2 + 1 or y < y2:\n if inc_y > y + 1:\n y += 1\n elif inc_y == y + 1:\n y += 1\n x += 1\n delta_y += m\n else:\n x += 1\n delta_y += m\n cells.append((x - 1, y))\n inc_y = y1 + delta_y\n\n elif x1 < x2 
and yi >= y2:\n x = x1 + 1\n y = yi\n inc_y = y1 + delta_y\n while x < x2 + 1 or y > y2:\n if inc_y < y:\n y -= 1\n elif inc_y == y:\n y -= 1\n x += 1\n delta_y += m\n else:\n x += 1\n delta_y += m\n cells.append((x - 1, y))\n inc_y = y1 + delta_y\n\n elif x1 > x2 and yi > y2:\n x = x1\n y = yi\n inc_y = y1\n delta_y = 0\n while x > x2 or y > y2:\n if inc_y < y:\n y -= 1\n elif inc_y == y:\n y -= 1\n x -= 1\n delta_y -= m\n else:\n x -= 1\n delta_y -= m\n cells.append((x, y))\n inc_y = y1 + delta_y\n\n elif x1 > x2 and yi <= y2:\n x = x1\n y = yi\n inc_y = y1\n delta_y = 0\n while x > x2 or y < y2:\n if inc_y > y + 1:\n y += 1\n elif inc_y == y + 1:\n y += 1\n x -= 1\n delta_y -= m\n else:\n x -= 1\n delta_y -= m\n cells.append((x, y))\n inc_y = y1 + delta_y\n\n else:\n y = yi\n if y2 >= yi:\n while y < y2 + 1:\n cells.append((xi, y))\n y += 1\n else:\n while y > y2:\n cells.append((xi, y - 1))\n y -= 1\n\n return np.array(cells)", "title": "" }, { "docid": "2771385a126aa4e9c6a9056d6f911d1c", "score": "0.63012713", "text": "def project_y(self, p):\r\n raise NotImplementedError", "title": "" }, { "docid": "e63fb2ff37a255a7a1d19527ca8d72a7", "score": "0.6294456", "text": "def _reduce_xyfp(x, y):\n a = 420.0\n return -x/a, y/a", "title": "" }, { "docid": "f98e7f30356d534c4d0069a867e65bde", "score": "0.6290059", "text": "def pos_line(p):\n # y = x + c\n # c = (p.y-p.x)\n pos_y_intersects.add(p.y - p.x)", "title": "" }, { "docid": "3e21035726b43dd91ebf6d03a380bae4", "score": "0.62183064", "text": "def source(self, p):\n x = p[..., 0]\n y = p[..., 1]\n val = -2 * (x ** 2 + y ** 2)\n return val", "title": "" }, { "docid": "7ee3ec6caa31e10021ba8e29b0a32a1a", "score": "0.62158895", "text": "def Interpolate(a, b, p):\n\treturn a + (b - a) * p", "title": "" }, { "docid": "aa26bfc911d4bc0bc5aabba98c59e24d", "score": "0.62128115", "text": "def get_line_to(self, p2):\n \"\"\" y1- ax1 = y2 - ax2\n a = (y1-y2)/(x1-x2)\n b = y1 - a * x1\"\"\"\n if self.x == p2.x:\n return 'x = {0}'.format(self.x)\n elif self.y == p2.y:\n return 'y = {0}'.format(self.y)\n else:\n a = (self.y - p2.y)/(self.x - p2.x)\n b = self.y - a * self.x\n return (a, b)", "title": "" }, { "docid": "f659810350a6341efa94a6a38e1cd310", "score": "0.6169263", "text": "def sum (self, other):\n new_pt = Point(self.x + other.x, self.y + other.y)\n return new_pt", "title": "" }, { "docid": "58992d107efdd8acddb93f580aa7ed44", "score": "0.61668736", "text": "def source(self, p):\n x = p[..., 0]\n y = p[..., 1]\n val = -0.16 * (x ** 2 + y ** 2) ** (-0.8)\n return val", "title": "" }, { "docid": "de5e4121550330c7ab097e607b56c785", "score": "0.61619216", "text": "def __add__(self, other):\n return Point(self.x+other.x, self.y+other.y)", "title": "" }, { "docid": "552c20c11fb4155927a32b66419b0b91", "score": "0.61449575", "text": "def axpy(alpha,x,y):\n for i in range(0,len(x)):\n y[i]=alpha*x[i]+y[i]", "title": "" }, { "docid": "2f66e3ba0296e7bfeee3f8d6650be372", "score": "0.61183864", "text": "def skalarni_produkt(self, other):\n return self.x * other.x + self.y * other.y", "title": "" }, { "docid": "dc53b3f08439aa631a5033e14379ea6d", "score": "0.608645", "text": "def adding_points(self, other):\n sum = Point()\n sum.x = self.x + other.x\n sum.y = self.y + other.y\n return sum", "title": "" }, { "docid": "a937fd5606f64d05e064b7a37e097f75", "score": "0.6071962", "text": "def crossover(self, p1, p2):\n return (p1 + p2) / 2", "title": "" }, { "docid": "277c55565ed833bb815fa50baf7d0c41", "score": "0.60685253", "text": "def Double(self, p: EcPoint) -> 
EcPoint:\n if p == INFINITY:\n return p\n x, y = p\n num = (3 * x * x + self.a) % self.mod\n den = 2 * y\n t = num * gmpy.invert(den, self.mod) % self.mod\n x2 = (t * t - 2 * x) % self.mod\n y2 = (t * (x - x2) - y) % self.mod\n return (x2, y2)", "title": "" }, { "docid": "bdb4e83d2db4e6222cc3d3402046ec8d", "score": "0.6058135", "text": "def h(p1, p2):\n x1, y1 = p1\n x2, y2 = p2\n return abs(x1 - x2) + abs(y1 - y2)", "title": "" }, { "docid": "7ba25ba56832f05342cc5bc0952fac81", "score": "0.6055224", "text": "def himmelblau(p):\n x, y = p\n a = x*x + y - 11\n b = x + y*y - 7\n return a*a + b*b", "title": "" }, { "docid": "efc07df32e014b7da7b5402653d74f73", "score": "0.6045281", "text": "def y_value(a: Point, b: Point, x: Union[int, float]) -> float:\n assert isinstance(x, (int, float))\n\n if a.x == b.x:\n raise ValueError('Cannot return y-value for x=const.')\n\n return (a.y - b.y) * (x - b.x) / (a.x - b.x) + b.y", "title": "" }, { "docid": "34ceeb34368086ac841be2a8e165528c", "score": "0.59830785", "text": "def appendPoint(self, p):\n self._points.append(p)\n self._wixiyi += p.w * p.x * p.y\n self._wixi += p.w * p.x\n self._wiyi += p.w * p.y\n self._wixi2 += p.w * p.x * p.x\n self._wi += p.w\n if len(self._points) > 1:\n # if points are aligned with exactly the same x, line is vertical\n # a and b would be infinity\n if self._wixi2 == self._wixi**2/self._wi:\n self.a = float(\"inf\")\n self.b = float(\"inf\")\n else:\n self.a = (self._wixiyi - self._wixi*self._wiyi/self._wi)\\\n / (self._wixi2 - self._wixi**2/self._wi)\n self.b = (self._wiyi - self.a * self._wixi)/self._wi\n self._rss = self.calcRSS(self.a, self.b)", "title": "" }, { "docid": "3c79bd48cf4c73b4eb7e84d4736c1c75", "score": "0.597987", "text": "def __or__(self,other):\n if self.a==other.a:\n return None\n x=(self.b-other.b)/(other.a-self.a)\n y=self.a*x+self.b\n return Point(x,y)", "title": "" }, { "docid": "4765f1b0722b4b6aa8ca09eeff1fbdb9", "score": "0.5977134", "text": "def __sub__(self,b):\n return Punto(self.x - b.x, self.y - b.y)", "title": "" }, { "docid": "18ee7218cf11d06e075703ed6d4f4692", "score": "0.59696823", "text": "def _project_point_onto_line(self, p, ep0, ep1):\n dx, dy = ep1[0] - ep0[0], ep1[1] - ep0[1]\n determinant = dx * dx + dy * dy\n coeff = (dy * (p[1] - ep0[1]) + dx * (p[0] - ep0[0])) / determinant\n\n return ep0[0] + coeff * dx, ep0[1] + coeff * dy", "title": "" }, { "docid": "df4ec24f049e9baad160dcfac340525c", "score": "0.5966919", "text": "def lerp(x, y, a):\n\treturn (1 - a) * x + a * y", "title": "" }, { "docid": "4edf564c718338183be214c4221cc8ab", "score": "0.59647214", "text": "def source(self, p):\n x = p[..., 0]\n y = p[..., 1]\n rhs = -(4 * x ** 2 + 4 * y ** 2 + 4) * (np.exp(x ** 2 + y ** 2))\n return rhs", "title": "" }, { "docid": "73362f6ce969028d4dcd64d98d59c5df", "score": "0.5933852", "text": "def sum(self):\n return self.x + self.y", "title": "" }, { "docid": "d98a1f1b20feadc260d2824c1d07ccd3", "score": "0.59313613", "text": "def source(self, p):\n x = p[..., 0]\n y = p[..., 1]\n t0 = (x + 0.5) ** 2 + (y - 0.5) ** 2 + 0.01\n t1 = (x - 0.5) ** 2 + (y + 0.5) ** 2 + 0.01\n val = (\n (2 * x - 1.0) * (4 * x - 2.0) / t1 ** 3 -\n (2 * x + 1.0) * (4 * x + 2.0) / t0 ** 3 -\n (2 * y - 1.0) * (4 * y - 2.0) / t0 ** 3 +\n (2 * y + 1.0) * (4 * y + 2.0) / t1 ** 3 + 4 / t0 ** 2 - 4 / t1 ** 2\n )\n\n return val", "title": "" }, { "docid": "4fb7697f9750b0eed1c2bf141d533b82", "score": "0.5914811", "text": "def __add__(self, b):\n temp_y = self.y - b.y\n result = Sweep()\n result.LoadArrays(self.x, 
temp_y)\n return result", "title": "" }, { "docid": "494cf9ec50748b2c7b59d47b42189639", "score": "0.5908124", "text": "def Cp_a(a, y):\n #return (a - y) * Sigmoid.prime(z)\n return (a - y)", "title": "" }, { "docid": "b8563422fb987b44c6d663a28cbb5972", "score": "0.5891777", "text": "def phi_b(x):\n y = np.copy(x)\n x1, x2 = x[:2]\n y[0], y[1] = x1, x2 + b*x1**2 - 100*b\n return y", "title": "" }, { "docid": "872335f4a037e02e249eb68d9d7e4f1c", "score": "0.5886332", "text": "def _reduce_point(self, R, p):\n if R.is_zero():\n return R.curve().change_ring(rings.GF(p))(0)\n x, y = R.xy()\n d = lcm(x.denominator(), y.denominator())\n return R.curve().change_ring(rings.GF(p))([x*d, y*d, d])", "title": "" }, { "docid": "518c03a8dc0ee11e655efed0ac1d6cba", "score": "0.5879374", "text": "def B(self):\n\t\ta = self.points[1].x-self.points[0].x\n\t\tb = self.points[2].x-self.points[0].x\n\t\tc = self.points[1].y-self.points[0].y\n\t\td = self.points[2].y-self.points[0].y\n\t\treturn 1/(a*d-c*b)*(np.array([[d,-c],[-b,a]]))", "title": "" }, { "docid": "3a8edbf29e8c83b00ddb3a3250a614e8", "score": "0.58390766", "text": "def pcoord(x, y):\n r = np.sqrt( x**2 + y**2 )\n az = np.degrees( np.arctan2(x, y) )\n # az[where(az<0.)[0]] += 360.\n az = (az+360.)%360.\n return r, az", "title": "" }, { "docid": "65d381933f0fea732e72c5d8043c7384", "score": "0.58352846", "text": "def add(x, y):", "title": "" }, { "docid": "35befa5d8a46c43bd1f28d387ac30740", "score": "0.58334136", "text": "def __radd__(self,\n point: IPoint) -> IPoint:", "title": "" }, { "docid": "776b08038104a9ca64c5f3b8c113cc70", "score": "0.5827073", "text": "def calc_target_point_rect(p1, p2):\n tp = [0, 0] # coordinates of target point\n # determine position on frame of p1\n frame_pos_p1 = calc_frame_pos(p1)\n\n # opposite border position\n frame_pos_opp = (frame_pos_p1 + 2) % 4\n\n # handle vertical lines, because steepness is infinity\n if p1[0] == p2[0]:\n tp[0] = p1[0]\n tp[1] = frame_borders[frame_pos_opp]\n return tp\n # otherwise calculate line equation y = mx + b\n m = calc_steepness(p1, p2)\n b = calc_bias(p1, p2)\n # if left or right\n if frame_pos_p1 % 2 == 0:\n tp[0] = frame_borders[frame_pos_opp]\n tp[1] = m * frame_borders[frame_pos_opp] + b\n if tp[1] < top:\n tp[0] = (top - b) / m\n tp[1] = top\n elif tp[1] > bot:\n tp[0] = (bot - b) / m\n tp[1] = bot\n # if top or bottom\n else:\n tp[0] = (frame_borders[frame_pos_opp] - b) / m\n tp[1] = frame_borders[frame_pos_opp]\n if tp[0] < left:\n tp[0] = left\n tp[1] = m * left + b\n elif tp[0] > right:\n tp[0] = right\n tp[1] = m * right + b\n return tp", "title": "" }, { "docid": "2ae9fde7dea3c24e881ae8766627a379", "score": "0.58093", "text": "def xp(self,a,b):\n \n x_prime = np.array([np.cos(a)*np.cos(b),np.sin(a)*np.cos(b),np.sin(b)])\n return x_prime", "title": "" }, { "docid": "a8729f947df81e1efbc60f50eba4a77e", "score": "0.58075106", "text": "def __mul__(self, otherPoint) :\n\t\treturn sum(map((lambda x,y : x*y), self.coords, otherPoint.coords))", "title": "" }, { "docid": "cb8d7535f339acd913b65332b0bde6bb", "score": "0.5805177", "text": "def gradient(self, p):\n x = p[..., 0]\n y = p[..., 1]\n pi = np.pi\n uprime = np.zeros(p.shape, dtype=np.float64)\n uprime[..., 0] = (1 - 2 * x) * (y - y ** 2)\n uprime[..., 1] = (1 - 2 * y) * (x - x ** 2)\n return uprime", "title": "" }, { "docid": "6c02a92f8619527547764cbcced482be", "score": "0.5803821", "text": "def __add__(self,other):\n if not isinstance(other,Point):\n return NotImplemented\n return Point( self._x + other._x, self._y + 
other._y)", "title": "" }, { "docid": "abfc4447e5e1e67def94427c995a1997", "score": "0.5770588", "text": "def __abs__(self):\n return (self.x ** 2 + self.y ** 2) ** (1 / 2)", "title": "" }, { "docid": "f41e1d0af0d3dfb80e89a27129e1f558", "score": "0.5759189", "text": "def __add__(self,\n point: IPoint) -> IPoint:", "title": "" }, { "docid": "31100fa7ca9e9d5fa23e345fdb9fe407", "score": "0.57477564", "text": "def suma(x, y):\n return x + y", "title": "" }, { "docid": "9d4c44f230f4ba2af87c06fdaf32b293", "score": "0.5747229", "text": "def slope(p):\n try:\n return float(p.y - p0.y)/(p.x - p0.x)\n except ZeroDivisionError:\n return 0", "title": "" }, { "docid": "81b52e91f520215135082cfa0b54ddd2", "score": "0.57468235", "text": "def f(x, y):\n return x/y + y/x", "title": "" }, { "docid": "c9dcb30df328a9de8dbdb2942dc39ead", "score": "0.573359", "text": "def neg_line(p):\n # y = -x + c\n # c = (p.x+p.y)\n neg_y_intersects.add(p.x + p.y)", "title": "" }, { "docid": "3e2de4f62d2409ace1fde62ed0ea11c6", "score": "0.5733273", "text": "def test_inplace_plus(self):\n result = coords.Cartesian(self.p1.x + self.p2.x,\n self.p1.y + self.p2.y,\n self.p1.z + self.p2.z)\n a = self.p1\n a += self.p2\n self.assertTrue(result == a)", "title": "" }, { "docid": "4a0bf35bbb3abe08bdddbc99c6100d94", "score": "0.57209086", "text": "def itp(x1,x2,y):\n x=float(y-x1)/float(x2-x1)\n assert x>=0\n return x", "title": "" }, { "docid": "d504193153683f8dc9ea2c9aa365b561", "score": "0.571949", "text": "def projectOnLine(v, p):\n l = 1./linalg.norm(v)\n vn = v*l\n a = float(vn.T*p)\n u = a*vn\n return u,a*l", "title": "" }, { "docid": "04ab41675635ea696132d70722e87a1b", "score": "0.57041645", "text": "def gradient(self, p):\n x = p[..., 0]\n y = p[..., 1]\n pi = np.pi\n uprime = np.zeros(p.shape, dtype=np.float64)\n uprime[..., 0] = 2 * x * (np.exp(x ** 2 + y ** 2))\n uprime[..., 1] = 2 * y * (np.exp(x ** 2 + y ** 2))\n return uprime", "title": "" }, { "docid": "7cdf30cfc07b6c6c2b2e170cac563f7e", "score": "0.5699533", "text": "def x_value(a: Point, b: Point, y: Union[int, float]) -> float:\n assert isinstance(y, (int, float))\n\n if a.y == b.y:\n raise ValueError('Cannot return x-value for y=const.')\n\n return (a.x - b.x) * (y - b.y) / (a.y - b.y) + b.x", "title": "" }, { "docid": "6d6d2b2beb7d175c43d471a2e083279c", "score": "0.56977385", "text": "def __add__(self, other):\n return Coordinates(self.x + other.x, self.y + other.y)", "title": "" }, { "docid": "84159ce0e9e85121f092428bb9969467", "score": "0.5693522", "text": "def project_on_line(l, p):\n\ta, b = l\n\tseg_length = distance(a, b)\n\tap = (p[0] - a[0], p[1] - a[1])\n\tab = (b[0] - a[0], b[1] - a[1])\n\tab = (ab[0] / seg_length, ab[1] / seg_length)\n\tdot = ab[0]*ap[0] + ab[1]*ap[1]\n\treturn (a[0] + dot*ab[0], a[1] + dot*ab[1])", "title": "" }, { "docid": "6eece127ad362069f9610f9f6bd8c195", "score": "0.56822526", "text": "def f(x, y):\n return -2 * y + 4 * x", "title": "" }, { "docid": "0abf750bd7774049b28a1567d9aa54df", "score": "0.5680937", "text": "def _lhs(self, P):\n P = self._process_regularizer(P)\n return P, self._AtA + (P.T @ P)", "title": "" }, { "docid": "38c583b19369311175bcdab375b20e65", "score": "0.5678734", "text": "def rel(self, x, y):", "title": "" }, { "docid": "2cdacb439206cb7aeef31813e1a021e5", "score": "0.56757486", "text": "def test_add_pipoints_current_value(self, pi_point):\n total = pi_point.point + pi_point.point\n assert (\n round(\n total.current_value - (pi_point.values[-1] + pi_point.values[-1]),\n ndigits=7,\n )\n == 0\n )", "title": "" 
}, { "docid": "739d0e774817b1585634aa5713bafcfb", "score": "0.5671398", "text": "def adding_tuple(self, other):\n sum = Point()\n sum.x = self.x + other[0]\n sum.y = self.y + other[1]\n return sum", "title": "" }, { "docid": "abf5108e14381d3dc672daf7cda0d107", "score": "0.5663466", "text": "def ta_ori(self, r, p, y) -> Tuple[float, float, float]:\n pass", "title": "" }, { "docid": "793be6fb4b77d466b8687c5a1bd9f193", "score": "0.56602883", "text": "def __iadd__(self,other):\n # print('in-place addition')\n if not isinstance(other,Point):\n return NotImplemented\n self._x += other._x\n self._y += other._y\n\n return self", "title": "" }, { "docid": "e2deafbff70ff0a3140ecbe39d216bb0", "score": "0.565977", "text": "def cross_point(self,aXY1,aXY2,bXY1,bXY2):\n\t\tA1 = self.find_A(aXY1,aXY2)\n\t\tA2 = self.find_A(bXY1,bXY2)\n\t\tB1 = self.find_B(aXY1,aXY2)\n\t\tB2 = self.find_B(bXY1,bXY2)\n\t\tC1 = self.find_C(aXY1,aXY2)\n\t\tC2 = self.find_C(bXY1,bXY2)\n\t\tif (A1==A2==0) or (B1==B2==0):\n\t\t\treturn None\n\t\treturn [(B1*C2-B2*C1)/float(A1*B2-A2*B1),(C1*A2-C2*A1)/float(A1*B2-A2*B1)]", "title": "" }, { "docid": "d2becc084ada73abf8e41ebd454ffeb8", "score": "0.56586313", "text": "def Perp2(a, b):\n\n return a[0] * b[1] - a[1] * b[0]", "title": "" }, { "docid": "2eda7ec654e9b5b746946bd5d80367bf", "score": "0.5655645", "text": "def compute(x, y):\n return x + y", "title": "" }, { "docid": "28440b6046e11cec8b4b8308b92c5953", "score": "0.5649141", "text": "def source(self, p):\n x = p[..., 0]\n y = p[..., 1]\n val = 1.0\n return val", "title": "" }, { "docid": "5e8c876dc790ecbe6e0652b16faf2239", "score": "0.56401366", "text": "def transform_point(p,R,t):\r\n x = R[0][0]*p[0]+R[0][1]*p[1]+t[0]\r\n y = R[1][0]*p[0]+R[1][1]*p[1]+t[1]\r\n return [x,y]", "title": "" }, { "docid": "0cec334aee987d84fee3c64141f12e07", "score": "0.56382406", "text": "def point(self,p):\n return self.token(p[0]).token(p[1])", "title": "" }, { "docid": "39973c7a7dd4c33658de4272361217e8", "score": "0.56363815", "text": "def heuristic(a, b):\n (x1, y1) = a\n (x2, y2) = b\n return abs(x1 - x2) + abs(y1 - y2)", "title": "" }, { "docid": "d45cd9b7fc4df5f88aa0af094bafa0e8", "score": "0.5632378", "text": "def recta(x,a,b):\r\n fa=log(f(a))\r\n return ((fa-log(f(b)))/(a-b))*(x-a) + fa", "title": "" }, { "docid": "00e4134e46a1d496bd5d6af534314661", "score": "0.56241816", "text": "def gradient(self, p):\n x = p[..., 0]\n y = p[..., 1]\n\n val = np.zeros(p.shape, dtype=p.dtype)\n pi = np.pi\n cos = np.cos\n sin = np.sin\n gamma = 0.1\n sigma = -14.9225565104455152\n rho = pi / 4\n theta = np.arctan2(y, x)\n theta = (theta >= 0) * theta + (theta < 0) * (theta + 2 * pi)\n t = 1 + (y / x) ** 2\n r = np.sqrt(x ** 2 + y ** 2)\n rg = r ** gamma\n\n ux1 = ((x >= 0.0) & (y >= 0.0)) * (\n gamma * rg * cos((pi / 2 - sigma) * gamma) * (x * cos((theta - pi / 2 + rho) * gamma) / (r * r)\n + y * sin((theta - pi / 2 + rho) * gamma) / (x * x * t))\n )\n\n uy1 = ((x >= 0.0) & (y >= 0.0)) * (gamma * rg * cos((pi / 2 - sigma) * gamma) * (\n y * cos((theta - pi / 2 + rho) * gamma) / (r * r) - sin((theta - pi / 2 + rho) * gamma) / (x * t)))\n\n ux2 = ((x <= 0.0) & (y >= 0.0)) * (gamma * rg * cos(rho * gamma) * (\n x * cos((theta - pi + sigma) * gamma) / (r * r) + y * sin((theta - pi + sigma) * gamma) / (x * x * t)))\n\n uy2 = ((x <= 0.0) & (y >= 0.0)) * (gamma * rg * cos(rho * gamma) * (\n y * cos((theta - pi + sigma) * gamma) / (r * r) - sin((theta - pi + sigma) * gamma) / (x * t)))\n\n ux3 = ((x <= 0.0) & (y <= 0.0)) * (gamma * rg * cos(sigma * gamma) * 
(\n x * cos((theta - pi - rho) * gamma) / (r * r) + y * sin((theta - pi - rho) * gamma) / (x * x * t)))\n\n uy3 = ((x <= 0.0) & (y <= 0.0)) * (gamma * rg * cos(sigma * gamma) * (\n y * cos((theta - pi - rho) * gamma) / (r * r) - sin((theta - pi - rho) * gamma) / (x * t)))\n\n ux4 = ((x >= 0.0) & (y <= 0.0)) * (gamma * rg * cos((pi / 2 - rho) * gamma) * (\n x * cos((theta - 3 * pi / 2 - sigma) * gamma) / (r * r) + y * sin((theta - 3 * pi / 2 - sigma) * gamma) / (\n x * x * t)))\n\n uy4 = ((x >= 0.0) & (y <= 0.0)) * (gamma * rg * cos((pi / 2 - rho) * gamma) * (\n y * cos((theta - 3 * pi / 2 - sigma) * gamma) / (r * r) - sin((theta - 3 * pi / 2 - sigma) * gamma) / (\n x * t)))\n\n val[..., 0] = ux1 + ux2 + ux3 + ux4\n val[..., 1] = uy1 + uy2 + uy3 + uy4\n return val", "title": "" }, { "docid": "a01bc1b94f1dfa71666c80e1d1066811", "score": "0.5613631", "text": "def __call__(self, x: float) -> float:\n return (self.slope * (x - self.point.x)) + self.point.y", "title": "" }, { "docid": "878235a2110faaeec17b93b20c5819c9", "score": "0.5610729", "text": "def gradient(self, p):\n x = p[..., 0]\n y = p[..., 1]\n a = 10000 * (x ** 2 + y ** 2) ** 2\n uprime = np.zeros(p.shape, dtype=np.float64)\n uprime[..., 0] = 200 * x / (1 + a)\n uprime[..., 1] = 200 * y / (1 + a)\n return uprime", "title": "" }, { "docid": "0abd15fb337df1adb4ef9c662a72f4c8", "score": "0.5609698", "text": "def __radd__(self,\n point: IPoint) -> IPoint:\n return self + point", "title": "" }, { "docid": "af1fe4c9d0b2bec548eb95d9dee153c3", "score": "0.5596991", "text": "def vecteur_points (p1,p2):\n return ( p2 [0] - p1 [0], p2 [1] - p1 [1])", "title": "" }, { "docid": "bd017d95c56b36d5e5e07f9882046ecf", "score": "0.5595749", "text": "def _parabola_3points(x1,y1,x2,y2,x3,y3):\n delta = (x1 - x2)*(x1 - x3)*(x2 - x3)\n a = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / delta\n b = (x3**2 * (y1 - y2) + x2**2 * (y3 - y1) + x1**2 * (y2 - y3)) / delta\n c = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / delta\n return a, b, c", "title": "" }, { "docid": "73ea1535c6544471b0494e1e00930daa", "score": "0.55935794", "text": "def find_slope2(m, x, b):\n m = float(m)\n x = float(x)\n b = float(b)\n y = m * x + b\n\n return y", "title": "" }, { "docid": "6419ca50ad4e8a0fdefaeb36cb9aa990", "score": "0.5592216", "text": "def _line(x, a, b):\n return a*x + b", "title": "" }, { "docid": "329d5e28df9331ef10840b50765217d7", "score": "0.55885667", "text": "def zp(self,a,b):\n\n z_prime = np.array([-np.cos(a)*np.sin(b),-np.sin(a)*np.sin(b),np.cos(b)])\n return z_prime", "title": "" }, { "docid": "aff1bbeb9862dc36923f7978e7c18951", "score": "0.5587336", "text": "def _point_interpolation(self, p1, p2):\n\n # Find the middle point of p1-p2\n p = p1 + (p2 - p1)/2\n # Project the point onto the ellipsoid\n p = self._point_ellipsoid_projection(p)\n # Return\n return p", "title": "" }, { "docid": "979d7a1b9ee5f0974836f14e49f5e10a", "score": "0.5586449", "text": "def polyval(p,x):\n y = 0\n for i in range(len(p)):\n y = x*y + p[i]\n return y", "title": "" }, { "docid": "6a898a753e61dedf1e5757551a7ce9b3", "score": "0.55851406", "text": "def cost(a, b, all_points):\n\n total_cost = 0\n\n pointA = all_points[a]\n pointB = all_points[b]\n\n x1 = pointA['coord'][0]\n x2 = pointB['coord'][0]\n y1 = pointA['coord'][1]\n y2 = pointB['coord'][1]\n\n # Travel time\n total_cost += math.hypot(x2 - x1, y2 - y1) / ROBOT_SPEED\n\n # Penalties incurred for skipping\n for i in range(a+1, b):\n total_cost += all_points[i]['penalty']\n\n # Add 
waiting time\n total_cost += ROBOT_WAIT_TIME\n\n return total_cost", "title": "" }, { "docid": "5300dd422945dec99c5799f6813c6b47", "score": "0.5582936", "text": "def is_point_on_curve(a, b, p, x, y):\n assert isinstance(a, Bn)\n assert isinstance(b, Bn)\n assert isinstance(p, Bn) and p > 0\n assert (isinstance(x, Bn) and\n isinstance(y, Bn)) or (x is None and y is None)\n\n if x is None and y is None:\n return True\n\n lhs = (y * y) % p\n rhs = ((x * x * x) + (a * x) + b) % p\n on_curve = (lhs == rhs)\n\n return on_curve", "title": "" }, { "docid": "55a2bd6dd953a6c201c289f25dde65b6", "score": "0.5582299", "text": "def point_return(point):\n return Point(0, point.y + 1)", "title": "" }, { "docid": "ea582fa8a8a20153aed6796289a4d02b", "score": "0.5582241", "text": "def _interpolate_p(p, r, v):\n\n # interpolate p (v should be in table)\n # if .5 < p < .75 use linear interpolation in q\n # if p > .75 use quadratic interpolation in log(y + r/v)\n # by -1. / (1. + 1.5 * _phi((1. + p)/2.))\n\n # find the 3 closest v values\n p0, p1, p2 = _select_ps(p)\n try:\n y0 = _func(A[(p0, v)], p0, r, v) + 1.\n except:\n print(p,r,v)\n raise\n y1 = _func(A[(p1, v)], p1, r, v) + 1.\n y2 = _func(A[(p2, v)], p2, r, v) + 1.\n\n y_log0 = math.log(y0 + float(r)/float(v))\n y_log1 = math.log(y1 + float(r)/float(v))\n y_log2 = math.log(y2 + float(r)/float(v))\n\n # If p < .85 apply only the ordinate transformation\n # if p > .85 apply the ordinate and the abcissa transformation\n # In both cases apply quadratic interpolation\n if p > .85:\n p_t = _ptransform(p)\n p0_t = _ptransform(p0)\n p1_t = _ptransform(p1)\n p2_t = _ptransform(p2)\n\n # calculate derivatives for quadratic interpolation\n d2 = 2*((y_log2-y_log1)/(p2_t-p1_t) - \\\n (y_log1-y_log0)/(p1_t-p0_t))/(p2_t-p0_t)\n if (p2+p0)>=(p1+p1):\n d1 = (y_log2-y_log1)/(p2_t-p1_t) - 0.5*d2*(p2_t-p1_t)\n else:\n d1 = (y_log1-y_log0)/(p1_t-p0_t) + 0.5*d2*(p1_t-p0_t)\n d0 = y_log1\n\n # interpolate value\n y_log = (d2/2.) * (p_t-p1_t)**2. + d1 * (p_t-p1_t) + d0\n\n # transform back to y\n y = math.exp(y_log) - float(r)/float(v)\n\n elif p > .5:\n # calculate derivatives for quadratic interpolation\n d2 = 2*((y_log2-y_log1)/(p2-p1) - \\\n (y_log1-y_log0)/(p1-p0))/(p2-p0)\n if (p2+p0)>=(p1+p1):\n d1 = (y_log2-y_log1)/(p2-p1) - 0.5*d2*(p2-p1)\n else:\n d1 = (y_log1-y_log0)/(p1-p0) + 0.5*d2*(p1-p0)\n d0 = y_log1\n\n # interpolate values\n y_log = (d2/2.) * (p-p1)**2. 
+ d1 * (p-p1) + d0\n\n # transform back to y\n y = math.exp(y_log) - float(r)/float(v)\n\n else:\n # linear interpolation in q and p\n v = min(v, 1e38)\n q0 = math.sqrt(2) * -y0 * \\\n scipy.stats.t.isf((1.+p0)/2., v)\n q1 = math.sqrt(2) * -y1 * \\\n scipy.stats.t.isf((1.+p1)/2., v)\n\n d1 = (q1-q0)/(p1-p0)\n d0 = q0\n\n # interpolate values\n q = d1 * (p-p0) + d0\n\n # transform back to y\n y = -q / (math.sqrt(2) * scipy.stats.t.isf((1.+p)/2., v))\n\n return y", "title": "" }, { "docid": "853ea1b03c03dae2f30016a13b2e2f3c", "score": "0.5577537", "text": "def is_point_on_curve(a, b, p, x, y):\n assert isinstance(a, Bn)\n assert isinstance(b, Bn)\n assert isinstance(p, Bn) and p > 0\n assert (isinstance(x, Bn) and isinstance(y, Bn)) or (x == None and y == None)\n\n if (x is not None) and (y is not None):\n lhs = (y * y) % p\n rhs = (x*x*x + a*x + b) % p\n on_curve = (lhs == rhs)\n return on_curve\n return True", "title": "" }, { "docid": "5b04d1630e595eeca93b4421531a9318", "score": "0.55759376", "text": "def __add__(self, p):\n\n\tnew_poly = list\n\tif len(self.coeff) <= len(p.coeff):\n\t\tnew_poly = p.coeff[:]\n\t\tfor i in range(len(self.coeff)):\n\t\t\tnew_poly[i] = p.coeff[i] + self.coeff[i]\n\telse:\n\t\tnew_poly = self.coeff[:]\n\t\tfor i in range(len(p.coeff)):\n\t\t\tnew_poly[i] = self.coeff[i] + p.coeff[i]", "title": "" } ]
5c23c20f83b7e5f6c4fde5503aa792ee
Run afl-showmap on the target and get the coverage bitmap for a particular testcase.
[ { "docid": "7976f8ba9da94bc5b4cef400affa0d06", "score": "0.7753865", "text": "def run_afl_showmap(target: str, afl_stats: FuzzerStats, testcase: str) -> bytes:\n # Base afl-showmap options and arguements on afl-fuzz options and arguments\n afl_showmap_opts = ['-b', '-q']\n for opt, arg in afl_stats.afl_cmdline:\n if opt in ('-m', '-Q', '-t'):\n afl_showmap_opts.extend([opt, arg])\n\n # Default timeout of 1 sec\n if '-t' not in afl_showmap_opts:\n afl_showmap_opts.extend(['-t', '1000'])\n\n # Generate the target command-line for a testcase popped off the task\n # queue. Replace the original (uninstrumented) target binary with the\n # instrumented version\n target_cmdline, target_input = afl_stats.gen_command_line(testcase)\n target_cmdline[0] = target\n\n with NamedTemporaryFile() as temp:\n # Run afl-showmap\n args = ['afl-showmap', *afl_showmap_opts, '-o', temp.name, '--',\n *target_cmdline]\n subprocess.run(args, input=target_input, check=False,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n # Read the coverage bitmap\n with open(temp.name, 'rb') as showmap_out:\n return showmap_out.read()", "title": "" } ]
[ { "docid": "40e66fffa9607e4942f5d9355e8da9d7", "score": "0.56286484", "text": "def show_coverage(self, viewer_id=None, table_group='main'):\n\n view_type = 'coverImage'\n cid = viewer_id if viewer_id else (\"%s-%s\" % (LO_VIEW_DICT[view_type], table_group))\n payload = {'viewerType': LO_VIEW_DICT[view_type], 'cellId': cid}\n return self.dispatch(ACTION_DICT['ShowCoverage'], payload)", "title": "" }, { "docid": "8c6f366d7fc37e29f39aaeb437017c28", "score": "0.55119926", "text": "def __main__():\n\n args = setup_parser().parse_args()\n\n args = check_args(args)\n\n if args.verbose:\n print('{a} image(s) at {pos}'.format(\n a='Writing' if not args.coadd else 'Coadding', pos=args.skypos))\n print('\t\tof dimensions {w}x{h} degrees'.format(\n w=args.skyrange[0], h=args.skyrange[1]))\n print('\t\twith a virtual detector {d} degrees across'.format(\n d=args.detsize))\n print('\t\tin time range(s): {t}'.format(t=repr(args.trange)))\n\n gmap(band=args.band, cntfile=args.cntfile,\n coadd=args.coadd, detsize=args.detsize, intfile=args.intfile,\n skypos=args.skypos, maxgap=args.maxgap,\n memlight=args.memlight, minexp=args.minexp, overwrite=args.overwrite,\n retries=args.retries, skyrange=args.skyrange, stepsz=args.stepsz,\n trange=args.trange, verbose=args.verbose,\n cntcoaddfile=args.cntcoaddfile, intcoaddfile=args.intcoaddfile)", "title": "" }, { "docid": "73c3aa142f471d5478c926535b65627c", "score": "0.5394963", "text": "def test_print_all_details_runs():\n image_info.print_all_details()", "title": "" }, { "docid": "67748086c5c9b818e2f154b2850b68d3", "score": "0.5338853", "text": "def capture_unittest_coverage(self):\n assert self.m.chromium.c.BUILD_CONFIG == 'Coverage'\n generate_coverage_py = self.public_scripts_dir.join(\n 'generate_coverage.py')\n args = ['--verbose',\n '--syzygy',\n '--build-dir', self.output_dir]\n return self.m.python(\n 'capture_unittest_coverage', generate_coverage_py, args)", "title": "" }, { "docid": "84ef8c1ac0d4292307c52db81ba0b272", "score": "0.5305564", "text": "def coverage():\n run('coverage run --source chanjo-report setup.py test')\n run('coverage report -m')\n run('coverage html')\n run('open htmlcov/index.html')\n log.info('collected test coverage stats')", "title": "" }, { "docid": "aa7afde0e7f48c97e6b975eca418320d", "score": "0.5225435", "text": "def main(args, utils_impl=Utils):\n\n if args.date:\n resolved_date = args.date\n else:\n resolved_date = MapPreview.get_most_recent_date(\n args.source, args.signal, args.geo_type, args.geo_value)\n print(f'using latest data reported for date {resolved_date}')\n\n screen_size = (\n args.width + MapPreview.SCREEN_PADDING[0],\n args.height + MapPreview.SCREEN_PADDING[1],\n )\n with utils_impl.run_screenshot_stack(screen_size) as driver:\n MapPreview.take_screenshot(driver, args, resolved_date)", "title": "" }, { "docid": "e2267171cef3143f3cb1a1ac8898f3ff", "score": "0.52149725", "text": "def test_simple_all_run(msa_fasta_file: Path, tmp_path: Path):\n mv = MsaViz(msa_fasta_file)\n\n fig_outfile = tmp_path / \"test.png\"\n mv.savefig(fig_outfile)\n\n assert fig_outfile.exists()", "title": "" }, { "docid": "abe8241e4df36202ceae5c089531dd30", "score": "0.52022386", "text": "def coverage(self):\n return self._hitmap.viewkeys()", "title": "" }, { "docid": "7c2eb324267b8a33077e93aa723e6888", "score": "0.51690245", "text": "def main():\n\n with its.device.ItsSession() as cam:\n # Arbitrary capture request exposure values; image content is not\n # important for this test, only the metadata.\n props = 
cam.get_camera_properties()\n if not its.caps.manual_sensor(props):\n print \"Test skipped\"\n return\n\n req,fmt = its.objects.get_fastest_manual_capture_settings(props)\n cap = cam.do_capture(req, fmt)\n pprint.pprint(cap[\"metadata\"])\n\n # No pass/fail check; test passes if it completes.", "title": "" }, { "docid": "3323706ea25cc11b6931c6845bbde71c", "score": "0.5165554", "text": "def test_all_run_with_options(msa_fasta_file: Path, tmp_path: Path):\n mv = MsaViz(\n msa_fasta_file,\n color_scheme=\"Identity\",\n wrap_length=50,\n show_label=False,\n show_seq_char=False,\n sort=True,\n )\n mv.set_highlight_pos([1, 5, (10, 13), 18])\n mv.set_highlight_pos_by_ident_thr(min_thr=80, max_thr=100)\n mv.add_markers([50, 51, 52, (60, 70), 80], marker=\"x\", color=\"blue\", size=6)\n mv.add_text_annotation(\n (100, 120), text=\"test\", text_color=\"blue\", text_size=10, range_color=\"blue\"\n )\n\n fig_outfile = tmp_path / \"test.png\"\n mv.savefig(fig_outfile)\n\n assert fig_outfile.exists()", "title": "" }, { "docid": "fb3b6f858afa060ed7599f2f18be574f", "score": "0.51270723", "text": "def main():\n NAME = os.path.basename(__file__).split(\".\")[0]\n\n with its.device.ItsSession() as cam:\n props = cam.get_camera_properties()\n if not its.caps.full(props):\n print \"Test skipped\"\n return\n\n _,fmt = its.objects.get_fastest_manual_capture_settings(props)\n e, s = its.target.get_target_exposure_combos(cam)[\"midExposureTime\"]\n e /= 2.0\n\n r_means = []\n g_means = []\n b_means = []\n\n reqs = [\n its.objects.manual_capture_request(s, e, True),\n its.objects.manual_capture_request(s, e, True),\n its.objects.manual_capture_request(s*2,e, True),\n its.objects.manual_capture_request(s*2,e, True),\n its.objects.manual_capture_request(s, e, True),\n its.objects.manual_capture_request(s, e, True),\n its.objects.manual_capture_request(s, e*2, True),\n its.objects.manual_capture_request(s, e, True),\n its.objects.manual_capture_request(s*2,e, True),\n its.objects.manual_capture_request(s, e, True),\n its.objects.manual_capture_request(s, e*2, True),\n its.objects.manual_capture_request(s, e, True),\n its.objects.manual_capture_request(s, e*2, True),\n its.objects.manual_capture_request(s, e*2, True),\n ]\n\n caps = cam.do_capture(reqs, fmt)\n for i,cap in enumerate(caps):\n img = its.image.convert_capture_to_rgb_image(cap)\n its.image.write_image(img, \"%s_i=%02d.jpg\" % (NAME, i))\n tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)\n rgb_means = its.image.compute_image_means(tile)\n r_means.append(rgb_means[0])\n g_means.append(rgb_means[1])\n b_means.append(rgb_means[2])\n\n # Draw a plot.\n idxs = range(len(r_means))\n pylab.plot(idxs, r_means, 'r')\n pylab.plot(idxs, g_means, 'g')\n pylab.plot(idxs, b_means, 'b')\n pylab.ylim([0,1])\n matplotlib.pyplot.savefig(\"%s_plot_means.png\" % (NAME))\n\n g_avg = sum(g_means) / len(g_means)\n g_ratios = [g / g_avg for g in g_means]\n g_hilo = [g>1.0 for g in g_ratios]\n assert(g_hilo == [False, False, True, True, False, False, True,\n False, True, False, True, False, True, True])", "title": "" }, { "docid": "164cc4024a9866da376a18a660601ea3", "score": "0.5114869", "text": "def view_coverage():\n coverage_file = os.path.join(SCRIPTDIR, '.coverage')\n if not os.path.exists(coverage_file):\n raise InvalidArg('No coverage file found, run coverage: {}'.format(\n coverage_file,\n ))\n # Show console report.\n print(C('').join(C('\\nCoverage Report', 'cyan'), ':'))\n\n covreportcmd = [\n 'coverage',\n 'report',\n ]\n try:\n output = 
subprocess.check_output(\n covreportcmd,\n stderr=subprocess.STDOUT,\n ).decode()\n except subprocess.CalledProcessError:\n return 1\n if not output.startswith('Name'):\n print_err(output)\n return 1\n\n divline = C('-' * 45, 'dimgrey')\n for line in output.splitlines():\n if line.startswith('--'):\n # Divider line.\n print(divline)\n continue\n name, statements, miss, cover = line.split()\n namefmt = C(name).ljust(25)\n statementsfmt = C(statements, (46, 137, 255)).rjust(5)\n try:\n miss = int(miss)\n if miss == 0:\n missfmt = C(miss, 'lightgreen', style='bright').rjust(6)\n else:\n missfmt = C(miss, 'red').rjust(6)\n except ValueError:\n # Actual 'Miss' header.\n missfmt = C(miss, 'red').rjust(6)\n try:\n cover = int(cover.rstrip('%'))\n if cover == 100:\n covercolr = {'fore': 'lightgreen', 'style': 'bright'}\n elif cover > 49:\n covercolr = {'fore': 'green'}\n else:\n covercolr = {'fore': 'red'}\n coverfmt = C('{}%'.format(cover), **covercolr).rjust(6)\n except ValueError:\n # Actual 'Cover' header.\n coverfmt = C(cover, 'green').rjust(6)\n print(C(' ').join(namefmt, statementsfmt, missfmt, coverfmt))\n return 0", "title": "" }, { "docid": "87f81b4d465d34c90833cc53640041db", "score": "0.5109988", "text": "def _run_test(self, test_info):\n self._ensure_dump_render_tree_is_running()\n # The pixel_hash is used to avoid doing an image dump if the\n # checksums match, so it should be set to a blank value if we\n # are generating a new baseline. (Otherwise, an image from a\n # previous run will be copied into the baseline.)\n image_hash = test_info.image_hash()\n if image_hash and self._test_args.new_baseline:\n image_hash = \"\"\n start = time.time()\n crash, timeout, actual_checksum, output, error = \\\n self._driver.run_test(test_info.uri, test_info.timeout, image_hash)\n end = time.time()\n\n result = process_output(self._port, test_info, self._test_types,\n self._test_args, self._options.configuration,\n self._options.results_directory, crash,\n timeout, end - start, actual_checksum,\n output, error)\n self._test_results.append(result)\n return result", "title": "" }, { "docid": "72db4b1979c7a2c3f6eca99720f4c95b", "score": "0.5108259", "text": "def run(self):\n \n run_afl(\n indir = self.srcdir, \n outdir = self.afloutdir, \n tgtcmd = self.cfg.tgtcmd, \n cfg = self.cfg, \n cores = self.cores, \n persist_tgt = False,\n verbose = self.verbose,\n dry_run = self.dry_run,\n )\n\n self.test_img_creation()", "title": "" }, { "docid": "d50a0d5c8bd81d1faa8c1ea7e4df00ab", "score": "0.5093121", "text": "def runAnalysis(aif):\n\n aif.runDefaultAnalysis()\n\n for ttest in aif.getTTestsForTargetAndExperiment():\n aif.runHammingDistanceAnalysis(ttest.randomTraceSet, \"di1\",\"di2\")", "title": "" }, { "docid": "91ab0be9bde1a12283152c4e2cf6ddd7", "score": "0.5092178", "text": "def cov():\n local(\"coverage report -m\")\n local(\"coverage html\")\n local(\"coverage xml\")", "title": "" }, { "docid": "8be185c0911acbba7dc88c66a9ba23cd", "score": "0.50723636", "text": "def main():\r\n calibrate_camera_to_DMD_mapping()\r\n goto_flagged_images()", "title": "" }, { "docid": "67e98532384640f184daed30d2585fb3", "score": "0.50558", "text": "def focalCover(targetfile = pc4layer, cbscode='40', outputfile='coverageparks.shp'):\n\n return focalsum", "title": "" }, { "docid": "3a2c433efab6746b8930a29837880397", "score": "0.5052849", "text": "async def showMap(self,ctx,*argv):\n if(len(argv)==3):\n zoom = int(argv[0])\n lat_deg = float(argv[1])\n lon_deg = float(argv[2])\n elif(len(argv)==1):\n zll = 
argv[0][argv[0].find(\"map=\")+4:]\n zoom=int(zll[:zll.find('/')])\n ll = zll[zll.find('/')+1:]\n lat_deg = float(ll[:ll.find('/')])\n lon_deg = float(ll[ll.find('/')+1:])\n #print(f'{zoom},{lat_deg},{lon_deg}')\n else:\n await ctx.send(f\"Either enter the zoom, latitude and longditude, or url, like this: `€showMap 15 60.3912 5.3242`\")\n return\n if(zoom>19):\n zoom=19\n await ImageCluster(ctx, lat_deg, lon_deg, zoom)", "title": "" }, { "docid": "8fb328b24c61edcedb51025df4e23fd7", "score": "0.50147796", "text": "def print_target_maps(target_maps):\n for index, target_map in enumerate(target_maps):\n print('Target Map {}/{}'.format(index + 1, len(target_maps)))\n print_target_map(target_map)", "title": "" }, { "docid": "3e92840211c6f9d99624e7daa1699b6f", "score": "0.5008252", "text": "def test_mosaic_to_workspace_FGDB(self):\n raster1 = os.path.join(os.getcwd(), 'test-data', 'raster', 'mosaic1')\n raster2 = os.path.join(os.getcwd(), 'test-data', 'raster', 'mosaic2')\n raster3 = os.path.join(os.getcwd(), 'test-data', 'raster', 'mosaic3')\n self.request['params'][0]['response']['docs'][0]['path'] = raster1\n self.request['params'][0]['response']['docs'][1]['path'] = raster2\n self.request['params'][0]['response']['docs'][2]['path'] = raster3\n self.request['params'][3]['value'] = 'Mosaic'\n self.request['params'][5]['value'] = 'FileGDB'\n self.request['params'][2]['value'] = self.target_ws\n getattr(sys.modules[self.request['task']], \"execute\")(self.request)\n dsc = arcpy.Describe(os.path.join(self.target_ws, 'Mosaic'))\n self.assertEquals([dsc.bandcount, dsc.pixeltype], [1, 'U8'])", "title": "" }, { "docid": "2ee7635916f0ae14e5a441824aa522fa", "score": "0.49968728", "text": "def test_get_status_maps1(self):\n pass", "title": "" }, { "docid": "5c3c2a04b06933684a37f94dfdd9aa9e", "score": "0.4987271", "text": "def display(self):\n if not self.initialized:\n return\n\n cv2.imshow(\"explored map\", self._map_exploration)\n cv2.imshow(\"exploration_zone\", self._exploration_zone)\n cv2.waitKey(1)", "title": "" }, { "docid": "bee6748ea45ad623eccec5c4deb37f6f", "score": "0.4959971", "text": "def do_test(self):\n exe_name = \"a.out\"\n exe = self.getBuildArtifact(exe_name)\n\n # Create the target\n target = self.dbg.CreateTarget(exe)\n self.assertTrue(target, VALID_TARGET)\n\n self.expect(\"target modules dump symtab -m a.out\",\n patterns=['Metadata.*_TMC1a3Foo'])\n\n self.expect(\"target modules dump symtab a.out\",\n patterns=['Metadata.*type metadata for'])", "title": "" }, { "docid": "9c5291000db13eea6a693d77a4f905e0", "score": "0.4957987", "text": "def coverage():\n tests = unittest.TestLoader().discover('live/tests')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n COV.html_report()\n COV.erase()\n return 0\n return 1", "title": "" }, { "docid": "be8dd53a8b29eeb641e9681835a9d094", "score": "0.4954103", "text": "def testGetOneMAPWithMatchingGroundtruthAndDetections(self):\n coco_evaluator = coco_evaluation.CocoDetectionEvaluator(\n _get_categories_list())\n coco_evaluator.add_single_ground_truth_image_info(\n image_id='image1',\n groundtruth_dict={\n standard_fields.InputDataFields.groundtruth_boxes:\n np.array([[100., 100., 200., 200.]]),\n standard_fields.InputDataFields.groundtruth_classes: np.array([1])\n })\n coco_evaluator.add_single_detected_image_info(\n image_id='image1',\n detections_dict={\n standard_fields.DetectionResultFields.detection_boxes:\n np.array([[100., 100., 
200., 200.]]),\n standard_fields.DetectionResultFields.detection_scores:\n np.array([.8]),\n standard_fields.DetectionResultFields.detection_classes:\n np.array([1])\n })\n coco_evaluator.add_single_ground_truth_image_info(\n image_id='image2',\n groundtruth_dict={\n standard_fields.InputDataFields.groundtruth_boxes:\n np.array([[50., 50., 100., 100.]]),\n standard_fields.InputDataFields.groundtruth_classes: np.array([1])\n })\n coco_evaluator.add_single_detected_image_info(\n image_id='image2',\n detections_dict={\n standard_fields.DetectionResultFields.detection_boxes:\n np.array([[50., 50., 100., 100.]]),\n standard_fields.DetectionResultFields.detection_scores:\n np.array([.8]),\n standard_fields.DetectionResultFields.detection_classes:\n np.array([1])\n })\n coco_evaluator.add_single_ground_truth_image_info(\n image_id='image3',\n groundtruth_dict={\n standard_fields.InputDataFields.groundtruth_boxes:\n np.array([[25., 25., 50., 50.]]),\n standard_fields.InputDataFields.groundtruth_classes: np.array([1])\n })\n coco_evaluator.add_single_detected_image_info(\n image_id='image3',\n detections_dict={\n standard_fields.DetectionResultFields.detection_boxes:\n np.array([[25., 25., 50., 50.]]),\n standard_fields.DetectionResultFields.detection_scores:\n np.array([.8]),\n standard_fields.DetectionResultFields.detection_classes:\n np.array([1])\n })\n metrics = coco_evaluator.evaluate()\n self.assertAlmostEqual(metrics['DetectionBoxes_Precision/mAP'], 1.0)", "title": "" }, { "docid": "913613823815f627e1d6e9515eba0ba3", "score": "0.49520984", "text": "def main(self):\n if len(sys.argv) > 2:\n for i in range(2, len(sys.argv)):\n if sys.argv[i].lower() == \"p\":\n self.ploter.show_compareImages(self.imageProcessor.result_image())\n elif sys.argv[i].lower() == \"s\":\n self.__create_image()\n else:\n print(\"This flag is unknown\\n\\n\")\n self.__use()\n \n print(\"Cloud Coverage Index: \" + str(self.imageProcessor.get_CCI()))", "title": "" }, { "docid": "a5202515237df3b09d013f4afb7bb80f", "score": "0.49435407", "text": "def test_and_run_command(self):\n self.build()\n self.breakOnCtor()\n\n raw_output = self.res.GetOutput()\n frameRE = re.compile(r\"\"\"\n ^\\s\\sframe # heading for the frame info,\n .* # wildcard, and\n 0x[0-9a-f]{16} # the frame pc, and\n \\sa.out`(.+) # module`function, and\n \\s\\+\\s # the rest ' + ....'\n \"\"\", re.VERBOSE)\n for line in raw_output.split(os.linesep):\n match = frameRE.search(line)\n if match:\n function = match.group(1)\n #print(\"line:\", line)\n #print(\"function:\", function)\n self.runCmd(\"disassemble -n '%s'\" % function)", "title": "" }, { "docid": "7a460dc47b3c644d4964978664fd3b8f", "score": "0.4939229", "text": "def test_get_status_maps(self):\n pass", "title": "" }, { "docid": "743d5f34f19134bbca906bea7845a65f", "score": "0.49347323", "text": "def _analyze_anchors_coverage(self, anchors: Anchors, image_size: int, labels: list, title: str):\n\n fig = plt.figure(figsize=(12, 5))\n fig.suptitle(f\"{title} anchors coverage\")\n\n # box style plot\n ax = fig.add_subplot(121)\n ax.set_xlabel(\"W\", fontsize=STAT_LOGGER_FONT_SIZE)\n ax.set_ylabel(\"H\", fontsize=STAT_LOGGER_FONT_SIZE)\n ax.set_xlim([0, image_size])\n ax.set_ylim([0, image_size])\n\n anchors_boxes = anchors.anchors.cpu().numpy()\n anchors_len = anchors.num_anchors\n\n anchors_boxes = anchors_boxes.reshape(-1, 2)\n\n for i in range(anchors_len):\n rect = self._get_rect(anchors_boxes[i][0], anchors_boxes[i][1])\n rect.set_alpha(0.3)\n rect.set_facecolor([random.random(), random.random(), 
random.random(), 0.3])\n ax.add_patch(rect)\n\n # distance from anchor plot\n ax = fig.add_subplot(122)\n ax.set_xlabel(\"W\", fontsize=STAT_LOGGER_FONT_SIZE)\n ax.set_ylabel(\"H\", fontsize=STAT_LOGGER_FONT_SIZE)\n\n x = np.arange(1, image_size, 1)\n y = np.arange(1, image_size, 1)\n\n xx, yy = np.meshgrid(x, y, sparse=False, indexing=\"xy\")\n points = np.concatenate([xx.reshape(1, -1), yy.reshape(1, -1)])\n\n color = self._get_score(anchors_boxes, points, image_size)\n\n ax.set_xlabel(\"W\", fontsize=STAT_LOGGER_FONT_SIZE)\n ax.set_ylabel(\"H\", fontsize=STAT_LOGGER_FONT_SIZE)\n plt.imshow(color, interpolation=\"nearest\", origin=\"lower\", extent=[0, image_size, 0, image_size])\n\n # calculate the coverage for the dataset labels\n cover_masks = []\n for i in range(anchors_len):\n w_max = (anchors_boxes[i][0] / image_size) * 4\n w_min = (anchors_boxes[i][0] / image_size) * 0.25\n h_max = (anchors_boxes[i][1] / image_size) * 4\n h_min = (anchors_boxes[i][1] / image_size) * 0.25\n cover_masks.append(\n np.logical_and(np.logical_and(np.logical_and(labels[:, 3] < w_max, labels[:, 3] > w_min), labels[:, 4] < h_max), labels[:, 4] > h_min)\n )\n cover_masks = np.stack(cover_masks)\n coverage = np.count_nonzero(np.any(cover_masks, axis=0)) / len(labels)\n\n self.sg_logger.add_figure(tag=f\"{title} anchors coverage\", figure=fig)\n return coverage", "title": "" }, { "docid": "47644e7446b102e28b498746ab865996", "score": "0.49319172", "text": "def exportMap():\r\n global OD\r\n iface.mapCanvas().saveAsImage('D:/Users/63707/Documents/python3/C_TWN_road_attribute/test/{}_{}.png'.format(OD[0], OD[1]))\r\n QTimer.singleShot(500, next_point)", "title": "" }, { "docid": "3b13f0d421a009b0aa89c97c41e99864", "score": "0.49302006", "text": "def test_CAS7697(self):\n myia = iatool()\n imagename = \"CAS7697.im\"\n myia.fromshape(imagename, [100,100, 10])\n myia.addnoise()\n bb = myia.getchunk()\n bb[:,:,9] = 40\n myia.putchunk(bb)\n myia.calcmask(imagename + \"<30\")\n for clmethod in [\"framework\", \"tiled\"]:\n stats = myia.statistics(axes=[0,1], clmethod=clmethod)\n self.assertTrue(stats['max'][9] == 0)\n self.assertTrue(stats['min'][9] == 0)\n myia.done()", "title": "" }, { "docid": "d4828e1818bd19cd4abb5f78fe07cd29", "score": "0.49240312", "text": "def test_plot_65_qubit_gate_map(self):\n # getting the mock backend from FakeProvider\n\n backend = FakeManhattan()\n\n fname = \"65_qubit_gate_map.png\"\n self.graph_plot_gate_map(backend=backend, filename=fname)\n\n ratio = VisualTestUtilities._save_diff(\n self._image_path(fname),\n self._reference_path(fname),\n fname,\n FAILURE_DIFF_DIR,\n FAILURE_PREFIX,\n )\n self.assertGreaterEqual(ratio, 0.99)", "title": "" }, { "docid": "8aac14006c4b3e3f041e01f950be2f9c", "score": "0.49185798", "text": "def test_arcs(self):\r\n image = self.design.layout.layers[0].images[0]\r\n assert len(image.traces) == 2", "title": "" }, { "docid": "c8323cdd29be54f591199d64662be6a9", "score": "0.4911458", "text": "def mapper(self, sample):\n if sample.general.bestassemblyfile != \"NA\":\n # Define the Qualimap log and report files\n reportfile = os.path.join(sample.general.QualimapResults, 'genome_results.txt')\n # Define the Qualimap call\n qualimapcall = 'qualimap bamqc -bam {} -outdir {}'.format(sample.general.sortedbam,\n sample.general.QualimapResults)\n sample.commands.qualimap = qualimapcall\n # Initialise a dictionary to hold the Qualimap results\n qdict = dict()\n # If the report file doesn't exist, run Qualimap, and print logs to the log file\n if not 
os.path.isfile(reportfile):\n tlock = threading.Lock()\n out, err = run_subprocess(sample.commands.qualimap)\n tlock.acquire()\n write_to_logfile(sample.commands.qualimap, sample.commands.qualimap, self.logfile,\n sample.general.logout, sample.general.logerr, None, None)\n write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)\n tlock.release()\n # Initialise a genobject to store the coverage dictionaries\n sample.depth = GenObject()\n sample.depth.length = dict()\n sample.depth.bases = dict()\n sample.depth.coverage = dict()\n sample.depth.stddev = dict()\n try:\n with open(reportfile) as report:\n # Read the report\n for line in report:\n # Sanitise the keys and values using self.analyze\n key, value = self.analyze(line)\n # If the keys and values exist, enter them into the dictionary\n if (key, value) != (None, None):\n qdict[key] = value\n if 'Coverage per contig' in line:\n for contigline in report:\n try:\n _, name, length, bases, coverage, stddev = contigline.rstrip().split('\\t')\n sample.depth.length.update({name: length})\n sample.depth.bases.update({name: bases})\n sample.depth.coverage.update({name: coverage})\n sample.depth.stddev.update({name: stddev})\n except ValueError:\n pass\n\n except (IOError, FileNotFoundError):\n pass\n # If there are values in the dictionary\n if qdict:\n # Make new category for Qualimap results and populate this category with the report data\n for attribute in qdict:\n # Remove the 'X' from the depth values e.g. 40.238X\n setattr(sample.mapping, attribute, qdict[attribute].rstrip('X'))", "title": "" }, { "docid": "50a492e4dde4246cfc7a5b12985d9b7e", "score": "0.49097607", "text": "def main():\n\n\tprint \"Doing stuff...\\n\"\n\n\ttest=map.Map()\n\ttest.updateMap()\n\twhile(1):\n\t\tcv2.namedWindow('map hector', 0)\n\t \t# refresh the image on the screen\n\t\tcv2.imshow('map hector',test._map)\n\t\tcv2.waitKey(3)", "title": "" }, { "docid": "06fd5c3921452fe3a7206ac8452d9488", "score": "0.49032006", "text": "def test_simple(self):\r\n image = self.design.layout.layers[0].images[0]\r\n assert len(image.traces) == 2", "title": "" }, { "docid": "ff9777612899284a917d16162efc5e2d", "score": "0.4892885", "text": "def test_get_image_information_runs():\n image_info.get_image_information(5)", "title": "" }, { "docid": "d6d586398c52a87597d7f66bdb921021", "score": "0.48847738", "text": "def identify_confusion(fname, base_dir, gt_dir):\n tiles = open(fname).read().split(\"\\n\")\n tiles = [t for t in tiles if t!= \"\"]\n sums = numpy.zeros(17)\n for tile in tiles:\n pred = misc.imread(base_dir + tile + \".png\")\n target = misc.imread(gt_dir + tile + \".png\")\n target[pred!=1] = 17 # so that only for road (label=1) can the two be equal\n for i in range(17):\n sums[i] += numpy.count_nonzero(target==i)\n\n for i in range(17):\n print(f'{i}: {sums[i]}')", "title": "" }, { "docid": "eabdc2adf59282bf0f3d1cc72eae406c", "score": "0.4877715", "text": "def testCoverage_0(self):\n mtt.makeTempDirParent()\n tmpDir = os.path.abspath(mtt.makeTempDir('testCoverage_0'))\n parent = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n customOpts = mafval.GenericValidationOptions()\n for target, mafSeq, solutionDict in g_knownGood:\n testMaf = mtt.testFile(os.path.join(os.path.abspath(tmpDir), 'test.maf'),\n mafSeq, g_headers)\n cmd = [os.path.abspath(os.path.join(parent, 'test', 'mafCoverage'))]\n cmd += ['--maf', os.path.abspath(os.path.join(tmpDir, 'test.maf')),\n '--species', target]\n outpipes = [os.path.join(tmpDir, 
'output.txt')]\n mtt.recordCommands([cmd], tmpDir, outPipes=outpipes)\n mtt.runCommandsS([cmd], tmpDir, outPipes=outpipes)\n self.assertTrue(mafval.validateMaf(os.path.join(tmpDir, 'test.maf'),\n customOpts))\n self.assertTrue(coverageIsCorrect(os.path.join(tmpDir, 'output.txt'),\n solutionDict))\n mtt.removeDir(tmpDir)", "title": "" }, { "docid": "f4ef77b99e077457106c74116ec8a0b6", "score": "0.4867624", "text": "def test_pokemon_map_output(self, mock_stdout):\n expected_output = \"🐱 🌲 🌲 🌲 🌲 🌲 🌲 🌲\\n\"\\\n \"🌲 🌲 🌲 🌲 🌲 🌲 🌲 🌲\\n\"\\\n \"🌲 🌲 🌲 🌲 🌲 🌲 🌲 🌲\\n\"\\\n \"🌲 🌲 🌲 🌲 🌲 🌲 🌲 🌲\\n\"\\\n \"🌲 🌲 🌲 🌲 🌲 🌲 🌲 🌲\\n\"\\\n \"🌲 🌲 🌲 🌲 🌲 🌲 🌲 🌲\\n\"\\\n \"🌲 🌲 🌲 🌲 🌲 🌲 🌲 🌲\\n\"\\\n \"🌲 🌲 🌲 🌲 🌲 🌲 🌲 🌲\\n\"\n pokemon_map()\n self.assertEqual(mock_stdout.getvalue(), expected_output)", "title": "" }, { "docid": "c2db9d9d76a3fee17e2c5c301c723790", "score": "0.48535594", "text": "def main():\n args = argument_parser()\n args.mbtiles_file.close()\n mbtiles_file = args.mbtiles_file.name\n zoom = args.zoom\n tile_x = args.tilex\n tile_y = args.tiley\n mvt_content = run(tile_x, tile_y, zoom, mbtiles_file)\n print(mvt_content, end='')", "title": "" }, { "docid": "62f8bc2455db32b045117a3787dd6713", "score": "0.48534715", "text": "def _binclass_print_teststats(self,pmodel,targets,imode):\n popts = helpers.Struct()\n popts.imode = imode\n popts.ptype = 3\n (h_q, rho_q, logz, h_p, rho_p) = self.predict(pmodel,popts)\n nte = targets.shape[0]\n acc = 100.*float((np.sign(h_q)==targets).sum())/nte\n loglh = logz.sum()/nte\n print ('Test set predictions: Accuracy: %4.2f%%, '\n 'log likelihood: %.6f') % (acc, loglh)", "title": "" }, { "docid": "34d5df76f4da061f2daa4865cca78aba", "score": "0.48515138", "text": "def _BuilderRunsTestOfInterest(self, test_map: Dict[str, Any]) -> bool:\n raise NotImplementedError()", "title": "" }, { "docid": "cd94393951093aaab4d4db1a73b30e1f", "score": "0.4850536", "text": "def test_plot_1_qubit_gate_map(self):\n # getting the mock backend from FakeProvider\n\n backend = FakeArmonk()\n\n fname = \"1_qubit_gate_map.png\"\n self.graph_plot_gate_map(backend=backend, filename=fname)\n\n ratio = VisualTestUtilities._save_diff(\n self._image_path(fname),\n self._reference_path(fname),\n fname,\n FAILURE_DIFF_DIR,\n FAILURE_PREFIX,\n )\n self.assertGreaterEqual(ratio, 0.99)", "title": "" }, { "docid": "06449bd1248ff80e999ddbec03cb704f", "score": "0.48434246", "text": "def show_map_batch(sample_batched, img_to_show=3, save_file_path=None, as_numpy=False):\n\n # just select 6 images to show per batch\n sat_img_batch, map_img_batch = sample_batched['sat_img'][:img_to_show, :, :, :],\\\n sample_batched['map_img'][:img_to_show, :, :, :]\n batch_size = len(sat_img_batch)\n\n f, ax = plt.subplots(int(np.ceil(batch_size / 3)), 3, figsize=(15, int(np.ceil(batch_size / 3)) * 5))\n f.tight_layout()\n f.subplots_adjust(hspace=.05, wspace=.05)\n ax = ax.ravel()\n\n # unorm = UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n\n for i in range(batch_size):\n ax[i].axis('off')\n show_map(sat_img=sat_img_batch.cpu().numpy()[i, :, :, :].transpose((1, 2, 0)),\n map_img=map_img_batch.cpu().numpy()[i, 0, :, :], axis=ax[i])\n\n if save_file_path is not None:\n f.savefig(save_file_path)\n\n if as_numpy:\n f.canvas.draw()\n width, height = f.get_size_inches() * f.get_dpi()\n mplimage = np.frombuffer(f.canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3)\n plt.cla()\n plt.close(f)\n\n return mplimage", "title": "" }, { "docid": "968e8429c1fa23ffe0ac80617f446638", "score": "0.4835228", "text": "def 
main():\n\n options, configs = parse_args(sys.argv)\n ip_reader = csv.reader(open(options.ips, 'rb'), delimiter=',')\n ip_converter = IpConverter(configs)\n width = options.width\n height = options.height\n color = 'black'\n # Here we apply ratios, since the map will be croped later on.\n width = int(width * 2.5)\n height = int(height * 1.33333333) \n world_map = Map(width, height, color)\n\n roll = options.gen\n current_roll = None\n\n for row in ip_reader:\n try:\n ip = proxy_filter(row[0])\n timestamp = parse(row[1])\n if current_roll is None:\n current_roll = getattr(timestamp, roll)\n elif current_roll != getattr(timestamp, roll) and\\\n getattr(timestamp, roll) % options.gen_accuracy == 0:\n current_roll = getattr(timestamp, roll)\n # print current map to file, and generate new one.\n world_map.render('%s/%s.jpg' % (configs['maps']['folder'],\n timestamp))\n\n data = ip_converter.get_gps_from_ip(ip)\n if data is not None:\n world_map.add_gps_to_map(data) \n except pygeoip.GeoIPError, error:\n logging.error(error)\n except ValueError, error:\n logging.error(error)\n\n world_map.render('%s/%s.jpg' % (configs['maps']['folder'],\n timestamp))\n \n # generates video oO\n video = Video(width, height) \n video.build(configs['maps']['folder'])", "title": "" }, { "docid": "40b8cde4718350bb315854d64300d7f6", "score": "0.48325807", "text": "def view_coverage_browser():\n indexfile = os.path.join(COVERAGE_DIR, 'index.html')\n if not os.path.exists(indexfile):\n print_err(C(': ').join(\n C('Missing coverage report file', 'red'),\n C(indexfile, 'blue'),\n ))\n print_err(C(' ').join(\n C('Run', 'red'),\n C('./runtests.py -R', 'blue').join('`', '`'),\n C('to generate it.', 'red'),\n ))\n return 1\n\n cmd = ['google-chrome', indexfile]\n print(C(': ').join(\n C('Running', 'cyan'),\n C(' ').join(\n C(cmd[0], 'blue', style='bright'),\n C(' '.join(cmd[1:]), 'blue'),\n ),\n ))\n subprocess.Popen(cmd, stderr=subprocess.DEVNULL)\n return 0", "title": "" }, { "docid": "e3a313c8be0e2e8b2704386ff1ccbc7e", "score": "0.48314133", "text": "def main():\n NAME = os.path.basename(__file__).split(\".\")[0]\n\n with its.device.ItsSession() as cam:\n props = cam.get_camera_properties()\n if (not its.caps.raw(props) or\n not its.caps.read_3a(props)):\n print \"Test skipped\"\n return\n\n cam.do_3a()\n\n req = its.objects.auto_capture_request()\n cap_dng, cap_yuv = cam.do_capture(req, cam.CAP_DNG_YUV)\n\n img = its.image.convert_capture_to_rgb_image(cap_yuv)\n its.image.write_image(img, \"%s.jpg\" % (NAME))\n\n with open(\"%s.dng\"%(NAME), \"wb\") as f:\n f.write(cap_dng[\"data\"])\n\n # No specific pass/fail check; test is assumed to have succeeded if\n # it completes.", "title": "" }, { "docid": "eec7c2399041533fb56193b89ffdbba6", "score": "0.4824971", "text": "def main():\n cur_dir = os.path.dirname(os.path.realpath(sys.argv[0]))\n input_tif = cur_dir + \"/../tests/data/Jamaica_dem.tif\"\n rst = RasterUtilClass.read_raster(input_tif)\n # metadata information\n print(\"rows: %d, cols: %d\" % (rst.nRows, rst.nCols))\n print(\"LLCornerX: %.2f, LLCornerY: %.2f\" % (rst.xMin, rst.yMin))\n print(\"cell size: %.1f\" % rst.dx)\n # basic statistics, nodata is excluded\n print(\"mean: %.2f, max: %.2f, min: %.2f\" % (rst.get_average(), rst.get_max(), rst.get_min()))\n print(\"std: %.2f, sum: %.2f\" % (rst.get_std(), rst.get_sum()))", "title": "" }, { "docid": "5e5c060fc70675e7deef53904d2e9cbe", "score": "0.4817491", "text": "def test_vis(layer_to_img_dict, model_fn, output_dir =\"./zeiler_fergus_vis\" ):", "title": "" }, { 
"docid": "d0a6bbf904f11b4cce066edcd8f802e9", "score": "0.48165083", "text": "def coverage(ctx):\n path = (_COVERAGE_PATH / \"index.html\").absolute()\n webbrowser.open(f\"file:///{path}\")", "title": "" }, { "docid": "5c09238ae978e4d6eed66c8fd3ecc767", "score": "0.48047954", "text": "def analyze(display=True):", "title": "" }, { "docid": "ec70ece3f91c8315b5ad8fbf08a67ab0", "score": "0.48042879", "text": "def inspect(output_dir):\n #log = getLog(\"tmp.log\")\n log.info(\"Check parcellation\")\n\n reg_path = output_dir\n\n cmdstr = op.join(reg_path, 'fsmask_1mm.nii.gz ')\n\n for p in lausanne_spec.keys():\n cmdstr += \" \" + op.join(reg_path, p, 'ROIv_HR_th.nii.gz')\n\n freeview_view_cmd = 'freeview %s' % cmdstr\n runCmd( freeview_view_cmd, log )", "title": "" }, { "docid": "9b2ed09837a856dfe770b7cc93a9a214", "score": "0.47905618", "text": "def task_coverage() -> DoitTask:\n path_tests = DG.test.path_tests\n cov_dir = DG.test.path_coverage_index.parent\n cov_html = f'--cov-report=html:\"{cov_dir}\" --html=\"{DG.test.path_test_report}\" --self-contained-html'\n diff_html = f'--html-report {DG.test.path_diff_test_report}'\n return debug_task([\n Interactive(\n f'poetry run python -m pytest \"{path_tests}\" {DG.test.args_pytest} --cov={DG.meta.pkg_name} {cov_html}',\n ),\n 'poetry run python -m coverage json', # Create coverage.json file for \"_write_coverage_to_md\"\n 'poetry run python -m coverage xml',\n Interactive(f'poetry run diff-cover coverage.xml {DG.test.args_diff} {diff_html}'),\n ])", "title": "" }, { "docid": "230ff2c38e3141dc68f6a32bcc0e6686", "score": "0.4780019", "text": "def test_processing():\n for cleantopo_process in [\n \"testdata/cleantopo_tl.mapchete\", \"testdata/cleantopo_br.mapchete\"\n ]:\n mp = mapchete.open(os.path.join(SCRIPTDIR, cleantopo_process))\n for zoom in range(6):\n tiles = []\n for tile in mp.get_process_tiles(zoom):\n output = mp.execute(tile)\n tiles.append(output)\n assert isinstance(output, BufferedTile)\n assert isinstance(output.data, ma.MaskedArray)\n assert output.data.shape == output.shape\n assert not ma.all(output.data.mask)\n mp.write(output)\n mosaic, mosaic_affine = create_mosaic(tiles)\n try:\n temp_vrt = os.path.join(OUT_DIR, str(zoom)+\".vrt\")\n gdalbuildvrt = \"gdalbuildvrt %s %s/%s/*/*.tif > /dev/null\" % (\n temp_vrt, OUT_DIR, zoom)\n os.system(gdalbuildvrt)\n with rasterio.open(temp_vrt, \"r\") as testfile:\n for file_item, mosaic_item in zip(\n testfile.meta[\"transform\"], mosaic_affine\n ):\n assert file_item == mosaic_item\n band = testfile.read(1, masked=True)\n assert band.shape == mosaic.shape\n assert ma.allclose(band, mosaic)\n assert ma.allclose(band.mask, mosaic.mask)\n finally:\n shutil.rmtree(OUT_DIR, ignore_errors=True)", "title": "" }, { "docid": "0a730a490732d7e97e279455c2300003", "score": "0.47758254", "text": "def test_with_dwarf(self):\n self.buildDwarf()\n self.do_my_test()", "title": "" }, { "docid": "65a2bdd7a6815c519e4162f477e867fb", "score": "0.47702894", "text": "def tile_classifier__simple_test(ctx, self, img):\n return tile_classifier__simple_test_impl(self, img)", "title": "" }, { "docid": "07fb06daeacde1a62deb09fc5f923e3f", "score": "0.47621217", "text": "def runAnalysis(aif):\n \n for blob in aif.getTraceSetBlobsForTargetAndExperiment():\n\n aif.runAverageTraceForTraceSetBlob(blob)\n\n di1_xor_di2 = aif.opXor(blob, \"di1\", \"di2\")\n di3_xor_di4 = aif.opXor(blob, \"di3\", \"di4\")\n di5_xor_di6 = aif.opXor(blob, \"di5\", \"di6\")\n\n aif.runHammingDistanceAnalysis(blob,\n di1_xor_di2, di3_xor_di4, 
\"HD(d1^d2,d3^d4)\")\n \n aif.runHammingDistanceAnalysis(blob,\n di1_xor_di2, di5_xor_di6, \"HD(d1^d2,d5^d6)\")\n \n aif.runHammingDistanceAnalysis(blob,\n di3_xor_di4, di5_xor_di6, \"HD(d3^d4,d5^d6)\")\n\n aif.runHammingDistanceAnalysis(blob, \"di1\",\"di2\")\n aif.runHammingDistanceAnalysis(blob, \"di1\",\"di3\")\n aif.runHammingDistanceAnalysis(blob, \"di1\",\"di4\")\n aif.runHammingDistanceAnalysis(blob, \"di1\",\"di5\")\n aif.runHammingDistanceAnalysis(blob, \"di1\",\"di6\")\n\n aif.runHammingDistanceAnalysis(blob, \"di2\",\"di3\")\n aif.runHammingDistanceAnalysis(blob, \"di2\",\"di4\")\n aif.runHammingDistanceAnalysis(blob, \"di2\",\"di5\")\n aif.runHammingDistanceAnalysis(blob, \"di2\",\"di6\")\n\n aif.runHammingDistanceAnalysis(blob, \"di3\",\"di4\")\n aif.runHammingDistanceAnalysis(blob, \"di3\",\"di5\")\n aif.runHammingDistanceAnalysis(blob, \"di3\",\"di6\")\n \n aif.runHammingDistanceAnalysis(blob, \"di4\",\"di5\")\n aif.runHammingDistanceAnalysis(blob, \"di4\",\"di6\")\n \n aif.runHammingDistanceAnalysis(blob, \"di5\",\"di6\")\n\n aif.runHammingWeightAnalysis(blob, \"di1\")\n aif.runHammingWeightAnalysis(blob, \"di2\")\n aif.runHammingWeightAnalysis(blob, \"di3\")\n aif.runHammingWeightAnalysis(blob, \"di4\")\n aif.runHammingWeightAnalysis(blob, \"di5\")\n aif.runHammingWeightAnalysis(blob, \"di6\")", "title": "" }, { "docid": "224d4f1415b6079fc8f2ce273019715c", "score": "0.47593078", "text": "def show(self):\n c_map=copy.deepcopy(self.map[:])\n c_map[self.agent.pos[0]][self.agent.pos[1]]=\"I\"\n for e in self.enemies:\n c_map[e[0]][e[1]]=\"E\"\n #print(c_map)\n print(\"---------------------------\")\n for i in range(len(c_map)):\n str=\"\"\n for c in c_map[i]:\n str=str+c\n print(\"|\"+str+\"|\")\n \n print(\"---------------------------\")\n str2=\"\"\n for i in range(self.agent.energy):\n str2=str2+\"H\"\n print(str2)", "title": "" }, { "docid": "4a1ad359f0f9776b598853ee7050b816", "score": "0.47584385", "text": "def print_hitmap(hitmap):\r\n lines = 0\r\n for i in xrange(256):\r\n if (i % 16) == 0:\r\n line = [\"%02X: \"% (i)]\r\n if i not in hitmap or hitmap[i][List([])] == 0:\r\n line.append(\" \")\r\n else:\r\n line.append(\"%03i\" % hitmap[i][List([])])\r\n\r\n if (i % 16) == 15:\r\n line = \" \".join(line)\r\n if len(line.strip()) > 3:\r\n lines += 1\r\n print line\r\n if lines == 0:\r\n print \" <nothing>\"\r\n return", "title": "" }, { "docid": "b5565c18e384263a07d53db2ee08c17b", "score": "0.4757261", "text": "def test_imshow():\n s = numpy.random.random((10, 10))\n return imshow(s, colormap='gist_earth')", "title": "" }, { "docid": "bd252b434d78687d5b9316d1131e0285", "score": "0.47531605", "text": "def test_plot_27_qubit_gate_map(self):\n # getting the mock backend from FakeProvider\n\n backend = FakeMumbai()\n\n fname = \"27_qubit_gate_map.png\"\n self.graph_plot_gate_map(backend=backend, filename=fname)\n\n ratio = VisualTestUtilities._save_diff(\n self._image_path(fname),\n self._reference_path(fname),\n fname,\n FAILURE_DIFF_DIR,\n FAILURE_PREFIX,\n )\n self.assertGreaterEqual(ratio, 0.99)", "title": "" }, { "docid": "c97a9e81b5269a159f2d3baa8ad997a6", "score": "0.474789", "text": "def cvg_main(mappedlocations, conflictlocations, bamfile, reference, outdir, prefix):\n newpath = 'coverage'\n os.makedirs(os.path.join(outdir,newpath))\n bar = progressbar.ProgressBar(widgets = ['Running SAMtools: ', progressbar.Bar(), '(', progressbar.ETA(),')'])\n for i in bar(range(1)):\n make_bed(mappedlocations, conflictlocations, reference, outdir, prefix)\n sam(bamfile, outdir, 
prefix)\n output(outdir, prefix)", "title": "" }, { "docid": "a911a47541d3d0b60f7d5ec64134ca42", "score": "0.4746671", "text": "def show(img):\n remap = \" .*#\"+\"#\"*100\n img = (img.flatten()+.5)*3\n if len(img) != 784: return\n print(\"START\")\n for i in range(28):\n print(\"\".join([remap[int(round(x))] for x in img[i*28:i*28+28]]))", "title": "" }, { "docid": "00fdd08672d2f1b9bd69a34939389e67", "score": "0.47436896", "text": "def test_init() -> None:\n # check that the map start with no info\n map_ = sm.SepalMap()\n\n # add a fullscreenControl\n control = sm.FullScreenControl(map_)\n map_.add(control)\n\n assert isinstance(control, sm.FullScreenControl)\n assert control in map_.controls\n assert control.zoomed is False\n assert \"fa-solid fa-expand\" in control.w_btn.children[0].children\n\n return", "title": "" }, { "docid": "2dad434907b996751cbbf6b0012a96f6", "score": "0.47427168", "text": "def test_run(self):\n CalibrationTest.setup_flags(self)\n log_file = os.path.join(self.io_args.log_dir, \"CalibrationLibMain.INFO\")\n\n # CalibrationLibMain assumes the operating frame to be 000000\n lib_main_input = self.io_args.color_full + \"_000000\"\n if not os.path.exists(lib_main_input):\n shutil.copytree(self.io_args.color_full, lib_main_input)\n for cam in os.listdir(lib_main_input):\n cur_img = os.path.join(lib_main_input, cam, f\"{self.io_args.first}.png\")\n new_img = os.path.join(lib_main_input, cam, \"000000.png\")\n os.rename(cur_img, new_img)\n\n self.run_app(\n \"CalibrationLibMain\",\n args=f\"{self.io_args.rig_out} {self.io_args.matches} {self.io_args.rig_in} {lib_main_input}\",\n log_file=log_file,\n )\n record = parse_calibration_results(\n self.io_args.log_dir, bin_name=\"CalibrationLibMain\"\n )\n self.check_metrics(record)", "title": "" }, { "docid": "3e7687404d80ae6c33acab4583d0a717", "score": "0.47350043", "text": "def test_imshow():\n s = np.random.random((10, 10))\n return imshow(s, colormap='gist_earth')", "title": "" }, { "docid": "dbacaf1685129d12771b8a3bafeb9645", "score": "0.47331163", "text": "def test_glyph(self):\r\n\r\n glyph = \"a\"\r\n self.put_text(glyph)\r\n\r\n #found = list(scrape.str_from_img(self.img))\r\n #assert len(found) == 1, found\r\n\r\n #self._show()\r", "title": "" }, { "docid": "9634646b116b42a3395be9c9594257ab", "score": "0.47267002", "text": "def display_mapping(mapping):\n img_size = mapping[\"frame_size\"]\n win_name = \"mappings\"\n win = cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)\n\n for frame, cmd in izip(mapping[\"frames\"], mapping[\"commands\"]):\n read_cmd = command_readable_mapping[cmd]\n msg = read_cmd\n img = frame.reshape(img_size)\n cv2.putText(img, msg, (10, 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.6, (0, 0, 0))\n cv2.imshow(win_name, img)\n if cv2.waitKey(0) & ord(\"q\") == ord(\"q\"):\n break\n\n cv2.destroyAllWindows()", "title": "" }, { "docid": "0a35767f00285b47d024b9be766389b6", "score": "0.47253826", "text": "def ShowRasterRef(self) -> bool:", "title": "" }, { "docid": "0a35767f00285b47d024b9be766389b6", "score": "0.47253826", "text": "def ShowRasterRef(self) -> bool:", "title": "" }, { "docid": "63091d4aa3ccba4681ade7b286339f3c", "score": "0.47246438", "text": "def get_map_and_measure(png_a):\n\n import os\n from CPAC.qc.utils import get_map_id\n\n measure_name = None\n map_name = None\n\n if '_fwhm_' in png_a:\n measure_name = os.path.basename(os.path.dirname(os.path.dirname(png_a)))\n else:\n measure_name = os.path.basename(os.path.dirname((png_a)))\n\n str_ = os.path.basename(png_a)\n\n if 'sca_tempreg' in 
png_a:\n map_name = get_map_id(str_, 'maps_roi_')\n\n if 'sca_roi' in png_a:\n map_name = get_map_id(str_, 'ROI_')\n\n if 'dr_tempreg' in png_a:\n map_name = get_map_id(str_, 'temp_reg_map_')\n\n if 'centrality' in png_a:\n map_name = get_map_id(str_, 'centrality_')\n\n return map_name, measure_name", "title": "" }, { "docid": "36b65bab80c3c41304d6668ee9995edb", "score": "0.47228912", "text": "def test_run(self):\n in_bam = 'test.bam'\n index = 'test.bam.bai'\n picard_memory = '58G'\n picard_path = 'path/to/picard'\n temp_dir = 'path/to/temp/dir'\n lines = ps.general._picard_index(in_bam, index, picard_memory,\n picard_path, temp_dir)", "title": "" }, { "docid": "a68f3ca07821c27980fea0d3f693a686", "score": "0.47226566", "text": "def test_get_summary_heat(self):\n pass", "title": "" }, { "docid": "c5d6fb16a706bab3f4f1da4c77b58cff", "score": "0.4718451", "text": "def simple_drizzle(images, coverage_map, wcs_file, filt):\n\n # This calculates how many of the input images cover each\n # pixel of the output image grid. Used to determine best\n # median algorithm for drizzle\n median_coverage = np.median(coverage_map[coverage_map>0])\n out_name = '{}_final'.format(filt)\n if median_coverage>4:\n med_alg = 'median'\n combine_nhigh = 1\n else:\n med_alg = 'minmed'\n combine_nhigh = 0\n\n # Due to bug in astrodrizzle, this can't yet be optimized\n # to use the mdriztab\n\n astrodrizzle.AstroDrizzle(images, clean=True, build=True,\n context=False, preserve=False,\n combine_type=med_alg,\n combine_nhigh=combine_nhigh,\n in_memory=False, final_wcs=True,\n final_refimage=wcs_file,\n output=out_name)", "title": "" }, { "docid": "dc0cb0aca6f70c1f84aad52a73bf46fc", "score": "0.47070697", "text": "def coverage(context):\n cmd = \"pytest -s --color no --cov=function_pipe/core --cov-report html\"\n context.run(cmd, echo=True)\n import webbrowser\n webbrowser.open(\"htmlcov/index.html\")", "title": "" }, { "docid": "bb189ab1309927ca956683d07f55e194", "score": "0.470688", "text": "def run_demo() -> None:\n qc3 = StandardImagingQC3.from_demo_image()\n qc3.analyze()\n qc3.plot_analyzed_image()", "title": "" }, { "docid": "4e9a88ce9716dee489e09726409ec209", "score": "0.47065702", "text": "def display_image(heatmap, class_index):\n\tplt.imshow(heatmap[:,:,class_index])\n\tplt.show()", "title": "" }, { "docid": "dca23b9533a2413249880941ce1aebda", "score": "0.47005153", "text": "def test_image_processing():\n src_path = '/media/bryce/1TB Samsung/ml_datasets/cityscapes_data/gtFine/train/aachen'\n inst_path = os.path.join(src_path, 'aachen_000000_000019_gtFine_instanceIds.png')\n seg_path = os.path.join(src_path, 'aachen_000000_000019_gtFine_labelIds.png')\n\n inst_img = np.array(Image.open(inst_path), dtype=np.int16)\n\n base_img = seg_cv2_bgr(seg_path)\n\n bboxes = bbox_info_from_instance(inst_img)\n\n # assert all(bbox['train_id'] < 19 for bbox in bboxes), f\"Invalid Train ID found: {inst_path}.\"\n\n write_info_json(inst_path, bboxes)\n\n visualise_output(base_img, bboxes)", "title": "" }, { "docid": "647493bcbca791430c7c9fcd16eaa5d4", "score": "0.46926156", "text": "def _run_tests(test_suite, package_name):\n count = test_suite.countTestCases()\n print(\"########\")\n print(\"%s tests has been discovered in %s\" % (count, package_name))\n print(\"Python GDAL : %s\" % gdal.VersionInfo(\"VERSION_NUM\"))\n print(\"########\")\n\n cov = coverage.Coverage(\n source=[\"/tests_directory/quick_api\"],\n omit=[\n \"*/quick_api_dialog_base.py\",\n \"*/quick_api.py\",\n \"*/__init__.py\",\n \"*/tests/*\",\n 
\"*/test_suite.py\",\n ],\n )\n cov.start()\n\n unittest.TextTestRunner(verbosity=3, stream=sys.stdout).run(test_suite)\n\n cov.stop()\n cov.save()\n cov.report(file=sys.stdout)", "title": "" }, { "docid": "031f25a2afd44ea1ea8f8265161d8513", "score": "0.46898025", "text": "def pcap_analysis(self, pcap_md5: str) -> requests.Response:\n response = requests.get(\n self.base_url + self.version + '/pcaps/{0}/analysis'.format(pcap_md5),\n headers=self.headers\n )\n\n return response", "title": "" }, { "docid": "675a48decb00ebf576ee9f4babf7b254", "score": "0.4689187", "text": "def test_CAS8357(self):\n myia = iatool()\n myia.fromshape(\"\",[30,30])\n reg = \"circle[[-1pix,-1pix], 10pix]\"\n res = myia.statistics(region=reg)\n myia.done()\n self.assertTrue(res['npts'] == 70, 'Wrong number of points')", "title": "" }, { "docid": "d5b3f000570cc3add717dac71290504e", "score": "0.46868145", "text": "def show_cmaps():\n x = np.linspace(0, 100, 100)[None, :]\n fig, axs = plt.subplots(int(np.ceil(len(crameri_cmaps) / 7)), 7, figsize=(22, 10))\n fig.subplots_adjust(hspace=.8, wspace=.08)\n axs = axs.ravel()\n for ax in axs:\n ax.axis('off')\n for c, cmap_selected in enumerate(sorted(crameri_cmaps.keys())):\n colourmap = crameri_cmaps[cmap_selected]\n axs[c].pcolor(x, cmap=colourmap)\n axs[c].text(5, -0.3, cmap_selected, fontsize=26)", "title": "" }, { "docid": "54cd0ff294bfe914096081fa6d7e21fb", "score": "0.4684422", "text": "def main():\n\n # Parse arguments ---\n args = parse_arguments()\n\n # Load log ---\n if args.debug:\n log_level = logging.DEBUG\n elif args.quiet:\n log_level = logging.NOTSET\n else:\n log_level = logging.INFO\n\n log = load_log(log_level)\n log.info('\\n 2D Map Extractor')\n log.info(' by {0}'.format(__author__))\n log.info(' Version {0} - {1}\\n'.format(__version__, __date__))\n\n # Let's start to count the time ---\n tstart = datetime.datetime.now()\n log.debug(' [{0}] Script Start'.format(tstart.strftime('%H:%M:%S')))\n\n # Perform the 2D-Map Extraction ---\n results = perform_2dmap_extraction(\n args.filename, log, args.pool_size, args.algorithm\n )\n\n # Write the results to a FITS file ---\n write_results(\n results, args.filename, args.output, args.algorithm,\n wavelength=args.wavelength\n )\n\n # Now I am good. 
The script is already done ---\n tend = datetime.datetime.now()\n delta_t = tend - tstart\n\n log.info('')\n log.debug(' [{0}] Script finished.'.format(tend.strftime('%H:%M:%S')))\n log.debug(' Total time elapsed: {:s}'.format(str(delta_t)))\n log.info('All done.')", "title": "" }, { "docid": "92df6a69d9ad3315fd73e1a4faae6fe9", "score": "0.4675138", "text": "def inspect(component_run_id, address: str = \"\"):\n # Set address\n if address and len(address) > 0:\n set_address(address)\n\n show_info_card(component_run_id)", "title": "" }, { "docid": "07710d40870a400585b6a1bca36914b0", "score": "0.46741608", "text": "def show_contour():\n draw = ImageDraw.Draw(OutputImageObj)\n for list in PixelLocations:\n for pixel in list:\n i = pixel[0]\n j = pixel[1]\n draw.point((i,j), fill=128)\n print \"Optimal Contour:\"\n for i in range(1, len(Contour)):\n draw.line(Contour[i-1] + Contour[i], fill=256)\n print \"\"\" %d %d \"\"\" % (Contour[i-1][0], Contour[i-1][1])\n OutputImageObj.show()", "title": "" }, { "docid": "fbdb203951ddb22d24aac5a5dc9075e5", "score": "0.46704444", "text": "def show_sample():\n # an example to show difference between grayscale image and binary image\n license_plate = imread(\"data/crop_h1/I00000.png\", as_grey=True)/255.0\n print(license_plate.shape)\n # see the difference between gray scale and binary image\n gray_car_image = license_plate * 255\n _, (ax1, ax2) = plt.subplots(1, 2)\n ax1.imshow(gray_car_image, cmap=\"gray\")\n # threshold_otsu is an algorithm to reduce grayscale image to binary image\n threshold_value = threshold_otsu(gray_car_image)\n binary_car_image = gray_car_image > threshold_value\n ax2.imshow(binary_car_image, cmap=\"gray\")\n print(binary_car_image)", "title": "" }, { "docid": "701ff4791192cd7a6b81dd73cbefbdae", "score": "0.4669743", "text": "def test(ctx, debug=False, maxfail=0):\n print_header(\"RUNNING TESTS\")\n flags = [\n \"--verbose\",\n f\"--cov={package.__name__}\",\n \"--cov-branch\",\n f'--cov-report=\"html:{_COVERAGE_PATH}\"',\n ]\n if debug:\n flags.append(\"--capture=no\")\n if maxfail:\n flags.append(f\"--maxfail={int(maxfail)}\")\n\n ctx.run(f\"pytest {' '.join(flags)} tests/\", pty=True)\n print(f\"\\nCoverage: {get_total_coverage(ctx)}\")", "title": "" }, { "docid": "ea29f2e54879adda842e12aca35d5477", "score": "0.466707", "text": "def main():\n parser = argparse.ArgumentParser(allow_abbrev=False)\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-v\", \"--verbose\", help=\"display all files processed\",\n action=\"store_const\", const=logging.INFO, dest='loglevel')\n group.add_argument(\"-d\", \"--debug\", help=\"display debugging information\",\n action=\"store_const\", const=logging.DEBUG, dest='loglevel')\n\n parser.add_argument('-o', '--output', default='pinmap.pcf',\n help='output file with .pcf extension')\n parser.add_argument(\"-n\", \"--dry-run\", help=\"examine files without changing\",\n action=\"store_true\")\n parser.add_argument(\"file\", nargs=1, help=\"file to check\")\n args = parser.parse_args()\n logging.basicConfig(level=args.loglevel or logging.WARNING)\n\n if args.dry_run:\n logging.info(\"Dry run -- no files will be overwritten\")\n\n symtable = []\n for fname in args.file:\n with open(fname) as source_file:\n logging.info(r\"in: %s\", fname)\n symtable = [mapping for line in source_file if (\n mapping := get_mapping(line)) is not None]\n\n if not symtable:\n logging.warning('No @MAP_IO statements found for %s', args.output)\n\n if not args.dry_run:\n logging.info(r'out: %s', 
args.output)\n with open(args.output, mode='wt') as pcf_file:\n pcf_file.write(PCF_HEADER)\n for item in symtable:\n pcf_file.write(item.to_set_io())", "title": "" }, { "docid": "f5bfc5a98918592f1bca67e35dc0b50c", "score": "0.4666668", "text": "def test(coverage=False):\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import sys\n os.environ['FLASK_COVERAGE'] = '1'\n os.execvp(sys.executable, [sys.executable] + sys.argv)\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n if COV:\n COV.stop()\n COV.save()\n print('Coverage Summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()", "title": "" }, { "docid": "9fcc91119c9cbfea625259d7d7afa261", "score": "0.46650156", "text": "def run_canica(params):\n dataset = datasets.fetch_adhd()\n func_files = dataset.func\n output_dir = osp.abspath(params.pop('output_folder'))\n prepare_report_directory(output_dir)\n run(func_files, params, output_dir)\n json.dump(params, open(osp.join(output_dir, 'params.json'), 'w'))\n img_src_filenames = [osp.join(output_dir, 'images', fname) for fname in\n os.listdir(osp.join(output_dir, 'images'))\n if fname.startswith('IC_')]\n report = generate_report(params, img_src_filenames)\n reportindex = osp.abspath(osp.join(output_dir, 'index.html'))\n report.save_html(reportindex)\n return ('file', 'file://{}'.format(reportindex))", "title": "" }, { "docid": "9254448f3f41a8594beba8a2005890e5", "score": "0.46635103", "text": "def run(self, showDetails=False):\n \n pass", "title": "" }, { "docid": "b6eff3134ed4f97c8f6b63d9cc6bfe1e", "score": "0.46633792", "text": "def show_cartoon(contest):\n image = mpimg.imread(INFO_DIR+str(contest)+\"/\"+str(contest)+\".jpg\")\n plt.figure(figsize=(12, 12))\n plt.imshow(image, cmap='Greys_r')\n plt.axis('off')", "title": "" }, { "docid": "79287e033e60334c42fe8373afbdcf8f", "score": "0.46548027", "text": "async def fetch_coverage() -> coverage.Coverage:\n client = TestingCupid()\n data = await client.coverage()\n await client.close()\n return data", "title": "" }, { "docid": "29024d67bc14cf9d4dea2504f87630f0", "score": "0.46501207", "text": "def test(coverage=False):\n if coverage and not os.environ.get('FLASK_COVERAGE'):\n import sys\n os.environ['FLASK_COVERAGE'] = '1'\n os.execvp(sys.executable, [sys.executable] + sys.argv)\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)\n if COV:\n COV.stop()\n COV.save()\n print('Test coverage summary:')\n COV.report()\n basedir = os.path.abspath(os.path.dirname(__file__))\n covdir = os.path.join(basedir, 'tmp/coverage')\n COV.html_report(directory=covdir)\n print('HTML version: file://%s/index.html' % covdir)\n COV.erase()", "title": "" }, { "docid": "cb35b605a9b9f440b5fc3814cd8443ef", "score": "0.46497378", "text": "def test_show_command(self, capsys):\n main(\"show pydicom::MR_small_RLE.dcm\".split())\n out, err = capsys.readouterr()\n\n assert \"Instance Creation Date DA: '20040826'\" in out\n assert out.endswith(\"OB: Array of 126 elements\\n\")\n assert err == \"\"\n\n # Get a specific data element\n main(\"show pydicom::MR_small_RLE.dcm::LargestImagePixelValue\".split())\n out, _ = capsys.readouterr()\n assert \"4000\" == out.strip()", "title": "" }, { "docid": "aa5c851f00dddf23a1fba3b5955740df", "score": "0.46470836", 
"text": "def info(test_func):\n\n @wraps(test_func)\n def test_func_wrapper(nom, test):\n print '\\n============================================================='\n print 'Starting test:', test_func.__name__\n print 'Testing highland analysis:', nom.name\n print 'originally processed with:', nom.input_file\n print 'nominal version:', nom.input_type, nom.version\n print 'test version:', test.input_type, test.version, '\\n'\n test_func(nom, test)\n\n return test_func_wrapper", "title": "" }, { "docid": "437a5ac03fddaa97b642ffba4f22bbc8", "score": "0.46434614", "text": "def show_map(self, rbt_pos):\n # normalise values in numpy matrix occ_grid.m to values between 0 and 255, + RGB.\n # img_mat = uint8(self.m * 255) # multiply by 255 from 0-1 to 0-255\n # img_mat = stack((img_mat, img_mat, img_mat), 2) # convert to RGB (3rd dim)\n # print(img_mat.shape) # (100, 100, 3)\n \n # color the robot position as a crosshair\n rbt_idx = self.pos2idx(rbt_pos) # robot index\n # img_mat[rbt_idx[0], rbt_idx[1], :] = (0, 255, 0) # green\n img_mat = self.m.copy()\n img_mat[rbt_idx] = 1 # white\n img_mat[rbt_idx] = 0\n img_mat[rbt_idx[0] - 1, rbt_idx[1]] = 1\n img_mat[rbt_idx[0] + 1, rbt_idx[1]] = 1\n img_mat[rbt_idx[0], rbt_idx[1] - 1] = 1\n img_mat[rbt_idx[0], rbt_idx[1] + 1] = 1\n \n # print to a window 'img'\n cv2.imshow('img', img_mat)\n cv2.waitKey(10);", "title": "" }, { "docid": "7f88957f0e8d8f4c413bf8b3de4c55a9", "score": "0.46375304", "text": "def test_cmap2act(self):\n colors.cmap2act('viridis', filename=self.f)\n ref = os.path.join(self.ref_dir, 'viridis.act')\n\n assert filecmp.cmp(self.f, ref)", "title": "" } ]
7de2e432a41ce01f92bdcf930bbdbe45
A rule statement that defines a string match search for AWS WAF to apply to web requests. See Byte Match Statement below for details.
[ { "docid": "588cd4e70cd75d6bde629d1c0c44578a", "score": "0.54430264", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" } ]
[ { "docid": "90ea33f01c95e611308c46f7671fe928", "score": "0.6055283", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementAndStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "37b1bade8a13772a1924d586704bd672", "score": "0.60350204", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "c07caf852cad3f61887cc0c04acf80d3", "score": "0.60124177", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "46b5cb9235185a3e27fe5006d9e51615", "score": "0.5988622", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "1e94d06db038d8401e3bba08848f9e27", "score": "0.5986877", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "b185edbbcf2fddc1e7c66b74474f46e2", "score": "0.59791476", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "4d60b5e41f96c8acc8626113f3af3137", "score": "0.5976147", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementOrStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "187f497294343371bdf70da54614d4c0", "score": "0.5970547", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementAndStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "211d0c9f82c3481006caa81549caee72", "score": "0.5967579", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementOrStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "06ef80d3ae0ee352594ad4496b624769", "score": "0.59659845", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "25432b072bf96b04f097e91d94180f72", "score": "0.5965506", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "08af4ef69d822a81d4bbeaaa1ed47e45", "score": "0.59638906", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementAndStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return 
pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "7cae839a37a3c67205d80db18810ef2c", "score": "0.59469104", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "660436ffd9e7f0ea30168c3ea987c8cb", "score": "0.5943673", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "2d9eabcd9fb79cdd6b3b8634007147b6", "score": "0.5931858", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "302cc600cd10c159077449d556fb40b7", "score": "0.59254676", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementNotStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "1465b0769d5079e441ae518a7f9f9eea", "score": "0.5920594", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementAndStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "650cd976e29b4cfd9858e770e2d1234b", "score": "0.59054774", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementNotStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "eef65009e0116bbd7ca730775c8c0c1b", "score": "0.5894218", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementAndStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "2da3c86c2e1228b1375bdd1286c6d317", "score": "0.58768034", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementNotStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "f3f309c946a0376cfbdc1eab97ef51cf", "score": "0.5875306", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "75dd1e3bbb23a92627f42c2e9f3199e9", "score": "0.5872796", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "943b8d14175e786a1d650c4db39a9053", "score": "0.5872302", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementNotStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "a837434a4f2ac0c998ea4feb15c85ec7", "score": "0.58694166", "text": "def byte_match_statement(self) -> 
Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementNotStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "45cfbaf06ab2254a077d47e289d2ac86", "score": "0.58691984", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementOrStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "d6caf2260d1330d7af7cfcef165e4be6", "score": "0.5864394", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementAndStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "88c09673c44eef79f4e3647eac5fa96e", "score": "0.58545715", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementAndStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "d659677e7e5145648a3b2fa0e783dea3", "score": "0.5851143", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementNotStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "e3b9ffa7c147cc3c90002cce76c4c9f4", "score": "0.5846507", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementNotStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "8fdf75f3739b5b94ac38dd438be85efd", "score": "0.584298", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "83d6ed5e1b6673a0190e99f492fe8cc0", "score": "0.5842126", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementNotStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "dd2805c501a696b0fe4709b4006433d4", "score": "0.58298934", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "4f9b1825dd3289049e15e6e4de98b53d", "score": "0.58291286", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementOrStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "96cedc9ba3b3baf89d96d514deb85e37", "score": "0.58158344", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "70d3f90055878a9e124852de47964bf2", "score": "0.5815436", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementAndStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, 
\"byte_match_statement\")", "title": "" }, { "docid": "aaf97b938073f2bcdddb86a961127576", "score": "0.5811115", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementOrStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "1e521d1cc80b9dc654de31911454ecc4", "score": "0.5806521", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "79eb9179fcb4c8d3a594704f4c5030e0", "score": "0.5804012", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementOrStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "0f7fdde4e6735e967771ed68453b88c8", "score": "0.57720006", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementNotStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "3bd855293e70252b24a72f3e713837ae", "score": "0.5767267", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "15fc1837ee33de50497b8bceca1164c3", "score": "0.5600881", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementAndStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "43be85b96aae0ec5dd79ceb03bf0e12a", "score": "0.5579652", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementNotStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "b27aaafcf484652887e96a9d34bb9769", "score": "0.55768216", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementNotStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "28d07d5c9b0ae297fe07ce16626ce3c1", "score": "0.5560359", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementOrStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "c243d741d07111cde308f1b6a05474b3", "score": "0.55501586", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementAndStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "f254bb94ffba4eb972e32f852beb2154", "score": "0.5530985", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementAndStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": 
"0a922fd098a717bc390cac3a3e441b2e", "score": "0.55158365", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "d96be1e51e119585f7ecb9d43e8b6085", "score": "0.55146426", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementOrStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "167f755d574ca4fc212f5708f3c51728", "score": "0.5513237", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementNotStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "a295695a3436291c629430a76f67368b", "score": "0.5510537", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementOrStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "f2aa7417970f67542931bf89248d304e", "score": "0.55066735", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "d3559e170ee208656a83ec7109a46526", "score": "0.54610586", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['WebAclRuleStatementRateBasedStatementScopeDownStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "c86e1248854060cd206a0c4ec1c33981", "score": "0.54358566", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementNotStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "b1baf88753686e0a92fcc3abd27f0daf", "score": "0.5433733", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['RuleGroupRuleStatementAndStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "15065d1c53d38dd998bc2da392246ee6", "score": "0.543036", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementAndStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "3c640962ecbb251be98c908fb990a117", "score": "0.541499", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementOrStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "6fc9d0fabe5f2360fa60eb279d7920a3", "score": "0.54079485", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementNotStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "68b784c51444fa5c9e15e8adf3f1e5f2", "score": "0.53983206", "text": "def byte_match_statement(self) -> 
Optional[pulumi.Input['RuleGroupRuleStatementOrStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "22ab98b8b10e07b9711d415f0d47f9f6", "score": "0.53966856", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "2309c88dfa1c7a08bd6a494db8368a54", "score": "0.53933", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementAndStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "729e9efdbc9600f29c89bdb1f3e577ad", "score": "0.5392608", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementNotStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "aeaa52f5b62ee0deb660364b64344323", "score": "0.5385919", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementAndStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "06d7b9052fc55b372ee9d959fcf4dfb7", "score": "0.5384918", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementNotStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "cfdb290985e565a10675182947500265", "score": "0.5382792", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementOrStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "353228a97d3b84627252e1d850905208", "score": "0.53812593", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['RuleGroupRuleStatementAndStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "646a1625afc630a4726c031635613b10", "score": "0.5379696", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementAndStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "0f4235da4d750fff0260c4f1ffbcf48a", "score": "0.5377892", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "ab7fdddc1a407643c4ef565890ebc013", "score": "0.5367528", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['RuleGroupRuleStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "a8f3142f60fd1ee48b35127166cd9b93", "score": "0.5365676", "text": "def match(self, match):\r\n pass", "title": "" }, { "docid": "991670b48a340f62646bb9a6768f29b9", "score": "0.53637457", "text": "def field_to_match(self) -> 
Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementAndStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "443a7401789eeff44d82a158661d4caa", "score": "0.5361841", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementNotStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "d25b138c90c9ddac471edaf78ba0a442", "score": "0.5358515", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementOrStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "b8425ebe37784f485f0c12022b12be5d", "score": "0.53569084", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementAndStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "5a904b4eebe66a211c5fb8a6cd57c1f0", "score": "0.53545016", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementNotStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "7c0390f734eaa35906dedc2eeaeef98e", "score": "0.5352244", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementOrStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "c5765610e2df617ca75788b506933a52", "score": "0.5348085", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "3fd669c7cbdcff82ae5d35fe5ce7c6da", "score": "0.5343908", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['RuleGroupRuleStatementOrStatementStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "659e9ee9281454861721792bc1dc8fb8", "score": "0.5341646", "text": "def test_payload_match(self):\n # Case 1 possible ssrf\n self.ssrf_obj.payloads = [\"http://2852039166/\"]\n result=self.ssrf_obj.payload_match(\"http://2852039166/\")\n self.assertTrue(result)\n\n # Case 2 No ssrf\n\n self.ssrf_obj.payloads = [\"http://2852039166/\"]\n result=self.ssrf_obj.payload_match(\"https://www.google.com\")\n self.assertFalse(result)", "title": "" }, { "docid": "acf94326e92f9d6e57043f5e3665db5f", "score": "0.5340817", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "31e532949f9361df5cef538ed413a5a0", "score": "0.5337767", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementOrStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "e7b434d4a7331ef1984a2ce9567b396f", "score": "0.53277296", "text": "def field_to_match(self) -> 
Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementNotStatementStatementOrStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "7cce27036648076d9ad592d200c7d49f", "score": "0.53257835", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['RuleGroupRuleStatementAndStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "910994d8493a1bbbde352ae9c8872838", "score": "0.5324175", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "693b942f107724743a82c34779d47a24", "score": "0.5322797", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "372023cbbbfa7d6fba7816e2d54012d1", "score": "0.532265", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementOrStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "458c45a761a74f26221a60bf3cfa66da", "score": "0.5322235", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['RuleGroupRuleStatementNotStatementStatementAndStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "73b88662c7ded1344a5b8013b816b1e8", "score": "0.5316091", "text": "def test_event_match_body(self) -> None:\n\n # if the key is `content.body`, the pattern matches substrings.\n\n # non-wildcards should match\n condition = {\n \"kind\": \"event_match\",\n \"key\": \"content.body\",\n \"pattern\": \"foobaz\",\n }\n self._assert_matches(\n condition,\n {\"body\": \"aaa FoobaZ zzz\"},\n \"patterns should match and be case-insensitive\",\n )\n self._assert_not_matches(\n condition,\n {\"body\": \"aa xFoobaZ yy\"},\n \"pattern should only match at word boundaries\",\n )\n self._assert_not_matches(\n condition,\n {\"body\": \"aa foobazx yy\"},\n \"pattern should only match at word boundaries\",\n )\n\n # wildcards should match\n condition = {\n \"kind\": \"event_match\",\n \"key\": \"content.body\",\n \"pattern\": \"f?o*baz\",\n }\n\n self._assert_matches(\n condition,\n {\"body\": \"aaa FoobarbaZ zzz\"},\n \"* should match string and pattern should be case-insensitive\",\n )\n self._assert_matches(\n condition, {\"body\": \"aa foobaz yy\"}, \"* should match 0 characters\"\n )\n self._assert_not_matches(\n condition, {\"body\": \"aa fobbaz yy\"}, \"? should not match 0 characters\"\n )\n self._assert_not_matches(\n condition, {\"body\": \"aa fiiobaz yy\"}, \"? 
should not match 2 characters\"\n )\n self._assert_not_matches(\n condition,\n {\"body\": \"aa xfooxbaz yy\"},\n \"pattern should only match at word boundaries\",\n )\n self._assert_not_matches(\n condition,\n {\"body\": \"aa fooxbazx yy\"},\n \"pattern should only match at word boundaries\",\n )\n\n # test backslashes\n condition = {\n \"kind\": \"event_match\",\n \"key\": \"content.body\",\n \"pattern\": r\"f\\oobaz\",\n }\n self._assert_matches(\n condition,\n {\"body\": r\"F\\oobaz\"},\n \"backslash should match itself\",\n )\n condition = {\n \"kind\": \"event_match\",\n \"key\": \"content.body\",\n \"pattern\": r\"f\\?obaz\",\n }\n self._assert_matches(\n condition,\n {\"body\": r\"F\\oobaz\"},\n r\"? after \\ should match any character\",\n )", "title": "" }, { "docid": "39be4150c64f856db770eafa362ce6c8", "score": "0.53157616", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementAndStatementStatementOrStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "d7ecc99a9f73409d632e59a2237646a8", "score": "0.5315204", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementAndStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "c350a64a8521fafa1f824c003ce9e7e8", "score": "0.5312725", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementAndStatementStatementOrStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "dd5b817e4a305eeab000c216d4226707", "score": "0.5309885", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "7d42d0fc43aa76a97fc60f2a3782c5a1", "score": "0.53063047", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "fe7c81fd554e3af7d3e1036854c3b3e9", "score": "0.52973425", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementAndStatementStatementOrStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "9aa1062d7f0bc12b1caef8a6d3d5be6c", "score": "0.5296215", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['RuleGroupRuleStatementOrStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "38a33be35b3e795cf0167070e26311e3", "score": "0.5291868", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['RuleGroupRuleStatementOrStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "e5ec5a8043555885ea6e73fcda540cb3", "score": "0.5290918", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "52b91d36a44976add6808041ba60a730", "score": "0.5290032", "text": "def field_to_match(self) -> 
Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementNotStatementStatementOrStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "cb577aa298501244e45529ab9b618479", "score": "0.52869815", "text": "def byte_match_statement(self) -> Optional[pulumi.Input['RuleGroupRuleStatementNotStatementStatementNotStatementStatementByteMatchStatementArgs']]:\n return pulumi.get(self, \"byte_match_statement\")", "title": "" }, { "docid": "fde75b566773ae8889e163a2bfd2f7bd", "score": "0.5284116", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementNotStatementStatementAndStatementStatementOrStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" }, { "docid": "cf89773910a299ff2a3e40c3cd66a87b", "score": "0.5281293", "text": "def field_to_match(self) -> Optional[pulumi.Input['WebAclRuleStatementOrStatementStatementByteMatchStatementFieldToMatchArgs']]:\n return pulumi.get(self, \"field_to_match\")", "title": "" } ]
bce6738a1a6462f06b945c022ba3e93f
Read solution from a binary file. Please refer to README.md for description of the binary file format used here.
[ { "docid": "eea1f3696bea425597d502f3e1952a69", "score": "0.6074145", "text": "def read_solution_from_file(path_to_data):\n\n data = open(path_to_data, \"rb\").read()\n\n # nx: number of x points\n # ----------\n\n start = 0\n end = 4*3\n (_, nx, _) = struct.unpack(\"@iii\", data[start: end])\n\n # nt: number of t points\n # ----------\n\n start = end\n end = start + 4*3\n (_, nt, _) = struct.unpack(\"@iii\", data[start: end])\n\n # x values\n # ---------\n\n start = end + 4\n end = start + nx * 8\n x_values = array.array(\"d\")\n x_values.frombytes(data[start:end])\n\n # t values\n # ---------\n\n start = end + 8\n end = start + nt * 8\n t_values = array.array(\"d\")\n t_values.frombytes(data[start:end])\n\n # Solution: 2D array\n # ---------\n\n start = end + 8\n end = start + nx * nt * 8\n solution = array.array(\"d\")\n solution.frombytes(data[start:end])\n solution = np.reshape(solution, (nt, nx), order='C')\n\n return (x_values, t_values, solution)", "title": "" } ]
[ { "docid": "54d9052ff2375d0421dc2be1610b999e", "score": "0.65914464", "text": "def read_binary(filename):\n with FileBinaryReader(filename) as f:\n return f.read()", "title": "" }, { "docid": "83beb9771c450aa37bce0e79e62b5fcc", "score": "0.6584203", "text": "def load_solution(filepath):\n with open(filepath) as f:\n solution = pickle.load(f)\n return solution", "title": "" }, { "docid": "081ddc5df69ce0eeccfae1756e551372", "score": "0.6413085", "text": "def load_binary(path):\n try:\n with open(path, \"rb\") as f:\n loaded_data = pickle.load(f)\n print(f\"Data binary file loaded from {path}\")\n return loaded_data\n except Exception as e:\n print(\"Binary file couldn't be loaded.\")", "title": "" }, { "docid": "e3eebf94bda4497f169fc6cecd832960", "score": "0.6381922", "text": "def open_for_reading_binary(file_path):\n try:\n f = open(file_path, 'r')\n return f\n except IOError as e:\n raise _io_load_error(e, file_path)", "title": "" }, { "docid": "c83e78e91312904f676408d421479e08", "score": "0.6330716", "text": "def load(raw):\n return BinFile(raw[:0x20], raw[0x20:])", "title": "" }, { "docid": "7ad1b97c48f1973bf44bb60403ee3be3", "score": "0.62161446", "text": "def read_binary_file(file_name):\n return read_file(file_name, 'rb')", "title": "" }, { "docid": "5833686738a2e2586680303015219694", "score": "0.6203299", "text": "def load_bin_file(filepath):\n datas = []\n f = open(filepath, 'rb')\n s = f.read()\n for x in s:\n rdata = struct.unpack(\"B\", x)[0]\n datas.append(rdata)\n return datas", "title": "" }, { "docid": "0232133f58de643f09d0050bc7120f01", "score": "0.61890787", "text": "def read_binary(self, filename):\n return self._read_file_or_404(filename, False)", "title": "" }, { "docid": "be4a4123358aab2f659222ceea275d1f", "score": "0.61867404", "text": "def read(path_to_file):", "title": "" }, { "docid": "83f68aa4d8f22c96017e59dedd80e9b4", "score": "0.61727834", "text": "def open_for_reading_binary(filename):\n return FileBinaryReader(filename)", "title": "" }, { "docid": "1fa5301b50a7a026be3c790a1bc1643e", "score": "0.61461914", "text": "def readSol(filename):\r\n\r\n if not isinstance(filename, str):\r\n raise TypeError(\"argument 1 to readSol must be a string\")\r\n #end if\r\n data_type=sol_data_type()\r\n record_length=numpy.empty(0, dtype=data_type).itemsize\r\n header_length_noCheck=numpy.empty(0,\r\n dtype=numpy.dtype(sol_header_types()[:-1])).itemsize\r\n #print(data_type)\r\n #Calculate checksum for first record. This is done again later on the whole\r\n #data set. 
This is just here so if it is clearly rubbish I don't read the\r\n #whole file into memory\r\n fd=open(filename, 'r', encoding=\"utf-8\",errors=\"ignore\")\r\n header_size=numpy.empty(0, dtype=numpy.dtype(sol_header_types())).itemsize\r\n header=fd.read(header_size)\r\n fd.close()\r\n # Calculate the checksum - TODO: NOT SURE IF WORKING WITH PYTHON 3\r\n checksum = 0\r\n # Do a cumulative bitwise xor on all elements of the header bytearray\r\n for byte in bytearray(header[:-1],encoding=\"utf8\"):\r\n # ^ means xor\r\n checksum ^= byte\r\n # ADDED THIS TRY TO AVOID ERRORS IN SOME SOL FILES, SEEMS TO WORK OK(kevin mcguigan, AGRG)\r\n try:\r\n if checksum != ord(header[header_size-1]):\r\n raise IOError(\"Header checksum does not match\")\r\n except:\r\n pass\r\n #this is the error that would occasinally appear, such as when running:\r\n # testSol = r\"Q:\\CHII\\1_IE\\20170828174507_SH_AR\\proc\\t200_TC_MULTI_RAPID_BART_LTWN.sol\"\r\n # solDf = readSol(testSol)\r\n # print solDf\r\n # '''Traceback (most recent call last):\r\n # File \"Q:\\CHII\\_meta\\_dev\\read_nav_file.py\", line 177, in readSol\r\n # if checksum != ord(header[header_size-1]):\r\n # IndexError: string index out of range'''\r\n\r\n\r\n #Read file into memory, is a numpy array of tuples (not really tuples, these\r\n #are mutable, it looks like a tuple though)\r\n sol_data=numpy.fromfile(filename, dtype=data_type)\r\n #Check that the header checksum is correct for all records\r\n #The checksum is the bitwise xor of all elements in the header excluding the\r\n #checksum itself.\r\n #Do a bitwise xor on all header records, want to reduce along axis 1\r\n #to give an array of checksums\r\n if (not (numpy.bitwise_xor.reduce(\r\n #numpy.view gives the view of the array in the given data type,\r\n #and becuase we are doing a bitwise xor needs to be in unsigned 8bit\r\n #integers\r\n #reshape the array so that it is in records\r\n sol_data.view(numpy.uint8).reshape(sol_data.shape[0], record_length)\\\r\n #Want just the header records up to the\r\n #checksum\r\n [:, :header_length_noCheck], axis=1)\\\r\n ==\\\r\n #Check that the checksums are correct\r\n sol_data['header_checksum']).all()):\r\n raise IOError(\"Header checksum does not match, may not be a sol file\")\r\n #end if\r\n return sol_data", "title": "" }, { "docid": "eac38bb353f0b881e02bf8caa797b2ff", "score": "0.6142402", "text": "def read_file_bin():\n\tif not isfile(binpath):\n\t\twrite_file_bin([\"Hello\"], [\"Welcome!\"])\n\ttry:\n\t\tf = open(binpath, \"br\")\n\t\tdata = pickle.load(f)\n\t\tf.close()\n\t\tret_pages = []\n\t\tfor i,d in enumerate(data[0]):\n\t\t\tret_pages.append([d, data[1][i]])\n\t\treturn ret_pages\n\texcept:\n\t\tprint(\"Error reading file:\")\n\t\tprint(sys.exc_info())\n\t\texit(1)", "title": "" }, { "docid": "4d9a936f5a7923d72754eb54dc5257a3", "score": "0.60778", "text": "def import_bin_file(file_path):\n with open(file_path, 'rb') as file:\n data = pickle.load(file)\n return data", "title": "" }, { "docid": "22d6b335fef0ba95602885b246934d8d", "score": "0.6052145", "text": "def puzzle_02() -> None:\n\n with open(INPUT_FILE_PATH, \"r\") as f:\n document = loads(f.read())\n print_puzzle_solution(_object_sum(document))", "title": "" }, { "docid": "5863bfb14f50e9983918c9201312e3a9", "score": "0.6046847", "text": "def load_binary(binary_path=\"dictionary.dat\"):\r\n try:\r\n with open(binary_path, 'rb') as file:\r\n my_depickler = pickle.Unpickler(file)\r\n dictionary = my_depickler.load()\r\n return dictionary\r\n except:\r\n return {}", "title": "" }, 
{ "docid": "50c72d58a0e1035a343bdf9cf22ae4f0", "score": "0.6030863", "text": "def load_from_bin(file: str) -> Any:\n t = time()\n with open(file, 'rb') as f:\n obj = load(f)\n print('loaded from binary in ' + timer(t))\n return obj", "title": "" }, { "docid": "743e91c8bf7dd88cf236572b856c9c0e", "score": "0.600608", "text": "def read_from_binary_file(file_name, lines='All'):\r\n with open(file_name, mode='rb') as f:\r\n global commands\r\n commands = pickle.loads(f.read())\r\n\r\n num_of_lines = len(commands) if lines == 'All' else int(lines)\r\n printed_commands = commands[-num_of_lines:]\r\n for command in printed_commands:\r\n print(command)", "title": "" }, { "docid": "1fc1d5463383615fa70253882f4bab37", "score": "0.5980304", "text": "def read_file(self, file_name):\n cur_dir = os.path.dirname(os.path.abspath(__file__))\n path = Path(cur_dir)\n cur_dir = path.parent\n file_name = os.path.join(cur_dir, \"bin\", file_name)\n try:\n with open(file_name, 'r') as fileread:\n data = fileread.read()\n return file_name, data\n except IOError as error:\n print(error)", "title": "" }, { "docid": "8b1a5a329db491ecd915b0ffa0d70373", "score": "0.5975809", "text": "def load_bin(infile):\n\treturn np.load(infile,allow_pickle='TRUE')", "title": "" }, { "docid": "17575ce47aeea8722154bd8d24e88e73", "score": "0.5974111", "text": "def read_binary_file(filename, offset=0):\n read_buffer = open(filename, 'rb')\n read_buffer.seek(int(offset), 0)\n header = struct.unpack('<xcccc', read_buffer.read(5))\n if header[0] != 'B':\n print(\"Input .ark file is not binary\")\n sys.exit(-1)\n if header[1] == 'C':\n print(\"Input .ark file is compressed, exist now.\")\n sys.exit(-1)\n\n rows = 0; cols= 0\n _, rows = struct.unpack('<bi', read_buffer.read(5))\n _, cols = struct.unpack('<bi', read_buffer.read(5))\n\n if header[1] == \"F\":\n tmp_mat = np.frombuffer(read_buffer.read(rows * cols * 4),\n dtype=np.float32)\n elif header[1] == \"D\":\n tmp_mat = np.frombuffer(read_buffer.read(rows * cols * 8),\n dtype=np.float64)\n mat = np.reshape(tmp_mat, (rows, cols))\n\n read_buffer.close()\n\n return mat", "title": "" }, { "docid": "d6113ee473da2a279178fc743e473559", "score": "0.5967676", "text": "def read_bin_input_spec(f):\n with open(f, \"rb\") as ff:\n l = ff.readline().strip()\n if l == \"#v1\":\n x = BinInputSpecV1()\n else:\n print >>sys.stderr, \"Unknown file version for input/binary spec\", l\n \n x.read(ff)\n\n return x", "title": "" }, { "docid": "e85fa1660644f6e447cf79f684996e01", "score": "0.59667325", "text": "def load_bin(file_name: str, **params: Any) -> pd.DataFrame:\n file_path = (_PROJECT_DIR / file_name).resolve()\n info(f\"Reading from file {file_path}\")\n df = pd.read_feather(file_path, **params)\n return df", "title": "" }, { "docid": "96768f34615a64d3d96e395c62d29903", "score": "0.5932118", "text": "def decode_binary_file(self, file_path):\n if not os.path.exists(file_path):\n log.error(\"Missing: {}\".format(file_path))\n raise FileNotFoundError(\"Missing: {}\".format(file_path))\n log.info(\"Decoding file: {}\".format(file_path))\n return self.decode(open(file_path, \"rb\").read())", "title": "" }, { "docid": "1fe538ef4116dbe30eaa1c78ffdc07bd", "score": "0.5915349", "text": "def read_binary(self):\n with self.open('rb') as f:\n return f.read()", "title": "" }, { "docid": "2b96b9c2a6ec59e0034c4c2b652066d8", "score": "0.5899989", "text": "def read(filename='DOS'):\n from struct import pack,unpack,calcsize\n intSize = calcsize('<i')\n dubSize = calcsize('<d')\n strSize = calcsize('<s')\n version=1\n\n 
f=open(filename,'r').read()\n i = 0\n filetype, = unpack('<64s',f[i:i+64*strSize]) ; i += 64*strSize\n version, = unpack('<i',f[i:i+intSize]) ; i += intSize\n comment, = unpack('<1024s',f[i:i+1024*strSize]) ; i += 1024*strSize\n N_Bins, = unpack('<i',f[i:i+intSize]) ; i += intSize\n dE, = unpack('<d',f[i:i+dubSize]) ; i += dubSize\n DOS = unpack('<%id' % (N_Bins),f[i:])\n DOS = numpy.array(DOS)\n E = numpy.arange(0,N_Bins*dE,dE)\n return (filetype.strip('\\x00'),version,comment.strip('\\x00')),E,DOS", "title": "" }, { "docid": "7ee7f758e0988ae1341a55e32ca3da37", "score": "0.5864759", "text": "def load_file(path):\n with open(path, 'rb') as file:\n header = file.read(0x20)\n raw = file.read()\n return BinFile(header, raw)", "title": "" }, { "docid": "8302f81b1d862f5562c6ef0303446110", "score": "0.5856217", "text": "def testReadFile(self):\n definitions_file = self._GetTestFilePath(['definitions', 'integers.yaml'])\n self._SkipIfPathNotExists(definitions_file)\n\n definitions_registry = registry.DataTypeDefinitionsRegistry()\n definitions_reader = reader.DataTypeDefinitionsFileReader()\n\n definitions_reader.ReadFile(definitions_registry, definitions_file)", "title": "" }, { "docid": "baeb28f9c15a634b6ea9e6142545a4e4", "score": "0.58441705", "text": "def read():\n path = pathlib.Path(RAW_EVALATIN_DATA)\n files = fileinput.input(path.glob(\"*.conllu\"))\n return pyconll.unit.conll.Conll(files)", "title": "" }, { "docid": "45827cd91933db92f06db4adff4e308c", "score": "0.58408415", "text": "def load_binary_fixture(filename: str) -> bytes:\n path = get_fixture_path(filename)\n return path.read_bytes()", "title": "" }, { "docid": "7c82f10e8cf104b39926b6d4b920d3fc", "score": "0.5837009", "text": "def readSpecFile(fileName):\n fl = open(fileName, 'rb') # TODO wyjatek nie ma pliku\n A = pickle.load(fl)\n fl.close()\n return A", "title": "" }, { "docid": "ae6eacd24095b8c090c0ca86a28b50cf", "score": "0.58316576", "text": "def load(self):\n starting_memory = 0\n # debug = 1\n file_name = sys.argv[1]\n with open(file_name) as f:\n for line in f:\n # WE DIVIDE LINE IF THERE IS A COMMENT\n comment_split = line.split('#')\n # WE TAKE THE LEFT PART OF THE ARRAY, AND STRIP SPACE AT THE END\n instruction = comment_split[0].strip()\n # print(f'Instruction: {instruction} in line {debug}')\n if instruction == '':\n continue\n # WE CONVERT STRING WITH BINARY CODE INTO BINARY VALUE\n binary_code = int(instruction, 2)\n # WE SAVE EACH INSTRUCTION INTO RAM\n self.ram_write(starting_memory, binary_code)\n # WE INCREMENT THE ADDRESS IN MEMORE SO WE CAN ADD THE NEXT INSTRUCTION IN THE NEXT SLOT\n starting_memory += 1\n # debug += 1", "title": "" }, { "docid": "1708ef77c72d25d3c21b0ac67449c1c4", "score": "0.58202326", "text": "def readFiles( self ):\n\n # Read in the BIN file\n self._printer.printStatus( 0, 'Opening BIN file \"%s\" ...' % self.fileNameBin )\n if not os.path.isfile(self.fileNameBin):\n self._printer.errorExit( 2, 'File \"' + self.fileNameBin + '\" is not a readable file.' )\n binFile = open( self.fileNameBin, 'rb' )\n self.binData = binFile.read()\n binFile.close()\n if len(self.binData) <= 0:\n self._printer.errorExit( 2, 'File \"' + self.fileNameBin + '\" contains no data' )\n \n # Read in the raw CFG file\n self._printer.printStatus( 0, 'Opening CFG file \"%s\" ...' % self.fileNameCfg )\n if not os.path.isfile(self.fileNameCfg):\n self._printer.errorExit( 2, 'File \"' + self.fileNameCfg + '\" is not a readable file.' 
)\n cfgFile = open( self.fileNameCfg, 'r' )\n cfgData = cfgFile.read()\n cfgFile.close()\n \n #\n # Parse the CFG file (at least partially)\n #\n # Mostly, lines after a section (ex: [vars]) are associated with that section. Here are the exceptions:\n # 1. Contiguous comments immediately BEFORE a section as associated that section.\n # Blank lines and lines with non-comment data break contiguousness (i.e. these lines go with the PRIOR section, as per normal).\n # 2. [mapping] section is treated special since it is the only section that is truly parsed.\n # All other sections are just plain lines of text.\n # 3. Corner case: blank and comment lines that appear before the 1st section are associated with that section.\n #\n reSectionName = re.compile(r\"\\[([^\\]]*)\\]\")\n reMappingLine = re.compile(r\"\\$([0-9A-Fa-f]{1,4})\\s*-\\s*\\$([0-9A-Fa-f]{1,4})\\s*=\\s*\\$([0-9A-Fa-f]{1,4})\\s*(.*)\")\n sectionCurrent = self.__KEY_NONE\n cfgLines = cfgData.splitlines(False)\n for i in range(0, len(cfgLines)):\n cfgLine = cfgLines[i]\n appendLine = False\n hasComment = self.lineHasComment( cfgLine )\n cfgLineTrimmed = self.trimCommentAndWhitespace( cfgLine )\n isEmpty = len( cfgLineTrimmed ) == 0\n onlyComment = hasComment and isEmpty\n \n # Core parsing block\n reSectionMatch = reSectionName.fullmatch( cfgLineTrimmed )\n if reSectionMatch != None:\n # Handler for parsing a section header (ex: [vars] or [mapping])\n sectionCurrent = reSectionMatch.group(1)\n if sectionCurrent not in self.cfgSections:\n self.cfgSections[sectionCurrent] = []\n elif not isEmpty and sectionCurrent == self.__KEY_MAPPING:\n # Handler for lines in the [mapping] section\n reMappingMatch = reMappingLine.fullmatch( cfgLineTrimmed )\n if reMappingMatch == None:\n self._printer.errorPrint( 'Unrecognized [mapping] line: ' + cfgLine )\n self._printer.errorExit( 2, 'Unrecognized [mapping] line:\\n' + cfgLine )\n if reMappingMatch.group(4) != None and len( reMappingMatch.group(4) ) > 0:\n self._printer.errorExit( 2, 'Parser does not handle [mapping] lines with the extension of: \"%s\"' % reMappingMatch.group(4) )\n startOffset = int( reMappingMatch.group(1), 16)\n lenInBytes = ( int( reMappingMatch.group(2), 16) - startOffset + 1 ) * 2\n startAddress = int( reMappingMatch.group(3), 16)\n startOffset *= 2 # The 2x converts from WORD index into BYTE index\n self.memoryMap.writeByteBuffer( startAddress, self.binData, startOffset, lenInBytes, cfgLine )\n else:\n if sectionCurrent == self.__KEY_NONE and not self.__KEY_NONE in self.cfgSections:\n self.cfgSections[sectionCurrent] = []\n self.cfgSections[sectionCurrent].append( cfgLine )", "title": "" }, { "docid": "7cc97c8eadfac017949628e92f1d7595", "score": "0.58198744", "text": "def read_file(self) -> str:\r\n with open(self.path) as file_descriptor:\r\n try:\r\n contents = file_descriptor.read()\r\n return contents\r\n except UnicodeDecodeError:\r\n InputFile.logger.warning('\"%s\" seems to be a binary file and will be ignored.',\r\n self.path)", "title": "" }, { "docid": "c2f703e680ee2331d4a90632d199e17c", "score": "0.58051693", "text": "def read_binary_file(self, path: PATH_TYPE) -> bytes:\n with open(path, \"rb\") as fd:\n return fd.read()", "title": "" }, { "docid": "b2d06700deb6502e2012060d29b0c8f4", "score": "0.57962126", "text": "def load_binary_file(path: str) -> bytes:\n return Path(path).read_bytes()", "title": "" }, { "docid": "4eeac7b5b86dc318900c7006a0380491", "score": "0.5787983", "text": "def read(fname, mode = \"binary\"):\n \n fin = InputFile(fname, mode)\n\n return 
fin.readObject()", "title": "" }, { "docid": "11d012bb7a6ed682962194edad16a77a", "score": "0.5784898", "text": "def binary_file(fn):\r\n return binary(open(fn, 'rb').read(4096))", "title": "" }, { "docid": "39e09cd3e7e26700113bca790ed81d0d", "score": "0.5742841", "text": "def readbin(type, file):\n return unpack(type, file.read(calcsize(type)))", "title": "" }, { "docid": "859c632a64fdd5bba3147bc91b069a72", "score": "0.5739511", "text": "def load_from_binary_stram(path = \"Anaheim.graph\"):\n FIn = snap.TFIn(path)\n G = snap.TNEANet.Load(FIn)\n return G", "title": "" }, { "docid": "152cf4806de08e230fc094f6e6320349", "score": "0.57388985", "text": "def read_binary(filename,shape=(1),cplex=True,big=True):\n \n # open the file and get the data\n f = open(filename)\n data = get_data(f,big=big)\n\n # complexify if needed\n if cplex:\n data = complexify_data(data)\n\n # create dictionary\n dic = {\"FILE_SIZE\":os.stat(filename).st_size}\n\n # reshape if possible\n try:\n return dic,data.reshape(shape)\n\n except ValueError:\n print \"Warning:\",data.shape,\"cannot be shaped into\",shape\n return dic,data", "title": "" }, { "docid": "f17395302f5919ef115adddcc2219701", "score": "0.5735585", "text": "def readModel(fname, ):\n \n if (fname.endswith('wnd')):\n return bladed(fname,)\n elif (fname.endswith('bl')):\n return bladed(fname,)\n elif (fname.endswith('bts')):\n return turbsim(fname,)\n\n # Otherwise try reading it as a .wnd file.\n bladed(fname) # This will raise an error if it doesn't work. ", "title": "" }, { "docid": "ffe75d1f0571428ae1a2e4ddb0177d9b", "score": "0.57136637", "text": "def read(self):\n with open(self.path, \"rb\") as inputfile:\n return pickle.load(inputfile)", "title": "" }, { "docid": "955e1515d53d2d464c7adfbf9ff36b92", "score": "0.57072", "text": "def open_bin_data(path_to_file, file_name):\r\n with open(os.path.join(path_to_file, file_name + \".bin\"),'r') as current_file:\r\n data_type = np.dtype('>d') # assign data type: big-endian ordered 64 bit long data format \r\n raw_data = np.fromfile(current_file, dtype = data_type) # use numpy module to assign current trace data to [raw_data]\r\n raw_current = raw_data[0::2] # array of full baseline (current) data with first second and trailing values removed\r\n raw_voltage = raw_data[1::2]\r\n x_data_index_master = np.linspace(0,len(raw_current), len(raw_current), endpoint = True, dtype = 'int')\r\n current_file.close() # closes large data file\r\n del current_file\r\n \r\n return(raw_current, raw_voltage, x_data_index_master)", "title": "" }, { "docid": "69b26c33304302414b6d22d47d9334da", "score": "0.5701565", "text": "def load(self):\n\n address = 0\n load_address = 0\n\n program = [0] * 256\n\n if len(sys.argv) != 2:\n print(f\"Usage:\\npython3 {sys.argv[0]} filename.ls8\")\n exit()\n try:\n with open(sys.argv[1]) as f:\n for line in f:\n possible_num = line[:line.find('#')] # strip comments\n if possible_num == '': # strip blank lines\n continue\n # convert \"binary\" string into a number\n program[load_address] = (int(possible_num, 2))\n load_address += 1\n if load_address == 256:\n raise Exception(\"Out of memory. 
Program is too large.\")\n\n except FileNotFoundError:\n print(f\"{sys.argv[1]} not found.\")\n exit()\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1", "title": "" }, { "docid": "ae7e9ed273700f07793c53d5752f8b8f", "score": "0.568671", "text": "def read_data_from_bin_file(fileName):\n with open(fileName, mode='rb') as file: # b is important -> binary\n fileContent = file.read()\n\n (ChannelData, LenOf1Channel,\n NumOfChannels, SampleTime) = read_data_from_bytes(fileContent)\n \n return ChannelData, LenOf1Channel, NumOfChannels, SampleTime", "title": "" }, { "docid": "0fab1e55afca23d2e869bf99fd5dbd5f", "score": "0.568458", "text": "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "title": "" }, { "docid": "0fab1e55afca23d2e869bf99fd5dbd5f", "score": "0.568458", "text": "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "title": "" }, { "docid": "0fab1e55afca23d2e869bf99fd5dbd5f", "score": "0.568458", "text": "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "title": "" }, { "docid": "0fab1e55afca23d2e869bf99fd5dbd5f", "score": "0.568458", "text": "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "title": "" }, { "docid": "71d96782d57c91209adfd704d793fc6a", "score": "0.5680559", "text": "def read_is(filename, verbose=None): # noqa\n with open(filename, \"rb\") as f:\n logger.info(f\"Reading {filename}\")\n logger.info(f\"Reading Header...\")\n is_type = [\n struct.unpack(\"c\", f.read(1))[0].decode(\"utf-8\") for i in range(4)\n ]\n is_type = \"\".join(is_type)\n if is_type not in [\"IS01\", \"IS02\", \"IS03\"]:\n raise ValueError(\n f\"{is_type} : Invalid IS type, please check that \"\n \"input file is a Inverse Solution matrix\"\n )\n logger.info(f\"IS type: {is_type}\")\n n_channels = struct.unpack(\"I\", f.read(4))[0]\n logger.info(f\"n_channels: {n_channels}\")\n numsolutionpoints = struct.unpack(\"I\", f.read(4))[0]\n logger.info(f\"n_solutionpoints: {numsolutionpoints}\")\n numregularizations = struct.unpack(\"I\", f.read(4))[0]\n logger.info(f\"n_regularizations: {numregularizations}\")\n isinversescalar = struct.unpack(\"c\", f.read(1))[0]\n if isinversescalar == b\"\\x01\":\n n_dim = 1\n logger.info(f\"Inverse solution is Scalar\")\n elif isinversescalar == b\"\\x00\":\n logger.info(f\"Inverse solution is Vectorial\")\n n_dim = 3\n else:\n raise ValueError(\n f\"isinversescalar must be either 1 for scalar, \"\n f\"either 0 for vectorial, but \"\n f\"{ord(isinversescalar)} found.\"\n )\n\n if is_type in [\"IS01\", \"IS02\"]:\n buf = f.read(n_dim * numsolutionpoints * n_channels * 4)\n data = np.frombuffer(buf, dtype=np.float32)\n data = data.reshape(numsolutionpoints, n_dim, n_channels)\n data = np.array([data])\n data = np.swapaxes(data, 1, 2)\n\n elif is_type == \"IS03\":\n logger.info(f\"Reading Variable Header...\")\n\n ch_names = []\n for _ in range(n_channels):\n name = [\n char for char in f.read(32).split(b\"\\x00\") if char != b\"\"\n ][0]\n ch_names.append(name.decode(\"utf-8\"))\n\n solutionpoints_names = []\n for _ in range(numsolutionpoints):\n name = [\n char for char in f.read(16).split(b\"\\x00\") if char != b\"\"\n ][0]\n 
solutionpoints_names.append(name.decode(\"utf-8\"))\n\n regularizations_values = []\n for _ in range(numregularizations):\n value = struct.unpack(\"d\", f.read(8))[0]\n regularizations_values.append(value)\n logger.info(f\"Regularizations values: {regularizations_values}\")\n\n regularizations_names = []\n for _ in range(numregularizations):\n name = [\n char for char in f.read(32).split(b\"\\x00\") if char != b\"\"\n ][0]\n regularizations_names.append(name.decode(\"utf-8\"))\n logger.info(f\"Regularizations names: {regularizations_names}\")\n\n regularisation_solutions = []\n buf = f.read(\n numregularizations * n_dim * numsolutionpoints * n_channels * 4\n )\n data = np.frombuffer(buf, dtype=np.float32)\n data = data.reshape(\n numregularizations, numsolutionpoints, n_dim, n_channels\n )\n data = np.swapaxes(data, 1, 2)\n\n regularisation_solutions = np.array(regularisation_solutions)\n inverse_solution = {\n \"is_type\": is_type,\n \"is_scalar\": True if isinversescalar == \"0\" else False,\n \"ch_names\": ch_names,\n \"solutionpoints_names\": solutionpoints_names,\n \"regularizations_values\": regularizations_values,\n \"regularizations_names\": regularizations_names,\n \"regularisation_solutions\": data,\n }\n return inverse_solution", "title": "" }, { "docid": "ae93c608217144a7a805d253f2a70c76", "score": "0.5676097", "text": "def test_decode_from_file(tempdir):\n job = Decode()\n tempdir.write(\"input.md\", b\"### My heading\")\n result = job.run(tempdir.getpath(\"input.md\"),)\n assert result[\"result\"] == dict(\n type=\"Article\", content=[dict(type=\"Heading\", depth=3, content=[\"My heading\"])]\n )\n assert result[\"log\"] == []", "title": "" }, { "docid": "5572c6dc19de076c943ed068d85f839a", "score": "0.5675747", "text": "def read(dir=\".\",bin_file=None,acqus_files=None,pprog_file=None,shape=None,\n cplex=None,big=None,read_prog=True,read_acqus=True):\n\n if os.path.isdir(dir) != True:\n raise IOError,\"directory %s does not exist\"%(dir)\n\n # determind parameter automatically\n if bin_file == None:\n if os.path.isfile(os.path.join(dir,\"fid\")):\n bin_file = \"fid\"\n elif os.path.isfile(os.path.join(dir,\"ser\")):\n bin_file = \"ser\"\n else:\n raise IOError,\"no Bruker binary file could be found in %s\"%(dir)\n\n if acqus_files == None:\n acqus_files = []\n for f in [\"acqus\",\"acqu2s\",\"acqu3s\",\"acqu4s\"]:\n if os.path.isfile(os.path.join(dir,f)):\n acqus_files.append(f)\n\n if pprog_file == None:\n pprog_file = \"pulseprogram\"\n\n # create an empty dictionary\n dic = dict()\n\n # read the acqus_files and add to the dictionary\n if read_acqus:\n for f in acqus_files:\n dic[f] = read_jcamp(os.path.join(dir,f))\n\n # read the pulse program and add to the dictionary\n if read_prog:\n dic[\"pprog\"] = read_pprog(os.path.join(dir,pprog_file))\n\n # determind file size and add to the dictionary\n dic[\"FILE_SIZE\"] = os.stat(os.path.join(dir,bin_file)).st_size\n\n # determind shape and complexity for direct dim if needed\n if shape == None or cplex == None:\n gshape,gcplex = guess_shape(dic)\n if gcplex==True: # divide last dim by 2 if complex\n t = list(gshape)\n t[-1]=t[-1]/2\n gshape = tuple(t)\n if shape == None:\n shape = gshape\n if cplex == None:\n cplex = gcplex\n \n # determind endianness (assume little-endian unless BYTORDA is 1)\n if big == None:\n big = False # default value\n if \"acqus\" in dic and \"BYTORDA\" in dic[\"acqus\"]:\n if dic[\"acqus\"][\"BYTORDA\"] == 1:\n big = True\n else:\n big = False\n\n # read the binary file\n f = 
os.path.join(dir,bin_file)\n null,data = read_binary(f,shape=shape,cplex=cplex,big=big)\n return dic,data", "title": "" }, { "docid": "3a5280a68a5f3e0d6d76dcb240ff59ad", "score": "0.56719095", "text": "def test_read(file_system):\n workfolder = file_system[\"main\"][\"dir\"]\n target = file_system[\"main\"][\"file1\"].name\n instructions = {\n \"KeyName\": \"Host Name\",\n \"EnableLineNumber\": \"False\"\n }\n var = ReadFullDocument(workfolder, target, instructions)\n result = var.read()\n\n assert len(result.container) == 1\n data = result.container[0]\n\n assert data.key == \"Host Name\"\n assert data.values == (\"Beolab90\",)", "title": "" }, { "docid": "1ea1e41b13cc1d372d6d4db483f47d4b", "score": "0.56717515", "text": "def load(self):\n if STLMode.ascii == self._mode:\n if not os.path.exists(self._path):\n raise Exception(\"File not found: [%s]\" % self._path)\n with open(self._path) as f:\n ln = f.readline()\n while ln != '':\n line = ln.strip()\n self._process(line)\n\n # increment reader\n ln = f.readline()\n self._cur_line += 1\n elif STLMode.binary == self._mode:\n raise Exception(\"Not implemented yet\")\n else:\n raise Exception('No mode specified')", "title": "" }, { "docid": "92cab9d4106d20138619687a2ef59253", "score": "0.5668465", "text": "def binary_format_reader(filename):\n header_part = \"\"\n rawVectorData = None\n header = None\n with open(filename, 'rb') as f:\n x = f.readline()\n while x != b'# End: Header\\n':\n header_part += str(x)\n x = f.readline()\n\n header = ParsingUtils.process_header(header_part)\n\n byte_type = f.readline()\n while byte_type == b'#\\n':\n byte_type = f.readline()\n # compile struct byte type\n fmt, buff_size, val = ParsingUtils.decode_byte_size(byte_type)\n struct_object = struct.Struct(fmt)\n test_val = struct_object.unpack(f.read(buff_size))[0]\n if test_val != val:\n raise ValueError(\"Invalid file format with validation {} value, \\\n should be {}\".format(test_val, val))\n\n k = int(header['xnodes']*header['ynodes']*header['znodes'])\n rawVectorData = ParsingUtils.standard_vertex_mode(\n f, k, struct_object, buff_size)\n f.close()\n assert rawVectorData is not None\n assert header is not None\n return header, rawVectorData", "title": "" }, { "docid": "4a9ada01c65385b2871007056fa8103a", "score": "0.56642765", "text": "def read_file(file):", "title": "" }, { "docid": "cc75af9bb6c59c6776055e0fda3dcaba", "score": "0.5662355", "text": "def read_data(self, filepath):", "title": "" }, { "docid": "b7da863fec1eabd3641740f447a79c7b", "score": "0.5660204", "text": "def ibe_read_from_file(filename):\n with open(filename, \"rb\") as f:\n res = f.read()\n return res \n \n return None", "title": "" }, { "docid": "99a41036869100beed68325254c3bb7f", "score": "0.565705", "text": "def Read(self):\n\n\t\tself.data = np.loadtxt(self.fpath)", "title": "" }, { "docid": "e0af20f8b5a74bcd37c0631c68a536f7", "score": "0.5652648", "text": "def file_read(path):\n with open(path, mode='rb') as f:\n return pickle.load(f)", "title": "" }, { "docid": "766029f1d9d0d8fa560746e8577e78e4", "score": "0.56470966", "text": "def _read(self):\n with open(self.filename, 'r') as f:\n# self.title = f.read()\n # initializng data dictionary\n print('Opening: %s'%self.filename)\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n 
self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). \n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab\n \n self.plot()", "title": "" }, { "docid": "827bfa72bf571703d29d5c2abb1e029a", "score": "0.56453896", "text": "def deserialize(infile):\n raise NotImplementedError", "title": "" }, { "docid": "212fbd56b6f9c334a3ce54ea263447dc", "score": "0.5645035", "text": "def read_binary(filename, dim, dtype = tf.float32):\n\n bin_file = tf.read_file(filename)\n bin_tensor = tf.decode_raw(bin_file, dtype)\n bin_tensor = tf.to_float(bin_tensor)\n bin_tensor = tf.reshape(bin_tensor,[dim])\n # bin_tensor = normalize_tensor(bin_tensor)\n\n return bin_tensor", "title": "" }, { "docid": "9b8bf689314b19d81b273c74c6c0d0f8", "score": "0.5644736", "text": "def read_nifti_file(filepath):\n\tscan = nib.load(filepath)\n\tscan = scan.get_data()\n\treturn scan", "title": "" }, { "docid": "232aa8aaba074414f04b2c6406e9d2a3", "score": "0.56351805", "text": "def read_binary_ivector(fname):\n \n f= open(fname, \"rb\")\n ivector, n_data, metadata = string_to_ivector(f)\n\n return ivector, n_data, metadata", "title": "" }, { "docid": "d6332b75c57f8e316b0e99a1ef17d8b1", "score": "0.56314677", "text": "def read_file(self):\n try:\n data = pickle.load(self.open_diary)\n return data\n except EOFError:\n #print('Your diary is empty!')\n pass", "title": "" }, { "docid": "127b3c6dbe7b24997d709620119ba0b7", "score": "0.562332", "text": "def ReadFile(self, filename):", "title": "" }, { "docid": "1def917729181ba740a36d735e3c43c3", "score": "0.5613102", "text": "def test_task2_with_input_file():\n with gzip.open(INPUT_FILE_PATH, \"rt\", encoding=\"ascii\") as file:\n result = solve_task2(file)\n assert result == 9533698720", "title": "" }, { "docid": "9ee5160e18af0baaea0694a85741f2ab", "score": "0.5610461", "text": "def test_embedded_bin_chars(self):\n file = io.StringIO(\"line\\x00\\x03\\x04\\x0c\\x17\\x1a\\x1c1\")\n self.reader._read_file_obj(file)\n\n self.assertEqual(len(self.parser.lines), 1);\n self.assertEqual(self.parser.lines[0].get_filename(), Helpers.TEST_FNAME)\n self.assertEqual(self.parser.lines[0].get_line(), 1)\n self.assertEqual(self.parser.lines[0].get_text(), \"line\\x00\\x03\\x04\\x0c\\x17\\x1a\\x1c1\")", "title": "" }, { "docid": "8a8b757ca487c46314722a37a08f69ad", "score": "0.56001437", "text": "def read_file(self, file):", "title": "" }, { "docid": "d38e85afed3be4e44fdf50d7433c0adc", "score": "0.55997443", "text": "def read_from_file():\n\tparser = 
argparse.ArgumentParser()\n\t# command cannot be executed without a path\n\tparser.add_argument(\"--input\", required=True,help=\"you need to parse a link to a file with instructions, please see the docs\")\n\targs = (parser.parse_args())\n\n\turi = str(args.input)\n\treq = urllib.request.urlopen(uri)\n\tdata = req.read().decode('utf-8')\n\t# first line is the size of the led board\n\tled_box_len = int(data.splitlines()[0])\n\t# create new board\n\tboard = (create_board(led_box_len))\n\t# the actions are the rest of the file\n\tcommands = data.splitlines()[1:]\n\t# cleaned up data\n\tfinal_data = clean_up_input_file(commands)\n\t# perform actions on the board\n\tresult = board_plotter(final_data, board)\n\tprint(print_lights_totals(result))", "title": "" }, { "docid": "1606243fa969203a08d8265a06349208", "score": "0.55972725", "text": "def _read_solution(self,\n symbols_filename,\n solution_filename):\n\n # parse the symbol map\n symbol_map = {}\n with open(symbols_filename) as f:\n for line in f:\n lp_symbol, scenario_tree_id = line.strip().split()\n symbol_map[lp_symbol] = scenario_tree_id\n\n results = SPSolverResults()\n results.status = None\n results.solver.status = None\n results.solver.termination_condition = None\n results.solver.message = None\n xhat = {}\n try:\n with open(solution_filename, 'r') as f:\n line = f.readline()\n assert line.startswith(\"Problem:\")\n assert line.split()[1].strip() == \"pysp_model\"\n line = f.readline()\n assert line.startswith(\"First Stage Rows:\")\n line = f.readline()\n assert line.startswith(\"First Stage Columns:\")\n line = f.readline()\n assert line.startswith(\"First Stage Non-zeros:\")\n line = f.readline()\n assert line.startswith(\"Replication No.\") or \\\n line.startswith(\"Number of replications:\")\n line = f.readline()\n assert line.startswith(\"Status:\")\n results.solver.message = line.split(\":\")[1].strip()\n (results.status,\n results.solver.status,\n results.solver.termination_condition) = \\\n _sd_status_map.get(results.solver.message,\n (SolutionStatus.unknown,\n SolverStatus.unknown,\n TerminationCondition.unknown))\n\n #\n # Objective and Bound\n #\n\n line = f.readline()\n assert line.startswith(\"Total Objective Function Upper Bound:\")\n line = line.split(':')\n if line[1].strip() == '':\n pass\n else:\n assert len(line) == 4\n line = line[1]\n if \"half-width\" in line:\n # we are given confidence intervals on the objective\n line = line.split(',')\n assert len(line) == 4\n results.objective = float(line[0])\n assert line[1].startswith('[')\n assert line[2].endswith(']')\n results.objective_interval = (float(line[1][1:]),\n float(line[2][:-1]))\n else:\n results.objective = float(line[1])\n line = f.readline()\n assert line.startswith(\"Total Objective Function Lower Bound:\")\n line = line.split(':')\n if line[1].strip() == '':\n pass\n else:\n if \"half-width\" in line[1]:\n # we are given confidence intervals on the bound\n line = line[1].split(',')\n assert len(line) == 4\n results.bound = float(line[0])\n assert line[1].startswith('[')\n assert line[2].endswith(']')\n results.bound_interval = (float(line[1][1:]),\n float(line[2][:-1]))\n else:\n results.bound = float(line[1].strip())\n\n #\n # Xhat\n #\n\n line = f.readline()\n assert line.strip() == ''\n line = f.readline()\n assert line.startswith('First Stage Solutions:')\n line = f.readline()\n assert line.startswith(' No. 
Row name Activity Lower bound Upper bound Dual Dual STDEV')\n line = f.readline()\n assert line.startswith('------ ------------ ------------- ------------- ------------- ------------- -------------')\n\n xhat_start_line = ' No. Column name Activity Lower bound Upper bound Reduced Cost RC STDEV'\n line = f.readline()\n while not line.startswith(xhat_start_line):\n line = f.readline()\n line = f.readline()\n assert line.startswith('------ ------------ ------------- ------------- ------------- ------------- -------------')\n line = f.readline().strip().split()\n while line:\n varlabel, varvalue = line[1:3]\n varlabel = varlabel.strip()\n varvalue = float(varvalue)\n xhat[symbol_map[varlabel]] = varvalue\n line = f.readline().strip().split()\n\n except (IOError, OSError):\n logger.warn(\n \"Exception encountered while parsing sd \"\n \"solution file '%s':\\n%s'\"\n % (solution_filename, traceback.format_exc()))\n xhat = None\n\n return xhat, results", "title": "" }, { "docid": "548368c802df754b17a79ed2c4295068", "score": "0.5592868", "text": "def testReadFileObjectFormatVersion1(self):\n output_writer = test_lib.TestOutputWriter()\n test_file = recycle_bin.RecycleBinMetadataFile(\n debug=True, output_writer=output_writer)\n\n test_file_path = self._GetTestFilePath(['$II3DF3L.zip'])\n self._SkipIfPathNotExists(test_file_path)\n\n test_file.Open(test_file_path)", "title": "" }, { "docid": "bf106ada03f16d5af6d5606c2d9ef68b", "score": "0.5589384", "text": "def read(self, filename, nscale=1):\n reader = SHTextReader(filename)\n if filename.endswith(\".bdo\") or filename.endswith(\".bdox\"):\n reader = SHBinaryReader(filename)\n # find better way to discover if file comes from Fluka\n elif \"_fort\" in filename:\n reader = FlukaBinaryReader(filename)\n reader.read(self, nscale)\n self.counter = 1", "title": "" }, { "docid": "a8db1d58aa7dbcc575b0fab2e0449b79", "score": "0.55843747", "text": "def read(self, fileobj):\n raise NotImplementedError", "title": "" }, { "docid": "b82f75cdfe1e8ceff468ac2c426769f0", "score": "0.5572124", "text": "def read(file_path):\n\n data = numpy.load(file_path)\n return data", "title": "" }, { "docid": "637704aa0b14c2204edfb6f70530bc46", "score": "0.5569049", "text": "def read_input(fname=\"day14.in\"):\n with open(fname) as f:\n return lines2recipe(f.readlines())", "title": "" }, { "docid": "c700e2233d8a793c95a99b844fbf8c25", "score": "0.5567073", "text": "def read_label_file(label_file):\n with open(label_file,'rb') as f:\n data = f.read(4)\n magic_no = struct.unpack('>L',data)\n if magic_no==2049 or magic_no==2051:\n print 'Incorrectly parsing files'\n print ' magic no = %d '%magic_no\n data = f.read(4)\n num_data, = struct.unpack('>L',data)\n print ' Number of data points = %d '% num_data\n unpacked_data = np.zeros((num_data),np.uint8)\n for i in range(num_data):\n temp_data, = struct.unpack('>B',f.read(1))\n unpacked_data[i] = temp_data\n\n return unpacked_data", "title": "" }, { "docid": "7982bf506d30a156ccde787310f635b4", "score": "0.5565414", "text": "def readFlow(fn):\n # Code adapted from:\n # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy\n\n # WARNING: this will work on little-endian architectures (eg Intel x86) only!\n # print 'fn = %s'%(fn)\n with open(fn, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. 
Invalid .flo file')\n return None\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n # print 'Reading %d x %d flo file\\n' % (w, h)\n data = np.fromfile(f, np.float32, count=2*int(w)*int(h))\n # Reshape data into 3D array (columns, rows, bands)\n # The reshape here is for visualization, the original code is (w,h,2)\n return np.resize(data, (int(h), int(w), 2))", "title": "" }, { "docid": "7982bf506d30a156ccde787310f635b4", "score": "0.5565414", "text": "def readFlow(fn):\n # Code adapted from:\n # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy\n\n # WARNING: this will work on little-endian architectures (eg Intel x86) only!\n # print 'fn = %s'%(fn)\n with open(fn, 'rb') as f:\n magic = np.fromfile(f, np.float32, count=1)\n if 202021.25 != magic:\n print('Magic number incorrect. Invalid .flo file')\n return None\n else:\n w = np.fromfile(f, np.int32, count=1)\n h = np.fromfile(f, np.int32, count=1)\n # print 'Reading %d x %d flo file\\n' % (w, h)\n data = np.fromfile(f, np.float32, count=2*int(w)*int(h))\n # Reshape data into 3D array (columns, rows, bands)\n # The reshape here is for visualization, the original code is (w,h,2)\n return np.resize(data, (int(h), int(w), 2))", "title": "" }, { "docid": "287e2d6c4fd39a889b210abc1a842643", "score": "0.55653274", "text": "def read_highd_binary(filename, shape, perm, dtype = tf.float32):\n\n bin_file = tf.read_file(filename)\n bin_tensor = tf.decode_raw(bin_file, dtype)\n bin_tensor = tf.to_float(bin_tensor)\n bin_tensor = tf.reshape(bin_tensor, shape)\n bin_tensor = tf.transpose(bin_tensor, perm= perm)\n return bin_tensor", "title": "" }, { "docid": "e3839f09e15da7f48ea2ff8793a231b3", "score": "0.55636847", "text": "def load_from_file(self):\n pass", "title": "" }, { "docid": "62707ee689da802dfae878980a1ae478", "score": "0.55600953", "text": "def readBinaryFiles(file, dimensions=(100, 100), saveFITS=True, output='tmp.fits'):\n fh = open(file, 'rb')\n #use numpy to read the binary format, the data is 16bit unsigned int\n a = np.fromfile(fh, dtype=np.uint16)\n fh.close()\n\n try:\n #note the transpose\n image = a.reshape(dimensions).T\n except:\n print 'Image shape as not expected'\n print a.shape\n return None\n\n if saveFITS:\n fileIO.writeFITS(image, output)\n\n return image", "title": "" }, { "docid": "6e080715b5f6482e47acabc414bf77fc", "score": "0.55571693", "text": "def importBinary(self):\n ...", "title": "" }, { "docid": "8b7e59093ab25aeedd2b28447a62e763", "score": "0.5556787", "text": "def read_vec_int(file_or_fd):\n fd = open_or_fd(file_or_fd)\n binary = fd.read(2)\n if binary == '\\0B': # binary flag\n assert(fd.read(1) == '\\4'); # int-size\n vec_size = struct.unpack('<i', fd.read(4))[0] # vector dim\n ans = np.zeros(vec_size, dtype=int)\n for i in range(vec_size):\n assert(fd.read(1) == '\\4'); # int-size\n ans[i] = struct.unpack('<i', fd.read(4))[0] #data\n return ans\n else: # ascii,\n arr = (binary + fd.readline()).strip().split()\n try:\n arr.remove('['); arr.remove(']') # optionally\n except ValueError:\n pass\n ans = np.array(arr, dtype=int)\n if fd is not file_or_fd : fd.close() # cleanup\n return ans", "title": "" }, { "docid": "476546683990b5670c60367797c425be", "score": "0.55560607", "text": "def structread(fromfile, decode_struct) : \n return struct.unpack(decode_struct, fromfile.read(struct.calcsize(decode_struct)))", "title": "" }, { "docid": "2e9fbdde41f75881fc42362c02755f4b", "score": "0.55545557", "text": "def 
readBinary(zTop=0, zBot=1200,\n min_utme=None, max_utme=None,\n min_utmn=None, max_utmn=None,\n newModelName='subModel.nc',\n binModelName='',\n saveFull='N'):\n\n # The following lines describe the size of the binary file values.\n #\n # Layer 1 0-1200m depth\n # 3271 in EW direction\n # 5367 in NS direction\n # 13 in z\n # dx=dy= 200m, dz= 100m\n #\n if 'l1' in binModelName:\n nx = 3271; ny = 5367; nz = 13\n dx = 200; dy = 200; dz = 100\n zmin = 0; zmax = 1200\n #\n # Layer 2 1500-9900m depth\n # 2181 in EW\n # 3578 in NS\n # 29 in z\n # dx=dy=dz=300m\n #\n if 'l2' in binModelName:\n nx = 2181; ny = 3578; nz = 29\n dx = 300; dy = 300; dz = 300\n zmin = 1500; zmax = 9900\n #\n # Layer 3 10800-59400m depth\n # 727 in EW\n # 1193 NS\n # 55 in z\n # dx=dy=dz=900m\n #\n if 'l3' in binModelName:\n nx = 727; ny = 1193; nz = 55\n dx = 900; dy = 900; dz = 900\n zmin = 10800; zmax = 59400\n #\n # The SW corner of the velocity model is -10800m East, 4467300m N Zone 10.\n SWcornerFull = [-10800, 4467300]\n #\n\n # Read in binary file\n v = np.fromfile(binModelName, dtype='<f4')\n\n # Generate arrays for x, y, & z locations\n z = np.linspace(zmin, zmax, nz)\n z = np.repeat(z, (np.ones(len(z))*nx*ny).astype(int))\n z = z[::-1] # Reverse array\n\n y = np.linspace(SWcornerFull[1], SWcornerFull[1]+ny*dy, ny, endpoint=False)\n y = np.repeat(y, (np.ones(len(y))*nx).astype(int))\n y = np.tile(y, nz) # Repeat array for each depth\n\n x = np.linspace(SWcornerFull[0], SWcornerFull[0]+nx*dx, nx, endpoint=False)\n x = np.tile(x, ny*nz)\n\n # Convert CVM to dataframe\n model = pd.DataFrame(np.column_stack((x,y,z,v)), columns=['utme','utmn','z',(binModelName.split('/'))[1].split('_16')[0]])\n\n if saveFull == 'N' or saveFull == 'n':\n # Subset model (speed things up)\n if min_utme==None:\n # Use full horizontal model extent\n subModel = model[(model[\"z\"] >= zTop) & (model[\"z\"] <= zBot)]\n else:\n subModel = model[(model[\"z\"] >= zTop) & (model[\"z\"] <= zBot)\n & (model[\"utme\"] >= min_utme) & (model[\"utme\"] <= max_utme)\n & (model[\"utmn\"] >= min_utmn) & (model[\"utmn\"] <= max_utmn)]\n subModel = subModel.set_index(['utme','utmn','z']) # Set these parameters as coordinates\n\n # Convert to xarray (slow)\n subModelxr = subModel.to_xarray()\n\n # Save xarray for faster reload later\n subModelxr.to_netcdf('../output/' + newModelName)\n\n return subModelxr\n\n # This can be VERY slow (~ 1 hour)\n if saveFull == 'Y' or saveFull == 'y':\n model = model.set_index(['utme','utmn','z'])\n modelxr = model.to_xarray()\n modelxr.to_netcdf('../output/' + (binModelName.split('/'))[1].split('.bin')[0]+'.nc')\n\n return modelxr", "title": "" }, { "docid": "2b7d2c3d0629055aabf0bb03abe5134f", "score": "0.5550434", "text": "def read_low(filepath):\n pass", "title": "" }, { "docid": "5d0bd63a786585d392b5683fea245461", "score": "0.55393505", "text": "def load_from_File(self, filePath):\n # Create file object\n fileObject = open(filePath, 'rb')\n # Load from the file object\n b = pickle.load(fileObject)\n return b", "title": "" }, { "docid": "702e0c9544e686a6ebcb974163ad5c12", "score": "0.5539064", "text": "def load_bitstream(self, bitfile):\n if not os.path.isfile(bitfile):\n raise ValueError(\"Cannot find specified bitfile {}\".format(bitfile))\n\n bitstream = open(bitfile, \"rb\")\n self._llint.FPGAProgram(bitstream)\n pass", "title": "" }, { "docid": "6dc76223ce23e3f476893a9fa85491ca", "score": "0.55328214", "text": "def read(filename):\n with open(filename, \"rb\") as f:\n mesh = read_buffer(f)\n return mesh", "title": 
"" }, { "docid": "ccc01eb139326e1f13ad57ca6ad5e5b4", "score": "0.55316436", "text": "def test_task1_with_input_file():\n with gzip.open(INPUT_FILE_PATH, \"rt\", encoding=\"ascii\") as file:\n trees = solve_task1(file)\n assert trees == 230", "title": "" }, { "docid": "b8315e84a83d80709f997ea77dfffacb", "score": "0.55310977", "text": "def loadbin(self, fobj, offset=0):\n fread = getattr(fobj, \"read\", None)\n if fread is None:\n f = open(fobj, \"rb\")\n fread = f.read\n fclose = f.close\n else:\n fclose = None\n\n try:\n self.frombytes(array('B', asbytes(fread())), offset=offset)\n finally:\n if fclose:\n fclose()", "title": "" }, { "docid": "629af03410e0d164a23edd6dee98d557", "score": "0.5530605", "text": "def load(self, filename):\n\n address = 0\n\n # In `load()`, you will now want to use those command line arguments to open a\n # file, read in its contents line by line, and save appropriate data into RAM. \n with open(filename) as file:\n\n # As you process lines from the file, you should be on the lookout for blank lines\n # (ignore them), and you should ignore everything after a `#`, since that's a\n # comment.\n for line in file:\n comment_split = line.split('#')\n instruction = comment_split[0]\n\n if instruction == '':\n continue\n \n first_bit = instruction[0]\n\n # You'll have to convert the binary strings to integer values to store in RAM. The\n # built-in `int()` function can do that when you specify a number base as the\n # second argument:\n if first_bit == '0' or first_bit == '1':\n self.ram[address] = int(instruction[:8], 2)\n address += 1", "title": "" }, { "docid": "482779e5cd2fa61dc4c94498d875f598", "score": "0.55265105", "text": "def load(self, path, offset=0):\n\n # Read the file into main memory byte-by-byte\n with open(path, \"rb\") as f:\n b = f.read(1)\n while b:\n self.memory[offset] = struct.unpack(\"B\", b)[0]\n offset += 1\n b = f.read(1)", "title": "" }, { "docid": "2740dda98dcf8ae0a6ed8066acc72a05", "score": "0.55191106", "text": "def load_solution(f):\n return json.load(f)", "title": "" }, { "docid": "a8db5b57ee26df6ff644366f13ff3578", "score": "0.551412", "text": "def _read_file_from_archive(cls, read_obj, src_file):\n with read_obj.open(src_file, 'r') as file_obj:\n return file_obj.read()", "title": "" }, { "docid": "dfb91ee7ec3032545954fa5472f4172a", "score": "0.55100393", "text": "def load_binary_data(filename):\n\n input_data = open(filename,\"rb\")\n grid = dill.load(input_data)\n input_data.close()\n retessellate = grid.remove_tracks(config.track_threshold) # filter out incomplete tracks\n if (config.distort_grid):\n grid.distort_grid()\n retessellate = True\n if (retessellate or config.retessellate):\n grid.tessellate()\n\n if (grid.user_params != config.user_params):\n print(\"WARNING: mismatch between the user_params in the binary grid file\")\n print(\" and in AIMS_configure.py. 
Will overwrite user_params.\")\n print(\" Binary grid file: \",grid.user_params)\n print(\" AIMS_configure.py: \",config.user_params)\n\n # manually correct relevant variables:\n config.user_params = grid.user_params\n model.nglb = 8 + len(config.user_params)\n model.nlin = 5 + len(config.user_params)\n model.ifreq_ref = 5 + len(config.user_params)\n model.iradius = 6 + len(config.user_params)\n model.iluminosity = 7 + len(config.user_params)\n model.init_user_param_dict()\n\n return grid", "title": "" }, { "docid": "0d404a72286b82bbcae00bffcded3153", "score": "0.55079895", "text": "def load(fname):\n try:\n with open(fname, 'rb') as fin:\n return fin.read()\n except FileNotFoundError:\n print(\"Couldn't read file\", fname)\n exit(1)", "title": "" }, { "docid": "792418a879f4b424fe407d0b8d29bcae", "score": "0.5493646", "text": "def load(self):\n\n params = sys.argv\n if len(params) != 2: # if no filename is passed\n print(\"usage: file.py filename\") # print error and exit\n sys.exit(1)\n if len(params) == 2: # if filename passed\n try:\n with open(params[1]) as f: # read filename\n address = 0\n for line in f: # for each line\n comment_split = line.split(\"#\") # SPlit at comments\n num = comment_split[0].strip() # strip whiteSPace\n if num == '': # if line is blank, continue\n continue\n val = int(\"0b\"+num,2) # otherwise convert oPCode to binary\n self.ram_write(address, val) # write the op code to memory\n address += 1 # increment the address to the next one in ram\n except FileNotFoundError: # if try fails, filename doesn't exist\n print(\"ERROR: File not found\")\n sys.exit(2)", "title": "" } ]
16d29fd4457273d8c9392acf2e3ee714
Builds predefined pages and puts them into the paginator.
[ { "docid": "13d00e00d4e316949f7f6b0d7981e6b5", "score": "0.758468", "text": "async def build_pages(self):\n paginator = LinePaginator(prefix=\"\", suffix=\"\")\n\n # Add general info page\n paginator.add_page(Page(\n strings.Info.bot_welcome,\n \"\",\n strings.Info.bot_tutorial,\n \"\",\n strings.Info.bot_browse,\n \"\",\n strings.Info.bot_output\n ))\n\n # Add rules page\n paginator.add_page(Page(\n strings.Info.rules_welcome,\n \"\",\n strings.Info.rules_first,\n strings.Info.rules_second,\n strings.Info.rules_third,\n strings.Info.rules_fourth,\n strings.Info.rules_fifth,\n strings.Info.rules_sixth\n ))\n\n # Add links page\n paginator.add_page(Page(\n strings.Info.links_welcome,\n \"\",\n strings.Info.links_ts,\n \"\",\n strings.Info.links_website,\n \"\",\n strings.Info.links_public_steam,\n \"\",\n strings.Info.links_private_steam\n ))\n\n # Add authors page\n paginator.add_page(Page(\n strings.Info.authors_welcome,\n \"\",\n strings.Info.authors_bertalicious,\n strings.Info.authors_white_noise,\n \"\",\n strings.Info.authors_support,\n \"\",\n strings.Info.authors_link\n ))\n\n # Save organised pages to session\n self.pages = paginator.pages", "title": "" } ]
[ { "docid": "d4904c1eae48a04ca36df142f4678fa7", "score": "0.71009964", "text": "def generate_all_pages():\n fetcher = getattr(parent, fetcher_str)\n\n first_page = fetcher.get(**kwargs)\n kwargs['page_size'] = len(first_page)\n count = fetcher.current_total_count\n yield first_page\n\n if count > kwargs['page_size']:\n num_pages = int(ceil(float(count) / kwargs['page_size']))\n for page in range(1, num_pages):\n kwargs['page'] = page\n yield fetcher.get(**kwargs)", "title": "" }, { "docid": "d8eec126418e333d5e2c9d0a81577084", "score": "0.68907714", "text": "def _setup_pages(self):\n stdkwargs = {\n 'template': 'nav_playground.html',\n 'language': 'en',\n 'published': True,\n 'in_navigation': True,\n }\n page_1 = create_page(\"page_1\", **stdkwargs) # first page slug is /\n page_2 = create_page(\"page_2\", parent=page_1, **stdkwargs)\n page_3 = create_page(\"page_3\", parent=page_2, **stdkwargs)\n page_4 = create_page(\"page_4\", parent=page_3, **stdkwargs)\n page_5 = create_page(\"page_5\", parent=page_1, **stdkwargs)\n page_6 = create_page(\"page_6\", parent=page_5, **stdkwargs)\n return [page_1,\n page_2,\n page_3,\n page_4,\n page_5,\n page_6,\n ]", "title": "" }, { "docid": "2944cb58de08e6eb89206ca25af8c078", "score": "0.67420703", "text": "def build_pages(n):\n pages = list()\n template = jinja_env.get_template('template.html')\n # i is the current page and we want the current\n # page to point to the next page and all of the \n # pages before it if it isn't the last page (page n).\n # Add 1 to n because we start from 1.\n for i in range(1, n + 1):\n links = list(range(1, i))\n # Add the next page if (i + 1) if it isn't the last page (n + 1)\n if i + 1 != n + 1:\n links.append(i + 1)\n context = dict(title='Page{}'.format(i), links=links)\n pages.append(template.render(context))\n\n return pages", "title": "" }, { "docid": "ff579bb2917c912ed7f607b6573dc837", "score": "0.6695581", "text": "def generate(self, **options):\n from pyClanSphere.application import url_for, get_application, \\\n DEFAULT_THEME_SETTINGS\n\n if self._skip_theme_defaults:\n settings = DEFAULT_THEME_SETTINGS\n else:\n settings = get_application().theme.settings\n\n def _getopt(name):\n value = options.pop(name, None)\n if value is not None:\n return value\n return settings['pagination.' 
+ name]\n normal = _getopt('normal')\n active = _getopt('active')\n commata = _getopt('commata')\n ellipsis = _getopt('ellipsis')\n threshold = _getopt('threshold')\n left_threshold = _getopt('left_threshold')\n right_threshold = _getopt('right_threshold')\n prev_link = _getopt('prev_link')\n next_link = _getopt('next_link')\n gray_prev_link = _getopt('gray_prev_link')\n gray_next_link = _getopt('gray_next_link')\n simple = _getopt('simple')\n if options:\n raise TypeError('generate() got an unexpected keyword '\n 'argument %r' % iter(options).next())\n\n was_ellipsis = False\n result = []\n prev = None\n next = None\n get_link = lambda x: url_for(self.endpoint, page=x,\n per_page=self.per_page,\n post_id=self.post_id, **self.url_args)\n\n if simple:\n result.append(active % {\n 'url': get_link(self.page),\n 'page': self.page\n })\n if self.page > 1:\n prev = self.page - 1\n if self.page < self.pages:\n next = self.page + 1\n else:\n for num in xrange(1, self.pages + 1):\n if num == self.page:\n was_ellipsis = False\n if num - 1 == self.page:\n next = num\n if num + 1 == self.page:\n prev = num\n if num <= left_threshold or \\\n num > self.pages - right_threshold or \\\n abs(self.page - num) < threshold:\n if result and result[-1] != ellipsis:\n result.append(commata)\n link = get_link(num)\n template = num == self.page and active or normal\n result.append(template % {\n 'url': link,\n 'page': num\n })\n elif not was_ellipsis:\n was_ellipsis = True\n result.append(ellipsis)\n\n if next_link:\n if next is not None:\n result.append(u' <a href=\"%s\" class=\"next\">%s</a>' %\n (get_link(next), _(u'Next »')))\n elif gray_next_link:\n result.append(u' <span class=\"disabled next\">%s</span>' %\n _(u'Next »'))\n if prev_link:\n if prev is not None:\n result.insert(0, u'<a href=\"%s\" class=\"prev\">%s</a> ' %\n (get_link(prev), _(u'« Previous')))\n elif gray_prev_link:\n result.insert(0, u'<span class=\"disabled prev\">%s</span> ' %\n _(u'« Previous'))\n\n return Markup(u''.join(result))", "title": "" }, { "docid": "a3945a6a1b440b882a1afb72f01db9c9", "score": "0.66792846", "text": "def _create_pages(self):\n\n\t\tself.sidebar_modules = [] # add extra sidebar links to this\n\n\t\t# create a page for each class -- recursive for subclasses/subfunctions\n\t\tif len(self.data['classes']) > 0:\n\t\t\t# start a subelement list\n\t\t\tself.sidebar_modules.append('<span>Classes:</span>')\n\t\t\tself.sidebar_modules.append('<ul>')\n\n\t\t\tfor class_item in self.data['classes']:\n\t\t\t\tself._doc_class(class_item)\n\n\t\t\tself.sidebar_modules.append('</ul>')\n\n\n\t\t# create a page for each function\n\t\tif len(self.data['functions']) > 0:\n\t\t\tself.sidebar_modules.append('<span>Functions:</span>')\n\t\t\tself.sidebar_modules.append('<ul>')\n\t\t\t\n\t\t\tfor function_item in self.data['functions']:\n\t\t\t\tself._doc_function(function_item)\n\n\t\t\tself.sidebar_modules.append('</ul>')\n\n\n\t\t# end\n\t\tif len(self.sidebar_modules) > 0:\n\t\t\tself.sidebar += \"<p>Modules:</p>\\n\"\n\t\t\tfor item in self.sidebar_modules:\n\t\t\t\tself.sidebar += item + \"\\n\"\n\n\t\tpass", "title": "" }, { "docid": "ba06ff441157039f6355e13e8514b22b", "score": "0.65176576", "text": "def generate_pages(self, node, depth, kwargs={}):\n for child in node.children.all():\n \n \n page = self.page_class(child, depth, \n exporter = self,\n has_children=child.children.exists(),\n **kwargs)\n \n last_page = self.pages[-1] if self.pages else None\n if last_page:\n page.prev_page = last_page\n last_page.next_page = page\n 
self.pages.append(page)\n self.generate_pages(child, depth + 1)", "title": "" }, { "docid": "3b91fd69ceb1e16b3b51e4070e8c8f63", "score": "0.6274178", "text": "def generate_index_page_list(self, response):\n total_houses_str = response.css('h2.total.fl span::text').extract_first().strip()\n page_count = int(int(total_houses_str) / self.default_house_count + 1)\n for i in range(1, page_count + 1):\n index_url = self.index_url_template.format(str(i))\n self.start_urls.append(index_url)\n return self.initialized()", "title": "" }, { "docid": "171b8daf2ffedafa10102ff9b453c6a8", "score": "0.62389433", "text": "def generate_pages(self, writer):\n write = partial(writer.write_file,\n relative_urls=self.settings['RELATIVE_URLS'],\n override_output=True)\n\n # to minimize the number of relative path stuff modification\n # in writer, articles pass first\n # self.generate_articles(write)\n self.generate_period_archives(write)\n self.generate_direct_templates(write)\n\n # and subfolders after that\n self.generate_categories(write)\n self.generate_authors(write)", "title": "" }, { "docid": "02710a96eb532d702f34996c92665368", "score": "0.62368083", "text": "def page_links(paginator, current, view_name, suffix=\"?page=\"):\n num_pages = paginator.num_pages\n current = int(current)\n page_numbers = [current]\n for num in [1, 2, current - 1, current + 1, num_pages, num_pages - 1]:\n if num not in page_numbers and num > 0 and num <= num_pages:\n page_numbers.append(num)\n page_numbers.sort()\n\n new_page_numbers = [page_numbers[0]]\n j = 1\n for i in range(1, len(page_numbers)):\n if (page_numbers[i] - page_numbers[i-1] > 1):\n new_page_numbers.append(0)\n new_page_numbers.append(page_numbers[i])\n page_numbers = new_page_numbers\n\n return {\n \"page_numbers\": page_numbers,\n \"current\": current,\n \"view_name\": view_name,\n \"suffix\": suffix,\n }", "title": "" }, { "docid": "9f38c0ed0641ed446af60de3e71527e3", "score": "0.621711", "text": "def pagination(context, page):\n paginator, page_num = page.paginator, page.number\n \n pagination_required = paginator.num_pages > 1\n if not pagination_required:\n page_range = []\n else:\n ON_EACH_SIDE = 3\n ON_ENDS = 2\n \n # If there are 10 or fewer pages, display links to every page.\n # Otherwise, do some fancy\n if paginator.num_pages <= 10:\n page_range = range(paginator.num_pages)\n else:\n # Insert \"smart\" pagination links, so that there are always ON_ENDS\n # links at either end of the list of pages, and there are always\n # ON_EACH_SIDE links at either end of the \"current page\" link.\n page_range = []\n if page_num > (ON_EACH_SIDE + ON_ENDS):\n page_range.extend(range(0, ON_EACH_SIDE - 1))\n page_range.append(DOT)\n page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))\n else:\n page_range.extend(range(0, page_num + 1))\n if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):\n page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))\n page_range.append(DOT)\n page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))\n else:\n page_range.extend(range(page_num + 1, paginator.num_pages))\n \n return {\n 'context': context,\n 'page': page,\n 'pagination_required': pagination_required,\n 'page_range': page_range,\n }", "title": "" }, { "docid": "eb4759640ebab8a5ce27c16d5840907d", "score": "0.61539274", "text": "def test_paginator(self):\n nine = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n ten = nine + [10]\n eleven = ten + [11]\n tests = (\n # Each item is two tuples:\n # First tuple is pagination.Paginator parameters 
- object_list, per_page,\n # orphans, and allow_empty_first_page.\n # Second tuple is resulting pagination.Paginator attributes - count,\n # num_pages, and page_range.\n # Ten items, varying orphans, no empty first page.\n ((ten, 4, 0, False), (10, 3, [1, 2, 3])),\n ((ten, 4, 1, False), (10, 3, [1, 2, 3])),\n ((ten, 4, 2, False), (10, 2, [1, 2])),\n ((ten, 4, 5, False), (10, 2, [1, 2])),\n ((ten, 4, 6, False), (10, 1, [1])),\n # Ten items, varying orphans, allow empty first page.\n ((ten, 4, 0, True), (10, 3, [1, 2, 3])),\n ((ten, 4, 1, True), (10, 3, [1, 2, 3])),\n ((ten, 4, 2, True), (10, 2, [1, 2])),\n ((ten, 4, 5, True), (10, 2, [1, 2])),\n ((ten, 4, 6, True), (10, 1, [1])),\n # One item, varying orphans, no empty first page.\n (([1], 4, 0, False), (1, 1, [1])),\n (([1], 4, 1, False), (1, 1, [1])),\n (([1], 4, 2, False), (1, 1, [1])),\n # One item, varying orphans, allow empty first page.\n (([1], 4, 0, True), (1, 1, [1])),\n (([1], 4, 1, True), (1, 1, [1])),\n (([1], 4, 2, True), (1, 1, [1])),\n # Zero items, varying orphans, no empty first page.\n (([], 4, 0, False), (0, 0, [])),\n (([], 4, 1, False), (0, 0, [])),\n (([], 4, 2, False), (0, 0, [])),\n # Zero items, varying orphans, allow empty first page.\n (([], 4, 0, True), (0, 1, [1])),\n (([], 4, 1, True), (0, 1, [1])),\n (([], 4, 2, True), (0, 1, [1])),\n # Number if items one less than per_page.\n (([], 1, 0, True), (0, 1, [1])),\n (([], 1, 0, False), (0, 0, [])),\n (([1], 2, 0, True), (1, 1, [1])),\n ((nine, 10, 0, True), (9, 1, [1])),\n # Number if items equal to per_page.\n (([1], 1, 0, True), (1, 1, [1])),\n (([1, 2], 2, 0, True), (2, 1, [1])),\n ((ten, 10, 0, True), (10, 1, [1])),\n # Number if items one more than per_page.\n (([1, 2], 1, 0, True), (2, 2, [1, 2])),\n (([1, 2, 3], 2, 0, True), (3, 2, [1, 2])),\n ((eleven, 10, 0, True), (11, 2, [1, 2])),\n # Number if items one more than per_page with one orphan.\n (([1, 2], 1, 1, True), (2, 1, [1])),\n (([1, 2, 3], 2, 1, True), (3, 1, [1])),\n ((eleven, 10, 1, True), (11, 1, [1])),\n )\n for params, output in tests:\n self.check_paginator(params, output)", "title": "" }, { "docid": "a25bc5a2c73617805c76987b229f332d", "score": "0.6152125", "text": "def get_pages(self):\n raise NotImplementedError(\"get_pages\")", "title": "" }, { "docid": "58a3f64b467a15dde5fbf886ba5c60a2", "score": "0.6131596", "text": "def test_page_indexes(self):\n ten = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n tests = (\n # Each item is three tuples:\n # First tuple is pagination.Paginator parameters - object_list, per_page,\n # orphans, and allow_empty_first_page.\n # Second tuple is the start and end indexes of the first page.\n # Third tuple is the start and end indexes of the last page.\n # Ten items, varying per_page, no orphans.\n ((ten, 1, 0, True), (1, 1), (10, 10)),\n ((ten, 2, 0, True), (1, 2), (9, 10)),\n ((ten, 3, 0, True), (1, 3), (10, 10)),\n ((ten, 5, 0, True), (1, 5), (6, 10)),\n # Ten items, varying per_page, with orphans.\n ((ten, 1, 1, True), (1, 1), (9, 10)),\n ((ten, 1, 2, True), (1, 1), (8, 10)),\n ((ten, 3, 1, True), (1, 3), (7, 10)),\n ((ten, 3, 2, True), (1, 3), (7, 10)),\n ((ten, 3, 4, True), (1, 3), (4, 10)),\n ((ten, 5, 1, True), (1, 5), (6, 10)),\n ((ten, 5, 2, True), (1, 5), (6, 10)),\n ((ten, 5, 5, True), (1, 10), (1, 10)),\n # One item, varying orphans, no empty first page.\n (([1], 4, 0, False), (1, 1), (1, 1)),\n (([1], 4, 1, False), (1, 1), (1, 1)),\n (([1], 4, 2, False), (1, 1), (1, 1)),\n # One item, varying orphans, allow empty first page.\n (([1], 4, 0, True), (1, 1), (1, 
1)),\n (([1], 4, 1, True), (1, 1), (1, 1)),\n (([1], 4, 2, True), (1, 1), (1, 1)),\n # Zero items, varying orphans, allow empty first page.\n (([], 4, 0, True), (0, 0), (0, 0)),\n (([], 4, 1, True), (0, 0), (0, 0)),\n (([], 4, 2, True), (0, 0), (0, 0)),\n )\n for params, first, last in tests:\n self.check_indexes(params, 'first', first)\n self.check_indexes(params, 'last', last)\n # When no items and no empty first page, we should get pagination.EmptyPage error.\n assert_raises(pagination.EmptyPage, self.check_indexes, ([], 4, 0, False), 1, None)\n assert_raises(pagination.EmptyPage, self.check_indexes, ([], 4, 1, False), 1, None)\n assert_raises(pagination.EmptyPage, self.check_indexes, ([], 4, 2, False), 1, None)", "title": "" }, { "docid": "589fcbcf6705d08c3a728772ae0ba330", "score": "0.60818326", "text": "def _get_gen_pages(self, gen, count: int = None, site=None):\n original_iter = iter(gen)\n\n gen = itertools.islice(original_iter, 0, count)\n\n gen_pages = list(gen)\n\n with suppress(StopIteration):\n gen_pages.append(next(original_iter))\n next(original_iter)\n if not site:\n site = gen_pages[0].site\n gen_pages.append(pywikibot.Page(site, '...'))\n\n for page in gen_pages:\n self.assertIsInstance(page, pywikibot.Page)\n if site:\n self.assertEqual(page.site, site)\n\n return gen_pages", "title": "" }, { "docid": "10cc4137c45385bea2f3f26c1e656bfd", "score": "0.60435414", "text": "def _get_pages(self, verb, route, params, model):\n response_list = []\n\n if params is None:\n params = {}\n params['per_page'] = 100\n\n if 'per_page' not in params:\n params['per_page'] = 100\n\n if 'page' in params:\n response = self._request(verb, route, params)\n if response.status_code != 200:\n logging.error(\"Error::{}\".format(response.text))\n return None\n return CBWParser().parse_response(model, response)\n\n response = self._request(verb, route, params)\n\n if response.status_code != 200:\n logging.error(\"Error::{}\".format(response.text))\n return None\n\n response_list.extend(CBWParser().parse_response(model, response))\n\n while 'next' in response.links:\n next_url = urlparse(response.links['next']['url'])\n params['page'] = parse_qs(next_url .query)['page'][0]\n response = self._request(verb, route, params)\n response_list.extend(CBWParser().parse_response(model, response))\n return response_list", "title": "" }, { "docid": "447e591b96bdb8e94c521f23429f22b0", "score": "0.59836143", "text": "def make_pages_list(self, pages):\n pages_list = \"const pages = [\"\n for page in pages:\n pages_list += '\\n \"pages/{}/index.html,\"'.format(page)\n pages_list += \"\\n];\\n\"\n return pages_list", "title": "" }, { "docid": "9e2b447f17b53e3918c1a541a0be1072", "score": "0.59680253", "text": "def create_pages(self, item, vbox):\n adapters = self.get_adapters(item)\n\n first = True\n for _, name, adapter in adapters:\n try:\n page = adapter.construct()\n if page is None:\n continue\n if first:\n vbox.pack_start(page, False, True, 0)\n first = False\n else:\n expander = Gtk.Expander()\n expander.set_use_markup(True)\n expander.set_label(f\"<b>{name}</b>\")\n expander.add(page)\n expander.show_all()\n vbox.pack_start(expander, False, True, 0)\n except Exception:\n log.error(\n \"Could not construct property page for \" + name, exc_info=True\n )", "title": "" }, { "docid": "b5f5074c12d95d44c3d668e6e386016e", "score": "0.5956614", "text": "def makePages(self, pageCnt, pn=1, template=None, name=None, w=None, h=None, **kwargs):\n for n in range(pageCnt): # First page is n + pn\n # Parent is forced to 
self.\n self.newPage(pn=pn+n, template=template, name=name, w=w, h=h, **kwargs)", "title": "" }, { "docid": "909d71dc88d5a2fb030f624f3da1c4a8", "score": "0.5955188", "text": "def _fill_pages(self):\n\t\tif self.pages is None:\n\t\t\tself.log.info(\"Couldn't get any information about the module.\")\n\t\t\treturn\n\n\t\t# unless this is the index file, it needs to go in the html subdirectory\n\t\tnewpages = {}\n\t\tfor page in self.pages:\n\t\t\tif page != \"/index.html\":\n\t\t\t\tnewpages[\"html/\" + page] = self.pages[page]\n\t\tnewpages[\"/index.html\"] = self.pages[\"/index.html\"]\n\n\t\t# grab the updated version of pages\n\t\tself.pages = newpages\n\n\t\t# iterate through pages and add html frame\n\t\tfor page in self.pages:\n\t\t\t# dont stick the css into an html file\n\t\t\tif page == \"style.css\":\n\t\t\t\tcontinue\n\n\t\t\t# see how many subdirectory levels the page is\n\t\t\tsubdirnum = len(ghtml.get_subdirs(page))\n\n\t\t\t# take the page body information and fill a base html page with it\n\t\t\tself.pages[page] = ghtml.fill_base( title = self.title,\n\t\t\t\t\t\t\t\t\t\t\t\troot_url = (subdirnum - 1) * '../',\n\t\t\t\t\t\t\t\t\t\t\t\tbody = self.pages[page],\n\t\t\t\t\t\t\t\t\t\t\t\tsidebar = self.sidebar,\n\t\t\t\t\t\t\t\t\t\t\t\tfooter = self.footer)\n\n\t\tpass", "title": "" }, { "docid": "ed30a5201ffd260586cf61a83fd26b51", "score": "0.5953235", "text": "def _generatePaginationLinks(\n self, offsetval, returned_count, number_found, params):\n\n doc_limit = self._getDocLimit()\n pcopy = params.copy()\n if offsetval - doc_limit >= 0:\n pcopy['offset'] = offsetval - doc_limit\n prev_link = '/psearch?' + urllib.urlencode(pcopy)\n else:\n prev_link = None\n if ((offsetval + doc_limit <= self._OFFSET_LIMIT)\n and (returned_count == doc_limit)\n and (offsetval + returned_count < number_found)):\n pcopy['offset'] = offsetval + doc_limit\n next_link = '/psearch?' 
+ urllib.urlencode(pcopy)\n else:\n next_link = None\n return (prev_link, next_link)", "title": "" }, { "docid": "73a2eccccb520236572d48d29034c924", "score": "0.59350693", "text": "def get_pagination_parameters(request, paginator, paginated):\n LEADING_PAGE_RANGE_DISPLAYED = TRAILING_PAGE_RANGE_DISPLAYED = 10\n LEADING_PAGE_RANGE = TRAILING_PAGE_RANGE = 8\n NUM_PAGES_OUTSIDE_RANGE = 2\n ADJACENT_PAGES = 4\n\n pages = paginator.num_pages\n page = paginated.number\n in_leading_range = in_trailing_range = False\n pages_outside_leading_range = pages_outside_trailing_range = range(0)\n if pages <= LEADING_PAGE_RANGE_DISPLAYED + NUM_PAGES_OUTSIDE_RANGE + 1:\n in_leading_range = in_trailing_range = True\n page_range = [n for n in range(1, pages + 1)]\n elif page <= LEADING_PAGE_RANGE:\n in_leading_range = True\n page_range = [n for n in range(1, LEADING_PAGE_RANGE_DISPLAYED + 1)]\n pages_outside_leading_range = [\n n + pages for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]\n elif page > pages - TRAILING_PAGE_RANGE:\n in_trailing_range = True\n page_range = [n for n in range(\n pages - TRAILING_PAGE_RANGE_DISPLAYED + 1, pages + 1)\n if n > 0 and n <= pages]\n pages_outside_trailing_range = [\n n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]\n else:\n page_range = [n for n in range(\n page - ADJACENT_PAGES, page + ADJACENT_PAGES + 1)\n if n > 0 and n <= pages]\n pages_outside_leading_range = [\n n + pages for n in range(0, -NUM_PAGES_OUTSIDE_RANGE, -1)]\n pages_outside_trailing_range = [\n n + 1 for n in range(0, NUM_PAGES_OUTSIDE_RANGE)]\n\n # Now try to retain GET params, except for 'page'\n params = request.GET.copy()\n if 'page' in params:\n del(params['page'])\n get_params = params.urlencode()\n prev = paginated.previous_page_number() if paginated.has_previous() else \"\"\n\n return {\n 'pages': pages,\n 'page': page,\n 'previous': prev,\n 'next': paginated.next_page_number() if paginated.has_next() else \"\",\n 'has_previous': paginated.has_previous(),\n 'has_next': paginated.has_next(),\n 'page_range': page_range,\n 'in_leading_range': in_leading_range,\n 'in_trailing_range': in_trailing_range,\n 'pages_outside_leading_range': pages_outside_leading_range,\n 'pages_outside_trailing_range': pages_outside_trailing_range,\n 'get_params': get_params,\n 'count': paginator.count,\n }", "title": "" }, { "docid": "520186a2ff96f069d212438fd3609c57", "score": "0.5923028", "text": "def loadpages(self):\n newpages = {}\n for page in listfiles(PAGES_DIR):\n (path,ext) = os.path.splitext(page)\n if ext == '.markdown' or ext == '.html':\n path = os.path.join(PAGES_DIR,page)\n (rest,ext) = os.path.splitext(page)\n # reuse existing pages where we can\n if rest in self.pages:\n newpages[rest] = self.pages[rest]\n else:\n p = Page(self,path)\n for decorators in self.page_decorators:\n decorators.extend_page()\n newpages[rest] = p\n self.pages = newpages", "title": "" }, { "docid": "e727adfb5953ef2317650250a05855fc", "score": "0.59183145", "text": "def pagination_management(paginator, selected_page):\n \n try:\n objects = paginator.page(selected_page)\n except PageNotAnInteger:\n objects = paginator.page(1)\n except EmptyPage:\n objects = paginator.page(paginator.num_pages)\n return objects", "title": "" }, { "docid": "21a1480319a474bc49a85a94c9b8918c", "score": "0.5917556", "text": "def paginator(context, adjacent_pages=3):\n \n # Get the base query string to be used in the pagination\n request = context[\"request\"]\n base_query_string_data = {}\n for (arg_key, arg_value) in request.GET.items():\n if arg_key 
!= PAGE_NUMBER_KEY:\n arg_key_encoded = arg_key.encode(\"utf8\")\n arg_value_encoded = arg_value.encode(\"utf8\")\n base_query_string_data[arg_key_encoded] = arg_value_encoded\n\n paginator_object = context[\"paginator\"]\n page_count = paginator_object.num_pages\n current_page = int(request.GET.get(PAGE_NUMBER_KEY, 1))\n\n # Get the links to the pages surrounding the current one\n bottom_page = max(current_page - adjacent_pages, 1)\n top_page = min(current_page + adjacent_pages, page_count) \n page_numbers = range(bottom_page, top_page + 1)\n\n return {\n \"current_page\": current_page,\n \"page_count\": page_count,\n \"page_numbers\": page_numbers,\n \"show_first\": 1 not in page_numbers,\n \"show_last\": page_count not in page_numbers,\n \"previous_page_number\": current_page - 1,\n \"next_page_number\": current_page + 1,\n \"base_query_string_data\": base_query_string_data,\n }", "title": "" }, { "docid": "aac55713cf835e0fc027ac72d982fb9f", "score": "0.5898167", "text": "def __init__(self) -> None:\n self.pages = []", "title": "" }, { "docid": "cf1c702aa080aefce14205cc430d6b4e", "score": "0.5882981", "text": "def pages(self):\n if self._pages is None:\n context = layout.LayoutContext(\n self.enable_hinting, self.style_for, self.get_image_from_uri)\n self._pages = list(layout.layout_document(\n context, self.formatting_structure))\n return self._pages", "title": "" }, { "docid": "24e59978d90f1a146323b0c1397c3242", "score": "0.58063865", "text": "def show_paginator(context, paginator, page, current_page, request, anchor=\"\", non_grouped_number_of_results = -1 ):\r\n \r\n adjacent_pages = 3\r\n total_wanted = adjacent_pages * 2 + 1\r\n min_page_num = max(current_page - adjacent_pages, 1)\r\n max_page_num = min(current_page + adjacent_pages + 1, paginator.num_pages + 1)\r\n\r\n num_items = max_page_num - min_page_num\r\n\r\n if num_items < total_wanted and num_items < paginator.num_pages:\r\n if min_page_num == 1:\r\n # we're at the start, increment max_page_num\r\n max_page_num += min(total_wanted - num_items, paginator.num_pages - num_items)\r\n else:\r\n # we're at the end, decrement\r\n min_page_num -= min(total_wanted - num_items, paginator.num_pages - num_items)\r\n\r\n # although paginator objects are 0-based, we use 1-based paging\r\n page_numbers = [n for n in range(min_page_num, max_page_num) if n > 0 and n <= paginator.num_pages]\r\n \r\n params = urllib.urlencode([(key, value.encode('utf-8')) for (key, value) in request.GET.items() if key.lower() != u\"page\"])\r\n \r\n if params == \"\":\r\n url = request.path + u\"?page=\"\r\n else:\r\n url = request.path + u\"?\" + params + u\"&page=\"\r\n\r\n return {\r\n \"page\": page,\r\n \"paginator\": paginator,\r\n \"current_page\": current_page,\r\n \"page_numbers\": page_numbers,\r\n \"show_first\": 1 not in page_numbers,\r\n \"show_last\": paginator.num_pages not in page_numbers,\r\n \"url\" : url,\r\n \"media_url\": context['media_url'],\r\n \"anchor\": anchor,\r\n \"non_grouped_number_of_results\": non_grouped_number_of_results\r\n }", "title": "" }, { "docid": "d9d08fe4ea612250f5ffc97f408d901a", "score": "0.5784741", "text": "def parse(self, response):\n s = Selector(response)\n pagination = s.css(\".pagination\")\n pagelinks = pagination.xpath('//a[contains(@href, \"&page=\")]/@href').extract()\n for pagelink in pagelinks:\n request = Request(\n urljoin(self.root, pagelink),\n callback=self.parse_jobspage,\n dont_filter=True,\n )\n yield request", "title": "" }, { "docid": "08998b293ef44108c4d93b015d1e23c7", "score": 
"0.57800114", "text": "def build_page(self):\n if self.query:\n try:\n page_no = int(self.request.GET.get('page', 1))\n except (TypeError, ValueError):\n raise Http404(\"Not a valid number for page.\")\n\n if page_no < 1:\n raise Http404(\"Pages should be 1 or greater.\")\n\n start_offset = (page_no - 1) * self.results_per_page\n self.results[start_offset:start_offset + self.results_per_page]\n\n paginator = Paginator(self.results, self.results_per_page)\n\n try:\n page = paginator.page(page_no)\n except InvalidPage:\n raise Http404(\"No such page!\")\n\n return (paginator, page)\n else:\n return (None, None)", "title": "" }, { "docid": "8ca2361f00c677ef6c375454c72e564a", "score": "0.5776995", "text": "def generator(self):\n cat = pywikibot.Category(self.site,\n \"%s:%s\"\n % (self.site.namespaces.CATEGORY.custom_name,\n self.source))\n gen = pagegenerators.CategorizedPageGenerator(cat)\n gen = pagegenerators.NamespaceFilterPageGenerator(\n gen, self.site.namespaces.FILE.custom_name)\n if not self.filter:\n gen = pagegenerators.PreloadingGenerator(gen)\n # gen = pagegenerators.ImageGenerator(gen)\n for item in gen:\n page = DUP_Image(item.site, item.title(),\n not self.filter and item.get() or None,\n item.editTime())\n if self.filter and page.hasRefs:\n continue\n if not self.filter and not page.validReasons:\n continue\n yield page", "title": "" }, { "docid": "b332c06add8221f8a4cff58e7ba47f87", "score": "0.57711005", "text": "def gen_pages_for_shop(shop_id):\n for i in range(1, 2):\n uri = ''.join((url_shop, shop_id,\n '/#reviews?ref=pagination&page=', str(i)))\n print(uri)\n yield uri", "title": "" }, { "docid": "c9744d40d4cf44d65760b67dd57b0b76", "score": "0.5763879", "text": "def allpages(self, start=None, prefix=None, namespace='0', filterredir='all',\n minsize=None, maxsize=None, prtype=None, prlevel=None,\n limit=None, dir='ascending', filterlanglinks='all', generator=True,\n end=None):\n\n pfx = listing.List.get_prefix('ap', generator)\n kwargs = dict(listing.List.generate_kwargs(\n pfx, ('from', start), ('to', end), prefix=prefix,\n minsize=minsize, maxsize=maxsize, prtype=prtype, prlevel=prlevel,\n namespace=namespace, filterredir=filterredir, dir=dir,\n filterlanglinks=filterlanglinks,\n ))\n return listing.List.get_list(generator)(self, 'allpages', 'ap',\n limit=limit, return_values='title',\n **kwargs)", "title": "" }, { "docid": "8a080ddb3221c9160acdd4fc5e48e91d", "score": "0.576003", "text": "def GetPageCount(self):", "title": "" }, { "docid": "e783672e3d5a5d38787eea8e528fafd5", "score": "0.57418764", "text": "def pages(per_page=1, show_page=True):\n def page_source(coro):\n async def create_page_header(self, menu, entry):\n result = await discord.utils.maybe_coroutine(coro, self, menu, entry)\n return menu.generate_page(result, self._max_pages)\n\n def __init__(self, list_pages):\n super(self.__class__, self).__init__(list_pages, per_page=per_page)\n kwargs = {\n '__init__': __init__,\n 'format_page': (coro, create_page_header)[show_page]\n }\n return type(coro.__name__, (menus.ListPageSource,), kwargs)\n return page_source", "title": "" }, { "docid": "119d5b173254b7d2f06149bbaf91528b", "score": "0.57410246", "text": "def paginate(self):\n raise NotImplementedError", "title": "" }, { "docid": "30775c9803d6cbe75b6b3b08ea2e22e7", "score": "0.573554", "text": "def parse_page(self):\n self._nav_to_header_parent()\n self._nav_to_posts_parent()\n self.parse_library()\n self.parse_last_pg_url()\n self.compile_records()", "title": "" }, { "docid": 
"24b88d6214f0138d7abd58c251665b1f", "score": "0.57333153", "text": "def paginate(self, url):\n #browser = Browser(\"chrome\")\n r = requests.get(\"http://localhost:8950/render.html?\", params= {\"url\":url})\n for i in range(10):\n try:\n browser.find_by_css(\".PagerLinks > a\")[-2].click()\n pages.append(browser.html)\n except:\n break\n return pages", "title": "" }, { "docid": "86045b49fa09d83bdbcb9154019baf9f", "score": "0.5726248", "text": "def _get_all_pages(cls, start_url, params=None):\n entries = []\n # Composing URL in advance since the link to the next page already have the params of the\n # first request and using `get_soup` with the params argument creates duplicate params\n next_url = ds_utils.compose_url_get(start_url, params)\n while next_url:\n soup = cls._get_soup(next_url)\n for entry in soup.feed.find_all('entry'):\n entries.append(cls(cls._parse_entry(entry)))\n next_link = soup.find('link', rel=\"next\")\n next_url = next_link and next_link.attrs.get('href', None)\n return entries", "title": "" }, { "docid": "6494149e83f9c5877bfd1cfb278abc61", "score": "0.5702473", "text": "def allpages(self, limit=100, namespace=0,\n prefix=None, getinfo=None, **evil):\n params = {\n 'action': 'query',\n 'list': 'allpages',\n 'aplimit': limit,\n 'apprefix': prefix,\n 'apnamespace': namespace,\n }\n params.update(evil)\n return self._generate(\n params,\n Page,\n ('query', 'allpages'),\n getinfo\n )", "title": "" }, { "docid": "4f6153aa78f96b69d3086a47417e0c71", "score": "0.5702232", "text": "def querypages(self,tag=None,category=None,maxitems=None,order=None):\n \n if self.preview_mode:\n for p in self.pages.values():\n p.refresh()\n \n pages = []\n for p in self.pages.values():\n if tag is not None and tag not in p.tags:\n continue\n if category is not None and category != p.category:\n continue \n pages.append(p)\n if order:\n if order == 'date_created':\n pages.sort(key=lambda p:p.date_created())\n pages.reverse()\n elif order == 'date_modified':\n pages.sort(key=lambda p:p.date_modified())\n pages.reverse()\n elif order == 'title':\n pages.sort(key=lambda p:p.title)\n \n if maxitems is not None:\n pages = pages[0:maxitems]\n return pages", "title": "" }, { "docid": "2061573eaf01340af95789722f841b36", "score": "0.5683553", "text": "def _setup_pages(cls, config):\n from cms.exceptions import NoHomeFound\n from cms.models import Page\n from cms.utils import get_language_list\n from django.conf import settings\n from django.utils.translation import override\n\n app_page = None\n get_url = False\n if getattr(settings, \"ALDRYN_SEARCH_CMS_PAGE\", False):\n from aldryn_search.search_indexes import TitleIndex\n\n def fake_url(self, obj):\n return \"\"\n\n get_url = TitleIndex.get_url\n TitleIndex.get_url = fake_url\n site = Site.objects.get_current()\n auto_sites = cls.auto_setup.get(\"sites\", True)\n if auto_sites is True or site.pk in auto_sites:\n if getattr(cls, \"app_config\", False):\n configs = cls.app_config.objects.all()\n if not configs.exists():\n config = cls._create_config()\n else:\n config = configs.first()\n\n langs = get_language_list(site.pk)\n if not Page.objects.on_site(site.pk).filter(application_urls=cls.__name__).exists():\n for lang in langs:\n with override(lang):\n if config:\n if cls.auto_setup[\"config_translated_fields\"]:\n cls._create_config_translation(config, lang)\n namespace = config.namespace\n elif cls.app_name:\n namespace = cls.app_name\n else:\n namespace = None\n try:\n home = Page.objects.get_home(site.pk).get_draft_object()\n except 
NoHomeFound:\n home = None\n set_home = hasattr(Page, \"set_as_homepage\")\n home = cls._create_page(home, lang, cls.auto_setup[\"home title\"], site=site, set_home=set_home)\n app_page = cls._create_page(\n app_page, lang, cls.auto_setup[\"page title\"], cls.__name__, home, namespace, site=site\n )\n if get_url:\n TitleIndex.get_url = get_url", "title": "" }, { "docid": "c9a5511d8c86c396c1982627dcc07741", "score": "0.5668219", "text": "def generatePageImages(self):", "title": "" }, { "docid": "4c97dfdf4d85e08e46c436c499df279f", "score": "0.56672287", "text": "def page_range(self):\n b_size = self._BATCH_SIZE\n range_start = max(self.page_number() - 5, 0)\n range_stop = min(max(self.page_number() + 5, 10), self.page_count())\n\n pages = []\n for p in range(range_start, range_stop):\n b_start = p * b_size\n pages.append({'number': p + 1,\n 'url': self._getNavigationURL(b_start)})\n return pages", "title": "" }, { "docid": "93df7f6c3692b8dcd223550674198c28", "score": "0.56533444", "text": "def get_pages(self):\n # Get base url\n base_url = self.chapter_url\n max_retries = deepcopy(self.max_retries)\n wait_retry_time = deepcopy(self.wait_time)\n\n while True:\n # Get javascript blocks\n r = requests.get(base_url, verify=self.verify_https)\n soup = bsoup.BeautifulSoup(r.text, 'html.parser')\n scripts = [script for script in soup.find_all(\n 'script', attrs={'type': 'text/javascript'})]\n\n if scripts:\n # Get total pages\n for script in scripts:\n if script.contents:\n matched_groups = re.search(\n 'var total_pages\\s?=\\s?(\\d*)\\s?;',\n script.contents[0])\n if matched_groups:\n total_pages = int(matched_groups.group(1))\n break\n # Get page urls\n page_urls = [\"%s/%d.html\" % (self.chapter_url, i + 1)\n for i in range(total_pages)]\n page_num = [i + 1 for i in range(total_pages)]\n pages = list(zip(page_urls, page_num))\n shuffle(pages)\n\n return True, pages\n\n elif (max_retries > 0):\n # Idea from manga_downloader (which in turn was from wget)\n sleep(uniform(0.5 * wait_retry_time, 1.5 * wait_retry_time))\n max_retries -= 1\n else:\n return False, None", "title": "" }, { "docid": "09ca102245f15657942a1c554de5d3c4", "score": "0.56481713", "text": "def apply_paginators(self, paginators, max_size=None):\n pagination = {\n 'limit': self.DEFAULT_LIMIT,\n 'offset': self.DEFAULT_OFFSET\n }\n for paginator in paginators:\n try:\n value = int(paginator.value)\n\n # Raise if the maximum page size was exceeded.\n if max_size is not None and \\\n paginator.strategy in ['limit', 'number'] and \\\n value > max_size:\n raise ValueError('Maximum query size exceeded.')\n\n pagination[paginator.strategy] = value\n except ValueError:\n raise errors.InvalidPaginationValue(item=paginator)\n if 'number' in pagination:\n limit = pagination['limit']\n pagination['offset'] = pagination['number'] * limit - limit\n return self.limit(pagination['limit']).offset(pagination['offset'])", "title": "" }, { "docid": "78e3ea1e0415c435376af4176808b171", "score": "0.5643073", "text": "def paginator(parser, token):\n args = token.split_contents()\n\n # Parameters\n page = args[1]\n\n style=None\n preload=True\n for a in args[1:]:\n if a[0:len('style=')] == 'style=':\n style = a[len('style='):]\n if a == 'nopreload':\n preload = False\n\n # Read nodelist\n nodelist = parser.parse(('endpaginate',))\n parser.delete_first_token()\n\n # Return meta node\n return PaginateNode(page, nodelist, style, preload)", "title": "" }, { "docid": "0a91e8db52121f3910dd0372ba5ef0ba", "score": "0.56149274", "text": "def 
__build_pages_matrices(self, keypoint_data_pages, label_data_pages):\n pages = []\n for keypoints, labels in zip(keypoint_data_pages, label_data_pages):\n pages.append(self.__build_page_matrix(keypoints, labels))\n return pages", "title": "" }, { "docid": "892648a228137fec2beaf03eebed7b94", "score": "0.5593152", "text": "def paginate(objects, page_num, items_per_page=20, padding_pages=3):\n\n context = {}\n paginate = Paginator(objects, items_per_page)\n\n try:\n page = paginate.page(page_num)\n context['current_page'] = int(page_num)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n page = paginate.page(1)\n context['current_page'] = 1\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n page = paginate.page(paginate.num_pages)\n context['current_page'] = paginate.num_pages\n\n context['objects'] = page.object_list\n context['totalPages'] = paginate.num_pages\n\n startPage = max(context['current_page'] - padding_pages - 1, 0)\n endPage = context['current_page'] + padding_pages\n context['pages'] = paginate.page_range[startPage:endPage]\n\n if page.has_next():\n context['nextPage'] = page.next_page_number()\n context['lastPage'] = paginate.num_pages\n else:\n context['nextPage'] = False\n context['lastPage'] = False\n\n if page.has_previous():\n context['prevPage'] = page.previous_page_number()\n context['firstPage'] = 1\n else:\n context['prevPage'] = False\n context['firstPage'] = False\n\n if not 1 in context['pages']:\n context['pages'].insert(0, '...')\n if not paginate.num_pages in context['pages']:\n context['pages'].append('...')\n\n return context", "title": "" }, { "docid": "07d667d9743e74f6e1954708ec6ee21f", "score": "0.55680984", "text": "def get_pages(context):\n # set the current search context\n if context is None:\n context = invoke_context()\n print('invoked context: %s' % context)\n limit = request.args.get('limit', None)\n return jsonify([page.to_json() for page in Page.all(index=context, limit=limit)])", "title": "" }, { "docid": "162c62851ddce79676488d4c7c8f6bb4", "score": "0.5564884", "text": "def _page0(self, page=None, pageSize=None):\n yield self.cpage\n self.npage = page\n url0 = self.mkUrl(page=self.npage, pageSize=self.pageSize)\n r = self.fetch(url=url0)\n self.cpage = self.extract(list=r.read())", "title": "" }, { "docid": "a694f6aebd5ee1b49db857fc03c39ffd", "score": "0.5563814", "text": "def pages(self, pages):\n\n self._pages = pages", "title": "" }, { "docid": "1af89ac1720d4323cce7f21ab3477e31", "score": "0.55627286", "text": "def build(self,flowables,onFirstPage=_doNothing, onLaterPages=_doNothing, canvasmaker=canvas.Canvas):\n self._calc() #in case we changed margins sizes etc\n frameT = Frame(self.leftMargin, self.bottomMargin, self.width, self.height, id='normal')\n self.addPageTemplates([PageTemplate(id='First',frames=frameT, onPage=onFirstPage,pagesize=self.pagesize),\n PageTemplate(id='Later',frames=frameT, onPage=onLaterPages,pagesize=self.pagesize)])\n if onFirstPage is _doNothing and hasattr(self,'onFirstPage'):\n self.pageTemplates[0].beforeDrawPage = self.onFirstPage\n if onLaterPages is _doNothing and hasattr(self,'onLaterPages'):\n self.pageTemplates[1].beforeDrawPage = self.onLaterPages\n BaseDocTemplate.build(self,flowables, canvasmaker=canvasmaker)", "title": "" }, { "docid": "18a72d8ef682f834541ee089f4705d11", "score": "0.5561412", "text": "def __init__(self, prop, **kwargs):\n QueryGenerator.__init__(self, prop=prop, **kwargs)\n self.resultkey = \"pages\"", "title": "" 
}, { "docid": "3b628462d31f4bb7c6c233c08d1ef138", "score": "0.55603456", "text": "def main_paginate():\n import argparse\n\n parser = argparse.ArgumentParser(prog='latexpages-paginate',\n description='Computes and updates start page numbers in compiled parts and contents')\n\n parser.add_argument('--version', action='version',\n version='%%(prog)s %s' % __version__)\n\n parser.add_argument('filename',\n help='INI file configuring the parts and paginate options')\n\n args = parser.parse_args()\n paginate(args.filename)", "title": "" }, { "docid": "42815a9482b425f90187ee638272a70c", "score": "0.555997", "text": "def get_main_pages_urls(self) -> Generator[str, None, None]:\n return (f\"{self.main_url}p={page}\" for page in range(1, self.pages_number + 1))", "title": "" }, { "docid": "1c6c1ec39a0fc6ab05220b4de04c594f", "score": "0.55559534", "text": "def _getPages(request, filter_regex=None):\n filter = None\n if filter_regex:\n filter = re.compile(filter_regex).match\n pages = request.rootpage.getPageList(filter=filter)\n pages.sort()\n return pages", "title": "" }, { "docid": "8e3001056fd0cc3bebf94aa1ef39f1df", "score": "0.55523103", "text": "def pagination(page, objects_caption, query_string=''):\n\n if not hasattr(page, 'has_other_pages') or not page.has_other_pages():\n return {\n 'page_object': page,\n }\n\n padding = settings.PAGINATION_PADDING\n tail = settings.PAGINATION_TAIL\n\n current_page = page.number\n pages_count = page.paginator.num_pages + 1\n\n # Create a list of pages which are within +/- `padding` items around the current one\n main_range_from = max(current_page - padding, 1)\n main_range_to = min(main_range_from + padding * 2 + 1, pages_count)\n main_range = range(main_range_from, main_range_to)\n\n # Create a list of `tail` pages located at the beginning of the navigation block\n left_range_from = 1\n left_range_to = min(left_range_from + tail, main_range_from)\n left_range = range(left_range_from, left_range_to)\n\n # If the lists main_range and left_range overlap, join them\n # to avoid a discontinuity in the navigation block\n if (main_range[0] - 1) <= tail:\n main_range = left_range + main_range\n left_range = []\n\n # Same for the right side of the navigation block\n right_range_to = pages_count\n right_range_from = max(right_range_to - tail, main_range_to)\n right_range = range(right_range_from, right_range_to)\n\n if (main_range[-1] + 1) >= (pages_count - tail):\n main_range = main_range + right_range\n right_range = []\n\n if query_string:\n query_string = '&' + query_string\n\n return {\n 'left_range': left_range,\n 'main_range': main_range,\n 'right_range': right_range,\n 'page': page,\n 'query_string': query_string,\n 'objects_caption': objects_caption,\n }", "title": "" }, { "docid": "7b5eeb014aa59cbc9acd49b4a2c0f902", "score": "0.5549387", "text": "def make_pagination(query, offset, list_name=\"objects\"):\n if type(query) == list:\n objs = len(query)\n else:\n objs = query.count()\n if objs == 0:\n return dict()\n if offset >= objs or offset%5 != 0:\n raise Exception(\"Bad request\")\n\n page = offset/5 + 1\n prev_pages = list(enumerate(range(0, offset, 5), start=1))\n next_pages = list(enumerate(range(offset+5, objs, 5), start=page+1))\n\n d = {\n list_name: query[offset:offset+5],\n \"page\": page,\n \"next_pages\": next_pages,\n \"prev_pages\": prev_pages,\n }\n\n return d", "title": "" }, { "docid": "686729ab98feb7d50d5d73706ca7b6a3", "score": "0.553319", "text": "def before():\n g.pages = PagesRepository(pages)", "title": "" }, { "docid": 
"48778d836e6ec813e8fcc57ed67db601", "score": "0.5519491", "text": "def process_multi_page_menu(pages): \n date = get_date(pages[0])\n lunch = get_meal(multi_page_classify,pages[1])\n dinner = get_meal(multi_page_classify,pages[2])\n\n return {\"dinner\":dinner,\"lunch\":lunch,\"base_date\":date}", "title": "" }, { "docid": "4cb829c1b7d9c5db3149c1c8e838bdf3", "score": "0.55104864", "text": "def add_pagination_context(context): \n per_page = context[\"paginator\"].per_page\n page_obj = context[\"page_obj\"]\n context[\"showing_first\"] = per_page * (page_obj.number - 1) + 1 if page_obj else 0\n context[\"showing_end\"] = (\n context[\"showing_first\"] + len(page_obj) - 1 if page_obj else 0\n )\n return context", "title": "" }, { "docid": "65c7b0d0ae3a88f4475c25398df9344c", "score": "0.55101967", "text": "def __init__(self, generator, g_content=False, **kwargs):\n # get some basic information about every page generated\n if 'prop' in kwargs:\n kwargs['prop'] += \"|info|imageinfo|categoryinfo\"\n else:\n kwargs['prop'] = 'info|imageinfo|categoryinfo'\n if g_content:\n # retrieve the current revision\n kwargs['prop'] += \"|revisions\"\n if \"rvprop\" in kwargs:\n kwargs[\"rvprop\"] += \"ids|timestamp|flags|comment|user|content\"\n else:\n kwargs[\"rvprop\"] = \"ids|timestamp|flags|comment|user|content\"\n if \"inprop\" in kwargs:\n if \"protection\" not in kwargs[\"inprop\"]:\n kwargs[\"inprop\"] += \"|protection\"\n else:\n kwargs['inprop'] = 'protection'\n if \"iiprop\" in kwargs:\n kwargs[\"iiprop\"] += 'timestamp|user|comment|url|size|sha1|metadata'\n else:\n kwargs['iiprop'] = 'timestamp|user|comment|url|size|sha1|metadata'\n QueryGenerator.__init__(self, generator=generator, **kwargs)\n self.resultkey = \"pages\" # element to look for in result", "title": "" }, { "docid": "8e2934ef3a85377fbc0a457a8517d0b8", "score": "0.5501297", "text": "def render_pagination(pagination: Optional[Dict[str, Any]]) -> None:\n if pagination and (int(pagination[\"current\"]) + 1 <= int(pagination[\"total\"])):\n kwargs = {\"page\": int(pagination[\"current\"]) + 1}\n if plugin.settings.exclude_anime == \"true\" and \"start_from\" in pagination:\n kwargs[\"start_from\"] = pagination[\"start_from\"]\n img = plugin.routing.build_icon_path(\"next_page\")\n # Next\n li = plugin.list_item(\n name=f\"[COLOR FFFFF000]{localize(32016)}[/COLOR]\", iconImage=img, thumbnailImage=img\n )\n url = plugin.routing.add_kwargs_to_url(**kwargs)\n xbmcplugin.addDirectoryItem(plugin.handle, url, li, True)\n img = plugin.routing.build_icon_path(\"home\")\n home_url = plugin.routing.build_url(\"/\")\n # Home\n li = plugin.list_item(\n name=f\"[COLOR FFFFF000]{localize(32017)}[/COLOR]\", iconImage=img, thumbnailImage=img\n )\n xbmcplugin.addDirectoryItem(plugin.handle, home_url, li, True)\n xbmcplugin.endOfDirectory(plugin.handle)", "title": "" }, { "docid": "2cdd07cf54df71c53bc87060436c9aac", "score": "0.5495602", "text": "def gen_tasks(self):\n self.site.scan_posts()\n yield self.group_task()\n\n # Cache classification sets per language for taxonomies where\n # add_other_languages_variable is True.\n classification_set_per_lang = {}\n for taxonomy in self.site.taxonomy_plugins.values():\n if taxonomy.add_other_languages_variable:\n lookup = self.site.posts_per_classification[taxonomy.classification_name]\n cspl = {lang: set(lookup[lang].keys()) for lang in lookup}\n classification_set_per_lang[taxonomy.classification_name] = cspl\n\n # Collect post lists for classification pages and determine whether\n # they should be generated.\n 
post_lists_per_lang = {}\n for taxonomy in self.site.taxonomy_plugins.values():\n plpl = {}\n for lang in self.site.config[\"TRANSLATIONS\"]:\n result = {}\n for classification, posts in self.site.posts_per_classification[taxonomy.classification_name][lang].items():\n # Filter list\n filtered_posts = self._filter_list(posts, lang)\n if len(filtered_posts) == 0 and taxonomy.omit_empty_classifications:\n generate_list = generate_rss = generate_atom = False\n else:\n # Should we create this list?\n generate_list = taxonomy.should_generate_classification_page(classification, filtered_posts, lang)\n generate_rss = taxonomy.should_generate_rss_for_classification_page(classification, filtered_posts, lang)\n generate_atom = taxonomy.should_generate_atom_for_classification_page(classification, filtered_posts, lang)\n result[classification] = (filtered_posts, generate_list, generate_rss, generate_atom)\n plpl[lang] = result\n post_lists_per_lang[taxonomy.classification_name] = plpl\n\n # Now generate pages\n for lang in self.site.config[\"TRANSLATIONS\"]:\n # To support that tag and category classifications share the same overview,\n # we explicitly detect this case:\n ignore_plugins_for_overview = set()\n if 'tag' in self.site.taxonomy_plugins and 'category' in self.site.taxonomy_plugins and self.site.link(\"tag_index\", None, lang) == self.site.link(\"category_index\", None, lang):\n # Block both plugins from creating overviews\n ignore_plugins_for_overview.add(self.site.taxonomy_plugins['tag'])\n ignore_plugins_for_overview.add(self.site.taxonomy_plugins['category'])\n for taxonomy in self.site.taxonomy_plugins.values():\n if not taxonomy.is_enabled(lang):\n continue\n # Generate list of classifications (i.e. classification overview)\n if taxonomy not in ignore_plugins_for_overview:\n if taxonomy.template_for_classification_overview is not None:\n for task in self._generate_classification_overview(taxonomy, lang):\n yield task\n\n # Process classifications\n for classification, (filtered_posts, generate_list, generate_rss, generate_atom) in post_lists_per_lang[taxonomy.classification_name][lang].items():\n for task in self._generate_classification_page(taxonomy, classification, filtered_posts,\n generate_list, generate_rss, generate_atom, lang,\n post_lists_per_lang[taxonomy.classification_name],\n classification_set_per_lang.get(taxonomy.classification_name)):\n yield task\n # In case we are ignoring plugins for overview, we must have a collision for\n # tags and categories. 
Handle this special case with extra code.\n if ignore_plugins_for_overview:\n for task in self._generate_tag_and_category_overview(self.site.taxonomy_plugins['tag'], self.site.taxonomy_plugins['category'], lang):\n yield task", "title": "" }, { "docid": "cd4dd2f9827651f7a5ae593b6b6026e5", "score": "0.54944646", "text": "def scraping_pages_links(self, void: str) -> List[str]:\r\n # Create link\r\n link = self.page + void\r\n\r\n try:\r\n # Read website, encode and create HTML parser\r\n soup_pages = self.enterPage_parser(link)\r\n\r\n # Extract max page number\r\n res = soup_pages.findAll('script')\r\n lengths = [len(str(el)) for el in res]\r\n json_object = json.loads(res[lengths.index(max(lengths))].contents[0])\r\n max_page_num = json_object[\"props\"][\"pageProps\"][\"tracking\"]['listing']['page_count']\r\n\r\n # Create all pages links\r\n all_pages_links = [link + '?page=' + str(page) for page in range(1, max_page_num + 1)]\r\n\r\n except:\r\n all_pages_links = link\r\n\r\n return all_pages_links", "title": "" }, { "docid": "926c4f2c038863745b487488d4021e1f", "score": "0.54927444", "text": "def paginator(context, adjacent_pages=2):\n if not 'page' in context:\n # improper use of paginator tag, bail out\n return {}\n\n page = context['page']\n page_no = int(page.number)\n\n s = max(1, page_no - adjacent_pages - max(0, page_no+adjacent_pages-page.paginator.num_pages))\n page_numbers = range(s, min(page.paginator.num_pages, s+2*adjacent_pages)+1)\n\n return {\n 'page': page,\n 'results_per_page': context['results_per_page'],\n 'page_numbers': page_numbers,\n 'show_first': 1 not in page_numbers,\n 'show_last': page.paginator.num_pages not in page_numbers,\n }", "title": "" }, { "docid": "b7d0c81e3c4385fe99299f1a54e5735b", "score": "0.54913855", "text": "def load_pages(self):\n\t\t#A dict of dicts to store the contents of _ directories. The first-\n\t\t# level key is the parent directory of the _ dir; the second-level\n\t\t# key is the name of the _ dir without the '_'. 
The value is a\n\t\t# list of rendered pages.\n\t\textravars = defaultdict(dict)\n\n\t\t#Walk through the directory bottom-up so that we get any\n\t\t# subdirectories with extra variables first.\n\t\tfor root, dirs, files in os.walk(self.options['content_dir'],\n\t\t\ttopdown=False, followlinks=True):\n\t\t\t#Per-directory environment to get templates from current\n\t\t\t# directory or its parents\n\t\t\tenvironment = jinja2.Environment(\n\t\t\t\tloader=ParentLoader(root, stop=self.root,\n\t\t\t\t\tdefault=self.options['default_template']),\n\t\t\t\textensions=self.options.get('jinja2_extensions', []))\n\n\t\t\t#Add any filters specified in options\n\t\t\tenvironment.filters.update(self.options.get('jinja2_filters', {}))\n\n\t\t\troot_basename = os.path.basename(root) #The name only of the current directory\n\t\t\tparent_dir = os.path.split(root)[0] #The full path of the parent directory\n\n\t\t\t#If the subdirectory starts with _, read and parse all .md files\n\t\t\t# within it and add them as variables with the directory name\n\t\t\t# (without the _).\n\t\t\tin_subfiles = root_basename.startswith('_')\n\t\t\tif in_subfiles:\n\t\t\t\troot_basename = root_basename[1:]\n\t\t\t\textravars[parent_dir][root_basename] = [] #Drop the _\n\n\t\t\t#Go through each file in the current directory (root)\n\t\t\tfor f in files:\n\t\t\t\trname, ext = os.path.splitext(f)\n\n\t\t\t\t#Skip hidden and template files\n\t\t\t\tif f.startswith('.') or ext == '.jinja':\n\t\t\t\t\tcontinue\n\n\t\t\t\t#Figure out where it should go in output for files to be\n\t\t\t\t# copied or written\n\t\t\t\tdestroot = os.path.relpath(\n\t\t\t\t\tos.path.join(\n\t\t\t\t\t\tos.path.join(parent_dir, root_basename) if in_subfiles else root,\n\t\t\t\t\t\trname), #Filename with no extension\n\t\t\t\t\tstart=self.options['content_dir'])\n\t\t\t\tif destroot.split(os.path.sep, 1)[0] == self.options['root_subdir']:\n\t\t\t\t\tdestroot = os.path.relpath(destroot, start=self.options['root_subdir'])\n\n\t\t\t\t#If it's a .md, we should render it\n\t\t\t\tif ext == '.md':\n\t\t\t\t\there = os.getcwd()\n\t\t\t\t\tos.chdir(root) #Be sure we're in root for relative paths\n\t\t\t\t\tmeta = self.render(f, environment, extravars.get(root, {}))\n\t\t\t\t\tos.chdir(here)\n\t\t\t\t\t#If we're in a _ dir, put the rendered file in extravars,\n\t\t\t\t\t# otherwise write the rendered result to disk\n\t\t\t\t\tif in_subfiles:\n\t\t\t\t\t\textravars[parent_dir][root_basename].append(meta)\n\t\t\t\t\telif meta['content']:\n\t\t\t\t\t\t\tself.write(meta['content'], destroot + '.html')\n\n\t\t\t\t#Otherwise copy it\n\t\t\t\telse:\n\t\t\t\t\tsrc = os.path.join(root, f)\n\t\t\t\t\tdst = os.path.join(self.options['output_dir'], destroot + ext)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.makedirs(os.path.dirname(dst))\n\t\t\t\t\texcept OSError as e:\n\t\t\t\t\t\tpass\n\t\t\t\t\tlogging.info(\"Copy file {0} to {1}\".format(src, dst))\n\t\t\t\t\tshutil.copy(src, dst)", "title": "" }, { "docid": "209d366f074719763b074282c9c3a85b", "score": "0.54750156", "text": "def populate(self):\n # type: () -> None\n self.container.clear_children()\n self.recent_pages = GUI.PagedContainer((20, 0), width=self.width - 40, height=60, hideControls=True)\n self.recent_pages.add_page(self.recent_pages.generate_page())\n self.btn_left = GUI.Button((0, 0), \"<\", state.color_palette.get_color(GUI.Palette.accent),\n state.color_palette.get_color(GUI.Palette.item), 20, width=20, height=60,\n onClick=self.recent_pages.page_left)\n self.btn_right = GUI.Button((self.width - 20, 0), \">\", 
state.color_palette.get_color(GUI.Palette.accent),\n state.color_palette.get_color(GUI.Palette.item), 20, width=20, height=60,\n onClick=self.recent_pages.page_right)\n per_app = (self.width - 40) / 4\n current = 0\n for app in state.application_list.active_applications:\n if app is not state.active_application and app.parameters.get(\"persist\", True) and app.name != \"home\":\n if current >= 4:\n current = 0\n self.recent_pages.add_page(self.recent_pages.generate_page())\n cont = GUI.Container((per_app * current, 0), transparent=True, width=per_app, height=self.height,\n border=1, borderColor=state.color_palette.get_color(GUI.Palette.item),\n onClick=self.activate, onClickData=(app,), onLongClick=self.close_ask,\n onLongClickData=(app,))\n cont.SKIP_CHILD_CHECK = True\n icon = app.getIcon()\n if not icon:\n icon = state.icons.get_loaded_icon(\"unknown\")\n img = GUI.Image((0, 5), surface=icon)\n img.position[0] = GUI.get_centered_coordinates(img, cont)[0]\n name = GUI.Text((0, 45), app.title, state.color_palette.get_color(GUI.Palette.item), 10)\n name.position[0] = GUI.get_centered_coordinates(name, cont)[0]\n cont.add_child(img)\n cont.add_child(name)\n self.recent_pages.add_child(cont)\n current += 1\n if len(self.recent_pages.get_page(0).child_components) == 0:\n notxt = GUI.Text((0, 0), \"No Recent Apps\", state.color_palette.get_color(GUI.Palette.item), 16)\n notxt.position = GUI.get_centered_coordinates(notxt, self.recent_pages.get_page(0))\n self.recent_pages.add_child(notxt)\n self.recent_pages.go_to_page()\n self.add_child(self.recent_pages)\n self.add_child(self.btn_left)\n self.add_child(self.btn_right)", "title": "" }, { "docid": "3718ba5b83568394a4038dea4d13f7dc", "score": "0.5471489", "text": "def get_all(parent, fetcher_str, **kwargs):\n # overwrite parameters that may interfere\n kwargs.update({'commit': True, 'page': 0})\n\n def generate_all_pages():\n \"\"\"generator for all pages.\"\"\"\n fetcher = getattr(parent, fetcher_str)\n\n first_page = fetcher.get(**kwargs)\n kwargs['page_size'] = len(first_page)\n count = fetcher.current_total_count\n yield first_page\n\n if count > kwargs['page_size']:\n num_pages = int(ceil(float(count) / kwargs['page_size']))\n for page in range(1, num_pages):\n kwargs['page'] = page\n yield fetcher.get(**kwargs)\n\n # flatten the pages into one generator for just the objects\n return chain.from_iterable(generate_all_pages())", "title": "" }, { "docid": "e26a017ac4f628e4de52230fc64d2684", "score": "0.5469787", "text": "def get_pages(self) -> List[str]:\r\n\r\n # Scrape all links for voivodeships in self.voivodeship variable\r\n results_pages = self.scraping_all_links(self.scraping_pages_links, self.voivodeships)\r\n results_pages = self.flatten(results_pages)\r\n\r\n # Verify weather there are some missing oferts\r\n missed_pages = [oferts for oferts in results_pages if \"page\" not in oferts]\r\n\r\n if len(missed_pages) != 0:\r\n results_pages = self.flatten(\r\n [properties for properties in results_pages if (properties != None) & (\"page\" in properties)])\r\n\r\n # Try to scrape missing links once again and join them with scraped before\r\n missed_pages_list = self.missed_links_all(missed_offers=missed_pages, func=self.missed_offers_pages,\r\n details=False, offers=False,\r\n func_pages_or_offers=self.scraping_pages_links)\r\n results_pages = self.join_missed_with_scraped(missed_pages_list, results_pages)\r\n\r\n return self.flatten(results_pages)", "title": "" }, { "docid": "b3bbcd891336824ba4d54606f47acf42", "score": "0.54651093", 
"text": "def page(self, *criterion, limit=None, offset=None):\n raise NotImplementedError(\"Subclasses should overwrite this Method.\")", "title": "" }, { "docid": "b3bbcd891336824ba4d54606f47acf42", "score": "0.54651093", "text": "def page(self, *criterion, limit=None, offset=None):\n raise NotImplementedError(\"Subclasses should overwrite this Method.\")", "title": "" }, { "docid": "4fa7e5b96053b854a6db20fdd919c089", "score": "0.54617846", "text": "def chunked_pages(self, tags_list: list, n: int):\n pages = []\n for chunk in self.chunk_list(tags_list, n):\n content = ''\n for col in self.chunk_list(chunk, 3):\n # the columns wont have the perfect number of elements every time, we need to append spaces if\n # the list entries is less then the number of columns\n while len(col) < 3:\n col.append(' ')\n\n # Concatenate the formatted column string to the page content string\n content += \"{: <20} {: <20} {: <20}\\n\".format(*col)\n\n # Append the content string to the list of pages to send to the paginator\n # Marked as a code block to ensure a monospaced font and even columns\n pages.append(f'```{content}```')\n return pages", "title": "" }, { "docid": "6176b69bdc6ee17ebf18f4d304fb119d", "score": "0.545429", "text": "def make_poss(current_page):\n print \"fetching possibilities\"\n possibilities = []\n\n for title in current_page.links():\n try:\n if debug: print \"fetching \" + str(title) \n possibilities.append(Page(title))\n except Exception, msg:\n if debug: print \"caught an error: \" + str(msg)\n return possibilities", "title": "" }, { "docid": "0ec86b4f5ccf3ba9711e952db01bfb86", "score": "0.54528755", "text": "def _fill_pages(self):\n tif = self._new_tif()\n draw = ImageDraw.Draw(tif)\n page_nb = 0\n x_pos = self.start_x\n y_pos = self.start_y\n if self.verbose:\n print('Generating individual tif image %s' % (self.indiv_page_prefix + str(page_nb) + '.tif'))\n for true_type in self.true_type_list:\n if x_pos != self.start_x or y_pos != self.start_y:\n x_pos = self.start_x\n y_pos = self.start_y\n self._save_tif(tif, page_nb) # save individual tif\n page_nb += 1\n if self.verbose:\n print('Generating individual tif image %s' % (self.indiv_page_prefix + str(page_nb) + '.tif'))\n tif = self._new_tif() # new page\n draw = ImageDraw.Draw(tif) # write on this new page\n for word in self.text:\n word += ' ' # add a space between each word\n wordsize_w, wordsize_h = true_type.getsize(word)\n # Check if word can fit the line, if not, newline\n # if newline, check if the newline fits the page\n # if not, save the current page and create a new one\n if not word_fits_in_line(self.W, x_pos, wordsize_w):\n if newline_fits_in_page(self.H, y_pos, wordsize_h):\n # newline\n x_pos = self.start_x\n y_pos += wordsize_h\n else:\n # newline AND newpage\n x_pos = self.start_x\n y_pos = self.start_y\n self._save_tif(tif, page_nb) # save individual tif\n page_nb += 1\n if self.verbose:\n print(\n 'Generating individual tif image %s' % (self.indiv_page_prefix + str(page_nb) + '.tif'))\n tif = self._new_tif() # new page\n draw = ImageDraw.Draw(tif) # write on this new page\n # write word\n for char in word:\n char_w, char_h = true_type.getsize(char) # get character height / width\n offset_x, offset_y = true_type.getoffset(char)\n top_left = (x_pos + offset_x, y_pos + offset_y) # character top-left corner coordinates\n bottom_right = (x_pos + char_w, y_pos + char_h) # character bottom-roght corner coordinates\n draw.text((x_pos, y_pos), char, fill=\"black\", font=true_type) # write character in tif file\n if 
char != ' ':\n # draw.rectangle([(char_x0, char_y0),(char_x1, char_y1)], outline=\"red\")\n self._write_boxline(char, top_left, bottom_right, self.H, page_nb) # add coordinates to boxfile\n x_pos += char_w\n self._save_tif(tif, page_nb) # save last tif", "title": "" }, { "docid": "386a714133811877b0279e2577fe78e2", "score": "0.54449975", "text": "def generate(root_url):\n \n result = []\n for c in Conference.objects.order_by('-start')[1:9999]:\n print '*** generating page for:', c\n year = str(c.start.year)\n\n result.append(\n build_page(parts={'title':year,'meta':'','fragment':''}, data=fields, url=root_url+'/'+year))\n\n return result", "title": "" }, { "docid": "d13b1f571c9265ef7f19a9102d5e55c2", "score": "0.5439712", "text": "def paginate(self, *args, **kwargs):\r\n op_dict = {'ListObjects': self.list_objects}\r\n yield op_dict[self.name](kwargs)", "title": "" }, { "docid": "2a63d69402f59db5cf475640a697079e", "score": "0.5438786", "text": "def make_pages(plugins, src, dst, layout, **params):\n items = []\n\n # Compute a \"base path\" from the first wildcard in src.\n # This will be used to compose a slug that includes subdirectories.\n basepath = get_base_path(src)\n for src_path in glob.glob(src):\n content = read_content(src_path, basepath)\n\n page_params = dict(params, **content)\n\n if page_params.get('draft') != True:\n # Populate placeholders in content if content-rendering is enabled.\n if page_params.get('render') == True:\n rendered_content = render(plugins, page_params['content'], **page_params)\n page_params['content'] = rendered_content\n content['content'] = rendered_content\n\n items.append(content)\n\n dst_path = render(plugins, dst, **page_params)\n # Certain meta params need processing\n # if page_params.get('guests')\n output = render(plugins, layout, **page_params)\n\n log('Rendering {} => {} ...', src_path, dst_path)\n fwrite(dst_path, output)\n\n comparison_fun = lambda x: datetime.datetime.strptime(x['date'], '%Y-%m-%d')\n return sorted(items, key=comparison_fun, reverse=True)", "title": "" }, { "docid": "f2a7d18560abfacd9b1edcd9b03a63be", "score": "0.54367083", "text": "def build_menus():\n # call the extract_iterations() method\n # to identify the number of pages to\n # be visited\n iterations = extract_iterations()\n print(\"Building menus...\")\n for page_num in range(1, iterations+1):\n # append page number to base_url\n # to get a specific page\n url = base_url + str(page_num)\n r = requests.get(url)\n data = r.json()\n menus_value = data['menus']\n for menu_entry in menus_value:\n # adding another field 'visited'\n # to each of the nodes, so as to\n # manage their visit status\n menu_entry['visited'] = False\n menus.insert(menu_entry)\n\n # the menus table is now complete\n # (with the additional 'visited' field)", "title": "" }, { "docid": "c7ea197640891059792fe168f1d036ac", "score": "0.5434656", "text": "def available_pagination_pages(self):\n chunked_posts = list(chunk_list(self.get_posts(), PAGINATION_PAGE_MAX))\n return list(range(1, len(chunked_posts) + 1))", "title": "" }, { "docid": "c1499067b8abdacb86aa2e1e9fc3dab5", "score": "0.54340935", "text": "def render(self, templ_vars=None):\n if not templ_vars:\n templ_vars = {}\n\n if 'pagination' in self.meta and 'list' in self.meta['pagination']:\n extra_pages = self.paginate()\n else:\n extra_pages = []\n\n if 'page' in templ_vars:\n logging.debug('Found defaulted page data.')\n templ_vars['page'].update(self.meta)\n else:\n templ_vars['page'] = self.meta\n\n if 'pagination' in templ_vars:\n 
templ_vars['pagination'].update(self.meta['pagination'])\n else:\n templ_vars['pagination'] = self.meta['pagination']\n\n logging.debug('templ_vars.keys(): ' + repr(templ_vars.keys()))\n self.rendered = self.template.render(templ_vars)\n\n logging.debug('extra pages is: ' + repr(extra_pages))\n return extra_pages", "title": "" }, { "docid": "9ae6a1906ad53393e6eededa46b95073", "score": "0.54241955", "text": "def generate_data(self):\n page_home_id = Page.objects.create(title='home').id\n page_section1_id = Page.objects.create(title='section1', parent_id=page_home_id, url='section1').id\n page_section2_id = Page.objects.create(title='section2', parent_id=page_home_id, url='section2').id\n page_abc_id = Page.objects.create(title='abc', parent_id=page_section1_id, url='abc').id\n Page.objects.create(title='xyz', parent_id=page_abc_id, url='xyz')\n page_def_id = Page.objects.create(title='def', parent_id=page_section2_id, url='/def/').id # absolute url\n page_ghi_id = Page.objects.create(title='ghi', parent_id=page_section2_id, url='ghi').id\n Page.objects.create(title='example', url='http://example.com')\n\n page_def = Page.objects.get(id=page_def_id)\n page_ghi = Page.objects.get(id=page_ghi_id)\n page_ghi.move_to(page_def, 'right')", "title": "" }, { "docid": "a1bbb24ffd7c59cbc31e634298f757f7", "score": "0.54222685", "text": "def getPages(\n self,\n pageNumSpec,\n refreshConfig=False,\n doRules=True,\n doFilter=True,\n onlyFnRules=False,\n ):\n\n if not self.good:\n print(\"SKIPPING because of config errors\")\n return\n\n fnRules = self.fnRules\n spaces = self.spaces\n columns = self.columns\n\n self.doRules = doRules\n self.doFilter = doFilter\n\n ruleIndex = self.ruleIndex\n rulesApplied = self.rulesApplied\n\n if refreshConfig:\n self.getCharConfig()\n\n for rn in ruleIndex:\n rulesApplied[rn] = collections.Counter()\n\n for (i, pageNum) in enumerate(self.parsePageNums(pageNumSpec)):\n self.pageNum = pageNum\n rep = (\n f\"{i + 1:>4} (page {pageNum:>4})\"\n if pageNum != i + 1\n else (f\"{i + 1:>4}\" + \" \" * 12)\n )\n sys.stdout.write(f\"\\r\\t{rep}\")\n sys.stdout.flush()\n doc = self.doc\n page = doc[pageNum - 1]\n\n theseFnRules = set()\n\n for fnRule in page.getDrawings():\n if RECT in fnRule and fnRule.get(COLOR, None):\n rect = fnRule[RECT]\n width = rect.x1 - rect.x0\n if width > FNRULE_WIDTH:\n theseFnRules.add(int(round(rect.y1)))\n\n fnRules[pageNum] = tuple(sorted(theseFnRules))\n spaces[pageNum] = {}\n columns[pageNum] = {}\n\n if onlyFnRules:\n continue\n\n textPage = page.getTextPage()\n data = textPage.extractRAWDICT()\n self.collectPage(data)", "title": "" }, { "docid": "4d8c1125ccddccd90b01594e1e488a68", "score": "0.54105824", "text": "def get_paginator(self, search, per_page, orphans=0, **kwargs):\n return self.paginator_class(search, per_page, orphans=orphans, **kwargs)", "title": "" }, { "docid": "8cd4c55d3d8e6a0cbd7ab492c072acbb", "score": "0.5408659", "text": "def _pagination_iter(self, more_func, *args, **kwargs):\n\n\t\toffset = 0\n\t\tpage = []\n\n\t\twhile True:\n\t\t\tif not page:\n\t\t\t\tpage = more_func(offset, *args, **kwargs)\n\t\t\t\tif not page:\n\t\t\t\t\tbreak\n\n\t\t\t\toffset += len(page)\n\n\t\t\tyield page.pop()", "title": "" }, { "docid": "e55c94dbac8f075e797497f3c215a458", "score": "0.5406382", "text": "def render_pages(pages, dry=False):\n filenames = [(page, dry) for page in pages]\n open_pool(create_page, filenames)", "title": "" }, { "docid": "1261bd1dbe04abcfaf08bec6f45f087c", "score": "0.5386752", "text": "def search_page_wise(self):\n 
results = {}\n for page in range(0,self.pages):\n args = {'q' : self.query,\n 'v' : '1.0',\n 'start' : page,\n 'rsz': RSZ_LARGE,\n 'safe' : SAFE_OFF, \n 'filter' : FILTER_ON, \n }\n q = urlencode(args)\n search_results = self.__urlopen(URL+q)\n data = json.loads((search_results.read() or '').decode(\"utf-8\"))\n urls = []\n if 'responseData' in data:\n for result in data['responseData'].get('results', []):\n if result and 'unescapedUrl' in result:\n url = urllib2.unquote(result['unescapedUrl'])\n urls.append(url) \n else:\n self.logger.error('no responseData key found in response')\n results[page] = urls\n return results", "title": "" }, { "docid": "b41fbf7098052f1e7bec48f62ff125bb", "score": "0.5370654", "text": "def build_data(api_key, query, begin_date, end_date, page='0'):\n \n dat = get_articles(api_key, query, begin_date, end_date, page='0')\n time.sleep(1)\n articles_data = dat['response']['docs']\n pages = ceil(dat['response']['meta']['hits']/10)\n for page in range(1, int(pages)):\n print('Gathering article page {} of {}'.format(page, pages))\n p = get_articles(api_key, query, begin_date, end_date, page=str(page))\n articles_data.extend(p['response']['docs'])\n time.sleep(0.75)\n \n return(articles_data)", "title": "" }, { "docid": "7f6973135c5a3bc750032e28ccc4d517", "score": "0.53705144", "text": "def annotation_pages(result: Optional[dict]) -> Optional[str]:\n if result:\n if result[\"total\"] > 0:\n last = urlparse(result[\"last\"])\n last_page = parse_qs(last.query)[\"page\"][0]\n for p in range(0, int(last_page) + 1):\n page = set_query_field(result[\"last\"], field=\"page\", value=p, replace=True)\n yield page\n else:\n return\n else:\n return", "title": "" }, { "docid": "2e850a43224fbc695d52eafc681d3d0a", "score": "0.53601015", "text": "def get_list(self, *args, **kwargs):\n \n # Set up the mongo connection.\n mongo_object = MongoConnection(db=self.db_name, collection=self.collection_name, auth=self.auth_string)\n connection = mongo_object.connect()\n \n # Get the total count and the number of pages.\n total_count = connection.find(self.query_dict).count()\n \n if (total_count % self.pagination_limit) > 0:\n self.pages = (total_count / self.pagination_limit) + 1\n else:\n self.pages = (total_count / self.pagination_limit)\n \n if self.pages == 1:\n self.page_range = [1]\n else:\n self.page_range = range(1,self.pages+1)\n \n try:\n query_kwargs = self.query_filter['kwargs']\n query_filter = { query_kwargs['mongo_field']: self.kwargs[query_kwargs['url_kwarg']] }\n \n except Exception, e:\n print Exception, e\n query_filter = None\n \n try:\n # Try to get the page from the URL.\n self.page = int(self.request.GET['page'])\n \n # If we can get the page from the URL, calculate the offset.\n \n # First, test if the current page is 0.\n if self.page == 0:\n \n # If so, short circuit the process and return a redirect URL while setting the type as a redirect.\n redirect_url = '%s?page=%s' % (self.request.META['PATH_INFO'], 1)\n return redirect_url, 'redirect'\n \n # Next, check if the current page is less than the total number of pages.\n elif self.page <= self.pages:\n \n # If so, set the offset to the 1-indexed page number * the pagination limit.\n self.offset = (self.page-1) * self.pagination_limit\n \n # Set the previous/next page numbers.\n if self.page == 1 and self.page == self.pages:\n pass\n \n elif self.page == 1:\n self.next_page_number = 2\n \n elif self.page == self.pages:\n self.previous_page_number = self.page - 1\n \n else:\n self.next_page_number = self.page + 1\n 
self.previous_page_number = self.page - 1\n \n # Finally, check if the current page is greater than the total number of pages.\n elif self.page > self.pages:\n \n # If so, short circuit the process and return a redirect URL while setting the type as a redirect.\n redirect_url = '%s?page=%s' % (self.request.META['PATH_INFO'], self.pages)\n return redirect_url, 'redirect'\n \n # Check if the sort is absent.\n if self.query_sort == None:\n \n # If there's no sort, just send along the connection query.\n query = connection.find(query_filter, limit=self.pagination_limit, skip=self.offset)\n \n # Otherwise, if the sort is present ...\n else:\n \n # ... send along the connection query with the sort attached.\n query = connection.find(query_filter, limit=self.pagination_limit, skip=self.offset).sort(self.query_sort['field'], direction=self.query_sort['direction'])\n \n except Exception, e:\n \n # If there's no page in the URL, set the page to 1.\n self.page = 1\n self.offset = 0\n \n # If so, set the offset to the 1-indexed page number * the pagination limit.\n self.offset = (self.page-1) * self.pagination_limit\n \n # Set the previous/next page numbers.\n if self.page == 1 and self.page == self.pages:\n pass\n \n elif self.page == 1:\n self.next_page_number = 2\n \n elif self.page == self.pages:\n self.previous_page_number = self.page - 1\n \n else:\n self.next_page_number = self.page + 1\n self.previous_page_number = self.page - 1\n \n # Check if the sort is absent.\n if self.query_sort == None:\n \n # If there's no sort, just send along the connection query.\n query = connection.find(query_filter, limit=self.pagination_limit, skip=self.offset)\n \n # Otherwise, if the sort is present ...\n else:\n \n # ... send along the connection query with the sort attached.\n query = connection.find(query_filter, limit=self.pagination_limit, skip=self.offset).sort(self.query_sort['field'], direction=self.query_sort['direction']) \n \n # Set up a query list.\n query_list = []\n \n # Append each item to the query_list.\n for item in query:\n item_dict = {}\n item_as_kwargs = dict(item)\n item_dict.update(**item_as_kwargs)\n item_dict['id'] = str(item['_id'])\n item_dict.pop('_id')\n query_list.append(item_dict)\n \n # Return the query list, and set the type of return to query.\n return query_list, 'query'", "title": "" }, { "docid": "756189bdfc65eac7274551cacdedd181", "score": "0.53503144", "text": "def aggregate_pages(roots):\n aggregate = {}\n req = requests.get(get_url(0))\n json_data = req.json()\n\n total_pages = int(json_data['pagination']['total']/json_data['pagination']['per_page']) + 1\n page = json_data['pagination']['current_page']\n while page <= total_pages:\n req = requests.get(get_url(page))\n json_data = req.json()\n menus = json_data['menus']\n for menu in menus:\n aggregate[(menu)['id']] = menu['child_ids']\n if not 'parent_id' in menu:\n roots.append((menu)['id'])\n page += 1\n return aggregate", "title": "" }, { "docid": "c0ff301db1885a2ebfb90ad7d1553910", "score": "0.5346458", "text": "def build_page(self, x_y):\n\n x, y = x_y\n min_x, min_y = x * self.value_delta, y * self.value_delta\n max_x, max_y = min_x + self.value_delta, min_y + self.value_delta\n points = [\n (i, j)\n for i in range(min_x, max_x, self.step_size)\n for j in range(min_y, max_y, self.step_size)\n ]\n altis = RgeAltiClient().fetch_points(points)\n page = np.array(altis).reshape((self.page_size, self.page_size))\n return page", "title": "" }, { "docid": "07f2dbe50cbb6a2325e177e6b9264610", "score": "0.53431183", "text": 
"def build_blog_list(query_list, date_range, phantom_path):\n \n driver = webdriver.PhantomJS(executable_path=phantom_path)\n\n buzz_info = []\n list_of_posts = []\n for query in query_list:\n searchword = query[0] \n print (searchword)\n for date in date_range:\n search_url = url_generator(searchword, date[0], date[1], '')\n print(search_url)\n soup = page_source_retriever(driver, search_url) \n num_of_posts, max_num = max_num_finder(soup)\n \n for i in range(max_num):\n current_page = str(10*i+1)\n search_url_by_page = url_generator(searchword, date[0], date[1], current_page)\n soup = page_source_retriever(driver, search_url_by_page)\n \n sections = soup.find_all('li', {'class':'sh_blog_top'})\n for section in sections:\n href = get_hrefs(section)\n date_posted = get_date(section)\n list_of_posts.append([searchword, date_posted,href]) \n buzz_info.append([searchword,date[0],date[1], num_of_posts]) \n\n driver.quit()\n \n return buzz_info, list_of_posts", "title": "" }, { "docid": "6c707e789296591918c1a5bef739489a", "score": "0.53393954", "text": "def test_scrap_for_pages(self):\n given_url = \"https://danbooru.donmai.us/posts?tags=short_sleeves\"\n \n expected_ouput = [\"https://danbooru.donmai.us/posts?tags=short_sleeves\",\n \"https://danbooru.donmai.us/posts?page=2&tags=short_sleeves\",\n \"https://danbooru.donmai.us/posts?page=3&tags=short_sleeves\",\n \"https://danbooru.donmai.us/posts?page=4&tags=short_sleeves\"]\n \n out = self.dan.scrap_for_pages(given_url, 4)\n for i in range(4):\n self.assertEqual(out[i], expected_ouput[i])", "title": "" }, { "docid": "2da5ba60bab5138e8e5fbc4880e29ccf", "score": "0.53393036", "text": "def pages(request):\n current_site = get_site()\n\n language = get_language({'country_code': ''})\n page = get_index_page(language)\n\n menuitems = get_main_menuitems('', page)\n\n meta_data = get_metadata(page)\n scripts = get_scripts()\n\n site_content = {\n 'site': current_site,\n 'languages': get_languages(),\n 'current_language': language,\n 'menuitems': menuitems,\n 'page': page,\n 'scripts': scripts,\n 'metadata': meta_data,\n }\n\n if has_other_menu():\n site_content['other_menuitems'] = get_other_menuitems()\n\n return {'site_content': site_content}", "title": "" }, { "docid": "4dcc28780ebec978c5a7f6a484657910", "score": "0.53388333", "text": "def scrape_pages(self, page):\n\n while True:\n print 'Scraping page ' + str(self.page)\n\n # Format of url is http://www.wyzant.com/New_York_City_tutors.aspx?sl=80075877&sort=27&pagesize=5&pagenum=1 where the 1 at the end is the page number. 
Increase page number by 1 each iteration of while loop in order to crawl through pages.\n self.current_url = self.region_url + str(self.page)\n ufile = urllib2.urlopen(self.current_url)\n\n if ufile.geturl()[-1:] == self.base_url:\n break # Check for end of list\n\n people = BeautifulSoup(ufile).findAll('div', {'class':'tutorFR hide medium-show'}) # picks out each person on the page\n\n self.scrape_people(people)\n\n self.page += 1 # Next page", "title": "" }, { "docid": "f6c9b705e2eb1fc4f8722c2398d2ab44", "score": "0.53386146", "text": "def get_all(self, **kwargs):\n\n all_params = ['page_index', 'page_size', 'sort', 'q']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_all\" % key\n )\n params[key] = val\n del params['kwargs']\n\n resource_path = '/build-configurations'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'page_index' in params:\n query_params['pageIndex'] = params['page_index']\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n if 'sort' in params:\n query_params['sort'] = params['sort']\n if 'q' in params:\n query_params['q'] = params['q']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='BuildConfigurationPage',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { "docid": "d3b93b5a9cace236e810063ab2a02783", "score": "0.53377736", "text": "def pages(per_page: Optional[int] = 1, show_page: Optional[bool] = True) -> Callable:\n def page_source(coro: Callable[[MenuBase, Any], Coroutine[Any, Any, discord.Embed]]) -> Type[menus.ListPageSource]:\n async def create_page_header(self, menu: MenuBase, entry: Any) -> Union[discord.Embed, str]:\n result = await discord.utils.maybe_coroutine(coro, self, menu, entry)\n return menu.generate_page(result, self._max_pages)\n\n def __init__(self, list_pages: Iterable):\n super(self.__class__, self).__init__(list_pages, per_page=per_page)\n kwargs = {\n '__init__': __init__,\n 'format_page': (coro, create_page_header)[show_page]\n }\n return type(coro.__name__, (menus.ListPageSource,), kwargs)\n return page_source", "title": "" }, { "docid": "90c9cf9b266149fcd97052e5a475ca18", "score": "0.5332687", "text": "def pushFront(self, *pages):\n self.pages = list(pages) + self.pages\n self.evaluatePages()", "title": "" }, { "docid": "4f269c434799334c8e2b01e790d63aa0", "score": "0.533135", "text": "def loadPage(self):\n\n #depending on currentPage, create the required page \n if self.currentPage == 0:\n self.page = InfoPage(self.screen, data.getChild(self.menuNode, \"info\"), self.poke)\n elif self.currentPage == 1:\n self.page = SkillsPage(self.screen, data.getChild(self.menuNode, \"skills\"), self.poke)\n elif self.currentPage == 2:\n self.page = MovesPage(self.screen, 
data.getChild(self.menuNode, \"moves\"), self.poke)", "title": "" } ]
afe6f0ceb315a8b809fa6471a104b182
Returns the number of days between year1/month1/day1 and year2/month2/day2. Assumes inputs are valid dates in the Gregorian calendar.
[ { "docid": "d8b73162f072e405ab0afb00e8a23a25", "score": "0.75588286", "text": "def daysBetweenDates(year1, month1, day1, year2, month2, day2):\n # program defensively! Add an assertion if the input is not valid!\n assert month1 in range(1, 13) and month2 in range(1, 13), \" the value of month should be between 1 - 12\"\n # assert day1 < days_in_month(month1, year1), \"the value of day in %d should be <= %d\" % month1 % days_in_month(month1, year1) # zb\n # assert day2 < days_in_month(month2, year2), \"the value of day in %d should be <= %d\" % month2 % days_in_month(month2, year2) # zb\n assert not dateIsBefore(year2, month2, day2, year1, month1, day1), \\\n \"year1-month1-day1 should be earlier than year2-mont2-day2!\"\n days = 0\n while dateIsBefore(year1, month1, day1, year2, month2, day2):\n year1, month1, day1 = nextDay(year1, month1, day1)\n days += 1\n return days", "title": "" } ]
[ { "docid": "5bb3242f99f100bec5f8ed8c05ac123d", "score": "0.84254104", "text": "def days_between_dates(year1, month1, day1, year2, month2, day2):\n\n days = date_in_days(year2, month2, day2) - date_in_days(year1, month1, day1)\n return days", "title": "" }, { "docid": "ff3fe8202499476d844f942bfac8ef9a", "score": "0.8176493", "text": "def daysBetweenDates(year1, month1, day1, year2, month2, day2):\n number_of_days = 0\n next_date = nextDay(year1, month1, day1)\n end_date = (year2, month2, day2)\n while next_date <= end_date:\n number_of_days += 1\n next_date = nextDay(next_date[0], next_date[1], next_date[2])\n return number_of_days", "title": "" }, { "docid": "084e553237e76537fb4ef319147a9d3b", "score": "0.81429404", "text": "def daysBetweenDates(year1, month1, day1, year2, month2, day2):\r\n #Add an assertion if the input is not valid!\r\n assert not dateIsBefore(year2, month2, day2, year1, month1, day1)\r\n days = 0\r\n while dateIsBefore(year1, month1, day1, year2, month2, day2):\r\n year1, month1, day1 = nextDay(year1, month1, day1)\r\n days += 1\r\n return days", "title": "" }, { "docid": "ced2c7607d9d53e7497ddedec89dac0a", "score": "0.8122042", "text": "def daysBetweenDates(year1, month1, day1, year2, month2, day2):\n # program defensively! Add an assertion if the input is not valid!\n assert not dateIsBefore(year2, month2, day2, year1, month1, day1) \n days = 0\n while dateIsBefore(year1, month1, day1, year2, month2, day2):\n year1, month1, day1 = nextDay(year1, month1, day1)\n days += 1\n return days", "title": "" }, { "docid": "9cd893805164130d32c781f427341b38", "score": "0.8031734", "text": "def daysBetweenDates(year1, month1, day1, year2, month2, day2):\r\n\r\n assert ((year1, month1, day1) <= (year2, month2, day2)), \"AssertionError\"\r\n days = 0\r\n while nextDay(year1, month1, day1) <= (year2, month2, day2):\r\n (year1, month1, day1) = nextDay(year1, month1, day1)\r\n days += 1\r\n return days", "title": "" }, { "docid": "a9c8f8b31851b669cbf450fd8c513432", "score": "0.79735273", "text": "def daysBetweenDates(year1, month1, day1, year2, month2, day2): \r\n days = 0\r\n while dateIsBefore(year1, month1, day1, year2, month2, day2):\r\n year1, month1, day1 == nextDay(year1, month1, day1)\r\n days += 1\r\n return days", "title": "" }, { "docid": "7ed807619ac4c5a9d27390bc7cdcbd1c", "score": "0.7773415", "text": "def daysBetweenDates(year1, month1, day1, year2, month2, day2):\n # program defensively! 
Add an assertion if the input is not valid!\n date1 = str(month1) + '/' + str(day1) + '/' + str(year1)\n date2 = str(month2) + '/' + str(day2) + '/'+ str(year2)\n days_per_month = 365.25 / 12\n assert not dateIsBefore(year2, month2, day2, year1, month1, day1)\n days = 0\n while dateIsBefore(year1, month1, day1, year2, month2, day2):\n year1, month1, day1 = nextDay(year1, month1, day1)\n days += 1\n print (str(days) + ' days between ' + date1 + ' and ' + date2 + '\\n' +\n str(round(days / days_per_month, 1)) + ' months between ' + date1 + ' and ' + date2)", "title": "" }, { "docid": "d23653ab83fd6f6f979ed9c79add64f9", "score": "0.7499938", "text": "def daysBetweenDates(year1, month1, day1, year2, month2, day2):\n # YOUR CODE HERE!\n day=0\n while daysisbefore(year1, month1, day1, year2, month2, day2):\n year1,month1,day1=nextDay(year1,month1,day1)\n day=day+1\n return day", "title": "" }, { "docid": "028444fc684747c403d2b8858ee80769", "score": "0.74602246", "text": "def daysBetweenDates(year1, month1, day1, year2, month2, day2):\n \n def nextDay(year, month, day):\n daysInCurrentMonth = daysInMonth(year, month)\n if day < daysInCurrentMonth:\n day += 1 \n else:\n day = 1\n month += 1\n if month > 12:\n month = 1\n year += 1\n return year, month, day\n\n def dateIsBefore(year1, month1, day1, year2, month2, day2):\n \"\"\"Returns True if year1-month1-day1 is before\n year2-month2-day2. Otherwise, returns False.\"\"\"\n if year1 < year2:\n return True\n if year1 == year2:\n if month1 < month2:\n return True\n if month1 == month2:\n return day1 < day2\n return False \n\n def isLeapYear(year):\n if year % 4 != 0:\n return False\n elif year % 100 != 0:\n return True\n elif year % 400 != 0:\n return False\n else:\n return True\n\n def daysInMonth(year, month):\n #thirty = [4,6,9,11]\n thirtyOne = [1,3,5,7,8,10,12]\n\n if month == 2:\n if isLeapYear(year):\n return 29\n else:\n return 28\n elif month in thirtyOne:\n return 31\n else:\n return 30\n \n\n assert not dateIsBefore(year2, month2, day2, year1, month1, day1)\n days = 0\n while dateIsBefore(year1, month1, day1, year2, month2, day2):\n year1, month1, day1 = nextDay(year1, month1, day1)\n days += 1\n return days", "title": "" }, { "docid": "76bcbb13f8f89e830f76e3cc6bb2de89", "score": "0.7421402", "text": "def days_diff(date1, date2):\n year1, month1, day1 = date1\n year2, month2, day2 = date2\n days1, days2 = date(year1, month1, day1), date(year2, month2, day2)\n return abs(days2 - days1).days", "title": "" }, { "docid": "ad8caad5731700b792a8a1b33020a095", "score": "0.7386876", "text": "def daysBetweenDates(year1, month1, day1, year2, month2, day2):\n\n if(year2 > year1):\n\n if (month2 > month1):\n\n if(day2>day1):\n dayOfDay=day2-day1\n elif(day2==day1):\n dayOfDay=0\n else:\n month2=month2-1\n day2=day2+30\n dayOfDay=day2-day1\n minOfMonth = month2 - month1\n dayOfmonth = minOfMonth * 30\n elif(month2== month1):\n if (day2 > day1):\n dayOfDay = day2 - day1\n elif (day2 == day1):\n dayOfDay = 0\n else:\n year2=year2-1\n month2=month2+12\n month2=month2-1\n day2=day2+30\n dayOfDay=day2-day1\n dayOfmonth=0\n\n else:\n year2=year2-1\n month2=month2+12\n minOfMonth=month2-month1\n dayOfmonth=minOfMonth * 30\n\n minOfYear = year2 - year1\n dayofyear = minOfYear * 365\n\n totalofgreater=dayofyear+dayOfmonth+dayOfDay\n return total", "title": "" }, { "docid": "3437354c3d99bc3533e31e152e4c03e4", "score": "0.7249976", "text": "def leapdays(year1, year2):\r\n year1 -= 1\r\n year2 -= 1\r\n return (year2 // 4 - year1 // 4) - (year2 // 100 - year1 // 
100) + (year2 // 400 - year1 // 400)", "title": "" }, { "docid": "5068893c9210be423a1ab035cda064d3", "score": "0.7225645", "text": "def days_between_dates(y1, m1, d1, y2, m2, d2): \n return total_days(y2,m2,d2) - total_days(y1,m1,d1)", "title": "" }, { "docid": "476cf4ffa03a32c111d97d70609f45f6", "score": "0.7024466", "text": "def days_between(cls, date1, date2):\n dd1 = date1.day\n dd2 = date2.day\n mm1 = date1.month\n mm2 = date2.month\n yy1 = date1.year\n yy2 = date2.year\n\n if (dd2 == 31) and (dd1 < 30):\n dd2 = 1\n mm2 += 1\n\n return 360*(yy2-yy1) + 30*(mm2-mm1-1) + max(0, 30-dd1) + min(30, dd2)", "title": "" }, { "docid": "a7c9c5865115d554d326d328828bc25b", "score": "0.6815656", "text": "def GetNumberOfDays(month, year=Inv_Year, cal=Gregorian):", "title": "" }, { "docid": "0a8a3efccc2c988a4a104143e2f3a30e", "score": "0.6784326", "text": "def count_days(self, date1, date2):\n\t\t\n\t\tif isinstance(date1,dt.datetime) and isinstance(date2,dt.datetime):\n\t\t\tn_days = date1 - date2\n\t\t\t\n\t\telif isinstance(date1,pd.Series) and isinstance(date2,dt.datetime):\n\t\t\tn_days = date1.apply(lambda x: (x-date2).days)\n\t\t\t\n\t\telif isinstance(date1,dt.datetime) and isinstance(date2,pd.Series):\n\t\t\tn_days = date2.apply(lambda x: (date1-x).days)\n\t\t\n\t\telif isinstance(date1,pd.Series) and isinstance(date2,pd.Series):\n\t\t\tn_days = (date1 - date2).apply(lambda x: x.days)\n\t\t\n\t\treturn n_days", "title": "" }, { "docid": "e4bfdbc17cf3ffb88e7fa0059bb2d435", "score": "0.6746539", "text": "def diff_year(start_date, end_date):\r\n diffyears = end_date.year - start_date.year\r\n difference = end_date - start_date.replace(end_date.year)\r\n days_in_year = isleap(end_date.year) and 366 or 365\r\n difference_in_years = diffyears + (difference.days + difference.seconds / 86400.0) / days_in_year\r\n return difference_in_years", "title": "" }, { "docid": "9176445cec0e47e020abb63711477cce", "score": "0.67233205", "text": "def count_days(year_input):\n total_days = 0\n for years in range(1900, year_input):\n if find_leap(years):\n total_days += 366\n else:\n total_days += 365\n return total_days", "title": "" }, { "docid": "90952de76697fdc7f0a2d11c390c2be9", "score": "0.67219394", "text": "def calc_years_diff(start_date, end_date):\n yd= datetime.strptime(end_date, '%Y-%m-%d')\n xd = datetime.strptime(start_date, '%Y-%m-%d')\n diff = yd - xd\n return round(diff.days / 365, 1)", "title": "" }, { "docid": "43972ebb51e4af62427502061e2b75be", "score": "0.67066115", "text": "def _get_days_diff(day1, day2):\n return (day1 - day2).days", "title": "" }, { "docid": "259d23a30130cd65714db1f42e62449d", "score": "0.66719353", "text": "def days_between(d1, d2):\n d1 = datetime.strptime(d1, \"%Y%m%d\")\n d2 = datetime.strptime(d2, \"%Y%m%d\")\n return abs((d2 - d1).days)", "title": "" }, { "docid": "ae4b90e1682b40de60a533f9e1cd8c90", "score": "0.65848184", "text": "def days_between(d1, d2): \r\n d1 = datetime.datetime.strptime(d1, \"%Y-%m-%d\").date()\r\n d2 = datetime.datetime.strptime(d2, \"%Y-%m-%d\").date()\r\n return abs((d2 - d1).days)", "title": "" }, { "docid": "40a96b3e1394e47d9e77b694e12b2a72", "score": "0.6583141", "text": "def date_diff(d1, d2):\r\n #Put the date into a list of data: ['year', 'month', 'day']\r\n d1_data = d1.split('-')\r\n d2_data = d2.split('-')\r\n\r\n #Create dates with the correct data\r\n date1 = datetime.date(int(d1_data[0]), int(d1_data[1]), int(d1_data[2]))\r\n date2 = datetime.date(int(d2_data[0]), int(d2_data[1]), int(d2_data[2]))\r\n\r\n #Find the difference 
between the two dates\r\n diff = date1 - date2\r\n\r\n #Return the days between the two dates\r\n return 0 - diff.days", "title": "" }, { "docid": "28126bc5cb21236175c13b88c59d7a76", "score": "0.6575045", "text": "def days_between(date1, date2, daycount_method):\n date1 = date_to_datetime(date1).date()\n date2 = date_to_datetime(date2).date()\n if daycount_method == DAYCOUNT_ACT_365:\n return (date2 - date1).days\n\n raise ValueError('Unsupported daycount method.')", "title": "" }, { "docid": "21aa446260afbd8dfd910af0dcb0d6e6", "score": "0.6555041", "text": "def days_between_dates(date1, date2):\n if date1 is not None and date2 is not None:\n date1 = datetime.strptime(date1, \"%Y-%m-%d\")\n date2 = datetime.strptime(date2, \"%Y-%m-%d\")\n return abs((date2 - date1).days)\n else:\n return None", "title": "" }, { "docid": "79397bbc52dee3488819b5e136cbe34e", "score": "0.65446794", "text": "def days_between(start: Date, end: Date) -> int:\n return Date.days_between(start=start, end=end)", "title": "" }, { "docid": "0f301185c61798f91e5f114f93833115", "score": "0.6533187", "text": "def calculate_date_diff(date_one: datetime, date_two: datetime) -> int:\n return (date_two - date_one).days", "title": "" }, { "docid": "7ced56a5b103e32e58e76ba9d4ceca7d", "score": "0.64924693", "text": "def _days_in_year(year, calendar, use_cftime=True):\n date_type = get_date_type(calendar, use_cftime=use_cftime)\n if year == -1 and calendar in _CALENDARS_WITHOUT_YEAR_ZERO:\n difference = date_type(year + 2, 1, 1) - date_type(year, 1, 1)\n else:\n difference = date_type(year + 1, 1, 1) - date_type(year, 1, 1)\n return difference.days", "title": "" }, { "docid": "01b8e86f011df62a5b53c11bdc65e3b2", "score": "0.648655", "text": "def days_between(first_date: typing.Iterable, second_date: typing.Iterable) -> int:\n try:\n difference = datetime.datetime(*fill_date(first_date)) - datetime.datetime(*fill_date(second_date))\n return abs(difference.days)\n except Exception as e:\n raise e", "title": "" }, { "docid": "6d31b36a164965508adcaa46f7d6a04d", "score": "0.63857275", "text": "def _get_number_of_days(self, date_from, date_to):\n from_dt = mx.DateTime.Parser.DateTimeFromString(date_from)\n to_dt = mx.DateTime.Parser.DateTimeFromString(date_to)\n timedelta = to_dt - from_dt\n diff_day = timedelta.days\n return diff_day", "title": "" }, { "docid": "412ab69c1a68f6efde6cbae4edf6a70b", "score": "0.63731587", "text": "def num_leap_years(year_start: int, year_end: int) -> int:\n start = year_start - 1\n end = year_end - 1\n\n return (end // 4 - start // 4) - (end // 100 - start // 100) + (end // 400 - start // 400)", "title": "" }, { "docid": "55fceb6318aa61b51db1bb024a49e0f4", "score": "0.63625443", "text": "def calc_day(year,month,day):\n\ti=1\n\ttotal_days=0\n\twhile i<month:\n\t\ttotal_days=total_days+months_length[i]\n\t\ti+=1\n\ttotal_days=total_days+day\n\treturn total_days", "title": "" }, { "docid": "266c2253f04f81d71db5fbfc39f94c97", "score": "0.6358348", "text": "def days_from(input_date, compare_date):\n\n con_comp = datetime.datetime.strptime(compare_date, '%Y-%m-%d')\n\n con_full = input_date\n\n comp_date = datetime.date(con_full.year, con_comp.month, con_comp.day)\n\n end_year = datetime.date(con_full.year, 12, 31)\n\n abs_diff = abs((con_full.timetuple().tm_yday) -\n (comp_date.timetuple().tm_yday))\n\n if (abs_diff >= 183):\n final = end_year.timetuple().tm_yday - abs_diff\n else:\n final = abs_diff\n return final", "title": "" }, { "docid": "8dda1a6261c27e618f683719652aa858", "score": "0.6348582", "text": 
"def dayssince(dt1, dt2):\n return (dt1.date() - dt2.date()).days", "title": "" }, { "docid": "b163c6377258ffc02facdb79bd946da8", "score": "0.6344517", "text": "def date_arithmetic():\n dt02272000 = datetime.strptime(\"Feb 27, 2000\", \"%b %d, %Y\")\n dt02272017 = datetime.strptime(\"Feb 27, 2017\", \"%b %d, %Y\")\n dt01012017 = datetime.strptime(\"Jan 1, 2017\", \"%b %d, %Y\")\n dt10312017 = datetime.strptime(\"Oct 31, 2017\", \"%b %d, %Y\")\n three_days_after_02272000 = (dt02272000 + timedelta(3)).strftime(\"%b %d, %Y\")\n three_days_after_02272017 = (dt02272017 + timedelta(3)).strftime(\"%b %d, %Y\")\n\n days_passed_01012017_10312017 = (dt10312017 - dt01012017).days\n\n return three_days_after_02272000, three_days_after_02272017, days_passed_01012017_10312017", "title": "" }, { "docid": "3e9a582b783091e188ef637ba80e933f", "score": "0.63353944", "text": "def dates_between(first: str, second: str) -> int:\n return abs((datetime.strptime(first, '%d.%m.%Y') - datetime.strptime(second, '%d.%m.%Y')).days)", "title": "" }, { "docid": "e44d0253b9869fc2974453af5e60d2ed", "score": "0.6318613", "text": "def dateDiff(date1,date2):\n d1 = date(int(date1[0:4]), int(date1[4:6]), int(date1[6:8]))\n d2 = date(int(date2[0:4]), int(date2[4:6]), int(date2[6:8]))\n delta = d2 - d1\n return delta.days", "title": "" }, { "docid": "547c223e39f9e5bd3fd1349b618767d9", "score": "0.63173074", "text": "def date_arithmetic():\n date1 = \"27 Feb 2000\"\n date2 = \"27 Feb 2017\"\n num_days = 3\n date3 = \"31 Oct 2017\"\n date4 = \"1 Jan 2017\"\n d1 = datetime.strptime(date1, \"%d %b %Y\")\n d2 = datetime.strptime(date2, \"%d %b %Y\")\n d3 = datetime.strptime(date3, \"%d %b %Y\")\n d4 = datetime.strptime(date4, \"%d %b %Y\")\n three_days_after_02272000 = d1 + timedelta(days=num_days)\n three_days_after_02272017 = d2 + timedelta(days=num_days)\n days_passed = d3 - d4\n\n return three_days_after_02272000, three_days_after_02272017, days_passed", "title": "" }, { "docid": "bd3baa84e6b1b94989adbba2ceee675f", "score": "0.63084006", "text": "def years_apart(date1, date2):", "title": "" }, { "docid": "f74320551d35ac59aec1821b1ff4d685", "score": "0.6305475", "text": "def days_in_year(year: int) -> int:\n return sum(DateBS.months_in_year(year))", "title": "" }, { "docid": "69e2ea4118ae6c8f0d6451c782025bbb", "score": "0.6305449", "text": "def year_as_days(year):\n\n days = 365 * year\n days += num_leap_years(year)\n return days", "title": "" }, { "docid": "c720904bbca941eafe4382e9b06f0590", "score": "0.6300241", "text": "def count_leap_year(from_year, to_year):\n\n count = 0\n for ele in range(from_year, to_year):\n if is_leap_year(ele):\n count += 1\n return count", "title": "" }, { "docid": "f2ccf1ecea25489a787ba636d7bf5e10", "score": "0.62962836", "text": "def get_age(d1, d2):\r\n #Check how many days apart the two dates are\r\n days_apart = date_diff(d1,d2)\r\n\r\n #Divide this number by the number of days in a year and round\r\n #down to get the number of complete years\r\n return int(days_apart/365.2425)", "title": "" }, { "docid": "c727c32d1eeefea2181d747d2b67aa5c", "score": "0.62929523", "text": "def NumDaysInYear(year):\n\n date_lo = datetime.date(year, 1, 1)\n date_hi = datetime.date(year + 1, 1, 1)\n num_days = (date_hi - date_lo).days\n\n # End of NumDaysInYear().\n return num_days", "title": "" }, { "docid": "0970e9088c9b826f7cd7c151bdd779f7", "score": "0.6269547", "text": "def date_in_days(year, month, day):\n\n # Year and month are subtracted by one because they are in-progress.\n days = year_as_days(year - 1)\n 
days += month_as_days(month - 1, year)\n days += day\n return days", "title": "" }, { "docid": "bd1067fad0c4588297c25d55caf676fd", "score": "0.6239519", "text": "def get_no_of_days(self):\r\n \r\n CurrentDate=dt.datetime(self.Current_year,self.Current_month,self.Current_day)\r\n BirthDate=dt.datetime(self.Current_year,self.Birth_month,self.Birth_day)\r\n\r\n #Intializing the difference in days to zero\r\n diffdays=0\r\n\r\n #Difference is to be calculated only if Current date and Birth date are unidentical\r\n \r\n if(CurrentDate!=BirthDate):\r\n difference=str(BirthDate-CurrentDate)\r\n diffdays=int(difference.split(\" \")[0])\r\n \r\n return diffdays", "title": "" }, { "docid": "c69f9517986f48ca7755556bd0cbcf1c", "score": "0.6195864", "text": "def days_in_year( year ):\n if year % 4 == 0:\n return 366\n else:\n return 365", "title": "" }, { "docid": "c3f8811ce1ffe5f2c5f8286235d69f77", "score": "0.6188364", "text": "def days_between(self, date1, date2):\n raise NotImplementedError(\"Method days_between needs to be implemented\")", "title": "" }, { "docid": "62a1d64e12b7d549e83f3aa2f848d431", "score": "0.61681944", "text": "def calc_months_passed(year, month, day):\r\n if not all(isinstance(x, int) for x in [year, month, day]):\r\n raise TypeError\r\n if (\r\n (year < START_DATE.year)\r\n or (month not in range(1, 13))\r\n or (day not in range(1, monthrange(year, month)[1]))\r\n or (date(year, month, day) < START_DATE)\r\n ):\r\n raise ValueError\r\n\r\n date_to_test = date(year, month, day)\r\n relative_delta = relativedelta(date_to_test, START_DATE)\r\n days_to_month = 1 if relative_delta.days >= MIN_DAYS_TO_COUNT_AS_MONTH else 0\r\n return (\r\n (relative_delta.years * MONTHS_PER_YEAR) + relative_delta.months + days_to_month\r\n )", "title": "" }, { "docid": "3cf94ae60d50e8f42ef760cf8064eacc", "score": "0.61270416", "text": "def _get_number_of_days(self, date_from, date_to):\n from_dt = datetime.strptime(date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_day = timedelta.days + 1 + float(timedelta.seconds) / 86400\n return diff_day", "title": "" }, { "docid": "335ca6b7fa36f5c4efaff9cb8c821895", "score": "0.612495", "text": "def find_nb_days(start, end):\n # convert dates in numbers\n start = date_in_nm(start)\n end = date_in_nm(end)\n \n nb_days = end - start + 1\n return nb_days", "title": "" }, { "docid": "e92fc7ab9006be200a83645a99474f16", "score": "0.611288", "text": "def get_age(d1, d2=instantiate_date()):\n age_delta = d2.datetime - d1.datetime\n if d2.month < d1.month: #compared month comes before anniversary month\n next_anniversary = custom_date(d2.year, d1.month, d1.day, d1.hour, d1.minute, d1.second)\n elif d2.month > d1.month: #compared month comes after anniversary month\n next_anniversary = custom_date(d2.year + 1, d1.month, d1.day, d1.hour, d1.minute, d1.second)\n elif d2.day < d1.day: #compared month = anniversary month but compared day comes before anniversary day\n next_anniversary = custom_date(d2.year, d1.month, d1.day, d1.hour, d1.minute, d1.second)\n elif d2.day > d1.day: #compared month = anniversary month but compared day comes after anniversary day\n next_anniversary = custom_date(d2.year + 1, d1.month, d1.day, d1.hour, d1.minute, d1.second)\n next_anniversary_delta = next_anniversary.datetime - d2.datetime\n return age_delta, next_anniversary_delta", "title": "" }, { "docid": "67b855c8ea714a7a1dd01c9c0e6cc58f", "score": "0.6107837", "text": "def days(ctx, end_date, start_date):\n return 
datedif(ctx, start_date, end_date, 'd')", "title": "" }, { "docid": "1ae62ef213be35af6060e547d1697fab", "score": "0.6094515", "text": "def __getPeriod(d1,d2):\r\n\td1 = map(int,d1.split('-'))\r\n\td1 = date(d1[0],d1[1],d1[2])\r\n\td2 = map(int,d2.split('-'))\r\n\td2 = date(d2[0],d2[1],d2[2])\r\n\tdelta = d1-d2\r\n\treturn float(delta.days)/365.0", "title": "" }, { "docid": "8f4147cab3b7e292c4cd8d66e553dc1e", "score": "0.60763305", "text": "def parse_year_days(year_info):\n _, leap_days = _parse_leap(year_info)\n res = leap_days\n for month in range(1, 13):\n res += (year_info >> (16 - month)) % 2 + 29\n return res", "title": "" }, { "docid": "ceaf2396dba8d0b1db344ea4394c5431", "score": "0.60627824", "text": "def _difference_in_years(start, end):\n diff = end - start\n diff_in_years = (diff.days + diff.seconds/86400)/365.2425\n return diff_in_years", "title": "" }, { "docid": "5a31b6f8677dcadb94fe633043b0ea2b", "score": "0.6026908", "text": "def numDays(self, dt, mnt, yr):\n d = 0\n for i in range(1, mnt):\n d += self.mnthDay[str(self.mnth[i])]\n d += dt\n if mnt > 2 and self.leap(yr):\n d += 1\n return d", "title": "" }, { "docid": "ccd1b78448c05c7cf6cf8879c3481e04", "score": "0.6019438", "text": "def get_date_diff_count(from_date, to_date):\n if from_date and to_date:\n return numpy.busday_count(from_date.date(), to_date.date())\n\n return None", "title": "" }, { "docid": "ac2f04eaa0e607fe2946e6b48fc3e1b3", "score": "0.60106456", "text": "def days(m, y):\r\n d = 0\r\n if m == 2 and isLeap(y):\r\n d = 29\r\n elif m == 2:\r\n d = 28\r\n elif m % 2 == 0:\r\n d = 30\r\n else:\r\n d = 31\r\n\r\n return d", "title": "" }, { "docid": "7cf008f99ce1c9356b47292917f28994", "score": "0.6008513", "text": "def baselineDays(self):\n year, month, day = self.toTuple()\n days = firstDayOfYear(year)\n for m in range(month - 1):\n days += GenDate.daysInMonth[m]\n if isLeapYear(year) and month > 2:\n days += 1\n if year == 1582 and (month > 10 or (month == 10 and day > 4)):\n days -= 10\n days += day - 1\n return days", "title": "" }, { "docid": "1a0a8354ca95b4a0f866a620ee1f433d", "score": "0.59950364", "text": "def doubleday(b1, b2):\n birth1 = datetime.date(b1[0], b1[1], b1[2])\n birth2 = datetime.date(b2[0], b2[1], b2[2])\n \n difference = abs(birth1 - birth2)\n \n if birth1 > birth2:\n datediff = (birth1 + difference)\n elif birth2 > birth1:\n datediff = (birth2 + difference)\n \n return datediff", "title": "" }, { "docid": "9b1fea23299b7c9877b63ffa8166ea44", "score": "0.59703875", "text": "def _lunar_year_days(self, year: int) -> int:\n days = 0\n months_day = self.G_LUNAR_MONTH_DAYS[year - self.START_YEAR]\n for i in range(1, 13 if self._get_leap_month(year) == 0x0F else 14):\n day = 29 + ((months_day >> i) & 0x01)\n days += day\n return days", "title": "" }, { "docid": "9b1fea23299b7c9877b63ffa8166ea44", "score": "0.59703875", "text": "def _lunar_year_days(self, year: int) -> int:\n days = 0\n months_day = self.G_LUNAR_MONTH_DAYS[year - self.START_YEAR]\n for i in range(1, 13 if self._get_leap_month(year) == 0x0F else 14):\n day = 29 + ((months_day >> i) & 0x01)\n days += day\n return days", "title": "" }, { "docid": "60c5947e4b63c51f42dab9a615b6c8d6", "score": "0.5962528", "text": "def _count_days(self):\n delta = self.__end_date - self.__start_date\n return delta.days + 1", "title": "" }, { "docid": "4cd0e63e178922fa1db18a599884d727", "score": "0.59481615", "text": "def year_fraction_from_days(self, days: int) -> float:\n raise NotImplementedError", "title": "" }, { "docid": 
"5eb0e37015ff0be4531520355716a392", "score": "0.5940397", "text": "def is_year(day1, day2):\n return day1.year == day2.year and day1.month == 1 and day1.day == 1 and\\\n day2.month == 12 and day2.day == 31", "title": "" }, { "docid": "f7ffb68bb51eb902a04554dbd2cb1b90", "score": "0.5938862", "text": "def parse_year_days(year_info):\n year_info = int(year_info)\n res = 29 * 12\n\n leap = False\n if year_info % 16 != 0:\n leap = True\n res += 29\n\n year_info //= 16\n\n for i in range(12 + leap):\n if year_info % 2 == 1:\n res += 1\n year_info //= 2\n return res", "title": "" }, { "docid": "12aea3dae6d9d5d406625131c02735ce", "score": "0.59175766", "text": "def _get_number_of_days(self, date_from, date_to,employee_id):\n\n timedelta = date_to - date_from\n diff_day = timedelta.days + 1.0\n return diff_day", "title": "" }, { "docid": "716d9c82aeb9c9a7aa79fce919d464c5", "score": "0.59130925", "text": "def days_between(self,other):\n x = self.copy()\n y = other.copy()\n if x == y:\n return 0\n if x.is_before(y) == False:\n count = 0\n while x != y:\n y.advance_one()\n count += 1 \n elif x.is_before(y) == True:\n count = 0\n while x != y:\n x.advance_one()\n count += -1\n return count", "title": "" }, { "docid": "00b4fd3c6f14b2cb3024a9b6a55b6133", "score": "0.5881235", "text": "def date_diff(self):\n d1 = datetime.strptime(self.start_date, \"%Y-%m-%d\")\n d2 = datetime.strptime(self.end_date, \"%Y-%m-%d\")\n return abs((d2 - d1).days) + 1", "title": "" }, { "docid": "3133edc823d739283fb5a25949266c15", "score": "0.5864947", "text": "def diff_days(call_record1, call_record2):\n return (call_record2.format_time - call_record1.format_time).days", "title": "" }, { "docid": "f9c83896968537a074f2eb36a108f3d9", "score": "0.5840487", "text": "def date2consecutive_day(year=2000, month=12, day=31):\n num = (datetime.date(year, month, day)-datetime.date(year, 1, 1)).days\n if num > 364:\n num = num-1\n return num", "title": "" }, { "docid": "2eee886ab16b4695433fafc439899ba6", "score": "0.5840359", "text": "def _num_days(self, dt):\n\n return (dt - date(2001, 1, 1)).days", "title": "" }, { "docid": "6332f61cd77fd176910ac4af0960a5dc", "score": "0.5811715", "text": "def ndays(year: int, month: Optional[int] = None, leap: int = 0) -> int:\n _check_year_range(year)\n if month is None:\n return YEAR_DAYS[year - MIN_LUNAR_YEAR]\n leap = int(bool(leap))\n for _month, _days, _leap in LCalendars.iter_year_month(year):\n if (_month, _leap) == (month, leap):\n return _days\n else:\n raise InvalidLunarDateError(f'[year={year},month={month},leap={leap}]: Invalid month.')", "title": "" }, { "docid": "a182ef65286212f6ef2fe1d08740687a", "score": "0.5789213", "text": "def _span_days(self, year: int) -> int:\n return sum(self._lunar_year_days(y) for y in range(self.START_YEAR, year))", "title": "" }, { "docid": "04373743ebfc83ecc10dae320e249b42", "score": "0.5782142", "text": "def DayOfYear(year, month, day):\n total_days = 0\n # Validate the date ranges, months (1 to 12), and days (correct number of days per month), including Leap Year.\n if 0 < month <= 12 and (0 < day <= days_in_month[month - 1] or (IsYearLeap(year) and month == 2)):\n # January is easy, it's the number of days as defined by the value in 'day'.\n if month == 1:\n total_days = day\n return total_days\n # February is January (31) + number of days in February as defined by 'day'.\n if month == 2:\n total_days = day + days_in_month[0]\n return total_days\n # Add all days of the month up until current month-1, then add one (1) for February if Leap Year.\n 
total_days = day # Add number of days in month to 'total_days' before adding number of days per month before current month.\n for m in range(month - 1):\n total_days += days_in_month[m]\n if IsYearLeap(year):\n total_days += 1 # Leap Year Day, February 29\n return total_days\n else:\n return None # Invalid month or day.", "title": "" }, { "docid": "d917a03b99e8c1c5f2b3cd86931f0bae", "score": "0.5781079", "text": "def _get_month_days(self, start, end):\n today = datetime.date.today()\n days_worked = h.get_working_days(start, h.previous_day(today))\n days_left = h.get_working_days(today, end)\n return days_worked, days_left", "title": "" }, { "docid": "baf09eed69ac4bcd7b6365509c2658bb", "score": "0.5775615", "text": "def days_before_year(year):\n y = year - 1\n return y*365 + y//4 - y//100 + y//400", "title": "" }, { "docid": "b8db227d1108477f637b92c34be2a846", "score": "0.5769517", "text": "def get_number_of_days(date1):\n\n d0 = date1\n d1 = date.today()\n delta = d1 - d0\n return str(delta)", "title": "" }, { "docid": "d7d677ef644a1b35c6c8cb5a3f280625", "score": "0.5764401", "text": "def _calculate_years(self, year):\n diff = self._today.year - int(year)\n # include current year\n return diff", "title": "" }, { "docid": "f2d5a60d8e9e4f18c7bf9c043d2dfa59", "score": "0.5748859", "text": "def dateConv(x):\n startDate = datetime.date(1900, 1, 1)\n\n try:\n inputDate = datetime.datetime.strptime(x, \"%d/%m/%Y\")\n inputDate = inputDate.date()\n except ValueError:\n #Return the current year \n return float('nan')\n\n if inputDate.year < startDate.year:\n raise ValueError(\"Conversion of year before 1900 Error\")\n\n tDelta = inputDate - startDate\n\n return tDelta.days", "title": "" }, { "docid": "30cd75937122049f95b177cfb74c45c4", "score": "0.5746567", "text": "def days_number():\r\n # TODO: Assuming COVID-19 start date is considered as Jan 1st 2020. 
Jan 1st 2020 to current system date will be XYZ number of days.\r\n # The number of days generated by the program cannot exceed this date, if exceeded the days returned should be '9999'\r\n pass", "title": "" }, { "docid": "2280704122f51b9d919925ee4968007e", "score": "0.5737672", "text": "def number_of_days(df):\n first_day, last_day = first_and_last_days_df(df)\n return (last_day - first_day)/np.timedelta64(1, 'D')", "title": "" }, { "docid": "69a423e2dab7349ad0b0a851f40d8807", "score": "0.5727096", "text": "def test_calculate_years(self):\n self.assertEqual(calculate_years('09 FEB 1962', '11 SEP 2017'), 55)\n self.assertEqual(calculate_years('10 SEP 1969', '11 SEP 1999'), 30)\n self.assertEqual(calculate_years('23 OCT 1989', '11 SEP 2017'), 27)", "title": "" }, { "docid": "70cf2e9f6eea601a1d932ae39f1596a7", "score": "0.57255656", "text": "def days_in_month(year, month):\n month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n num_days=month_days[month-1] \n for days in month_days:\n if is_leap(year) is True:\n month_days=[31,29,31,30,31,30,31,31,30,31,30,31]\n num_days=month_days[month-1]\n return num_days", "title": "" }, { "docid": "5843d5eaf8f40e9fd0cc0bd64f9a97cb", "score": "0.5707042", "text": "def num_days_from_mmm_yyyy(date_str):\n date = datetime.datetime.strptime(date_str, '%b-%Y').date()\n delta = datetime.datetime.today().date() - date\n return delta.days", "title": "" }, { "docid": "5007ba143346d0a4216383768a34af80", "score": "0.5706403", "text": "def days_in_year(year: int, calendar: str = \"default\") -> int:\n return (\n (datetime_classes[calendar](year + 1, 1, 1) - pydt.timedelta(days=1))\n .timetuple()\n .tm_yday\n )", "title": "" }, { "docid": "013c088633ff7051dc2720e846f1aa9d", "score": "0.5697428", "text": "def get_no_of_days_basic(date):\n formatted_date = re.search(r'^(\\d{4})-(\\d{2})$', date)\n \n if not formatted_date:\n raise InvalidDateFormat('Entered invalid date format!')\n\n year = int(formatted_date.group(1))\n month = int(formatted_date.group(2))\n\n # monthrange returns [weekday of first day of the month, number of days in month]\n return monthrange(year, month)[1]", "title": "" }, { "docid": "333ae70a188431cd5b7f02605267d088", "score": "0.56913424", "text": "def calc_partial_year(firstDate, month_lengths_arr):\n date, month, year = map(int, firstDate.split(\"/\"))\n\n days_into_year = date - 1\n # accounts for the fact that today is not completed\n\n for i in xrange(0, month - 1):\n # excludes the current month; -1 accounts for zero indexing\n days_into_year += month_lengths_arr[i]\n\n if month > 2:\n # we do not need to check for the 29th here, because date takes care of it\n if is_leap(year):\n days_into_year += 1\n\n return days_into_year", "title": "" }, { "docid": "43b6723260508c962275290abfc93e98", "score": "0.56837106", "text": "def days_diff(day1: SciDate, day2: SciDate, *, include_first: bool = False, include_last: bool = False):\n\n # SciDate.sub includes the first day but not last, add last to the diff\n # to fulfil the specified include_first and include_last value\n diff_seconds = abs(day1.sub(day2)) + SECONDS_PER_DAY\n\n min_diff = 0\n if not include_first:\n min_diff += SECONDS_PER_DAY\n\n if not include_last:\n min_diff += SECONDS_PER_DAY\n\n if diff_seconds < min_diff:\n return 0\n\n return (diff_seconds - min_diff) // SECONDS_PER_DAY", "title": "" }, { "docid": "2e54538a139f590cefad7990e82f3dbd", "score": "0.5673075", "text": "def yf(self, start_date, end_date):\n return (end_date - start_date).days / 365.0", "title": "" }, { 
"docid": "3a90a464d5a8e4e9d138062a8e4ecb69", "score": "0.56671846", "text": "def get_day_num_in_assign_year(year):\n if calendar.isleap(int(year)):\n return 366\n else:\n return 365", "title": "" }, { "docid": "ae81a1607766293e335f70bb57afb79b", "score": "0.5635434", "text": "def get_count_days(\n self,\n ) -> List[Union[datetime.timedelta, List[datetime.timedelta]]]:\n days: List[Union[datetime.timedelta, List[datetime.timedelta]]] = []\n dates = self.get_working_dates()\n start_date = self._date\n days.append(dates[0] - start_date)\n\n for date, next_date in zip(dates, dates[1:]):\n start_year = datetime.date(next_date.year, 1, 1)\n\n if date.month == 12 and calendar.isleap(next_date.year):\n common_date, leap_date = start_year - date, next_date - start_year\n days.append([common_date, leap_date])\n\n elif date.month == 12 and calendar.isleap(date.year):\n leap_date, common_date = start_year - date, next_date - start_year\n days.append([common_date, leap_date])\n\n else:\n days.append(next_date - date)\n return days", "title": "" }, { "docid": "285588c22ed2bda902ca793535ee1159", "score": "0.5616895", "text": "def _span_days(self, year: int) -> int:\n span_days = 0\n for y in range(self.START_YEAR, year):\n span_days += self._lunar_year_days(y)\n return span_days", "title": "" }, { "docid": "fd6e0ed6c13d9dfdbdf3f3866035fd69", "score": "0.56074077", "text": "def _doy_days_since_doys(\n base: xr.DataArray, start: DayOfYearStr | None = None\n) -> tuple[xr.DataArray, xr.DataArray, xr.DataArray]:\n calendar = get_calendar(base)\n\n base_doy = base.dt.dayofyear\n\n doy_max = xr.apply_ufunc(\n days_in_year, base.dt.year, vectorize=True, kwargs={\"calendar\": calendar}\n )\n\n if start is not None:\n mm, dd = map(int, start.split(\"-\"))\n starts = xr.apply_ufunc(\n lambda y: datetime_classes[calendar](y, mm, dd),\n base.dt.year,\n vectorize=True,\n )\n start_doy = starts.dt.dayofyear\n start_doy = start_doy.where(start_doy >= base_doy, start_doy + doy_max)\n else:\n start_doy = base_doy\n\n return base_doy, start_doy, doy_max", "title": "" }, { "docid": "36c25b220cfdd580b25002ae942370a3", "score": "0.56073624", "text": "def _divide_by_year(self):\r\n \r\n # clean the user input\r\n from_ = datetime.datetime.strptime(self.from_date, format=\"%Y-%m-%D\")\r\n to_ = datetime.datetime.strptime(self.to_date, format=\"%Y-%m-%D\")\r\n\r\n # recursive functions to get the correct froms and tos\r\n def from_rec(start_date: datetime.datetime):\r\n new_date = datetime.datetime(start_date.year + 1, 1, 1)\r\n if new_date > to_:\r\n return []\r\n else:\r\n return [new_date] + from_rec(new_date)\r\n \r\n def to_rec(end_date: datetime.datetime):\r\n new_date = datetime.datetime(end_date.year - 1, 12, 31)\r\n if new_date < from_:\r\n return []\r\n else:\r\n return [new_date] + from_rec(new_date)\r\n\r\n # get the list with the star and end of each process\r\n from_list = [from_] + from_rec(from_)\r\n to_list = [to_] + from_rec(to_)\r\n to_list = to_list[::-1]\r\n\r\n # this attribute is an iterator\r\n self.correct_dates = zip(from_list, to_list)", "title": "" }, { "docid": "82220768fcfef3bc9b9dda9b1ca85f60", "score": "0.5606136", "text": "def days_months(months):\n\tif months in [1,3,5,7,8,10,12]:\n\t\tdays=np.arange(1,32)\n\telif months in [4,6,9,11]:\n\t\tdays=np.arange(1,31)\n\telse:\n\t\tdays=np.arange(1,30) #as feb 2016 has 29 days\n\treturn days", "title": "" }, { "docid": "82220768fcfef3bc9b9dda9b1ca85f60", "score": "0.5606136", "text": "def days_months(months):\n\tif months in 
[1,3,5,7,8,10,12]:\n\t\tdays=np.arange(1,32)\n\telif months in [4,6,9,11]:\n\t\tdays=np.arange(1,31)\n\telse:\n\t\tdays=np.arange(1,30) #as feb 2016 has 29 days\n\treturn days", "title": "" }, { "docid": "13900f89dd5edbcd8f90bdb0e4d36598", "score": "0.5593712", "text": "def _days_in_month(year: int, month: int) -> int:\n DAYS_IN_MONTH = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n is_leap = (year % 4 == 0) and ((year % 100 != 0) or (year % 400) == 0)\n days = DAYS_IN_MONTH[(month - 1) % 12]\n if month == 2:\n days += is_leap\n return days", "title": "" }, { "docid": "630a7e868a74cd840565f63e9ef7b1e5", "score": "0.55800176", "text": "def Days(days):", "title": "" } ]
05bbe2e0b7965809ebd65daeb904cc8c
Helper method to locate a row in the database
[ { "docid": "dbdd4b554715401408c8f7b90e2a0625", "score": "0.0", "text": "def check_for_app(app):\n if (app == \"all\"):\n return -1\n count = 0\n for row in cur.execute(\"SELECT * FROM passwords WHERE application=?\", (app,)):\n count+=1\n else:\n return count", "title": "" } ]
[ { "docid": "297fb191bddda71a9dc5e40d64edc8c6", "score": "0.667747", "text": "async def find(self, pk_value: Any) -> RowProxy:", "title": "" }, { "docid": "6c84afbfd114dc604b825ace3583251d", "score": "0.64952505", "text": "def find_row(table, row):\n for idx in range(len(table)):\n if table[idx][0] == row:\n return idx\n return -1", "title": "" }, { "docid": "28839e7d2598847d8979c750a714aad8", "score": "0.6296006", "text": "def get_rows(column_to_search, value_to_match, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file) \n c.execute('SELECT * FROM {t} WHERE {col}=\"{value}\"'.format(t=safe(table), \n col=safe(column_to_search), value=value_to_match))\n row = c.fetchall()\n conn.close()\n return row\n except Exception as e:\n print(\"Error when trying to get row in table\", table, \"in\", db_file)\n print(e)\n return None", "title": "" }, { "docid": "06ab440f6a0a50b09c16c1ed4cfd8241", "score": "0.62917984", "text": "def find_row(self, row_id):\n #Calculate what line in the file the row_id will be found at\n looking_for_line = self.__row_id_in_file(row_id)\n\n #Initiate line-counter\n current_line = 1\n with open(self.__data_file_for_row_id(row_id), 'r') as f:\n for line in f:\n if current_line == looking_for_line:\n return json.loads(line)\n current_line += 1\n\n raise Exception('Could not find row_id ' + row_id)", "title": "" }, { "docid": "504967e10d6ef22a6779ba7fc9bb6a3d", "score": "0.62687343", "text": "def get_row_index(self):\n for row in range(self.model.rowCount()):\n name_item = self.model.item(row, self.COL_NAME)\n fullpath = name_item.data(self.ROLE_FULLPATH)\n if fullpath == self.filepath:\n return row", "title": "" }, { "docid": "ca030c7af9b16e3976333b51a983936a", "score": "0.62624425", "text": "def search_db(self, key, item):\n db = self.check_db()\n data = [record for record in db if record[key] == item]\n if data:\n return data[0]\n else:\n return False", "title": "" }, { "docid": "17dbbe544a08939ae858d44b9d1cb459", "score": "0.6181649", "text": "def db_row(self):\n return self._model_cls.query.get(self._pk)", "title": "" }, { "docid": "8ef52de486d6046a1be64aaf8c778956", "score": "0.61808544", "text": "def find_id(self, uid: int) -> Optional[sqlite3.Row]:\n\n sql = \"\"\"SELECT rowid, url as 'url [url]', title,\n added as 'added [local_datetime]',\n updated as 'updated [local_datetime]',\n deleted as 'deleted [local_datetime]',\n tags, comments\n FROM bookmarks\n WHERE rowid=?\"\"\"\n return self._selectOne(sql, (uid,))", "title": "" }, { "docid": "3e94a68f483c751a05c47fa6a37fc0c8", "score": "0.60699207", "text": "def findAt(self, index):\n saveCursor = self.cursor\n i = 0\n if self.first():\n if index == i:\n return self.cursor\n else:\n while self.next():\n i += 1\n if index == i:\n return self.cursor\n self.cursor = saveCursor\n return None", "title": "" }, { "docid": "9e6ddd0dc35be2252846c08b164288b5", "score": "0.60465103", "text": "def WhereAmI (row, headers,table):\r\n\r\n return t.WhereAmI (row, headers,table)", "title": "" }, { "docid": "a0281990eb782393505e2e96e107bae8", "score": "0.604043", "text": "async def find_by_id(self, _id: int) -> Record:\n conn: Connection\n async with self.db_pool.acquire() as conn:\n return await conn.fetchrow(\n f\"SELECT * FROM {self.table_name} WHERE {self.primary_key}=$1\",\n _id,\n )", "title": "" }, { "docid": "cd5e03e17097b8c93b80158b2fba2911", "score": "0.6037111", "text": "def search(item):\n row = -1\n try:\n row = filter_rows.index(item)\n except Exception:\n logger.warning(\"%s was not found\", str(item))\n 
return row", "title": "" }, { "docid": "08227e90db2e48ee0cd434462b34c591", "score": "0.60301137", "text": "def find(self, value):\n for position in range(self.get_size()):\n if self.table[position] == value:\n return position", "title": "" }, { "docid": "4bc6881c01f37ad933d0b4d0792ee8e7", "score": "0.5997052", "text": "def rpc_database_get_row_by_id(self, row_id):\n\t\ttable_name = self.path.split('/')[-2]\n\t\ttable = DATABASE_TABLE_OBJECTS.get(table_name)\n\t\tassert table\n\t\tcolumns = DATABASE_TABLES[table_name]\n\t\tsession = db_manager.Session()\n\t\trow = db_manager.get_row_by_id(session, table, row_id)\n\t\tif row:\n\t\t\trow = dict(zip(columns, (getattr(row, c) for c in columns)))\n\t\tsession.close()\n\t\treturn row", "title": "" }, { "docid": "8c68ec8f12fdfa6f58d0854723163697", "score": "0.5869959", "text": "def _get_row_index(self, row: Row) -> int:\n row_index = -1\n for index, table_row in enumerate(self.table_data):\n if table_row.values == row.values:\n row_index = index\n break\n return row_index", "title": "" }, { "docid": "6d11892e3af4bd3bfe30a3c8e8865d3e", "score": "0.58069116", "text": "def row_by_value(idl_, table, column, match, default=_NO_DEFAULT):\n tab = idl_.tables[table]\n for r in tab.rows.values():\n if getattr(r, column) == match:\n return r\n if default is not _NO_DEFAULT:\n return default\n raise None", "title": "" }, { "docid": "5e84a966cb35da05610cc0e725ccec47", "score": "0.58031994", "text": "def get_row(self, pk):\n ans = self.execute(self.commands.get_row(\n cols=self._join_cols(self.columns),\n table=self.name,\n pk_col=self.primary_key_column,\n pk=pk\n ))\n if not ans:\n return None\n return self._dictify(self.columns, ans[0])", "title": "" }, { "docid": "4318deb02a39569b51bd6790e5f7d37c", "score": "0.57962567", "text": "def get_for_id(id,table):\n # Implement this function\n for row in range(1, len(table)):\n for col in range(len(table[0])):\n if id in table[row][col]:\n return table[row]", "title": "" }, { "docid": "c56499c3188e2df25a425606e4bf2aa2", "score": "0.5779984", "text": "def search_query(self, num, query, table, field):\n try:\n self.db_connection()\n if self.connection is not None: # If the database connection was established successfully\n with self.connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchone()\n print(type(result))\n if result is not None and result[field] == num: # check if query result is None and pk equal to num\n print(table + str(num), \"was found\")\n return True\n else:\n print(table + str(num), \"wasn't found\")\n return False\n except IntegrityError:\n print(\"A new record couldn't be added\")\n exit()\n finally:\n self.close_connection() # Closing the opened connection", "title": "" }, { "docid": "8982c47ca30a1f6d2fee8e79075b10ec", "score": "0.57695395", "text": "def pylookup(self, col1, col2, matrix, index=3):\n\n for row in matrix:\n if col1 == row[0] and col2 == row[1]:\n return row[index]\n return None", "title": "" }, { "docid": "3105696bb5e55a73dd6059a48aa35653", "score": "0.57678396", "text": "def locate(self, column, cond_inp):\n\n try:\n return self.df_input.loc[self.df_input[column] == cond_inp]\n except Exception as e:\n print(e)", "title": "" }, { "docid": "ee34301f45dcffed13f7dc7accf30b7a", "score": "0.5747678", "text": "def select_from_DB (itemToSearch, tableWhereToSearch):\n session = open_session()\n s = select([itemToSearch.tableWhereToSearch])\n result = session.execute(s)\n for row in result:\n print(row)", "title": "" }, { "docid": 
"6637a320c7cb67a30929380f2f37c8be", "score": "0.5729195", "text": "def find_result_row(self, search_text, html_column_name):\n headers = []\n results = []\n html_header = self.driver.find_elements_by_css_selector('.header')\n for header in html_header:\n headers.append(header.text)\n\n html_results = self.driver.find_elements_by_css_selector('.dataGridRow')\n for id, row in enumerate(html_results):\n results.append({headers[id]:item.text for id, item in enumerate(row.find_elements_by_css_selector('td'))})\n\n if results:\n # Find result row, 0 = first entry\n for id, person in enumerate(results):\n if search_text.lower() == person[html_column_name].lower():\n # Match found, but check for duplicates\n temp = []\n for people in results:\n if people[html_column_name] in temp:\n return -2 # Duplicate found\n else:\n temp.append(people[html_column_name])\n return id\n return -1 # No result or no match", "title": "" }, { "docid": "2a82c5f0cca6da9c941109922090673f", "score": "0.56894594", "text": "def get_result_from_db() -> list:\n global choice, confirmation, res, data, column_names, result, choice_row\n column_names = data[0]\n try:\n confirmation.after(1, confirmation.destroy)\n except AttributeError:\n pass\n choice_row = choice.get()\n res = place_for_enter.get()\n if choice_row in column_names:\n result = simple_search_from_db(data_base, table, choice_row, res)\n return result\n else:\n mistake_select_value()", "title": "" }, { "docid": "8ae0f9ef58912819debaf910055a9924", "score": "0.5688523", "text": "def search_element_of_table(path, table, nameColumn, data):\n conn = sqlite3.connect(path)\n c = conn.cursor()\n to_return = \"<tr>\"\n for row in c.execute('SELECT * FROM '+table +' where '+nameColumn+\" like '%\"+data+\"%'\").fetchall():\n for x in row:\n to_return = to_return + \"<td>\"+ str(x) + \"</td>\"\n to_return = to_return + \"</tr>\"\n\n conn.close()\n return to_return", "title": "" }, { "docid": "82303fc4f119816dddc36c000b2d804f", "score": "0.5684421", "text": "def dt_find(search = \"\",\n row = None,\n column = None,\n tableID = \"list\",\n first = False,\n ):\n # 'todo need to fix the searching on numbers\n config = current.test_config\n browser = config.browser\n\n # Calculate the rows that need to be navigated along to find the search string\n colList = []\n rowList = []\n if row == None:\n r = 1\n while True:\n tr = \".//*[@id='%s']/tbody/tr[%s]\" % (tableID, r)\n try:\n elem = browser.find_element_by_xpath(tr)\n rowList.append(r)\n r += 1\n except:\n break\n elif isinstance(row, int):\n rowList = [row]\n else:\n rowList = row\n # Calculate the columns that need to be navigated down to find the search string\n if column == None:\n c = 1\n while True:\n td = \".//*[@id='%s']/tbody/tr[1]/td[%s]\" % (tableID, c)\n try:\n elem = browser.find_element_by_xpath(td)\n colList.append(c)\n c += 1\n except:\n break\n elif isinstance(column, int):\n colList = [column]\n else:\n colList = column\n s3_debug(\"rows %s, columns %s\" % (rowList, colList))\n # Now try and find a match\n result = []\n for r in rowList:\n for c in colList:\n td = \".//*[@id='%s']/tbody/tr[%s]/td[%s]\" % (tableID, r, c)\n try:\n elem = browser.find_element_by_xpath(td)\n s3_debug(\"got %s, needle %s\" % (elem.text, search))\n if elem.text == search:\n if first:\n return (r, c)\n else:\n result.append((r, c))\n except:\n pass\n return result", "title": "" }, { "docid": "81ad858b56a2534b08e265d7a2225c2c", "score": "0.56746584", "text": "def get(self, question, rows):\n con.query(question)\n result = 
con.use_result()\n return result.fetch_row(rows)", "title": "" }, { "docid": "44bd82cca6db12cd561101fb3c1c3871", "score": "0.56663024", "text": "def extract_row(self, column, identifier, df_input=None):\n\n try:\n if df_input is None:\n return self.df_input.loc[self.df_input[column] == identifier]\n else:\n return df_input.loc[df_input[column] == identifier] \n except Exception as e:\n print(e)", "title": "" }, { "docid": "545604459049dd550b1ee2aa5db95af1", "score": "0.56301135", "text": "def find(self, target):\n try:\n if type(target) is int:\n for key, value in self.index.table.items():\n if value == target:\n return(key)\n elif type(target) is str:\n for key, value in self.index.table.items():\n if key == target:\n return(value)\n except Exception as error:\n print(f\"Error: self.find({target}) -> {error}\")", "title": "" }, { "docid": "2229e71295060ce4193b244fa7f52778", "score": "0.5620588", "text": "def get_column(col_to_search, value_to_match, col_to_get, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file) \n c.execute('SELECT {cg} FROM {t} WHERE {col}=\"{value}\"'.format(t=safe(table), \n cg=safe(col_to_get), col=safe(col_to_search), value=safe(value_to_match)))\n column = c.fetchone()\n conn.close()\n return column\n except Exception as e:\n print(\"Error when trying to fetch row in table\", table, \"in database file\", db_file)\n print(e)\n return None", "title": "" }, { "docid": "4a08d0e09bbaafef00bbac0522edd781", "score": "0.56205857", "text": "def getRowPosition(sheetData, text, column):\n for index, row in enumerate(sheetData):\n if row[column] == text:\n return index\n raise ValueError('{0} is not in column {1}'.format(text, column))", "title": "" }, { "docid": "6cb7cf555e3d50761a1b8d3cdd862b91", "score": "0.55995876", "text": "def get_row(self):\n return self._row_number", "title": "" }, { "docid": "f522d2eebf883389c89309ecbcc6e04b", "score": "0.55955905", "text": "def get_row(self, row):\n \n return self.sudoku_matrix[row]", "title": "" }, { "docid": "38c3db93e876eb6ee1d8016539e54fd4", "score": "0.5590182", "text": "def getCurrentRow(self):\n item = self.getSelectedItem()\n if item:\n return item.row()\n return None", "title": "" }, { "docid": "15b66f8d2c58613ebb903de672049399", "score": "0.55896556", "text": "def get_absolute_row(self, index):\n try:\n return self._table.getRow(index)\n except Registry.Table.INVALID_ROW:\n import traceback \n raise IndexError(\n \"Index %s out of bounds. 
Original exception: %s\" % \\\n (index, traceback.format_exc()))", "title": "" }, { "docid": "f192d09b2efca6af71a25e100b4413bf", "score": "0.5565859", "text": "def getTableEntryByAddress(self, address):\n for entry in self.table:\n if entry.hasPage():\n if entry.getPage().contains(address):\n return entry\n return None", "title": "" }, { "docid": "fc85145fa65f3739dd418d30afe4c6a8", "score": "0.5562986", "text": "def findWhere(cls, args):\n return cls.search(args)[0][0]", "title": "" }, { "docid": "c3092d6220f910c11542b7e4c6227c23", "score": "0.55624574", "text": "def get_query_single_row(query, *args):\n conn = get_connection()\n cursor = conn.cursor()\n\n rows = cursor.execute(query, args).fetchall()\n\n result = map_row(cursor, rows[0])\n\n cursor.close()\n conn.close()\n\n return result", "title": "" }, { "docid": "c6c0bfcd71d9e0d32208cbd00f0129a3", "score": "0.55473316", "text": "def db_search_by_path(self, path):\n \n try:\n with self.connection:\n c = self.connection.cursor()\n query = \"SELECT * FROM %s WHERE path=?\" % PUBLICATIONS_TABLE\n c.execute(query, [path])\n return c.fetchone() # This path is a unique absolute path.\n except sqlite.Error as e:\n logging.exception(e)\n return None", "title": "" }, { "docid": "8ddad2f320ea6a8bca786356440c70a1", "score": "0.55336905", "text": "def get_index_in_table_row(self, table_locator, row, expected, loglevel='INFO'):\n row = int(row)\n row_index = row - 1\n table = self._table_element_finder.find(self._current_browser(), table_locator)\n if table is not None:\n rows = table.find_elements_by_xpath(\"./thead/tr\")\n if row_index >= len(rows): rows.extend(table.find_elements_by_xpath(\"./tbody/tr\"))\n if row_index >= len(rows): rows.extend(table.find_elements_by_xpath(\"./tfoot/tr\"))\n if row_index < len(rows):\n columns = rows[row_index].find_elements_by_tag_name('th')\n columns.extend(rows[row_index].find_elements_by_tag_name('td'))\n column_index = 0\n for element in columns:\n column_index = column_index + 1\n element_text = element.text\n if element_text and expected in element_text:\n return column_index\n self.log_source(loglevel)\n raise AssertionError(\"%s could not be found in row #%s of table %s.\"\n % (expected, str(row), table_locator))", "title": "" }, { "docid": "ef9bc570b6e6387b4217ed40b613e96a", "score": "0.55283874", "text": "def get_row( self, rowIndex ):\n return self.data[rowIndex,:]", "title": "" }, { "docid": "ed462ee4637137dac2b15c49658b7b99", "score": "0.55276054", "text": "def get_source_row(self, row):\n return self._dataframe.index[row]", "title": "" }, { "docid": "cf55e9df461bdf8d17311a91903c3737", "score": "0.55211335", "text": "def find(self, id):\n return self._select_one('''\n select\n *\n from\n {table}\n where\n {primary_key} = %s\n '''.format(table=self.__class__._table,\n primary_key=self.__class__._primary_key), [id])", "title": "" }, { "docid": "a18505f4e884ee890561e9ab664322b8", "score": "0.5515408", "text": "def find_cell_row(cell_roi_df, master_index_df):\n cell_roi_cols = ['cell_index', 'xy', 'date']\n \n if 'sub_coord' in master_index_df.columns:\n master_index_cols = ['sub_coord', 'xy', 'date']\n elif 'cell_index' in master_index_df.columns:\n master_index_cols = ['cell_index', 'xy', 'date']\n else:\n print(f\"No cell_index or sub_coord column in master_index_df\")\n return None\n\n matching_rows = []\n for row in master_index_df.index:\n\n query = [int(cell_roi_df.loc[0, colname]) for colname in cell_roi_cols]\n master = [int(master_index_df.loc[row, colname]) for colname in 
master_index_cols]\n\n if query == master:\n matching_rows.append(row)\n\n if len(matching_rows) > 1:\n print(f\"Found multiple rows matching cell_roi_df. Check master index for duplicates at:/n{master_index.path.iloc[0]}\")\n return None\n elif len(matching_rows) == 0:\n print(\"Found no rows in master_index_df matching cell_roi_df\")\n return None\n else:\n matching_row = matching_rows[0]\n return matching_row", "title": "" }, { "docid": "53a9353c3c46d97e6ef02217439e1220", "score": "0.55056393", "text": "def get_or_raise(cls, pk, exc=Exception(\"数据库错误\")):\n row = DBSession().query(cls).get(pk)\n if row is None:\n raise Exception(\"{}记录不存在\".format(cls.__name__))\n return row", "title": "" }, { "docid": "8b223e28473f3fa112f715653822fc3c", "score": "0.5503251", "text": "def find(self, key):\n if isinstance(key, (int, long)):\n result = self.table.Find(<long> key).decode('utf8')\n if result == u'':\n raise KeyError(key)\n return result\n else:\n key = as_str(key)\n result = self.table.Find(<char*>key)\n if result == -1:\n raise KeyError(key)\n return result", "title": "" }, { "docid": "902fd6f2180439e169585dc803c9335a", "score": "0.5490408", "text": "def find(self,item):\n sig = str(item)\n try:\n return self.index[sig]\n except:\n return None", "title": "" }, { "docid": "a593527fb945b6805f8ff64b31e3c4b9", "score": "0.5488193", "text": "def getRowFromPath(self, path):\n query = \"SELECT resource_id, etag, title FROM docs WHERE local_path = ?\"\n res = self.db.execute(query, (path,)).fetchone()\n return res", "title": "" }, { "docid": "8c398b0681ac3a65593a01e1bd5411e4", "score": "0.5487673", "text": "def test_get_row_index(self):\n self.dboard.set_row_names([\"1\", \"2\"])\n self.assertEqual(0, self.dboard._get_row_index(\"1\"))\n self.assertEqual(1, self.dboard._get_row_index(\"2\"))", "title": "" }, { "docid": "cb09f0be0e172d40ae402c1c2222725c", "score": "0.54837364", "text": "def find(self, node):\n saveCursor = self.cursor\n if self.first() and self.cursor.nodeId == node.nodeId:\n return self.cursor\n while self.next():\n if self.cursor.nodeId == node.nodeId:\n return self.cursor\n self.cursor = saveCursor\n return None", "title": "" }, { "docid": "fc4c17cf78c7c55c73a01065c4087b42", "score": "0.54808366", "text": "def _find_by_key(self, key, find):\n index = hashId(key, self.capacity) # Get the index/ bucket based on hash code of the key\n \n hash_table_cell = self._entry[index]\n found_item = None\n for item in hash_table_cell: #Iterrate the entry array and check the key is matching and if key is same than get the value\n if item[0] == key:\n found_item = item\n break\n\n return find(found_item, hash_table_cell)", "title": "" }, { "docid": "260c045dfeca6f307e20a8e9056b6a30", "score": "0.5479559", "text": "def query_exists(self, q, param=None):\r\n \"\"\" Queries database and return one row \"\"\"\r\n try:\r\n c = self.connection.cursor()\r\n c.execute(q, param)\r\n self.logger.log(logger.LogLevel.DEBUG, 'database.query_exists: %s | %s' % (q, param)) \r\n if c.fetchone() is None:\r\n return False\r\n else:\r\n return True\r\n except Exception as e:\r\n self.logger.log(logger.LogLevel.ERROR, 'database.query_exists: %s. 
%s | %s' % (e, q, param))\r\n return False", "title": "" }, { "docid": "d5fdd447af75f01bbd50bbfbfcd6a099", "score": "0.54712725", "text": "def find_index(row):\n value = row[index]\n if value in seen:\n return seen[value]\n for row_ in merged.iter_dicts(True):\n if row_[index] == value:\n seen[value] = row_[\"index\"]\n return row_[\"index\"]\n return None", "title": "" }, { "docid": "6ff31caa5fabc6b7a788bb37a718cec3", "score": "0.54655886", "text": "def find(self, key):\n index = self.quadratic_probe(key)\n if index != -1:\n if self.table[index] is not None:\n return self.table[index]\n else:\n return False\n else:\n return False", "title": "" }, { "docid": "0bd41cd66077140c9b9e8a6d809f9d3e", "score": "0.54550296", "text": "def find(self, primary_key):\n sql = '{} WHERE {} = %s'.format(self._select(), self.PRIMARY_KEY)\n cursor = yield self._pool.execute(sql, [primary_key])\n result = cursor.fetchmany(1)\n return self.convert_result_to_object(result)", "title": "" }, { "docid": "f458214eeeeeca3d2f4365b6d1849ab0", "score": "0.54480684", "text": "def find(self, value):\n for row in range(self.getHeight()):\n for column in range(self.getWidth()):\n if self[row][column] == value:\n return (row, column)\n return None", "title": "" }, { "docid": "f79a69243ac3fc07171e16b942411cfe", "score": "0.54478145", "text": "def find_element(grid, target):\n \n # First, iterate over the row indices\n for row_number in range(len(grid)):\n \n# print(\"Checking row\", row_number)\n \n for col_number in range(len(grid[row_number])):\n \n# print(\"Checking column\", col_number)\n \n if grid[row_number][col_number] == target:\n return (row_number, col_number)\n \n return None", "title": "" }, { "docid": "b76737edef07065ba71e15286bf9a4b1", "score": "0.54471594", "text": "def GetTableCurrentRow (latextable):\r\n\r\n return t.GetTableCurrentRow (latextable)", "title": "" }, { "docid": "4feb83dc1859a58f08952fe4d0b89037", "score": "0.54371154", "text": "def get_entry_from_row(self, row: [str], column_header: str) -> str:\n column_index = self.header.index(column_header)\n return row[column_index]", "title": "" }, { "docid": "aef97bf5e8805263d44c2a3573aaca0c", "score": "0.5434794", "text": "def parent_row(self):\n dict_cur.execute('SELECT * FROM \"{}\" WHERE {} = {}'.format(\n self.parent_table(), self.id_col_of_parent(), self.parent_id\n ))\n return dict_cur.fetchone()", "title": "" }, { "docid": "6f12769f65651514e00012bb4e7378cd", "score": "0.5425477", "text": "def get_run(self, run_id: str) -> sqlite3.Row:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from runs\n WHERE run_id = ?;\n \"\"\",\n (run_id,),\n )\n results = c.fetchall()\n return results[0]", "title": "" }, { "docid": "ea8a9aff2330c25ba9d687615f6b24fc", "score": "0.5420388", "text": "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "title": "" }, { "docid": "ea8a9aff2330c25ba9d687615f6b24fc", "score": "0.5420388", "text": "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "title": "" }, { "docid": "682e2f197fe6f72a35f951069fe31a85", "score": "0.5411231", "text": "def check_if_row_exists(self, session, data):\n\n row_exists = None\n user_id = 0\n\n try:\n\n user_row = self.get_user_by_id(session, data)\n\n if user_row is not None:\n user_id 
= user_row.user_id\n else:\n user_id = 0\n\n logger.info('User Row object in DB: %s', str(user_row))\n\n row_exists = session.query(UsersAuthModel).filter(UsersAuthModel.user_id == user_id). \\\n filter(UsersAuthModel.is_active == \"true\").scalar()\n\n logger.info('Row to data: {}, Exists: %s'.format(data), str(row_exists))\n\n except SQLAlchemyError as exc:\n row_exists = None\n logger.exception('An exception was occurred while execute transactions: %s', str(str(exc.args) + ':' +\n str(exc.code)))\n raise mvc_exc.IntegrityError(\n 'Row not stored in \"{}\". IntegrityError: {}'.format(data.get('username'),\n str(str(exc.args) + ':' + str(exc.code)))\n )\n finally:\n session.close()\n\n return row_exists", "title": "" }, { "docid": "af5f55c5472a534f7da261b7b64221da", "score": "0.54105145", "text": "def fetch_fromDB(self, searchPhrase):\n pass", "title": "" }, { "docid": "e01435938315b83878c38f66df2dc21b", "score": "0.5407367", "text": "def findItem(self, item):\n found_location = self.__find(item)\n\n if found_location is not None and found_location.item == item:\n return found_location\n else:\n raise NotFoundError(\"The item '\" + str(item) + \"' was not found!\")", "title": "" }, { "docid": "ec253585855ac1e34da4648c657662e2", "score": "0.53953576", "text": "def __getitem__(self, index):\r\n if isinstance(index, int):\r\n try:\r\n return next(islice(self.iterall(), index, index + 1))\r\n except StopIteration:\r\n raise IndexError\r\n elif isinstance(index, six.string_types):\r\n for column in self.iterall():\r\n if column.name == index:\r\n return column\r\n raise KeyError(\"Column with name '%s' does not exist; \"\r\n \"choices are: %s\" % (index, self.names()))\r\n else:\r\n raise TypeError('row indices must be integers or str, not %s'\r\n % type(index).__name__)", "title": "" }, { "docid": "deaca3e8f066f1c213c58f758de8cb0a", "score": "0.5385548", "text": "def _check_row_exists(self, pk):\n session = self.session_factory()\n exists = session.query(PipelineRun).filter_by(id=pk).first()\n session.close()\n if exists:\n return True\n return False", "title": "" }, { "docid": "911b697a5889508444da2138328039e8", "score": "0.53843147", "text": "def row(self):\n return self[\"row\"]", "title": "" }, { "docid": "c5788160017bf4e0bdc13d9ff698f5ae", "score": "0.5382522", "text": "def row(self):\n return self.address.row", "title": "" }, { "docid": "39a151f6c63b68ff1a3348cd536d0bbe", "score": "0.53753215", "text": "def select_one(self, table, column, value):\n select_row = \"SELECT * FROM {} WHERE {}='{}' AND delete_status = FALSE;\".format(table, column, value)\n self.cursor.execute(select_row)\n row = self.cursor.fetchone()\n return row", "title": "" }, { "docid": "eedf5bd03cdc73b707c4faa585e330a0", "score": "0.5369321", "text": "def in_database(entry, column, table):\n \n conn = get_database_connection(port = 2001)\n cursor = conn.cursor()\n \n query = 'SELECT * FROM %s WHERE %s = %s' % (table, column, entry) \n cursor.execute(query)\n \n results = cursor.fetchall()\n \n cursor.close()\n conn.close()\n \n if len(results) == 0:\n return False\n else:\n return True", "title": "" }, { "docid": "92fe7c819cf5c9e1aa85f6dabc6cc820", "score": "0.5362303", "text": "def retrieve_from_db(self):\n pass", "title": "" }, { "docid": "7d3a5e9df103b1d87afb0076d7bf5b73", "score": "0.5359579", "text": "def row(self, row_number):\n return self[self.row_cache[row_number]]", "title": "" }, { "docid": "a3fa5e89cd379c20c386b865c7022466", "score": "0.53594106", "text": "def query_one(self, sql: str) -> Any:\n with 
self.connection.cursor() as cursor:\n self.connection.ping(reconnect=True)\n cursor.execute(sql)\n row = cursor.fetchone()\n self.connection.commit()\n return row", "title": "" }, { "docid": "eaca5db028cf617a9c092eae301b8fc1", "score": "0.53538996", "text": "def __getitem__(self, key):\n for db in self.db:\n if db.name == key:\n return db\n raise IndexError", "title": "" }, { "docid": "baedc83f21e5ba4d5c3af593fc64dba8", "score": "0.53482795", "text": "def fetch_row(self, sql):\n curs = self.q(sql, True)\n cols = curs.column_names\n row = curs.fetchone()\n if curs._have_unread_result():\n curs.fetchall()\n curs.close()\n\n if row:\n result = {}\n for field in cols:\n k = cols.index(field)\n result[cols[k]] = row[k]\n return result\n else:\n return None", "title": "" }, { "docid": "1b0b9fb5f094ca2d9b318c690f7c72f0", "score": "0.53389204", "text": "def _find(self, hashV):\n return self.table.search(hashV)", "title": "" }, { "docid": "30931ce1742213eaae316ceb4c08caeb", "score": "0.5336372", "text": "def get_row_by_position(self,row_number):\n\n if self.__rows is None:\n self.logger.debug('rbpos looking for rows matching %s' % (self.locators['row']))\n self.__rows = self.find_elements(self.locators['row'])\n self.logger.debug('self.__rows = %s' % (self.__rows))\n\n nrows = len(self.__rows)\n if (row_number+1) > nrows:\n raise IndexError(\"while attempting to retrieve row by position,\"\\\n + \" row_number == %d, max rows is %d\" % (row_number,nrows))\n\n # if row_number is out of range on the negative side, wrap around\n if row_number < 0:\n row_number = row_number % nrows\n\n # css and xpath use indicies starting from 1,\n # not 0 as was requested by this function.\n row_number = row_number + 1\n\n result = self.__row_class(self.owner,*self.__row_class_args,row_number=row_number)\n\n # make the result compatible with the iframewrap design pattern\n if self.iframe_tracker is not None:\n self.iframe_tracker.wrap_new_object(result)\n\n result.detach_from_owner()\n return result", "title": "" }, { "docid": "b555a60e47d7a433957d5a60281058fc", "score": "0.5335463", "text": "def get_entry_id(self, table, field, value):\n cur = self.con.execute(\"select rowid from %s where %s='%s'\" % (table, field, value))\n res = cur.fetchone()\n # Check if id exists\n if res is None:\n # Add to table if id does not exist\n cur = self.con.execute(\"insert into %s (%s) values ('%s')\" % (table, field, value))\n return cur.lastrowid\n return res[0]", "title": "" }, { "docid": "83af4c70302c53cefd541661d0776792", "score": "0.53331697", "text": "def find_in_db(self, *args, **kwargs):\n return self.relation.find_in_db(*args, **kwargs)", "title": "" }, { "docid": "64fdfa07b52b34b71bf710cecc6e6a97", "score": "0.5325645", "text": "def getItem(self,row,column,default=None):\n data = self.data\n if row in data and column in data[row]:\n return data[row][column]\n else:\n return default", "title": "" }, { "docid": "874a27dfe908150a1e04adbbbf46a830", "score": "0.5324717", "text": "def get_row_index(self, name):\n # type: (str) -> Tuple[str, int]\n if name == 'but':\n name = 'casc'\n if name not in self._row_lookup:\n raise ValueError('row %s not found.' 
% name)\n return self._row_lookup[name]", "title": "" }, { "docid": "25e73bba3c6ecf5aced000c21e6943fd", "score": "0.5324464", "text": "def search_product(df, product_id):\n if product_id in df.index:\n\n return df[product_id]\n\n else:\n\n return -1", "title": "" }, { "docid": "dccf16687d391bc5a2f7b0a3e64e5bb6", "score": "0.53240776", "text": "def find_asset_for_pathrow(sqlite_path, **kwargs):\n while True:\n # Generate query\n query = generate_query(**kwargs)\n\n # Find records for query\n iterator = find_records(sqlite_path, query)\n\n # Return if found\n try:\n return next(iterator)\n except StopIteration:\n if kwargs.get('max_cloud') >= 100 and kwargs.get(\n 'sort_preference') == 'closest-to-date':\n\n pathrow = kwargs.get('pathrow')\n print(\n f'Unable to find assets for pathrow {pathrow}',\n file=sys.stderr)\n return\n\n # Modify parameters\n kwargs = relax_params(**kwargs)", "title": "" }, { "docid": "99d2eeb901f2a2b543ef9356d27ac539", "score": "0.53221595", "text": "def find_cell(app, cell):\n if VALID_BLOCK(cell.block) and VALID_CELL(cell):\n app_block = app.board.blocks[cell.block.column][cell.block.row]\n app_cell = app_block.cells[cell.column][cell.row]\n return app_cell\n else:\n return None", "title": "" }, { "docid": "dbd8aad6624c354365a6ada50ad03e30", "score": "0.53142", "text": "def __getitem__(self, item) -> SurveyRow:\n return self.rows[item]", "title": "" }, { "docid": "f25fab614cdcc4a7c1de5f2dbfdb85ed", "score": "0.5305486", "text": "def find_by_id(self, uid):\n return self.mapper.find_by_id(uid)", "title": "" }, { "docid": "146418afc754703b7522a95208e98eb0", "score": "0.53007025", "text": "def dbGetFirstRow(con, cmd, args=[], skipTrace=False):\n cursor = dbExecute(con, cmd, args, skipTrace)\n return cursor.fetchone()", "title": "" }, { "docid": "278c2dfdb750956b08500efd8991b484", "score": "0.52967894", "text": "def _fetch_entry_from_table(conn, table_name, entry_id):\n # Make sure the table exists.\n if not _table_exists(conn, table_name):\n return None\n # Query for the classifier.\n with conn:\n cur = conn.cursor()\n cur.execute(\n 'SELECT * FROM %s WHERE Id=\\\"%s\\\"' % (table_name, entry_id))\n query_result = cur.fetchone()\n # If found, return the classifier.\n return query_result if query_result else None", "title": "" }, { "docid": "87c84fb02c30c08700689816c4461c58", "score": "0.52934545", "text": "def send_find(self, statement):\n msg_type, msg = self.protocol.build_find(statement)\n self._execute_prepared_pipeline(msg_type, msg, statement)\n return DocResult(self) if statement.is_doc_based() else RowResult(self)", "title": "" }, { "docid": "caee2485489d80b2d4d89bbb3b2691d9", "score": "0.52847517", "text": "def find_item(name, currentRoom):\n for i in currentRoom.contents:\n if i.name == name:\n return i\n\n return None", "title": "" }, { "docid": "217d45b12cd7db3dd98a8fd9aaf84c41", "score": "0.5283954", "text": "def test_data_returns_pointer_held_in_column(\n three_sour_model,\n): # pylint: disable=redefined-outer-name\n # Given\n index = three_sour_model.index(0, 0)\n\n # When\n pointer = three_sour_model.data(index)\n\n # Then\n assert pointer == \"I001\"", "title": "" }, { "docid": "0ea3af67d404921582ce3f3dfb02fe60", "score": "0.52785057", "text": "def __get_one_by_id(\n self, table_name: str, id_name: str, db_id: str\n ) -> Mapping[str, Any]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n f\"\"\"\n SELECT * FROM {table_name}\n WHERE ({id_name} = ?)\n \"\"\",\n (int(db_id),),\n )\n results = 
c.fetchall()\n if len(results) != 1:\n raise EntryDoesNotExistException(\n f\"Table {table_name} has no {id_name} {db_id}\"\n )\n return results[0]", "title": "" }, { "docid": "391823feae6952e3d5831d584efe0aaa", "score": "0.527754", "text": "def fetch_row(self, sql: str, values: Union[Tuple, Mapping] = None) -> Any:\n with self.cursor() as cursor:\n cursor.execute(sql, values)\n assert cursor.rowcount <= 1, f\"More than one result row for fetch_row({sql!r}, {values!r})\"\n return cursor.fetchone()", "title": "" }, { "docid": "8660e34e4b00e3fdf15bb58900794bc7", "score": "0.52706236", "text": "def query(self, columns, table, matchColumn=None, matchValue=None):\n if matchValue and matchColumn:\n sql = \"\"\"SELECT %s FROM %s WHERE %s='%s'\"\"\" % (','.join(columns), table, matchColumn, matchValue)\n print \"SQL Statement: \" + sql\n else:\n sql = \"\"\"SELECT %s FROM %s\"\"\" % (','.join(columns))\n\n self.db.query(sql)\n queryResult = self.db.store_result()\n return queryResult.fetch_row(maxrows=0)", "title": "" }, { "docid": "16ab6e4491239e0544f16857dec26ed6", "score": "0.5269898", "text": "def locate(self, geometry):\n cells = geometry.find(self.r)\n if cells:\n self._cell = cells[-1]\n else:\n self._cell = None", "title": "" }, { "docid": "0ea94bb710d5032d9a6ad9e76d9c2494", "score": "0.5251509", "text": "def search_by_time(integer, row):\n clear_screen()\n found = False\n for item in row:\n if item[\"Time\"] == str(integer):\n print_entry(item)\n found = True\n if found is False:\n print(\"No Entries Found..\")", "title": "" }, { "docid": "0c3cf3adefe9ad5b078301c54999afb3", "score": "0.52495384", "text": "def findUser(username):\n connector = appEngine.connect()\n userId = connector.execute(\"SELECT user.userID FROM user WHERE userName=(?)\", username).fetchone()\n #selectInput = select([user]).where(user.column.userName == username)\n #db.execute(selectInput)\n return userId", "title": "" }, { "docid": "697dc39452225e1be2f44c654ec014e5", "score": "0.52432054", "text": "def exact_search(string, row):\n clear_screen()\n found = False\n for item in row:\n if string.lower() in item[\"Task\"].lower() \\\n or string.lower() in item[\"Notes\"].lower():\n print_entry(item)\n found = True\n if found is False:\n print(\"No Entries Found..\")", "title": "" }, { "docid": "73c5360cddbec1475bcfddaff7b4b37d", "score": "0.5240922", "text": "def row(self, index):\n return self.data[index]", "title": "" } ]
61f0f13a2701498ff9538f00e55c8cc6
Returns the log of probability estimates.
[ { "docid": "6719902eb571034a7a96440a4eb1d871", "score": "0.68925774", "text": "def predict_log_proba(self, X):\n return np.log(self.predict_proba(X))", "title": "" } ]
[ { "docid": "e0cf42a38133d6cf396650edba63c72c", "score": "0.73786455", "text": "def priorlogprob(self):\n return numpy.array([p.logpdf(x) for p, x in zip(self.getpriors(), self.getparams())])", "title": "" }, { "docid": "01f0ec5bea8d6e48c9938f08f2177d9b", "score": "0.7042134", "text": "def logp(self):\n # logp for all the parameters\n return np.sum(\n [\n param.logp()\n for param in f_unique(flatten(self.data))\n if param.vary\n ]\n )", "title": "" }, { "docid": "167ecd81421b7fa8e53ceb2c10966c34", "score": "0.7031832", "text": "def log_probability(self, weights):\n strict_log_probability = sum(\n [self.strict_log_probability(preference, weights) for preference in self.strict_preferences])\n indifferent_log_probability = sum(\n [self.indifferent_log_probability(preference, weights) for preference in self.indifferent_preferences])\n log_prior = sum([self.priors[i](weight) for i, weight in enumerate(weights)])\n return strict_log_probability + indifferent_log_probability + log_prior", "title": "" }, { "docid": "266a554f4cb0f1c65daa397d65f3c6ad", "score": "0.7021919", "text": "def log_prob(self, X):\n p = self.p\n r = self.r\n\n # broadcasting\n if xr != None and isinstance(p, xr.DataArray) and isinstance(r, xr.DataArray) and isinstance(X, xr.DataArray):\n p, r, X = xr.align(p, r, X)\n else:\n p, r, X = np.broadcast_arrays(p, r, X)\n\n # return scipy.stats.nbinom(n=r, p=1 - p).logpmf(X)\n coeff = gammaln(r + X) - gammaln(X + 1) - gammaln(r)\n return coeff + r * np.log(1 - p) + X * np.log(p)", "title": "" }, { "docid": "976925ca9b633b9bfc5b3b2ea67f5ab1", "score": "0.699978", "text": "def log_prob(self, x):\n function = lambda x: x * np.exp(-x ** 2)\n return torch.sum(np.log(self.pi) + self.gaussian1.log_prob(x)\n + np.log1p(((1 - self.pi) / self.pi) * function(self.sigma1 / self.sigma2)))", "title": "" }, { "docid": "2367ce660f041700be584b7822f80fc0", "score": "0.69937867", "text": "def lnprob(pars):\n model = residual(pars, logx)\n return -0.5 * np.sum(((model - logy) / logyerr)**2 + np.log(2 * np.pi * logyerr**2))", "title": "" }, { "docid": "f32edd9c8c1f078ed9f7ce9a69ffa15d", "score": "0.6952415", "text": "def log_probability(theta, V, EW, FeH, e_ew, e_feh, power):\n lp = log_prior(theta)\n if not np.isfinite(lp):\n return -np.inf\n return lp + log_likelihood(theta, V, EW, FeH, e_ew, e_feh, power)", "title": "" }, { "docid": "334169aea74692f33f07de4347728e4e", "score": "0.69425666", "text": "def _logProbability(self, x, mask, mask_):\n z = self._inference(x, mask, mask_, True)\n return self.prior.logProbability(z) + self._inferenceLogjac", "title": "" }, { "docid": "289574b65fb4ab3786f40f310d3bf4c0", "score": "0.6909309", "text": "def predict_log_proba(self, X):\n y_prob = self.predict_proba(X)\n return np.log(y_prob, out=y_prob)", "title": "" }, { "docid": "7671c58c44bd79041fdbde5b06d885a1", "score": "0.6893093", "text": "def log_prob(self, x):\n likelihood = self.likelihood(x)\n return likelihood.view(x.shape[0], -1).sum(1)", "title": "" }, { "docid": "8c6a8762384bd2265a41321a0ebd81e4", "score": "0.68431693", "text": "def logp(self, val):\n return self.rv.logpdf(val)", "title": "" }, { "docid": "3ad7336d40d74fe657264882fc696016", "score": "0.6834927", "text": "def log_prob(self, x):\n return torch.sum(-0.5 * torch.log(2 * np.pi * self.sigma ** 2)\n - 0.5 * (x - self.mu) ** 2 / self.sigma ** 2)", "title": "" }, { "docid": "9054bee2923aaf52227b7764b8aca041", "score": "0.68092906", "text": "def log_prob(self, xs, zs):\n # Note there are no parameters we're being Bayesian about. 
The\n # parameters are baked into how we specify the neural networks.\n X, y = xs['X'], xs['y']\n self.neural_network(X)\n result = self.pi * norm.prob(y, self.mus, self.sigmas)\n result = tf.log(tf.reduce_sum(result, 1))\n return tf.reduce_sum(result)", "title": "" }, { "docid": "ebcb31722ddf2df794a52aae9723d5cc", "score": "0.68025124", "text": "def logprob(predictions, labels):\n predictions[predictions < 1e-10] = 1e-10\n return np.sum(np.multiply(labels, -np.log(predictions))) / labels.shape[0] #labels.shape=(640,27)", "title": "" }, { "docid": "7ecc6fccf0462a4e65f1beebc7518ea9", "score": "0.6796203", "text": "def log_probabilities(stprob):\n sprob, tprob = stprob\n with np.errstate(divide='ignore'): # log(0) = -inf OK\n sprob = np.log(sprob)\n tprob = np.log(tprob)\n return sprob, tprob", "title": "" }, { "docid": "2f961529a03d5d39ef442b82351194a9", "score": "0.6753031", "text": "def _get_log_probs(self, seq):\n # NB: NgramModel returns negative log probs\n return np.negative(np.asarray([self.m1.prob_seq(seq), self.m2.prob_seq(seq)],\n dtype=np.float128))", "title": "" }, { "docid": "756be7fad3cf08e45df2e33f4c493e23", "score": "0.6732442", "text": "def log_likelihood(self):\n # loop through each word in vocabulary.\n for word in self.vocabulary:\n self.vocabulary[word][\"probability\"] = {} # initialise 'probability' key for each word.\n\n # retrieve the frequency and class of each word.\n for cls, wfr in self.vocabulary[word][\"frequency\"].items():\n # calculate and add log probability to vocabulary\n cf = self.voc_classes[cls]\n\n p = log10((wfr + 1) / (cf + len(self.vocabulary)))\n\n self.vocabulary[word][\"probability\"][cls] = p\n\n # console readout\n if not self.quiet:\n print(\"Log probabilities calculated\\n\")", "title": "" }, { "docid": "15b99934bd316f0a4bf5cd1c8dbda57f", "score": "0.6694303", "text": "def _log_prob(self, response, predicted_linear_response):\n likelihood = self.as_distribution(predicted_linear_response)\n return likelihood.log_prob(response)", "title": "" }, { "docid": "59ba654e0b0ef8efb38d61bca95eaa4c", "score": "0.6693244", "text": "def _outcome_log_probs(self):\n if self._logits is None:\n probs = tf.convert_to_tensor(self._probs)\n logits = tf.math.log(probs) - tf.math.log1p(-probs)\n return tf.math.log1p(-probs), tf.math.log(probs), logits\n s = tf.convert_to_tensor(self._logits)\n # softplus(s) = -Log[1 - p]\n # -softplus(-s) = Log[p]\n # softplus(+inf) = +inf, softplus(-inf) = 0, so...\n # logits = -inf ==> log_probs0 = 0, log_probs1 = -inf (as desired)\n # logits = +inf ==> log_probs0 = -inf, log_probs1 = 0 (as desired)\n return -tf.math.softplus(s), -tf.math.softplus(-s), s", "title": "" }, { "docid": "c29f92648f62a159771d1e927c05f989", "score": "0.66870743", "text": "def log_pred_prob(self, t, x):\n # Posterior predictive: see eq. 
40 in (Murphy 2007).\n post_means = self.mean_params[:t]\n post_stds = np.sqrt(self.var_params[:t])\n return norm(post_means, post_stds).logpdf(x)", "title": "" }, { "docid": "aa3e026240a8f584bab99c0801291cbc", "score": "0.66751224", "text": "def log_prob_from_logits(x):\n # TF ordering\n axis = len(x.size()) - 1\n m, _ = torch.max(x, dim=axis, keepdim=True)\n return x - m - torch.log(torch.sum(torch.exp(x - m), dim=axis, keepdim=True))", "title": "" }, { "docid": "aa3e026240a8f584bab99c0801291cbc", "score": "0.66751224", "text": "def log_prob_from_logits(x):\n # TF ordering\n axis = len(x.size()) - 1\n m, _ = torch.max(x, dim=axis, keepdim=True)\n return x - m - torch.log(torch.sum(torch.exp(x - m), dim=axis, keepdim=True))", "title": "" }, { "docid": "b2e3e5eef3e56aaab92cbc89114a0d3b", "score": "0.66571367", "text": "def log_probability(self, X):\n\n\n\t\tX = _check_parameter(_cast_as_tensor(X), \"X\", ndim=3, \n\t\t\tcheck_parameter=self.check_data)\n\t\tself.d = X.shape[1]\n\n\t\tlogps = self.distributions[0].log_probability(X[:, 0])\n\t\tfor i, distribution in enumerate(self.distributions[1:-1]):\n\t\t\tlogps += distribution.log_probability(X[:, :i+2])\n\n\t\tfor i in range(X.shape[1] - self.k):\n\t\t\tj = i + self.k + 1\n\t\t\tlogps += self.distributions[-1].log_probability(X[:, i:j])\n\n\t\treturn logps", "title": "" }, { "docid": "a8abbe6183aaa8f35a813102098eb85d", "score": "0.6651732", "text": "def log_probability(self, X):\n\n\t\tX = _check_parameter(_cast_as_tensor(X), \"X\", ndim=2, \n\t\t\tshape=(-1, self.d), check_parameter=self.check_data)\n\n\t\tt = (X - self.means) ** 2 / self.covs\n\t\treturn torch.sum(self._lgamma_dofsp1 - self._lgamma_dofs - \\\n\t\t\tself._log_sqrt_dofs_pi_cov -((self.dofs + 1) / 2.0) * \n\t\t\ttorch.log(1 + t / self.dofs), dim=-1)", "title": "" }, { "docid": "2aedbbbac5efd0f65ee60c6678336829", "score": "0.6651381", "text": "def log_prob(self, model_trace):\n if not self.has_enumerable_sites:\n return model_trace.log_prob_sum()\n log_probs = self._get_log_factors(model_trace)\n with shared_intermediates() as cache:\n return contract_to_tensor(log_probs, self._enum_dims, cache=cache)", "title": "" }, { "docid": "39f1337c62cba202d4342f8a3b2a67e1", "score": "0.6640335", "text": "def log_prob(self, value):\n raise NotImplementedError", "title": "" }, { "docid": "5b1051e7e81579b76e39cd8b21a19461", "score": "0.6623184", "text": "def log_probability(self, X):\n\n\t\tcdef int i, j, n\n\t\tcdef numpy.ndarray X_ndarray\n\t\tcdef double* X_ptr\n\t\tcdef double logp\n\t\tcdef numpy.ndarray logp_array\n\t\tcdef double* logp_ptr\n\n\t\tif self.discrete:\n\t\t\tif not isinstance(X[0], (list, tuple, numpy.ndarray)) or len(X) == 1:\n\t\t\t\tn = 1\n\t\t\telse:\n\t\t\t\tn = len(X)\n\n\t\t\tlogp_array = numpy.zeros(n)\n\t\t\tfor i in range(n):\n\t\t\t\tfor j in range(self.d):\n\t\t\t\t\tlogp_array[i] += self.distributions[j].log_probability(X[i][j]) * self.weights[j]\n\n\t\t\tif n == 1:\n\t\t\t\treturn logp_array[0]\n\t\t\telse:\n\t\t\t\treturn logp_array\n\n\t\telse:\n\t\t\tif isinstance(X[0], (int, float)) or len(X) == 1:\n\t\t\t\tn = 1\n\t\t\telse:\n\t\t\t\tn = len(X)\n\n\t\t\tX_ndarray = numpy.array(X, dtype='float64')\n\t\t\tX_ptr = <double*> X_ndarray.data\n\n\t\t\tlogp_array = numpy.empty(n, dtype='float64')\n\t\t\tlogp_ptr = <double*> logp_array.data\n\n\t\t\twith nogil:\n\t\t\t\tself._log_probability(X_ptr, logp_ptr, n)\n\n\t\t\tif n == 1:\n\t\t\t\treturn logp_array[0]\n\t\t\telse:\n\t\t\t\treturn logp_array", "title": "" }, { "docid": 
"e25ccf73df8a8ed491cfbba05322e650", "score": "0.6618147", "text": "def indifferent_log_probability(self, preference, weights):\n delta = self.data.loc[preference[0], :].values - self.data.loc[preference[1], :].values\n variance = delta.dot(self.Sigma).dot(delta)\n sd = np.sqrt(variance)\n mean = weights.dot(delta)\n return np.log(norm.cdf(mean + 0.5, loc=0, scale=sd) - norm.cdf(mean - 0.5, loc=0, scale=sd))", "title": "" }, { "docid": "a463d36e0aca733631b4f84f8d116992", "score": "0.66125643", "text": "def log_prob(self, sentence_list):\n return list(map(lambda x: self.__log_prob_sentence(x), sentence_list))", "title": "" }, { "docid": "d5f5343d29bf424f723ce707c1a28faf", "score": "0.660946", "text": "def emission_probabilities(dataitems, log_prob=True):\n eprob = np.array([ i.prediction for i in dataitems ])\n if log_prob:\n with np.errstate(divide='ignore'): # log(0) = -inf OK\n eprob = np.log(eprob)\n return eprob", "title": "" }, { "docid": "805f1291799c832355e04af2926a88c8", "score": "0.6609043", "text": "def log(self):\n return Discrete(np.log(self.pmf), *self.dim)", "title": "" }, { "docid": "be83456c9840d6b1ff06a0758803e54f", "score": "0.65975183", "text": "def _calc_log_prob_from_prob(self, value, *args, **kwargs):\n return self.log_base(self._prob(value, *args, **kwargs))", "title": "" }, { "docid": "75567363651e0e14eea796c3649ecde9", "score": "0.6595068", "text": "def prob_to_log_odds(p):\r\n return np.log(p/(1-p))", "title": "" }, { "docid": "797ce1c2d7fb7c636eb66234233bf907", "score": "0.6580121", "text": "def log_py_xw(self):\n return self.model.local_log_prob('y')", "title": "" }, { "docid": "252678f1f7948117f9136c24f0b08fed", "score": "0.65762323", "text": "def log_probability(self, sents, base=2.):\n return sum(self.sent_log_prob(sent, base) for sent in sents)", "title": "" }, { "docid": "b79806deebc3c9a1d6ecc7825f9a07de", "score": "0.6569243", "text": "def prob_ratio(log_probs): \n return np.exp(log_probs[1,:] - log_probs[0,:])", "title": "" }, { "docid": "5598b1aa60ff379e2362cfbc6e4024ce", "score": "0.65688914", "text": "def compute_log_likelihood(self, indicators, weights):\n scores, _ = self.predict_probability(self.train_feature_x, weights)\n probs = self.predict_probability(self.train_feature_x, weights)\n lp = np.sum((indicators-1)*scores + np.log(probs))\n return lp", "title": "" }, { "docid": "b50f7dc1923ddfb53b244064a4d2db1a", "score": "0.65623105", "text": "def logprobdist(self):\n # Have the features already been computed and stored?\n if not hasattr(self, 'F'):\n raise AttributeError(\"first set the feature matrix F\")\n\n # Yes:\n # p(x) = exp(params.f(x)) / sum_y[exp params.f(y)]\n # = exp[log p_dot(x) - logsumexp{log(p_dot(y))}]\n\n #log_p_dot = innerprodtranspose(self.F, self.params)\n # Calculate the dot product of F^T and the parameter vector:\n log_p_dot = self.F.T.dot(self.params)\n\n # Do we have a prior distribution p_0?\n if self.priorlogprobs is not None:\n log_p_dot += self.priorlogprobs\n if not hasattr(self, 'logZ'):\n # Compute the norm constant (quickly!)\n self.logZ = logsumexp(log_p_dot)\n return log_p_dot - self.logZ", "title": "" }, { "docid": "7bbbb3e75aaa7626b731dccd61efdd29", "score": "0.65508044", "text": "def logprob(self, sample, params, n_iters):\n eps = 1e-20\n params = params.reshape(self.dim, self.dim)\n unnormalized_logprob = np.sum(sample * params)\n\n # Approximate the matrix permanent of exp(params) with Bethe permanent of exp (params)\n # In the indigo code there is an additional division by the temperature in here...\n 
gamma_matrix = self.bethe_permanent_gamma(params, n_iters, 1e-20)\n term_1 = np.sum(params * gamma_matrix)\n term_2 = -np.sum(gamma_matrix * np.log(gamma_matrix + eps))\n term_3 = np.sum((1 - gamma_matrix) * np.log(1 - gamma_matrix + eps))\n log_approx_perm = term_1 + term_2 + term_3\n return unnormalized_logprob - log_approx_perm", "title": "" }, { "docid": "b23ff0d6135b958dbc373c704b6c416a", "score": "0.6548126", "text": "def log_probability(value, mean, variance): \n\n if variance == 0:\n return 0\n else:\n const = 1 / (2*kPI*variance)**0.5\n ex = exp(-(value - mean)**2 / (2*variance))\n return log(const * ex)", "title": "" }, { "docid": "50d9fe668f84e62b52c1bde0def6bee5", "score": "0.6524495", "text": "def get_log_probability(self, input_data):\n _, _, masked_array = self.get_probability(input_data)\n log_prob_array = np.log2(masked_array)\n total_log_prob = np.sum(log_prob_array, axis=1, keepdims=True)\n return total_log_prob, log_prob_array, masked_array", "title": "" }, { "docid": "0410f0a909542c4b9f6b4cc0555eabae", "score": "0.6519585", "text": "def logP(self):\n raise NotImplementedError", "title": "" }, { "docid": "72c3ea3b6018d9e6715f0e9b40542d6c", "score": "0.6517506", "text": "def log_p(cls, pi, a):\n return pi.log_prob(a).sum(\n axis=-1) # Sum needed for Torch normal distr.", "title": "" }, { "docid": "6bc5dfc002f47d186bd9fad914b3358d", "score": "0.6506105", "text": "def loglike(self, params):\n return self.loglikeobs(params).sum(0)", "title": "" }, { "docid": "43d7c5fa613540fb4ec41e80053e778d", "score": "0.6494451", "text": "def log_p(cls, p, a):\n\n return p.log_prob(a)", "title": "" }, { "docid": "719966915af4a5f034d6f512d0251ff2", "score": "0.64931244", "text": "def logpdf(self, state):\n \n data, opt_vars = state\n param, subgrad, opt_vec = self.penalty.form_optimization_vector(opt_vars)\n gradient = self.loss.gradient(data, param)\n val = - gradient - opt_vec\n generalized_logpdf = self.randomization.logpdf(val).sum() + np.log(self.abs_det(state))\n \n return generalized_logpdf", "title": "" }, { "docid": "b5b081e1baa37ebd3408774505427460", "score": "0.6484868", "text": "def marginal_log_likelihood(dataSet):\n\n z_shape = (25, 25) # two dimensional shape for prior\n Z_prior, P0, P1 = get_probabilities()\n\n\n # going to the logarithmic space\n Z_prior = np.log(Z_prior)\n P0 = np.log(P0)\n P1 = np.log(P1)\n\n\n logs = np.zeros(len(dataSet))\n\n #getting the values\n i=0\n for X in dataSet:\n #probabilities \n V = np.where(X == 0, P0, P1)\n\n #adding prior\n V = np.c_[Z_prior, V]\n\n #considering logsumexp\n V = np.sum(V, axis = 1)\n V = logsumexp(V)\n\n logs[i] = V\n i += 1\n return logs", "title": "" }, { "docid": "a7a2ef21c8b8ac8836dd3f1e68b3fc57", "score": "0.6479175", "text": "def log_prob(self, new_obs: List[Tensor], params: List[TensorDict]) -> List[Tensor]:\n return [m.log_prob(new_obs[i], params[i]) for i, m in enumerate(self)]", "title": "" }, { "docid": "c378ad36bdbbf70141ac8f565baebaee", "score": "0.64639133", "text": "def log_prob_X_given_z(self):\n return self.components.log_marg()", "title": "" }, { "docid": "5cac6215d82f559968ea915318b70623", "score": "0.64637846", "text": "def predict_log_proba(self, X):\r\n self._check_is_fitted('predict_log_proba')\r\n return self.best_estimator_.predict_log_proba(X)", "title": "" }, { "docid": "a124729a3c0db043d372f237f367fa73", "score": "0.6461569", "text": "def log_prior(theta):\n return 0.0", "title": "" }, { "docid": "51a8c4a241ffec2e2c8d8dbfe1d830b0", "score": "0.64577913", "text": "def logprob(self, s, a):\n m = 
self.dist(s)\n logprob = m.log_prob(a)\n entropy = m.entropy()\n if self.multivariate:\n logprob = logprob.sum(dim=-1)\n entropy = entropy.sum(dim=-1)\n return logprob, entropy", "title": "" }, { "docid": "83f3a48b7ed18abfe17cb17d329ce80f", "score": "0.64519083", "text": "def predict_log_proba(self, X):\n # Use log properties to multiply the two probabilities in log-space\n # FIXME: The mathematical formulation calls for weighting by class\n # probabilities, but that doesn't seem to have any meaningful impact on the\n # results.\n return np.asarray([self._get_log_probs(s) + self.y_log_probs for s in X],\n dtype=np.float128)\n # return np.asarray([self._get_log_probs(s) for s in X],\n # dtype=np.float128)", "title": "" }, { "docid": "26acc94b8576f25f42543ec8a57563c5", "score": "0.64461917", "text": "def log_prob(self, value, *args, **kwargs):\n return self._call_log_prob(value, *args, **kwargs)", "title": "" }, { "docid": "a0d0c3c7ba21a88b4c22a751b3f969e8", "score": "0.6432684", "text": "def priorloggrad(self):\n return numpy.array([p.logpdfgrad(x) for p, x in zip(self.getpriors(), self.getparams())])", "title": "" }, { "docid": "383f96f2d54b7bdbfbd181355c9ae120", "score": "0.6432392", "text": "def get_probability(self, actions, states):\n\n #mean of the distribution\n mu = self.actor.model.forward(states)\n multivariate_gaussian_distribution = MultivariateNormal(loc=mu, covariance_matrix=self.actor.covariance_matrix)\n logarithmic_probability = multivariate_gaussian_distribution.log_prob(value=actions)\n return logarithmic_probability", "title": "" }, { "docid": "0e706a79f4e9126e009411ff14f1982b", "score": "0.64214075", "text": "def log_prob(theta: np.ndarray, r: np.ndarray, y: np.ndarray, y_err: np.ndarray,\n m_func, pr) -> np.ndarray:\n lp = log_uniform_prior(theta, pr)\n if not np.isfinite(lp):\n ret_val = -np.inf\n else:\n ret_val = lp + log_likelihood(theta, r, y, y_err, m_func)\n\n if np.isnan(ret_val):\n ret_val = -np.inf\n\n return ret_val", "title": "" }, { "docid": "1a3f48561b8ea5fdce22895d024b81b9", "score": "0.64195216", "text": "def logpdf(self, state):\n data, opt_vars = state\n param, subgrad, opt_vec = self.penalty.form_optimization_vector(opt_vars)\n gradient = self.loss.gradient(data, param)\n hessian = self.loss.hessian(data, param)\n log_jacobian = self.penalty.log_jacobian(hessian)\n val = - gradient - opt_vec\n\n return self.randomization.logpdf(val).sum() + log_jacobian", "title": "" }, { "docid": "db3fa272ed9e675a8b3a8c949dab6407", "score": "0.64097315", "text": "def _logProbabilityWithSlice(self, x, sliceDim):\n z = self._inferenceWithSlice(x, sliceDim, True)\n return self.prior.logProbability(z) + self._inferenceLogjac", "title": "" }, { "docid": "27a5faa93d070e8b114a4486277369de", "score": "0.6403142", "text": "def _log_pdf(x, mean, log_scale):\n\tz = (x - mean) * torch.exp(-log_scale)\n\tlog_p = z - log_scale - 2 * F.softplus(z)\n\n\treturn log_p", "title": "" }, { "docid": "9eb3b03baaf673a2a8fb74bfb75f1e9e", "score": "0.6396589", "text": "def log_prob(self, sents):\n prob = 0\n for sent in sents:\n prob += self.sent_log_prob(sent)\n\n return prob", "title": "" }, { "docid": "fd557ea817cb09bbbed86e2335c91c63", "score": "0.6376421", "text": "def log_likelihood(self):\n if self._log_likelihood is None:\n self._log_likelihood = logpdf(x=self.y, cov=self.S)\n return self._log_likelihood", "title": "" }, { "docid": "fd557ea817cb09bbbed86e2335c91c63", "score": "0.6376421", "text": "def log_likelihood(self):\n if self._log_likelihood is None:\n self._log_likelihood = 
logpdf(x=self.y, cov=self.S)\n return self._log_likelihood", "title": "" }, { "docid": "a738f46e81b7ea34e869c9c92619e5da", "score": "0.6362299", "text": "def log_prob_from_logits(x):\n axis = len(x.get_shape())-1\n m = tf.reduce_max(x, axis, keepdims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x-m), axis, keepdims=True))", "title": "" }, { "docid": "65948047bb55ff31b8e723878d82141c", "score": "0.636072", "text": "def log_prob_from_logits(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis, keepdims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keepdims=True))", "title": "" }, { "docid": "73f5a8e83545592da0346444a037c27e", "score": "0.6359348", "text": "def log_odds_to_prob(l):\r\n return 1 - 1 / (1 + np.exp(l))", "title": "" }, { "docid": "7a090ac3b517bd5a1ac23c10fe6fb52a", "score": "0.63587546", "text": "def log_prob_from_logits(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis, keepdims=True)\n return x - m - tf.math.log(tf.reduce_sum(tf.exp(x - m), axis, keepdims=True))", "title": "" }, { "docid": "67c3e30477eeb34695bfac53bb6279ee", "score": "0.63526064", "text": "def log_prob_from_logits(x):\n axis = len(x.get_shape()) - 1\n m = tf.reduce_max(x, axis, keep_dims=True)\n return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keep_dims=True))", "title": "" }, { "docid": "677af6c31097f00971c2289c2e526f64", "score": "0.63486314", "text": "def log_prob(params, data, dropout=None):\n W1, b1, W2, b2, W3, b3 = params\n\n if dropout is None:\n dropout = DEFAULT_DROPOUT\n\n a1 = data.inputs\n z2 = a1.dot(W1.T * (1 - dropout[0])) + b1\n a2 = logistic(z2) \n z3 = a2.dot(W2.T * (1 - dropout[1])) + b2\n a3 = logistic(z3)\n U = a3.dot(W3.T * (1 - dropout[2])) + b3\n log_prob = U - np.log(np.sum(np.exp(U), 1))[:,np.newaxis]\n return log_prob", "title": "" }, { "docid": "2e696bc97db94f33bea2c4402e263b6a", "score": "0.63455206", "text": "def logp(self, obs, acs):", "title": "" }, { "docid": "6e6159b8ea922746783286a87f9c9e0c", "score": "0.6341308", "text": "def _calc_prob_from_log_prob(self, value, *args, **kwargs):\n return self.exp_base(self._log_prob(value, *args, **kwargs))", "title": "" }, { "docid": "e58dd95733962690e4ddf0ee9b3b8b0c", "score": "0.6340186", "text": "def __call__(self, theta):\n logl = log_gaussian_pdf(theta, sigma=self.sigma, mu=0)\n return logl, [0.0] * self.nderived", "title": "" }, { "docid": "950036354dc3bb4dad6428dcf085f240", "score": "0.6323601", "text": "def mixture_log_pdf(x, prior_logits, means, log_scales):\n\tlog_ps = F.log_softmax(prior_logits, dim=-1) \\\n\t\t+ _log_pdf(x.unsqueeze(dim=-1), means, log_scales)\n\tlog_p = torch.logsumexp(log_ps, dim=-1)\n\n\treturn log_p", "title": "" }, { "docid": "2274e9ddbfa6f190e151e1ff818356e5", "score": "0.6323542", "text": "def log_prob(self, xs):\n # Loop over each random variable.\n # If distribution is univariate, this is over all indices; if\n # distribution is multivariate, this is over all but the last\n # index.\n n = get_dims(xs)[0]\n log_prob = tf.zeros([n], dtype=tf.float32)\n if len(self.shape) == 1:\n if self.is_multivariate:\n idx = ()\n log_prob += self.log_prob_idx(idx, xs)\n else:\n for idx in product(range(self.shape[0])):\n log_prob += self.log_prob_idx(idx, xs)\n\n elif len(self.shape) == 2:\n if self.is_multivariate:\n for idx in product(range(self.shape[0])):\n log_prob += self.log_prob_idx(idx, xs)\n\n else:\n for idx in product(range(self.shape[0]), range(self.shape[1])):\n log_prob += self.log_prob_idx(idx, xs)\n\n elif len(self.shape) == 3:\n if 
self.is_multivariate:\n for idx in product(range(self.shape[0]), range(self.shape[1])):\n log_prob += self.log_prob_idx(idx, xs)\n\n else:\n for idx in product(range(self.shape[0]), range(self.shape[1]), range(self.shape[2])):\n log_prob += self.log_prob_idx(idx, xs)\n\n else: # len(self.shape) >= 4\n # There should be a generic solution.\n raise NotImplementedError()\n\n return log_prob", "title": "" }, { "docid": "59430b40b3a41dadd2cacddfdabac0aa", "score": "0.6309538", "text": "def _logprobs(self, text):\n raise NotImplementedError", "title": "" }, { "docid": "096b546099009566dfd1f82dcff84cd8", "score": "0.6286248", "text": "def strict_log_probability(self, preference, weights):\n delta = self.data.loc[preference[0], :].values - self.data.loc[preference[1], :].values\n variance = delta.dot(self.Sigma).dot(delta)\n sd = np.sqrt(variance)\n mean = weights.dot(delta)\n return norm.logcdf(mean, loc=0, scale=sd)", "title": "" }, { "docid": "b371ae5eed2268e33594c991a4abfe75", "score": "0.62859297", "text": "def _glm_logp(self, x_vec, x_all):\n # Extract the glm parameters\n x_imp = unpackdict(x_vec, self.glm_shapes)\n set_vars(self.bkgd_syms, x_all['glm']['bkgd'], x_imp)\n lp = seval(self.glm_logprior,\n self.syms,\n x_all)\n\n # Compute the log likelihood for each data sequence\n for data in self.population.data_sequences:\n self.population.set_data(data)\n lp += seval(self.glm_ll,\n self.syms,\n x_all)\n\n return lp", "title": "" }, { "docid": "e054948b8c3a485575e3a2709f49803a", "score": "0.62852496", "text": "def prior(params):\n\n p_mean = scipy.stats.norm(1, 0.5).logpdf(params[0])\n p_log_amp = scipy.stats.norm(np.log(0.15), np.log(2)).logpdf(params[1])\n p_log_gamma = scipy.stats.norm(np.log(10), np.log(2)).logpdf(np.log(params[2]))\n #print(params[2])\n #print(\" \" + str(p_log_gamma))\n p_log_period = scipy.stats.norm(np.log(4./24.), (12./24.)).logpdf(params[3])\n # log period (period between 0.5hrs and 36hrs)\n #p_log_period = scipy.stats.uniform(np.log(0.5/24), -(np.log(2/3)+np.log(0.5/24))).logpdf((params[3]))\n\n sum_log_prior = p_mean + p_log_amp + p_log_gamma + p_log_period\n\n if np.isnan(sum_log_prior) == True:\n return -np.inf\n\n return sum_log_prior", "title": "" }, { "docid": "3aea952fed0ea63f54dc4e27942400df", "score": "0.62807274", "text": "def _get_log_pis(self, agent):\n latent_pi, latent_vf, latent_sde = agent.policy._get_latent(self.observations)\n distribution = agent.policy._get_action_dist_from_latent(latent_pi, latent_sde)\n return distribution.distribution.probs", "title": "" }, { "docid": "53fe15b632189af3b6c14d3d7755fbdf", "score": "0.62724346", "text": "def logistic_log_pdf(log_odds, x, temp, eps=10e-10):\n return (\n tf.log(temp + eps)\n - x * temp\n + log_odds\n - 2.0 * tf.log(1.0 + tf.exp(-x*temp + log_odds) + eps)\n )", "title": "" }, { "docid": "521149dbc7c057aca05ac1752e6aba96", "score": "0.6268935", "text": "def predict_log_proba(self, X):\n ...", "title": "" }, { "docid": "521149dbc7c057aca05ac1752e6aba96", "score": "0.6268935", "text": "def predict_log_proba(self, X):\n ...", "title": "" }, { "docid": "2b7affc49c8ee5ce4b6042460263949e", "score": "0.6260619", "text": "def _log_likelihood(self, **kwargs):\n \n a,b,c,=kwargs[\"hmm\"].asMatrices()\n print np.array(c).sum(axis=0)\n return kwargs[\"hmm\"].loglikelihood(self._create_sequence_set(\n qsr_seq=kwargs[\"qsr_seq\"],\n symbols=self.generate_alphabet(num_symbols=int(NUM_SYMBOLS))\n ))", "title": "" }, { "docid": "548bd0158fa85ce9c7b2e71868d14f20", "score": "0.62585104", "text": "def 
logistic_demo_prob():\n logt = sim_log_data(0.5, 0.5, 50, 1, -0.5, -0.5, 50, 1)\n \n probs = [1, 2, 4]\n for p in probs:\n logMod = logistic_mod(logt, p)\n eval_logistic(logMod)\n return 'Done'", "title": "" }, { "docid": "b975e146b1df40e1771b46c217a7a10a", "score": "0.6258035", "text": "def logp(self, pars, blob=None):\n\n # Unpack parameters from vector\n MFe, MNi, MSi, MCO, lpc, y, w, Eth51, muoff, texp, aNi = pars\n # Total white dwarf mass\n MWD = MFe + MNi + MSi + MCO\n # Neutronization prior (Krueger+ 2012, Seitenzahl+ 2013)\n eta = MNi/(MFe + MNi)\n self.eta_Pmu, self.eta_Psig = SNPhysics.krueger12_eta(lpc)\n # Other prior terms P(theta) for ancillary parameters\n chpri = ((muoff - self.muoff_Pmu) / self.muoff_Psig)**2\n chpri += ((eta - self.eta_Pmu) / self.eta_Psig)**2\n chpri += ((aNi - self.aNi_Pmu) / self.aNi_Psig)**2\n chpri += ((MCO/MWD - 0.0) / 0.05)**2\n return -0.5*chpri", "title": "" }, { "docid": "be9598f23df72114295fcb2f66bcf5e3", "score": "0.6253691", "text": "def log_prob(self, x):\n if self._validate_args:\n self._validate_sample(value)\n # compute the variance\n scale = self.scale\n var = scale.pow(2.)\n A = self.expamplitude\n xp = self.loc\n \n # For numerically stable wiki implementation\n sigma = scale\n mu = xp\n tau = A\n \n x_inp_dims = x.size()\n x = torch.tensor(x).to(scale.device).view([-1]+[1]*self.loc.ndimension())\n \n z = ((scale/A)-(x-xp).div(scale)).mul(torch.tensor(1./2., device=x.device).sqrt())\n\n # print(z.max())\n # print(z.min())\n\n\n # Compute all 3 wikipedia implementation, then fill in the result based on value of z\n\n # model 1\n #norm_part = sigma.div(tau).mul(torch.tensor(math.pi/2.).sqrt())\n #norm_part = torch.tensor(1.)\n norm_part = (2.*tau).reciprocal()\n\n exp_part = (var-2.*tau*(x[z<3.]-mu)).div(2.*tau.pow(2))\n erf_part = z[z<3.]\n\n ret1 = norm_part.log() + exp_part + (1.-erf_part.erf()).log()\n\n # model 2\n\n exp_part_approx = (x[(z>=3.)*(z<8191.)]-xp).pow(2).div(-2.*var)\n erfcx_part = z[(z>=3.)*(z<8191.)]\n\n ret2 = norm_part.log() + exp_part_approx + erfcx(erfcx_part).log()\n\n\n # model 3\n exp_part_approx_model3 = (x[z>=8191.]-xp).pow(2).div(-2.*var)\n div_part = 1.-(x[z>=8191.]-xp).mul(A).div(var) \n\n ret3 = exp_part_approx_model3 - div_part.log()\n\n #set_trace()\n\n ret_final = torch.zeros_like(z) \n ret_final[z<3.] += ret1\n ret_final[(z>=3.)*(z<8191.)] += ret2\n ret_final[z>=8191.] += ret3\n \n \n \n #ret_final = ret1*(z<3.).float() + ret2*(z<8191.).float()*(z>=3.).float() + ret3*(z>=8191.).float() \n \n # This does not seem to work for backward pass:\n# ret_final = ret3 # Bad estimate for small-ish z, goes over 1 around 4\n# ret_final[z<8191.] = ret2[z<8191.] # based on single-precision float\n# ret_final[z<3.] = ret1[z<3.] 
# Experimentally, see below\n \n \n if self.loc.ndimension()>0:\n return ret_final.view(*(x_inp_dims + self.loc.size())).squeeze(-self.loc.ndimension()-1)\n else:\n return ret_final.view(*x_inp_dims)\n \n \n \n \n \"\"\"\n # Check which part works best where - seems like the erfcx implementation is not very good\n return torch.stack([ret1[:,1,1], ret2[:,1,1], ret3[:,1,1]], dim=1), z[:,1,1]\n a = ExponentiallyModifiedGaussian(loc = 0.*torch.ones(3,4), \n scale = 0.6*torch.ones(3,4), \n expamplitude=.1*torch.ones(3,4))\n inp = torch.arange(-20., 30., 0.01)\n retval, z = a.log_prob(inp.view(-1,1,1))#[:,1,1]\n #print(retval)\n plot(retval.view(-1,3).exp().clamp(0.,1.), z.view(-1))\n \"\"\"", "title": "" }, { "docid": "78e55da1ecd487aaff4a9994eec1fbbf", "score": "0.62512845", "text": "def prob(self, X):\n # p = self.p\n # r = self.r\n # return scipy.stats.nbinom(n=r, p=1 - p).pmf(X)\n # return binom(X + r - 1, X) * np.power(p, X) * np.power(1 - p, r)\n return np.exp(self.log_prob(X))", "title": "" }, { "docid": "66abbfbbb6356d5c84f4f6201a84cf5a", "score": "0.62478876", "text": "def estimate_log_likelihood(self, X, num_samples=200):\n\n\t\timport estimator\n\t\treturn estimator.Estimator(self).estimate_log_probability(X, num_samples)[0].mean()", "title": "" }, { "docid": "730da6b1362effe9a6862c660f7685f6", "score": "0.62462026", "text": "def predict_log_proba(self, X):\n if self.intercept_fit:\n X = self._add_constant(X)\n return self.model.predict_log_proba(X)", "title": "" }, { "docid": "814ab7b06de102bce114a688163630ec", "score": "0.6244322", "text": "def log_prob(normal_distribution, action):\n logp_pi = normal_distribution.log_prob(action).sum(axis=-1)\n val = func.softplus(-2 * action)\n logp_pi -= (2 * (np.log(2) - action - val)).sum(axis=1)\n return logp_pi", "title": "" }, { "docid": "0476e66c0a337841d0718be630610bc9", "score": "0.6243777", "text": "def expected_log_prob(nonconjugate_params,\n conjugate_params,\n expectations,\n data,\n covariates,\n **kwargs):\n df, = nonconjugate_params\n weights, covariance_matrix = conjugate_params\n scale = np.linalg.cholesky(covariance_matrix)\n scale_diag = np.reshape(scale, scale.shape[:-2] + (-1,))[..., ::dim + 1]\n predictions = covariates @ weights.T\n\n E_tau, E_log_tau = expectations\n hdof = 0.5 * df\n # lp = -np.sum(np.log(np.diag(scale)))\n lp = -np.sum(np.log(scale_diag), axis=-1).reshape(scale.shape[:-2])\n lp += 0.5 * E_log_tau\n lp += hdof * np.log(hdof)\n lp -= spsp.gammaln(hdof)\n lp += (hdof - 1) * E_log_tau\n lp -= hdof * E_tau\n tmp = np.linalg.solve(scale, (data - predictions).T).T\n lp -= 0.5 * E_tau * np.sum(tmp**2, axis=1)\n\n # Optional regularization on the degrees of freedom parameter\n lp -= 1e-8 * hdof # regularization (exponential prior)\n return lp", "title": "" }, { "docid": "0318c6a4a528d4e9c5579ae059822a59", "score": "0.624108", "text": "def perplexity(probabs):\n probabs = np.asarray(probabs)\n return np.exp(-np.sum(np.log(probabs)) / np.max(probabs.shape))", "title": "" }, { "docid": "9477eae1158728497c6a3e4747e5090f", "score": "0.6240986", "text": "def log_prob_data(w, b_h, b_v, log_z, data):\n log_likelihood = data.dot(b_v) + np.sum(np.log(1+np.exp(b_h+data.dot(w))), axis = 1)\n log_prob_of_data = np.sum(log_likelihood)/data.shape[0] - log_z\n return log_prob_of_data", "title": "" }, { "docid": "7fab67fd7c47fb7cec10d3787f8ad97b", "score": "0.6235636", "text": "def log_likelihood_list(self,dataset):\r\n lls = []\r\n for instance,count in dataset:\r\n self._mark_decisions(instance)\r\n pr = 
self.value_top_down(instance)/self.theta_sum\r\n if pr == 0.0:\r\n print \"error: instance has zero probability\"\r\n raise Exception\r\n lls.append(math.log(pr))\r\n self.clear_bits()\r\n self.clear_data()\r\n return lls", "title": "" }, { "docid": "0f50ff60bf05678b4a97cc880594c468", "score": "0.62266606", "text": "def _py_log_prob(self, zs):\n lp = np.zeros((zs.shape[0]), dtype=np.float32)\n for b, z in enumerate(zs):\n z_dict = OrderedDict()\n idx = 0\n for dim, par in zip(self.model.par_dims, self.model.model_pars):\n elems = np.sum(dim)\n if elems == 0:\n z_dict[par] = float(z[idx])\n idx += 1\n else:\n z_dict[par] = z[idx:(idx+elems)].reshape(dim)\n idx += elems\n\n z_unconst = self.model.unconstrain_pars(z_dict)\n lp[b] = self.model.log_prob(z_unconst, adjust_transform=False)\n\n return lp", "title": "" }, { "docid": "4ce9b7abec40fe157ac54de14f5fe391", "score": "0.62235934", "text": "def _glm_logp(self, x_vec, x_all):\n # Extract the glm parameters\n x_imp = unpackdict(x_vec, self.glm_shapes)\n set_vars(self.impulse_syms, x_all['glm']['imp'], x_imp)\n lp = seval(self.glm_logprior,\n self.syms,\n x_all)\n\n # Compute the log likelihood for each data sequence\n for data in self.population.data_sequences:\n self.population.set_data(data)\n lp += seval(self.glm_ll,\n self.syms,\n x_all)\n\n return lp", "title": "" }, { "docid": "2c75db278f557f73b204557c41315946", "score": "0.6222881", "text": "def get_logproba(self, states, actions):\n action_mean, action_logstd = self._forward_actor(states)\n logproba = self.normal_logproba(actions, action_mean, action_logstd)\n return logproba", "title": "" }, { "docid": "37428e05a805d53a665e60a6fe872c3b", "score": "0.62193966", "text": "def logLikelihoods(self, sequences: List[List[ExternalSymbol]]) -> List[LogProbability]:\n raise NotImplementedError", "title": "" }, { "docid": "b8c9f8219fe2a3171c1228e65ab4e0b6", "score": "0.6216099", "text": "def log_prior(self,theta):\n return np.sum((-(theta-self.mu_prior)**2)/(2*self.sigma_prior**2))", "title": "" }, { "docid": "bf8989d222c7efdac0cde262ef27f9dc", "score": "0.62121755", "text": "def probability(cls, logodds: float) -> float:\n return exp(logodds)/(1 + exp(logodds))", "title": "" } ]