Dataset columns and types:
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 5 to 283)
content_id: string (length 40)
detected_licenses: sequence (length 0 to 41)
license_type: string (2 classes)
repo_name: string (length 7 to 96)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (58 classes)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (12.7k to 662M, nullable)
star_events_count: int64 (0 to 35.5k)
fork_events_count: int64 (0 to 20.6k)
gha_license_id: string (11 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (43 classes)
src_encoding: string (9 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (7 to 5.88M)
extension: string (30 classes)
content: string (length 7 to 5.88M)
authors: sequence (length 1)
author: string (length 0 to 73)
f770460af92c083939a9077de2e0ff05c4d2e287 | 81539aba88c22cf75bd2e14f5e0e92f2bf54e962 | /DarkMatterMap2017/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8/TTbarDMJets_Inclusive_pseudoscalar_LO_Mchi-55_Mphi-100_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8_230000_21_cff.py | 46e64e91dd2c9553f431c1f2ed047e1d5fbdff6d | [] | no_license | nistefan/RandomizedParametersSeparator | ad35b48b95e9745814c0bf9d8d8b6eb8aa479177 | 66a0e291b59113c6b5301768f1c10e36cf23d3c3 | refs/heads/master | 2021-01-03T00:41:17.415005 | 2020-02-19T13:30:54 | 2020-02-19T13:30:54 | 239,838,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,005 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, lumisToProcess = cms.untracked.VLuminosityBlockRange(*('1:64595', '1:67002', '1:89900', '1:77078', '1:70987', '1:89308', '1:96817', '1:88552', '1:95790', '1:97608', '1:97710', '1:97959', '1:97964', '1:73811', '1:77032', '1:66553', '1:67863', '1:82190', '1:82453', '1:78967', '1:88229', '1:79761', '1:81211', '1:98820', '1:103174', '1:26404', '1:26444', '1:26492', '1:26591', '1:26640', '1:26937', '1:31018', '1:40577', '1:41094', '1:42735', '1:42832', '1:42929', '1:44245', '1:44277', '1:44097', '1:44179', '1:74399', '1:42014', '1:42776', '1:90050', '1:104683', '1:102324', '1:102453', '1:102527', '1:102543', '1:102616', '1:102929', '1:84859', '1:85096', '1:85478', '1:67435', '1:89390', '1:104524', '1:92758', '1:92571', '1:95371', '1:95873', '1:96281', '1:86766', '1:102101', '1:88951', '1:89933', '1:52600', '1:52673', '1:52794', '1:52811', '1:52819', '1:52909', '1:104138', '1:104155', '1:104511', ))
)
readFiles.extend( ['/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/E4D2AD02-D40A-EA11-A0B7-0025905B8582.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/E48D6400-92FB-E911-A2F9-0CC47A7FC6D0.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/F89F393C-D012-EA11-B06C-44A842BE76FE.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/56BCA64F-BC12-EA11-86AC-002590D425C0.root']); | [
"[email protected]"
] | |
1b12a7d6ad2c4b6ef67aca576b7ce3ed3d735639 | dbc3e853a1aa22db5d8a58c8dab04108fd82d38f | /order/migrations/0004_auto_20181004_1605.py | 7e690da0f34a3ec30a4a2ac5a5c67bf7ffff7c34 | [] | no_license | izonenav/subul | a6f1baeccec936f91a27ce0ddbb098e13c385b9a | 8128666d4e14878cfb04e7a35eb7e1178c61756e | refs/heads/master | 2022-11-13T04:47:40.943093 | 2020-07-14T02:29:09 | 2020-07-14T02:29:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | # Generated by Django 2.0 on 2018-10-04 07:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('order', '0003_auto_20181004_1557'),
]
operations = [
migrations.AlterField(
model_name='order',
name='setProduct',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='product.SetProductCode'),
),
]
| [
"[email protected]"
] | |
63293f3162d18bbda0f7150cfc3e85c652108004 | 04e99e6d9375cd6fe2d0f872539c9f86245466c1 | /60-1.py | a92aa1b5294afbce04f1ec3a739324f0caca20de | [] | no_license | joyce0623/AE401-Python | 5265612e19c0526d54a6236b8817768df3d34b57 | 92f742e93cb1d851d562c962ccb2825ca03b9d74 | refs/heads/main | 2023-03-01T22:08:59.629509 | 2021-02-05T06:06:32 | 2021-02-05T06:06:32 | 334,825,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | score=input('請輸入成績')
score=int(score)  # the prompt above is Chinese for "please enter a grade"
if score>60:
    print('PASS')
elif score==60:
    print('低空飛過')  # Chinese for "barely passed"
else:
print('gg')
| [
"[email protected]"
] | |
8239948750c25d3e8bbbcbdfe375b03ae5d8dc57 | 7fe4daab7282d08933cef12b1bd9dfb4671c3817 | /logging/adv_logging.py | 77bd967eb6e9ed1e6194e30240029aa97d7b1ade | [] | no_license | cheshen1/python_summary | 0055ea34bb7527934e8bf7d45fb923395cb2b4ce | c3e62190202d4c80ca5e9bba4f73cefa003d05ed | refs/heads/master | 2020-03-11T22:45:35.390689 | 2018-04-21T20:39:01 | 2018-04-21T20:39:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import logging
import logging.config
import json
with open('conf.json', 'r') as fd:
logging.config.dictConfig(json.load(fd))
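# A minimal conf.json that would satisfy this script might look like the following
# (hypothetical content; only the 'simpleExample' logger name is taken from the code below):
# {
#   "version": 1,
#   "formatters": {"simple": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"}},
#   "handlers": {"console": {"class": "logging.StreamHandler", "level": "DEBUG", "formatter": "simple"}},
#   "loggers": {"simpleExample": {"level": "DEBUG", "handlers": ["console"], "propagate": false}},
#   "root": {"level": "WARNING", "handlers": ["console"]}
# }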
logger = logging.getLogger('simpleExample')
logger.debug('debug level')
logger.info("info level")
logger.warning("warning level")
logger.error('error level')
logger.critical('critical level') | [
"[email protected]"
] | |
d3a513e37855de056093224b683e5255b9333014 | 7c278ab8887095e82b074464527c349cc67bad65 | /spider_review/822/show_timeout.py | fc13baf6cb9fe6f57112f92f72b59fa5704304b1 | [] | no_license | Berg1994/project_test | a452f01ffc9b01f7cdda060e84ee80d4aef2e31b | 56ed0a04e4e5ea3a3e60d0aa9db3918818dcfa6d | refs/heads/master | 2020-03-22T16:58:06.179871 | 2018-08-31T01:08:22 | 2018-08-31T01:08:22 | 140,363,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | import urllib.request
response = urllib.request.urlopen('http://httpbin.org/get',timeout=1)
print(response.read().decode('utf8')) | [
"[email protected]"
] | |
fd400b0ea89ea5ce4a711be812d9523550c30172 | 8bba6bc097af5161946ffbaa18d69cf3d3fac44f | /processFile.py | dc872a2e1201513970bf32f3799a4c4834d038cf | [] | no_license | fahimbinkhair/python-for-beginner | 8febf40c205a5a17a3e9e2f707316d0e69df0d4e | db15850e9b88a421ac191c33c05d821917504304 | refs/heads/master | 2021-06-16T06:14:53.427621 | 2021-04-11T12:01:52 | 2021-04-11T12:01:52 | 190,476,364 | 0 | 0 | null | 2021-04-11T12:01:32 | 2019-06-05T22:18:53 | Python | UTF-8 | Python | false | false | 746 | py | #!/usr/bin/python3
from lib import DB
from datetime import datetime
db_conn = DB.DbConn()
sql = """ INSERT INTO student
(name, address_line_1, address_line_2, postcode, when_created)
VALUES
(%s, %s, %s, %s, %s)"""
# open and read the file line by line
lines = open("student.txt", "r")
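# Each line of student.txt is assumed to hold pipe-delimited fields:
# name|address line 1|address line 2|postcode (optionally wrapped in leading/trailing pipes)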
for line in lines:
line = line.strip()
line = line.strip("|")
lineData = line.split('|')
name = lineData[0]
addressLine1 = lineData[1]
addressLine2 = lineData[2]
postCode = lineData[3]
print('Saving: ' + name)
sqlValue = (name, addressLine1, addressLine2, postCode, datetime.now())
db_conn.get_cursor().execute(sql, sqlValue)
db_conn.commit().close_cursor().close_db_connection()
| [
"[email protected]"
] | |
c44711e666d734e379abb3124c353bfc29294675 | 7e62f0928681aaaecae7daf360bdd9166299b000 | /external/DirectXShaderCompiler/tools/clang/utils/check_cfc/obj_diff.py | f89ffd12de55259283b177f26b3af86efdfc3de9 | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | yuri410/rpg | 949b001bd0aec47e2a046421da0ff2a1db62ce34 | 266282ed8cfc7cd82e8c853f6f01706903c24628 | refs/heads/master | 2020-08-03T09:39:42.253100 | 2020-06-16T15:38:03 | 2020-06-16T15:38:03 | 211,698,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py | #!/usr/bin/env python2.7
from __future__ import print_function
import argparse
import difflib
import filecmp
import os
import subprocess
import sys
disassembler = 'objdump'
def keep_line(line):
"""Returns true for lines that should be compared in the disassembly
output."""
return "file format" not in line
def disassemble(objfile):
"""Disassemble object to a file."""
p = subprocess.Popen([disassembler, '-d', objfile],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
if p.returncode or err:
print("Disassemble failed: {}".format(objfile))
sys.exit(1)
return filter(keep_line, out.split(os.linesep))
def dump_debug(objfile):
"""Dump all of the debug info from a file."""
p = subprocess.Popen([disassembler, '-WliaprmfsoRt', objfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = p.communicate()
if p.returncode or err:
print("Dump debug failed: {}".format(objfile))
sys.exit(1)
return filter(keep_line, out.split(os.linesep))
def first_diff(a, b, fromfile, tofile):
"""Returns the first few lines of a difference, if there is one. Python
diff can be very slow with large objects and the most interesting changes
are the first ones. Truncate data before sending to difflib. Returns None
    if there is no difference."""
# Find first diff
first_diff_idx = None
for idx, val in enumerate(a):
if val != b[idx]:
first_diff_idx = idx
break
if first_diff_idx == None:
# No difference
return None
# Diff to first line of diff plus some lines
context = 3
diff = difflib.unified_diff(a[:first_diff_idx+context],
b[:first_diff_idx+context],
fromfile,
tofile)
difference = "\n".join(diff)
if first_diff_idx + context < len(a):
difference += "\n*** Diff truncated ***"
return difference
def compare_object_files(objfilea, objfileb):
"""Compare disassembly of two different files.
Allowing unavoidable differences, such as filenames.
Return the first difference if the disassembly differs, or None.
"""
disa = disassemble(objfilea)
disb = disassemble(objfileb)
return first_diff(disa, disb, objfilea, objfileb)
def compare_debug_info(objfilea, objfileb):
"""Compare debug info of two different files.
Allowing unavoidable differences, such as filenames.
Return the first difference if the debug info differs, or None.
If there are differences in the code, there will almost certainly be differences in the debug info too.
"""
dbga = dump_debug(objfilea)
dbgb = dump_debug(objfileb)
return first_diff(dbga, dbgb, objfilea, objfileb)
def compare_exact(objfilea, objfileb):
"""Byte for byte comparison between object files.
Returns True if equal, False otherwise.
"""
return filecmp.cmp(objfilea, objfileb)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('objfilea', nargs=1)
parser.add_argument('objfileb', nargs=1)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
diff = compare_object_files(args.objfilea[0], args.objfileb[0])
if diff:
print("Difference detected")
if args.verbose:
print(diff)
sys.exit(1)
else:
print("The same")
| [
"[email protected]"
] | |
908fc268a280cf50cef0db9a60f38306992badae | e288eda6ddbba137a33f137cbf0b23b003847b4d | /auto/exception.py | 7987b78e9f1cca64b7b817dff19fd232715b7476 | [
"MIT"
] | permissive | awake006/auto | f0e932f86e51d2a624f1ffb30c96edf31a59b77d | 7966115621e342dcac3feab45289358dd7b85cbd | refs/heads/master | 2021-06-24T04:56:16.479874 | 2019-06-14T07:33:10 | 2019-06-14T07:33:10 | 99,061,581 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | class TestCaseNotException(Exception):
pass
class ParameterBuildFailedException(Exception):
pass
class ResponseErr(Exception):
pass
class ReturnFormatException(Exception):
pass
class CaseFailException(Exception):
pass
class CaseRequiredDataException(Exception):
pass
class ParameterFormatException(Exception):
pass
| [
"[email protected]"
] | |
a345bd157f88e5758b1a12545dce0ad7fc4fd07b | 3dbe42f81501c7538741b03d634eefdedcd95e72 | /Homework/hw3_knapsack_&_slicing/hw3_template.py | 5f9ed17771a8f00197483c7903b9b249855a06c2 | [] | no_license | krodrig91/intro-to-prog-python | 195025958bcbeeda0e40c57570ee2729440e2ba3 | 19aeb980c3acbe819368c36c9239e4c19795262e | refs/heads/master | 2020-05-17T02:34:29.787831 | 2015-09-10T01:35:48 | 2015-09-10T01:35:48 | 42,210,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py | # Be sure to submit hw3.py. Remove the '_template' from the file name.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
' PROBLEM 0
' Implement the function giveChange() here:
' See the PDF in Canvas for more details.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# your code goes here
# Here's the list of letter values and a small dictionary to use.
# Leave the following lists in place.
scrabbleScores = \
[ ['a', 1], ['b', 3], ['c', 3], ['d', 2], ['e', 1], ['f', 4], ['g', 2],
['h', 4], ['i', 1], ['j', 8], ['k', 5], ['l', 1], ['m', 3], ['n', 1],
['o', 1], ['p', 3], ['q', 10], ['r', 1], ['s', 1], ['t', 1], ['u', 1],
['v', 4], ['w', 4], ['x', 8], ['y', 4], ['z', 10] ]
Dictionary = ['a', 'am', 'at', 'apple', 'bat', 'bar', 'babble', 'can', 'foo',
'spam', 'spammy', 'zzyzva']
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
' PROBLEM 1
' Implement wordsWithScore() which is specified below.
' Hints: Use map. Feel free to use some of the functions you did for
' homework 2 (Scrabble Scoring). As always, include any helper
' functions in this file, so we can test it.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def wordsWithScore(dct, scores):
'''List of words in dct, with their Scrabble score.
Assume dct is a list of words and scores is a list of [letter,number]
pairs. Return the dictionary annotated so each word is paired with its
    value. For example, wordsWithScore(Dictionary, scrabbleScores) should
return [['a', 1], ['am', 4], ['at', 2] ...etc... ]
'''
return None # your code goes here
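# One possible solution sketch (not part of the original template), using map and a
# helper that sums the letter values of a word according to the scores list:
def wordScoreExample(word, scores):
    '''Sum of the point values of the letters in word, looked up in scores.'''
    total = 0
    for letter in word:
        for pair in scores:
            if pair[0] == letter:
                total += pair[1]
    return total

def wordsWithScoreExample(dct, scores):
    '''Example: pair each word in dct with its Scrabble score.'''
    return list(map(lambda word: [word, wordScoreExample(word, scores)], dct))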
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
' PROBLEM 2
' For the sake of an exercise, we will implement a function
' that does a kind of slice. You must use recursion for this
' one. Your code is allowed to refer to list index L[0] and
' also use slice notation L[1:] but no other slices.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def take(n, L):
'''Returns the list L[0:n].'''
return None # your code goes here
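# A possible recursive solution sketch (kept separate from the template stub above):
def takeExample(n, L):
    '''Returns the list L[0:n], using only L[0] and L[1:].'''
    if n <= 0 or L == []:
        return []
    return [L[0]] + takeExample(n - 1, L[1:])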
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
' PROBLEM 3
' Similar to problem 2, will implement another function
' that does a kind of slice. You must use recursion for this
' one. Your code is allowed to refer to list index L[0] and
' also use slice notation L[1:] but no other slices.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def drop(n, L):
'''Returns the list L[n:].'''
return None # your code goes here
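# A possible recursive solution sketch (kept separate from the template stub above):
def dropExample(n, L):
    '''Returns the list L[n:] recursively.'''
    if n <= 0 or L == []:
        return L
    return dropExample(n - 1, L[1:])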
| [
"[email protected]"
] | |
92a0a08e2763650f2669521f631586dc290cc80f | 20109ea82489e7eb55a76812d34ff8ada0b1a4d5 | /scripts/generate_gene_fasta.py | 478f5f65c40c15dfa5dd61a06d36159326f7d16e | [] | no_license | mbhall88/tubemaps_pilot | e8ece81f5ec8f456cc9abb2f34d770de10fcba14 | 31471f2afb390be9340d213cb4e91019b81f69d2 | refs/heads/master | 2022-11-25T22:56:22.222974 | 2019-06-19T13:08:08 | 2019-06-20T05:39:13 | 109,889,266 | 1 | 1 | null | 2022-11-08T22:05:07 | 2017-11-07T20:50:09 | xBase | UTF-8 | Python | false | false | 2,185 | py | """
The purpose of this script is to take a GFF file and a reference genome and
generate a fasta file for each gene in the GFF file - based on the coordinates
in the GFF file. The GFF file should obviously be for the given reference genome.
First argument is the reference genome.
Second argument is the GFF file containing genes you want fastas for.
Third argument is the directory to write the fastas to.
"""
import os
import sys
def fasta_parser(filename):
"""Parse a fasta file and withdraw the sequences and their sequence/read id
Args:
        filename (str): Path for the fasta file.

    Returns:
        fasta (dict[str]): A dictionary where the keys are the sequence/read
            id and the value is the sequence for that sequence/read.
    """
fasta = {}
with open(filename, 'r') as f:
contents = f.read()[1:].split('\n>')
for section in contents:
sample = section.split('\n')
sample_id = sample[0]
seq = ''.join(sample[1:]).strip()
fasta[sample_id] = seq
return fasta
def fasta_writer(file_obj, header, seq, wrap=60):
"""file_obj must be an open file object"""
file_obj.write(header + '\n')
for i in range(0, len(seq), wrap):
file_obj.write(seq[i: i + wrap] + '\n')
def get_gene_name(field):
return field.split('Name=')[-1].split(';')[0]
ref = fasta_parser(sys.argv[1])
ref_genome = list(ref.values())[0]
gff_filename = sys.argv[2]
output_dir = os.path.realpath(sys.argv[3])
offset = 100
with open(gff_filename, 'r') as gff:
for row in gff:
if row.startswith('#'):
continue
elements = row.split('\t')
gene = get_gene_name(elements[-1])
# minus one for start index due to 0-based vs 1-based indexing
start = int(elements[3]) - 1
end = int(elements[4])
with open(os.path.join(output_dir, gene + '.fa'), 'w') as fout:
seq = ref_genome[start-offset: end+offset]
header = '>{0}|gene_start={2}|offset={3}|gene={1}'\
.format(list(ref.keys())[0], gene, start, offset)
# write fasta file
fasta_writer(fout, header, seq)
| [
"[email protected]"
] | |
f1e29b62c9a28e72424aa83a0914d9bb39afb379 | 30c43c3ecea184bccfa63a5b94af7553aff26208 | /Scrapy/Maoyan/Maoyan/pipelines.py | 23355ac6f14c0d25fc78006d6ffc63bc226b57f2 | [] | no_license | wangxinchao-bit/SpiderProject | d1f8940b4e7b33e28f93120d18cb4685714bb97f | 771d71e39635840e93bd43dbfe3731c80e7b81b0 | refs/heads/master | 2023-02-24T23:55:08.112114 | 2021-01-30T02:21:18 | 2021-01-30T02:21:18 | 329,494,442 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | # -*- coding: utf-8 -*-
import csv
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class MaoyanPipeline:
    def process_item(self, item, spider):
        print('*' * 50)
        print(item, '\n')
        # a pipeline should return the item so later pipelines / feed exporters still receive it
        return item
# print(item,'\t') | [
"[email protected]"
] | |
eba1f95254ed280e1ccf7b3ad2d9ca703ea47941 | f485dff7fcb036868d6e4053a7a6ccd7f95214bf | /week11/triathlete_v3_111.py | 3cece7689cd808f21dbf758baf8fc6549c695eb9 | [] | no_license | jamesfallon99/CA117 | aa4f851365aafe8a4888c85e1b8b2f571e2c9b2a | 0055ccbbd710453c9574930b361c26fcde2b9036 | refs/heads/master | 2020-06-28T14:26:06.829418 | 2019-08-02T16:41:38 | 2019-08-02T16:41:38 | 200,254,591 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | #!/usr/bin/env python3
class Triathlete(object):
def __init__(self, name, tid):
self.name = name
self.tid = tid
self.times = {}
self.race_time = 0
def add_time(self, sport, time):
self.times[sport] = time
self.race_time += time
def get_time(self, sport):
return self.times[sport]
def __eq__(self, other):
return self.race_time == other.race_time
def __gt__(self, other):
return self.race_time > other.race_time
def __str__(self):
l = []
l.append("Name: {}".format(self.name))
l.append("ID: {}".format(self.tid))
l.append("Race time: {}".format(self.race_time))
return "\n".join(l)
| [
"[email protected]"
] | |
112c208e0ac353fc8cd0be37ffb0547e0eee41a2 | d6a24411501e6a004e3f5357830d9eafb85651e4 | /mysite/stockAnalysis/forms.py | 1245aa4e2c00854ed84e5ce7a64d4d39ac95212a | [] | no_license | kelvonlys/Double-Top-and-Bottom | f38957753ef059362dc86a194eff217adcaa369f | 9ac0e9993e23d20bd542137bd4f685d448bbdc9f | refs/heads/master | 2023-03-10T06:01:09.600940 | 2021-02-20T15:43:39 | 2021-02-20T15:43:39 | 340,569,010 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | from django import forms
class NameForm(forms.Form):
stockNum = forms.CharField(label='Stock num', max_length=100) | [
"[email protected]"
] | |
013f2e30cd5d895440421b5907eff3e4fb6a0c0e | 5333649845c6c6dbee1be2cea7250ac97f1c4d91 | /spider/app_spider/app_spider/run.py | 9148e834890f1247d69c3c2bfa1ef507d36fad4e | [] | no_license | shy-shhy/Top5 | 8c9ef21cb7bc73f245cbcfa47156f9fcb5e3aaff | e3b542b7867fc59084a845ff6206f091239e1797 | refs/heads/master | 2022-11-12T10:09:10.483114 | 2020-07-02T10:54:49 | 2020-07-02T10:54:49 | 276,609,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | from scrapy import cmdline
cmdline.execute('scrapy crawl score'.split()) | [
"[email protected]"
] | |
05534d99c100465e8105af77ff2a572f4486ffdd | 761852e730557bd2c5e4fe2417d7891fc115dd9a | /app/healthcare/migrations/0030_auto_20191020_0029.py | 0de0b082fff306e7dd1b3ea1d81ed6c6a6d51b86 | [] | no_license | ajay2016/ZenWellness | 468810aef2d7298ccfe7961c519a23ed7d355ebb | 94f80b7352523f33dec9cd78157bdd02bb68a31c | refs/heads/master | 2023-06-01T21:55:10.374847 | 2021-06-21T06:47:28 | 2021-06-21T06:47:28 | 378,832,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | # Generated by Django 2.2.4 on 2019-10-20 00:29
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('healthcare', '0029_auto_20191020_0019'),
]
operations = [
migrations.RemoveField(
model_name='patientlabtest',
name='requested_delivery_date',
),
migrations.AddField(
model_name='patientlabtest',
name='requested_date',
field=models.DateTimeField(default=datetime.datetime(2019, 10, 20, 0, 29, 50, 989477)),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
0c7c403ed0564e6700eb2d19b217eb94edb2927e | 4f9e6b686bc369ea9ba0e712a268bef22727657a | /build/arbotix_ros-indigo-devel/arbotix_controllers/catkin_generated/pkg.installspace.context.pc.py | 9db299a9f026fd7e7a7c15c51f419aef02736ba9 | [] | no_license | YongzheZhang/catkin_make | ce8efd05887b6bbaa6c01294c37a5a939bd29e33 | ad79cc20ef0021b8f8e6ab10d9183044acd151e2 | refs/heads/master | 2020-03-19T07:41:38.271776 | 2018-06-05T08:51:55 | 2018-06-05T08:51:55 | 136,106,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "arbotix_controllers"
PROJECT_SPACE_DIR = "/home/zhe/catkin_ws/install"
PROJECT_VERSION = "0.10.0"
| [
"[email protected]"
] | |
30f98cefec9fc5094ffbd13f37ef7228eb03829c | dd27808508beb837acd99ef4dbe3089b442a5171 | /mrlwebsite/ui/views.py | 0ebcf71a9e90f6a3a94e7f2447f5216f965ffb40 | [] | no_license | ayushmr/mrlwebsite | b18762248dba7f20768d6d2a23abe2a36fb9f297 | 3acca2685558a0f153e8599dea833b52b7ee989f | refs/heads/master | 2022-08-13T19:16:06.009230 | 2020-05-22T11:38:17 | 2020-05-22T11:38:17 | 266,092,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,855 | py | from django.shortcuts import render
from .models import Master,Countries,ComCountryRelation,RegulatoryParameters,TypeOfParameters,Profile,Commodities
# Create your views here.
from django.http import HttpResponse
from django.db.models import Q
from django.shortcuts import redirect
import xlwt
import csv
from operator import itemgetter
from itertools import groupby
from .forms import Getdata
def index(request):
countrieslist=request.session['col']
commoditylist=request.session['coml']
parameterlist=request.session['parl']
master=Master.objects.filter(Q(country__in=countrieslist)&Q(product__in=commoditylist)&Q(parameter__in=parameterlist)).all()
# master_by_countries=Master.objects.values('country')
# master_by_commodities=Master.objects.values('product')
# master_by_country_commodity=master.filter()
# master_by_parameters=Master.objects.values('parameter')
# for country in master_by_countries:
# for commodity in master_by_commodities:
# for parameter in master_by_parameters:
# master.filter()
countries=Countries.objects.all()
com=Commodities.objects.all()
comcon=ComCountryRelation.objects.all()
params=RegulatoryParameters.objects.all()
paramtype=TypeOfParameters.objects.all()
prof=Profile.objects.all()
return render(request,'report.html',{'master': master,'countries':countries,'com':com,'comcon':comcon,'params':params,'paramtype':paramtype,'prof':prof})
# def new_report(request):
# form=Getdata(request.POST or None)
# if request.POST:
# data=request.POST.copy()
# countrylist=data.getlist('countries')
# comlist=data.getlist('commodities')
# paramlist=data.getlist('parameters')
def form(request):
# context={'form':}
form=Getdata(request.POST or None)
# context['form']= Getdata()
if request.POST:
# if form.is_valid():
data = request.POST.copy()
request.session['col']=data.getlist('countries')
request.session['coml']=data.getlist('commodities')
request.session['parl']=data.getlist('parameters')
return redirect('/ui')
# temp=form.cleaned_data.get()
# print(temp)
return render(request,"form.html",{'form':form,})
def excel_view(request):
normal_style = xlwt.easyxf("""
font:
name Verdana
""")
    response = HttpResponse(content_type='application/ms-excel')  # 'ui/ms-excel' is not a valid MIME type
# response['Content-Disposition'] = 'attachment; filename="data.csv"'
# writer = csv.writer(response)
# writer.writerow(['Username', 'First name', 'Last name', 'Email address'])
# print(request.GET.copy())
wb = xlwt.Workbook()
ws0 = wb.add_sheet('Worksheet')
ws0.write(0, 0, "something", normal_style)
wb.save(response)
return response
| [
"[email protected]"
] | |
9ad54547b06cefe0619d3bd07eaed8a5feaee11f | 4a752714c4b967f7ba0cc25dceacd32791e7d427 | /SchemaPages/schemapages_pb2.py | 74777ad128f2bba30abebbe01d8db769119be2a7 | [
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] | permissive | shaedrich/schemaorg | 9d734c4c6e14ded1b4d6cdd592d6eff831652358 | 69a0cd2470fcf85daf527097506e4087831289c2 | refs/heads/main | 2023-03-06T06:17:45.384106 | 2020-09-30T19:45:40 | 2020-09-30T19:45:40 | 430,631,313 | 1 | 0 | Apache-2.0 | 2021-11-22T08:54:56 | 2021-11-22T08:54:55 | null | UTF-8 | Python | false | true | 34,111 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: schemapages.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='schemapages.proto',
package='SchemaPages',
syntax='proto2',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x11schemapages.proto\x12\x0bSchemaPages\"\x1e\n\tSuperPath\x12\x11\n\tsuperPath\x18\x01 \x03(\t\"\x97\x02\n\x07SDOTerm\x12\'\n\x08termType\x18\x01 \x02(\x0e\x32\x15.SchemaPages.TermType\x12\x0b\n\x03uri\x18\x02 \x02(\t\x12\r\n\x05label\x18\x04 \x02(\t\x12*\n\nsuperPaths\x18\x06 \x03(\x0b\x32\x16.SchemaPages.SuperPath\x12\x18\n\x10\x61\x63knowledgements\x18\x05 \x03(\t\x12\x0f\n\x07\x63omment\x18\x07 \x02(\t\x12\x13\n\x0b\x65quivalents\x18\x08 \x03(\t\x12\x0f\n\x07pending\x18\t \x02(\x08\x12\x0f\n\x07retired\x18\n \x02(\x08\x12\x14\n\x0csupersededBy\x18\x0b \x01(\t\x12\x12\n\nsupersedes\x18\x0c \x03(\t\x12\x0f\n\x07sources\x18\r \x03(\t\"\xd8\x01\n\x0bSDOBaseType\x12\n\n\x02id\x18\x01 \x02(\t\x12,\n\x0etermdescriptor\x18\x02 \x03(\x0b\x32\x14.SchemaPages.SDOTerm\x12\x12\n\nproperties\x18\x03 \x03(\t\x12\x15\n\rallproperties\x18\x04 \x03(\t\x12\x17\n\x0f\x65xpectedTypeFor\x18\x05 \x03(\t\x12\x1a\n\x12\x65numerationMembers\x18\x06 \x03(\t\x12\x0c\n\x04subs\x18\t \x03(\t\x12\x0e\n\x06supers\x18\n \x03(\t\x12\x11\n\ttermStack\x18\x0b \x03(\t\"\xb8\x01\n\x0bSDOProperty\x12\n\n\x02id\x18\x01 \x02(\t\x12,\n\x0etermdescriptor\x18\x02 \x03(\x0b\x32\x14.SchemaPages.SDOTerm\x12\x16\n\x0e\x64omainIncludes\x18\x03 \x03(\t\x12\x15\n\rrangeIncludes\x18\x04 \x03(\t\x12\x0f\n\x07inverse\x18\x05 \x02(\t\x12\x0c\n\x04subs\x18\x06 \x03(\t\x12\x0e\n\x06supers\x18\x07 \x03(\t\x12\x11\n\ttermStack\x18\x08 \x03(\t\"j\n\x13SDOEnumerationValue\x12\n\n\x02id\x18\x01 \x02(\t\x12,\n\x0etermdescriptor\x18\x02 \x03(\x0b\x32\x14.SchemaPages.SDOTerm\x12\x19\n\x11\x65numerationParent\x18\x03 \x02(\t\"\'\n\x0cSDOReference\x12\n\n\x02id\x18\x01 \x02(\t\x12\x0b\n\x03uri\x18\x02 \x02(\t\"\xa8\x02\n\x13SDOBaseTypeExpanded\x12\n\n\x02id\x18\x01 \x02(\t\x12,\n\x0etermdescriptor\x18\x02 \x03(\x0b\x32\x14.SchemaPages.SDOTerm\x12,\n\nproperties\x18\x03 \x03(\x0b\x32\x18.SchemaPages.SDOProperty\x12\x31\n\x0f\x65xpectedTypeFor\x18\x04 \x03(\x0b\x32\x18.SchemaPages.SDOProperty\x12\x1a\n\x12\x65numerationMembers\x18\x05 \x03(\t\x12\x0c\n\x04subs\x18\x06 \x03(\t\x12\x0e\n\x06supers\x18\x07 \x03(\t\x12<\n\ttermStack\x18\x08 \x03(\x0b\x32).SchemaPages.SDOBaseTypeExpandedPropsOnly\"\x86\x02\n\x1cSDOBaseTypeExpandedPropsOnly\x12\n\n\x02id\x18\x01 \x02(\t\x12,\n\x0etermdescriptor\x18\x02 \x03(\x0b\x32\x14.SchemaPages.SDOTerm\x12,\n\nproperties\x18\x03 \x03(\x0b\x32\x18.SchemaPages.SDOProperty\x12\x31\n\x0f\x65xpectedTypeFor\x18\x04 \x03(\x0b\x32\x18.SchemaPages.SDOProperty\x12\x1a\n\x12\x65numerationMembers\x18\x05 \x03(\t\x12\x0c\n\x04subs\x18\x06 \x03(\t\x12\x0e\n\x06supers\x18\x07 \x03(\t\x12\x11\n\ttermStack\x18\x08 \x03(\t*f\n\x08TermType\x12\x08\n\x04TYPE\x10\x00\x12\x0c\n\x08PROPERTY\x10\x01\x12\x0c\n\x08\x44\x41TATYPE\x10\x02\x12\x0f\n\x0b\x45NUMERATION\x10\x03\x12\x14\n\x10\x45NUMERATIONVALUE\x10\x04\x12\r\n\tREFERENCE\x10\x05'
)
_TERMTYPE = _descriptor.EnumDescriptor(
name='TermType',
full_name='SchemaPages.TermType',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='PROPERTY', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='DATATYPE', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENUMERATION', index=3, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='ENUMERATIONVALUE', index=4, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='REFERENCE', index=5, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=1467,
serialized_end=1569,
)
_sym_db.RegisterEnumDescriptor(_TERMTYPE)
TermType = enum_type_wrapper.EnumTypeWrapper(_TERMTYPE)
TYPE = 0
PROPERTY = 1
DATATYPE = 2
ENUMERATION = 3
ENUMERATIONVALUE = 4
REFERENCE = 5
_SUPERPATH = _descriptor.Descriptor(
name='SuperPath',
full_name='SchemaPages.SuperPath',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='superPath', full_name='SchemaPages.SuperPath.superPath', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=64,
)
_SDOTERM = _descriptor.Descriptor(
name='SDOTerm',
full_name='SchemaPages.SDOTerm',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='termType', full_name='SchemaPages.SDOTerm.termType', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='SchemaPages.SDOTerm.uri', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='label', full_name='SchemaPages.SDOTerm.label', index=2,
number=4, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='superPaths', full_name='SchemaPages.SDOTerm.superPaths', index=3,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='acknowledgements', full_name='SchemaPages.SDOTerm.acknowledgements', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='comment', full_name='SchemaPages.SDOTerm.comment', index=5,
number=7, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='equivalents', full_name='SchemaPages.SDOTerm.equivalents', index=6,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='pending', full_name='SchemaPages.SDOTerm.pending', index=7,
number=9, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='retired', full_name='SchemaPages.SDOTerm.retired', index=8,
number=10, type=8, cpp_type=7, label=2,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='supersededBy', full_name='SchemaPages.SDOTerm.supersededBy', index=9,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='supersedes', full_name='SchemaPages.SDOTerm.supersedes', index=10,
number=12, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sources', full_name='SchemaPages.SDOTerm.sources', index=11,
number=13, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=67,
serialized_end=346,
)
_SDOBASETYPE = _descriptor.Descriptor(
name='SDOBaseType',
full_name='SchemaPages.SDOBaseType',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SchemaPages.SDOBaseType.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termdescriptor', full_name='SchemaPages.SDOBaseType.termdescriptor', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='SchemaPages.SDOBaseType.properties', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='allproperties', full_name='SchemaPages.SDOBaseType.allproperties', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expectedTypeFor', full_name='SchemaPages.SDOBaseType.expectedTypeFor', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enumerationMembers', full_name='SchemaPages.SDOBaseType.enumerationMembers', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='subs', full_name='SchemaPages.SDOBaseType.subs', index=6,
number=9, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='supers', full_name='SchemaPages.SDOBaseType.supers', index=7,
number=10, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termStack', full_name='SchemaPages.SDOBaseType.termStack', index=8,
number=11, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=349,
serialized_end=565,
)
_SDOPROPERTY = _descriptor.Descriptor(
name='SDOProperty',
full_name='SchemaPages.SDOProperty',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SchemaPages.SDOProperty.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termdescriptor', full_name='SchemaPages.SDOProperty.termdescriptor', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='domainIncludes', full_name='SchemaPages.SDOProperty.domainIncludes', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rangeIncludes', full_name='SchemaPages.SDOProperty.rangeIncludes', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='inverse', full_name='SchemaPages.SDOProperty.inverse', index=4,
number=5, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='subs', full_name='SchemaPages.SDOProperty.subs', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='supers', full_name='SchemaPages.SDOProperty.supers', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termStack', full_name='SchemaPages.SDOProperty.termStack', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=568,
serialized_end=752,
)
_SDOENUMERATIONVALUE = _descriptor.Descriptor(
name='SDOEnumerationValue',
full_name='SchemaPages.SDOEnumerationValue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SchemaPages.SDOEnumerationValue.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termdescriptor', full_name='SchemaPages.SDOEnumerationValue.termdescriptor', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enumerationParent', full_name='SchemaPages.SDOEnumerationValue.enumerationParent', index=2,
number=3, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=754,
serialized_end=860,
)
_SDOREFERENCE = _descriptor.Descriptor(
name='SDOReference',
full_name='SchemaPages.SDOReference',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SchemaPages.SDOReference.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='uri', full_name='SchemaPages.SDOReference.uri', index=1,
number=2, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=862,
serialized_end=901,
)
_SDOBASETYPEEXPANDED = _descriptor.Descriptor(
name='SDOBaseTypeExpanded',
full_name='SchemaPages.SDOBaseTypeExpanded',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SchemaPages.SDOBaseTypeExpanded.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termdescriptor', full_name='SchemaPages.SDOBaseTypeExpanded.termdescriptor', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='SchemaPages.SDOBaseTypeExpanded.properties', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expectedTypeFor', full_name='SchemaPages.SDOBaseTypeExpanded.expectedTypeFor', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enumerationMembers', full_name='SchemaPages.SDOBaseTypeExpanded.enumerationMembers', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='subs', full_name='SchemaPages.SDOBaseTypeExpanded.subs', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='supers', full_name='SchemaPages.SDOBaseTypeExpanded.supers', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termStack', full_name='SchemaPages.SDOBaseTypeExpanded.termStack', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=904,
serialized_end=1200,
)
_SDOBASETYPEEXPANDEDPROPSONLY = _descriptor.Descriptor(
name='SDOBaseTypeExpandedPropsOnly',
full_name='SchemaPages.SDOBaseTypeExpandedPropsOnly',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='SchemaPages.SDOBaseTypeExpandedPropsOnly.id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termdescriptor', full_name='SchemaPages.SDOBaseTypeExpandedPropsOnly.termdescriptor', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='properties', full_name='SchemaPages.SDOBaseTypeExpandedPropsOnly.properties', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='expectedTypeFor', full_name='SchemaPages.SDOBaseTypeExpandedPropsOnly.expectedTypeFor', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enumerationMembers', full_name='SchemaPages.SDOBaseTypeExpandedPropsOnly.enumerationMembers', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='subs', full_name='SchemaPages.SDOBaseTypeExpandedPropsOnly.subs', index=5,
number=6, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='supers', full_name='SchemaPages.SDOBaseTypeExpandedPropsOnly.supers', index=6,
number=7, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='termStack', full_name='SchemaPages.SDOBaseTypeExpandedPropsOnly.termStack', index=7,
number=8, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1203,
serialized_end=1465,
)
_SDOTERM.fields_by_name['termType'].enum_type = _TERMTYPE
_SDOTERM.fields_by_name['superPaths'].message_type = _SUPERPATH
_SDOBASETYPE.fields_by_name['termdescriptor'].message_type = _SDOTERM
_SDOPROPERTY.fields_by_name['termdescriptor'].message_type = _SDOTERM
_SDOENUMERATIONVALUE.fields_by_name['termdescriptor'].message_type = _SDOTERM
_SDOBASETYPEEXPANDED.fields_by_name['termdescriptor'].message_type = _SDOTERM
_SDOBASETYPEEXPANDED.fields_by_name['properties'].message_type = _SDOPROPERTY
_SDOBASETYPEEXPANDED.fields_by_name['expectedTypeFor'].message_type = _SDOPROPERTY
_SDOBASETYPEEXPANDED.fields_by_name['termStack'].message_type = _SDOBASETYPEEXPANDEDPROPSONLY
_SDOBASETYPEEXPANDEDPROPSONLY.fields_by_name['termdescriptor'].message_type = _SDOTERM
_SDOBASETYPEEXPANDEDPROPSONLY.fields_by_name['properties'].message_type = _SDOPROPERTY
_SDOBASETYPEEXPANDEDPROPSONLY.fields_by_name['expectedTypeFor'].message_type = _SDOPROPERTY
DESCRIPTOR.message_types_by_name['SuperPath'] = _SUPERPATH
DESCRIPTOR.message_types_by_name['SDOTerm'] = _SDOTERM
DESCRIPTOR.message_types_by_name['SDOBaseType'] = _SDOBASETYPE
DESCRIPTOR.message_types_by_name['SDOProperty'] = _SDOPROPERTY
DESCRIPTOR.message_types_by_name['SDOEnumerationValue'] = _SDOENUMERATIONVALUE
DESCRIPTOR.message_types_by_name['SDOReference'] = _SDOREFERENCE
DESCRIPTOR.message_types_by_name['SDOBaseTypeExpanded'] = _SDOBASETYPEEXPANDED
DESCRIPTOR.message_types_by_name['SDOBaseTypeExpandedPropsOnly'] = _SDOBASETYPEEXPANDEDPROPSONLY
DESCRIPTOR.enum_types_by_name['TermType'] = _TERMTYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SuperPath = _reflection.GeneratedProtocolMessageType('SuperPath', (_message.Message,), {
'DESCRIPTOR' : _SUPERPATH,
'__module__' : 'schemapages_pb2'
# @@protoc_insertion_point(class_scope:SchemaPages.SuperPath)
})
_sym_db.RegisterMessage(SuperPath)
SDOTerm = _reflection.GeneratedProtocolMessageType('SDOTerm', (_message.Message,), {
'DESCRIPTOR' : _SDOTERM,
'__module__' : 'schemapages_pb2'
# @@protoc_insertion_point(class_scope:SchemaPages.SDOTerm)
})
_sym_db.RegisterMessage(SDOTerm)
SDOBaseType = _reflection.GeneratedProtocolMessageType('SDOBaseType', (_message.Message,), {
'DESCRIPTOR' : _SDOBASETYPE,
'__module__' : 'schemapages_pb2'
# @@protoc_insertion_point(class_scope:SchemaPages.SDOBaseType)
})
_sym_db.RegisterMessage(SDOBaseType)
SDOProperty = _reflection.GeneratedProtocolMessageType('SDOProperty', (_message.Message,), {
'DESCRIPTOR' : _SDOPROPERTY,
'__module__' : 'schemapages_pb2'
# @@protoc_insertion_point(class_scope:SchemaPages.SDOProperty)
})
_sym_db.RegisterMessage(SDOProperty)
SDOEnumerationValue = _reflection.GeneratedProtocolMessageType('SDOEnumerationValue', (_message.Message,), {
'DESCRIPTOR' : _SDOENUMERATIONVALUE,
'__module__' : 'schemapages_pb2'
# @@protoc_insertion_point(class_scope:SchemaPages.SDOEnumerationValue)
})
_sym_db.RegisterMessage(SDOEnumerationValue)
SDOReference = _reflection.GeneratedProtocolMessageType('SDOReference', (_message.Message,), {
'DESCRIPTOR' : _SDOREFERENCE,
'__module__' : 'schemapages_pb2'
# @@protoc_insertion_point(class_scope:SchemaPages.SDOReference)
})
_sym_db.RegisterMessage(SDOReference)
SDOBaseTypeExpanded = _reflection.GeneratedProtocolMessageType('SDOBaseTypeExpanded', (_message.Message,), {
'DESCRIPTOR' : _SDOBASETYPEEXPANDED,
'__module__' : 'schemapages_pb2'
# @@protoc_insertion_point(class_scope:SchemaPages.SDOBaseTypeExpanded)
})
_sym_db.RegisterMessage(SDOBaseTypeExpanded)
SDOBaseTypeExpandedPropsOnly = _reflection.GeneratedProtocolMessageType('SDOBaseTypeExpandedPropsOnly', (_message.Message,), {
'DESCRIPTOR' : _SDOBASETYPEEXPANDEDPROPSONLY,
'__module__' : 'schemapages_pb2'
# @@protoc_insertion_point(class_scope:SchemaPages.SDOBaseTypeExpandedPropsOnly)
})
_sym_db.RegisterMessage(SDOBaseTypeExpandedPropsOnly)
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
8da6a8f21c3b11a94d0b449b2031f7f8329cc06d | 8ed2a620a7b61a9cef6b059caca4c8c82c792aaf | /training/ocr_training.py | e09d60026ba1a1756f64320e6687121d6d938b47 | [] | no_license | Chappelliu/646project | d2a68ac33af0964d61923fa454f61ef61af5142a | 1bcf7f06427f20d622fd74a411152b8602907f65 | refs/heads/master | 2020-11-24T11:39:17.670142 | 2019-12-15T12:26:11 | 2019-12-15T12:26:11 | 228,128,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,355 | py | #training alogrithm is modified from ALPR in Unscontrained Scenarios https://github.com/sergiomsilva/alpr-unconstrained/blob/master/license-plate-ocr.py
import sys
import numpy as np
import cv2
import keras
from random import choice
from os.path import isfile, isdir, basename, splitext
from os import makedirs
from src.keras_utils import save_model, load_model
from src.label import readShapes
from src.loss import loss
from src.utils import image_files_from_folder, show
from src.sampler import augment_sample, labels2output_map
from src.data_generator import DataGenerator, process_data_item
from pdb import set_trace as pause
def process_data_item(data_item,dim,model_stride):
XX,llp,pts = augment_sample(data_item[0],data_item[1].pts,dim)
YY = labels2output_map(llp,pts,dim,model_stride)
return XX,YY
if __name__ == '__main__':
netname = basename('ocr-trained')
outdir = 'trained_data'
iterations = 30000
batch_size = 32
#load model from the pre-created model file
	model = load_model('646-ocr')
	dim = 208  # network input size in pixels; never defined in the original script (assumed value)
	xshape = (dim,dim,3)
inputs = keras.layers.Input(shape=(dim,dim,3))
outputs = model(inputs)
yshape = tuple([s.value for s in outputs.shape[1:]])
output_dim = yshape[1]
model_stride = dim / output_dim
opt = getattr(keras.optimizers,'Adam')(lr=0.01)
model.compile(loss=loss, optimizer=opt)
#read the database from the input folder
print 'Scanning the data from the input file...'
Files = image_files_from_folder('input')
Data = []
for file in Files:
labfile = splitext(file)[0] + '.txt'
if isfile(labfile):
L = readShapes(labfile)
I = cv2.imread(file)
Data.append([I,L[0]])
dg = DataGenerator( data=Data, \
process_data_item_func=lambda x: process_data_item(x,dim,model_stride),\
xshape=xshape, \
yshape=(yshape[0],yshape[1],yshape[2]+1), \
nthreads=2, \
pool_size=1000, \
min_nsamples=100 )
dg.start()
Xtrain = np.empty((batch_size,dim,dim,3),dtype='single')
Ytrain = np.empty((batch_size,dim/model_stride,dim/model_stride,2*4+1))
model_path_final = '%s/%s_final' % (outdir,netname)
for it in range(iterations):
print 'Iter. %d (of %d)' % (it+1,iterations)
Xtrain,Ytrain = dg.get_batch(batch_size)
train_loss = model.train_on_batch(Xtrain,Ytrain)
print '\tLoss: %f' % train_loss
print 'Saving model (%s)' % model_path_final
save_model(model,model_path_final)
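# Illustrative notes (added; not part of the original training script):
# the data-loading loop above expects a flat ``input`` folder in which every
# image is accompanied by a label file with the same basename and a ``.txt``
# extension, e.g.:
#
#     input/plate_0001.png
#     input/plate_0001.txt   # shapes/corner points parsed by readShapes()
#
# and the final weights are written by save_model() to
# ``trained_data/ocr-trained_final``. The folder and model names are the
# hard-coded values used above; the accepted image formats and label syntax
# are determined by src.utils.image_files_from_folder and src.label.readShapes
# and are assumptions here.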
| [
"[email protected]"
] | |
e109d5583405fbf4abcf7259c341444c299fc477 | d014a0f60ffe2e3e5ca923d7bc3578ecb2c8dd75 | /componentapp/cylinder/migrations/0001_initial.py | ffa39ee44c72b072f82b5e3ba8fbd5e450472aa8 | [] | no_license | shovan777/pressureVessel | d4e625bb1b769659d29face15513ba134b7527bb | 909dd54576d267cd32ea0c0f4e2129e702be6ce7 | refs/heads/master | 2022-12-12T09:13:52.429923 | 2019-04-28T04:11:36 | 2019-04-28T04:11:36 | 151,068,742 | 0 | 3 | null | 2022-12-08T01:21:00 | 2018-10-01T10:05:18 | CSS | UTF-8 | Python | false | false | 492 | py | # Generated by Django 2.1.1 on 2018-10-02 06:20
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Parameter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('thickness', models.IntegerField(default=0)),
],
),
]
| [
"[email protected]"
] | |
17e14b24f0f7ce2bfd2f52cc400fc31c5426beef | 2e70365b8639d6414b1ad452371bbf289e032747 | /ex14.py | a3ee00577a788d2b6ecda8d4a22bb2abc6a7426d | [] | no_license | godumuyiwa/learnpythonthehardway | 023e49869ccb3b67b30a0a6392065e4c4925332c | 35c7ad699c2b452c2230ed306d49165b893a0e73 | refs/heads/master | 2020-04-13T16:47:15.958298 | 2019-01-01T19:22:38 | 2019-01-01T19:22:38 | 163,330,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | from sys import argv
script, user_name, age = argv
prompt = '---->'
print(f"Hi {user_name}, I'm the {script} script.")
print("I'd like to ask you a few questions.")
print(f"Do you like me {user_name}?")
likes = input(prompt)
print(f"Where do you live {user_name}")
lives = input(prompt)
print("What kind of computer do you have?")
computer = input(prompt)
print(f"""
Alright, so you have said {likes} about liking me.
You live in {lives} and you are {age} years old. Not sure where that is.
And you have a {computer} computer. Nice.
""")
| [
"[email protected]"
] | |
482797ecc0beecf9ce5e79b621802f66c79379d9 | 6ac2d54a00b484551971f77fddb9042e4671a656 | /src/pyrad_proc/pyrad/proc/process_spectra.py | 18803d319044ec057a04fe797864d06a9fc04591 | [
"BSD-3-Clause"
] | permissive | Guidicel/pyrad | 15f26a9921b75c2e978949f70694d82ab79f7d67 | 95def11a6cb8fa956034bb227e2ad31dbdb1e7fb | refs/heads/master | 2020-11-25T01:36:56.276000 | 2019-12-10T14:31:41 | 2019-12-10T14:31:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65,062 | py | """
pyrad.proc.process_spectra
==========================
Functions to processes spectral data.
.. autosummary::
:toctree: generated/
process_raw_spectra
process_ifft
process_spectra_point
process_filter_0Doppler
process_filter_srhohv
process_filter_spectra_noise
process_spectra_ang_avg
process_spectral_power
process_spectral_noise
process_spectral_phase
process_spectral_reflectivity
process_spectral_differential_reflectivity
process_spectral_differential_phase
process_spectral_rhohv
process_pol_variables
process_noise_power
process_reflectivity
process_differential_reflectivity
process_differential_phase
process_rhohv
process_Doppler_velocity
process_Doppler_width
"""
from copy import deepcopy
from warnings import warn
import numpy as np
from netCDF4 import num2date
import pyart
from ..io.io_aux import get_datatype_fields, get_fieldname_pyart
def process_raw_spectra(procstatus, dscfg, radar_list=None):
"""
Dummy function that returns the initial input data set
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, _, _, _ = get_datatype_fields(datatypedescr)
break
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
new_dataset = {'radar_out': deepcopy(radar_list[ind_rad])}
return new_dataset, ind_rad
def process_ifft(procstatus, dscfg, radar_list=None):
"""
    Compute the IQ time series from the complex spectra by applying an
    inverse FFT
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
radarnr, _, datatype, _, _ = get_datatype_fields(dscfg['datatype'][0])
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
radar = radar_list[ind_rad]
wind_params = dscfg.get('window', ['None'])
if len(wind_params) == 1:
window = wind_params[0]
if window == 'None':
window = None
else:
try:
window = float(window)
except ValueError:
pass
else:
window = wind_params
for i in range(1, len(window)):
window[i] = float(window[i])
window = tuple(window)
fields_in_list = []
fields_out_list = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn(field_name+' not in radar')
continue
if field_name in ('unfiltered_complex_spectra_hh_ADU',
'complex_spectra_hh_ADU'):
fields_out_list.append('IQ_hh_ADU')
elif field_name in ('unfiltered_complex_spectra_vv_ADU',
'complex_spectra_vv_ADU'):
fields_out_list.append('IQ_vv_ADU')
elif field_name == 'spectral_noise_power_hh_ADU':
fields_out_list.append('IQ_noise_power_hh_ADU')
elif field_name == 'spectral_noise_power_vv_ADU':
fields_out_list.append('IQ_noise_power_vv_ADU')
else:
warn(field_name+' can not be inverse Fourier transformed')
fields_in_list.append(field_name)
radar_out = pyart.retrieve.compute_iq(
radar, fields_in_list, fields_out_list, window=window)
# prepare for exit
new_dataset = {'radar_out': radar_out}
return new_dataset, ind_rad
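# Illustrative configuration sketch (added; not part of the original module):
# the 'window' dataset keyword parsed above accepts either a single window
# name, a single numeric value, or a parameterised window whose extra
# arguments are converted to floats and packed into a tuple, e.g.
#
#     dscfg['window'] = ['None']         # no weighting window
#     dscfg['window'] = ['hann']         # named window
#     dscfg['window'] = ['kaiser', 14.]  # becomes ('kaiser', 14.)
#
# Which window names are valid is decided inside pyart.retrieve.compute_iq
# (assumed to follow the scipy.signal.get_window conventions).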
def process_spectra_point(procstatus, dscfg, radar_list=None):
"""
Obtains the spectra or IQ data at a point location.
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : string. Dataset keyword
The data type where we want to extract the point measurement
latlon : boolean. Dataset keyword
if True position is obtained from latitude, longitude information,
otherwise position is obtained from antenna coordinates
(range, azimuth, elevation). Default False
truealt : boolean. Dataset keyword
if True the user input altitude is used to determine the point of
interest.
if False use the altitude at a given radar elevation ele over the
point of interest. Default True
lon : float. Dataset keyword
the longitude [deg]. Use when latlon is True.
lat : float. Dataset keyword
the latitude [deg]. Use when latlon is True.
alt : float. Dataset keyword
altitude [m MSL]. Use when latlon is True. Default 0.
ele : float. Dataset keyword
radar elevation [deg]. Use when latlon is False or when latlon is
True and truealt is False
azi : float. Dataset keyword
radar azimuth [deg]. Use when latlon is False
rng : float. Dataset keyword
range from radar [m]. Use when latlon is False
AziTol : float. Dataset keyword
azimuthal tolerance to determine which radar azimuth to use [deg].
Default 0.5
EleTol : float. Dataset keyword
elevation tolerance to determine which radar elevation to use
[deg]. Default 0.5
RngTol : float. Dataset keyword
range tolerance to determine which radar bin to use [m]. Default
50.
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the data and metadata at the point of interest
ind_rad : int
radar index
"""
if procstatus == 0:
return None, None
field_names = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_names.append(get_fieldname_pyart(datatype))
ind_rad = int(radarnr[5:8])-1
if procstatus == 2:
if dscfg['initialized'] == 0:
return None, None
# prepare for exit
new_dataset = {
'radar_out': dscfg['global_data']['psr_poi'],
'point_coordinates_WGS84_lon_lat_alt': (
dscfg['global_data']['point_coordinates_WGS84_lon_lat_alt']),
'antenna_coordinates_az_el_r': (
dscfg['global_data']['antenna_coordinates_az_el_r']),
'final': True}
return new_dataset, ind_rad
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid psr')
return None, None
psr = radar_list[ind_rad]
projparams = dict()
projparams.update({'proj': 'pyart_aeqd'})
projparams.update({'lon_0': psr.longitude['data']})
projparams.update({'lat_0': psr.latitude['data']})
truealt = dscfg.get('truealt', True)
latlon = dscfg.get('latlon', False)
if latlon:
lon = dscfg['lon']
lat = dscfg['lat']
alt = dscfg.get('alt', 0.)
latlon_tol = dscfg.get('latlonTol', 1.)
alt_tol = dscfg.get('altTol', 100.)
x, y = pyart.core.geographic_to_cartesian(lon, lat, projparams)
if not truealt:
ke = 4./3. # constant for effective radius
a = 6378100. # earth radius
re = a * ke # effective radius
elrad = dscfg['ele'] * np.pi / 180.
r_ground = np.sqrt(x ** 2. + y ** 2.)
r = r_ground / np.cos(elrad)
alt_psr = psr.altitude['data']+np.sqrt(
r ** 2. + re ** 2. + 2. * r * re * np.sin(elrad)) - re
alt_psr = alt_psr[0]
else:
alt_psr = alt
r, az, el = pyart.core.cartesian_to_antenna(
x, y, alt_psr-psr.altitude['data'])
r = r[0]
az = az[0]
el = el[0]
else:
r = dscfg['rng']
az = dscfg['azi']
el = dscfg['ele']
azi_tol = dscfg.get('AziTol', 0.5)
ele_tol = dscfg.get('EleTol', 0.5)
rng_tol = dscfg.get('RngTol', 50.)
x, y, alt = pyart.core.antenna_to_cartesian(r/1000., az, el)
lon, lat = pyart.core.cartesian_to_geographic(x, y, projparams)
lon = lon[0]
lat = lat[0]
d_az = np.min(np.abs(psr.azimuth['data'] - az))
if d_az > azi_tol:
warn(' No psr bin found for point (az, el, r):(' +
str(az)+', '+str(el)+', '+str(r) +
'). Minimum distance to psr azimuth '+str(d_az) +
' larger than tolerance')
return None, None
d_el = np.min(np.abs(psr.elevation['data'] - el))
if d_el > ele_tol:
warn(' No psr bin found for point (az, el, r):(' +
str(az)+', '+str(el)+', '+str(r) +
'). Minimum distance to psr elevation '+str(d_el) +
' larger than tolerance')
return None, None
d_r = np.min(np.abs(psr.range['data'] - r))
if d_r > rng_tol:
warn(' No psr bin found for point (az, el, r):(' +
str(az)+', '+str(el)+', '+str(r) +
'). Minimum distance to psr range bin '+str(d_r) +
' larger than tolerance')
return None, None
ind_ray = np.argmin(np.abs(psr.azimuth['data'] - az) +
np.abs(psr.elevation['data'] - el))
ind_rng = np.argmin(np.abs(psr.range['data'] - r))
time_poi = num2date(psr.time['data'][ind_ray], psr.time['units'],
psr.time['calendar'])
# initialize dataset
if not dscfg['initialized']:
psr_poi = deepcopy(psr)
# prepare space for field
psr_poi.fields = dict()
for field_name in field_names:
psr_poi.add_field(field_name, deepcopy(psr.fields[field_name]))
psr_poi.fields[field_name]['data'] = np.array([])
# fixed psr objects parameters
psr_poi.range['data'] = np.array([r])
psr_poi.ngates = 1
psr_poi.time['units'] = pyart.io.make_time_unit_str(time_poi)
psr_poi.time['data'] = np.array([])
psr_poi.scan_type = 'poi_time_series'
psr_poi.sweep_number['data'] = np.array([], dtype=np.int32)
psr_poi.nsweeps = 1
psr_poi.sweep_mode['data'] = np.array(['poi_time_series'])
psr_poi.rays_are_indexed = None
psr_poi.ray_angle_res = None
psr_poi.fixed_angle['data'] = np.array([az])
# ray dependent psr objects parameters
psr_poi.sweep_end_ray_index['data'] = np.array([-1], dtype='int32')
psr_poi.rays_per_sweep['data'] = np.array([0], dtype='int32')
psr_poi.azimuth['data'] = np.array([], dtype='float64')
psr_poi.elevation['data'] = np.array([], dtype='float64')
psr_poi.nrays = 0
psr_poi.npulses['data'] = np.array([], dtype=np.int)
if psr_poi.Doppler_velocity is not None:
psr_poi.Doppler_velocity['data'] = np.array([])
if psr_poi.Doppler_frequency is not None:
psr_poi.Doppler_frequency['data'] = np.array([])
dscfg['global_data'] = {
'psr_poi': psr_poi,
'point_coordinates_WGS84_lon_lat_alt': [lon, lat, alt],
'antenna_coordinates_az_el_r': [az, el, r]}
dscfg['initialized'] = 1
psr_poi = dscfg['global_data']['psr_poi']
start_time = num2date(
0, psr_poi.time['units'], psr_poi.time['calendar'])
psr_poi.time['data'] = np.append(
psr_poi.time['data'], (time_poi - start_time).total_seconds())
psr_poi.sweep_end_ray_index['data'][0] += 1
psr_poi.rays_per_sweep['data'][0] += 1
psr_poi.nrays += 1
psr_poi.azimuth['data'] = np.append(psr_poi.azimuth['data'], az)
psr_poi.elevation['data'] = np.append(psr_poi.elevation['data'], el)
psr_poi.gate_longitude['data'] = (
np.ones((psr_poi.nrays, psr_poi.ngates), dtype='float64')*lon)
psr_poi.gate_latitude['data'] = (
np.ones((psr_poi.nrays, psr_poi.ngates), dtype='float64')*lat)
psr_poi.gate_altitude['data'] = np.broadcast_to(
alt, (psr_poi.nrays, psr_poi.ngates))
    for field_name in field_names:
        if field_name not in psr.fields:
            warn('Field '+field_name+' not in psr object')
            # dtype of a missing field is unknown; fall back to a float array
            dtype = float
            poi_data = np.ma.masked_all((1, 1, psr.npulses_max), dtype=dtype)
        else:
            dtype = psr.fields[field_name]['data'].dtype
            poi_data = psr.fields[field_name]['data'][ind_ray, ind_rng, :]
            poi_data = poi_data.reshape(1, 1, psr.npulses_max)
# Put data in radar object
if np.size(psr_poi.fields[field_name]['data']) == 0:
psr_poi.fields[field_name]['data'] = poi_data.reshape(
1, 1, psr_poi.npulses_max)
else:
if psr_poi.npulses_max == psr.npulses_max:
psr_poi.fields[field_name]['data'] = np.ma.append(
psr_poi.fields[field_name]['data'], poi_data, axis=0)
elif psr.npulses_max < psr_poi.npulses_max:
poi_data_aux = np.ma.masked_all(
(1, 1, psr_poi.npulses_max), dtype=dtype)
poi_data_aux[0, 0, 0:psr.npulses_max] = poi_data
psr_poi.fields[field_name]['data'] = np.ma.append(
psr_poi.fields[field_name]['data'], poi_data_aux, axis=0)
else:
poi_data_aux = np.ma.masked_all(
(psr_poi.nrays, 1, psr.npulses_max), dtype=dtype)
poi_data_aux[0:psr_poi.nrays-1, :, 0:psr_poi.npulses_max] = (
psr_poi.fields[field_name]['data'])
poi_data_aux[psr_poi.nrays-1, :, :] = poi_data
psr_poi.fields[field_name]['data'] = poi_data_aux
psr_poi.npulses['data'] = np.append(
psr_poi.npulses['data'], psr.npulses['data'][ind_ray])
if psr_poi.Doppler_velocity is not None:
if np.size(psr_poi.Doppler_velocity['data']) == 0:
psr_poi.Doppler_velocity['data'] = (
psr.Doppler_velocity['data'][ind_ray, :].reshape(
1, psr_poi.npulses_max))
else:
Doppler_data = psr.Doppler_velocity['data'][ind_ray, :]
Doppler_data = Doppler_data.reshape(1, psr.npulses_max)
if psr_poi.npulses_max == psr.npulses_max:
psr_poi.Doppler_velocity['data'] = np.ma.append(
psr_poi.Doppler_velocity['data'],
Doppler_data, axis=0)
elif psr.npulses_max < psr_poi.npulses_max:
Doppler_aux = np.ma.masked_all((1, psr_poi.npulses_max))
Doppler_aux[0, 0:psr.npulses_max] = Doppler_data
psr_poi.Doppler_velocity['data'] = np.ma.append(
psr_poi.Doppler_velocity['data'], Doppler_aux, axis=0)
else:
Doppler_aux = np.ma.masked_all(
(psr_poi.nrays, psr.npulses_max))
Doppler_aux[0:psr_poi.nrays-1, 0:psr_poi.npulses_max] = (
psr_poi.Doppler_velocity['data'])
Doppler_aux[psr_poi.nrays-1, :] = Doppler_data
psr_poi.Doppler_velocity['data'] = Doppler_aux
if psr_poi.Doppler_frequency is not None:
if np.size(psr_poi.Doppler_frequency['data']) == 0:
psr_poi.Doppler_frequency['data'] = (
psr.Doppler_frequency['data'][ind_ray, :].reshape(
1, psr_poi.npulses_max))
else:
Doppler_data = psr.Doppler_frequency['data'][ind_ray, :]
Doppler_data = Doppler_data.reshape(1, psr.npulses_max)
if psr_poi.npulses_max == psr.npulses_max:
psr_poi.Doppler_frequency['data'] = np.ma.append(
psr_poi.Doppler_frequency['data'],
Doppler_data, axis=0)
elif psr.npulses_max < psr_poi.npulses_max:
Doppler_aux = np.ma.masked_all((1, psr_poi.npulses_max))
Doppler_aux[0, 0:psr.npulses_max] = Doppler_data
psr_poi.Doppler_frequency['data'] = np.ma.append(
psr_poi.Doppler_frequency['data'], Doppler_aux, axis=0)
else:
Doppler_aux = np.ma.masked_all(
(psr_poi.nrays, psr.npulses_max))
Doppler_aux[0:psr_poi.nrays-1, 0:psr_poi.npulses_max] = (
psr_poi.Doppler_frequency['data'])
Doppler_aux[psr_poi.nrays-1, :] = Doppler_data
psr_poi.Doppler_frequency['data'] = Doppler_aux
psr_poi.npulses_max = max(psr_poi.npulses_max, psr.npulses_max)
dscfg['global_data']['psr_poi'] = psr_poi
# prepare for exit
new_dataset = {
'radar_out': psr_poi,
'point_coordinates_WGS84_lon_lat_alt': (
dscfg['global_data']['point_coordinates_WGS84_lon_lat_alt']),
'antenna_coordinates_az_el_r': (
dscfg['global_data']['antenna_coordinates_az_el_r']),
'final': False}
return new_dataset, ind_rad
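# Illustrative configuration sketch (added; not part of the original module):
# the two positioning modes read above can be selected as follows. The keyword
# names are the ones used by this function; the numeric values and the
# datatype descriptor format are only examples.
#
#     # antenna coordinates (latlon False, the default)
#     dscfg = {'datatype': ['RADAR001:ShhADU'],
#              'ele': 90., 'azi': 0., 'rng': 2000.,
#              'AziTol': 0.5, 'EleTol': 0.5, 'RngTol': 50.}
#
#     # geographic coordinates (latlon True); with truealt False the altitude
#     # is recomputed from 'ele' using the 4/3 effective earth radius model
#     dscfg = {'datatype': ['RADAR001:ShhADU'],
#              'latlon': True, 'lon': 8.96, 'lat': 46.04, 'alt': 1600.,
#              'truealt': False, 'ele': 10.}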
def process_filter_0Doppler(procstatus, dscfg, radar_list=None):
"""
Function to filter the 0-Doppler line bin and neighbours of the
Doppler spectra
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
filter_width : float
The Doppler filter width. Default 0.
filter_units : str
Can be 'm/s' or 'Hz'. Default 'm/s'
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
field_name_list = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_name_list.append(get_fieldname_pyart(datatype))
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
filter_width = dscfg.get('filter_width', 0.)
filter_units = dscfg.get('filter_units', 'm/s')
if filter_units == 'm/s':
axis = psr.Doppler_velocity['data']
else:
axis = psr.Doppler_frequency['data']
fields = dict()
for field_name in field_name_list:
if field_name not in psr.fields:
warn('Unable to filter 0-Doppler. Missing field '+field_name)
continue
field_name_aux = field_name.replace('unfiltered_', '')
field = pyart.config.get_metadata(field_name_aux)
field['data'] = deepcopy(psr.fields[field_name]['data'])
for ray in range(psr.nrays):
ind = np.ma.where(np.logical_and(
axis[ray, :] >= -filter_width/2.,
axis[ray, :] <= filter_width/2.))
field['data'][ray, :, ind] = np.ma.masked
fields.update({field_name_aux: field})
# prepare for exit
new_dataset = {'radar_out': deepcopy(psr)}
new_dataset['radar_out'].fields = dict()
for field_name in fields.keys():
new_dataset['radar_out'].add_field(field_name, fields[field_name])
return new_dataset, ind_rad
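# Worked example (added; not part of the original module): with
#
#     dscfg['filter_width'] = 0.5   # m/s
#     dscfg['filter_units'] = 'm/s'
#
# the loop above masks every spectral bin whose Doppler velocity falls in
# [-0.25, 0.25] m/s, i.e. a 0.5 m/s wide notch centred on the 0-Doppler line;
# with filter_units 'Hz' the same notch is applied on the frequency axis.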
def process_filter_srhohv(procstatus, dscfg, radar_list=None):
"""
Filter Doppler spectra as a function of spectral RhoHV
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
sRhoHV_threshold : float
            Data with sRhoHV modulus at or below this threshold will be
            filtered. Default 0.9.
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
field_name_list = []
sRhoHV_found = False
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('sRhoHV', 'sRhoHVu') and not sRhoHV_found:
sRhoHV_field = get_fieldname_pyart(datatype)
sRhoHV_found = True
else:
field_name_list.append(get_fieldname_pyart(datatype))
if not sRhoHV_found:
warn('sRhoHV field is required for sRhoHV filtering')
return None, None
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if sRhoHV_field not in psr.fields:
        warn('Unable to apply sRhoHV filter. Missing field ' +
sRhoHV_field)
return None, None
sRhoHV_threshold = dscfg.get('sRhoHV_threshold', 0.9)
sRhoHV = psr.fields[sRhoHV_field]['data']
fields = dict()
for field_name in field_name_list:
if field_name not in psr.fields:
warn('Unable to filter according to sRhoHV. Missing field ' +
field_name)
continue
field_name_aux = field_name.replace('unfiltered_', '')
field = pyart.config.get_metadata(field_name_aux)
field['data'] = deepcopy(psr.fields[field_name]['data'])
field['data'][np.ma.abs(sRhoHV) <= sRhoHV_threshold] = np.ma.masked
fields.update({field_name_aux: field})
# prepare for exit
new_dataset = {'radar_out': deepcopy(psr)}
new_dataset['radar_out'].fields = dict()
for field_name in fields.keys():
new_dataset['radar_out'].add_field(field_name, fields[field_name])
return new_dataset, ind_rad
def process_filter_spectra_noise(procstatus, dscfg, radar_list=None):
"""
Filter the noise of the Doppler spectra by clipping any data below
the noise level plus a margin
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
clipping_level : float
The clipping level [dB above noise level]. Default 10.
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
field_name_list = []
signal_found = False
noise_found = False
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if (datatype in ('ShhADU', 'SvvADU', 'ShhADUu', 'SvvADUu') and
not signal_found):
signal_field = get_fieldname_pyart(datatype)
signal_found = True
elif datatype in ('sNADUh', 'sNADUv') and not noise_found:
noise_field = get_fieldname_pyart(datatype)
noise_found = True
else:
field_name_list.append(get_fieldname_pyart(datatype))
if not signal_found or not noise_found:
warn('Signal and noise fields are required for noise filtering')
return None, None
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if signal_field not in psr.fields or noise_field not in psr.fields:
        warn('Unable to apply spectral noise filter. Missing fields')
return None, None
clipping_level = dscfg.get('clipping_level', 10.)
# get Doppler bins below clipping level
clip_pwr = (
psr.fields[noise_field]['data']*np.power(10., 0.1*clipping_level))
s_pwr = pyart.retrieve.compute_spectral_power(
psr, units='ADU', signal_field=signal_field,
noise_field=noise_field)
mask = np.ma.less_equal(s_pwr['data'], clip_pwr)
# filter data
new_dataset = {'radar_out': deepcopy(psr)}
new_dataset['radar_out'].fields = dict()
for field_name in field_name_list:
if field_name not in psr.fields:
warn('Unable to filter field '+field_name)
continue
new_dataset['radar_out'].add_field(
field_name, psr.fields[field_name])
new_dataset['radar_out'].fields[field_name]['data'][mask] = (
np.ma.masked)
return new_dataset, ind_rad
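# Worked example (added; not part of the original module): the default
# clipping_level of 10 dB corresponds to a linear factor 10**(0.1*10) = 10,
# so a Doppler bin is kept only where its spectral power exceeds ten times
# the estimated spectral noise power at that gate, e.g.
#
#     dscfg['clipping_level'] = 10.   # keep bins above noise + 10 dB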
def process_spectra_ang_avg(procstatus, dscfg, radar_list=None):
"""
Function to average the spectra over the rays. This function is
intended mainly for vertically pointing scans. The function assumes
    the volume is composed of a single sweep; it averages over the number
    of rays specified by the user and produces a single-ray output.
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
navg : int
Number of spectra to average. If -1 all spectra will be averaged.
Default -1.
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
field_name_list = []
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
field_name_list.append(get_fieldname_pyart(datatype))
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
navg = dscfg.get('navg', -1)
# keep only fields of interest
psr_aux = deepcopy(psr)
psr_aux.fields = dict()
for field_name in field_name_list:
if field_name not in psr.fields:
warn('Field '+field_name+' missing')
continue
psr_aux.add_field(field_name, psr.fields[field_name])
psr_aux = pyart.util.interpol_spectra(psr_aux)
if navg == -1:
navg = psr.nrays
elif navg > psr.nrays:
warn('Number of rays '+str(psr.nrays)+' smaller than number of '
'desired spectra to average '+str(navg))
navg = psr.nrays
for field_name in psr_aux.fields.keys():
data_mean = np.ma.mean(
psr_aux.fields[field_name]['data'][
0:navg, :, 0:psr_aux.npulses_max], axis=0)
psr_aux.fields[field_name]['data'] = np.ma.masked_all(
(1, psr_aux.ngates, psr_aux.npulses_max),
dtype=psr_aux.fields[field_name]['data'].dtype)
psr_aux.fields[field_name]['data'][0, :, :] = data_mean
psr_aux.time['data'] = np.array([psr_aux.time['data'][int(navg/2)]])
psr_aux.azimuth['data'] = np.array([0], dtype=np.float32)
psr_aux.elevation['data'] = np.array(
[psr_aux.elevation['data'][int(navg/2)]])
psr_aux.nrays = 1
psr_aux.sweep_end_ray_index['data'] = np.array([0.], dtype=np.int32)
psr_aux.init_rays_per_sweep()
psr_aux.init_gate_x_y_z()
psr_aux.init_gate_longitude_latitude()
psr_aux.init_gate_altitude()
if psr_aux.Doppler_velocity is not None:
psr_aux.Doppler_velocity['data'] = np.ma.expand_dims(
psr_aux.Doppler_velocity['data'][0, :], axis=0)
if psr_aux.Doppler_frequency is not None:
psr_aux.Doppler_frequency['data'] = np.ma.expand_dims(
psr_aux.Doppler_frequency['data'][0, :], axis=0)
# prepare for exit
new_dataset = {'radar_out': psr_aux}
return new_dataset, ind_rad
def process_spectral_power(procstatus, dscfg, radar_list=None):
"""
Computes the spectral power
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
units : str
The units of the returned signal. Can be 'ADU', 'dBADU' or 'dBm'
subtract_noise : Bool
If True noise will be subtracted from the signal
smooth_window : int or None
Size of the moving Gaussian smoothing window. If none no smoothing
will be applied
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
noise_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('ShhADU', 'SvvADU', 'ShhADUu', 'SvvADUu'):
signal_field = get_fieldname_pyart(datatype)
elif datatype in ('sNADUh', 'sNADUv'):
noise_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if signal_field not in psr.fields:
warn('Unable to obtain spectral signal power. Missing field ' +
signal_field)
return None, None
units = dscfg.get('units', 'dBADU')
subtract_noise = dscfg.get('subtract_noise', False)
smooth_window = dscfg.get('smooth_window', None)
s_pwr = pyart.retrieve.compute_spectral_power(
psr, units=units, subtract_noise=subtract_noise,
smooth_window=smooth_window, signal_field=signal_field,
noise_field=noise_field)
# prepare for exit
new_dataset = {'radar_out': deepcopy(psr)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(s_pwr['standard_name'], s_pwr)
return new_dataset, ind_rad
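# Illustrative configuration sketch (added; not part of the original module):
# typical keywords read above. The window length value is only an example.
#
#     dscfg['units'] = 'dBADU'         # or 'ADU' / 'dBm'
#     dscfg['subtract_noise'] = True   # needs a sNADUh/sNADUv input field
#     dscfg['smooth_window'] = 11      # size of the moving smoothing window, in Doppler bins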
def process_spectral_noise(procstatus, dscfg, radar_list=None):
"""
Computes the spectral noise
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
units : str
The units of the returned signal. Can be 'ADU', 'dBADU' or 'dBm'
navg : int
Number of spectra averaged
rmin : int
Range from which the data is used to estimate the noise
nnoise_min : int
Minimum number of samples to consider the estimated noise power
valid
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('ShhADU', 'SvvADU', 'ShhADUu', 'SvvADUu'):
signal_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if signal_field not in psr.fields:
warn('Unable to obtain spectral noise power. Missing field ' +
signal_field)
return None, None
units = dscfg.get('units', 'ADU')
navg = dscfg.get('navg', 1)
rmin = dscfg.get('rmin', 0.)
nnoise_min = dscfg.get('nnoise_min', 100)
s_pwr = pyart.retrieve.compute_spectral_noise(
psr, units=units, navg=navg, rmin=rmin, nnoise_min=nnoise_min,
signal_field=signal_field)
# prepare for exit
new_dataset = {'radar_out': deepcopy(psr)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(s_pwr['standard_name'], s_pwr)
return new_dataset, ind_rad
def process_spectral_phase(procstatus, dscfg, radar_list=None):
"""
Computes the spectral phase
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('ShhADU', 'SvvADU', 'ShhADUu', 'SvvADUu'):
signal_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if signal_field not in psr.fields:
warn('Unable to obtain spectral phase. Missing field ' +
signal_field)
return None, None
s_phase = pyart.retrieve.compute_spectral_phase(
psr, signal_field=signal_field)
# prepare for exit
new_dataset = {'radar_out': deepcopy(psr)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(s_phase['standard_name'], s_phase)
return new_dataset, ind_rad
def process_spectral_reflectivity(procstatus, dscfg, radar_list=None):
"""
Computes spectral reflectivity
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
subtract_noise : Bool
If True noise will be subtracted from the signal
smooth_window : int or None
Size of the moving Gaussian smoothing window. If none no smoothing
will be applied
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
noise_field = None
signal_field = None
pwr_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('ShhADU', 'SvvADU', 'ShhADUu', 'SvvADUu'):
signal_field = get_fieldname_pyart(datatype)
elif datatype in ('sNADUh', 'sNADUv'):
noise_field = get_fieldname_pyart(datatype)
elif datatype in ('sPhhADU', 'sPvvADU', 'sPhhADUu', 'sPvvADUu'):
pwr_field = get_fieldname_pyart(datatype)
if pwr_field is None and signal_field is None:
warn('Either signal or power fields must be specified')
return None, None
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
compute_power = True
if pwr_field is not None:
compute_power = False
if compute_power and signal_field not in psr.fields:
warn('Unable to obtain spectral reflectivity. Missing field ' +
signal_field)
return None, None
if not compute_power and pwr_field not in psr.fields:
warn('Unable to obtain spectral reflectivity. Missing field ' +
pwr_field)
return None, None
subtract_noise = dscfg.get('subtract_noise', False)
smooth_window = dscfg.get('smooth_window', None)
sdBZ = pyart.retrieve.compute_spectral_reflectivity(
psr, compute_power=compute_power, subtract_noise=subtract_noise,
smooth_window=smooth_window, pwr_field=pwr_field,
signal_field=signal_field, noise_field=noise_field)
# prepare for exit
new_dataset = {'radar_out': deepcopy(psr)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(sdBZ['standard_name'], sdBZ)
return new_dataset, ind_rad
def process_spectral_differential_reflectivity(procstatus, dscfg,
radar_list=None):
"""
Computes spectral differential reflectivity
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
subtract_noise : Bool
If True noise will be subtracted from the signal
smooth_window : int or None
Size of the moving Gaussian smoothing window. If none no smoothing
will be applied
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
noise_h_field = None
noise_v_field = None
signal_h_field = None
signal_v_field = None
pwr_h_field = None
pwr_v_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('ShhADU', 'ShhADUu'):
signal_h_field = get_fieldname_pyart(datatype)
elif datatype in ('SvvADU', 'SvvADUu'):
signal_v_field = get_fieldname_pyart(datatype)
elif datatype == 'sNADUh':
noise_h_field = get_fieldname_pyart(datatype)
elif datatype == 'sNADUv':
noise_v_field = get_fieldname_pyart(datatype)
elif datatype in ('sPhhADU', 'sPhhADUu'):
pwr_h_field = get_fieldname_pyart(datatype)
elif datatype in ('sPvvADU', 'sPvvADUu'):
pwr_v_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
compute_power = True
if pwr_h_field is not None and pwr_v_field is not None:
compute_power = False
if (compute_power and (signal_h_field not in psr.fields or
signal_v_field not in psr.fields)):
warn('Unable to obtain spectral differential reflectivity. ' +
'Missing fields')
return None, None
if (not compute_power and (pwr_h_field not in psr.fields or
pwr_v_field not in psr.fields)):
warn('Unable to obtain spectral differential reflectivity. ' +
'Missing fields')
return None, None
subtract_noise = dscfg.get('subtract_noise', False)
smooth_window = dscfg.get('smooth_window', None)
sZDR = pyart.retrieve.compute_spectral_differential_reflectivity(
psr, compute_power=compute_power, subtract_noise=subtract_noise,
smooth_window=smooth_window, pwr_h_field=pwr_h_field,
pwr_v_field=pwr_v_field, signal_h_field=signal_h_field,
signal_v_field=signal_v_field, noise_h_field=noise_h_field,
noise_v_field=noise_v_field)
# prepare for exit
new_dataset = {'radar_out': deepcopy(psr)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(sZDR['standard_name'], sZDR)
return new_dataset, ind_rad
def process_spectral_differential_phase(procstatus, dscfg, radar_list=None):
"""
Computes the spectral differential phase
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
signal_h_field = None
signal_v_field = None
srhohv_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('ShhADU', 'ShhADUu'):
signal_h_field = get_fieldname_pyart(datatype)
elif datatype in ('SvvADU', 'SvvADUu'):
signal_v_field = get_fieldname_pyart(datatype)
elif datatype in ('sRhoHV', 'sRhoHVu'):
srhohv_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
use_rhohv = False
if srhohv_field is not None:
use_rhohv = True
if (not use_rhohv and (signal_h_field not in psr.fields or
signal_v_field not in psr.fields)):
warn('Unable to obtain spectral signal differential phase. ' +
'Missing fields')
return None, None
if use_rhohv and srhohv_field not in psr.fields:
warn('Unable to obtain spectral signal differential phase. ' +
'Missing fields')
return None, None
sPhiDP = pyart.retrieve.compute_spectral_differential_phase(
psr, use_rhohv=use_rhohv, srhohv_field=srhohv_field,
signal_h_field=signal_h_field, signal_v_field=signal_v_field)
# prepare for exit
new_dataset = {'radar_out': deepcopy(psr)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(sPhiDP['standard_name'], sPhiDP)
return new_dataset, ind_rad
def process_spectral_rhohv(procstatus, dscfg, radar_list=None):
"""
Computes the spectral RhoHV
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
subtract_noise : Bool
If True noise will be subtracted from the signal
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
noise_h_field = None
noise_v_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('ShhADU', 'ShhADUu'):
signal_h_field = get_fieldname_pyart(datatype)
elif datatype in ('SvvADU', 'SvvADUu'):
signal_v_field = get_fieldname_pyart(datatype)
elif datatype == 'sNADUh':
noise_h_field = get_fieldname_pyart(datatype)
elif datatype == 'sNADUv':
noise_v_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if signal_h_field not in psr.fields or signal_v_field not in psr.fields:
warn('Unable to obtain spectral RhoHV. ' +
'Missing fields')
return None, None
subtract_noise = dscfg.get('subtract_noise', False)
sRhoHV = pyart.retrieve.compute_spectral_rhohv(
psr, subtract_noise=subtract_noise, signal_h_field=signal_h_field,
signal_v_field=signal_v_field, noise_h_field=noise_h_field,
noise_v_field=noise_v_field)
# prepare for exit
new_dataset = {'radar_out': deepcopy(psr)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(sRhoHV['standard_name'], sRhoHV)
return new_dataset, ind_rad
def process_pol_variables(procstatus, dscfg, radar_list=None):
"""
Computes the polarimetric variables from the complex spectra
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
subtract_noise : Bool
If True noise will be subtracted from the signal
smooth_window : int or None
Size of the moving Gaussian smoothing window. If none no smoothing
will be applied
variables : list of str
list of variables to compute. Default dBZ
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
noise_h_field = None
noise_v_field = None
signal_h_field = None
signal_v_field = None
pwr_h_field = None
pwr_v_field = None
srhohv_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('ShhADU', 'ShhADUu'):
signal_h_field = get_fieldname_pyart(datatype)
elif datatype in ('SvvADU', 'SvvADUu'):
signal_v_field = get_fieldname_pyart(datatype)
elif datatype == 'sNADUh':
noise_h_field = get_fieldname_pyart(datatype)
elif datatype == 'sNADUv':
noise_v_field = get_fieldname_pyart(datatype)
elif datatype in ('sPhhADU', 'sPhhADUu'):
pwr_h_field = get_fieldname_pyart(datatype)
elif datatype in ('sPvvADU', 'sPvvADUu'):
pwr_v_field = get_fieldname_pyart(datatype)
elif datatype in ('sRhoHV', 'sRhoHVu'):
srhohv_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
use_pwr = False
if (pwr_h_field is not None or pwr_v_field is not None or
srhohv_field is not None):
use_pwr = True
if (not use_pwr and (signal_h_field not in psr.fields and
signal_v_field not in psr.fields)):
warn('Unable to obtain polarimetric variables. Missing fields')
return None, None
if (use_pwr and (pwr_h_field not in psr.fields and
                     pwr_v_field not in psr.fields and
srhohv_field not in psr.fields)):
warn('Unable to obtain polarimetric variables. Missing fields')
return None, None
subtract_noise = dscfg.get('subtract_noise', False)
smooth_window = dscfg.get('smooth_window', None)
variables = dscfg.get('variables', ['dBZ'])
fields_list = []
for variable in variables:
fields_list.append(get_fieldname_pyart(variable))
radar = pyart.retrieve.compute_pol_variables(
psr, fields_list, use_pwr=use_pwr, subtract_noise=subtract_noise,
smooth_window=smooth_window, srhohv_field=srhohv_field,
pwr_h_field=pwr_h_field, pwr_v_field=pwr_v_field,
signal_h_field=signal_h_field, signal_v_field=signal_v_field,
noise_h_field=noise_h_field, noise_v_field=noise_v_field)
# prepare for exit
new_dataset = {'radar_out': radar}
return new_dataset, ind_rad
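# Illustrative configuration sketch (added; not part of the original module):
# the 'variables' keyword lists the output moments as pyrad datatype
# mnemonics, which get_fieldname_pyart() maps to Py-ART field names. The exact
# set of accepted mnemonics depends on pyrad's io_aux mapping; a typical
# request could look like
#
#     dscfg['variables'] = ['dBZ', 'ZDR', 'RhoHV', 'PhiDP', 'V', 'W']
#
# with 'dBZ' being the default when the keyword is omitted.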
def process_noise_power(procstatus, dscfg, radar_list=None):
"""
Computes the noise power from the spectra
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
units : str
The units of the returned signal. Can be 'ADU', 'dBADU' or 'dBm'
navg : int
Number of spectra averaged
rmin : int
Range from which the data is used to estimate the noise
nnoise_min : int
Minimum number of samples to consider the estimated noise power
valid
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('ShhADU', 'SvvADU', 'ShhADUu', 'SvvADUu'):
signal_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if signal_field not in psr.fields:
warn('Unable to obtain spectral noise power. Missing field ' +
signal_field)
return None, None
units = dscfg.get('units', 'ADU')
navg = dscfg.get('navg', 1)
rmin = dscfg.get('rmin', 0.)
nnoise_min = dscfg.get('nnoise_min', 100)
noise = pyart.retrieve.compute_noise_power(
psr, units=units, navg=navg, rmin=rmin, nnoise_min=nnoise_min,
signal_field=signal_field)
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(psr)}
new_dataset['radar_out'].add_field(noise['standard_name'], noise)
return new_dataset, ind_rad
def process_reflectivity(procstatus, dscfg, radar_list=None):
"""
Computes reflectivity from the spectral reflectivity
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('sdBZ', 'sdBZv', 'sdBuZ', 'sdBuZv'):
sdBZ_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if sdBZ_field not in psr.fields:
warn('Unable to obtain reflectivity. ' +
'Missing field '+sdBZ_field)
return None, None
dBZ = pyart.retrieve.compute_reflectivity(
psr, sdBZ_field=sdBZ_field)
reflectivity_field = 'reflectivity'
if datatype in ('sdBZv', 'sdBuZv'):
        reflectivity_field += '_vv'
if datatype in ('sdBuZ', 'sdBuZv'):
reflectivity_field = 'unfiltered_'+reflectivity_field
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(psr)}
new_dataset['radar_out'].add_field(reflectivity_field, dBZ)
return new_dataset, ind_rad
def process_differential_reflectivity(procstatus, dscfg, radar_list=None):
"""
Computes differential reflectivity from the horizontal and vertical
spectral reflectivity
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('sdBZ', 'sdBuZ'):
sdBZ_field = get_fieldname_pyart(datatype)
elif datatype in ('sdBZv', 'sdBuZv'):
sdBZv_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if sdBZ_field not in psr.fields or sdBZv_field not in psr.fields:
warn('Unable to obtain differential reflectivity. ' +
'Missing fields.')
return None, None
zdr = pyart.retrieve.compute_differential_reflectivity(
psr, sdBZ_field=sdBZ_field, sdBZv_field=sdBZv_field)
zdr_field = 'differential_reflectivity'
if 'unfiltered' in sdBZ_field:
zdr_field = 'unfiltered_'+zdr_field
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(psr)}
new_dataset['radar_out'].add_field(zdr_field, zdr)
return new_dataset, ind_rad
def process_differential_phase(procstatus, dscfg, radar_list=None):
"""
Computes the differential phase from the spectral differential phase and
the spectral reflectivity
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('sdBZ', 'sdBZv', 'sdBuZ', 'sdBuZv'):
sdBZ_field = get_fieldname_pyart(datatype)
elif datatype in ('sPhiDP', 'sPhiDPu'):
sPhiDP_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if sdBZ_field not in psr.fields or sPhiDP_field not in psr.fields:
warn('Unable to obtain PhiDP. Missing fields')
return None, None
uphidp = pyart.retrieve.compute_differential_phase(
psr, sdBZ_field=sdBZ_field, sPhiDP_field=sPhiDP_field)
uphidp_field = 'uncorrected_differential_phase'
if 'unfiltered' in sPhiDP_field:
uphidp_field = 'uncorrected_unfiltered_differential_phase'
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(psr)}
new_dataset['radar_out'].add_field(uphidp_field, uphidp)
return new_dataset, ind_rad
def process_rhohv(procstatus, dscfg, radar_list=None):
"""
    Computes RhoHV from the complex spectra
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
subtract_noise : Bool
If True noise will be subtracted from the signal
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
noise_h_field = None
noise_v_field = None
signal_h_field = None
signal_v_field = None
pwr_h_field = None
pwr_v_field = None
srhohv_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('ShhADU', 'ShhADUu'):
signal_h_field = get_fieldname_pyart(datatype)
elif datatype in ('SvvADU', 'SvvADUu'):
signal_v_field = get_fieldname_pyart(datatype)
elif datatype == 'sNADUh':
noise_h_field = get_fieldname_pyart(datatype)
elif datatype == 'sNADUv':
noise_v_field = get_fieldname_pyart(datatype)
elif datatype in ('sPhhADU', 'sPhhADUu'):
pwr_h_field = get_fieldname_pyart(datatype)
elif datatype in ('sPvvADU', 'sPvvADUu'):
pwr_v_field = get_fieldname_pyart(datatype)
elif datatype in ('sRhoHV', 'sRhoHVu'):
srhohv_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if srhohv_field is not None:
use_rhohv = True
rhohv_field = 'cross_correlation_ratio'
if 'unfiltered' in srhohv_field:
rhohv_field = 'unfiltered_cross_correlation_ratio'
else:
use_rhohv = False
rhohv_field = 'cross_correlation_ratio'
if 'unfiltered' in signal_h_field:
rhohv_field = 'unfiltered_cross_correlation_ratio'
if (not use_rhohv and (signal_h_field not in psr.fields or
signal_v_field not in psr.fields)):
warn('Unable to obtain RhoHV. Missing fields')
return None, None
if use_rhohv and (srhohv_field not in psr.fields or
pwr_h_field not in psr.fields or
pwr_v_field not in psr.fields):
warn('Unable to obtain RhoHV. Missing fields')
return None, None
subtract_noise = dscfg.get('subtract_noise', False)
rhohv = pyart.retrieve.compute_rhohv(
psr, use_rhohv=use_rhohv, subtract_noise=subtract_noise,
srhohv_field=srhohv_field, pwr_h_field=pwr_h_field,
pwr_v_field=pwr_v_field, signal_h_field=signal_h_field,
signal_v_field=signal_v_field, noise_h_field=noise_h_field,
noise_v_field=noise_v_field)
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(psr)}
new_dataset['radar_out'].add_field(rhohv_field, rhohv)
return new_dataset, ind_rad
def process_Doppler_velocity(procstatus, dscfg, radar_list=None):
"""
Compute the Doppler velocity from the spectral reflectivity
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('sdBZ', 'sdBZv', 'sdBuZ', 'sdBuZv'):
sdBZ_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if sdBZ_field not in psr.fields:
warn('Unable to obtain Doppler velocity. ' +
'Missing field '+sdBZ_field)
return None, None
vel = pyart.retrieve.compute_Doppler_velocity(
psr, sdBZ_field=sdBZ_field)
vel_field = 'velocity'
if datatype in ('sdBZv', 'sdBuZv'):
vel_field += '_vv'
if datatype in ('sdBuZ', 'sdBuZv'):
vel_field = 'unfiltered_'+vel_field
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(psr)}
new_dataset['radar_out'].add_field(vel_field, vel)
return new_dataset, ind_rad
def process_Doppler_width(procstatus, dscfg, radar_list=None):
"""
Compute the Doppler spectrum width from the spectral reflectivity
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted configuration keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of spectra objects
Optional. list of spectra objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('sdBZ', 'sdBZv', 'sdBuZ', 'sdBuZv'):
sdBZ_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if (radar_list is None) or (radar_list[ind_rad] is None):
warn('ERROR: No valid radar')
return None, None
psr = radar_list[ind_rad]
if sdBZ_field not in psr.fields:
warn('Unable to obtain Doppler spectrum width. ' +
'Missing field '+sdBZ_field)
return None, None
width = pyart.retrieve.compute_Doppler_width(
psr, sdBZ_field=sdBZ_field)
width_field = 'spectrum_width'
if datatype in ('sdBZv', 'sdBuZv'):
width_field += '_vv'
if datatype in ('sdBuZ', 'sdBuZv'):
width_field = 'unfiltered_'+width_field
# prepare for exit
new_dataset = {'radar_out': pyart.util.radar_from_spectra(psr)}
new_dataset['radar_out'].add_field(width_field, width)
return new_dataset, ind_rad
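# --- Illustrative note (assumption, not part of the original module) ---
# Both retrievals above reduce the spectral reflectivity to standard Doppler
# moments. With spectral power s_k in velocity bin v_k, the usual estimators are:
#   mean velocity:   v_mean  = sum_k(s_k * v_k) / sum_k(s_k)
#   spectrum width:  sigma_v = sqrt(sum_k(s_k * (v_k - v_mean)**2) / sum_k(s_k))
# The exact weighting implemented inside pyart.retrieve is assumed, not verified here.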
| [
"[email protected]"
] | |
63bbe3a79c758a557d44b17a3559c97ae68c211a | 21400be8b89db652343673086ad74440f2c158cc | /TestAPI/asgi.py | 3aef68716bc5a15ebe04ec79a422c11aeba9ed02 | [] | no_license | terminator-droid/TestAPI | 1e7c890a2b0afdb00fff6240d20581234b2a097b | 33052e6c5742dc39c9fb2e7ffa18a36685b0cca3 | refs/heads/main | 2023-09-05T20:24:45.025790 | 2021-11-11T19:01:28 | 2021-11-11T19:01:28 | 427,109,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
ASGI config for TestAPI project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TestAPI.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
76c6726b4c4b2e498fb2ca77e6c9905dd6660ea3 | ea919ac3f7c91f62fdf849b67edf02a9dc3d034e | /examples/ent-rsvp/backend/src/schema/versions/19e7969c6a61_2021129232023_add_auth_codes_table.py | 36e6b3c14f0cf83cea826f77e26c0e3898c8098a | [
"MIT"
] | permissive | lazytype/ent | ae8e03906c76cad33274452687fdc5a8bcf5e97a | d9729f3bb5c2410021e58dfdac0ef03efb439edb | refs/heads/main | 2023-08-28T17:23:23.532296 | 2021-11-03T20:33:35 | 2021-11-03T20:33:35 | 425,598,003 | 0 | 0 | MIT | 2021-11-07T19:35:12 | 2021-11-07T19:35:11 | null | UTF-8 | Python | false | false | 1,682 | py | # Code generated by github.com/lolopinto/ent/ent, DO NOT edit.
"""add auth_codes table
Revision ID: 19e7969c6a61
Revises: 2a5627d47472
Create Date: 2021-01-29 23:20:23.550366+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '19e7969c6a61'
down_revision = '2a5627d47472'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('auth_codes',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('created_at', sa.TIMESTAMP(), nullable=False),
sa.Column('updated_at', sa.TIMESTAMP(), nullable=False),
sa.Column('code', sa.Text(), nullable=False),
sa.Column('guest_id', postgresql.UUID(), nullable=False),
sa.Column('email_address', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['guest_id'], [
'guests.id'], name='auth_codes_guest_id_fkey', ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id', name='auth_codes_id_pkey'),
sa.UniqueConstraint(
'email_address', 'code', name='uniqueCode'),
sa.UniqueConstraint(
'guest_id', name='auth_codes_unique_guest_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('auth_codes')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
bb42f667fba2b0c8fc157bfbf14569d44db6c0b9 | 64d7bb916a9db3e5630750a423872cd136947d9a | /ecom_app/migrations/0008_auto_20200805_0100.py | 539b4d2c1a6ae9ce3fcb48222d0edf1e3bcf9803 | [] | no_license | KaiserKamruzzaman/Django-E-Commerce- | 6c979ab29885ef5c97c788857d9b6d4d2ed3ff64 | 93d00568ad78cd6c2a3a0daada6ae28f5ddb9243 | refs/heads/master | 2022-12-03T04:45:34.493236 | 2020-08-24T05:52:15 | 2020-08-24T05:52:15 | 289,840,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | # Generated by Django 3.0.3 on 2020-08-04 19:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ecom_app', '0007_auto_20200804_1928'),
]
operations = [
migrations.RenameField(
model_name='order',
old_name='Customer',
new_name='customer',
),
]
| [
"[email protected]"
] | |
394014b3cf84399204151193a86550a25e7cf426 | 8c3bb557767824f2b80f84352d3c9e1fc898467a | /ToDo_project_files/routes.py | dce6179f011fb178c4795a19ee56bc65431c1064 | [] | no_license | shreyanshshah91/Flask_To-Do | 340d20eb55c910df817071babb89c2be310ec071 | 62c56350c694fc053c316006389adcd71594e78c | refs/heads/main | 2023-03-11T04:09:43.438520 | 2021-03-01T17:11:16 | 2021-03-01T17:11:16 | 343,491,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,509 | py | from flask import render_template, redirect, request, flash
from ToDo_project_files import app, db, bcrypt
from ToDo_project_files.models import User, Todo
from ToDo_project_files.forms import RegistrationForm, LoginForm
from flask_login import login_user, current_user, logout_user
#displays the homepage
@app.route("/")
def home_page():
return render_template("home.html")
#registration processing route
@app.route("/register", methods=["GET", "POST"])
def register():
if current_user.is_authenticated:
return redirect("/task/" + str(current_user.id))
form = RegistrationForm()
if form.validate_on_submit():
password_hashed = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = User(username=form.username.data, email=form.email.data, password=password_hashed)
db.session.add(user)
db.session.commit()
flash("Account Created! Please login.")
return redirect("/login")
return render_template("register.html", title='Register', form=form)
#login processing route
@app.route("/login", methods=["GET", "POST"])
def login():
if current_user.is_authenticated:
return redirect("/task/" + str(current_user.id))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user)
return redirect("/task/" + str(current_user.id))
else:
flash('Login Failed!')
return render_template("login.html", title='Login', form=form)
#route that will display all tasks of that specific logged in user
@app.route("/task/<int:get_id>")
def display_tasks(get_id):
if current_user.is_authenticated:
tasking = User.query.get(get_id)
task_list = tasking.tasks
return render_template("display.html", task_list = task_list)
else:
return redirect("/register")
#for adding new tasks and redirecting back to homescreen to display
@app.route("/task/<int:get_id>/add-task", methods = ["GET", "POST"])
def add_task(get_id):
task = request.form["fetch_task"]
if task:
new_task = Todo(task) #creating an instance for the model
new_task.user = current_user #fetching details of the current logged in users who create a task
if get_id == current_user.id:
db.session.add(new_task)
db.session.commit()
return redirect("/task/" + str(get_id))
else:
raise Exception("Please check the input!")
#for updating a particular task
@app.route("/task/<int:get_userid>/edit-task/<int:get_id>", methods = ["GET", "POST"])
def edit_task(get_userid, get_id):
task = request.form["edit_task"]
if task:
fetch = Todo.query.filter_by(id = get_id).first()
if get_userid == current_user.id:
fetch.tasks = task
db.session.commit()
return redirect('/task/' + str(get_userid))
else:
raise Exception("Please enter the updated task!")
#for deleting a particular task
@app.route("/task/<int:get_userid>/delete-task/<int:get_id>")
def delete_task(get_userid, get_id):
fetched_task = Todo.query.get(get_id)
if get_userid == current_user.id:
db.session.delete(fetched_task)
db.session.commit()
return redirect("/task/" + str(get_userid))
else:
raise Exception("Attempt Failed! Please try again!")
#striking off or unstriking (Mark as done) a task
@app.route("/task/<int:get_userid>/completed/<int:get_id>")
def mark_as_complete(get_userid, get_id):
fetched_task = Todo.query.get(get_id)
if get_userid == current_user.id:
if fetched_task.marked_complete:
fetched_task.marked_complete = False
else:
fetched_task.marked_complete = True
db.session.commit()
return redirect("/task/" + str(get_userid))
else:
raise Exception("Attempt Failed! Please try again!")
#searching for tasks based on __searchable__ in model
@app.route("/task/<int:get_id>/search")
def search(get_id):
tasks = Todo.query.whoosh_search(request.args.get('query')).all()
if get_id == current_user.id:
        return render_template("display.html", task_list=tasks)  # render_template expects a template name, not a URL path
else:
raise Exception("Attempt Failed!")
#logging out process for current user
@app.route("/logout")
def logout():
logout_user()
return redirect("/")
| [
"[email protected]"
] | |
cb66827182a2da5d52aff216601272cbf9917ff5 | e88ac5c879326f47fa154baa6f0319240707d10f | /removeDuplication.py | 7f7025b72708bbd54b9efe5b656aee309b29b4e4 | [] | no_license | Clempops/algorithms | af326124379faf402aeb59383965b05ce7ef55f3 | 5576bbd47715c37b7f711ae43e6481d2947afa45 | refs/heads/master | 2020-05-19T09:46:26.502991 | 2015-06-23T19:47:56 | 2015-06-23T19:47:56 | 37,941,161 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | string = 'hello'
def removeDuplication(string):
res = ''
for i in string:
if i not in res:
res += i
return res
print(removeDuplication(string))
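# Illustrative expected output (assumption, not part of the original file):
# removeDuplication('hello') returns 'helo' - each character is kept only once,
# in order of first appearance.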
| [
"[email protected]"
] | |
e50aa6aef1cfcde44c6922636a2032ccbb64594b | 6a250dcf1e1a294d4cb2f467ddb481cd67bc271f | /www/app.py | 821f3a2bd19d208f82e60b9645c2fd7b7ba44ca5 | [] | no_license | czj4093/awesome-python3-webapp | eea7765afdd62d751e6197d8f265fd94f8cb1b50 | 1061542cc3599d1cab6a5f6bcfcc1d639d7ecae0 | refs/heads/master | 2020-05-09T17:03:58.666619 | 2019-04-14T12:46:51 | 2019-04-14T12:46:51 | 181,294,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'chenzejin'
'''
async web application.
'''
import logging; logging.basicConfig(level=logging.INFO)
import asyncio, os, json, time
from datetime import datetime
from aiohttp import web
def index(request):
return web.Response(body=b'<h1>Awesome</h1>',content_type='text/html')
@asyncio.coroutine
def init(loop):
app = web.Application(loop=loop)
app.router.add_route('GET','/',index)
srv = yield from loop.create_server(app.make_handler(), '127.0.0.1',9000)
logging.info('server started at http://127.0.0.1:9000...')
return srv
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever() | [
"[email protected]"
] | |
c749abb0e1a9f27ebd9ec5fcc93d2afaa0b99d41 | a66e0ca90bbc98fef62a24c2826d76a171b3fa6e | /main.py | 652df02a52edda412864bcb4cb2d316ecc5868d2 | [] | no_license | dillondesilva/dingo-bot | 934b8584ffe0a411282e18f94bba81b9e49c0776 | b9ef602291a8f5bd3cee14e127ed79c196d8bf21 | refs/heads/master | 2020-09-16T05:27:43.088390 | 2019-12-31T01:01:18 | 2019-12-31T01:01:18 | 223,667,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #!/usr/bin/env python
import time
import serial
ser = serial.Serial(
port='/dev/ttyAMA0',
baudrate = 9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
while True:
txt = ser.readline()
print(txt)
time.sleep(100) | [
"[email protected]"
] | |
e9560659a9ebcc10e180e3d6f2ea34632653e2b5 | 684a7d56589f7b96002646dfc26ba2de52eb7d80 | /source/callback/callback.py | c0fcf24a8585a430d2e0d7670736cac95efafde9 | [
"Apache-2.0"
] | permissive | adewin/lambda-deep-learning-demo | 7a42b935ca1ab1e92a0170bf28c7e526cffa5cb6 | ebbbd63c0abf87a1a4155b17cef145039b7a1ef7 | refs/heads/master | 2020-07-08T13:15:51.476791 | 2019-04-26T21:25:44 | 2019-04-26T21:25:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | """
Copyright 2018 Lambda Labs. All Rights Reserved.
Licensed under
==========================================================================
"""
from __future__ import print_function
class Callback(object):
def __init__(self, config):
self.config = config
def before_run(self, *argv):
pass
def after_run(self, *argv):
pass
def before_step(self, *argv):
pass
def after_step(self, *argv):
pass
def build(config):
return Callback(config)
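# --- Illustrative subclass (assumption, not part of the original file) ---
# The base class above is a set of no-op hooks; a runner is expected to call
# before_run/after_run around a whole run and before_step/after_step around each
# step. A hypothetical subclass could override only the hooks it needs:
#
# class LoggingCallback(Callback):
#     def before_run(self, *argv):
#         print("starting run with config:", self.config)
#     def after_step(self, *argv):
#         print("finished one step")
#
# build(config) would then return LoggingCallback(config) instead of Callback(config).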
| [
"[email protected]"
] | |
db1709241653d86d91f1534902bd829e968cad18 | 7eb45006096f6bc3560af0ef1e54923e3f6b35d2 | /geofdw/fdw/geocode.py | f1c01490ad83c14ca8ce827e2323ab61ca77db9c | [] | no_license | Vadim0908/geofdw | 51112543fb93cb3b5263b7f876d797343ee03d68 | 629e97d9fea9c235fdc6abc60624498cb2075b20 | refs/heads/master | 2021-01-17T06:35:06.849460 | 2015-08-09T09:37:27 | 2015-08-09T09:37:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,307 | py | """
:class:`FGeocode` and `RGeocode` are foreign data wrappers for the geopy
geocoding module.
"""
from geofdw.base import *
from shapely.geometry import Point
import geopy
import pypg
class _Geocode(GeoFDW):
def __init__(self, options, columns):
super(_Geocode, self).__init__(options, columns, srid=4326)
self.service = options.get('service', 'googlev3')
geocoder = geopy.get_geocoder_for_service(self.service)
if geocoder == geopy.geocoders.googlev3.GoogleV3:
api_key = options.get('api_key')
self.geocoder = geocoder(api_key = api_key)
elif geocoder == geopy.geocoders.arcgis.ArcGIS:
username = options.get('username')
password = options.get('password')
self.geocoder = geocoder(username = username, password = password)
else:
self.geocoder = geocoder()
def get_path_keys(self):
"""
Query planner helper.
"""
return [ ('rank', 1), ('geom', 1), ('address', 1) ]
class FGeocode(_Geocode):
"""
The FGeocode foreign data wrapper can do forward geocoding using a number of
online services. The following columns may exist in the table: query TEXT,
rank INTEGER, geom GEOMETRY(POINTZ, 4326), address TEXT.
Note that the geometry will be a 3d point with SRID 4326. At present, no
supported geocoder returns a useful elevation (the GoogleV3 geocoder, for
example, returns a static elevation of 0).
"""
def __init__(self, options, columns):
"""
Create the table that uses GoogleV3 by default or one of the following
named geocoders: ArcGIS; GoogleV3; Nominatim.
:param dict options: Options passed to the table creation.
service: 'arcgis', 'googlev3', 'nominatim'
api_key: API key for GoogleV3 (optional)
username: user name for ArcGIS (optional)
password: password for ArcGIS (optional)
:param list columns: Columns the user has specified in PostGIS.
"""
super(FGeocode, self).__init__(options, columns)
def execute(self, quals, columns):
"""
Execute the query on the geocoder.
:param list quals: List of predicates from the WHERE clause of the SQL
statement. The geocoder expects that one of these predicates will be of the
    form "query = 'Helsinki, Finland'". Optionally, a bounding polygon can be
used to influence the geocoder if it is supported; the following formats
are recognised (and treated equivalently):
geom && ST_GeomFromText('POLYGON(...)')
ST_GeomFromText('POLYGON(...)') && geom
geom @ ST_GeomFromText('POLYGON(...)')
ST_GeomFromText('POLYGON(...)') ~ geom
Other predicates may be added, but they will be evaluated in PostgreSQL
and not here.
:param list columns: List of columns requested in the SELECT statement.
"""
query, bounds = self._get_predicates(quals)
if query:
return self._execute(columns, query, bounds)
else:
return []
def _execute(self, columns, query, bounds = None):
rank = 0
col_geom = 'geom' in columns
col_addr = 'address' in columns
col_query = 'query' in columns
locations = self._get_locations(query, bounds)
if locations:
for location in locations:
rank = rank + 1
row = { 'rank' : rank }
if col_geom:
geom = pypg.geometry.shape.to_postgis(Point(location.latitude, location.longitude, location.altitude), self.srid)
row['geom'] = geom
if col_addr:
row['address'] = location.address
if col_query:
row['query'] = query
yield row
def _get_predicates(self, quals):
query = None
bounds = None
for qual in quals:
if qual.field_name == 'query' and qual.operator == '=':
query = qual.value
if qual.field_name == 'geom' and qual.operator in ['&&', '@']: # note A ~ B is transformed into B @ A
shape, srid = pypg.geometry.postgis.to_shape(qual.value)
bounds = shape.bounds
elif qual.value == 'geom' and qual.operator == '&&':
shape, srid = pypg.geometry.postgis.to_shape(qual.field_name)
bounds = shape.bounds
return query, bounds
def _get_locations(self, query, bounds):
log_to_postgres('Geocode (%s): running query "%s" with bounds = %s' % (self.service, query, str(bounds)), DEBUG)
if bounds and self.service == 'googlev3':
return self.geocoder.geocode(query, False, bounds = bounds)
else:
return self.geocoder.geocode(query, False)
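# --- Illustrative wiring (assumption, not part of the original module) ---
# geofdw tables are typically exposed through Multicorn; a forward-geocoding
# table might be declared on the PostgreSQL side roughly as follows (sketch
# only, the server/table names and the exact wrapper path are assumptions):
#
#   CREATE SERVER fgeocode_srv FOREIGN DATA WRAPPER multicorn
#     OPTIONS (wrapper 'geofdw.fdw.FGeocode');
#   CREATE FOREIGN TABLE fgeocode (query TEXT, rank INTEGER,
#     geom GEOMETRY(POINTZ, 4326), address TEXT) SERVER fgeocode_srv;
#   SELECT rank, geom, address FROM fgeocode WHERE query = 'Helsinki, Finland';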
class RGeocode(_Geocode):
"""
The RGeocode foreign data wrapper can do reverse geocoding using a number of
online services. The following columns may exist in the table: query
GEOMETRY(POINT, 4326), rank INTEGER, geom GEOMETRY(POINTZ, 4326), address
TEXT.
Note that the geometry will be a 3d point with SRID 4326. At present, no
supported geocoder returns a useful elevation (the GoogleV3 geocoder, for
example, returns a static elevation of 0).
"""
def __init__(self, options, columns):
"""
Create the table that uses GoogleV3 by default or one of the following
named geocoders: ArcGIS; GoogleV3; Nominatim.
:param dict options: Options passed to the table creation.
service: 'arcgis', 'googlev3', 'nominatim'
api_key: API key for GoogleV3 (optional)
username: user name for ArcGIS (optional)
password: password for ArcGIS (optional)
:param list columns: Columns the user has specified in PostGIS.
"""
super(RGeocode, self).__init__(options, columns)
def execute(self, quals, columns):
"""
Execute the query on the geocoder.
:param list quals: List of predicates from the WHERE clause of the SQL
statement. The geocoder expects that one of these predicates will be of the
form "query = ST_MakePoint(52, 0)"
Other predicates may be added, but they will be evaluated in PostgreSQL
and not here.
:param list columns: List of columns requested in the SELECT statement.
"""
query = self._get_predicates(quals)
if query:
return self._execute(columns, query)
else:
return []
def _execute(self, columns, query):
rank = 0
col_geom = 'geom' in columns
col_addr = 'address' in columns
col_query = 'query' in columns
locations = self._get_locations(query)
for location in locations:
rank = rank + 1
row = { 'rank' : rank }
if col_geom:
geom = pypg.geometry.shape.to_postgis(Point(location.latitude, location.longitude, location.altitude), self.srid)
row['geom'] = geom
if col_addr:
row['address'] = location.address
if col_query:
row['query'] = pypg.geometry.shape.to_postgis(query, self.srid)
yield row
def _get_predicates(self, quals):
for qual in quals:
if qual.field_name == 'query' and qual.operator == '=':
shape, srid = pypg.geometry.postgis.to_shape(qual.value)
return shape
return None
def _get_locations(self, query):
log_to_postgres('GeocodeR (%s): running query "%s"' % (self.service, query.wkt), DEBUG)
return self.geocoder.reverse([query.x, query.y])
| [
"[email protected]"
] | |
2ff36457c22757394018c77a76b95d9a1928e8d6 | 31d5db91af6591206174fa3e2c862b94bf8d9235 | /getfullscreenimage.py | 22af92b449e968ae352a629bb39d24baf5bf40ba | [] | no_license | gdnyfcuso/ElementPosition | adb2a585688ff9d86168e87bd419ea9681675a28 | 5784f61d1f4cc4cf8a94a56c18759c282783f46b | refs/heads/main | 2023-07-12T16:31:14.521148 | 2021-08-20T07:23:09 | 2021-08-20T07:23:09 | 398,186,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py |
import time
import win32gui, win32ui, win32con, win32api
def window_capture(filename):
    hwnd = 0  # Window handle; 0 refers to the currently active window
    # Get the device context (DC) of the window from its handle
    hwndDC = win32gui.GetWindowDC(hwnd)
    # Create an mfcDC from the window's DC
    mfcDC = win32ui.CreateDCFromHandle(hwndDC)
    # Create a compatible DC from the mfcDC
    saveDC = mfcDC.CreateCompatibleDC()
    # Create a bitmap that will hold the captured image
    saveBitMap = win32ui.CreateBitmap()
    # Get the monitor information
    MoniterDev = win32api.EnumDisplayMonitors(None, None)
    w = MoniterDev[0][2][2]
    h = MoniterDev[0][2][3]
    # print w,h  # image size
    # Allocate space for the bitmap
    saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
    # Select the bitmap into saveDC so the screenshot is written into saveBitMap
    saveDC.SelectObject(saveBitMap)
    # Capture a (w, h) region starting at the top-left corner (0, 0)
    saveDC.BitBlt((0, 0), (w, h), mfcDC, (0, 0), win32con.SRCCOPY)
saveBitMap.SaveBitmapFile(saveDC, filename)
if __name__ == "__main__":
beg = time.time()
for i in range(10):
window_capture("haha.jpg")
end = time.time()
print(end - beg)
# import sys
# from PyQt5.QtWidgets import *
# from PyQt5.QtGui import *
# from PyQt5.QtCore import Qt
# from PyQt5.QtWidgets import *
# from PyQt5.QtCore import *
# from PyQt5 import QtGui,QtCore
# import keyboard
# import random
# class Trans(QWidget):
# def cut(self):
# screenshot = QApplication.primaryScreen().grabWindow(QApplication.desktop().winId())
# outputRegion = screenshot.copy()
# outputRegion.save('sho54t.bmp', format = 'bmp', quality = 100)
# self.close()
# if __name__ == '__main__':
# app = QApplication(sys.argv)
# trans = Trans()
# trans.cut()
# trans.show()
# sys.exit(app.exec_())
| [
"[email protected]"
] | |
8b9260ba1a175ef2441418fc049795b45fc5084a | a838d4bed14d5df5314000b41f8318c4ebe0974e | /eng/versioning/version_increment.py | 3ac56b45ee4c4c9cd8d2ab2669167b90040d2ab8 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 2,599 | py | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Below are common methods for the devops build steps. This is the common location that will be updated with
# package targeting during release.
import os
import argparse
from packaging.version import parse
import logging
from version_shared import get_packages, set_version_py, set_dev_classifier, update_change_log
logging.getLogger().setLevel(logging.INFO)
def increment_version(old_version):
parsed_version = parse(old_version)
release = parsed_version.release
if parsed_version.is_prerelease:
prerelease_version = parsed_version.pre[1]
return '{0}.{1}.{2}b{3}'.format(release[0], release[1], release[2], prerelease_version + 1)
return '{0}.{1}.{2}'.format(release[0], release[1], release[2] + 1)
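# Illustrative behaviour (assumption, not part of the original file), given
# packaging-style version strings:
#   increment_version("1.2.3")   -> "1.2.4"    (patch bump for a GA release)
#   increment_version("1.2.3b4") -> "1.2.3b5"  (beta bump for a prerelease)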
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Increments version for a given package name based on the released version')
    parser.add_argument('--package-name', required=True, help='name of package (accepts both formats: azure-service-package and azure_service_package)')
parser.add_argument(
dest="glob_string",
nargs="?",
help=(
"A comma separated list of glob strings that will target the top level directories that contain packages."
'Examples: All = "azure-*", Single = "azure-keyvault", Targeted Multiple = "azure-keyvault,azure-mgmt-resource"'
),
)
parser.add_argument('--service', required=True, help='name of the service for which to set the dev build id (e.g. keyvault)')
args = parser.parse_args()
package_name = args.package_name.replace('_', '-')
packages = get_packages(args, package_name)
package_map = { pkg[1][0]: pkg for pkg in packages }
if package_name not in package_map:
raise ValueError("Package name not found: {}".format(package_name))
target_package = package_map[package_name]
new_version = increment_version(target_package[1][1])
print('{0}: {1} -> {2}'.format(package_name,target_package[1][1], new_version))
set_version_py(target_package[0], new_version)
set_dev_classifier(target_package[0], new_version)
update_change_log(target_package[0], new_version, args.service, args.package_name, True, False) | [
"[email protected]"
] | |
0ce168ab0aa8130b7f3107d039fdc86989a2f07c | e180e68c468557b186d083869c005c98abdf539a | /Testing Hardcoded format/test8.py | 886890d05c57d885c0e77ab4610ea4ab489df179 | [] | no_license | singhalshubh/Notification-system-Testing-using-selenium | 8a58977d7d63c1216e420363f408826e9bfccf7a | e460e7ceeb63e5eea9a914be0ed84febaebe47c7 | refs/heads/master | 2020-03-21T21:33:03.046748 | 2020-01-17T09:53:24 | 2020-01-17T09:53:24 | 139,069,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,816 | py | __author__= 'shubh'
import unittest
from selenium import webdriver
class signup(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
def test_unsubscribe_community(self):
user ="admin"
pwd= "sha123#56su"
driver = webdriver.Firefox()
driver.maximize_window() #For maximizing window
driver.implicitly_wait(20) #gives an implicit wait for 20 seconds
driver.get("http://127.0.0.1:8000/")
driver.find_element_by_xpath('//a [@href="/login/?next=/"]').click()
driver.get("http://localhost:8000/login/?next=/")
elem = driver.find_element_by_id("id_username")
elem.send_keys(user)
elem = driver.find_element_by_id("id_password")
elem.send_keys(pwd)
driver.find_element_by_class_name('btn-block').click()
driver.find_element_by_xpath('//a [@href="/communities/"]').click()
driver.find_element_by_xpath('//a [@href="/community-view/1/"]').click()
driver.find_element_by_xpath('//a [@href="/community_content/1/"]').click()
driver.find_element_by_xpath('//a [@href="/article-view/5/"]').click()
driver.find_element_by_xpath('//a [@href="/article-edit/5/"]').click()
#publish is an id
driver.find_element_by_id("publish").click()
driver.find_element_by_xpath('//a [@href="/logout/"]').click()
user ="shubh"
pwd= "sha123#56su"
driver.get("http://127.0.0.1:8000/")
driver.find_element_by_xpath('//a [@href="/login/?next=/"]').click()
driver.get("http://localhost:8000/login/?next=/")
elem = driver.find_element_by_id("id_username")
elem.send_keys(user)
elem = driver.find_element_by_id("id_password")
elem.send_keys(pwd)
driver.find_element_by_class_name('btn-block').click()
driver.find_element_by_xpath('//a [@href="/notifications/"]').click()
def tearDown(self):
self.driver.quit()
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
d4ebbd0498578f14b876e398a62aab73d0bc638c | 56b6b15d1ceddf544fb7f80b6cbdc4c1b068577e | /stable_baselines_custom/common/mpi_adam.py | 4ad503d1c9f05c27826298da9334279b38ec1ba0 | [
"MIT"
] | permissive | iamlab-cmu/stable-baselines | c83c5a598ed8b37adfc9ecd465bac3cf4b44db6b | 6e9a8b2ad1d690bd9a9611405e4f319a52101540 | refs/heads/master | 2022-09-17T13:13:50.792728 | 2020-05-29T18:57:13 | 2020-05-29T18:57:13 | 266,206,473 | 0 | 0 | MIT | 2020-05-29T18:57:14 | 2020-05-22T21:00:05 | Python | UTF-8 | Python | false | false | 4,558 | py | import tensorflow as tf
import numpy as np
import mpi4py
import stable_baselines_custom.common.tf_util as tf_utils
class MpiAdam(object):
def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None,
sess=None):
"""
A parallel MPI implementation of the Adam optimizer for TensorFlow
https://arxiv.org/abs/1412.6980
:param var_list: ([TensorFlow Tensor]) the variables
:param beta1: (float) Adam beta1 parameter
        :param beta2: (float) Adam beta2 parameter
:param epsilon: (float) to help with preventing arithmetic issues
:param scale_grad_by_procs: (bool) if the scaling should be done by processes
:param comm: (MPI Communicators) if None, mpi4py.MPI.COMM_WORLD
:param sess: (TensorFlow Session) if None, tf.get_default_session()
"""
self.var_list = var_list
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.scale_grad_by_procs = scale_grad_by_procs
size = sum(tf_utils.numel(v) for v in var_list)
# Exponential moving average of gradient values
# "first moment estimate" m in the paper
self.exp_avg = np.zeros(size, 'float32')
# Exponential moving average of squared gradient values
# "second raw moment estimate" v in the paper
self.exp_avg_sq = np.zeros(size, 'float32')
self.step = 0
self.setfromflat = tf_utils.SetFromFlat(var_list, sess=sess)
self.getflat = tf_utils.GetFlat(var_list, sess=sess)
self.comm = mpi4py.MPI.COMM_WORLD if comm is None else comm
def update(self, local_grad, learning_rate):
"""
update the values of the graph
:param local_grad: (numpy float) the gradient
:param learning_rate: (float) the learning_rate for the update
"""
if self.step % 100 == 0:
self.check_synced()
local_grad = local_grad.astype('float32')
global_grad = np.zeros_like(local_grad)
self.comm.Allreduce(local_grad, global_grad, op=mpi4py.MPI.SUM)
if self.scale_grad_by_procs:
global_grad /= self.comm.Get_size()
self.step += 1
# Learning rate with bias correction
step_size = learning_rate * np.sqrt(1 - self.beta2 ** self.step) / (1 - self.beta1 ** self.step)
# Decay the first and second moment running average coefficient
self.exp_avg = self.beta1 * self.exp_avg + (1 - self.beta1) * global_grad
self.exp_avg_sq = self.beta2 * self.exp_avg_sq + (1 - self.beta2) * (global_grad * global_grad)
step = (- step_size) * self.exp_avg / (np.sqrt(self.exp_avg_sq) + self.epsilon)
self.setfromflat(self.getflat() + step)
def sync(self):
"""
        synchronize the MPI threads
"""
theta = self.getflat()
self.comm.Bcast(theta, root=0)
self.setfromflat(theta)
def check_synced(self):
"""
confirm the MPI threads are synced
"""
if self.comm.Get_rank() == 0: # this is root
theta = self.getflat()
self.comm.Bcast(theta, root=0)
else:
thetalocal = self.getflat()
thetaroot = np.empty_like(thetalocal)
self.comm.Bcast(thetaroot, root=0)
assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal)
@tf_utils.in_session
def test_mpi_adam():
"""
tests the MpiAdam object's functionality
"""
np.random.seed(0)
tf.set_random_seed(0)
a_var = tf.Variable(np.random.randn(3).astype('float32'))
b_var = tf.Variable(np.random.randn(2, 5).astype('float32'))
loss = tf.reduce_sum(tf.square(a_var)) + tf.reduce_sum(tf.sin(b_var))
learning_rate = 1e-2
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
do_update = tf_utils.function([], loss, updates=[update_op])
tf.get_default_session().run(tf.global_variables_initializer())
for step in range(10):
print(step, do_update())
tf.set_random_seed(0)
tf.get_default_session().run(tf.global_variables_initializer())
var_list = [a_var, b_var]
lossandgrad = tf_utils.function([], [loss, tf_utils.flatgrad(loss, var_list)], updates=[update_op])
adam = MpiAdam(var_list)
for step in range(10):
loss, grad = lossandgrad()
adam.update(grad, learning_rate)
print(step, loss)
if __name__ == "__main__":
# Run with mpirun -np 2 python <filename>
test_mpi_adam()
| [
"[email protected]"
] | |
156e3872790dc73bdc64d9acd45216282f9550eb | 2999692f7f535e91f9d9da2d2d4db32b1c271114 | /chapter_3/lesson2_step9_error_messages_for_substrings.py | 5adf19a02d621c9507ba0bab1c323510f86f2ac1 | [] | no_license | Tester5657/stepik-auto-tests-course | aa026eee07fd22a22482a84b95f5a37aaf51eb15 | d97c660f6eb960bf4ee4c133dc901244f9eb49fb | refs/heads/master | 2021-06-27T14:23:46.693036 | 2020-01-29T12:37:18 | 2020-01-29T12:37:18 | 230,717,325 | 0 | 0 | null | 2021-06-02T00:52:44 | 2019-12-29T07:22:07 | Python | UTF-8 | Python | false | false | 335 | py | s = 'My Name is Julia'
if 'Name' in s:
print('Substring found')
index = s.find('Name')
if index != -1:
print(f'Substring found at index {index}')
full_string = "text"
substring = "text2"
assert substring in full_string, f"expected \'{substring}\' to be substring of \'{full_string}\'"
if __name__ == '__main__':
main() | [
"Uezdny_gorod12"
] | Uezdny_gorod12 |
db0f1e75164799cda6029b459b5690ad20b51f7d | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractNellietranslationWordpressCom.py | c62296b95129383ec41b43e5473b47f679d163d3 | [
"BSD-3-Clause"
] | permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 574 | py |
def extractNellietranslationWordpressCom(item):
'''
Parser for 'nellietranslation.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| [
"[email protected]"
] | |
f64852dade55046f20d33c1f29c23c4208ccdacc | 3395a234e7c80d011607e79c49cd48bf516f256b | /dependencies/jedi/third_party/typeshed/third_party/2and3/flask/json/tag.pyi | b1648dc79877207e7cc04222ee66c11f34502a79 | [
"MIT",
"Apache-2.0"
] | permissive | srusskih/SublimeJEDI | 67329b72e184bc9584843968dcc534a002c797a1 | 95c185d778425c04536d53517b0e3fe6dedf8e59 | refs/heads/master | 2023-08-24T11:30:37.801834 | 2022-08-30T09:04:17 | 2022-08-30T09:04:17 | 6,241,108 | 669 | 125 | MIT | 2022-08-30T09:04:18 | 2012-10-16T08:23:57 | Python | UTF-8 | Python | false | false | 2,037 | pyi | # Stubs for flask.json.tag (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any, Optional
class JSONTag:
key: Any = ...
serializer: Any = ...
def __init__(self, serializer: Any) -> None: ...
def check(self, value: Any) -> None: ...
def to_json(self, value: Any) -> None: ...
def to_python(self, value: Any) -> None: ...
def tag(self, value: Any): ...
class TagDict(JSONTag):
key: str = ...
def check(self, value: Any): ...
def to_json(self, value: Any): ...
def to_python(self, value: Any): ...
class PassDict(JSONTag):
def check(self, value: Any): ...
def to_json(self, value: Any): ...
tag: Any = ...
class TagTuple(JSONTag):
key: str = ...
def check(self, value: Any): ...
def to_json(self, value: Any): ...
def to_python(self, value: Any): ...
class PassList(JSONTag):
def check(self, value: Any): ...
def to_json(self, value: Any): ...
tag: Any = ...
class TagBytes(JSONTag):
key: str = ...
def check(self, value: Any): ...
def to_json(self, value: Any): ...
def to_python(self, value: Any): ...
class TagMarkup(JSONTag):
key: str = ...
def check(self, value: Any): ...
def to_json(self, value: Any): ...
def to_python(self, value: Any): ...
class TagUUID(JSONTag):
key: str = ...
def check(self, value: Any): ...
def to_json(self, value: Any): ...
def to_python(self, value: Any): ...
class TagDateTime(JSONTag):
key: str = ...
def check(self, value: Any): ...
def to_json(self, value: Any): ...
def to_python(self, value: Any): ...
class TaggedJSONSerializer:
default_tags: Any = ...
tags: Any = ...
order: Any = ...
def __init__(self) -> None: ...
def register(self, tag_class: Any, force: bool = ..., index: Optional[Any] = ...) -> None: ...
def tag(self, value: Any): ...
def untag(self, value: Any): ...
def dumps(self, value: Any): ...
def loads(self, value: Any): ...
| [
"[email protected]"
] | |
8be2dbc27574f4b50a8b455aa7f1fde1b1d1a032 | 86ae81570cf5ab07e07c40ee3ec41a25ca8b871e | /manage.py | 44acc6d5a44cc7aabe7e1526a6aa4af2a95396a2 | [] | no_license | damscassiani1994/apiusuarios | df0ded67af752ab3165c719d6eb442af9b67d66e | 9603134639d3fb1f9e7d833cc43de6c83039e459 | refs/heads/master | 2021-08-07T08:25:02.612742 | 2017-11-07T22:18:07 | 2017-11-07T22:18:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api_usuarios.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
d4403a791ecc88f293464b246c21949eee1a9a90 | 061ff7a2155d206cbe30a4a3c247a0537f263044 | /portal/gradebook.py | e921ee60cf3946e6a938b1e8e2b049a9a6d4ba23 | [
"MIT"
] | permissive | BillSpyro/tsct-portal | 6436eba8104633c0cff9a2c8341253ed829f0dcb | dced1220f04234cc80c1975f960a1470d503cfc4 | refs/heads/master | 2021-05-24T12:25:41.949604 | 2020-05-01T20:33:27 | 2020-05-01T20:33:27 | 253,560,906 | 2 | 0 | MIT | 2020-05-01T20:33:28 | 2020-04-06T16:53:16 | Python | UTF-8 | Python | false | false | 3,736 | py | from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from portal.auth import (login_required, teacher_required)
from . import db
bp = Blueprint('gradebook', __name__, url_prefix='/portal/gradebook')
@bp.route('/')
@login_required
def gradebook():
if g.users['role'] == 'teacher':
cur = db.get_db().cursor()
cur.execute("""SELECT * FROM courses""")
courses = cur.fetchall()
cur.execute("""SELECT courses.id AS c_id, session.* FROM session
JOIN courses ON courses.id = session.courses_id""")
sessions = cur.fetchall()
cur.execute("""SELECT session.id AS s_id, users.id AS u_id, SUM(submissions.points) AS s_points, SUM(assignments.points) AS a_points, '' as grade FROM users
JOIN roster ON roster.users_id = users.id
JOIN session ON roster.session_id = session.id
JOIN assignments ON assignments.session_id=session.id
JOIN submissions ON submissions.assignments_id = assignments.id and users.id = submissions.users_id
GROUP BY session.id, users.id
ORDER BY session.id""")
grades = cur.fetchall()
else:
cur = db.get_db().cursor()
cur.execute("""
SELECT DISTINCT ON (courses.id) roster.*, users.*, session.*, courses.* FROM roster
JOIN users ON users.id = roster.users_id
JOIN session ON session.id = roster.session_id
JOIN courses ON courses.id = session.courses_id
WHERE users.id = %s;""",
(g.users['id'],))
courses = cur.fetchall()
cur.execute("""
SELECT DISTINCT ON (session.id) session.*, roster.id AS r_id, users.id AS u_id, courses.id AS c_id FROM roster
JOIN users ON users.id = roster.users_id
JOIN session ON session.id = roster.session_id
JOIN courses ON courses.id = session.courses_id
WHERE users.id = %s;""",
(g.users['id'],))
sessions = cur.fetchall()
cur.execute("""SELECT session.id AS s_id, users.id AS u_id, SUM(submissions.points) AS s_points, SUM(assignments.points) AS a_points, '' as grade FROM users
JOIN roster ON roster.users_id = users.id
JOIN session ON roster.session_id = session.id
JOIN assignments ON assignments.session_id=session.id
JOIN submissions ON submissions.assignments_id = assignments.id and users.id = submissions.users_id
WHERE users.id = %s
GROUP BY session.id, users.id
ORDER BY session.id""",
(g.users['id'],))
grades = cur.fetchall()
for grade in grades:
grade[4] = grade[2]/grade[3]
if grade[4] >= 0.98:
grade[4] = 'A+'
elif grade[4] >= 0.93:
grade[4] = 'A'
elif grade[4] >= 0.90:
grade[4] = 'A-'
elif grade[4] >= 0.87:
grade[4] = 'B+'
elif grade[4] >= 0.83:
grade[4] = 'B'
elif grade[4] >= 0.80:
grade[4] = 'B-'
elif grade[4] >= 0.77:
grade[4] = 'C+'
elif grade[4] >= 0.73:
grade[4] = 'C'
elif grade[4] >= 0.70:
grade[4] = 'C-'
elif grade[4] >= 0.67:
grade[4] = 'D+'
elif grade[4] >= 0.63:
grade[4]= 'D'
elif grade[4] >= 0.60:
grade[4] = 'D-'
else:
grade[4] = 'F'
cur.close()
return render_template('portal/gradebook/view-gradebook.html', courses=courses, sessions=sessions, grades=grades)
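# --- Illustrative alternative (assumption, not part of the original file) ---
# The letter-grade ladder above could also be driven by a threshold table so the
# cutoffs live in one place:
#
# GRADE_STEPS = [(0.98, 'A+'), (0.93, 'A'), (0.90, 'A-'), (0.87, 'B+'),
#                (0.83, 'B'), (0.80, 'B-'), (0.77, 'C+'), (0.73, 'C'),
#                (0.70, 'C-'), (0.67, 'D+'), (0.63, 'D'), (0.60, 'D-')]
#
# def to_letter(ratio):
#     for cutoff, letter in GRADE_STEPS:
#         if ratio >= cutoff:
#             return letter
#     return 'F'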
| [
"[email protected]"
] | |
0f646ca51eea59fd4f4fe0e3e10b1824d8c540d9 | cbfb679bd068a1153ed855f0db1a8b9e0d4bfd98 | /leet/dp/1140_Stone_Game_II.py | d1ed5925f0f4caf1298bf5358edbaf4abe9af491 | [] | no_license | arsamigullin/problem_solving_python | 47715858a394ba9298e04c11f2fe7f5ec0ee443a | 59f70dc4466e15df591ba285317e4a1fe808ed60 | refs/heads/master | 2023-03-04T01:13:51.280001 | 2023-02-27T18:20:56 | 2023-02-27T18:20:56 | 212,953,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | from typing import List
from functools import lru_cache
class Solution:
    def stoneGameII(self, piles: List[int]) -> int:
        # Memoised minimax over (index, M): helper(i, m) is the maximum number of
        # stones the player to move can still collect from piles[i:], scored with
        # suffix sums so the opponent's optimal reply can be subtracted directly.
        n = len(piles)
        # suffix[i] = total number of stones in piles[i:]
        suffix = [0] * (n + 1)
        for i in range(n - 1, -1, -1):
            suffix[i] = suffix[i + 1] + piles[i]
        @lru_cache(maxsize=None)
        def helper(i, m):
            if i + 2 * m >= n:
                # The current player may take every remaining pile.
                return suffix[i]
            best = 0
            for j in range(1, 2 * m + 1):
                # Take piles[i:i + j]; the opponent then plays optimally on the rest.
                best = max(best, suffix[i] - helper(i + j, max(m, j)))
            return best
        return helper(0, 1)
if __name__ == '__main__':
    s = Solution()
    print(s.stoneGameII([2, 7, 9, 4, 4]))  # expected output: 10
"[email protected]"
] | |
8f250333563f5a706d986bac623086de8c233d7f | a3175746b3304108d261f163c2ff231454ead4cd | /Facebookapi.py | c2c9b2e5d930d1c94c0c8506a28318fe858c9156 | [] | no_license | christianangel15/CodeSnippets | dccc6ecbad8d64a3be490f4199fddf28fa01e595 | f43bdf36efc8055a72447e06e5d3ef10121e1bd1 | refs/heads/master | 2023-02-17T10:31:21.877630 | 2021-01-18T15:21:08 | 2021-01-18T15:21:08 | 267,920,642 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | import facebook
token = 'Your token here'
fbobj = facebook.GraphAPI(access_token=token)
# fbobj.put_object('me', 'feed', message='Posted using Graph API')
fbobj.put_photo(image=open('photo-1522364723953-452d3431c267.jpg', 'rb'),
message='Photo posted using Graph API..Cool!')
print('Posted')
| [
"[email protected]"
] | |
cb9e4681176994e682013a8fd58c66406e38dba8 | fa4829f71092aeb8fd7b66b3c97f3bfa957daf41 | /MyProject/settings.py | f285ba90178060a902613da0bf29622c00f82792 | [] | no_license | bfrost831/DjangoProj_repo | da545c703dc79062252b60ff5a0ec9002278d66e | a93ef915133e63d44896d4737e0d270c1b085e20 | refs/heads/master | 2021-04-24T05:48:47.810489 | 2020-03-25T20:42:51 | 2020-03-25T20:42:51 | 250,087,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | """
Django settings for MyProject project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(9+i$c#hjn9x#20yjzer7bnm55@h$3cml*piigjg#k&7uulac0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'homepage',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'homepage.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MyProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
5490a436cce969d697354cec0600e851e31852cd | 030724b60fb4f8b63953b7401702a98072993e94 | /python/140.word_break_II.py | 2647735c996d2fc35d3f3ac0926b4beba47ec0ba | [] | no_license | MtTsai/Leetcode | 5f51a892b78cf6427ce2b4891a10bc2d4ed4d972 | 21e83294aee779a16a8c1b96089da4a40eb03035 | refs/heads/master | 2021-01-24T17:17:52.909429 | 2019-08-04T06:53:53 | 2019-08-04T06:54:23 | 123,228,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | class Solution(object):
def wordBreak(self, s, wordDict):
"""
:type s: str
:type wordDict: List[str]
:rtype: List[str]
"""
tree = {}
for w in wordDict:
trie = tree
for c in w:
if c not in trie.keys():
trie[c] = {}
trie = trie[c]
trie['#'] = '#'
def find(s, start, tree, dp):
if start == len(s):
return [[]]
if type(dp[start]) is list:
return dp[start]
trie = tree
dp[start] = []
for i in range(start, len(s)):
c = s[i]
if c in trie:
trie = trie[c]
if '#' in trie:
ret = find(s, i + 1, tree, dp)
dp[start] += [[s[start:i + 1]] + str_list for str_list in ret]
else:
break
return dp[start]
no_visit_dp = [0] * len(s)
return [' '.join(str_list) for str_list in find(s, 0, tree, no_visit_dp)]
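# --- Illustrative usage (assumption, not part of the original file) ---
# s = "catsanddog", wordDict = ["cat", "cats", "and", "sand", "dog"]
# Solution().wordBreak(s, wordDict) is expected to return the two sentences
# ["cats and dog", "cat sand dog"] (order may differ).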
| [
"[email protected]"
] | |
35bacb289cdf03acdbbf11dec16583ce0e68e78e | b10aa7305fa3e1947c949026984aaacc32b9e350 | /sql_datas.py | ede79501b4c23813b40aa860dc2bc49dc3ae17c5 | [] | no_license | MYoung-coder/Pi_flask | a319b9edb5b3df4d4fd31f1b1dd484e7fc3074c0 | 7cf6a51051af7514e2bd44d3da40f4822ece0efc | refs/heads/master | 2020-12-06T21:51:08.420574 | 2020-01-08T12:37:04 | 2020-01-08T12:37:04 | 232,559,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | import psycopg2
def latest_row():
conn = psycopg2.connect(dbname="AgroIot", user="postgres",password="950315", host="39.97.186.109", port="5432")
cur = conn.cursor()
str_sql = "select * from dbo.klha_data"
cur.execute(str_sql)
rows = cur.fetchall()
# print(len(rows))
# print(rows[-1])
data_time=rows[-1][5].strftime(('%H:%M %d/%m') )
# print(data_time)
soil_humidity=rows[-1][4]
soil_temp=rows[-2][4]
light=rows[-3][4]
air_humidity=rows[-4][4]
air_temp=rows[-5][4]
latest_row_data=[air_temp,air_humidity,light,soil_temp,soil_humidity,data_time]
conn.commit()
conn.close()
# print(latest_row_data)
return latest_row_data
# latest_row() | [
"[email protected]"
] | |
3f953d162b2d1a1d32bc9b8b49129d395aa5a6e9 | 68a66b222a8e81fbbef36e3b26cff16215a21f90 | /fibonacci.py | ec1b4d14a3fd6a21f92b05116541b6d3cd20ba78 | [] | no_license | jeffwright13/codewars | 725fd7d19db4b31f1d4c45fbb21f0e2b8f774425 | 92d16edd1441230e7c4ddc466b893e5ba5929e98 | refs/heads/master | 2020-04-15T15:02:36.704016 | 2016-11-09T18:38:17 | 2016-11-09T18:38:17 | 53,176,971 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | def fibonacci(n):
if n<0:
return None
if n==0:
return 0
if n==1 or n==2:
return 1
else:
return fibonacci(n-2) + fibonacci(n-1)
def test_fibonacci():
assert fibonacci(-1) == None
assert fibonacci(0) == 0
assert fibonacci(1) == 1
assert fibonacci(2) == 1
assert fibonacci(3) == 2
assert fibonacci(6) == 8
assert fibonacci(13) == 233
assert fibonacci(20) == 6765
assert fibonacci(38) == 39088169
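# --- Illustrative variant (assumption, not part of the original file) ---
# The plain recursion above recomputes subproblems, so fibonacci(38) is slow.
# A memoised sketch with the same base cases:
#
# from functools import lru_cache
#
# @lru_cache(maxsize=None)
# def fibonacci_cached(n):
#     if n < 0:
#         return None
#     if n == 0:
#         return 0
#     if n in (1, 2):
#         return 1
#     return fibonacci_cached(n - 2) + fibonacci_cached(n - 1)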
| [
"[email protected]"
] | |
f2ceca0732225c696b0d45ea0f94c8dd333138d6 | bc22137812d53cda9c3c056484b10a0619f21184 | /snippets/views/syntaxes.py | d71bf331fe0a7dde00a071ad9ab319f982f11369 | [] | no_license | vovean/code.re | 58edbd7415bd9b5c81d5e5fbd51e01d3c1cdc160 | 425ed18ba11ab487b46b4c20f406084eb3a5342f | refs/heads/master | 2023-04-30T05:35:14.368705 | 2021-05-18T21:26:41 | 2021-05-18T21:26:41 | 343,167,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpResponseBadRequest, JsonResponse
from snippets.models import Snippet
def list_syntaxes(request: WSGIRequest):
if request.method != 'GET':
return HttpResponseBadRequest(f"Unacceptable method {request.method}")
return JsonResponse(Snippet.SyntaxChoices.values, safe=False)
| [
"[email protected]"
] | |
ee6a5db8a7b96b90ba2f91d343dac58cf8e81c81 | 3a5786a907af3f96d7f3200b94df21f3be4b1211 | /AMDiS_Sandbox2/saves/nonicPressedExt_badShape/evalFinalEnergy.py | 07576f875e14c0371e9f86c0ee4cc93aa8661bba | [] | no_license | nitschke/main | 7635879bba3fb603e23e5678511fcbfcbca13d3d | ecc4db17f46de87c8e7027f0e37031893f9dc5a9 | refs/heads/master | 2021-04-12T04:35:53.825044 | 2017-06-08T15:04:06 | 2017-06-08T15:04:06 | 13,807,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | #!/usr/bin/python
import csv
from pylab import *
fn = "finalEnergy.csv"
lineStyles = ['-','--', '-.', ':']
lw = 3;
with open(fn, 'rb') as f:
reader = csv.DictReader(f, skipinitialspace = True)
stretch=[];
press=[];
e2D=[];
e4D=[];
for row in reader:
stretch.append(row['stretch'])
press.append(row['press'])
e2D.append(row['EnergyOf2Defects'])
e4D.append(row['EnergyOf4Defects'])
stretch = array(stretch,dtype=float)
press = array(press,dtype=float)
e2D = array(e2D,dtype=float)
e4D = array(e4D,dtype=float)
fig = plt.figure()
ax1 = fig.add_subplot(111)
plot(stretch, e2D, label='2 Defects', linewidth=lw)
cFusion = 0.625 #(0.83325+0.8335)/2. # lin. interpol.
dataFilter = stretch > cFusion
plot(stretch[dataFilter], e4D[dataFilter], '*-',label='4 Defects', linewidth=lw)
axvspan(0.0,cFusion, facecolor='0.5', alpha=0.5)
text(0.4, 16.2, "Not Stable 4 Defects", horizontalalignment='center')
text(1.4, 16.2, "Stable 4 Defects", horizontalalignment='center')
xlim(0.0,2.0)
xlabel('Stretch Factor C')
ylabel('Energy E')
locator_params(nbins=10)
grid(True)
legend()
# C = (20/7)*B
ax2 = ax1.twiny()
ax2.set_xlabel('Press Factor B')
ax2.set_xlim(0.0,0.7)
#grid(True)
show()
| [
"[email protected]"
] | |
dcd7a41f7881f0548c60d58a4613f7acfb721adb | a002850e2d2f6f183751f2a761d717f6ef97e4f4 | /hello/pages/migrations/0016_auto_20190928_0409.py | 4ec1cb0fd70ea854af225a8f000406ae9b7b54a1 | [] | no_license | dipak122/Hackathon_Project | 73cea235527530503c737b2e1052246b6911e202 | dba65ac0ce6c588865f5b50ccb0d989eda80c39d | refs/heads/master | 2022-12-03T19:08:56.956980 | 2021-04-16T07:17:01 | 2021-04-16T07:17:01 | 210,911,196 | 2 | 1 | null | 2022-11-22T07:38:14 | 2019-09-25T18:15:42 | JavaScript | UTF-8 | Python | false | false | 585 | py | # Generated by Django 2.2.5 on 2019-09-27 22:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0015_auto_20190928_0349'),
]
operations = [
migrations.AddField(
model_name='logtable',
name='opt',
field=models.IntegerField(default=12, max_length=5),
preserve_default=False,
),
migrations.AlterField(
model_name='logtable',
name='email',
field=models.TextField(max_length=20),
),
]
| [
"[email protected]"
] | |
6fe7f545a1be0fce9b8dd4f94b6d9a90bdcc9aa8 | abaa6a5d1bced4c01f425ed65daced74110a119a | /demos/form/forms.py | 746ade2acd739fd06c5cb6dc29a68700deab5846 | [] | no_license | bigbigrock/flaskstudy | 6fe4d16bfb903757d57915bdfa9a4fac0914b66b | 880208af82bfe672816a9424fe5aff5a1ab3e4c5 | refs/heads/master | 2023-01-30T10:33:32.023378 | 2020-12-15T12:21:19 | 2020-12-15T12:21:19 | 309,948,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,940 | py | from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,BooleanField,SubmitField,IntegerField,MultipleFileField
from wtforms.validators import DataRequired,Length,ValidationError
from flask_wtf.file import FileField,FileRequired,FileAllowed
from flask_ckeditor import CKEditorField
class LoginForm(FlaskForm):
username = StringField('Username',validators=[DataRequired()])
password = PasswordField('Password',validators=[DataRequired(),Length(8,128)])
remember = BooleanField('Remember me')
submit = SubmitField('Log in')
#Inline validator
class FortyTwoForm(FlaskForm):
answer = IntegerField('The Number')
submit = SubmitField()
def validate_answer(form,field):
if field.data != 42:
raise ValidationError("Must be 42")
#Global validator
def is_42(form,field):
if field.data !=42:
raise ValidationError('Must be 42')
class FortyTwoForm(FlaskForm):
answer = IntegerField('The Number',validators=[is_42])
submit = SubmitField()
#Example of a global validator written as a factory function
def is_42(message=None):
if message is None:
message = 'Must be 42'
def _is_42(form,field):
if field.data != 42:
raise ValidationError(message)
return _is_42
class FortyTwoForm(FlaskForm):
answer = IntegerField('The Number',validators=[is_42()])
    submit = SubmitField()
#Create the upload form
class UploadForm(FlaskForm):
photo = FileField('Upload Image',validators=[FileRequired(),FileAllowed(['jpg','jpeg','png','gif'])])
submit = SubmitField()
#Multi-file upload
class MultiUploadForm(FlaskForm):
photo = MultipleFileField('Upload Image',validators=[DataRequired()])
submit = SubmitField()
#Article form
class RichTextForm(FlaskForm):
title = StringField('title',validators=[DataRequired(),Length(1,50)])
body = CKEditorField('Body',validators=[DataRequired()])
submit = SubmitField('Publish')
| [
"[email protected]"
] | |
ab6541cecef8408ea284de51aaf882d1b5618307 | 48123b667ed75e550b83e90cf756cce84ed43f76 | /blockchain_davidcoin/Module 2 - Create a Cryptocurrency/davidcoin_node_5003.py | 5b186cfc660db2aa835998769f3252fc608b75eb | [
"MIT"
] | permissive | mrn01/Blockchain_Project | e7ada6d4ca5b1d2c5bb9895738844a6e5b470756 | c91602686331e6917482731b9ad8e2e64fbf640f | refs/heads/master | 2022-11-09T03:10:37.442224 | 2020-06-27T17:34:32 | 2020-06-27T17:34:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,517 | py | # Module 2 - Create a Cryptocurrency
# Importing the libraries
from datetime import datetime
from hashlib import sha256
from json import dumps
from flask import Flask, jsonify, request
from requests import get
from uuid import uuid4
from urllib.parse import urlparse
# Part 1 - Building a blockchain_davidcoin
class Blockchain:
def __init__(self):
self.chain = []
self.transactions = []
self.create_block(proof=1, previous_hash="0")
self.nodes = set()
def create_block(self, proof, previous_hash):
block = {
"index": len(self.chain) + 1,
"timestamp": str(datetime.now()),
"proof": proof,
"previous_hash": previous_hash,
"transactions": self.transactions
}
self.transactions = []
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
@staticmethod
def proof_of_work(previous_proof):
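        # Brute-force search for a nonce: hash (new_proof^2 - previous_proof^2)
        # and accept the first value whose SHA-256 digest starts with "0000".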
new_proof = 1
check_proof = False
while check_proof is False:
hash_operation = sha256(str(new_proof ** 2 - previous_proof ** 2).encode()).hexdigest()
if hash_operation[:4] == "0000":
check_proof = True
else:
new_proof += 1
return new_proof
@staticmethod
def hash(block):
encoded_block = dumps(block, sort_keys=True).encode()
return sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block["previous_hash"] != self.hash(previous_block):
return False
previous_proof = previous_block["proof"]
proof = block["proof"]
hash_operation = sha256(str(proof ** 2 - previous_proof ** 2).encode()).hexdigest()
if hash_operation[:4] != "0000":
return False
previous_block = block
block_index += 1
return True
def add_transactions(self, sender, receiver, amount):
self.transactions.append(
{
"sender": sender,
"receiver": receiver,
"amount": amount
}
)
previous_block = self.get_previous_block()
return previous_block["index"] + 1
def add_node(self, address):
parsed_url = urlparse(address)
self.nodes.add(parsed_url.netloc)
def replace_chain(self):
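        # Longest-chain consensus: ask every known node for its chain and
        # adopt the longest one that passes validation.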
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
response = get(f"http://{node}/get_chain")
if response.status_code == 200:
length = response.json()["length"]
chain = response.json()["chain"]
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
if longest_chain:
self.chain = longest_chain
return True
return False
# Part 2 - Mining our blockchain_davidcoin
# Creating a Web App
app = Flask(__name__)
# Creating an address for the node on Port 5003
node_address = str(uuid4()).replace("-", "")
# Creating a blockchain_davidcoin
blockchain = Blockchain()
# Mining a new block
@app.route("/mine_block", methods=["GET"])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block["proof"]
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
blockchain.add_transactions(node_address, "You", 1)
block = blockchain.create_block(proof, previous_hash)
response = {
"message": "Congratulation, you just mined a block",
"index": block["index"],
"timestamp": block["timestamp"],
"proof": block["proof"],
"previous_hash": block["previous_hash"],
"transactions": block["transactions"]
}
return jsonify(response), 200
# Getting the full blockchain_davidcoin
@app.route("/get_chain", methods=["GET"])
def get_chain():
response = {
"chain": blockchain.chain,
"length": len(blockchain.chain)
}
return jsonify(response), 200
# Checking if the blockchain_davidcoin is valid
@app.route("/is_valid", methods=["GET"])
def is_valid():
is_valid_ = blockchain.is_chain_valid(blockchain.chain)
if is_valid_:
response = {"message": "All good. The blockchain_davidcoin is valid."}
else:
response = {"message": "BRO, we've a problem. The blockchain_davidcoin isn't valid."}
return response, 200
# Adding a new transaction to the blockchain
@app.route("/add_transaction", methods=["POST"])
def add_transaction():
json = request.get_json()
transaction_keys = ["sender", "receiver", "amount"]
if not all(key in json for key in transaction_keys):
return "Some elements of the transaction are missing", 400
index = blockchain.add_transactions(json["sender"], json["receiver"], json["amount"])
response = {"message": f"This transaction will be added to Block {index}"}
return jsonify(response), 201
# Part 3 - Decentralizing our blockchain_davidcoin
# Connecting new nodes
@app.route("/connect_node", methods=["POST"])
def connect_node():
json = request.get_json()
nodes = json.get("nodes")
if nodes is None:
return "No node", 400
for node in nodes:
blockchain.add_node(node)
response = {
"message": "All the nodes are now connected. The Davidcoin blockchain_davidcoin now contains teh following nodes: ",
"total_nodes": list(blockchain.nodes),
}
return jsonify(response), 201
# Replacing the chain by the longest chain if needed
@app.route("/replace_chain", methods=["GET"])
def replace_chain():
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced:
response = {"message": "The node had different chains so the chain was replaced by the longest one.",
"new_chain": blockchain.chain}
else:
response = {"message": "All good. The chain is the largest one",
"actual_chain": blockchain.chain}
return response, 200
# Running the app
app.run(host="0.0.0.0", port=5003)
| [
"[email protected]"
] | |
7292e2f8a9ab85385480226ae620e9a019cf6abf | 5fd449b430afe4c5c05e13b3076aaae3595b9fe4 | /models/planet.py | e0955b80d9044e670a7c3f06e1bf5f01ff1e8ec9 | [] | no_license | perodriguezl/weather_calculator | ff1f542b147c8e0310da28e9ab96df043d98947e | 49786bb5da40ff83bc4e9f41e0c7cc9f85dc3b64 | refs/heads/master | 2020-03-26T07:18:37.889524 | 2018-08-22T02:24:31 | 2018-08-22T02:24:31 | 144,647,727 | 0 | 0 | null | 2018-08-22T02:27:39 | 2018-08-14T00:24:56 | Python | UTF-8 | Python | false | false | 1,264 | py |
class planet():
'''
    Planet model -- 6/6 methods expected to be testable
'''
name = None
speed = None
ratio = None
def __init__(self, name, speed, ratio):
'''
@param self:
@param name: planet name string
@param speed: speed numeric value
@param ratio: ratio numeric value
@return:
'''
self.name = name
self.speed = speed
self.ratio = ratio
def set_name(self, name):
'''
@param self:
@param: name string value
'''
self.name = name
def set_speed(self, speed):
'''
@param self:
@param: speed numeric value
'''
self.speed = speed
def set_ratio(self, ratio):
'''
@param self:
@param: ratio numeric value
'''
self.ratio = ratio
def get_name(self):
'''
@param self:
@return: name string value
'''
return self.name
def get_speed(self):
'''
@param self:
@return: speed numeric value
'''
return self.speed
def get_ratio(self):
'''
@param self:
@return: ratio numeric value
'''
return self.ratio | [
"[email protected]"
] | |
3f743c23fbdbd2148dd3da3c2781d36792943cb5 | 259a933d406fafb661272e82a1db260291ffbe5a | /ex_06.py | 0785a56c4bc089bbbb7a91c9bb34e1483e55538d | [] | no_license | jnassula/exercicios_python | 276d0825ad02fd287f249b1417e4539fb148c397 | b121ac3051a3c843fac9fabcf3c19271e663fadd | refs/heads/master | 2022-11-06T21:31:59.274761 | 2020-06-21T18:08:20 | 2020-06-21T18:08:20 | 273,950,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | valor_h = float(input("Informe o valor da hora: "))
h = int(input("Informe a quantidade de horas trabalhadas no mês: "))
salario = h * valor_h
print("O sálario desse mês é €{0:.2f}.".format(salario))
| [
"[email protected]"
] | |
acacf18f9a5bb341086503508081cc9a538e6d95 | 943322afb21045fabb6c33bb2f38d3f676ad4403 | /weppy_haml/ext.py | 0d485eb44c4d9c70095c983da9edf415f82f9eac | [
"BSD-3-Clause"
] | permissive | gi0baro/weppy-haml | 673af96899cc2ab7beba5b5085db37d8664ba10f | 1bbcd44290fcf41f9d9c1fc73622b74320c0d8e1 | refs/heads/master | 2021-01-20T10:15:38.356433 | 2017-07-17T12:51:09 | 2017-07-17T12:51:09 | 25,488,086 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,951 | py | # -*- coding: utf-8 -*-
"""
weppy_haml.ext
--------------
Provides the Haml extension for weppy
:copyright: (c) 2017 by Giovanni Barillari
:license: BSD, see LICENSE for more details.
"""
import os
import codecs
from weppy.extensions import Extension, TemplateExtension
from weppy.utils import cachedprop
from .hamlpy import Compiler
def _read_source(filepath):
with codecs.open(filepath, 'r', encoding='utf-8') as f:
rv = f.read()
return rv
def _store_compiled(filepath, code):
with codecs.open(filepath + ".html", 'w', encoding='utf-8') as dest:
dest.write(code)
class Haml(Extension):
default_config = dict(
set_as_default=False,
auto_reload=False,
preload=True
)
def on_load(self):
self.env.ext = self
self.env.mtimes = {}
self.env.builts = {}
self.env.compiler = Compiler()
self.app.add_template_extension(HamlTemplate)
if self.config.set_as_default:
self.app.template_default_extension = '.haml'
if not self.config.preload:
return
for path, dirs, files in os.walk(self.app.template_path):
for fname in files:
if os.path.splitext(fname)[1] == ".haml":
file_path = os.path.join(path, fname)
rel_path = file_path.split(self.app.template_path + "/")[1]
self._build_html(
os.path.join(path, fname),
rel_path)
@property
def changes(self):
return self.config.auto_reload or self.app.debug
def _build_html(self, file_path, fname):
source = _read_source(file_path)
code = self.env.compiler.process_lines(source.splitlines())
_store_compiled(file_path, code)
self.env.mtimes[file_path] = os.stat(file_path).st_mtime
self.env.builts[file_path] = fname + '.html'
return self.env.builts[file_path]
class HamlTemplate(TemplateExtension):
namespace = 'Haml'
file_extension = '.haml'
def is_cache_valid(self, file_path):
try:
mtime = os.stat(file_path).st_mtime
except Exception:
return False
old_time = self.env.mtimes.get(file_path, 0)
if mtime > old_time:
return False
return True
def reloader_get(self, file_path):
if self.is_cache_valid(file_path):
return self.cached_get(file_path)
return None
def cached_get(self, file_path):
return self.env.builts.get(file_path)
@cachedprop
def get_template(self):
if self.env.ext.changes:
return self.reloader_get
return self.cached_get
def preload(self, path, name):
file_path = os.path.join(path, name)
html_name = self.get_template(file_path) or self.env.ext._build_html(
file_path, name)
return path, html_name
| [
"[email protected]"
] | |
7d087a15abe85b5f42109f936590af5881f4435b | ee699abb42daa4ccaadcd1f4183527fb1439ea52 | /ex17.py | 1ba1ea213d8bdf2db204d993328ecbffcd30a1a2 | [] | no_license | PavelKabenyuk/Python-Example | 316bafaa2f729dd844940a0cb9de4201bcb72779 | a01a063f17b3c548a41801b40815350e10a90ace | refs/heads/master | 2020-07-19T02:37:52.592534 | 2019-09-05T05:02:59 | 2019-09-05T05:02:59 | 206,360,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | # ex17
from sys import argv
from os.path import exists
script, from_file, to_file = argv
print(f"Копирование данных из файла {from_file} в файл {to_file}")
in_file = open(from_file)
indata = in_file.read()
print(f"Исходный размер файла {len(indata)} байт")
print(f"Целевой файл существует? {exists(to_file)}")
print("Готов, нажимай клавишу Enter для продолжения или CTRL+C для отмены.")
input()
out_file = open(to_file, 'w')
out_file.write(indata)
print("Отлично, все сделано.")
out_file.close()
in_file.close() | [
"[email protected]"
] | |
bb71910acae04af227a92320850a5661f4c776dc | b0d616237d9eee802f70880c39141fbb620f3dfe | /ntutm/vocab/doc2vocab_count/standford_vocab.py | 0870dc2c4c4e35e13d6470ca4fc1eca8c15b6f8a | [
"MIT"
] | permissive | m516825/IR_Lab_Package | 6b25e5c92de1eeb8a9f764b66d22a42b727fb2ac | eabb30cd2898b4cdf39b514038fa42d509d67e77 | refs/heads/master | 2021-01-01T05:13:06.856099 | 2016-05-22T09:55:23 | 2016-05-22T09:55:23 | 58,849,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,512 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
import vocab
import os
import sys, getopt
from nltk.tokenize.stanford_segmenter import StanfordSegmenter
def count_em(valid_path):
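	# Count how many files live anywhere under valid_path (all subdirectories).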
x = 0
for root, dirs, files in os.walk(valid_path):
for f in files:
x = x+1
return x
if __name__ == "__main__":
#########################
inputdir = ''
outputfile = ''
data_type = ''
try:
opts, args = getopt.getopt(sys.argv[1:],"ht:d:o",["idir=","ofile="])
except getopt.GetoptError:
print 'test.py -t <datatype> -d <inputdir> -o <outputfile>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'test.py -t <datatype> -d <inputdir> -o <outputfile>'
sys.exit()
elif opt in ("-d", "--idir"):
inputdir = arg
elif opt in ("-t"):
data_type = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
if inputdir == '':
print 'test.py -t <datatype> -d <inputdir> -o <outputfile>'
sys.exit(2)
if outputfile == '':
outputfile = 'vocab.out'
#########################
segmenter = StanfordSegmenter(path_to_jar="../stanford-segmenter-2015-12-09/stanford-segmenter-3.6.0.jar", path_to_slf4j = "../stanford-segmenter-2015-12-09/slf4j-api.jar", path_to_sihan_corpora_dict="../stanford-segmenter-2015-12-09/data", path_to_model="../stanford-segmenter-2015-12-09/data/pku.gz", path_to_dict="../stanford-segmenter-2015-12-09/data/dict-chris6.ser.gz")
vocabDict = dict()
build_time = 0.
total = count_em(inputdir)
for dirPath, dirNames, fileNames in os.walk(inputdir):
if len(fileNames) > 0 :
sumContain = ''
for f in fileNames:
try:
if data_type == 'CIRB010':
root = ET.parse(dirPath+'/'+f).getroot()
date = root[0][1].text.strip()
title = root[0][2].text.strip()
text = ''
for p in root[0][3]:
text += p.text.strip()
contain = date + title + text
sumContain += contain
else:
fin = open(dirPath+'/'+f, 'r')
for line in fin.readlines():
sumContain += line.strip()
except:
a = ''
build_time += 1.
parsed_data = segmenter.segment(sumContain).split()
for w in parsed_data:
				try:
					vocabDict[w] += 1
				except KeyError:
					vocabDict[w] = 1
print >> sys.stderr, '\rdone building '+str(float("{0:.2f}".format(build_time/total*100.)))+'% vocabulary set ',
print >> sys.stderr, '\nstart dumping vocabulary set'
vocab.dumpVocabWithCount('vocab_count_wordS.out', vocabDict, key=0)
print >> sys.stderr, 'done dumping vocabulary set'
| [
"[email protected]"
] | |
22fb865c991fc1bdacb9423fa47687f5a9aef42f | c66aff7b083adee01d265d1bf487ccee3af42488 | /group/group_ug1.py | 524e51c0ca86ba2377402813699123d39b8109d3 | [] | no_license | qinlu520/stats_expample | f9e7a4deff4fbabfdbacf479848e3850960a3805 | 0bf744006f000d91566e84eeea520c4087982b6b | refs/heads/main | 2023-04-19T16:57:48.934366 | 2021-05-13T14:34:24 | 2021-05-13T14:34:24 | 360,178,956 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | import pandas as pd
import numpy as np
index = pd.date_range("1999/1/1", periods=1100)
ts = pd.Series(data=np.random.normal(0.5, 2, 1100), index=index)
# print(ts)
ts = ts.rolling(window=100, min_periods=100).mean().dropna()
print(ts.head())
| [
"[email protected]"
] | |
26d7045e30f6c04d9c96abe28e2a6fac267874d3 | bcd7ff0ebf80e61b5602d66afd8c43078b28b034 | /Capstone/Merge_LargeData.py | a197d6545e2d12959687d0322267489a96690597 | [] | no_license | prashantkolkur/UCSD | a22e57c6bb0ab90b4278486b722d90df15777ea6 | fc00f68927b850f4c3d7f6678689eb66eb0d11ad | refs/heads/master | 2020-06-21T02:15:12.872352 | 2019-07-17T07:19:14 | 2019-07-17T07:19:14 | 134,093,961 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,908 | py | # Merge LargeData
#
# After segmentation of smaller image packages this
# script will stitch the initial dataset back together
# Assumes Packages are in the subdirectories of 1fm / 3fm / 5fm
# and expects a de_augmentation_info.mat in the parent directory thereof.
#
# Runs after StartPostProcessing which merges the 16 variations
# and already removed z-padding.
#
#
# Use: Merge_LargeData ~/prediction/1fm
# expects de_augmentation_info.mat in the parent directory
#
#------------------------------------------------------------------
## NCMIR/NBCR, UCSD -- Author: M Haberl -- Date: 10/2017
#------------------------------------------------------------------
import os
import sys
import time
import json
import numpy as np
import skimage.io
from read_files_in_folder import read_files_in_folder
from PIL import Image
Image.MAX_IMAGE_PIXELS = 10000000000000
print('Starting to merge large image dataset')
if len(sys.argv) == 1:
print('Use -> Merge_LargeData ~/prediction/1fm')
exit()
else:
fm_dir = sys.argv[1]
tic = time.time()
path_separator = os.path.join(fm_dir, '')[-1]
if fm_dir[-1]==path_separator: #fixing special case which can cause error
fm_dir=fm_dir[:-1]
parent_dir = path_separator.join(fm_dir.split(path_separator)[:-1])
de_aug_file = os.path.join(parent_dir, 'de_augmentation_info.json')
print('Processing:', de_aug_file)
with open(de_aug_file, 'r') as json_file:
json_file_contents = json.load(json_file)
packages = json_file_contents['packages']
num_of_pkg = json_file_contents['num_of_pkg']
imagesize = json_file_contents['imagesize']
#zplanes = json_file_contents['zplanes']
z_blocks = json_file_contents['z_blocks']
## Merge Z-sections
# first combine images from the same x/y areas through all z-planes
print('Combining image stacks')
for x_y_num in range(1, len(packages)+1):
imcounter = 0 #Reset imagecounter to combine next Package
combined_folder = os.path.join(fm_dir, "Pkg_%03d"%(x_y_num))
os.mkdir(combined_folder)
for z_plane in range(1, len(z_blocks)):
in_folder = os.path.join(fm_dir, 'Pkg%03d_Z%02d'%(x_y_num, z_plane))
print('Reading:', in_folder)
imlist = read_files_in_folder(in_folder)[0]
imlist = [file_name for file_name in imlist if file_name.endswith('.png')]
for filenum in range(0, len(imlist)):
imcounter = imcounter + 1
in_filename = os.path.join(in_folder, imlist[filenum])
out_filename = os.path.join(combined_folder, 'segmentation_%04d.png' %(imcounter))
os.rename(in_filename, out_filename)
z_found = len([file_name for file_name in read_files_in_folder(os.path.join(fm_dir, 'Pkg_001'))[0]
if file_name.endswith('.png')])
print('Expected number of planes: %s ... Found: %s planes\n' %(str(z_blocks[-1]), str(z_found)))
## Now stitch individual sections
combined_folder = os.path.join(fm_dir, 'Pkg_%03d'%(1)) #read in the filenames of the first Pkg
filelist = read_files_in_folder(combined_folder)[0]
for z_plane in range(0, z_found): #one z-plane at a time
print('Merging image no. %s\n'%(str(z_plane)))
merger_image = np.array(np.zeros(imagesize[0:2])) #Initialize empty image in x/y 2 in z
for x_y_num in range(0, len(packages)):
packagedir = os.path.join(fm_dir, 'Pkg_%03d'%(x_y_num+1))
filename = os.path.join(packagedir, filelist[z_plane])
small_patch = skimage.io.imread(filename)
#bitdepth = single(2.^([1:16]));
#[~,idx] = min(abs(bitdepth - max(small_patch(:))));
#fprintf('Scaling %s bit image\n', num2str(idx));
#save_plane = uint8((255 /bitdepth(idx))*combined_plane);
#small_patch = single((255 /bitdepth(idx))*small_patch);
#small_patch = single((255 /max(small_patch(:)))*small_patch);
area = packages[x_y_num]
if len(packages)>1:
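            # The packages appear to have been cut with a 12-pixel overlap
            # border; trim it on interior edges so neighbouring patches butt
            # together, but keep it where a patch touches the image boundary.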
corners = [area[0]+12, area[1]-12, area[2]+12, area[3]-12]
if area[0]==0:
corners[0] = 0
if area[1]==np.shape(merger_image)[0]:
corners[1] = np.shape(merger_image)[0]
if area[2]==0:
corners[2] = 0
if area[3]==np.shape(merger_image)[1]:
corners[3] = np.shape(merger_image)[1]
if corners[1]>np.shape(merger_image)[0]:
corners[1] = np.shape(merger_image)[0]
if corners[3]>np.shape(merger_image)[1]:
corners[3] = np.shape(merger_image)[1]
insertsize = [corners[1]-corners[0], corners[3]-corners[2]]
merger_image[corners[0]:corners[1], corners[2]:corners[3]] = small_patch[12:insertsize[0]+12, 12:insertsize[1]+12]
else: #if there is only one package
start = [0, 0]
if imagesize[0]<=1012: #define where the image has been padded
start[0] = 12
else:
start[0] = 0
if imagesize[1]<=1012: #define where the image has been padded
start[1] = 12;
else:
start[1] = 0;
#clear merger_image;
merger_image = small_patch[start[0]:(imagesize[0]+start[0]), start[1]:(imagesize[1]+start[1])]
bitdepth = [2**i for i in range(1, 17)]
#print('Scaling %s bit image\n' %(num2str(idx)))
idx = abs(np.array(bitdepth) - max(merger_image.flatten())).argmin()
save_plane = np.uint8(np.round((255.0 / bitdepth[idx])*merger_image))
outfile = os.path.join(fm_dir, 'Segmented_%04d.png' %(z_plane+1))
#print('Saving image %s\n' %(outfile))
try:
skimage.io.imsave(outfile, save_plane, as_grey=True)
except:
skimage.io.imsave(outfile, save_plane)
print('Merging large image dataset completed')
print("Total time = ", time.time()-tic)
print('Your results are in: %s\n' %(fm_dir))
with open(os.path.join(fm_dir, "DONE"), "w") as done_file:
done_file.write("0\n")
| [
"[email protected]"
] | |
117bd3a2e1b64dd036264d6602970ec73d177027 | f2befaae3840bafd181cc712108e3b64caf2696f | /app/portal/horizon/openstack_dashboard/contrib/developer/profiler/urls.py | 78549a3554efe7781a1b9a5f6429ee2ab6cfa203 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | F5Networks/f5-adcaas-openstack | 17d5c408d421dcfe542002e1f850b2d9f29f1663 | 02bd8a606215c0fa08b926bac1b092b5e8b278df | refs/heads/master | 2023-08-28T12:09:54.972191 | 2022-08-12T02:03:43 | 2022-08-12T02:03:43 | 164,592,273 | 4 | 23 | Apache-2.0 | 2022-08-12T02:03:44 | 2019-01-08T07:40:35 | Python | UTF-8 | Python | false | false | 806 | py | # Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.contrib.developer.profiler import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
]
| [
"[email protected]"
] | |
3b7da18a374ec699d64dd6dcd89b452ffaab245d | 0b38252762c708ca2e696d671cb8dc36be39a307 | /P75AssignmentProblem/AssignmentProblem_pulp.py | 335b4485496782507c3f8a1e40faa8c801fd4392 | [] | no_license | 84monta/OR | edc8266fe1f8b0d99afd170c0c00eb6a870b2e2c | 23378e06cb4befda81b67336ecc20723d84bddc6 | refs/heads/master | 2023-02-22T02:59:27.314837 | 2021-01-28T09:47:36 | 2021-01-28T09:47:36 | 290,365,152 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,587 | py | import pulp
import numpy as np
import random
from itertools import product
import pandas as pd
# Seed the RNGs so that runs can be compared under the same conditions
random.seed(1)
np.random.seed(1)
# Number of jobs m, number of agents n
m=10
n=5
# Maximum job size (for tuning)
JOB_SIZE=10
# Resource requirement a[i][j] of job j when done by agent i
a = np.random.randint(2,JOB_SIZE,size=(n,m))
# Available resource capacity b[i] of each agent
b = np.random.randint(3,JOB_SIZE*2,size=n)
# Cost c[i][j] of assigning job j to agent i
c = np.random.randint(1,10,size=(n,m))
################################################################################
##### Solve with PuLP
p = pulp.LpProblem("AssignmentProblem")
x = pulp.LpVariable.dict("x",indexs=(range(n),range(m)),lowBound=0,upBound=1,cat=pulp.LpBinary)
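# x[(i, j)] is a binary decision variable: 1 if job j is assigned to agent i, 0 otherwise.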
# Define the objective function (total assignment cost)
p += pulp.lpSum([x[(i,j)]*c[i,j] for i,j in product(range(n),range(m))])
# Each agent's assigned jobs must not exceed its available resources
for i in range(n):
p += pulp.lpSum([x[(i,j)]*a[i,j] for j in range(m)]) <= b[i]
# Every job must be assigned to exactly one agent
for j in range(m):
p += pulp.lpSum([x[(i,j)] for i in range(n)]) == 1
p.solve()
# If the solution is optimal, display the result
if p.status == 1:
print("Optimization Result by Pulp")
cols = []
assigned_agents=[]
for j in range(m):
cols.append(f"JOB{j}")
assigned_agents.append(int(sum(i*x[(i,j)].value() for i in range(n))))
df = pd.DataFrame([assigned_agents],columns=cols,index=["result"])
print(df)
print(f"Value = {pulp.value(p.objective)}")
elif p.status == -1:
print("実行不能解")
exit(0) | [
"[email protected]"
] | |
f491af9b118e0a51af1aa743cb5dd99057a5baec | fdbb74a95924e2677466614f6ab6e2bb13b2a95a | /third_party/python/Tools/scripts/find-uname.py | b6ec1b6d79060cfa6705dab7dfe4c258da21d304 | [
"Python-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"ISC"
] | permissive | jart/cosmopolitan | fb11b5658939023977060a7c6c71a74093d9cb44 | 0d748ad58e1063dd1f8560f18a0c75293b9415b7 | refs/heads/master | 2023-09-06T09:17:29.303607 | 2023-09-02T03:49:13 | 2023-09-02T03:50:18 | 272,457,606 | 11,887 | 435 | ISC | 2023-09-14T17:47:58 | 2020-06-15T14:16:13 | C | UTF-8 | Python | false | false | 1,207 | py | #!/usr/bin/env python3
"""
For each argument on the command line, look for it in the set of all Unicode
names. Arguments are treated as case-insensitive regular expressions, e.g.:
% find-uname 'small letter a$' 'horizontal line'
*** small letter a$ matches ***
LATIN SMALL LETTER A (97)
COMBINING LATIN SMALL LETTER A (867)
CYRILLIC SMALL LETTER A (1072)
PARENTHESIZED LATIN SMALL LETTER A (9372)
CIRCLED LATIN SMALL LETTER A (9424)
FULLWIDTH LATIN SMALL LETTER A (65345)
*** horizontal line matches ***
HORIZONTAL LINE EXTENSION (9135)
"""
import unicodedata
import sys
import re
def main(args):
unicode_names = []
for ix in range(sys.maxunicode+1):
try:
unicode_names.append((ix, unicodedata.name(chr(ix))))
except ValueError: # no name for the character
pass
for arg in args:
pat = re.compile(arg, re.I)
matches = [(y,x) for (x,y) in unicode_names
if pat.search(y) is not None]
if matches:
print("***", arg, "matches", "***")
for match in matches:
print("%s (%d)" % match)
if __name__ == "__main__":
main(sys.argv[1:])
| [
"[email protected]"
] | |
0c208e72fe41124ae5ee5ec5bb3df1ed79c49f3f | 211d73361b7f67f75a1cb77083a775fc0b219325 | /matrixportal/code.py | 2188f9b9997fc1ae13eb0adb16566e6371bc46b3 | [] | no_license | georgeloyer/pm25 | b1f583be47e3fc11aacfe4835e414cf52f36dfc0 | 73c157e40fa9fa4a00d4bc451590aaad744f144f | refs/heads/master | 2023-01-24T00:17:24.999194 | 2020-12-06T00:57:15 | 2020-12-06T00:57:15 | 300,921,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,199 | py | # Purple Air AQI Display
# for Metro M4 Airlift with RGB Matrix Shield
# or Matrix Portal
# and 64 x 32 RGB LED Matrix
import time
import board
import terminalio
import busio
from adafruit_pm25.i2c import PM25_I2C
from adafruit_matrixportal.matrixportal import MatrixPortal
def aqi_transform(val):
aqi = pm_to_aqi(val) # derive Air Quality Index from Particulate Matter 2.5 value
return "AQI:%d" % aqi
def message_transform(val): # picks message based on thresholds
index = aqi_to_list_index(pm_to_aqi(val))
messages = (
"Hazardous",
"Very Unhealthy",
"Unhealthy",
"Unhealthy for Sensitive Groups",
"Moderate",
"Good",
)
if index is not None:
return messages[index]
return "Unknown"
SENSOR_ID = 69897 # Ashbury Terrace, SF
SENSOR_REFRESH_PERIOD = 30 # seconds
DATA_SOURCE = "https://www.purpleair.com/json?show=" + str(SENSOR_ID)
SCROLL_DELAY = 0.02
DATA_LOCATION = ["results", 0, "PM2_5Value"] # navigate the JSON response
# --- Display setup ---
matrixportal = MatrixPortal(
status_neopixel=board.NEOPIXEL,
debug=True,
url=DATA_SOURCE,
json_path=(DATA_LOCATION, DATA_LOCATION),
)
# Create a static label to show AQI
matrixportal.add_text(
text_font=terminalio.FONT,
text_position=(0, 7),
text_transform=aqi_transform,
)
# Create a scrolling label to show level message
matrixportal.add_text(
text_font=terminalio.FONT,
text_position=(0, 23),
scrolling=True,
text_transform=message_transform,
)
# pylint: disable=too-many-return-statements
def aqi_to_list_index(aqi):
aqi_groups = (301, 201, 151, 101, 51, 0)
for index, group in enumerate(aqi_groups):
if aqi >= group:
return index
return None
def calculate_aqi(Cp, Ih, Il, BPh, BPl): # wikipedia.org/wiki/Air_quality_index#Computing_the_AQI
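    # Linear interpolation of concentration Cp between the breakpoints (BPl, BPh)
    # onto the index range (Il, Ih); e.g. PM2.5 = 40.0 sits in the 35.5-55.4 band,
    # so round((150 - 101) / (55.4 - 35.5) * (40.0 - 35.5) + 101) = 112.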
return round(((Ih - Il)/(BPh - BPl)) * (Cp - BPl) + Il)
def pm_to_aqi(pm):
pm = float(pm)
if pm < 0:
return pm
if pm > 1000:
return 1000
if pm > 350.5:
return calculate_aqi(pm, 500, 401, 500, 350.5)
elif pm > 250.5:
return calculate_aqi(pm, 400, 301, 350.4, 250.5)
elif pm > 150.5:
return calculate_aqi(pm, 300, 201, 250.4, 150.5)
elif pm > 55.5:
return calculate_aqi(pm, 200, 151, 150.4, 55.5)
elif pm > 35.5:
return calculate_aqi(pm, 150, 101, 55.4, 35.5)
elif pm > 12.1:
return calculate_aqi(pm, 100, 51, 35.4, 12.1)
elif pm >= 0:
return calculate_aqi(pm, 50, 0, 12, 0)
else:
return None
def get_color(aqi):
index = aqi_to_list_index(aqi)
colors = (
(115, 20, 37),
(140, 26, 75),
(234, 51, 36),
(239, 133, 51),
(255, 255, 85),
(104, 225, 67),
)
if index is not None:
return colors[index]
return (150, 150, 150)
sensor_refresh = None
reset_pin = None
# If you have a GPIO, its not a bad idea to connect it to the RESET pin
# reset_pin = DigitalInOut(board.G0)
# reset_pin.direction = Direction.OUTPUT
# reset_pin.value = False
# Create library object, use 'slow' 100KHz frequency!
i2c = busio.I2C(board.SCL, board.SDA, frequency=100000)
# Connect to a PM2.5 sensor over I2C
pm25 = PM25_I2C(i2c, reset_pin)
print("Found PM2.5 sensor, reading data...")
while True:
if (not sensor_refresh) or (time.monotonic() - sensor_refresh) > SENSOR_REFRESH_PERIOD:
try:
value = matrixportal.fetch()
print("PurpleAir response is", value[0])
matrixportal.set_text_color(get_color(pm_to_aqi(value[0])))
sensor_refresh = time.monotonic()
except RuntimeError as e:
print("Unable to read from PurpleAir, retrying...", e)
continue
try:
aqdata = pm25.read()
print("Local PlanTower PM2.5 reading is", aqdata["particles 25um"])
matrixportal.set_text(aqi_transform(value[0]) + " " + str(pm_to_aqi(aqdata["particles 25um"])), 0)
except RuntimeError as e:
print("Unable to read from local sensor, retrying...", e)
continue
# Scroll it
matrixportal.scroll_text(SCROLL_DELAY) | [
"[email protected]"
] | |
75d39712832c4b0a6402ab7afe9f40d275e8ff25 | 6006480f9a0442143022dccb4241e61dbee71c49 | /chapter3/section3.1/contact.py | db53b089506140a53c878d65b818bab201cfae22 | [] | no_license | riffschelder/train.usaco.org | 2f5c1e177bcb00853b599065dc1bef4c56fe19c4 | 28cab6d80d96f00683b0bb23f47ce7d3c47ead0c | refs/heads/master | 2022-12-20T18:06:31.096633 | 2020-09-27T20:56:55 | 2020-09-27T20:56:55 | 275,011,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py | """
ID: riff.sc1
LANG: PYTHON3
TASK: contact
"""
from collections import defaultdict
def main():
fin = open('contact.in')
(low, high, amount_to_print) = [int(x) for x in fin.readline().split()]
last_pattern = ['' for _ in range(13)] # 0..12 inclusive
frequency_of = defaultdict(int) # frequency_of[pattern]
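    # Slide a window of every length in [low, high] across the whole bit stream
    # (line breaks ignored) and count how many times each pattern appears.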
while True:
line = fin.readline().strip()
if not line:
break
for char in line:
if char not in '01': # Skip '\n', etc.
continue
for i in range(1, 13): # 1..12 inclusive
if low <= i <= high: # could have used range(low, high+1), in hindsight
last_pattern[i] = new_pattern(last_pattern[i], i, char)
record_pattern(last_pattern[i], i, frequency_of)
patterns_at = defaultdict(list) # patterns_at[frequency]
just_frequencies = set()
for pattern, frequency in frequency_of.items():
patterns_at[frequency].append(pattern)
just_frequencies.add(frequency)
just_frequencies = list(just_frequencies)
just_frequencies.sort(reverse=True)
print_answer(patterns_at, just_frequencies, amount_to_print)
def new_pattern(pattern, length, new_char):
pattern = pattern + new_char
if len(pattern) > length:
# should only be over by 1
pattern = pattern[1:]
return pattern
def record_pattern(pattern, length, frequency_of):
if len(pattern) == length:
frequency_of[pattern] += 1
def print_answer(patterns_at, sorted_frequencies, amount_to_print):
with open('contact.out', 'w') as fout:
for i in range(min(amount_to_print, len(sorted_frequencies))):
frequency = sorted_frequencies[i]
fout.write(f'{frequency}\n')
patterns = patterns_at[frequency]
patterns.sort(key=length_first)
count = 0
for pattern in patterns[:-1]:
fout.write(f'{pattern}')
count += 1
if count < 6:
fout.write(' ')
else:
count = 0
fout.write('\n')
fout.write(f'{patterns[-1]}\n')
def length_first(pattern):
return (len(pattern), pattern)
main() | [
"[email protected]"
] | |
b9045576fb2eb6b8ea0bfaff18a617b57d215c9c | c24fa89450cccb48fcd481c3cfa475ee0e412e09 | /UnitTests/rbfUnitTest.py | 82e7e1a5252bc38d63310a3ab622216d36bb4dbb | [] | no_license | PhoenixYanrongLi/CareEcoSystem_ServerCodeNew | e95d1c552cdcc70aac09482dfda63e253e01fcb0 | b627484694863c425483a04391eedc2ec2ec1098 | refs/heads/master | 2021-01-01T04:34:51.858543 | 2016-04-14T17:57:30 | 2016-04-14T17:57:30 | 56,258,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,869 | py | """
rbfUnitTest.py is a collection of unit tests that validate Matlab algorithms ported over to Python have the
same behavior
rbfTester is a class that contains all of the unit tests.
setUp imports test data and assigns them to variables.
TtoN is a test that verifies it can find 1s in a matrix.
"""
__author__ = "Bradley Zylstra"
__version__ = "1.0"
__maintainer__ = "Bradley Zylstra"
__email__ = "[email protected]"
__status__ = "Development"
import unittest
import scipy.io
import numpy as np
import numpy
from InHomeMonitoringCode.rbfMain import TtoN,rbfnn_raw,rMeans, pnnHuer,radial,decide,initialize
class rbfTester(unittest.TestCase):
def setUp(self):
self.rkmeansInTestData = scipy.io.loadmat(
'C:\Users\Brad\Desktop\Programming\InHomeMonitoring\PythonServerCode\UnitTests\\testData\\rkmeansInTestData.mat')
self.rkmeansOutTestData = scipy.io.loadmat(
'C:\Users\Brad\Desktop\Programming\InHomeMonitoring\PythonServerCode\UnitTests\\testData\\rkmeansOutTestData.mat')
self.exampleKmeansData = scipy.io.loadmat(
'C:\Users\Brad\Desktop\Programming\InHomeMonitoring\PythonServerCode\UnitTests\\testData\\seeds.mat')
#self.examplePnnData=scipy.io.loadmat('C:\Users\Brad\Desktop\Programming\InHomeMonitoring\PythonServerCode\UnitTests\\testData\\')
self.testData=scipy.io.loadmat('C:\Users\Brad\Desktop\RSSI Localization with path resolution - Jul 9 2014\MATLAB\exportedData.mat')
#self.X = self.rkmeansInTestData['X']
self.R=self.testData['R']
self.X=self.testData['X']
self.ET=self.testData['Et']
self.Y=self.testData['Y']
self.L=self.testData['L']
#self.X,self.R=initialize(self.X,self.R,self.ET,self.Y,self.L)
#print(self.X)
self.k = self.rkmeansInTestData['k']
#self.Y=self.exampleKmeansData['Y']
self.Stest = self.rkmeansOutTestData['S']
self.Ctest = self.rkmeansOutTestData['C']
self.seeds = self.exampleKmeansData['Seeds']
#self.C, self.S = rMeans(self.seeds, self.X)
#self.B = pnnHuer(self.Ctest, self.k[0][0]-1)
#self.G=radial(self.X,self.k[0][0],self.Ctest,self.B)
#self.V=numpy.dot(numpy.linalg.pinv(self.G),self.Y)
#self.That=numpy.dot(radial(self.R,33.0,self.C,self.B),self.V)
#self.Yhat=decide(self.That)
#self.Yhat=rbfnn_raw(self.R,self.X,self.Y,self.k,self.seeds)
#print self.Yhat
initialize('C:\Users\Brad\Desktop\Programming\InHomeMonitoring\PythonServerCode\UnitTests\\testData\\exportedData.mat')
#def tests(self):
# self.assertEqual(1, 1)
def testTtoN(self):
AF=numpy.zeros((3,3))
AF[0,1]=1
AF[1,2]=1
AF[2,0]=1
print AF
print TtoN(AF)
if __name__ == '__main__':
runner = unittest.main()
#unittest.main() | [
"[email protected]"
] | |
eeb4a44ac5937539eae4d6286c811973d57f7cf4 | eb8d1b878ea214a135df5cc89b8a8efe14b9010a | /Baskets/__init__.py | 8c574be477d48a4259da9f6b93cae9ecf0d10c5a | [] | no_license | Firexd2/OnlineMarket | 099d7ebb303c224ea081159fe4afc9e11bdc65c6 | 5af3ef8665e4ad61d02891d5ec6fcbe81809c789 | refs/heads/master | 2021-09-16T06:03:59.258112 | 2018-06-17T19:02:22 | 2018-06-17T19:02:22 | 115,608,183 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | default_app_config = "Baskets.apps.BasketsConfig" | [
"[email protected]"
] | |
1bb9a75f02fb42a43cefc37c6be027f711841964 | 42fbb1295dfe0eea84507a71bbf8a57107626511 | /test/test_error_response.py | bfb2043ae20e7d6040b1a2edb13bde81e3c240f2 | [] | no_license | joeynebula/ynab_pie | bd6642bcd747869630e837fb02df808e16e0b9cc | c6d3c73309b64d8df4bd70fa9892a2edf22599a9 | refs/heads/master | 2020-03-28T04:48:22.756956 | 2018-09-07T14:38:34 | 2018-09-07T14:38:34 | 147,737,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | # coding: utf-8
"""
YNAB API Endpoints
Our API uses a REST based design, leverages the JSON data format, and relies upon HTTPS for transport. We respond with meaningful HTTP response codes and if an error occurs, we include error details in the response body. API Documentation is at https://api.youneedabudget.com # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import ynab_pie
from ynab_pie.models.error_response import ErrorResponse # noqa: E501
from ynab_pie.rest import ApiException
class TestErrorResponse(unittest.TestCase):
"""ErrorResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testErrorResponse(self):
"""Test ErrorResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = ynab_pie.models.error_response.ErrorResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
3ed51e8fad3f739ea3d00caaa0b669d8d1d22a02 | 2e9e51d88e6969dd1ca298accda219f4331591b6 | /14_LongestCommonPrefix.py | 1e1d3cbe125d3b771c20a2d6598fd43d09d12ea8 | [] | no_license | juzen2003/LeetCode-python-practice | 88474325868df8829c808ca6897d576959b01c37 | 7d5610667f907d9960f3ea05302fc86451cb0a51 | refs/heads/master | 2021-01-12T01:22:58.105303 | 2017-01-16T08:08:10 | 2017-01-16T08:08:10 | 78,379,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | # LeetCode #14 Longest Common Prefix
# Dave Chang
# 2017/01/02
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
#empty list
if strs == []:
return ""
temp = ""
s = strs[0]
minLength = len(s)
isCommon = True
# get the minimum length of string to reduce the search
for item in strs:
minLength = min(minLength, len(item))
if minLength == 0:
return ""
for i in xrange(0, minLength):
for item in strs:
if s[i] == item[i]:
continue
else:
isCommon = False
if isCommon:
temp += s[i]
        return temp
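# Example (hypothetical): Solution().longestCommonPrefix(["flower", "flow", "flight"]) -> "fl"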
| [
"[email protected]"
] | |
48768834b1fef2caf84e039e98d955323a38b991 | 5f300418ce1605eb41276b0a9a79fe5f46fa8198 | /users/schema.py | 8d340c5905761a66ecfbc65fac423793a88a2f5a | [] | no_license | Academia-MagicKode/FastAPI | daaeea85152717a484a32309acf77be92435b53d | 139942870a5ee76a1e29bcbfb5d1262af0b2a832 | refs/heads/master | 2023-05-09T02:03:45.804484 | 2021-05-29T14:37:47 | 2021-05-29T14:37:47 | 371,967,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from pydantic import BaseModel
from typing import Optional
class UserSchema(BaseModel):
username: str
email: str
password: str
full_name: Optional[str] = None
class UserShowSchema(BaseModel):
id:int
username:str
email:str
class Config():
orm_mode= True
class TokenSchema(BaseModel):
access_token:str
token_type:str
class TokenData(BaseModel):
username: Optional[str] = None
| [
"[email protected]"
] | |
86cd6ba4970b1c67cc4bfb02b0ea766a92fceb11 | 6b6308031cb70672edbf69e753b175a66bc80940 | /hel.py | e3c10d3a699db959e3ee55157ab194618f5da51a | [] | no_license | mposypkin/papmanager | bd30a37d9ffde03ded3d88e43442459af8c9cb52 | e5b76d2af52c0a475332b68570489cea0395267a | refs/heads/master | 2021-05-06T05:28:40.116982 | 2018-02-17T21:02:07 | 2018-02-17T21:02:07 | 115,090,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | from tkinter import filedialog
from tkinter import *
import json
import bibprint
root = Tk()
name = filedialog.askopenfilename(initialdir="/", title="Select file",
filetypes= (("json files","*.json"),("all files","*.*")))
print(name)
s = ""
with open(name) as f:
s = f.read();
#print(s)
parsed = json.loads(s)
#print(parsed)
contrs = parsed['contributions']
i = 57
for contr in contrs:
i = i + 1
#prn = bibprint.printForDissSovetRinc(contr, i)
# prn = bibprint.printForDissSovetBases(contr, i)
# bibprint.printForMiet(contr, i)
prn = bibprint.printForGost(contr, i)
#bibprint.printJSON(contr)
if prn:
print(prn)
#for aut in auth:
# fst = aut['1st']
# snd = aut['2nd']
# print(fst[0] + ". " + snd)
#weird_json = '{"x": 1, "y": 2, "z": 3}'
#json.loads(weird_json)
| [
"[email protected]"
] | |
4e19590238f7ad5a0154a3fcd149292e9009c141 | 301e7bb7f758dcb97fff090aae92226c75f5ddb1 | /MNIST/mnist.py | 86cb4cfa6afee06f5ece3557fee3e85d5a22d021 | [] | no_license | Ionnia/ML-Python | 860f1c3dc055c292b5330c6cb5ae46dbfa5509f6 | d1fff39d2945b77dfb8d71ce3f9a997fe1331b37 | refs/heads/master | 2020-03-24T12:30:48.925698 | 2018-08-06T01:01:24 | 2018-08-06T01:01:24 | 142,716,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | import idx_decompressor as idxd
import download_mnist as dm
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.utils import to_categorical
import numpy as np
# Downloading and extracting mnist dataset
dm.get_mnist()
train_images = idxd.idx_decompress('train-images-idx3-ubyte')
train_labels = idxd.idx_decompress('train-labels-idx1-ubyte')
test_images = idxd.idx_decompress('t10k-images-idx3-ubyte')
test_labels = idxd.idx_decompress('t10k-labels-idx1-ubyte')
train_images = np.array(train_images, dtype=np.float32)
train_labels = np.array(train_labels, dtype=np.float32)
test_images = np.array(test_images, dtype=np.float32)
test_labels = np.array(test_labels, dtype=np.float32)
train_images = train_images.reshape((60000, 28, 28, 1))
test_images = test_images.reshape((10000, 28, 28, 1))
train_labels = to_categorical(train_labels, 10)
test_labels = to_categorical(test_labels, 10)
# Normalizing images
train_images = train_images/255
test_images = test_images/255
NUM_OF_EPOCHS = 10
BATCH_SIZE = 128
# Creating model
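# Rough shape walk-through (assuming 'valid' padding): 28x28x1 -> conv3x3 -> 26x26x16 -> pool
# -> 13x13x16 -> conv3x3 -> 11x11x16 -> pool -> 5x5x16 -> flatten -> 400 -> Dense(128) -> Dense(10)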
model = Sequential()
model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(16, (3, 3), activation='relu'))
model.add(MaxPooling2D(2, 2))
model.add(Flatten())
model.add(Dense(units=128, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(units=10, activation='softmax'))
model.summary()
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x=train_images, y=train_labels, batch_size=BATCH_SIZE, epochs=NUM_OF_EPOCHS,
validation_data=(test_images, test_labels))
model.save('mnist_model.h5')
| [
"[email protected]"
] | |
4221d825e44541bbd9aefe57229a3c8124657c89 | 880d75590e82c74c05616aba471fc63b624dffb5 | /Workshop 5/Excercise_page_149/excercise_01_page_149.py | a7c6147050452128be9a5b4a498876ae19f30069 | [] | no_license | Scaletts/Python | 7591a6ae29e7d8a77072ebe5b8af597d40612b78 | d55a7ab0f8824178aae0e43b4e1b78785773fa1e | refs/heads/master | 2023-08-18T12:34:39.900662 | 2021-10-17T16:19:41 | 2021-10-17T16:19:41 | 402,992,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | """
Author: DuongTruongTho
Date: 09/08/2021
Program: Exersice_01_page_149.py
Problem:
What roles do the parameters and the return statement play in a function definition?
Solution:
        Parameters are the names listed in the function definition; they receive the values of the
        arguments supplied at the call site and act as local variables inside the function body.
        The return statement terminates the execution of a function and returns control to the calling function.
        Execution resumes in the calling function at the point immediately following the call.
        A return statement can also return a value to the calling function.
""" | [
"[email protected]"
] | |
bdd93a2261bd97e02005e332392751213a22294c | 42b799b2ff8283511472b76152a3ca70af445ebd | /Election-Analysis/python_practice.py | e6f102627bbdc95cc7c5460a08a3b849c5810aa4 | [] | no_license | ducluu27/Election-Analysis | 3173a1384cfd784db8bda5dec52bd48a65eeda3c | 68e16c3dd02d29e9eecd22f78c9560c2ce83f48b | refs/heads/master | 2022-12-04T21:36:54.057932 | 2020-08-16T19:26:36 | 2020-08-16T19:26:36 | 286,522,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68 | py | counties= ["arapahoe",'denver',"jefferson"]
counties_dict ={}
| [
"[email protected]"
] | |
4fc8d5244e37373fb0ec5effd26f848f88a4864d | 6b5fd328e3aa38acc6ef0b97a8e1e253a42ee385 | /supervised_learning/0x11-attention/10-transformer_decoder.py | a0e62a79e62a999313bab85074fc02f120ef3bd7 | [] | no_license | oscarmrt/holbertonschool-machine_learning | c43251b74b16c50b8ee1853f0644cf80af055d2b | b1d0995023630f2a2b7ed953983c405077c0d5a8 | refs/heads/master | 2023-04-02T23:51:44.545003 | 2021-03-31T17:19:35 | 2021-03-31T17:19:35 | 280,184,789 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,391 | py | #!/usr/bin/env python3
"""Class Decoder"""
import tensorflow as tf
positional_encoding = __import__('4-positional_encoding').positional_encoding
DecoderBlock = __import__('8-transformer_decoder_block').DecoderBlock
class Decoder(tf.keras.layers.Layer):
"""class Decoder"""
def __init__(self, N, dm, h, hidden, target_vocab, max_seq_len,
drop_rate=0.1):
"""Class constructor"""
super(Decoder, self).__init__()
self.N = N
self.dm = dm
self.embedding = tf.keras.layers.Embedding(target_vocab, dm)
self.positional_encoding = positional_encoding(max_seq_len, dm)
self.blocks = [DecoderBlock(dm, h, hidden, drop_rate)
for _ in range(N)]
self.dropout = tf.keras.layers.Dropout(drop_rate)
def call(self, x, encoder_output, training, look_ahead_mask, padding_mask):
"""Public instance method that returns a tensor of shape
(batch, target_seq_len, dm) containing the decoder output"""
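        # x holds (batch, target_seq_len) token ids: embed, scale by sqrt(dm),
        # add the positional encoding for the first seq_len positions, apply
        # dropout, then run the N decoder blocks against the encoder output.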
seq_len = x.shape[1]
x = self.embedding(x)
x *= tf.math.sqrt(tf.cast(self.dm, tf.float32))
x += self.positional_encoding[:seq_len]
x = self.dropout(x, training=training)
for i in range(self.N):
x = self.blocks[i](x, encoder_output, training,
look_ahead_mask, padding_mask)
return x
| [
"[email protected]"
] | |
d2fd7a84f08b247847499681ef0c54a84674c851 | c67c2d26b71638455278149a3771949659cf96b2 | /led-nod.py~ | c1f8f673ac3116274da75e4dcaee88782ffb50d8 | [] | no_license | rafitzadik/rpimove | 71706590367217be865307002af221837827d953 | 3ef53b611c97cc69578477a55feaf01ad2a8c41c | refs/heads/master | 2021-01-23T02:59:10.941143 | 2015-10-29T17:13:36 | 2015-10-29T17:13:36 | 37,815,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | #!/usr/bin/env python
from gopigo import *
import sys
import atexit
atexit.register(stop)
for count in range(0,3):
led_on(1)
led_on(0)
time.sleep(0.2)
led_off(1)
led_off(0)
time.sleep(0.2)
| [
"[email protected]"
] | ||
863f8f1e4c02e3a6d287a9f3eb7ceadbe731f4d8 | 56207908c6681d3c23e4385d37c560c5bee17664 | /Util/GetKey.py | 3e8858cd82b02ae27a4b432f01d69a1a36ccd70d | [] | no_license | idealegg/Get_Vip_Vedio | 75642459be6eabe6033791995e4724c4685729e7 | 2d57468d7d5a1aa634ecb814d7fbc7f9072e8a9f | refs/heads/master | 2022-10-24T13:23:33.274444 | 2022-10-04T17:35:57 | 2022-10-04T17:35:57 | 164,870,540 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,531 | py | # -*- coding:utf-8 -*-
import requests
import re
def GetKey(url, addr, host, ref):
ret = ""
textmod={ "url": addr,
}
header_dict = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip,deflate",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
"Host": host,
"Referer": "%s%s" % (ref, addr),
"User-Agent": "Mozilla/5.0(Windows NT 6.1; Win64; x64) AppleWebKit/537.36(KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
"Upgrade-Insecure-Requests": "1"
}
req = requests.get(url=url, params=textmod, headers=header_dict)
print req.encoding
print req.headers
print req.reason
print req.content
res = re.search("eval\(\"(.*?)\"\);", req.content)
if not res:
print "GetKey error: %s\n" % addr
else:
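        # The page embeds the key as an eval("...") payload of \xNN escapes:
        # decode each hex byte to a character, then strip what looks like a
        # fixed wrapper (17 characters at the front, 3 at the end).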
ret = "".join(map(lambda x: chr(int(x, 16)), res.group(1).split('\\x')[1:]))[17:-3]
print 'GetKey: key[%s]' % ret
req.close()
return ret
if __name__ == "__main__":
#print GetKey(url='http://all.baiyug.cn:2021/vip_all/index.php',
# addr='https://www.iqiyi.com/v_19rrok775g.html?vfm=2008_aldbd',
# host='all.baiyug.cn:2021',
# ref='http://app.baiyug.cn:2019/vip/iqiyi.php?url=')
print GetKey(url='http://www.1717yun.com/1717yun/',
addr='https://www.iqiyi.com/v_19rrf3hzfs.html?vfm=2008_aldbd',
host='www.1717yun.com',
ref='http://www.1717yun.com/jx/ty.php?url=&url=')# | [
"[email protected]"
] | |
6e725fc04ccc427d7f62de141bd77b5808e7d874 | 6d31e7b62a9a6438d648becf8708171e2773c49c | /day/migrations/0005_auto_20160911_1719.py | 8aa67e314549f3e44622db9aa2f5747b64a42de9 | [] | no_license | jakeseaton/fop | 74d6e27f3afe9b3652873ca5a987e534f60cc392 | 8d21ab8bfb5916311bebabb267298933c0774bf5 | refs/heads/master | 2020-08-04T16:06:29.365540 | 2016-09-12T01:51:48 | 2016-09-12T01:51:48 | 67,883,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-11 21:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('day', '0004_day_day_type'),
]
operations = [
migrations.AddField(
model_name='day',
name='date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='day',
name='is_national_forest',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] | |
f10e035392a68b9897e6aa8a80b0395349d1d82a | bd0fb69648ff4516e8e1ac3840509d22c5e4e6fa | /二叉树/PathSum.py | 3c939ca21d8224a318b9313e770db0c83b73597d | [] | no_license | takenmore/Leetcode_record | d523b997f7e1b80e1841f007e48b6ed84b38e6c5 | 7ebe6f3a373403125549346c49a08f9c554dafac | refs/heads/master | 2022-12-02T04:50:02.000415 | 2020-08-08T05:45:20 | 2020-08-08T05:45:20 | 262,273,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
from typing import List
'''
Given a binary tree and a target sum, find all root-to-leaf paths whose node
values add up to the given target.
hasPathSum -> only decides whether such a path exists
pathSum -> also records the paths themselves.
'''
'''
Two DFS problems (the path recording borrows the backtracking idea).
'''
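# Approach: plain depth-first search. pathSum keeps one shared `path` list,
# appending on the way down and popping after both subtrees are explored
# (backtracking), and copies the list whenever a root-to-leaf path hits the target.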
class Solution:
def hasPathSum(self, root: TreeNode, sum: int) -> bool:
if not root: return False
if not root.left and not root.right:
return sum == root.val
return self.hasPathSum(root.left, sum - root.val) or self.hasPathSum(
root.right, sum - root.val)
def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:
res = []
path = []
if not root:
return res
def dfs(root, target):
if not root:
return
target -= root.val
path.append(root.val)
if target == 0 and not root.left and not root.right:
res.append(path.copy())
dfs(root.left, target)
dfs(root.right, target)
path.pop()
dfs(root, sum)
return res
S = Solution()
root = TreeNode(5)
l_1 = TreeNode(4)
r_1 = TreeNode(8)
l_2 = TreeNode(11)
r_2_l = TreeNode(13)
r_2_r = TreeNode(4)
l_3_l = TreeNode(7)
l_3_r = TreeNode(2)
r_3_l = TreeNode(5)
r_3_r = TreeNode(2)
root.left = l_1
root.right = r_1
l_1.left = l_2
r_1.left = r_2_l
r_1.right = r_2_r
l_2.left = l_3_l
l_2.right = l_3_r
r_2_r.left = r_3_l
r_2_r.right = r_3_r
print(S.pathSum(root, 22))
| [
"[email protected]"
] | |
492314193bdd779078c2f05799fa7b84b4a5bf04 | ec2b3b8b61cef4e94447ad70e543b690d70050e5 | /static_d-fold_rectangle_tree_variant/core/Util.py | 6418041839b8a9806ad1c275f21645cd3f310f3e | [
"MIT"
] | permissive | bzliu94/algorithms | d6e491f6d3c68c50a37bab504501a73362b9a94d | 43ccefd7ea1fd88339bf2afa0b35b0a3bdf6acff | refs/heads/master | 2021-01-17T02:22:26.044478 | 2020-08-02T01:13:59 | 2020-08-02T01:13:59 | 36,104,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,116 | py | import math
import decimal
def comp(a, b):
if a < b:
return -1
elif a == b:
return 0
elif a > b:
return 1
def getDistance(loc1, loc2):
x1, y1 = loc1
x2, y2 = loc2
delta_x = x2 - x1
delta_y = y2 - y1
distance = math.sqrt(delta_x ** 2 + delta_y ** 2)
return distance
# truncates the value x to have n digits after the decimal point
def truncate(x, n):
d = decimal.Decimal(str(x))
result = d.quantize(decimal.Decimal(str(pow(10, -1 * n))), rounding = decimal.ROUND_DOWN)
value = float(result)
return value
"""
print truncate(1, 2)
print truncate(1.001, 2)
print truncate(1.001, 3)
print truncate(1.001, 4)
print truncate(100.001, 2)
print truncate(100.001, 3)
print truncate(100.001, 4)
print truncate(-100.001, 1)
print truncate(-100.001, 2)
print truncate(-100.001, 3)
print truncate(-100.001, 4)
print truncate(10500, 0)
print truncate(10500, 1)
print truncate(10500, 2)
print truncate(10500.000009, 3)
print truncate(10500.000009, 4)
print truncate(10500.000009, 5)
print truncate(10500.000009, 6)
print truncate(10500.000009, 7)
"""
| [
"[email protected]"
] | |
eb1c7cf4cd3827cda705f975119f960bd5605afa | 50bd16c857db1fd7074f7b56df64da3112d18d5f | /backend/patient_hill_19535/wsgi.py | 9666dcb595d33b5dd9464a10f06797968ebb57f2 | [] | no_license | andremcb/patient-hill-19535 | ae2469733bdbfd5577b8942aa9702f77536945c9 | e7dda9549c93511acc11a927ee022708579c5a3b | refs/heads/master | 2023-03-03T23:59:32.449515 | 2021-02-18T21:33:17 | 2021-02-18T21:33:17 | 340,182,747 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
WSGI config for patient_hill_19535 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'patient_hill_19535.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
9b5a26ccd793a7f16ec99c306e0fc6f9ee28b36f | 3a634668e9f83116b49b7e742bbba54ea47615ec | /ft_odd_even_analysis_lst.py | 05c47d389e084c85d1c9f6f39a0eaffd815a343c | [] | no_license | Linellian/this_shit_i_always_get_an_f_for | 715583bba3596012433650ee80653808310125a6 | 0254ada959f79ab9a63e46aacedacf0f2ff9f056 | refs/heads/main | 2023-01-10T07:13:50.377162 | 2020-11-13T07:04:02 | 2020-11-13T07:04:02 | 312,498,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,261 | py | def ft_odd_even_analysis_lst(lst):
ev = 0
nev = 0
max_ev = 0
min_ev = 9999999999999999999999999
max_nev = 0
min_nev = 9999999999999999999999999
sum_ev = 0
sum_nev = 0
for i in lst:
if i % 2 == 0:
ev += 1
sum_ev += i
if i < min_ev:
min_ev = i
if i > max_ev:
max_ev = i
elif i % 2 != 0:
nev += 1
sum_nev += i
if i < min_nev:
min_nev = i
if i > max_nev:
max_nev = i
print("Анализ списка:")
print("Количество четных чисел:", ev, end=",\t\t")
print("Количество нечетных чисел:", nev)
print("Максимальная четная цифра:", max_ev, end=",\t\t")
print("Максимальная нечетная цифра:", max_nev)
print("Минимальная четная цифра:", min_ev, end=",\t\t")
print("Минимальная нечетная цифра:", min_nev)
print("Сумма четных чисел:", sum_ev, end=",\t\t")
print("Сумма нечетных чисел:", sum_nev, end=",")
| [
"[email protected]"
] | |
9098862e6382e92a3078581798a715abbec82bd2 | 2d0209e35956674baf7117dc607887bb7e9a043d | /learning_site/views.py | 10803613ec5b054b0d66a03345740ca853b624ef | [] | no_license | Jaxx0/learning-site | 38cf6068ba1061bd6e9544274ed958ad52501434 | 2fac65357dd78f3a6a2111bed0362b222bdfe84b | refs/heads/master | 2020-07-26T23:19:02.938907 | 2019-10-09T07:35:03 | 2019-10-09T07:35:03 | 208,795,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 968 | py | from django.contrib import messages
from django.core.mail import send_mail
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from . import forms
def hello_world(request):
return render(request, 'home.html')
def suggestion_view(request):
form = forms.SuggestionForm()
if request.method == 'POST':
form = forms.SuggestionForm(request.POST)
if form.is_valid():
print('good form')
send_mail(
'Suggestion from {}'.format(form.cleaned_data['name']),
form.cleaned_data['suggestion'],
'{name} <{email}>'.format(**form.cleaned_data),
['[email protected]']
)
messages.add_message(request, messages.SUCCESS, 'Thanks for your suggestion')
return HttpResponseRedirect(reverse('home'))
return render(request, 'suggestion_form.html', {'form': form})
| [
"[email protected]"
] | |
562fcf223ee20b63577a6becbc0c7f022bc9c9a7 | 640f7ba8ce3f79e3b41fa972edcabae2f6aa82dd | /deep-twi-bot.py | fe9611554d384ebed1646a084773ea4ca049a303 | [] | no_license | deepthi10-code/Twitter-name-changing-bot | 93a11f3eeb6d1205c667fa8c067ab5030dd341fc | 6f335938497d2eb26d245321df75dfc4c8f8bdb8 | refs/heads/master | 2022-12-04T12:00:12.485453 | 2020-08-25T08:09:06 | 2020-08-25T08:09:06 | 290,146,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | import tweepy
import os
def create_api():
consumer_key = os.getenv('consumer_key')
consumer_secret = os.getenv('consumer_secret')
access_token = os.getenv('access_token')
access_token_secret = os.getenv('access_token_secret')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth,wait_on_rate_limit=True,wait_on_rate_limit_notify=True)
api.verify_credentials()
print('API Created')
return api
import time
def follower_count(user):
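    # Map each decimal digit of the follower count to its keycap emoji,
    # e.g. 123 -> "1️⃣2️⃣3️⃣", so the count can be embedded in the display name.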
emoji_numbers = {0: "0️⃣", 1: "1️⃣", 2: "2️⃣", 3: "3️⃣",
4: "4️⃣", 5: "5️⃣", 6: "6️⃣", 7: "7️⃣", 8: "8️⃣", 9: "9️⃣"}
uf_split = [int(i) for i in str(user.followers_count)]
emoji_followers = ''.join([emoji_numbers[j] for j in uf_split if j in emoji_numbers.keys()])
return emoji_followers
api = create_api()
while True:
user = api.get_user('Deep07397886')
api.update_profile(name=f'DEEP|{follower_count(user)} Followers')
print(f'Updating Twitter Name : DEEP|{follower_count(user)} Followers')
print('Waiting to refresh')
time.sleep(60)
| [
"[email protected]"
] | |
7ef03c4c91a20c4be8570f7e64d9b0d1855f71c7 | 568d0281cad4cdc7bd5d070b142ca19af781af20 | /uwont/app/models.py | 96d1a11e715d72267e3f326ef8df654accc4acc5 | [] | no_license | robertrenecker/uwont.io | 48502c4a01f73a65780a6389ea1cbdfb8dbf10e5 | 0f22640b19d2933ffd4e8b9a835f62b33b01f619 | refs/heads/master | 2021-01-13T16:34:41.038210 | 2017-01-15T21:57:22 | 2017-01-15T21:57:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from django.db import models
from mongoengine import *
class User(Document):
firstname = StringField(max_length=200)
lastname = StringField(max_length=200)
# Create your models here.
| [
"[email protected]"
] | |
00c6b32985e34e7e8e39f37029cae2eac5ca2d14 | 4311ed18fbcc2d0f3326658ed22a10f6555c4f26 | /1.两数之和.py | d8a81836c52d34ba7788a27560777161291f7159 | [] | no_license | 823858275/leetcode_python | a5ec6034d5ac3d12c6c5e6415cb85ca7c2b7f5e9 | 136d7f08c9de9259ad1fcdfe902e49dd7f0e4f37 | refs/heads/master | 2022-03-04T12:41:27.604433 | 2019-09-23T10:31:06 | 2019-09-23T10:31:06 | 198,064,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #
# @lc app=leetcode.cn id=1 lang=python3
#
# [1] Two Sum
#
from typing import List  # needed for the List[...] annotations when run outside the LeetCode judge
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
dic={}
res=[]
for i in range(len(nums)):
if dic.get(target-nums[i])!=None:
res.append(dic[target-nums[i]])
res.append(i)
break
dic[nums[i]]=i
return res
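# Usage sketch (illustrative addition, not part of the LeetCode submission):
# Solution().twoSum([2, 7, 11, 15], 9) returns [0, 1], since nums[0] + nums[1] == 9.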
| [
"[email protected]"
] | |
5557e6d5a399aec61a87ff8ac49b2592e489890a | f0f66324c82c00e5552e71b3632dc242aa3d7927 | /colors.py | ab75604890fe3511c9c50cf53f5fe4dc9b735d2e | [] | no_license | jcdragon/pi-pong | 43e1e1b8c0e6f5b5e5f99a9fc7197a0ba5f91b26 | f6055515cf793037071be04adf500b0028c778b3 | refs/heads/master | 2020-12-30T09:58:18.972377 | 2017-08-06T19:01:14 | 2017-08-06T19:01:14 | 99,453,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | RED = (128, 0, 0)
GREEN = (0, 128, 0)
BLUE = (0, 0, 128)
YELLOW = (200, 200, 0)
BLACK = (0, 0, 0)
PUKE_GREEN = (53, 67, 3)
BLOOD_RED = (187, 10, 30)
HUSKY_PURPLE = (51, 0, 111)
HUSKY_GOLD = (232, 211, 162)
| [
"[email protected]"
] | |
ced3ffd52e01898a76bf7b8123f468234fec7a3c | a0fcc981869be99e32cae8208c4b00ed1a516844 | /archives/oldMyApp/crud_app.py | 3493557084f2898f0f6af16bafd087842202c8ee | [] | no_license | hmuus01/data-and-the-web | 21f52b24a0cdd45c4ad2af5c5f2896838a2457b5 | a1f512a6af3b458a6ee40848849e06e55d0e5d73 | refs/heads/master | 2023-07-19T18:22:05.730806 | 2019-03-30T00:00:04 | 2019-03-30T00:00:04 | 310,714,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,565 | py | from flask import Flask, flash, redirect, render_template, \
    request, url_for, session
from flask_pymongo import PyMongo
import bcrypt
app = Flask(__name__)
app.secret_key = 'some_secret'
# PyMongo handle used by the register view below; assumes MONGO_URI is set in the
# Flask config for flask_pymongo before the app starts.
mongo = PyMongo(app)
@app.route('/')
def index():
if 'username' in session:
return 'You are logged in'
return render_template('index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != 'hamze' or request.form['password'] != 'hmuus001':
            error = 'Invalid credentials'
flash('ERROR!!')
else:
flash('You were successfully logged in')
return render_template('welcome.html')
return render_template('login.html', error=error)
@app.route('/register', methods = ['POST', 'GET'])
def register():
if request.method == 'POST':
users = mongo.db.users
existing_user = users.find_one({'name': request.form['username']})
if existing_user is None:
            hashpass = bcrypt.hashpw(request.form['password'].encode('utf-8'), bcrypt.gensalt())
            users.insert_one({'name': request.form['username'], 'password': hashpass})
            session['username'] = request.form['username']
            return redirect(url_for('index'))
        return 'That username already exists!'
@app.route('/search/')
@app.route('/search/<name>')
def hello(name=None):
return render_template('welcome.html', name=name)
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0',port=8000)
| [
"[email protected]"
] | |
6cf5d41c21216e328ea3fa19f931e73221a9c6e5 | a9a7f83e1f9e629fdd07d028e738fbc11ded3a4f | /AutoLogin_@TRUEWIFI_with_chrome.py | 8591f933900b597699ef09f9b6b8f1a2b730dce8 | [] | no_license | StartloJ/AutoWiFiLogin_TrueWIFI | c99af5b9222a4b4a17ba0173f490324f1713c8cc | acecf0cf5daaaaeaf5dc523ab8baab14b7305236 | refs/heads/master | 2021-10-10T17:48:56.182144 | 2019-01-15T04:04:24 | 2019-01-15T04:04:24 | 97,491,256 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,045 | py | import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
import time
# def createSession():
# try:
# # binary = FirefoxBinary('/Applications/Firefox.app/Contents/MacOS/firefox-bin')
# # driver = webdriver.Firefox(firefox_binary=binary)
# # profile = webdriver.FirefoxProfile(os.path.expanduser("~/Library/Application Support/Firefox/Profiles/Selenium/"))
# chromedriver = "chromedriver"
# driver = webdriver.Chrome(chromedriver)
# driver.implicitly_wait(10)
# # WebDriverWait(driver , 10)
# print "Page already"
# return driver
# except Exception as e:
def auto_login(chromedriver , userName , passWord):
try:
# chromedriver = r"/Users/l31ank5pace/Desktop/PyScripts/chromedriver"
driver = webdriver.Chrome(chromedriver)
driver.implicitly_wait(5)
# time.sleep(1)
print "Now to go..."
driver.get("https://portal.trueinternet.co.th/wifiauthen/web/wifi-login.php")
# time.sleep(1)
print "finding usernameFill..."
user = driver.find_element_by_name("username")
user.send_keys(userName)
pwd = driver.find_element_by_name("password")
pwd.clear()
pwd.send_keys(passWord)
submit = driver.find_element_by_id("confirm").click()
print "Finished on!!!!"
# time.sleep(2)
driver.quit()
except Exception as e:
raise e
######################### Main Function #############################
os.chdir(os.path.dirname(__file__))
path = os.getcwd()
ope = open("user.txt" , "r")
keep = ope.read().split('\n')
while(1):
print "Test Connection...."
chNet = os.system("ping www.google.com")
if(chNet > 0):
print "Internet disconnect try to Login .@TRUEWIFI"
############## to Different for anyone #############################
uName = keep[0]
pWord = keep[1]
chromedriver = path + "/chromedriver.exe"
####################################################################
auto_login(chromedriver , uName , pWord)
else:
print "Now you connected to Internet..."
pass
time.sleep(7)
| [
"[email protected]"
] | |
e36cc8ab36ca699d7897499867d918b0505bcf13 | 4f11fd7653a6548764c5e9956d6ce82550082cf2 | /count_factor(codility practice).py | afe77b6c7842f8d84eacddfe4f928ae19771e45f | [] | no_license | prasojojiwandono/logic | fd323a1ae5217ee5e98fecff04080dba659fb10a | 4936531786604274667d8fec60ac724e7ce05fdd | refs/heads/master | 2022-05-05T10:21:24.119078 | 2022-04-14T04:12:29 | 2022-04-14T04:12:29 | 158,405,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | #kodingan ini untuk melatih logic programming
# the problem statement can be found at the link below:
#https://app.codility.com/programmers/lessons/10-prime_and_composite_numbers/count_factors/
def solution(N):
# write your code in Python 3.6
if N ==1:
return 1
if N ==2:
return 2
a=[]
u = 0
for i in range(N+1):
if i>0 :
if N % i==0 and i <= N**(0.5):
a.append(i)
if i>N**(0.5):
u = 1
break
if u==1:
break
b = len(a)
hasil = 2 * b
if N%(N**(0.5))==0:
return hasil-1
else:
return hasil
# example: count the number of factors of 24
faktor24 = solution(24)
print(faktor24)
## the result is 8, and 24 indeed has 8 factors --> 1, 2, 3, 4, 6, 8, 12, 24
| [
"[email protected]"
] | |
b56726cb37c89ee4fefdeb060663be2e06bf528e | 992a8fd483f1b800f3ccac44692a3dd3cef1217c | /python 学习/objectives/string.py | 471ed23f6413b8b8c2b45453ef7fb007126efdd6 | [] | no_license | xinshuaiqi/My_Scripts | c776444db3c1f083824edd7cc9a3fd732764b869 | ff9d5e38d1c2a96d116e2026a88639df0f8298d2 | refs/heads/master | 2020-03-17T02:44:40.183425 | 2018-10-29T16:07:29 | 2018-10-29T16:07:29 | 133,203,411 | 3 | 1 | null | null | null | null | WINDOWS-1256 | Python | false | false | 357 | py |
####### string
S="songshanshan"
len(S)
S[0]
S[-1] ## last iterm
S[-3:] ## from last 3 to the end
S[:-3] ## print, except the last three
S + '123'
# string formatting
name="qxs"
age="32"
s="my name is {name}, my age is {age}"
print(s)
##remove empty part
a=' abc'
a.strip()
b='\t\tsss'
b.strip()
c='ATCG\n\r'
c.strip() | [
"[email protected]"
] | |
6f090196d342d803900a2d3a622859ed3c951b90 | 40b69f48b0aad6fb1fde23b41f34d000236b3136 | /ppdet/data/tools/visDrone/test_output_process.py | 3371b85ea37ac9e809a52ce3a7be3aeaf7f7c48b | [
"Apache-2.0"
] | permissive | TrendingTechnology/AFSM | e88d36d757229dc1266a0ec62d61fd6e48d29649 | 54af2f072071779789ba0baa4e4270a1403fd0dd | refs/heads/master | 2023-01-22T03:28:17.868009 | 2020-12-10T09:47:17 | 2020-12-10T09:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,326 | py | import torch
from mmdet.models import CenterNetHead
from mmdet.datasets.my_dataset import vis_bbox
from mmdet.datasets.loader.build_loader import build_dataloader
import mmcv
from mmdet.core import tensor2imgs
from mmdet.datasets import get_dataset
def gt2out(gt_bboxes_list, gt_labels_list, inp_shapes_list, stride, categories):
"""transform ground truth into output format"""
batch_size = len(gt_bboxes_list)
inp_shapes = gt_bboxes_list[0].new_tensor(inp_shapes_list, dtype=torch.int)
output_size = inp_shapes[0] / stride
height_ratio, width_ratio = output_size.float() / inp_shapes[0].float()
# allocating memory
tl_heatmaps = -2 * gt_bboxes_list[0].new_ones(batch_size, categories, output_size[0], output_size[1])
br_heatmaps = -2 * gt_bboxes_list[0].new_ones(batch_size, categories, output_size[0], output_size[1])
ct_heatmaps = -2 * gt_bboxes_list[0].new_ones(batch_size, categories, output_size[0], output_size[1])
tl_regrs = gt_bboxes_list[0].new_zeros(batch_size, 2, output_size[0], output_size[1])
br_regrs = gt_bboxes_list[0].new_zeros(batch_size, 2, output_size[0], output_size[1])
ct_regrs = gt_bboxes_list[0].new_zeros(batch_size, 2, output_size[0], output_size[1])
tl_emds = gt_labels_list[0].new_zeros(batch_size, 1, output_size[0], output_size[1])
br_emds = gt_labels_list[0].new_zeros(batch_size, 1, output_size[0], output_size[1])
for b_ind in range(batch_size): # loop through batch-images
for obj_ind, detection in enumerate(gt_bboxes_list[b_ind]): # loop through objects in one image
category = gt_labels_list[b_ind][obj_ind] - 1
xtl, ytl = detection[0], detection[1]
xbr, ybr = detection[2], detection[3]
xct, yct = (detection[2] + detection[0]) / 2., (detection[3] + detection[1]) / 2.
fxtl = (xtl * width_ratio)
fytl = (ytl * height_ratio)
fxbr = (xbr * width_ratio)
fybr = (ybr * height_ratio)
fxct = (xct * width_ratio)
fyct = (yct * height_ratio)
xtl = int(fxtl)
ytl = int(fytl)
xbr = int(fxbr)
ybr = int(fybr)
xct = int(fxct)
yct = int(fyct)
# heatmaps
tl_heatmaps[b_ind, category, ytl, xtl] = 1
br_heatmaps[b_ind, category, ybr, xbr] = 1
ct_heatmaps[b_ind, category, yct, xct] = 1
# offsets
tl_regrs[b_ind, 0, ytl, xtl] = fxtl - xtl # tl_tx
tl_regrs[b_ind, 1, ytl, xtl] = fytl - ytl # tl_ty
br_regrs[b_ind, 0, ybr, xbr] = fxbr - xbr # br_tx
br_regrs[b_ind, 1, ybr, xbr] = fybr - ybr # br_ty
ct_regrs[b_ind, 0, yct, xct] = fxct - xct # ct_tx
ct_regrs[b_ind, 1, yct, xct] = fyct - yct # ct_ty
# embeddings
tl_emds[b_ind, 0, ytl, xtl] = 2
br_emds[b_ind, 0, ybr, xbr] = 2
tl_out=(tl_heatmaps, tl_emds, tl_regrs)
br_out=(br_heatmaps, br_emds, br_regrs)
ct_out=(ct_heatmaps, None, ct_regrs)
return tl_out, br_out, ct_out
def out2box(outs, img_meta, num_clses):
"""transform output format into final detection results"""
decode_cfg = dict(
K=100,
kernel=3,
ae_threshold=0.5,
num_dets=1000)
ct_cfg = dict(
score_thr=0.05,
nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05),
max_per_img=100)
head = CenterNetHead(in_channels=1, inner_channels=1, num_classes=num_clses)
det_bboxes, det_labels = head.get_det_bboxes(
*outs, img_meta, decode_cfg, rescale=False, cfg=ct_cfg)
bboxes = det_bboxes.numpy()
labels = det_labels.numpy()
return bboxes, labels
def main(cfg_file, test_num=1):
""" data_path: path to images
label_path: path to annotations
idxes: index of image is going to be tested with output process
"""
cfg = mmcv.Config.fromfile(cfg_file)
dataset = get_dataset(cfg.data.val)
data_loader = build_dataloader(
dataset,
imgs_per_gpu=1,
workers_per_gpu=1,
num_gpus=1,
dist=True,
shuffle=False)
for i, data in enumerate(data_loader):
imgs = tensor2imgs(data['img'].data[0], **cfg.img_norm_cfg)
gt_boxes = data['gt_bboxes'].data[0]
gt_labels = data['gt_labels'].data[0]
inp_shapes = [meta['pad_shape'][:2] for meta in data['img_meta'].data[0]]
outs = gt2out(gt_boxes, gt_labels, inp_shapes, stride=4, categories=len(dataset.CLASSES))
bboxes, labels = out2box(outs, data['img_meta'].data[0], len(dataset.CLASSES))
vis_bbox(imgs[0], gt_boxes[0].cpu().numpy(),
gt_labels[0].cpu().numpy(),
show=True, show_str='ground truth')
print('num detected box:', bboxes.shape[0])
vis_bbox(imgs[0], bboxes, labels, show=True, show_str='transformed boxes', color='green')
if i >= test_num:
break
if __name__ == '__main__':
""" test whether the output process is right.trun ground truth into output format
then use the output process to get final detected boxes
"""
cfg_file = '/media/jp/新加卷/ZEHUI_DATA/pytorch_code/mmdetection/configs/centernet/centernet_hourglass-52_1x.py'
main(cfg_file, test_num=1)
| [
"[email protected]"
] | |
ba744cd1b663e1a7434d02b1da7afbc8bb00966d | 0bf3cdca7dcdde6704bf436c6941fab4766ffba6 | /students/K33401/Polyakov_Sergey/pr2/django_project_polyakov/project_first_app/views.py | 826c574ff49e56d0038317dab5f629ddf909f490 | [
"MIT"
] | permissive | spolyakovs/ITMO_ICT_WebDevelopment_2020-2021 | 4ef99309c1662356791662cb77e20896853746bd | b83b609676554afd6cd5d0cf989cda7e0d571000 | refs/heads/master | 2023-03-31T09:27:31.028586 | 2021-04-09T15:37:35 | 2021-04-09T15:37:35 | 298,790,962 | 0 | 0 | MIT | 2020-09-26T10:32:47 | 2020-09-26T10:32:47 | null | UTF-8 | Python | false | false | 2,043 | py | from django.shortcuts import render
from django.http import Http404
from django.views import View
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django.views.generic.edit import UpdateView
from django.views.generic.edit import CreateView
from django.views.generic.edit import DeleteView
from .models import Owner, Car
from .forms import OwnerForm
class IndexPage(View):
template_name = 'project_first_app/index.html'
def get(self, request):
return render(request, self.template_name)
def owner_detail_view(request, id):
template_name = 'project_first_app/owner_detail.html'
try:
owner = Owner.objects.get(id=id)
except Owner.DoesNotExist:
raise Http404("Owner does not exist")
return render(request, template_name, {'owner': owner})
def owners_list_view(request):
template_name = 'project_first_app/owners_list.html'
context = {'owners_list': Owner.objects.all()}
return render(request, template_name, context)
def owner_create_view(request):
template_name = 'project_first_app/owner_create.html'
context = {}
form = OwnerForm(request.POST or None)
if form.is_valid():
form.save()
context['form'] = form
return render(request, template_name, context)
class CarDetailView(DetailView):
model = Car
template_name = 'project_first_app/car_detail.html'
class CarsListView(ListView):
model = Car
template_name = 'project_first_app/cars_list.html'
class CarUpdateView(UpdateView):
model = Car
fields = ['licence_number', 'make', 'model', 'color']
success_url = '/cars/'
template_name = 'project_first_app/car_update.html'
class CarCreateView(CreateView):
model = Car
fields = [ 'licence_number', 'make', 'model', 'color']
success_url = '/cars/'
template_name = 'project_first_app/car_create.html'
class CarDeleteView(DeleteView):
model = Car
success_url = '/cars/'
template_name = 'project_first_app/car_delete.html'
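# Sketch of the matching URL wiring (illustrative addition; the paths below are
# assumptions, not taken from this project's urls.py):
#   path('owners/<int:id>/', owner_detail_view),
#   path('cars/', CarsListView.as_view()),
#   path('cars/<int:pk>/', CarDetailView.as_view()),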
| [
"[email protected]"
] | |
2c26c402ff37f9a9d45c366b9db7c6dce8fd8249 | 930a4bb24a575a85dc569ca5f5fbd262a0db7404 | /investorkopo/views.py | 098c2a3e3d4c2e8f001fe34fcaa7611a1646e6a4 | [] | no_license | difasdfs/dashboardcb2 | 11937480e0c2cf52b41806772c74731567b67ba0 | 6d070e72a3bc2c557c13246bdec280679537d9f3 | refs/heads/main | 2023-07-02T20:48:54.932302 | 2021-08-02T03:55:48 | 2021-08-02T03:55:48 | 344,037,530 | 0 | 1 | null | 2021-06-21T06:09:06 | 2021-03-03T07:20:53 | HTML | UTF-8 | Python | false | false | 3,663 | py | from django.shortcuts import render, redirect
from . import update_struk, update_sales, format_rupiah, kumpulan_struk
from datetime import date, timedelta
from .models import Sales
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from .decorators import unauthenticated_user
# Create your views here.
def logoutuser(request):
    # this is the logout view
logout(request)
return redirect('loginpage')
@unauthenticated_user
def loginpage(request):
context = {}
    # if the request method is POST
    if request.method == 'POST':
        # get the username and password
        username = request.POST.get('username')
        password = request.POST.get('password')
        # authenticate the user
        user = authenticate(request, username=username, password=password)
        # if authentication succeeds, log the user in
if user is not None:
login(request, user)
return redirect('indexcrisbarkopo')
else:
            messages.info(request, 'username or password is incorrect')
return render(request, 'investorkopo/login.html', context)
return render(request, 'login.html', context)
@login_required(login_url='loginpage')
def index(request):
update_struk.main()
update_sales.main()
tanggal_akhir = date.today()
tanggal_awal = tanggal_akhir - timedelta(days=7)
if request.method == 'POST':
tanggal_akhir = date.fromisoformat(request.POST.get('tanggal_akhir'))
tanggal_awal = date.fromisoformat(request.POST.get('tanggal_awal'))
if tanggal_akhir < tanggal_awal:
tanggal_awal, tanggal_akhir = tanggal_akhir, tanggal_awal
if tanggal_akhir > date.today():
tanggal_akhir = date.today()
kumpulan_sales = Sales.objects.filter(tanggal__range=[tanggal_awal, tanggal_akhir])
list_sales = [a.total_sales for a in kumpulan_sales]
print(list_sales)
total_sales = sum(list_sales)
maksimum = max(list_sales)
maksimum_grafik = maksimum*1.4
pembagi = maksimum_grafik // 5
urutan = [format_rupiah.main(a*pembagi) for a in range(6)]
urutan.reverse()
persenan = ["{:.2f}".format((b / maksimum_grafik)*100) for b in list_sales]
list_tanggal = [a.tanggal for a in kumpulan_sales]
query_grafik = [(persenan[a], list_tanggal[a], format_rupiah.main(list_sales[a])) for a in range(len(persenan))]
    # summary-card ("front box") queries
total_penjualan = format_rupiah.main(total_sales, total_penjualan=True)
jumlah_struk = kumpulan_struk.main(tanggal_awal, tanggal_akhir)
average_spend = format_rupiah.main(total_sales / jumlah_struk, total_penjualan=True)
revenue_sharing = format_rupiah.main(total_sales*0.15, total_penjualan=True)
    # end of summary-card queries
    # timezone Asia/Jakarta is only for display; the system itself must use UTC
context = {
'tanggal_awal' : str(tanggal_awal),
'tanggal_akhir' : str(tanggal_akhir),
'total_penjualan' : total_penjualan,
'jumlah_struk' : jumlah_struk,
'average_spend' : average_spend,
'revenue_sharing' : revenue_sharing,
'urutan_grafik' : urutan,
'persenan' : persenan,
'list_tanggal' : list_tanggal,
'list_sales' : list_sales,
'query_grafik' : query_grafik,
'tanggal_awal_date' : tanggal_awal,
'tanggal_akhir_date' : tanggal_akhir,
}
return render(request, 'investorkopo/index.html', context) | [
"[email protected]"
] | |
a9150869bbc758e33d5796c863d0b70d6994ab50 | 394fad0dbb422a2996a3fe50b204338b665d8efd | /k_fold_Cross_validation.py | 555708793c70d492fe588c8ae1d807a74753168d | [] | no_license | strategist922/semEval_Task6_Text_Classification | 32b0405a6b5cf9aa9fc0919592034f1228ed96ff | 190d5340284b56e3dcae0bd0665538fe54c28dfb | refs/heads/master | 2020-12-05T23:42:00.384463 | 2019-01-17T20:35:28 | 2019-01-17T20:35:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,652 | py | import random
import math
import Data_cleaner
import extract_features
import naive_bayes
import logistic_regression
import warnings
import tree_classifier
import svm
import random_forest
import Kneighbours_Clf
# divides the data into k chunks and prints the score for each fold
def validate(k,data,labels):
list = []
for i in range(0,len(data)):
list.append((data[i],labels[i]))
random.shuffle(list)
print(k , " fold Cross Validation\n\n")
chunk_size = math.floor(len(list)/k)
#print(chunk_size)
for i in range(0,k):
test = []
train = []
        # split the data: k-1 chunks for training, 1 chunk for testing
for j in range(0,len(list)):
if math.floor(j/chunk_size) == i:
test.append(list[j])
else:
train.append(list[j])
#print(len(test),len(train))
if i == 0:
print("1st test")
elif i == 1:
print("2nd test")
elif i == 2:
print("3rd test")
else:
print(i+1,"th test")
train_data = []
test_data = []
train_labels = []
test_labels = []
# get divided and cleaned data
train_data,train_labels,test_data,test_labels = preprocess(test, train)
#get features
train_features,test_features = extract_features.get_features_TF_IDF(train_data,test_data)
# runs classifier
run_clf(train_features,train_labels,test_features,test_labels)
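# Usage sketch (illustrative addition; `data` and `labels` stand for the caller's
# raw texts and matching class labels):
#   validate(10, data, labels)   # 10-fold cross-validation with the classifier chosen in run_clf()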
def preprocess(test,train):
train_data = []
train_labels = []
test_data = []
test_labels = []
# separate data and labels
for i in range(0,len(train)):
train_data.append(train[i][0])
train_labels.append(train[i][1])
for i in range(0,len(test)):
test_data.append(test[i][0])
test_labels.append(test[i][1])
# clean data
test_data = Data_cleaner.remove_noise(test_data)
train_data = Data_cleaner.remove_noise(train_data)
return train_data,train_labels,test_data,test_labels
def run_clf(train_features,train_labels,test_features,test_labels):
# naive_bayes.run_naive_bayes(train_features, test_features, train_labels, test_labels)
# logistic_regression.run(train_features, test_features, train_labels, test_labels)
#tree_classifier.run(train_features, test_features, train_labels, test_labels)
# svm.run(train_features, test_features, train_labels, test_labels)
# random_forest.run(train_features, test_features, train_labels, test_labels)
Kneighbours_Clf.run(train_features, test_features, train_labels, test_labels)
print("\n")
| [
"[email protected]"
] | |
5a6a44dc8927d3270bafd0fc1c567851897acc08 | 1056747bc425646e51e1bbbc42fd3ab27b0bed93 | /src/smartcar/scripts/image_resizer_node.py | 242a399684a41979451eadedac67ea5030cb17c0 | [] | no_license | kdshuo/rosProject | 1df71d9c20d9a2dab0e3460d21ce6065c18905bd | 38ddcbbbba13e6589e9a8c67fd959f1af69a5376 | refs/heads/master | 2023-04-24T04:47:23.505565 | 2021-05-10T12:07:09 | 2021-05-10T12:07:09 | 366,025,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,703 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
#import sys, time
import rospy
#import roslib
import cv2
#import numpy as np
#from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
NODE_NAME = "image_resizer_node"
#SUB_TOPIC = "/image"
SUB_TOPIC = "/image_raw"
PUB_TOPIC = "image_preproc_resized"
QUEUE_SIZE = 1
DEFAULT_HEIGHT = 216
DEFAULT_WIDTH = 384
class ImageReziserNode:
def initial_parameters(self):
global intrinsicMat
global distortionCoe
#global perspective_transform_matrix
#global kernel
intrinsicMat = np.array([[669.0672, -0.2097, 490.6801],
[0, 671.0723, 283.2345],
[0, 0, 1]])
distortionCoe = np.array([-0.3739,0.1119,3.5478e-04,0.002, 0])
startx = 280
starty = 220
length_pers = 400
width_pers = length_pers
srcps = np.float32([[(289,250), (93,415), (870,419), (680,256)]])
#srcps_ramp = np.float32([[(27, 349), (177, 207), (452, 207), (599, 349)]])
dstps = np.float32([[(startx, starty), (startx, starty + width_pers), (startx + length_pers, starty + width_pers), (startx + length_pers, starty)]])
#perspective_transform_matrix = cv2.getPerspectiveTransform(srcps, dstps)
#kernel = np.ones((3,3),np.uint8)
def __init__(self, node_name, sub_topic, pub_topic):
self.bridge = CvBridge()
self.initial_parameters()
self.image_pub = rospy.Publisher(pub_topic, Image, queue_size=QUEUE_SIZE)
rospy.init_node(node_name, anonymous=True)
#self.image_sub = rospy.Subscriber(sub_topic, CompressedImage, self.callback)
self.image_sub = rospy.Subscriber(sub_topic, Image, self.callback)
self.rate = rospy.Rate(20)
rospy.spin()
def callback(self, data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
cv_image = cv2.pyrDown(cv_image)
undstrt = cv2.undistort(cv_image, intrinsicMat, distortionCoe, None, intrinsicMat)
except CvBridgeError as e:
rospy.logerr(e)
#height = 1080
#width = 1920
#cv_image = cv2.resize(cv_image, (width, height), 0, 0, 0)
#print(cv_image.shape)
try:
self.image_pub.publish(self.bridge.cv2_to_imgmsg(undstrt, "bgr8"))
except CvBridgeError as e:
rospy.logerr(e)
def main():
try:
ImageReziserNode(NODE_NAME, SUB_TOPIC, PUB_TOPIC)
except KeyboardInterrupt:
rospy.loginfo("Shutting down node %s", NODE_NAME)
if __name__ == '__main__':
main()
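# Quick-check sketch (illustrative addition; assumes a running roscore and a camera
# driver publishing /image_raw; the package name depends on this repo's layout):
#   rosrun <your_package> image_resizer_node.py
#   rostopic hz /image_preproc_resized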
| [
"[email protected]"
] | |
50fdcd5dc69bcd6c62fd815ac6c60e63d90605d1 | a0dc7e0c139b727d77781137c53b3ee886a7d7e5 | /crawl_ccass/crawl_ccass/pipelines.py | 6a4244e04425f0cd0d29a28ed0fbee8a5ab55511 | [
"MIT"
] | permissive | easy00000000/crawl_ccass | 313db81578f5defa1c80a31016bea0652276ba8f | d7cad1ed11858c68d680c9a0d83e376f0c9c6a8a | refs/heads/master | 2021-07-11T17:10:15.339568 | 2017-10-14T06:05:46 | 2017-10-14T06:05:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,719 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
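# Example of enabling these pipelines in settings.py (illustrative addition; the
# module path is assumed from this project's layout and the priorities are arbitrary):
#    ITEM_PIPELINES = {
#        'crawl_ccass.pipelines.Json_Pipeline': 300,
#        'crawl_ccass.pipelines.MYSQL_Pipeline': 400,
#    }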
from scrapy.conf import settings
from bs4 import BeautifulSoup
import json
import MySQLdb
class Json_Pipeline(object):
def open_spider(self, spider):
self.file = open('brokerinfo.jl', 'a')
def close_spider(self, spider):
self.file.close()
def process_item(self, item, spider):
for tr in item['broker_info']:
broker_info = []
for td in tr.find_all('td'):
broker_info.append(td.getText().strip())
# Set ID for HKSFC
if (broker_info[1] == 'HONG KONG SECURITIES CLEARING CO. LTD.'):
broker_info[0] = 'SFC001'
# Set empty ID = Name
if (broker_info[0] == ''):
broker_info[0] = broker_info[1]
# remove Shares_Number's ','
broker_info[3] = broker_info[3].replace(',','')
# remove Shares_%'s '%'
broker_info[4] = broker_info[4].replace('%','')
br_data = {
'StockID' : item['stockid'],
'Date' : item['sdate'],
'Broker_ID' : broker_info[0],
'Broker_Name' : broker_info[1],
'Shares_Number' : broker_info[3],
'Share_Percent' : broker_info[4]
}
line = json.dumps(br_data) + "\n"
self.file.write(line)
return item
class MYSQL_Pipeline(object):
def open_spider(self, spider):
self.conn = MySQLdb.connect(host = settings.get('MYSQL_HOST'),
db = settings.get('CCASS_DB'),
user = settings.get('MYSQL_USER'),
passwd = settings.get('MYSQL_PASSWD'),
charset = 'utf8',
use_unicode = True
)
self.cursor = self.conn.cursor()
def close_spider(self, spider):
self.conn.close()
def process_item(self, item, spider):
# Create Table of StockID if not exists
try:
mysql_command = "create table if not exists HK" + item['stockid']
mysql_command = mysql_command + " (StockID VARCHAR(5), Broker_ID VARCHAR(100), Broker_Name VARCHAR(100), Date DATE, Shares BIGINT, Percent FLOAT)"
self.cursor.execute(mysql_command)
self.conn.commit()
except MySQLdb.Error, e:
print 'Error %d %s' % (e.args[0], e.args[1])
# Add Item into Index
try:
mysql_command = "INSERT INTO stockid_date_index (StockID, Date) VALUES (%s, %s)"
self.cursor.execute(mysql_command,
(
item['stockid'],
item['sdate'],
))
self.conn.commit()
except MySQLdb.Error, e:
print 'Error %d %s' % (e.args[0], e.args[1])
# Add Items into StockID Table
for tr in item['broker_info']:
broker_info = []
for td in tr.find_all('td'):
broker_info.append(td.getText().strip())
# Set ID for HKSFC
if (broker_info[1] == 'HONG KONG SECURITIES CLEARING CO. LTD.'):
broker_info[0] = 'SFC001'
# Set empty ID = Name
if (broker_info[0] == ''):
broker_info[0] = broker_info[1]
# remove Shares_Number's ','
broker_info[3] = broker_info[3].replace(',','')
# remove Shares_%'s '%'
if len(broker_info)<5:
broker_info.append('0')
else:
broker_info[4] = broker_info[4].replace('%','')
try:
mysql_command = "INSERT INTO " + "HK" + item['stockid'] + " (StockID, Date, Broker_ID, Broker_Name, Shares, Percent) VALUES (%s, %s, %s, %s, %s, %s)"
self.cursor.execute(mysql_command,
(
item['stockid'],
item['sdate'],
broker_info[0],
broker_info[1],
broker_info[3],
broker_info[4],
))
self.conn.commit()
except MySQLdb.Error, e:
print 'Error %d %s' % (e.args[0], e.args[1])
return item | [
"[email protected]"
] | |
1d28d5aa54abb9d2cf7d82a6e75c565253701ba1 | 05329cc0ccc814c5204379b2ed5cbe8ee2503879 | /library/python/runtime_py3/test/test_arcadia_source_finder.py | 193336d17a1fc602bf83e4756e8e754cae0f4c79 | [
"Apache-2.0"
] | permissive | wayfair-contribs/catboost | 3d01a5fabf60187b27d7d543409235940fba8abc | 849b66e4c4faf832cd6ee9c39a3022f29d18819f | refs/heads/master | 2023-09-01T09:07:45.153072 | 2021-10-22T11:39:45 | 2021-10-22T11:39:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,913 | py | import unittest
from unittest.mock import patch
from parameterized import parameterized
import __res as res
NAMESPACE_PREFIX = b'py/namespace/'
TEST_SOURCE_ROOT = '/home/arcadia'
TEST_FS = {
'home': {
'arcadia': {
'project': {
'normal_lib': {
'mod1.py': '',
'package1': {
'mod2.py': '',
},
},
'lib_with_namespace': {
'ns_mod1.py': '',
'ns_package1': {
'ns_mod2.py': '',
},
},
'top_level_lib': {
'tl_mod1.py': '',
'tl_package1': {
'tl_mod2.py': '',
},
},
'normal_lib_extension': {
'mod3.py': '',
'package1': {
'mod4.py': '',
},
},
},
'contrib': {
'python': {
'pylib': {
'libmod.py': '',
'tests': {
'conftest.py': '',
'ya.make': '',
},
},
},
},
},
},
}
TEST_RESOURCE = {
b'py/namespace/unique_prefix1/project/normal_lib': b'project.normal_lib.',
# 'normal_lib_extension' extend normal_lib by additional modules
b'py/namespace/unique_prefix1/project/normal_lib_extension': b'project.normal_lib.',
b'py/namespace/unique_prefix2/project/lib_with_namespace': b'virtual.namespace.',
b'py/namespace/unique_prefix3/project/top_level_lib': b'.',
# Contrib: the library is in the top level namespace but 'tests' project is not
b'py/namespace/unique_prefix4/contrib/python/pylib': b'.',
b'py/namespace/unique_prefix4/contrib/python/pylib/tests': b'contrib.python.pylib.tests.',
}
MODULES = {
'project.normal_lib.mod1': b'project/normal_lib/mod1.py',
'project.normal_lib.mod3': b'project/normal_lib_extension/mod3.py',
'project.normal_lib.package1.mod2': b'project/normal_lib/package1/mod2.py',
'project.normal_lib.package1.mod4': b'project/normal_lib_extension/package1/mod4.py',
'virtual.namespace.ns_mod1': b'project/lib_with_namespace/ns_mod1.py',
'virtual.namespace.ns_package1.ns_mod2': b'project/lib_with_namespace/ns_package1/ns_mod2.py',
'tl_mod1': b'project/top_level_lib/tl_mod1.py',
'tl_package1.tl_mod2': b'project/top_level_lib/tl_package1/tl_mod2.py',
'libmod': b'contrib/python/pylib/libmod.py',
'contrib.python.pylib.tests.conftest': b'contrib/python/pylib/tests/conftest.py',
}
PACKAGES = [
'project',
'project.normal_lib',
'project.normal_lib.package1',
'virtual',
'virtual.namespace',
'virtual.namespace.ns_package1',
'tl_package1',
'contrib',
'contrib.python',
'contrib.python.pylib',
'contrib.python.pylib.tests',
]
UNKNOWN_MODULES = [
'project.normal_lib.unknown_module',
'virtual.namespace.unknown_module',
'unknown_module',
    # contrib/python/pylib directory is not a regular package and cannot be used for a usual module lookup
'contrib.python.pylib.libmod',
# Parent project contrib/python/pylib with top level namespace should not affect nested 'tests' project
'tests.conftest',
]
def iter_keys_mock(prefix):
assert prefix == NAMESPACE_PREFIX
l = len(prefix)
for k in TEST_RESOURCE.keys():
yield k, k[l:]
def resource_find_mock(key):
return TEST_RESOURCE.get(key)
def find_fake_fs(filename):
path = filename.lstrip('/').split('/')
curdir = TEST_FS
for item in path:
if item in curdir:
curdir = curdir[item]
else:
return None
return curdir
def path_isfile_mock(filename):
f = find_fake_fs(filename)
return isinstance(f, str)
def path_isdir_mock(filename):
f = find_fake_fs(filename)
return isinstance(f, dict)
def os_listdir_mock(dirname):
f = find_fake_fs(dirname)
if isinstance(f, dict):
return f.keys()
else:
return []
class TestArcadiaSourceFinder(unittest.TestCase):
def setUp(self):
self.patchers = [
patch('__res.iter_keys', wraps=iter_keys_mock),
patch('__res.__resource.find', wraps=resource_find_mock),
patch('__res._path_isdir', wraps=path_isdir_mock),
patch('__res._path_isfile', wraps=path_isfile_mock),
patch('__res._os.listdir', wraps=os_listdir_mock),
]
for patcher in self.patchers:
patcher.start()
self.arcadia_source_finder = res.ArcadiaSourceFinder(TEST_SOURCE_ROOT)
def tearDown(self):
for patcher in self.patchers:
patcher.stop()
@parameterized.expand(MODULES.items())
def test_get_module_path_for_modules(self, module, path):
assert path == self.arcadia_source_finder.get_module_path(module)
@parameterized.expand(PACKAGES)
def test_get_module_path_for_packages(self, package):
assert self.arcadia_source_finder.get_module_path(package) is None
@parameterized.expand(UNKNOWN_MODULES)
def test_get_module_path_for_unknown_modules(self, unknown_module):
assert self.arcadia_source_finder.get_module_path(unknown_module) is None
@parameterized.expand(MODULES.keys())
def test_is_package_for_modules(self, module):
assert self.arcadia_source_finder.is_package(module) is False
@parameterized.expand(PACKAGES)
def test_is_package_for_packages(self, package):
assert self.arcadia_source_finder.is_package(package) is True
@parameterized.expand(UNKNOWN_MODULES)
def test_is_package_for_unknown_modules(self, unknown_module):
self.assertRaises(ImportError, lambda: self.arcadia_source_finder.is_package(unknown_module))
@parameterized.expand([
('project.', {
('PFX.normal_lib', True),
}),
('project.normal_lib.', {
('PFX.mod1', False),
('PFX.mod3', False),
('PFX.package1', True),
}),
('project.normal_lib.package1.', {
('PFX.mod2', False),
('PFX.mod4', False),
}),
('virtual.', {
('PFX.namespace', True),
}),
('virtual.namespace.', {
('PFX.ns_mod1', False),
('PFX.ns_package1', True),
}),
('virtual.namespace.ns_package1.', {
('PFX.ns_mod2', False),
}),
('', {
('PFX.project', True),
('PFX.virtual', True),
('PFX.tl_mod1', False),
('PFX.tl_package1', True),
('PFX.contrib', True),
('PFX.libmod', False),
}),
('tl_package1.', {
('PFX.tl_mod2', False),
}),
('contrib.python.pylib.', {
('PFX.tests', True),
}),
('contrib.python.pylib.tests.', {
('PFX.conftest', False),
}),
])
def test_iter_modules(self, package_prefix, expected):
got = self.arcadia_source_finder.iter_modules(package_prefix, 'PFX.')
assert expected == set(got)
# Check iter_modules() don't crash and return correct result after not existing module was requested
def test_iter_modules_after_unknown_module_import(self):
self.arcadia_source_finder.get_module_path('project.unknown_module')
assert {('normal_lib', True)} == set(self.arcadia_source_finder.iter_modules('project.', ''))
class TestArcadiaSourceFinderForEmptyResources(unittest.TestCase):
@staticmethod
def _unreachable():
raise Exception()
def setUp(self):
self.patchers = [
patch('__res.iter_keys', wraps=lambda x: []),
patch('__res.__resource.find', wraps=self._unreachable),
patch('__res._path_isdir', wraps=self._unreachable),
patch('__res._path_isfile', wraps=self._unreachable),
patch('__res._os.listdir', wraps=self._unreachable),
]
for patcher in self.patchers:
patcher.start()
self.arcadia_source_finder = res.ArcadiaSourceFinder(TEST_SOURCE_ROOT)
def tearDown(self):
for patcher in self.patchers:
patcher.stop()
def test_get_module_path(self):
assert self.arcadia_source_finder.get_module_path('project.normal_lib.mod1') is None
def test_is_package(self):
self.assertRaises(ImportError, lambda: self.arcadia_source_finder.is_package('project'))
self.assertRaises(ImportError, lambda: self.arcadia_source_finder.is_package('project.normal_lib.mod1'))
def test_iter_modules(self):
assert [] == list(self.arcadia_source_finder.iter_modules('', 'PFX.'))
| [
"[email protected]"
] | |
f6294fd2619f26947669c79bceb7f26f41082eba | 15cf8ab8d96083d84409d88b6db2e66c506084a4 | /Python/CotaçãoDolar e Clima.py | 60bfeaca5fd0ca55420d2ca435914a5b69bff730 | [
"MIT"
] | permissive | ABHISHEK-AMRUTE/Hello-world-1 | 59bea839af5a5e064ede374ac593f47a5f8249d5 | ba8ab6f1a5e6a23a49a2cb17eaa44e616d04ee36 | refs/heads/master | 2020-08-29T10:21:34.438677 | 2019-10-28T08:58:22 | 2019-10-28T08:58:22 | 218,004,701 | 2 | 0 | MIT | 2019-10-28T08:56:58 | 2019-10-28T08:56:57 | null | UTF-8 | Python | false | false | 725 | py | # COTAÇÃO DO DOLAR E CLIMA ATUAL
import re
import requests
import json
rc = requests.get('http://api.promasters.net.br/cotacao/v1/valores').text
cotacoes = re.findall(r'\d\.\d+', rc)  # ALL QUOTES
cidade = str(input('Enter the city: ')).strip().lower()
cidade = cidade.split(' ')
cidade = '%20'.join(cidade)
rt = requests.get('https://api.hgbrasil.com/weather/?format=json&city_name=' + cidade + '&key=bcb790a1').text
dic = json.loads(rt)
print(dic)
print('The dollar currently costs: {}\n'.format(cotacoes[0]))
print('In {}'.format(dic['results']['city_name']))
print('It is {}ºC and {}\nThe exact time is {}'.format(dic['results']['temp'], dic['results']['description'], dic['results']['time']))
| [
"[email protected]"
] | |
384d3eda1b59936bccfc0cf7d0b3aee330fae824 | dde079f27589bc5b2141c7522e6b682338510e71 | /Project 1 - Classification/preprocessors/kdd/KDD.py | f97b3287b18ca1f3bc556c8329e3ef83cdbf207f | [] | no_license | gentrexha/machine-learning | 60923d5b944ba950260cfdf1e5f4d3aa399e31b6 | f4bb49acd61a84d81b3883a31ec3dce1bcadfa96 | refs/heads/master | 2022-05-08T05:21:11.705372 | 2020-04-20T11:34:43 | 2020-04-20T11:34:43 | 257,257,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,528 | py | import sys
import pandas as pd
import numpy as np
from sklearn.base import TransformerMixin
from classifiers.config import DATASETS, kdd_data_folder
def split_mdmaud_and_domain(dataset: pd.DataFrame) -> pd.DataFrame:
# split the data from MDMAUD column to three meaningful columns
dataset = dataset.assign(
RecencyOfGiving=dataset['MDMAUD'].apply(lambda x: get_recency_of_giving(x)))
dataset = dataset.assign(
FrequencyOfGiving=dataset['MDMAUD'].apply(lambda x: x[1] if x and x[1] != 'X' else 0))
dataset = dataset.assign(
AmountOfGiving=dataset['MDMAUD'].apply(lambda x: get_amount_of_giving_level(x)))
dataset.drop(columns='MDMAUD', inplace=True)
# split DOMAIN column into two meaningful columns
dataset = dataset.assign(
UrbanicityLevel=dataset['DOMAIN'].apply(lambda x: get_urbanicity_level(x)))
dataset = dataset.assign(
SocioEconomicStatus=dataset['DOMAIN'].apply(lambda x: get_socio_economic_status(x)))
dataset.drop(columns='DOMAIN', inplace=True)
return dataset
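# Illustration (added example with made-up values): for a row with MDMAUD == 'C2CX'
# and DOMAIN == 'S1', split_mdmaud_and_domain() yields RecencyOfGiving=4 (current),
# FrequencyOfGiving='2', AmountOfGiving=2 (core), UrbanicityLevel=3 (suburban) and
# SocioEconomicStatus='1'.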
def get_recency_of_giving(value: str):
"""
MDMAUD column description:
The Major Donor Matrix code
The codes describe frequency and amount of
giving for donors who have given a $100+
gift at any time in their giving history.
An RFA (recency/frequency/monetary) field.
The (current) concatenated version is a nominal
or symbolic field. The individual bytes could separately be
used as fields and refer to the following:
First byte: Recency of Giving
C=Current Donor
L=Lapsed Donor
I=Inactive Donor
D=Dormant Donor
2nd byte: Frequency of Giving
1=One gift in the period of recency
2=Two-Four gifts in the period of recency
5=Five+ gifts in the period of recency
3rd byte: Amount of Giving
L=Less than $100(Low Dollar)
C=$100-499(Core)
M=$500-999(Major)
T=$1,000+(Top)
4th byte: Blank/meaningless/filler
'X' indicates that the donor is not a major donor.
For the first bit (RecencyOfGiving), we map as follows:
Current = 4,
Lapsed = 3,
Dormant = 2,
Inactive = 1,
None = 0
"""
if not value or value[0] == 'X':
return 0
if value[0] == 'C':
return 4
elif value[0] == 'L':
return 3
elif value[0] == 'D':
return 2
    elif value[0] == 'I':  # 'I' = Inactive Donor, per the mapping in the docstring
return 1
else:
return 0
def get_amount_of_giving_level(value: str):
"""
For the third bit (AmountOfGiving), we map as follows:
L=Less than $100(Low Dollar) - 1
C=$100-499(Core) - 2
M=$500-999(Major) - 3
T=$1,000+(Top) - 4
None - 0
"""
if not value or value[2] == 'X':
return 0
if value[2] == 'L':
return 1
elif value[2] == 'C':
return 2
elif value[2] == 'M':
return 3
elif value[2] == 'T':
return 4
else:
return 0
def get_urbanicity_level(value: str):
"""
DOMAIN column description:
DOMAIN/Cluster code. A nominal or symbolic field.
could be broken down by bytes as explained below.
1st byte = Urbanicity level of the donor's neighborhood
U=Urban
C=City
S=Suburban
T=Town
R=Rural
2nd byte = Socio-Economic status of the neighborhood
1 = Highest SES
2 = Average SES
3 = Lowest SES (except for Urban communities, where
1 = Highest SES, 2 = Above average SES,
3 = Below average SES, 4 = Lowest SES.)
For the Urbanicity level, we do the following mapping:
U=Urban - 5
C=City - 4
S=Suburban - 3
T=Town - 2
R=Rural - 1
None - 0
"""
if not value or value[0] == 'X':
return 0
if value[0] == 'R':
return 1
elif value[0] == 'T':
return 2
elif value[0] == 'S':
return 3
elif value[0] == 'C':
return 4
elif value[0] == 'U':
return 5
else:
return 0
def get_socio_economic_status(value: str):
"""
Keep the same variables for SES:
1 = Highest SES
2 = Average SES
3 = Lowest SES (except for Urban communities, where
1 = Highest SES, 2 = Above average SES,
3 = Below average SES, 4 = Lowest SES.)
"""
# sometimes value is just a string which contains a white space. Remove it
value = value.replace(' ', '')
if not value or value[1] == 'X':
return 0
else:
return value[1]
def get_cluster_as_float(value: str):
"""
"""
value = value.replace(' ', '')
try:
return float(value)
except ValueError:
return pd.np.nan
def main():
"""
Pre-processes kdd dataset and stores it
Procedure:
- Load dataset
- Perform pre processing as described in README
- Store preprocessed dataset
- Apply classification algorithms on the preprocessed file
Here we preprocess only the columns in the `target_variables` list, which were picked from
https://pdfs.semanticscholar.org/865a/6dba275f21ea42a10616f59d85da6d26eae1.pdf, page 75+
"""
dataset_name = 'kdd'
dataset_train: pd.DataFrame = pd.read_csv(DATASETS[dataset_name]['initial_path_train'])
dataset_test: pd.DataFrame = pd.read_csv(DATASETS[dataset_name]['initial_path_test'])
target_variables = ['RECINHSE', 'RECP3', 'RECPGVG', 'RECSWEEP', 'MDMAUD', 'DOMAIN', 'CLUSTER', 'HOMEOWNR',
'NUMCHLD',
'INCOME', 'GENDER', 'WEALTH1']
flag_variables = ['RECINHSE', 'RECP3', 'RECPGVG', 'RECSWEEP', 'HOMEOWNR', 'WEALTH1']
# replace flag variables with either 1 or 0
for column in flag_variables:
dataset_train[column] = dataset_train[column].apply(lambda x: 1 if x == 'X' else 0)
dataset_test[column] = dataset_test[column].apply(lambda x: 1 if x == 'X' else 0)
# update gender column with M=1, F=0
dataset_train['GENDER'] = dataset_train['GENDER'].apply(lambda x: 1 if x == 'M' else 0)
dataset_test['GENDER'] = dataset_test['GENDER'].apply(lambda x: 1 if x == 'M' else 0)
# split MDMAUD and DOMAIN columns
dataset_train = split_mdmaud_and_domain(dataset_train)
dataset_test = split_mdmaud_and_domain(dataset_test)
dataset_train['CLUSTER'] = dataset_train['CLUSTER'].apply(lambda x: get_cluster_as_float(x))
dataset_test['CLUSTER'] = dataset_test['CLUSTER'].apply(lambda x: get_cluster_as_float(x))
# replace NaN's foreach column with the mean value
dataset_train.fillna(dataset_train.mean(), inplace=True)
dataset_test.fillna(dataset_test.mean(), inplace=True)
# put target column (which in this case is class) as the last column
class_column = dataset_train['TARGET_B']
dataset_train.drop(columns='TARGET_B', inplace=True)
dataset_train.insert(len(dataset_train.columns), 'TARGET_B', class_column)
print('Storing preprocessed datasets...')
dataset_train.to_csv(kdd_data_folder / 'kdd-train-preprocessed.csv', index=False)
dataset_test.to_csv(kdd_data_folder / 'kdd-test-preprocessed.csv', index=False)
print('Done preprocessing!')
def no_nominal():
"""
Preprocess kdd without any nominal values and store it.
:return:
"""
dataset_name = 'kdd'
dataset_train: pd.DataFrame = pd.read_csv(DATASETS[dataset_name]['initial_path_train'])
dataset_test: pd.DataFrame = pd.read_csv(DATASETS[dataset_name]['initial_path_test'])
# remove all nominal values
dataset_train = dataset_train._get_numeric_data()
dataset_test = dataset_test._get_numeric_data()
# replace NaN's foreach column with the mean value
dataset_train.fillna(dataset_train.mean(), inplace=True)
dataset_test.fillna(dataset_test.mean(), inplace=True)
# put target column (which in this case is class) as the last column
class_column = dataset_train['TARGET_B']
dataset_train.drop(columns='TARGET_B', inplace=True)
dataset_train.insert(len(dataset_train.columns), 'TARGET_B', class_column)
print('Storing preprocessed datasets...')
dataset_train.to_csv(kdd_data_folder / 'kdd-train-preprocessed-no_nominal.csv', index=False)
dataset_test.to_csv(kdd_data_folder / 'kdd-test-preprocessed-no_nominal.csv', index=False)
print('Done preprocessing!')
if __name__ == '__main__':
# main()
no_nominal()
sys.exit(0)
| [
"[email protected]"
] | |
04cf6a286b8a5e7e7fbef16c441136ab4fba3821 | fd4f50ff353ae456e9b9223204cd20b34296cd8c | /code/terra/migrations/0002_delete_feedback.py | c6220e06adcc36ad24aec25eeb628cf1726fefbd | [] | no_license | jaejunha/Terra-Coding | b406dd40a68664d36fee0b48eccb500e2da262d9 | 3f3daa7bbed6baf449e0bd1533ab6d9bd6056820 | refs/heads/master | 2021-09-08T00:24:02.081338 | 2018-03-04T05:04:41 | 2018-03-04T05:04:41 | 103,519,799 | 1 | 4 | null | 2017-09-29T01:25:00 | 2017-09-14T10:33:19 | Python | UTF-8 | Python | false | false | 349 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-27 16:21
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('terra', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='Feedback',
),
]
| [
"[email protected]"
] | |
bc7772704aee021e9a6e3fcfb0e66c4fb233f3a4 | 1ae4d3632f788f1a5e8f1e919e3f43b5f53d9ecd | /lesson2 (Variables)/task8/comparison_operators.py | 7336e2dc96cee41c856c334f12f966e7cc43b8d9 | [] | no_license | catharsis96/pythonintro2 | fc1f2dc3380efff92918c4bf9f77615b39380c0a | ac896af08e0835372bd1e6901b99f005bf0bb4b8 | refs/heads/master | 2021-04-29T11:37:30.599143 | 2017-01-02T16:23:48 | 2017-01-02T16:23:48 | 77,846,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | one = 1
two = 2
three = 3
print(one < two < three) # This chained comparison means that the (one < two) and (two < three) comparisons are performed at the same time.
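# For reference (added note): Python's comparison operators are ==, !=, <, >, <= and >=.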
is_greater = three > two  # any comparison operator fits the exercise; '>' asks "is three greater than two?"
print(is_greater) | [
"[email protected]"
] |