blob_id (string, length 40) | directory_id (string, length 40) | path (string, length 3 to 616) | content_id (string, length 40) | detected_licenses (list, length 0 to 112) | license_type (string, 2 values) | repo_name (string, length 5 to 115) | snapshot_id (string, length 40) | revision_id (string, length 40) | branch_name (string, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 values) | src_encoding (string, 26 values) | language (string, 1 value) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 values) | content (string, length 3 to 10.2M) | authors (list, length 1) | author_id (string, length 1 to 132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f1852c7da40eb5c08990351bb1c5c7ea3197c233 | 7bfcb91f95d20f1199d54f91c9a095df08b44d83 | /Backup/Django_Youtube/WebBanHang/user/models.py | b2d82ecc75ed2668b3c7dbb54babf9acbad04250 | []
| no_license | llduyll10/backup | bcb09eb632dd0858d515aacb7132d913da4dc24c | 8849d812566977f9a379d38ee1daa2ef42c02c7f | refs/heads/master | 2023-02-28T11:22:23.831040 | 2021-02-01T17:09:55 | 2021-02-01T17:09:55 | 335,006,700 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class CustomerUser(AbstractUser):
phone_number = models.CharField(default='', max_length=15)
address = models.CharField(default='', max_length=255) | [
"[email protected]"
]
| |
6a20c6b46f1956d64bcd5fc20261bb7af697a8eb | 6679ab23bf4f0100eb07cf13be21a8c1b1ae4c1f | /Python_Team_Notes/구현/input_alpha_to_num.py | 817176213e5896f5d99570aae3099f7501ab5271 | []
| no_license | gimquokka/problem-solving | 1c77e0ad1828fa93ebba360dcf774e38e157d7b6 | f3c661241d3e41adee330d19db3a66e20d23cf50 | refs/heads/master | 2023-06-28T10:19:07.230366 | 2021-07-29T11:29:26 | 2021-07-29T11:29:26 | 365,461,737 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | # Take a character from a to z as input and convert it to a number
char = input()
# use the ord function
num = int(ord(char)) - int(ord('a'))
print(num)
| [
"[email protected]"
]
| |
72caec1e57d85a6bf4b606a5228254cf3c680874 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_179/ch25_2020_03_23_14_36_52_247565.py | b77b247db72eeb75a6603e8b3a253feeebcab017 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | import math
g = 9.8
def calcula_distancia (velocidade, angulo):
angulo_radianos = math.radians(angulo)  # convert the launch angle from degrees to radians
distancia = (velocidade**2 * math.sin(2*angulo_radianos))/g
return distancia
if distancia < 98:
print ('Muito perto')
elif distancia > 102:
print ('Muito longe')
else:
print ('Acertou!') | [
"[email protected]"
]
| |
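The exercise file above computes the projectile range R = v**2 * sin(2*theta) / g and checks the result against a 98 to 102 m target window. A minimal, self-contained sketch of that calculation follows; the function name and the sample speed/angle are illustrative assumptions, not values taken from the dataset row:

import math

def projectile_range(speed_m_s, angle_deg, g=9.8):
    # range formula R = v^2 * sin(2*theta) / g, with theta in radians
    return (speed_m_s ** 2) * math.sin(2 * math.radians(angle_deg)) / g

# at 45 degrees, roughly 31.3 m/s lands near the 98-102 m window ("Acertou!")
print(round(projectile_range(31.3, 45), 1))  # ~100.0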
06648aa873d47bf1a3429114bfc2a4d5585aa1c1 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_20914.py | 3c20fbf20829f9b981a226ccaa46c39051a43f32 | []
| no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | # py.test Tracebacks: Highlight my code, fold frames of framework
--tb=short
| [
"[email protected]"
]
| |
01aaab4806daf83624fce5a5d71e77ac84e3cb95 | 714983fc24c6befe80d426dd94134d09ad2cbdfb | /env/lib/python3.6/site-packages/RestAuth/Services/migrations/0004_delete_old_m2m.py | 31494a3ab34e3a19585de405f5ad81cb7bb1f511 | []
| no_license | sachinlokesh05/login-registration-forgotpassword-and-resetpassword-using-django-rest-framework- | 486354ffb3a397c79afc6cbb290ab1cd637f50ac | 60769f6b4965836b2220878cfa2e1bc403d8f8a3 | refs/heads/master | 2023-01-28T22:19:13.483527 | 2020-01-28T14:07:53 | 2020-01-28T14:07:53 | 233,223,694 | 3 | 0 | null | 2023-01-07T22:10:06 | 2020-01-11T11:49:44 | Python | UTF-8 | Python | false | false | 4,682 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field hosts on 'Service'
db.delete_table('Services_service_hosts')
def backwards(self, orm):
# Adding M2M table for field hosts on 'Service'
db.create_table('Services_service_hosts', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('service', models.ForeignKey(orm['Services.service'], null=False)),
('serviceaddress', models.ForeignKey(orm['Services.serviceaddress'], null=False))
))
db.create_unique('Services_service_hosts', ['service_id', 'serviceaddress_id'])
models = {
'Services.service': {
'Meta': {'object_name': 'Service', '_ormbases': ['auth.User']},
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'Services.serviceaddress': {
'Meta': {'object_name': 'ServiceAddress'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '39'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'services': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'hosts'", 'symmetrical': 'False', 'to': "orm['Services.Service']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['Services'] | [
"[email protected]"
]
| |
52bd5b80c303f7ec03c6a84634f9654784e1fe1c | 2293c76c3d18e2fcd44ded90bd40113d26285663 | /pyeccodes/defs/grib2/modelName_def.py | ca5790b97b3bf22a70902abdc87628726645d7a4 | [
"Apache-2.0"
]
| permissive | ecmwf/pyeccodes | b1f121dbddf68d176a03805ed5144ba0b37ac211 | dce2c72d3adcc0cb801731366be53327ce13a00b | refs/heads/master | 2022-04-23T10:37:40.524078 | 2020-04-18T06:30:29 | 2020-04-18T06:30:29 | 255,554,540 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,990 | py | import pyeccodes.accessors as _
def load(h):
def wrapped(h):
originatingCentre = h.get_l('originatingCentre')
if originatingCentre == 242:
return 'cosmo-romania'
if originatingCentre == 220:
return 'cosmo-poland'
if originatingCentre == 96:
return 'cosmo-greece'
generatingProcessIdentifier = h.get_l('generatingProcessIdentifier')
if originatingCentre == 76 and generatingProcessIdentifier == 235:
return 'cosmo_ru-eps'
if originatingCentre == 76 and generatingProcessIdentifier == 135:
return 'cosmo_ru'
if originatingCentre == 200 and generatingProcessIdentifier == 131:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 46:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 42:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 38:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 34:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 32:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 31:
return 'cosmo-i7'
if originatingCentre == 200 and generatingProcessIdentifier == 148:
return 'cosmo-i2'
if originatingCentre == 200 and generatingProcessIdentifier == 144:
return 'cosmo-i2'
if originatingCentre == 200 and generatingProcessIdentifier == 139:
return 'cosmo-i2'
if originatingCentre == 200 and generatingProcessIdentifier == 36:
return 'cosmo-i2'
subCentre = h.get_l('subCentre')
if subCentre == 250:
return 'cosmo'
if originatingCentre == 250:
return 'cosmo'
return wrapped
| [
"[email protected]"
]
| |
fbfc207ef43a7797ae51a3f77a2080848f479024 | d94be223f733daa58ce03f6f2dd701c55355f044 | /docs/data/new_east_st_louis-3.py | 7270fd573042b368984fc13a16e5220c497a576b | []
| no_license | emirdemirel/JAAH | 7bb4f9c2a434e1df34d99596dd294b7c96836bfe | 8c065c3b043ad7ac95241c242bb468fe4c731ec7 | refs/heads/master | 2023-02-10T14:10:52.755206 | 2021-01-07T23:11:02 | 2021-01-07T23:11:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | import siteUtils
siteUtils.show5HexagramsForFileList(['../../../annotations/new_east_st_louis.json']) | [
"[email protected]"
]
| |
469eebafcf857f87276c308ad37773ed5d6351dd | 95ec78292e150591dc0587988cf3a4b9b5ad08c2 | /code/srmcollider/Residues.py | 988d50d1542f6b880b4d90b333ad0cef6cd72721 | []
| no_license | hroest/srmcollider | 40b034f4f1713d94a6f36ed78b3ed67857b47eb7 | 67c0a04fb21a4f089e3aab15d5ee8884b389ec44 | refs/heads/master | 2021-01-18T23:10:14.927217 | 2018-01-15T15:04:25 | 2018-01-15T15:04:25 | 10,242,093 | 0 | 2 | null | 2017-12-05T22:02:04 | 2013-05-23T11:18:25 | Python | UTF-8 | Python | false | false | 18,191 | py | """
*
* Program : SRMCollider
* Author : Hannes Roest <[email protected]>
* Date : 05.02.2011
*
*
* Copyright (C) 2011 - 2012 Hannes Roest
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
*
"""
import string
# Isotope Modification
# 0 means no modification
# 1 means N15 (heavy nitrogen)
NOISOTOPEMODIFICATION = 0
N15_ISOTOPEMODIFICATION = 1
class Residues:
# http://www.sisweb.com/referenc/source/exactmaa.htm
# http://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl
average_elements = {
'H' : 1.007825 * 99.99/100 + 2.014102 * 0.015/100,
'N' : 14.003074 * 99.63/100 + 15.000109 * 0.37/100,
'O' : 15.994915 * 99.76/100 + 16.999131 * 0.038/100 + 17.999159 * 0.20/100,
'C' : 12.000000 * 98.90/100 + 13.003355 * 1.10,
'P' : 30.973763
}
monoisotopic_elements = {
'H' : 1.007825032,
'H2' : 2.01410178,
'C' : 12.000000,
'C13' : 13.00335484,
'N' : 14.003074005,
'N15' : 15.000108898,
'O' : 15.994914620,
'O17' : 16.999132,
'O18' : 17.999161,
'P' : 30.973762,
'S' : 31.972071
}
aa_codes = {
'A' : 'Ala',
'R' : 'Arg',
'N' : 'Asn',
'D' : 'Asp',
'C' : 'Cys',
'E' : 'Glu',
'Q' : 'Gln',
'G' : 'Gly',
'H' : 'His',
'I' : 'Ile',
'L' : 'Leu',
'K' : 'Lys',
'M' : 'Met',
'F' : 'Phe',
'P' : 'Pro',
'S' : 'Ser',
'T' : 'Thr',
'W' : 'Trp',
'Y' : 'Tyr',
'V' : 'Val',
'C[160]' : 'Cys+CAM',
'M[147]' : 'Met+Ox',
}
aa_codes_rev = dict([(v,k) for k,v in aa_codes.iteritems()])
aa_names = {
'A': 'Alanine',
'B': 'Aspartic Acid or Asparagine',
'C': 'Cysteine',
'c': 'Modified cysteine' ,
'D': 'Aspartate',
'E': 'Glutamate',
'F': 'Phenylalanine',
'G': 'Glycine',
'H': 'Histidine',
'I': 'Isoleucine',
'K': 'Lysine',
'k': 'Lys->Cys substitution and carbamidomethylation (903)',
'L': 'Leucine',
'M': 'Methionine',
'm': 'Modified methionine' ,
'N': 'Asparagine',
'P': 'Proline',
'Q': 'Glutamine',
'R': 'Arginine',
'S': 'Serine',
'T': 'Threonine',
'V': 'Valine',
'W': 'Tryptophan',
'X': 'Leucine/Isoleucine',
'Y': 'Tyrosine',
'Z': 'Glutamic acid'
}
aa_sum_formulas_text = {
'A' : 'C3H5ON',
'R' : 'C6H12ON4',
'N' : 'C4H6O2N2',
'D' : 'C4H5O3N',
'C' : 'C3H5ONS',
'E' : 'C5H7O3N',
'Q' : 'C5H8O2N2',
'G' : 'C2H3ON',
'H' : 'C6H7ON3',
'I' : 'C6H11ON',
'L' : 'C6H11ON',
'K' : 'C6H12ON2',
'M' : 'C5H9ONS',
'F' : 'C9H9ON',
'P' : 'C5H7ON',
'S' : 'C3H5O2N',
'T' : 'C4H7O2N',
'W' : 'C11H10ON2',
'Y' : 'C9H9O2N',
'V' : 'C5H9ON'
}
#from http://education.expasy.org/student_projects/isotopident/htdocs/aa-list.html
aa_sum_formulas = {
'A' : { 'C' : 3, 'H' : 5 , 'O' : 1, 'N' : 1 },
'R' : { 'C' : 6, 'H' : 12 , 'O' : 1, 'N' : 4 },
'N' : { 'C' : 4, 'H' : 6 , 'O' : 2, 'N' : 2 },
'D' : { 'C' : 4, 'H' : 5 , 'O' : 3, 'N' : 1 },
'C' : { 'C' : 3, 'H' : 5 , 'O' : 1, 'N' : 1, 'S' : 1 },
'E' : { 'C' : 5, 'H' : 7 , 'O' : 3, 'N' : 1 },
'Q' : { 'C' : 5, 'H' : 8 , 'O' : 2, 'N' : 2 },
'G' : { 'C' : 2, 'H' : 3 , 'O' : 1, 'N' : 1 },
'H' : { 'C' : 6, 'H' : 7 , 'O' : 1, 'N' : 3 },
'I' : { 'C' : 6, 'H' : 11 , 'O' : 1, 'N' : 1 },
'L' : { 'C' : 6, 'H' : 11 , 'O' : 1, 'N' : 1 },
'K' : { 'C' : 6, 'H' : 12 , 'O' : 1, 'N' : 2 },
'M' : { 'C' : 5, 'H' : 9 , 'O' : 1, 'N' : 1, 'S' : 1 },
'F' : { 'C' : 9, 'H' : 9 , 'O' : 1, 'N' : 1 },
'P' : { 'C' : 5, 'H' : 7 , 'O' : 1, 'N' : 1 },
'S' : { 'C' : 3, 'H' : 5 , 'O' : 2, 'N' : 1 },
'T' : { 'C' : 4, 'H' : 7 , 'O' : 2, 'N' : 1 },
'W' : { 'C' : 11, 'H' : 10 , 'O' : 1, 'N' : 2 },
'Y' : { 'C' : 9, 'H' : 9 , 'O' : 2, 'N' : 1 },
'V' : { 'C' : 5, 'H' : 9 , 'O' : 1, 'N' : 1 },
'C[160]' : { 'C' : 3+2, 'H' : 5+3 , 'O' : 1+1, 'N' : 1+1, 'S' : 1 }, # + CAM = H(3) C(2) N O
'M[147]' : { 'C' : 5, 'H' : 9 , 'O' : 1+1, 'N' : 1, 'S' : 1 },
}
mass_H = monoisotopic_elements['H']
mass_N = monoisotopic_elements['N']
mass_O = monoisotopic_elements['O']
mass_C = monoisotopic_elements['C']
mass_S = monoisotopic_elements['S']
mass_P = monoisotopic_elements['P']
mass_NH2 = mass_N + 2*mass_H
mass_NH3 = mass_N + 3*mass_H
mass_CO = mass_C + mass_O
mass_H2O = mass_O + 2*mass_H
mass_OH = mass_O + mass_H
mass_H3PO4 = mass_P + mass_O * 4 + mass_H * 3
mass_H1PO4 = mass_P + mass_O * 4 + mass_H * 1
mass_H1PO3 = mass_P + mass_O * 3 + mass_H * 1
mass_CAM = 2* mass_C + 4*mass_H + mass_O + mass_N #CH2-CONH2
mass_C13 = monoisotopic_elements['C13']
mass_N15 = monoisotopic_elements['N15']
mass_diffC13 = mass_C13 - mass_C
mass_diffN15 = mass_N15 - mass_N
average_data = {
# Key on abbreviation, give name, molecular weight (in daltons).
'A': ('Alanine', 71.0788),
'B': ('Aspartic Acid or Asparagine', 114.5962),
'C': ('Cysteine', 103.1448),
'c': ('Modified cysteine' , 160.1448), # Add 57
'D': ('Aspartate', 115.0886),
'E': ('Glutamate', 129.1155),
'F': ('Phenylalanine', 147.1766),
'G': ('Glycine', 57.0519),
'H': ('Histidine', 137.1411),
'I': ('Isoleucine', 113.1594),
'K': ('Lysine', 128.1741),
'k': ('Lys->Cys substitution and carbamidomethylation (903)', 128.09496 + 32.0219),
'L': ('Leucine', 113.1594),
'M': ('Methionine', 131.1986),
'm': ('Modified methionine' , 147.1986), # add 16
'N': ('Asparagine', 114.1038),
'P': ('Proline', 97.1167),
'Q': ('Glutamine', 128.1307),
'R': ('Arginine', 156.1875),
'S': ('Serine', 87.0782),
'T': ('Threonine', 101.1051),
'V': ('Valine', 99.1326),
'W': ('Tryptophan', 186.2132),
'X': ('Leucine/Isoleucine', 113.1594), # Can't distinguish leucine/isoleucine.
'Y': ('Tyrosine', 163.176),
'Z': ('Glutamic acid, or glutamine', 128),
}
#e.g. from http://education.expasy.org/student_projects/isotopident/htdocs/aa-list.html
# see also http://www.sbeams.org/svn/sbeams/trunk/sbeams/lib/perl/SBEAMS/Proteomics/AminoAcidModifications.pm
monoisotopic_data = {
# Key on abbreviation, give name, molecular weight (in daltons).
'A': ('Alanine', 71.03711),
'B': ('Aspartic Acid or Asparagine', 114.04293),
'C': ('Cysteine', 103.00919),
'D': ('Aspartate', 115.02694),
'E': ('Glutamate', 129.04259),
'F': ('Phenylalanine', 147.06841),
'G': ('Glycine', 57.02146),
'H': ('Histidine', 137.05891),
'I': ('Isoleucine', 113.08406),
'K': ('Lysine', 128.09496),
'L': ('Leucine', 113.08406),
'M': ('Methionine', 131.04049),
'N': ('Asparagine', 114.04293),
'P': ('Proline', 97.05276),
'Q': ('Glutamine', 128.05858),
'R': ('Arginine', 156.10111),
'S': ('Serine', 87.03203),
'T': ('Threonine', 101.04768),
'V': ('Valine', 99.06841),
'W': ('Tryptophan', 186.07931),
'X': ('Leucine/Isoleucine', 113.08406), # Can't distinguish leucine/isoleucine
'Y': ('Tyrosine', 163.06333),
'Z': ('Glutamic acid, or glutamine', 128.05858),
}
monoisotopic_mod = {
'c': ('Modified cysteine', monoisotopic_data["C"][1] + mass_CAM - mass_H ), # CAM replaces H
#'c': ('Modified cysteine' , 160.00919), # Add 57
'C[160]': ('Modified cysteine', monoisotopic_data["C"][1] + mass_CAM - mass_H ), # CAM replaces H
'k': ('Lys->Cys substitution and carbamidomethylation (903)', 128.09496 + 31.935685),
'N[115]': ('Asparagine', monoisotopic_data["N"][1] - mass_N - mass_H + mass_O),
#'m': ('Modified methionine', 147.04049), # add 16
'm': ('Modified methionine', monoisotopic_data["M"][1] + mass_O), # oxygen
'M[147]': ('Modified methionine', monoisotopic_data["M"][1] + mass_O), # oxygen
# SILAC labels
'K[136]' : ('heavy Lysine', monoisotopic_data["K"][1] + 8.014199), #UniMod:259
'R[166]' : ('heavy Arginine', monoisotopic_data["R"][1] + 10.008269), #UniMod:267
'R[162]' : ('heavy Arginine', monoisotopic_data["R"][1] + 6*mass_diffC13), #UniMod:188
'V[104]' : ('heavy Valine', monoisotopic_data["V"][1] + 5*mass_diffC13), # no unimod
'V[105]' : ('heavy Valine', monoisotopic_data["V"][1] + 5*mass_diffC13 + mass_diffN15), # unimod 268
# Pyro Unimod 27 and 28
'E[111]': ('pyro Glutamate', 129.04259 - mass_O - 2*mass_H),
'Q[111]': ('pyro Glutamine', 128.05858 - mass_O - 2*mass_H),
# Unimod 385 # Pyro-carbamidomethyl as a delta from Carbamidomethyl-Cys
'C[143]': ('Pyro-carbamidomethyl cysteine' , monoisotopic_data["C"][1] + mass_CAM - mass_H - 3*mass_H - mass_N),
# Phospho
'S[166]': ('Phospho Serine', 87.03203 + mass_H1PO3),
'S[167]': ('Phospho Serine', 87.03203 + mass_H1PO3),
'T[181]': ('Phospho Threonine', 101.04768 + mass_H1PO3),
'Y[243]': ('Phospho Tyrosine', 163.06333 + mass_H1PO3),
}
mod_mapping = {
"K[+8]" : "K[136]",
"R[+10]": "R[166]",
"M[+16]": "M[147]",
"N[-1]" : "N[115]",
"C[+57]": "C[160]",
"C[+40]": "C[160]",
"R[+6]" : "R[162]",
"V[+5]" : "V[104]",
"V[+6]" : "R[105]",
"S[+80]" : "S[167]",
"T[+80]" : "T[181]",
"Y[+80]" : "Y[243]",
}
monoisotopic_data.update(monoisotopic_mod)
#C[169] 58 => ?
#C[152] 2 => ?
#W[202] 23 => Oxidation?
"""
http://web.expasy.org/protscale/pscale/Hphob.Doolittle.html
GRAVY (Grand Average of Hydropathy)
The GRAVY value for a peptide or protein is calculated as the sum of hydropathy values [9] of all the amino acids, divided by the number of residues in the sequence.
Amino acid scale: Hydropathicity.
Author(s): Kyte J., Doolittle R.F.
Reference: J. Mol. Biol. 157:105-132(1982).
Amino acid scale values:
"""
Hydropathy = {
'Ala': 1.800,
'Arg': -4.500,
'Asn': -3.500,
'Asp': -3.500,
'Cys': 2.500,
'Gln': -3.500,
'Glu': -3.500,
'Gly': -0.400,
'His': -3.200,
'Ile': 4.500,
'Leu': 3.800,
'Lys': -3.900,
'Met': 1.900,
'Phe': 2.800,
'Pro': -1.600,
'Ser': -0.800,
'Thr': -0.700,
'Trp': -0.900,
'Tyr': -1.300,
'Val': 4.200,
}
Hydropathy_aa = dict([ (aa_codes_rev[k],v) for k,v in Hydropathy.iteritems()])
hydrophobicity = {
'F': 5.00,
'W': 4.88,
'L': 4.76,
'X': 4.59,
'I': 4.41,
'M': 3.23,
'V': 3.02,
'C': 2.50,
'Y': 2.00,
'A': 0.16,
'T': -1.08,
'E': -1.50,
'Z': -2.13,
'D': -2.49,
'Q': -2.76,
'R': -2.77,
'S': -2.85,
'B': -3.14,
'G': -3.31,
'N': -3.79,
'H': -4.63,
'P': -4.92,
'K': -5.00
}
basicity = {
'G': 202.7,
'C': 206.2,
'A': 206.4,
'S': 207.6,
'D': 208.6,
'V': 208.7,
'L': 209.6,
'X': 210.2,
'B': 210.7,
'I': 210.8,
'T': 211.7,
'F': 212.1,
'N': 212.8,
'Y': 213.1,
'M': 213.3,
'Q': 214.2,
'P': 214.4,
'Z': 214.9,
'E': 215.6,
'W': 216.1,
'K': 221.8,
'H': 223.7,
'R': 237.0
}
helicity = {
'F': 1.26,
'W': 1.07,
'L': 1.28,
'X': 1.29, #avg L,I
'I': 1.29,
'M': 1.22,
'V': 1.27,
'C': 0.79,
'Y': 1.11,
'A': 1.24,
'T': 1.09,
'E': 0.85,
'D': 0.89,
'Z': 0.91, #avg Q,E
'B': 0.92, #avg N,D
'Q': 0.96,
'R': 0.95,
'S': 1.00,
'G': 1.15,
'N': 0.94,
'H': 0.97,
'P': 0.57,
'K': 0.88,
}
pI = {
'G': 6.0,
'A': 6.0,
'V': 6.0,
'L': 6.0,
'X': 6.0, #L or I
'I': 6.0,
'F': 5.5,
'P': 6.3,
'S': 5.7,
'T': 5.6,
'Y': 5.7,
'C': 5.0,
'M': 5.7,
'N': 5.4,
'B': 4.1, #avg N and D
'Q': 5.7,
'Z': 4.5, #avg Q,E
'W': 5.9,
'D': 2.8,
'E': 3.2,
'K': 9.7,
'R': 10.8,
'H': 7.6
}
def __init__(self, type="mono"):
"""Set up the residue data structure."""
#add the phosphorylations
self.monoisotopic_data[ 's' ] = ('Phospho-S',
self.monoisotopic_data[ 'S' ][1] + self.mass_H1PO3)
self.monoisotopic_data[ 't' ] = ('Phospho-T',
self.monoisotopic_data[ 'T' ][1] + self.mass_H1PO3)
self.monoisotopic_data[ 'y' ] = ('Phospho-Y',
self.monoisotopic_data[ 'Y' ][1] + self.mass_H1PO3)
self.average_data[ 's' ] = ('Phospho-S',
self.average_data[ 'S' ][1] + self.mass_H1PO3)
self.average_data[ 't' ] = ('Phospho-T',
self.average_data[ 'T' ][1] + self.mass_H1PO3)
self.average_data[ 'y' ] = ('Phospho-Y',
self.average_data[ 'Y' ][1] + self.mass_H1PO3)
if not type:
self.residues = self.average_data
elif type.startswith("mono"):
self.residues = self.monoisotopic_data
elif type.startswith("av"):
self.residues = self.average_data
else:
raise ValueError("Type of residue must be one of: mono[isotopic], av[erage] (characters within [] are optional.")
keys = self.residues.keys()
self.res_pairs = [ string.join((r, s), '') for r in keys for s in keys ]
def recalculate_monisotopic_data(self):
self.monoisotopic_data = {}
for abbrev, formula in self.aa_sum_formulas.iteritems():
mysum = 0.0
for key, value in formula.iteritems():
mysum += self.monoisotopic_elements[ key ] * value
self.monoisotopic_data[ abbrev ] = ( self.aa_codes[abbrev] , mysum )
#
self.monoisotopic_data['c'] = self.monoisotopic_data['C'] + self.mass_CAM - self.mass_H
self.monoisotopic_data['c'] = ( 'Modified cystein',
self.monoisotopic_data['C'][1] + self.mass_CAM - self.mass_H)
self.monoisotopic_data['k'] = ( 'Lys->Cys substitution and carbamidomethylation (903)',
self.monoisotopic_data['K'][1] + 31.935685)
self.monoisotopic_data['m'] = ( 'Modified methionine',
self.monoisotopic_data['M'][1] + self.mass_O)
self.monoisotopic_data[ 's' ] = ('Phospho-S',
self.monoisotopic_data[ 'S' ][1] + self.mass_H1PO3)
self.monoisotopic_data[ 't' ] = ('Phospho-T',
self.monoisotopic_data[ 'T' ][1] + self.mass_H1PO3)
self.monoisotopic_data[ 'y' ] = ('Phospho-Y',
self.monoisotopic_data[ 'Y' ][1] + self.mass_H1PO3)
self.residues = self.monoisotopic_data
def recalculate_monisotopic_data_for_N15(self):
self.monoisotopic_data = {}
for abbrev, formula in self.aa_sum_formulas.iteritems():
mysum = 0.0
for key, value in formula.iteritems():
#replace N with N15
if key == 'N': key = 'N15'
mysum += self.monoisotopic_elements[ key ] * value
self.monoisotopic_data[ abbrev ] = ( self.aa_codes[abbrev] , mysum )
#IMPORTANT: CAM is added afterwards and is NOT heavy
#
self.monoisotopic_data['C[160]'] = ( 'Modified cystein',
self.monoisotopic_data['C'][1] + self.mass_CAM - self.mass_H)
self.monoisotopic_data['N[115]'] = ( 'Modified asparagine',
self.monoisotopic_data['N'][1] - self.mass_N15 - self.mass_H + self.mass_O)
self.monoisotopic_data['M[147]'] = ( 'Modified methionine',
self.monoisotopic_data['M'][1] + self.mass_O)
#
self.monoisotopic_data['c'] = ( 'Modified cystein',
self.monoisotopic_data['C'][1] + self.mass_CAM - self.mass_H)
self.monoisotopic_data['k'] = ( 'Lys->Cys substitution and carbamidomethylation (903)',
self.monoisotopic_data['K'][1] + 31.935685)
self.monoisotopic_data['m'] = ( 'Modified methionine',
self.monoisotopic_data['M'][1] + self.mass_O)
self.monoisotopic_data[ 's' ] = ('Phospho-S',
self.monoisotopic_data[ 'S' ][1] + self.mass_H1PO3)
self.monoisotopic_data[ 't' ] = ('Phospho-T',
self.monoisotopic_data[ 'T' ][1] + self.mass_H1PO3)
self.monoisotopic_data[ 'y' ] = ('Phospho-Y',
self.monoisotopic_data[ 'Y' ][1] + self.mass_H1PO3)
self.residues = self.monoisotopic_data
| [
"[email protected]"
]
| |
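The GRAVY definition quoted in the Residues.py docstring above (the sum of the Kyte-Doolittle hydropathy values of all amino acids divided by the number of residues) maps to a short computation. A minimal sketch using the same scale values listed in that file; the helper name and the example peptide are illustrative assumptions, not part of the original module:

KYTE_DOOLITTLE = {'A': 1.8, 'R': -4.5, 'N': -3.5, 'D': -3.5, 'C': 2.5,
                  'Q': -3.5, 'E': -3.5, 'G': -0.4, 'H': -3.2, 'I': 4.5,
                  'L': 3.8, 'K': -3.9, 'M': 1.9, 'F': 2.8, 'P': -1.6,
                  'S': -0.8, 'T': -0.7, 'W': -0.9, 'Y': -1.3, 'V': 4.2}

def gravy(sequence):
    # GRAVY = sum of per-residue hydropathy values / number of residues
    return sum(KYTE_DOOLITTLE[aa] for aa in sequence) / len(sequence)

print(round(gravy('PEPTIDE'), 3))  # (-1.6 - 3.5 - 1.6 - 0.7 + 4.5 - 3.5 - 3.5) / 7 = -1.414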
8fbeae9a4326bddee26e1a4de2ade8d305654222 | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW06_20210715223105.py | d97dd858d1598f85d9ebd66b49358181614c0345 | []
| no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,070 | py | """
Georgia Institute of Technology - CS1301
HW06 - Text Files & CSV
Collaboration Statement:
"""
#########################################
"""
Function Name: findCuisine()
Parameters: filename (str), cuisine (str)
Returns: list of restaurants (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def findCuisine(filename, cuisine):
file = open(filename,'r')
content = file.readlines()
listOfRestaurants = []
for i in range(len(content)):
if content[i].strip() == cuisine:
listOfRestaurants.append(content[i-1].strip()) #add the name of the restaurant, which is the previous line
file.close()
return listOfRestaurants
"""
Function Name: restaurantFilter()
Parameters: filename (str)
Returns: dictionary that maps cuisine type (str)
to a list of restaurants of the same cuisine type (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def restaurantFilter(filename):
dict = {}
file = open(filename,'r')
content = file.readlines()
cuisines = []
for i in range(1,len(content),4):
line = content[i].strip()
if line not in cuisines:
cuisines.append(line)
for i in range(len(cuisines)):
dict[cuisines[i]] = []
for i in range(0,len(content),4):
line = content[i].strip()
lineBelow = content[i+1].strip()
dict[lineBelow].append(line)
return dict
"""
Function Name: createDirectory()
Parameters: filename (str), output filename (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def createDirectory(filename, outputFilename):
readFile = open(filename, 'r')
writeFile = open(outputFilename, 'w')
content = readFile.readlines()
fastfood = []
sitdown = []
fastfoodcounter = 1
sitdowncouter = 1
for i in range(2,len(content), 4):
restaurant = content[i-2].strip()
cuisine = content[i-1].strip()
group = content[i].strip()
if group == 'Fast Food':
fastfood.append(str(fastfoodcounter) + '. ' + restaurant + ' - ' + cuisine + '\n')
fastfoodcounter += 1
else:
sitdown.append(str(sitdowncouter) + '. ' + restaurant + ' - ' + cuisine)
sitdowncouter += 1
writeFile.write('Restaurant Directory' + '\n')
writeFile.write('Fast Food' + '\n')
writeFile.writelines(fastfood)
writeFile.write('Sit-down' + '\n')
for i in range(len(sitdown)):
if i != len(sitdown)-1:
writeFile.write(sitdown[i] + '\n')
else:
writeFile.write(sitdown[i])
"""
Function Name: extraHours()
Parameters: filename (str), hour (int)
Returns: list of (person, extra money) tuples (tuple)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def extraHours(filename, hour):
overtime = []
file = open(filename, 'r')
header = file.readline()
content = file.readlines()
for i in content:
line = i.strip().split(',')
name = line[0]
wage = int(line[2])
hoursWorked = int(line[4])
if hoursWorked > hour:
compensation = (hoursWorked - hour) * wage
overtime.append((name, compensation))
return overtime
"""
Function Name: seniorStaffAverage()
Parameters: filename (str), year (int)
Returns: average age of senior staff members (float)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def seniorStaffAverage(filename, year):
averageAge = 0.0
employeeCount = 0
file = open(filename, 'r')
header = file.readline()
content = file.readlines()
for i in content:
line = i.strip().split(',')
age = int(line[1])
yearHired = int(line[3])
if yearHired < year:
averageAge += age
employeeCount += 1
averageAge /= employeeCount
return round(averageAge,2)
"""
Function Name: ageDict()
Parameters: filename (str), list of age ranges represented by strings (list)
Returns: dictionary (dict) that maps each age range (str) to a list of employees (list)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def ageDict(filename, ageRangeList):
employeeAgeDictionary = {}
for i in ageRangeList:
employeeAgeDictionary[i] = []
print(employeeAgeDictionary)
# print(findCuisine('restaurants.txt', 'Mexican'))
# print(restaurantFilter('restaurants.txt'))
# print(createDirectory('restaurants.txt','output.txt'))
# print(extraHours('employees.csv', 40))
# print(seniorStaffAverage('employees.csv', 2019))
print(ageDict('employees.csv'))
| [
"[email protected]"
]
| |
bb5ebaf33900bfcc44fdc19ac42207993daeaa5f | 551d993b15f7e54635cc11d7ed3ee45a2e9aacc6 | /AAE/Tensorflow_implementation/unsupervised/regularized_z/model.py | df4e3fcf6ad90ce669025df91eb33dfbcfbcb10a | [
"MIT"
]
| permissive | hendrikTpl/GAN_models | 6185a3c112a8b45205bdd4c556164b6153fbec19 | 8234c7f04be39d20fe09f81511b591deab9152a9 | refs/heads/master | 2021-10-25T16:52:13.239290 | 2019-04-05T15:28:06 | 2019-04-05T15:28:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,844 | py | from component_without_bn import *
class Object:
pass
def build_graph(is_test=False):
# Inputs
images = tf.placeholder(dtype=tf.float32, shape=[None, config.ndim_x])
z_sampler = tf.placeholder(dtype=tf.float32, shape=[None, config.ndim_z])
learning_rate = tf.placeholder(dtype=tf.float32, shape=[])
# Graph
encoder = encoder_x_z
decoder = decoder_z_x
discriminator = discriminator_z
with tf.variable_scope('encoder'):
z_representation = encoder(images)
with tf.variable_scope('decoder'):
reconstruction = decoder(z_representation)
if is_test:
test_handle = Object()
test_handle.x = images
test_handle.z_r = z_representation
test_handle.x_r = reconstruction
return test_handle
probability_fake_sample = discriminator(z_representation)
probability_true_sample = discriminator(z_sampler, reuse=True)
# Loss function
# classification
# 0 -> true sample
# 1 -> generated sample
class_true = tf.ones(shape=(config.batch_size, config.ndim_z / 2), dtype=tf.int32)
class_fake = tf.zeros(shape=(config.batch_size, config.ndim_z / 2), dtype=tf.int32)
loss_discriminator = opt.softmax_cross_entropy(probability_fake_sample, probability_true_sample, class_fake,
class_true)
loss_encoder = opt.softmax_cross_entropy(probability_fake_sample, probability_true_sample,\
class_fake, class_true, for_generator=True)
loss_resconstruction = opt.euclidean_distance(images, reconstruction)
# Variables Collection
variables_encoder = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='encoder')
variables_decoder = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='decoder')
variables_discriminator = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
# Optimizer
counter_encoder = tf.Variable(trainable=False, initial_value=0, dtype=tf.float32)
counter_resconstruction = tf.Variable(trainable=False, initial_value=0, dtype=tf.float32)
counter_discriminator = tf.Variable(trainable=False, initial_value=0, dtype=tf.float32)
opt_resconstruction = opt.optimize(loss_resconstruction, variables_decoder + variables_encoder,
optimizer=tf.train.AdamOptimizer if config.optimizer_is_adam is True else tf.train.RMSPropOptimizer,
learning_rate=learning_rate, global_step=counter_resconstruction
)
opt_discriminator = opt.optimize(config.scale_ratio * loss_discriminator, variables_discriminator,
optimizer=tf.train.AdamOptimizer if config.optimizer_is_adam is True else tf.train.RMSPropOptimizer,
learning_rate=learning_rate, global_step=counter_discriminator
)
opt_encoder = opt.optimize(config.scale_ratio * loss_encoder, variables_encoder,
optimizer=tf.train.AdamOptimizer if config.optimizer_is_adam is True else tf.train.RMSPropOptimizer,
learning_rate=learning_rate, global_step=counter_encoder
)
# output what we want
graph_handle = Object()
graph_handle.x = images
graph_handle.z = z_sampler
graph_handle.x_ = reconstruction
graph_handle.z_r = z_representation
graph_handle.opt_r = opt_resconstruction
graph_handle.opt_d = opt_discriminator
graph_handle.opt_e = opt_encoder
graph_handle.loss_d = loss_discriminator
graph_handle.loss_e = loss_encoder
graph_handle.loss_r = loss_resconstruction
graph_handle.lr = learning_rate
return graph_handle
| [
"[email protected]"
]
| |
4e84c64706c5b3dcde4f84dc13e6085aa18fa72b | 61296b98e4d481893db4bc51d75652c7109ae626 | /0000_examples/cobotta_g.py | 116d8d398c21d519f84520776dd6e95bfdd43b4d | [
"MIT"
]
| permissive | Shogo-Hayakawa/wrs | 23d4560b1062cf103ed32db4b2ef1fc2261dd765 | 405f15be1a3f7740f3eb7d234d96998f6d057a54 | refs/heads/main | 2023-08-19T19:29:15.409949 | 2021-11-02T01:22:29 | 2021-11-02T01:22:29 | 423,663,614 | 0 | 0 | MIT | 2021-11-02T00:59:17 | 2021-11-02T00:59:17 | null | UTF-8 | Python | false | false | 1,418 | py | import visualization.panda.world as wd
import grasping.planning.antipodal as gp
import robot_sim.end_effectors.grippers.cobotta_gripper.cobotta_gripper as cg
import modeling.collision_model as cm
import modeling.geometric_model as gm
import numpy as np
import math
base = wd.World(cam_pos=np.array([.5, .5, .5]), lookat_pos=np.array([0, 0, 0]))
gm.gen_frame().attach_to(base)
objcm = cm.CollisionModel("objects/holder.stl")
objcm.attach_to(base)
# base.run()
hnd_s = cg.CobottaGripper()
# hnd_s.gen_meshmodel().attach_to(base)
# base.run()
grasp_info_list = gp.plan_grasps(hnd_s,
objcm,
angle_between_contact_normals=math.radians(175),
openning_direction='loc_y',
rotation_interval=math.radians(15),
max_samples=20,
min_dist_between_sampled_contact_points=.001,
contact_offset=.001)
gp.write_pickle_file(objcm_name="holder",
grasp_info_list=grasp_info_list,
file_name="cobg_holder_grasps.pickle")
for grasp_info in grasp_info_list:
jaw_width, jaw_center_pos, jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
hnd_s.grip_at_with_jcpose(jaw_center_pos, jaw_center_rotmat, jaw_width)
hnd_s.gen_meshmodel().attach_to(base)
base.run() | [
"[email protected]"
]
| |
560bbdf2d856311a383f2556ff042c6b24798d81 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/SAF-ENTERPRISE.py | 25f13b95d7e50531041d277cb4e2ad47bc261ce1 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 2,343 | py | #
# PySNMP MIB module SAF-ENTERPRISE (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SAF-ENTERPRISE
# Produced by pysmi-0.3.4 at Wed May 1 14:59:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
NotificationType, Integer32, Counter32, Bits, iso, Gauge32, Unsigned32, IpAddress, MibIdentifier, enterprises, TimeTicks, ModuleIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Integer32", "Counter32", "Bits", "iso", "Gauge32", "Unsigned32", "IpAddress", "MibIdentifier", "enterprises", "TimeTicks", "ModuleIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
saf = ModuleIdentity((1, 3, 6, 1, 4, 1, 7571))
if mibBuilder.loadTexts: saf.setLastUpdated('2007040300Z')
if mibBuilder.loadTexts: saf.setOrganization('SAF Tehnika')
if mibBuilder.loadTexts: saf.setContactInfo('SAF Tehnika technical support <[email protected]>')
if mibBuilder.loadTexts: saf.setDescription('')
tehnika = ObjectIdentity((1, 3, 6, 1, 4, 1, 7571, 100))
if mibBuilder.loadTexts: tehnika.setStatus('current')
if mibBuilder.loadTexts: tehnika.setDescription('Subtree to register SAF tehnika modules')
microwaveRadio = MibIdentifier((1, 3, 6, 1, 4, 1, 7571, 100, 1))
pointToPoint = MibIdentifier((1, 3, 6, 1, 4, 1, 7571, 100, 1, 1))
mibBuilder.exportSymbols("SAF-ENTERPRISE", tehnika=tehnika, PYSNMP_MODULE_ID=saf, microwaveRadio=microwaveRadio, pointToPoint=pointToPoint, saf=saf)
| [
"[email protected]"
]
| |
b18c5a2b2afb8aa641c036874755e5247c1d83d0 | be78d77bea1a5eea2a7f0d4090e1fc138623b79a | /cybox/test/objects/link_test.py | bac34e34bbbca2617a14995b938c2e2f2505741b | [
"BSD-3-Clause"
]
| permissive | CybOXProject/python-cybox | 399f73feb6a54778dca9260b1c0340a3895c6369 | 25e6e8b3a6f429f079d3fbd9ace3db9eb3d5ab71 | refs/heads/master | 2020-05-21T19:05:56.725689 | 2020-05-01T13:33:48 | 2020-05-01T13:33:48 | 7,631,169 | 43 | 31 | BSD-3-Clause | 2020-05-01T12:41:03 | 2013-01-15T19:04:47 | Python | UTF-8 | Python | false | false | 980 | py | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from mixbox.vendor.six import u
from cybox.core import Observables
from cybox.objects.link_object import Link
from cybox.objects.uri_object import URI
from cybox.test.objects import ObjectTestCase
class TestLink(ObjectTestCase, unittest.TestCase):
object_type = "LinkObjectType"
klass = Link
_full_dict = {
'value': u("http://www.example.com"),
'type': URI.TYPE_URL,
'url_label': u("Click Here!"),
'xsi:type': object_type,
}
# https://github.com/CybOXProject/python-cybox/issues/202
def test_correct_namespace_output(self):
link = Link()
link.value = u("https://www.example.com")
xml = Observables(link).to_xml()
self.assertTrue(b"cybox:Properties" in xml)
self.assertTrue(b"LinkObj:Properties" not in xml)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
1e8fed92b77867c5a707bc1e8cdaed3ff6f5566b | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/20ed819acd6f85b1facda3b799d3c24b3ada7ad6-<run>-bug.py | 9d67f4caf81ac18c3daab8feb6cc8736cb5c336a | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | def run(self, terms, variables, **kwargs):
if (not CREDSTASH_INSTALLED):
raise AnsibleError('The credstash lookup plugin requires credstash to be installed.')
ret = []
for term in terms:
try:
version = kwargs.pop('version', '')
region = kwargs.pop('region', None)
table = kwargs.pop('table', 'credential-store')
profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None))
aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None))
aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None))
aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None))
kwargs_pass = {
'profile_name': profile_name,
'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key,
'aws_session_token': aws_session_token,
}
val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass)
except credstash.ItemNotFound:
raise AnsibleError('Key {0} not found'.format(term))
except Exception as e:
raise AnsibleError('Encountered exception while fetching {0}: {1}'.format(term, e.message))
ret.append(val)
return ret | [
"[email protected]"
]
| |
09d3b6f3dc518b71e5ac7013db8d512620bbe1a1 | 7bb64fb43c503e8f2ecf0f02619b539af3401d39 | /test_files/Zr_Vasprun/test_Vasprun.py | adc1d5cc9df5295240f97a9d3801a1ca67e323a4 | [
"MIT"
]
| permissive | montoyjh/pymatgen | 13c3179cd4cf5ff521e8380e480b23d35709c379 | 62ecae1c7382a41861e3a5d9b9c8dd1207472409 | refs/heads/master | 2023-06-09T15:02:15.309354 | 2019-04-03T14:39:33 | 2019-04-03T14:39:33 | 42,539,977 | 2 | 2 | MIT | 2019-06-21T17:15:55 | 2015-09-15T18:56:26 | Propeller Spin | UTF-8 | Python | false | false | 99 | py | #!/usr/bin/env python
from pymatgen.io.vasp.outputs import Vasprun
run = Vasprun("./vasprun.xml")
| [
"[email protected]"
]
| |
00884fcc431f3b0fc1c306f662977b63ebc1c16c | 743da4642ac376e5c4e1a3b63c079533a5e56587 | /build/lib.win-amd64-3.6/fairseq/modules/quantization/pq/modules/__init__.py | b6881e26bb167f75f55dacfac72238979dd74f80 | [
"MIT"
]
| permissive | tmtmaj/Exploiting-PrLM-for-NLG-tasks | cdae1b6e451b594b11d8ecef3c1cd4e12fe51c9b | e8752593d3ee881cf9c0fb5ed26d26fcb02e6dd5 | refs/heads/main | 2023-06-16T08:26:32.560746 | 2021-07-14T17:50:19 | 2021-07-14T17:50:19 | 371,899,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .qconv import PQConv2d # NOQA
from .qlinear import PQLinear # NOQA
from .qemb import PQEmbedding # NOQA
| [
"[email protected]"
]
| |
f89d77ec7050a0b3fad826498e49acf3dae1ad69 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4060/157004060.py | 58e52da8c9b818275d320822a6d5a4d065d5c91c | []
| no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,347 | py | from bots.botsconfig import *
from records004060 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'NP',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 99999},
]},
{ID: 'HL', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'NM1', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'IN2', MIN: 0, MAX: 99999},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 99999},
{ID: 'REF', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 99999},
{ID: 'SPY', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'N2', MIN: 0, MAX: 2},
{ID: 'N3', MIN: 0, MAX: 2},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 99999},
{ID: 'DTM', MIN: 0, MAX: 99999},
]},
]},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
]
| |
52ae3a1a8d1d8f8f7503b9181f015b165f68bf00 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_scrubs.py | 6efbee2e9b847a91a88e3d43d8c1023f95e3fd07 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.nouns._scrub import _SCRUB
#calss header
class _SCRUBS(_SCRUB, ):
def __init__(self,):
_SCRUB.__init__(self)
self.name = "SCRUBS"
self.specie = 'nouns'
self.basic = "scrub"
self.jsondata = {}
| [
"[email protected]"
]
| |
9c765fca0194129caa59e74b70cc204fc59bce14 | cf1e19f7b6354302037bca563b42218df7d79400 | /최단경로/[2307]도로검문.py | 3540a2ab6f4b48e1f02290e4e11b12bf476f0669 | []
| no_license | kim-kiwon/Baekjoon-Algorithm | 680565ddeced2d44506ae6720cf32d8004db42f8 | 4699e6551d3e7451648b9256c54ea4318b71bd4d | refs/heads/master | 2023-04-13T11:10:21.031969 | 2021-04-26T10:50:08 | 2021-04-26T10:50:08 | 325,209,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,898 | py | # Dijkstra + path tracing
import heapq
n, m = map(int, input().split())
INF = int(1e9)
graph = [[] for _ in range(n+1)]
previous = [1] * (n+1) # store each node's predecessor
for _ in range(m):
a, b, dist = map(int, input().split())
graph[a].append((b, dist))
graph[b].append((a, dist))
def dijkstra():
distance = [INF] * (n+1)
distance[1] = 0
q = []
q.append((1, 0))
while q:
now, dist = heapq.heappop(q)
if distance[now] < dist:
continue
for i in graph[now]:
cost = dist + i[1]
if cost < distance[i[0]]:
distance[i[0]] = cost
heapq.heappush(q, (i[0], cost))
previous[i[0]] = now
return distance[n]
init_val = dijkstra() # run Dijkstra; store the initial shortest-path length
temp = [] # list of the edges traversed on the shortest path from 1 to n
now = n # trace backwards from n to 1
while True:
if now == 1: break # stop once node 1 has been reached
a = previous[now] # a: previous node
b = now # b: current node
for i in graph[now]: # dist = distance from the previous node to the current node
if i[0] == previous[now]:
dist = i[1]
break
temp.append((a, b, dist)) # store (previous node, current node, distance) in temp
now = previous[now]
max_val = -1e9
# unless one of the edges used by the shortest path is removed,
# the shortest path will still be taken, so the cost does not change.
while True:
if len(temp) == 0: break
# remove one edge used on the shortest path -> measure the distance with Dijkstra -> add it back
a, b, dist = temp.pop()
graph[a].remove((b, dist))
graph[b].remove((a, dist))
max_val = max(max_val, dijkstra())
graph[a].append((b, dist))
graph[b].append((a, dist))
if max_val >= 1e9:
print(-1)
else:
print(max_val - init_val) | [
"[email protected]"
]
| |
e1c2fca2bad35624293caa5c903e7e1a37fcb96d | e35eb92b5ab6547119585004b9eea3cafe948050 | /efsw/archive/errors.py | 3b9ac8626e58cb7513fc221356b582c5bec573f4 | []
| no_license | einsfr/mmkit | 0a084db85b2cf5ba268e692676095d768733f387 | f12bc2f83254a3123e02abdc105816cc04c438b5 | refs/heads/master | 2020-12-31T05:56:19.287611 | 2016-06-10T05:56:58 | 2016-06-10T05:56:58 | 29,473,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | ITEM_LINK_SELF_SELF = 'Элемент не может иметь связь с самим собой.'
ITEM_LINK_TYPE_UNKNOWN = 'Неизвестный тип связи между элементами: {0}.'
ITEM_NOT_FOUND = 'Элемент(ы) с ID {0} не существует(ют).'
STORAGE_NOT_FOUND = 'Хранилище(а) с ID {0} не существует(ют).'
STORAGE_NOT_ALLOWED_AS_ARCHIVE = 'Хранилище(а) с ID {0} нельзя использовать как архивные.'
CATEGORY_NOT_FOUND = 'Категория(и) с ID {0} не существует(ют).'
| [
"[email protected]"
]
| |
9084c5e743b26571e62ba65a4df2d3ec5e68700c | a3972cb6ba32abd18b374975f4abd5318bc95f09 | /project/src/yosigy/api/yosigy_list_views.py | 960d32f8f54604c94ee00262c81979094695a2d5 | []
| no_license | ssr03/MiniDelivery | c57bb45e497cab34787473925663ace46dbb6b2d | 659d9757d1f369a6713aa5a66bab2aa5d6381b8e | refs/heads/master | 2020-07-30T15:05:01.401229 | 2019-09-23T11:52:51 | 2019-09-23T11:52:51 | 210,267,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,099 | py | import enum
from datetime import datetime
from django.core.paginator import Paginator
from django.db.models import F, Count
from django.http import JsonResponse
from django.views.generic.base import View
from accounts.mixins import LoginRequiredMixin
from restaurant.api.views import CategoryNum
from yosigy.models import Yosigy
class YosigyListInfo(enum.IntEnum):
POST_TO_SHOW_IN_ONE_PAGE = 4
PAGES_TO_SHOW = 3
class YosigyListAPIView(LoginRequiredMixin, View):
def get(self, request, *args, **kwargs):
category_id = kwargs['category_id']
today = datetime.now().date()
tab_value = request.GET.get('tab_value', '')
json_data = {}
if kwargs['page']:
self.page = kwargs['page']
if not category_id or category_id == CategoryNum.ALL_ID:
yosigy = (
Yosigy.objects
.select_related('restaurant')
.prefetch_related('yosigymenu_set')
.filter(
restaurant__is_yosigy=True,
deadline__gte=today,
)
.values(
'restaurant',
)
.annotate(
is_yosigy_count=Count('yosigymenu__menu'),
)
.values(
'pk',
'is_yosigy_count',
restaurant_title=F('restaurant__title'),
restaurant_img=F('restaurant__img'),
yosigy_deadline=F('deadline'),
yosigy_notice=F('notice'),
)
.order_by('-created_time')
)
else:
yosigy = (
Yosigy.objects
.select_related('restaurant')
.prefetch_related('yosigymenu_set')
.filter(
restaurant__is_yosigy=True,
deadline__gte=today,
restaurant__category__pk=category_id,
)
.values(
'restaurant',
)
.annotate(
is_yosigy_count=Count('yosigymenu__menu'),
)
.values(
'pk',
'is_yosigy_count',
restaurant_title=F('restaurant__title'),
restaurant_img=F('restaurant__img'),
yosigy_deadline=F('deadline'),
yosigy_notice=F('notice'),
)
.order_by('-created_time')
)
yosigy_set = (
Yosigy.objects
.select_related('restaurant')
.prefetch_related('yosigymenu_set')
.filter(yosigymenu__menu__is_set_menu=True,)
.annotate(
is_set_menu_count=Count('yosigymenu__menu'),
)
.values(
'is_set_menu_count',
'pk',
)
)
for i in yosigy:
for j in yosigy_set:
if i['pk'] == j['pk']:
i['is_set_menu_count'] = j['is_set_menu_count']
yosigy=list(yosigy)
if not yosigy:
json_data = {
'message': '아직 공동 구매할 수 있는 메뉴가 없습니다.',
}
elif tab_value == 'deadline':
yosigy=sorted(yosigy, key=lambda menu:menu['yosigy_deadline'])
json_data = self.yosigy_paginator(yosigy)
json_data['deadline'] = True
elif tab_value == 'all' or tab_value == '':
json_data = self.yosigy_paginator(yosigy)
json_data['all'] = True
return JsonResponse(
json_data
)
def yosigy_paginator(self, yosigy):
paginator = Paginator(yosigy, YosigyListInfo.POST_TO_SHOW_IN_ONE_PAGE)
current_page = paginator.get_page(self.page)
start = (self.page-1) // YosigyListInfo.PAGES_TO_SHOW * YosigyListInfo.PAGES_TO_SHOW + 1
end = start + YosigyListInfo.PAGES_TO_SHOW
last_page = len(paginator.page_range)
if last_page < end:
end = last_page
yosigy_list = current_page.object_list
page_range = range(start, end + 1)
yosigy_list_data = {
'yosigy_list': yosigy_list,
'current_page': {
'has_previous': current_page.has_previous(),
'has_next': current_page.has_next(),
},
'page_range': [page_range[0], page_range[-1]],
}
if current_page.has_previous():
yosigy_list_data['current_page']['previous_page_number'] = current_page.previous_page_number()
if current_page.has_next():
yosigy_list_data['current_page']['next_page_number'] = current_page.next_page_number()
return yosigy_list_data
| [
"[email protected]"
]
| |
1217e3e57869565f3ec42a80986e66bb1d63dbd2 | e00fe1e065b448f6f8c0472ed2b8a39991fa7b1b | /Fuzzy_clustering/version2/dataset_manager/create_datasets_pca.py | 0542341ca3bd78c1d9c10831c04312420e50e87c | [
"Apache-2.0"
]
| permissive | joesider9/forecasting_library | 1a4ded5b09fc603f91fa1c075e79fc2ed06c08a8 | db07ff8f0f2693983058d49004f2fc6f8849d197 | refs/heads/master | 2023-03-29T12:18:22.261488 | 2021-04-01T08:57:08 | 2021-04-01T08:57:08 | 319,906,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65,155 | py | import os
import joblib
import numpy as np
import pandas as pd
from joblib import Parallel
from joblib import delayed
from pytz import timezone
from sklearn.decomposition import KernelPCA
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from Fuzzy_clustering.version2.common_utils.logging import create_logger
from Fuzzy_clustering.version2.dataset_manager.common_utils import my_scorer
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_3d
from Fuzzy_clustering.version2.dataset_manager.common_utils import stack_daily_nwps
class DatasetCreatorPCA:
def __init__(self, project, data=None, n_jobs=1, test=False, dates=None):
if test is None:
raise NotImplementedError('test is none for short-term, not implemented for PCA')
self.data = data
self.is_for_test = test
self.project_name = project['_id']
self.static_data = project['static_data']
self.path_nwp_project = self.static_data['pathnwp']
self.path_data = self.static_data['path_data']
self.areas = self.static_data['areas']
self.area_group = self.static_data['area_group']
self.nwp_model = self.static_data['NWP_model']
self.nwp_resolution = self.static_data['NWP_resolution']
self.location = self.static_data['location']
self.compress = True if self.nwp_resolution == 0.05 else False
self.n_jobs = n_jobs
self.variables = self.static_data['data_variables']
self.logger = create_logger(logger_name=f"log_{self.static_data['project_group']}",
abs_path=self.path_nwp_project,
logger_path=f"log_{self.static_data['project_group']}.log", write_type='a')
if self.data is not None:
self.dates = self.check_dates()
elif dates is not None:
self.dates = dates
def check_dates(self):
# Extract dates of power measurements.
start_date = pd.to_datetime(self.data.index[0].strftime('%d%m%y'), format='%d%m%y')
end_date = pd.to_datetime(self.data.index[-1].strftime('%d%m%y'), format='%d%m%y')
dates = pd.date_range(start_date, end_date)
data_dates = pd.to_datetime(np.unique(self.data.index.strftime('%d%m%y')), format='%d%m%y')
dates = [d for d in dates if d in data_dates]
self.logger.info('Dates is checked. Number of time samples %s', str(len(dates)))
return pd.DatetimeIndex(dates)
def make_dataset_res(self):
nwp_3d_pickle = 'nwps_3d_test.pickle' if self.is_for_test else 'nwps_3d.pickle'
dataset_cnn_pickle = 'dataset_cnn_test.pickle' if self.is_for_test else 'dataset_cnn.pickle'
nwp_3d_pickle = os.path.join(self.path_data, nwp_3d_pickle)
dataset_cnn_pickle = os.path.join(self.path_data, dataset_cnn_pickle)
if not (os.path.exists(nwp_3d_pickle) and os.path.exists(dataset_cnn_pickle)):
data, x_3d = self.get_3d_dataset()
else:
data = joblib.load(nwp_3d_pickle)
x_3d = joblib.load(dataset_cnn_pickle) # FIXME: unused variable
data_path = self.path_data
if not isinstance(self.areas, dict):
self.dataset_for_single_farm(data, data_path)
else:
dates_stack = []
for t in self.dates:
p_dates = pd.date_range(t + pd.DateOffset(hours=25), t + pd.DateOffset(hours=48), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
dates_stack.append(dates)
flag = False
for i, p_dates in enumerate(dates_stack):
t = self.dates[i]
file_name = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(file_name):
nwps = joblib.load(file_name)
for date in p_dates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except Exception:
continue
if flag:
break
self.dataset_for_multiple_farms(data, self.areas, lats_group, longs_group)
def correct_nwps(self, nwp, variables):
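# Normalise a raw NWP frame: rebuild missing lat/long axes from the configured area group, transpose arrays whose orientation does not match the latitude axis, and derive wind speed/direction from the U/V components when 'WS' is requested but absent.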
if nwp['lat'].shape[0] == 0:
area_group = self.area_group
resolution = self.nwp_resolution
nwp['lat'] = np.arange(area_group[0][0], area_group[1][0] + resolution / 2,
resolution).reshape(-1, 1)
nwp['long'] = np.arange(area_group[0][1], area_group[1][1] + resolution / 2,
resolution).reshape(-1, 1).T
for var in nwp.keys():
if var not in {'lat', 'long'}:
if nwp['lat'].shape[0] != nwp[var].shape[0]:
nwp[var] = nwp[var].T
if 'WS' in variables and 'WS' not in nwp:
if 'Uwind' in nwp.keys() and 'Vwind' in nwp.keys():
if nwp['Uwind'].shape[0] > 0 and nwp['Vwind'].shape[0] > 0:
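# convert U/V wind components to speed and meteorological direction (degrees)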
r2d = 45.0 / np.arctan(1.0)
wspeed = np.sqrt(np.square(nwp['Uwind']) + np.square(nwp['Vwind']))
wdir = np.arctan2(nwp['Uwind'], nwp['Vwind']) * r2d + 180
nwp['WS'] = wspeed
nwp['WD'] = wdir
if 'Temp' in nwp.keys():
nwp['Temperature'] = nwp['Temp']
del nwp['Temp']
return nwp
def get_3d_dataset(self):
dates_stack = []
for t in self.dates:
p_dates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47),
freq='H')  # 24 hourly steps: next day 00:00 -> 23:00
dates = [dt.strftime('%d%m%y%H%M') for dt in p_dates if dt in self.data.index]
dates_stack.append(dates)  # for each base date, keep the next-day timestamps that exist in the measurements
area = self.area_group if isinstance(self.areas, dict) else self.areas
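# single call whose result is discarded before the parallel map below; it appears to act as a warm-up/validation pass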
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project,
self.nwp_model, area, self.variables,
self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], p_dates, self.path_nwp_project,
self.nwp_model, area, self.variables,
self.compress, self.static_data['type'])
for i, p_dates in enumerate(dates_stack))
x = np.array([])
data_var = dict()
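# for the primary variable (WS for wind parks, Flux for PV) keep previous/current/next forecast lead times; all other variables keep only the current one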
for var in self.variables:
if (var == 'WS' and self.static_data['type'] == 'wind') or \
(var == 'Flux' and self.static_data['type'] == 'pv'):
data_var[var + '_prev'] = x
data_var[var] = x
data_var[var + '_next'] = x
else:
data_var[var] = x
data_var['dates'] = x
x_3d = np.array([])
for arrays in nwp_daily:
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
x_3d = stack_3d(x_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
if self.is_for_test:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d_test.pickle'))
joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn_test.pickle'))
else:
joblib.dump(data_var, os.path.join(self.path_data, 'nwps_3d.pickle'))
joblib.dump(x_3d, os.path.join(self.path_data, 'dataset_cnn.pickle'))
self.logger.info('NWP stacked data saved')
return data_var, x_3d
def train_pca(self, data, components, level):
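# Fit a kernel-PCA compressor for this feature level: min-max scale, grid-search gamma with 3-fold CV using the custom scorer, and persist scaler + estimator. Note: the kernel is left at its default ('linear'), for which gamma has no effect.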
scaler = MinMaxScaler()
data_scaled = scaler.fit_transform(data)
param_grid = [{
"gamma": np.logspace(-3, 0, 20),
}]
kpca = KernelPCA(n_components=components, fit_inverse_transform=True, n_jobs=self.n_jobs)
grid_search = GridSearchCV(kpca, param_grid, cv=3, scoring=my_scorer, n_jobs=self.n_jobs)
grid_search.fit(data_scaled)
kpca = grid_search.best_estimator_
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
joblib.dump({'scaler': scaler, 'kpca': kpca}, fname)
def pca_transform(self, data, components, level):
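# Train the compressor for this level on first use, then apply the stored scaler and kernel PCA.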
fname = os.path.join(self.path_data, 'kpca_' + level + '.pickle')
if not os.path.exists(fname):
self.train_pca(data, components, level)
models = joblib.load(fname)
data_scaled = models['scaler'].transform(data)
data_compress = models['kpca'].transform(data_scaled)
return data_compress
def dataset_for_single_farm(self, data, data_path):
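# Flatten the (assumed 5x5) NWP grid around the site into tabular features: the centre cell for previous/current/next lead times plus PCA-compressed rings of neighbouring cells; columns are finally ordered by correlation with the target.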
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
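# [:, 2, 2] is the centre cell of the 5x5 forecast grid (the site location)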
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
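# the index lists below pick rings of cells around the centre; each ring is compressed with kernel PCA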
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var == 'Flux' else 'wind'
var_sort = 'fl' if var == 'Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var == 'Cloud' else 'direction'
var_sort = 'cl' if var == 'Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X2 = np.transpose(data[var], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.is_for_test:
ind = joblib.load(os.path.join(data_path, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
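# rank features by absolute correlation with the target and persist the ordering so evaluation datasets use the same column order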
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(data_path, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def dataset_for_multiple_farms(self, data, areas, lats_group, longs_group):
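# Same idea per area: PCA-compress each area's grid cells, then add group-level columns as the mean over areas.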
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
for area_name, area in areas.items():
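# area may be given as an array of two (lat, long) corner points; otherwise it is indexed as a flat [lat0, long0, lat1, long1] box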
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.pca_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.pca_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
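# NOTE: for wind the next two assignments reuse the same 'wind' column name, so the previous/next lead-time means overwrite it (possibly intended as 'p_wind'/'n_wind')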
var_name = 'p_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
var_name = 'n_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
dataset_y = self.data.loc[dataset_X.index].to_frame()
dataset_y.columns = ['target']
if self.is_for_test:
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X_test.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y_test.csv'))
self.logger.info('Successfully dataset created for Evaluation for %s', self.project_name)
else:
corr = []
for f in range(dataset_X.shape[1]):
corr.append(np.abs(np.corrcoef(dataset_X.values[:, f], dataset_y.values.ravel())[1, 0]))
ind = np.argsort(np.array(corr))[::-1]
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
joblib.dump(ind, os.path.join(self.path_data, 'dataset_columns_order.pickle'))
dataset_X.to_csv(os.path.join(self.path_data, 'dataset_X.csv'))
dataset_y.to_csv(os.path.join(self.path_data, 'dataset_y.csv'))
self.logger.info('Successfully dataset created for training for %s', self.project_name)
def make_dataset_res_offline(self, utc=False):
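# Offline variant: builds and returns the feature matrix and 3d tensor for self.dates without writing the training CSVs.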
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except Exception:
return False
data, X_3d = self.get_3d_dataset_offline(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_offline(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t = self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except Exception:
continue
if flag:
break
X = self.dataset_for_multiple_farms_offline(data, self.areas, lats_group, longs_group)
return X, X_3d
def get_3d_dataset_offline(self, utc):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except Exception:
return False
dates_stack = []
for dt in self.dates:
if utc:
pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
dates = [t.strftime('%d%m%y%H%M') for t in pdates]
dates_stack.append(dates)
else:
pdates = pd.date_range(dt + pd.DateOffset(hours=25), dt + pd.DateOffset(hours=48), freq='H')
indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
pdates = pdates[indices]
pdates = pdates.tz_localize(timezone('Europe/Athens'))
pdates = pdates.tz_convert(timezone('UTC'))
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
if not isinstance(self.areas, dict):
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.areas, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
else:
nwp = stack_daily_nwps(self.dates[0], dates_stack[0], self.path_nwp_project, self.nwp_model,
self.area_group,
self.variables, self.compress, self.static_data['type'])
nwp_daily = Parallel(n_jobs=self.n_jobs)(
delayed(stack_daily_nwps)(self.dates[i], pdates, self.path_nwp_project, self.nwp_model,
self.area_group, self.variables, self.compress, self.static_data['type'])
for i, pdates in enumerate(dates_stack))
X = np.array([])
data_var = dict()
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
data_var[var + '_prev'] = X
data_var[var] = X
data_var[var + '_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
X_3d = np.array([])
for arrays in nwp_daily:
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
X_3d = stack_3d(X_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
return data_var, X_3d
def dataset_for_single_farm_offline(self, data):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var == 'Flux' else 'wind'
var_sort = 'fl' if var == 'Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var == 'Cloud' else 'direction'
var_sort = 'cl' if var == 'Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X2 = np.transpose(data[var], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
return dataset_X
def dataset_for_multiple_farms_offline(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.pca_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.pca_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
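# NOTE: as in the training variant, for wind the next two assignments overwrite the 'wind' column (possibly intended as 'p_wind'/'n_wind')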
var_name = 'p_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
var_name = 'n_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
return dataset_X
def make_dataset_res_online(self, utc=False):
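# Online variant: builds features for the last date in self.dates only (the next-day forecast horizon).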
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except Exception:
return False
data, X_3d = self.get_3d_dataset_online(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_online(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t = self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (
nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (
nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except Exception:
continue
if flag:
break
X = self.dataset_for_multiple_farms_online(data, self.areas, lats_group, longs_group)
return X, X_3d
def get_3d_dataset_online(self, utc):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except Exception:
return False
dates_stack = []
if utc:
pdates = pd.date_range(self.dates[-1] + pd.DateOffset(hours=25), self.dates[-1] + pd.DateOffset(hours=48),
freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates if dt in self.data.index]
dates_stack.append(dates)
else:
pdates = pd.date_range(self.dates[-1] + pd.DateOffset(hours=25), self.dates[-1] + pd.DateOffset(hours=48),
freq='H')
indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
pdates = pdates[indices]
pdates = pdates.tz_localize(timezone('Europe/Athens'))
pdates = pdates.tz_convert(timezone('UTC'))
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
if not isinstance(self.areas, dict):
arrays = stack_daily_nwps(self.dates[-1], dates_stack[0], self.path_nwp_project, self.nwp_model, self.areas,
self.variables, self.compress, self.static_data['type'])
else:
arrays = stack_daily_nwps(self.dates[-1], dates_stack[0], self.path_nwp_project, self.nwp_model,
self.area_group,
self.variables, self.compress, self.static_data['type'])
X = np.array([])
data_var = dict()
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
data_var[var + '_prev'] = X
data_var[var] = X
data_var[var + '_next'] = X
else:
data_var[var] = X
data_var['dates'] = X
X_3d = np.array([])
nwp = arrays[0]
x_2d = arrays[1]
if x_2d.shape[0] != 0:
for var in nwp.keys():
if var != 'dates':
data_var[var] = stack_3d(data_var[var], nwp[var])
else:
data_var[var] = np.hstack((data_var[var], nwp[var]))
X_3d = stack_3d(X_3d, x_2d)
self.logger.info('NWP data stacked for date %s', arrays[2])
return data_var, X_3d
def dataset_for_single_farm_online(self, data):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = np.transpose(data[var + '_prev'], [0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var == 'Flux' else 'wind'
var_sort = 'fl' if var == 'Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d,
X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var], [0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.pca_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.pca_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.pca_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.pca_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud' if var == 'Cloud' else 'direction'
var_sort = 'cl' if var == 'Cloud' else 'wd'
col = [var_name] + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X2 = np.transpose(data[var], [0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'Temp' if var == 'Temperature' else 'wind'
var_sort = 'tp' if var == 'Temperature' else 'ws'
col = [var_name]
X = X2_level0
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
self.logger.info('Successfully dataset created for training for %s', self.project_name)
return dataset_X
def dataset_for_multiple_farms_online(self, data, areas, lats_group, longs_group):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
for area_name, area in areas.items():
if len(area) > 1:
lats = (np.where((lats_group[:, 0] >= area[0, 0]) & (lats_group[:, 0] <= area[1, 0])))[0]
longs = (np.where((longs_group[0, :] >= area[0, 1]) & (longs_group[0, :] <= area[1, 1])))[0]
else:
lats = (np.where((lats_group[:, 0] >= area[0]) & (lats_group[:, 0] <= area[2])))[0]
longs = (np.where((longs_group[0, :] >= area[1]) & (longs_group[0, :] <= area[3])))[0]
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
X0 = data[var + '_prev'][:, lats, :][:, :, longs]
X0 = X0.reshape(-1, X0.shape[1] * X0.shape[2])
level = var + '_prev_' + area_name
self.logger.info('Begin PCA training for %s', level)
X0_compressed = self.pca_transform(X0, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.pca_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var == 'Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var == 'Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.pca_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
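# NOTE: as in the training variant, for wind the next two assignments overwrite the 'wind' column (possibly intended as 'p_wind'/'n_wind')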
var_name = 'p_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
var_name = 'n_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
ind = joblib.load(os.path.join(self.path_data, 'dataset_columns_order.pickle'))
columns = dataset_X.columns[ind]
dataset_X = dataset_X[columns]
self.logger.info('Successfully dataset created for training for %s', self.project_name)
return dataset_X
| [
"[email protected]"
]
| |
0f41b4c555162561f877240887369c044b1fe898 | 3d589d1c56b55fbd2b45b03564b8a9442ebf142b | /lib/src/klio/metrics/base.py | 1b50aeb1da57930cc8fba17042c72434460c2eb4 | [
"Apache-2.0"
]
| permissive | spotify/klio | 1aff27412e92c9d699259e5ab1eaeb39dc3e9571 | e625565708ed846201d2e05f782c0ce585554346 | refs/heads/develop | 2023-05-25T14:33:28.348335 | 2022-03-23T20:34:09 | 2022-03-23T20:34:09 | 285,928,366 | 815 | 57 | Apache-2.0 | 2023-05-24T21:07:09 | 2020-08-07T22:02:58 | Python | UTF-8 | Python | false | false | 5,765 | py | # Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base classes that a metrics consumer (e.g. ffwd, logger, etc.)
will need to implement.
New consumers are required to implement the :class:`AbstractRelayClient`, and
three metrics objects based off of :class:`BaseMetric`: a counter, a gauge, and
a timer.
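A minimal, hypothetical consumer sketch (the class name ``MyRelayClient`` and
the ``"my-consumer"`` key are illustrative only, not part of klio):

.. code-block:: python

    class MyRelayClient(AbstractRelayClient):
        RELAY_CLIENT_NAME = "my-consumer"

        def unmarshal(self, metric):
            # dictionary representation used by emit()
            return {"metric": metric.name, "value": metric.value}

        def emit(self, metric):
            # runs in a separate threadpool; errors are logged and ignored
            print(self.unmarshal(metric))

        def counter(self, name, value=0, transform=None, **kwargs):
            return BaseMetric(name, value=value, transform=transform)

        def gauge(self, name, value=0, transform=None, **kwargs):
            return BaseMetric(name, value=value, transform=transform)

        def timer(self, name, transform=None, **kwargs):
            return BaseMetric(name, transform=transform)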
"""
import abc
import six
class _DummyAttribute(object):
# for the ability to do `FOO_ATTR = abstract_attr()` as well as
# decorate a property method
pass
def abstract_attr(obj=None):
"""Set an attribute or a property as abstract.
Supports class-level attributes as well as methods defined as a
``@property``.
Usage:
.. code-block:: python
class Foo(object):
my_foo_attribute = abstract_attr()
@property
@abstract_attr
def my_foo_property(self):
pass
Args:
obj (callable): Python object to "decorate", i.e. a class method. If
none is provided, a dummy object is created in order to attach
the ``__isabstractattr__`` attribute (similar to
``__isabstractmethod__`` from ``abc.abstractmethod``).
Returns object with ``__isabstractattr__`` attribute set to ``True``.
"""
if not obj:
obj = _DummyAttribute()
obj.__isabstractattr__ = True
return obj
def _has_abstract_attributes_implemented(cls, name, bases):
"""Verify a given class has its abstract attributes implemented."""
for base in bases:
abstract_attrs = getattr(base, "_klio_metrics_abstract_attributes", [])
class_attrs = getattr(cls, "_klio_metrics_all_attributes", [])
for attr in abstract_attrs:
if attr not in class_attrs:
err_str = (
"Error instantiating class '{0}'. Implementation of "
"abstract attribute '{1}' from base class '{2}' is "
"required.".format(name, attr, base.__name__)
)
raise NotImplementedError(err_str)
def _get_all_attributes(clsdict):
return [name for name, val in six.iteritems(clsdict) if not callable(val)]
def _get_abstract_attributes(clsdict):
return [
name
for name, val in six.iteritems(clsdict)
if not callable(val) and getattr(val, "__isabstractattr__", False)
]
class _ABCBaseMeta(abc.ABCMeta):
"""Enforce behavior upon implementations of ABC classes."""
def __init__(cls, name, bases, clsdict):
_has_abstract_attributes_implemented(cls, name, bases)
def __new__(metaclass, name, bases, clsdict):
clsdict[
"_klio_metrics_abstract_attributes"
] = _get_abstract_attributes(clsdict)
clsdict["_klio_metrics_all_attributes"] = _get_all_attributes(clsdict)
cls = super(_ABCBaseMeta, metaclass).__new__(
metaclass, name, bases, clsdict
)
return cls
class AbstractRelayClient(six.with_metaclass(_ABCBaseMeta)):
"""Abstract base class for all metric consumer relay clients.
Each new consumer (i.e. ffwd, logging-based metrics)
will need to implement this relay class.
Attributes:
RELAY_CLIENT_NAME (str): must match the key in ``klio-job.yaml``
under ``job_config.metrics``.
"""
RELAY_CLIENT_NAME = abstract_attr()
def __init__(self, klio_config):
self.klio_config = klio_config
@abc.abstractmethod
def unmarshal(self, metric):
"""Returns a dictionary-representation of the ``metric`` object"""
pass
@abc.abstractmethod
def emit(self, metric):
"""Emit the given metric object to the particular consumer.
``emit`` will be run in a threadpool separate from the transform,
and any errors raised from the method will be logged then ignored.
"""
pass
@abc.abstractmethod
def counter(self, name, value=0, transform=None, **kwargs):
"""Return a newly instantiated counter-type metric specific for
the particular consumer.
Callers to the ``counter`` method will store new counter objects
returned in memory for simple caching.
"""
pass
@abc.abstractmethod
def gauge(self, name, value=0, transform=None, **kwargs):
"""Return a newly instantiated gauge-type metric specific for
the particular consumer.
Callers to the ``gauge`` method will store new gauge objects
returned in memory for simple caching.
"""
pass
@abc.abstractmethod
def timer(self, name, transform=None, **kwargs):
"""Return a newly instantiated timer-type metric specific for
the particular consumer.
Callers to the ``timer`` method will store new timer objects
returned in memory for simple caching.
"""
pass
class BaseMetric(object):
"""Base class for all metric types.
A consumer must implement a counter metric, a gauge metric, and a
timer metric.
"""
def __init__(self, name, value=0, transform=None, **kwargs):
self.name = name
self.value = value
self.transform = transform
def update(self, value):
self.value = value
| [
"[email protected]"
]
| |
2682ec078d2d665c54515022a6840ddf88168001 | 7a1f6f1aae43b219cd34c3c9b907923fb839e6f5 | /Python/Udemy/FXTRADE/pyfxtrading/pyfxtrading/28/app/controllers/webserver.py | bbf2ff35ce8221762754b16b7b6dd096ee8484a4 | []
| no_license | amanoman/amanoman.github.io | b5afc80e0e49ed15db793e2ebf69003c05ab8ce0 | 141c928f6d1df0389859f663f6439d327d4c32d6 | refs/heads/master | 2023-05-28T07:22:09.735409 | 2021-03-31T15:00:14 | 2021-03-31T15:00:14 | 187,139,297 | 0 | 1 | null | 2023-05-22T23:37:24 | 2019-05-17T03:19:36 | Jupyter Notebook | UTF-8 | Python | false | false | 543 | py | from flask import Flask
from flask import render_template
import settings
app = Flask(__name__, template_folder='../views')
@app.teardown_appcontext
def remove_session(ex=None):
from app.models.base import Session
Session.remove()
@app.route('/')
def index():
app.logger.info('index')
return render_template('./google.html',
word='World')
def start():
# app.run(host='127.0.0.1', port=settings.web_port, threaded=True)
app.run(host='0.0.0.0', port=settings.web_port, threaded=True)
| [
"[email protected]"
]
| |
2dd09cf0b1134b3972740048402bc6e9ee1c97be | 1ece1faa638f85c567fdb237c67340501f86f89e | /model/model_builder.py | 5bc0acb8d41370c2b1905ff26fb7f1070790eb67 | []
| no_license | seasa2016/transformer_random | 54223ee5b04a4563c7903d925436d843b8cf7f1c | e3e13c9a2ddc49558d8e991427a974848a850b9c | refs/heads/master | 2020-04-02T12:21:28.167673 | 2019-03-19T03:45:00 | 2019-03-19T03:45:00 | 154,429,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,030 | py | import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_,xavier_normal_
from .module.Embedding import Embedding
from .util.Logger import logger
from . import Constant
from . import transformer
def build_embedding(opt,word_dict,max_len,for_encoder=True,dtype='sum',tag=None):
if(for_encoder):
embedding_dim = opt.src_word_vec_size
else:
embedding_dim = opt.tar_word_vec_size
#print(Constant.PAD_token)
word_padding_idx = word_dict[Constant.PAD_token]
num_word_embedding = len(word_dict)
# num_word,max_len,emb_dim,feature_dim,dropout=0,dtype='sum'
return Embedding(num_word= num_word_embedding,
max_len = max_len,
emb_dim = embedding_dim,
feature_dim = embedding_dim,
padding_idx = word_padding_idx,
dropout = opt.dropout,
dtype = dtype,tag=tag)
def build_encoder(opt,src_dict,tag_dict):
"""
function to build the encoder
"""
max_len = 128
src_embedding = build_embedding(opt,src_dict,max_len,tag=tag_dict)
return transformer.Encoder( opt.enc_layer,opt.num_head,
opt.model_dim,opt.nin_dim_en,
opt.dropout,src_embedding)
def build_decoder(opt,tar_dict):
"""
function to build the decoder
"""
max_len = 128
tar_embedding = build_embedding(opt,tar_dict,max_len,for_encoder=False,dtype=opt.decode_pos)
return transformer.Decoder(
opt.dec_layer,opt.num_head,
opt.model_dim,opt.nin_dim_de,len(tar_dict),max_len,
opt.self_attn_type,opt.dropout,tar_embedding
)
def load_test_model(opt,model_path=None,mode=False):
"""
use this method to acquire the data_dict and the model
"""
if model_path is None:
if(opt.test_from is None):
raise ValueError('test_from should not be None')
model_path = opt.test_from
checkpoint = torch.load(model_path)
data_new = dict()
for t in ['source','target','tag']:
data_new[t] = dict()
with open('./{0}/subword.{1}'.format(opt.data,t)) as f_in:
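# build token -> id maps from the subword vocab files; source tokens appear to be stored wrapped in quotes (stripped via [1:-1]) while target/tag tokens get a trailing '_' marker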
for i,word in enumerate(f_in):
if(t=='source'):
data_new[t][word.strip()[1:-1]] = i
else:
data_new[t][word.strip()+'_'] = i
if(mode == False):
model = build_base_model(checkpoint['opt'],opt, data_new, torch.cuda.is_available(),checkpoint)
else:
#build_model_pre(opt,opt,data_ori,data_new,True,checkpoint=checkpoint)
model = build_base_model(opt,opt,data_new,True,checkpoint=checkpoint)
model.load_state_dict(checkpoint['model'])
model.eval()
return model, opt
def build_base_model(model_opt,opt,data_token,gpu,checkpoint=None,dtype=None):
"""
build the base model
"""
if('tag' in data_token):
encoder = build_encoder(model_opt,data_token['source'],len(data_token['tag']))
else:
encoder = build_encoder(model_opt,data_token['source'],None)
logger.info("finish build encoder")
decoder = build_decoder(model_opt,data_token['target'])
logger.info("finish build decoder")
device = torch.device("cuda" if gpu else "cpu")
model = transformer.Transformer(encoder,decoder)
#print(model)
n_params = sum([p.nelement() for p in model.parameters()])
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
elif 'decoder' or 'generator' in name:
dec += param.nelement()
print("the size will be {0} {1} {2}".format(n_params,enc,dec))
if(checkpoint is not None):
logger.info('loading model weight from checkpoint')
model.load_state_dict(checkpoint['model'])
else:
if(model_opt.param_init != 0.0):
for p in model.parameters():
if(p.requires_grad):
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if(model_opt.param_init_glorot):
for p in model.parameters():
if(p.requires_grad):
if p.dim() > 1:
xavier_normal_(p)
model.to(device)
logger.info('the model is now in the {0} mode'.format(device))
return model
def change(model_opt,opt,model,data_new):
"""
change the decoder and lock the grad for the encoder
"""
model.decoder = build_decoder(opt,data_new['target'])
#update the parameter
model_opt.tar_word_vec_size = opt.tar_word_vec_size
model_opt.dropout = opt.dropout
model_opt.dec_layer = opt.dec_layer
model_opt.num_head = opt.num_head
model_opt.model_dim = opt.model_dim
model_opt.nin_dim_de = opt.nin_dim_de
model_opt.self_attn_type = opt.self_attn_type
model_opt.dropout = opt.dropout
#lock the grad for the encoder
model.encoder.embedding.word_emb.requires_grad = False
if model_opt.param_init != 0.0:
for p in model.parameters():
if(p.requires_grad):
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in model.parameters():
if(p.requires_grad):
if(p.dim()>1):
xavier_normal_(p)
if(opt.replace):
#one for the pretrain model and the other for the new model
logger.info("with mid layer {0} {1}".format(model_opt.model_dim,opt.model_dim))
model.mid = nn.Linear(model_opt.model_dim,opt.model_dim)
return model
def build_model_pre(model_opt,opt,data_ori,data_new,gpu,checkpoint=None):
#in our work,we only use text
#build encoder
encoder = build_encoder(model_opt,data_ori['source'],len(data_ori['tag']))
logger.info("build the origin encoder")
decoder = build_decoder(model_opt,data_ori['target'])
logger.info("build the origin decoder")
device = torch.device("cuda" if gpu else "cpu")
model = transformer.Transformer(encoder,decoder)
print(model)
if(checkpoint):
logger.info('loading model weight from checkpoint')
model.load_state_dict(checkpoint['model'])
else:
raise ValueError('cannot access this mode without using a pretrained model')
model = change(model_opt,opt,model,data_new)
#print(model)
n_params = sum([p.nelement() for p in model.parameters()])
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
        elif 'decoder' in name or 'generator' in name:
dec += param.nelement()
print("the size will be {0} {1} {2}".format(n_params,enc,dec))
model.to(device)
logger.info('the model is now in the {0} mode'.format(device))
return model
def build_model(model_opt,opt,data_token,checkpoint):
logger.info('Building model...')
model = build_base_model(model_opt,opt,data_token,torch.cuda.is_available(),checkpoint)
return model
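# --- Illustrative usage sketch (added; not part of the original snippet) ---
# The expected flow is: load a checkpoint with torch.load, rebuild the subword
# vocabularies into data_new, then call build_model. The `opt` object and the
# checkpoint path below are hypothetical placeholders.
def _build_from_checkpoint_sketch(opt, model_path='model.pt'):
    checkpoint = torch.load(model_path)
    data_new = {'source': {}, 'target': {}}  # filled from the subword files as above
    return build_model(checkpoint['opt'], opt, data_new, checkpoint)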
| [
"[email protected]"
]
| |
b51e6caa09f683cec6c8f09fb1aca60e73ec36f0 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_25/models/resource_performance_no_id_by_array_get_response.py | dc59892d20f80e34fb26c25e0f59584a263ca562 | [
"BSD-2-Clause"
]
| permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 6,240 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.25
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_25 import models
class ResourcePerformanceNoIdByArrayGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[ResourcePerformanceNoIdByArray]',
'total': 'list[ResourcePerformanceNoIdByArray]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items',
'total': 'total'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.ResourcePerformanceNoIdByArray]
total=None, # type: List[models.ResourcePerformanceNoIdByArray]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[ResourcePerformanceNoIdByArray]): Performance data, broken down by array. If `total_only=true`, the `items` list will be empty.
total (list[ResourcePerformanceNoIdByArray]): The aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each field where meaningful.
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
if total is not None:
self.total = total
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ResourcePerformanceNoIdByArrayGetResponse`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResourcePerformanceNoIdByArrayGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourcePerformanceNoIdByArrayGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
e152459956c322f9cbc51869cd7582e46c883a4f | ca77040a488fea5410449a6ae06693c64c0d3f1a | /web/ui/views/views.py | 8bf22427e4bbe80ad2a7a40c428831f5b4e2c697 | []
| no_license | patricknevindwyer/space-base-otter | 0565412db484b5bb8b2dbc4e8d3878fbd73f7b0b | ea7dd7649c172ca3e4c14b6df8b6715f55f746ba | refs/heads/master | 2021-01-19T13:42:53.229964 | 2017-08-19T21:50:45 | 2017-08-19T21:50:45 | 82,412,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | from django.shortcuts import render
# Create your views here.
def index(request):
return render(request, "ping.html") | [
"[email protected]"
]
| |
2455804a9deef4d3443589283af4dc9f1ef5c926 | dd8227454b817ccf2ceb24b3dfd4260d4ded7a72 | /scripts/item/consume_2435694.py | 020e445c8c1b62894419c308afa2bc358e797d3f | [
"MIT"
]
| permissive | Snewmy/swordie | 0dd3c17808b064c2cb2bd9576b51daf01ae5d686 | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | refs/heads/master | 2023-06-30T21:14:05.225798 | 2021-07-06T14:32:39 | 2021-07-06T14:32:39 | 389,497,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | # Heroes Evan Damage Skin
success = sm.addDamageSkin(2435694)
if success:
sm.chat("The Heroes Evan Damage Skin has been added to your account's damage skin collection.")
| [
"[email protected]"
]
| |
12caf078872a5634ca4638aed6dbdbd7776b5062 | 6097031d8e85400214085f152164a29346d106e3 | /maxheap.py | 7e3f269c4b2365d3684fe48cdb32ec815206f9cd | []
| no_license | ekourkchi/GalaxyGroups | 2fccca4998850c0838d0c7ef949bba8b1267716a | 19e98da0015b0462133133a23915e6d633614ad3 | refs/heads/master | 2022-04-03T09:30:19.667796 | 2020-02-13T03:05:48 | 2020-02-13T03:05:48 | 112,898,380 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,137 | py | #!/home/ehsan/Ureka/Ureka/variants/common/bin/python
import numpy as np
from math import *
from copy import *
class heapNode:
key = None
ID = None
flag = False
def __init__(self, key, ID):
self.key = key
self.ID = ID
def toString(self):
print self.key, self.ID, self.flag
# *********************************************
class maxHeap:
size = 0 # Number of current elements
array = []
# *****************
def __init__(self):
self.size = 0
self.array = []
# *****************
def push(self, key, ID):
#print "push:", key, ID, self.size
newNode = heapNode(key, ID)
self.array.append(newNode)
child = self.size
while child > 0:
parent = (child+1)/2-1
if self.array[child].key > self.array[parent].key:
self.array[parent], self.array[child] = self.array[child], self.array[parent]
child = parent
else:
break
#for i in range(0,self.size+1):
#print self.array[i].key
self.size+=1
return 0
# *****************
def lrmax(self, left, right):
if right <= self.size-1:
if self.array[left].key >= self.array[right].key:
return left
else:
return right
elif left <= self.size-1:
return left
else:
return 0
# *****************
def pop(self):
if self.size == 0 :
print "\n[Error] No elements in the mean Heap ...\n"
return None
N = self.size
output = self.array[0]
self.array[0] = self.array[N-1]
parent = 0
while parent <= N-1:
left = 2*parent+1
right = 2*parent+2
child = self.lrmax(left, right)
if child != 0:
if self.array[child].key >= self.array[parent].key:
self.array[parent], self.array[child] = self.array[child], self.array[parent]
parent = child
else:
break
else:
break
self.array.pop(N-1)
self.size -= 1
return output
# *****************
def setFlag(self, key):
if self.size == 0 :
print "\n[Error] No elements in the mean Heap ...\n"
return False
for i in range(0, self.size):
if self.array[i].key == key:
self.array[i].flag = True
# *****************
def peek(self):
if self.size == 0 :
print "\n[Error] No elements in the mean Heap ...\n"
return None
else:
return self.array[0]
# *****************
"""
This method removes heap elements which have the same id as the input ID
The number of removed elements would be returned
"""
def remove(self, ID):
boolean = 0
if self.size == 0 :
#print "\n[Error] No elements in the mean Heap ...\n"
return boolean
else:
i = 0
while i < self.size:
# ID would be the object ID
if self.array[i].ID == ID:
parent = i
N = self.size
self.array[parent] = self.array[N-1]
while parent <= N-1:
left = 2*parent+1
right = 2*parent+2
child = self.lrmax(left, right)
if child != 0:
if self.array[child].key >= self.array[parent].key:
self.array[parent], self.array[child] = self.array[child], self.array[parent]
parent = child
else:
break
else:
break
self.array.pop(N-1)
self.size -= 1
boolean+=1
i-=1 # The new item must be checked again
i+=1
return boolean
# *****************
def Size(self): return self.size
# *****************
def toString(self):
for i in range(0,self.size):
self.array[i].toString();
# *********************************************
# *********************************************
if __name__ == '__main__':
myHeap = maxHeap()
myHeap.push(4, "e4")
myHeap.push(7, "e7")
myHeap.push(2, "e2")
myHeap.push(6, "e6")
myHeap.push(8, "e7")
myHeap.push(5, "e5")
myHeap.push(3, "e7")
print "\n", myHeap.Size()
print myHeap.remove("e5")
print "\n", myHeap.Size()
while myHeap.Size()>0:
myHeap.pop().toString()
#print myHeap.peek().key
| [
"[email protected]"
]
| |
4e3d52464d257688f122a23748edd43590043b89 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_24/models/network_interface_neighbor_capability.py | 76ae203e5847eb8a031597b8f2d39119f564eac0 | [
"BSD-2-Clause"
]
| permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,245 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.24
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_24 import models
class NetworkInterfaceNeighborCapability(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'supported': 'bool',
'enabled': 'bool'
}
attribute_map = {
'supported': 'supported',
'enabled': 'enabled'
}
required_args = {
}
def __init__(
self,
supported=None, # type: bool
enabled=None, # type: bool
):
"""
Keyword args:
supported (bool): If true, this capability is supported by this neighbor; false otherwise.
enabled (bool): If true, this capability is enabled by this neighbor; false otherwise.
"""
if supported is not None:
self.supported = supported
if enabled is not None:
self.enabled = enabled
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `NetworkInterfaceNeighborCapability`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(NetworkInterfaceNeighborCapability, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NetworkInterfaceNeighborCapability):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
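# Illustrative usage sketch (added for clarity; not part of the generated client):
# construct the model with keyword arguments and serialize it with to_dict().
def _neighbor_capability_example():
    cap = NetworkInterfaceNeighborCapability(supported=True, enabled=False)
    return cap.to_dict()  # -> {'supported': True, 'enabled': False}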
| [
"[email protected]"
]
| |
2ecaa0902b36455da6e55c02523cefe6bcec5bfc | e5f4c22bfae93d3d96dea1b0ed8f3e4df373243f | /test.py | f3a74709481a1e1e55a6bdc81b7b3e3e0cf3f866 | []
| no_license | MrLokans/discover_flask | 5925a2ab07480398543d51e33c8be2cf23b2c36b | 63f847409dd67725bdef754cd0041f2647dabf4e | refs/heads/master | 2021-01-10T16:25:21.767911 | 2016-03-07T05:44:17 | 2016-03-07T05:44:17 | 52,816,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,919 | py | import unittest
from app import app
class AppTestCase(unittest.TestCase):
def setUp(self):
self.tester = app.test_client(self)
def login(self, username, password, follow_redirects=True):
return self.tester.post('/login',
data={'username': username,
'password': password},
follow_redirects=follow_redirects)
def logout(self):
return self.tester.get('/logout', follow_redirects=True)
def correctly_login(self, follow_redirects=True):
return self.login('admin', 'password', follow_redirects)
def test_index(self):
response = self.tester.get('/login', content_type='html/text')
self.assertEqual(response.status_code, 200)
def test_login_page_is_loaded(self):
response = self.tester.get('/login', content_type='html/text')
self.assertEqual(response.status_code, 200)
self.assertIn('Please login', response.data.decode('utf-8'))
def test_login_process_behaves_correctly_with_correct_creds(self):
response = self.correctly_login()
self.assertIn('Successfully logged in', response.data.decode('utf-8'))
def test_login_process_behaves_correctly_with_incorrect_creds(self):
response = self.login('incorrectuser', 'incorrectpassword')
self.assertIn('Invalid username', response.data.decode('utf-8'))
def test_logout_works(self):
response = self.correctly_login()
response = self.logout()
self.assertIn('Logged out.', response.data.decode('utf-8'))
def test_main_page_requires_user_being_logged_in(self):
response = self.tester.get('/', content_type='html/text',
follow_redirects=True)
self.assertIn('Login required', response.data.decode('utf-8'))
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
a145346bc456c2281fad96365f8d9a5af1f4cd7d | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/sets_20200609191149.py | 89a07ef2d3f4c49463319f9699998f9dd296f2fc | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | import json
def Strings(str):
# dictionary--> key value pairs
values = {}
newArray = []
keys = []
for i in str:
newArray.append(i.split(":"))
for j in range(0,len(newArray)):
        if newArray[j][0] in values:
            values[newArray[j][0]] += int(newArray[j][1])
        else:
            values[newArray[j][0]] = int(newArray[j][1])
# if newArray[j][0] in values:
# values[newArray[j][0]] += int(newArray[j][1])
# else:
# values[newArray[j][0]] = int(newArray[j][1])
# for k in values:
# keys.append(k)
# keys = sorted(keys)
# newString = ""
# last =len(keys)-1
# lastString = ""
# lastString +=keys[last] + ":" + json.dumps(values[keys[last]])
# for i in range(len(keys)-1):
# if keys[i] in values:
# newString += keys[i] + ":"+ json.dumps(values[keys[i]])+","
# finalString = newString + lastString
# print(type(finalString))
Strings(["Z:1","B:3","C:3","Z:4","B:2"])
# "B:5,C:3,Z:5"
| [
"[email protected]"
]
| |
8a463bda0d0c60cd4f34f3e9d156d3254165acfc | ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31 | /repos/Flask-Large-Application-Example-master/tests/views/test_pypi_packages.py | 27394594cc76c8ccde073c14c83e1f2757b0f036 | [
"MIT"
]
| permissive | babiato/flaskapp1 | 84de2d0b26a54f5820d3bbe97926782ad41e005c | 530beb9e3b8516e0e93960b99521c23a523ef546 | refs/heads/master | 2023-02-26T16:36:49.760632 | 2021-02-04T09:08:40 | 2021-02-04T09:08:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,239 | py | from flask import current_app
from flask.ext.celery import CELERY_LOCK
import pytest
from redis.exceptions import LockError
from pypi_portal.extensions import db, redis
from pypi_portal.models.pypi import Package
from pypi_portal.models.redis import POLL_SIMPLE_THROTTLE
from pypi_portal.tasks import pypi
class FakeDelay(object):
@staticmethod
def ready():
return False
def test_index():
assert '200 OK' == current_app.test_client().get('/pypi/').status
def test_sync_empty(alter_xmlrpc):
alter_xmlrpc(set())
redis.delete(POLL_SIMPLE_THROTTLE)
Package.query.delete()
db.session.commit()
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
assert [] == db.session.query(Package.name, Package.summary, Package.latest_version).all()
def test_sync_few(alter_xmlrpc):
alter_xmlrpc([dict(name='packageB', summary='Test package.', version='3.0.0'), ])
redis.delete(POLL_SIMPLE_THROTTLE)
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
expected = [('packageB', 'Test package.', '3.0.0'), ]
actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
assert expected == actual
def test_sync_rate_limit(alter_xmlrpc):
alter_xmlrpc([dict(name='packageC', summary='Test package.', version='3.0.0'), ])
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
expected = [('packageB', 'Test package.', '3.0.0'), ]
actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
assert expected == actual
def test_sync_parallel(alter_xmlrpc):
alter_xmlrpc([dict(name='packageD', summary='Test package.', version='3.0.0'), ])
redis.delete(POLL_SIMPLE_THROTTLE)
redis_key = CELERY_LOCK.format(task_name='pypi_portal.tasks.pypi.update_package_list')
lock = redis.lock(redis_key, timeout=1)
assert lock.acquire(blocking=False)
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
expected = [('packageB', 'Test package.', '3.0.0'), ]
actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
assert expected == actual
try:
lock.release()
except LockError:
pass
def test_sync_many(alter_xmlrpc):
alter_xmlrpc([
dict(name='packageB1', summary='Test package.', version='3.0.0'),
dict(name='packageB2', summary='Test package.', version='3.0.0'),
dict(name='packageB3', summary='Test package.', version='3.0.0'),
dict(name='packageB4', summary='Test package.', version='3.0.0'),
dict(name='packageB5', summary='Test package.', version='3.0.0'),
])
redis.delete(POLL_SIMPLE_THROTTLE)
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
expected = [
('packageB', 'Test package.', '3.0.0'), ('packageB1', 'Test package.', '3.0.0'),
('packageB2', 'Test package.', '3.0.0'), ('packageB3', 'Test package.', '3.0.0'),
('packageB4', 'Test package.', '3.0.0'), ('packageB5', 'Test package.', '3.0.0'),
]
actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
assert sorted(expected) == sorted(actual)
def test_sync_unhandled_exception():
old_throttle = pypi.THROTTLE
pypi.THROTTLE = 'nan'
redis.delete(POLL_SIMPLE_THROTTLE)
with pytest.raises(ValueError):
        current_app.test_client().get('/pypi/sync').status
pypi.THROTTLE = old_throttle
def test_sync_timeout():
old_delay = pypi.update_package_list.delay
pypi.update_package_list.delay = FakeDelay
redis.delete(POLL_SIMPLE_THROTTLE)
assert '302 FOUND' == current_app.test_client().get('/pypi/sync').status
expected = [
('packageB', 'Test package.', '3.0.0'), ('packageB1', 'Test package.', '3.0.0'),
('packageB2', 'Test package.', '3.0.0'), ('packageB3', 'Test package.', '3.0.0'),
('packageB4', 'Test package.', '3.0.0'), ('packageB5', 'Test package.', '3.0.0'),
]
actual = db.session.query(Package.name, Package.summary, Package.latest_version).all()
assert sorted(expected) == sorted(actual)
pypi.update_package_list.delay = old_delay
| [
"[email protected]"
]
| |
6e242cc43e2c7d24c5cfd1f02e749621f9366a0e | 0bfb4208bdf7fcfd75311c777e25a3b639bf566d | /backend/code/iep/auth/models.py | fb6f856736cbe2fd1a25f1dc89baf52a17eff536 | [
"Apache-2.0"
]
| permissive | socek/iep | ab7833f94af739abd19f569f28de84cdcc689e95 | 793e35ca5304eef7b7dacb5dd8d486622f497759 | refs/heads/master | 2020-05-16T13:48:12.252161 | 2019-12-03T08:28:05 | 2019-12-03T08:28:05 | 183,082,207 | 0 | 0 | Apache-2.0 | 2019-12-03T08:28:07 | 2019-04-23T19:24:49 | Python | UTF-8 | Python | false | false | 1,193 | py | from bcrypt import checkpw
from bcrypt import gensalt
from bcrypt import hashpw
from iep.application.model import Model
class User(Model):
def __init__(
self,
uid,
created_at=None,
updated_at=None,
name=None,
email=None,
is_admin=None,
password=None,
):
super().__init__(uid, created_at, updated_at)
self.name = name
self.email = email
self.is_admin = is_admin
self.password = password
def do_password_match(self, password):
"""
        Check whether the provided password matches the password hash stored on the model.
"""
if self.password:
return checkpw(password.encode("utf8"), self.password)
else:
return False
def set_password(self, password):
self.password = hashpw(password.encode("utf8"), gensalt())
def to_dict(self):
return {
'uid': self.uid,
'created_at': self.created_at,
'updated_at': self.updated_at,
'name': self.name,
'email': self.email,
'is_admin': self.is_admin,
'password': self.password,
}
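# Illustrative sketch (added; not part of the original module): the intended
# set_password / do_password_match round trip. Assumes the Model base class can
# be constructed with only a uid, which may differ in the real application.
def _password_roundtrip_sketch():
    user = User(uid='example-uid', name='Alice', email='[email protected]')
    user.set_password('s3cret')
    assert user.do_password_match('s3cret')
    assert not user.do_password_match('wrong')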
| [
"[email protected]"
]
| |
f77ffc69cb16459c8138b3e8578323ac411365e2 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/5644d97cc5014e18b14799feeb9b354d528a6489-<test_invalid_interfaces>-bug.py | 88bcd68511e3ab151bad7e95439f0d409610e661 | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | def test_invalid_interfaces(self):
event = self.create_sample_event(platform='invalid-interfaces')
self.browser.get('/{}/{}/issues/{}/'.format(self.org.slug, self.project.slug, event.group.id))
self.browser.wait_until('.entries')
self.browser.snapshot('issue details invalid interfaces') | [
"[email protected]"
]
| |
bb4d3c4ffba8b3fdd9dae18528199a1e9560a1a0 | 43ede7b8fb546c00804c0ef94501f6e48ba170d6 | /Cursos Python/Python 3 - Solyd/Orientacao_a_objeto.py | e902f0d109aa9feef7f8a68a9651bc74a65cd1bb | []
| no_license | bopopescu/Python-13 | db407d17252473e78e705e563cfee4dbd316c6b9 | c8bef500f2d3e4a63d850f96dfa219eff2ecebda | refs/heads/master | 2022-11-22T16:24:08.490879 | 2020-06-11T14:22:24 | 2020-06-11T14:22:24 | 281,830,055 | 0 | 0 | null | 2020-07-23T02:26:31 | 2020-07-23T02:26:30 | null | UTF-8 | Python | false | false | 1,339 | py | class Cliente:
def __init__(self, nome, cpf, idade):
self.__nome = nome
self.__cpf = cpf
self.__idade = idade
def dados_cliente(self):
return {'nome': self.__nome,
'cpf': self.__cpf,
'idade': self.__idade}
class Conta(Cliente):
def __init__(self, nome, cpf, idade, saldo, limite):
super().__init__(nome, cpf, idade)
        # Account holder
self.__nome = nome
self.__cpf = cpf
self.__idade = idade
        # Account data
self.__saldo = float(saldo)
self.__limite = float(limite)
def saldo_atual(self):
print(f'Saldo atual: R${self.__saldo:.2f}')
def dono(self):
print('nome: ', self.__nome)
print('cpf:', self.__cpf)
print('idade :', self.__idade)
def sacar(self, valor_saque):
self.__saldo -= float(valor_saque)
print(f'Saque de R${valor_saque}, Realizado com sucesso!')
def depositar(self, valor_deposito):
self.__saldo += float(valor_deposito)
cliente = Cliente('Erickson', '19542634-05', 18)
dc = cliente.dados_cliente()
conta = Conta(dc['nome'], dc['cpf'], dc['idade'], 1500.00, 5000.00)
conta.saldo_atual()
conta.sacar(257.05)
conta.saldo_atual()
conta.saldo_atual()
conta.depositar(750.00)
conta.saldo_atual()
| [
"[email protected]"
]
| |
ccf9a734c56a27aad1c7b63e96282803ea84b5a4 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/f1dbf6292b80b7cc67661707e7f1d8b5b0a06eb5-<check_params>-bug.py | b748d6d0116f380fc5635eaf4ef57ebc08f34ef9 | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | def check_params(self):
'Check all input params'
if (not self.key_id.isdigit()):
self.module.fail_json(msg='Error: key_id is not digit.')
if ((int(self.key_id) < 1) or (int(self.key_id) > 4294967295)):
self.module.fail_json(msg='Error: The length of key_id is between 1 and 4294967295.')
if (self.state == 'present'):
if ((self.auth_type == 'encrypt') and ((len(self.password) < 20) or (len(self.password) > 392))):
self.module.fail_json(msg='Error: The length of encrypted password is between 20 and 392.')
elif ((self.auth_type == 'text') and ((len(self.password) < 1) or (len(self.password) > 255))):
self.module.fail_json(msg='Error: The length of text password is between 1 and 255.') | [
"[email protected]"
]
| |
dfdfdc73b69afa83125300340f0252cfe3100d38 | a127d0feb3bcf4f2581f385bb24f2b789c771c9c | /10syo/95_2.py | 0a1ea7e35fd38d9a0daad78a923622656306fdf5 | []
| no_license | NgoVanDau/nlp100knock | 01383e4cc5a1470508744668103b9ea1a238b892 | 3ef63c0d2dfb55c0e6a31aced645f284325a98a5 | refs/heads/master | 2023-03-22T13:19:23.932429 | 2018-08-05T05:27:11 | 2018-08-05T05:27:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | fname_input = 'combined_out.tab'
class Data:
def __init__(self, human_score, my_score):
self.human_score = human_score
self.my_score = my_score
def __repr__(self):
return 'Data%s' % repr(self.__dict__)
# Build the data array
with open(fname_input) as data_file:
def read_data():
for line in data_file:
word1, word2, human_score, my_score = line.split('\t')
yield Data(float(human_score), float(my_score))
data = list(read_data())
# Assign ranks
data_sorted_by_human_score = sorted(data, key=lambda data: data.human_score)
for order, d in enumerate(data_sorted_by_human_score):
d.human_order = order
data_sorted_by_my_score = sorted(data, key=lambda data: data.my_score)
for order, d in enumerate(data_sorted_by_my_score):
d.my_order = order
# Compute the Spearman rank correlation coefficient:
#   rho = 1 - 6 * sum(d_i^2) / (N^3 - N), where d_i is the rank difference for item i
N = len(data)
total = sum((d.human_order - d.my_order) ** 2 for d in data)
result = 1 - (6 * total) / (N ** 3 - N)
print(result)
| [
"[email protected]"
]
| |
796965104f9a8b405aea58339305c0e917d2c247 | 7aae3051a7d08a280f7adc55b4b984bc48c87db3 | /vehicle/admins/vehicle_model_admin.py | ba26d4ec5f9adf2698da8711bc9fa8bd44e5b5a4 | []
| no_license | ohahlev/ahlev-django-vehicle | d087375e3b49cda9253a776f79e4531bbf0a686d | 51895c200b40be7a298a4054ba2d8945df6a84d0 | refs/heads/master | 2020-11-30T07:00:12.441028 | 2020-01-21T01:25:48 | 2020-01-21T01:25:48 | 230,340,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | from django.utils.html import format_html
from django.contrib import admin
from imagekit import ImageSpec
from imagekit.admin import AdminThumbnail
from imagekit.processors import ResizeToFill
from imagekit.cachefiles import ImageCacheFile
from ..models.vehicle_model import VehicleModel
from .widgets import AdminSmallestThumbnailSpec, AdminSmallThumbnailSpec
class VehicleModelAdmin(admin.ModelAdmin):
def preview_thumbnail(self, obj):
if obj.logo_thumbnail:
return format_html(u"<img src='{}'/>", obj.logo_thumbnail.url)
preview_thumbnail.short_description = 'Preview'
readonly_fields = ['preview_thumbnail']
fieldsets = [
("NAME", {
'fields': ['name', 'logo', 'preview_thumbnail'],
}),
]
search_fields = ['name']
list_display = ['name', 'preview_thumbnail', 'date_created', 'last_updated']
class Media:
css = {
'all': (
'vehicle/css/vehicle.css',
)
}
'''
js = (
'js/jquery.min.js',
'js/popper.min.js',
'js/bootstrap.min.js',
'js/mdb.min.js',
'js/myscript.js'
)
'''
admin.site.register(VehicleModel, VehicleModelAdmin) | [
"[email protected]"
]
| |
7d19f4be3e65d55621b576d2306fd4eb58e60381 | 015ce35e6344d1726173594ae509dfc1ca6f856d | /2-basics/Study basics/loops.py | 7ce93a431733e32e961c8476c4ae0d1bd2085bee | []
| no_license | ayman-elkassas/Python-Notebooks | 4af80df75c15a6ac3049450b3920d500fef0e581 | 26a8265f458c40ac22965d55722f32a650851683 | refs/heads/master | 2023-04-03T19:12:17.707673 | 2021-04-10T21:32:37 | 2021-04-10T21:32:37 | 356,699,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | count=5
while count>99:
print("yes")
count-=1
else:
print("if")
for letter in "python":
print(letter)
| [
"[email protected]"
]
| |
19ef56453f855c29a72eaa6c8c52e2ca967e6a36 | f8e8e365c9cf58b61d72655bc2340baeaed5baff | /Leetcode/Python Solutions/Binary Search/ValidPerfectSquare.py | c4e8a70a8beb4c70db11315cbe222321332ff181 | [
"MIT"
]
| permissive | Mostofa-Najmus-Sakib/Applied-Algorithm | 39a69f6b9ed113efe4a420d19cad79e0aa317637 | bc656fd655617407856e0ce45b68585fa81c5035 | refs/heads/master | 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 | MIT | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null | UTF-8 | Python | false | false | 898 | py | """
LeetCode Problem 367. Valid Perfect Square
Link: https://leetcode.com/problems/valid-perfect-square/
Written by: Mostofa Adib Shakib
Language: Python
Observation:
1) Number less than 2 will always form perfect squares so return True.
2) The number will always be in the first half of the array. Hence, we can discard the second half.
Time Complexity: O(log n)
Space Complexity: O(1)
"""
class Solution:
def isPerfectSquare(self, num: int) -> bool:
if num <= 1: return True
left = 2
right = num//2
while left <= right:
mid = (left + right) // 2
guess = mid * mid
if guess == num:
return True
elif guess < num:
left = mid + 1
else:
right = mid - 1
return False | [
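# Quick self-check (added; illustrative only, not part of the original solution).
if __name__ == "__main__":
    s = Solution()
    assert s.isPerfectSquare(16) is True
    assert s.isPerfectSquare(14) is False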
"[email protected]"
]
| |
ed0466956305c5f5e6955a737d43b2039c8f0fc5 | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/4-functional-programming/7-list-comprehension_20200422222427.py | 81d606e197ec10031073a3db9b3879a25cb59bc1 | []
| no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #list, set, dicitonary
my_list = []
for char in 'HELLO':
my_list.append(char)
print(my_list)
dict_list = [char for char in 'good morning']
print(dict_list)
num_list = [num for num in range (0, 100)]
print(num_list)
print("divide by 3 with no remainder")
num_list3 = [num for num in range (0, 100) if(num%3 ==0)]
print(num_list3) | [
"[email protected]"
]
| |
c2ee78250d0f3860d8ec164c11ab88e734704bed | 8efd8bcd3945d88370f6203e92b0376ca6b41c87 | /problems100_200/151_Reverse_Words_in_a_String.py | 11b5357b6300152e2debfd6b3f1328822ffebdd4 | []
| no_license | Provinm/leetcode_archive | 732ad1ef5dcdfdde6dd5a33522e86f7e24ae2db5 | 3e72dcaa579f4ae6f587898dd316fce8189b3d6a | refs/heads/master | 2021-09-21T08:03:31.427465 | 2018-08-22T15:58:30 | 2018-08-22T15:58:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | #coding=utf-8
'''
151. Reverse Words in a String
Given an input string, reverse the string word by word.
Example:
Input: "the sky is blue",
Output: "blue is sky the".
Note:
A word is defined as a sequence of non-space characters.
Input string may contain leading or trailing spaces. However, your reversed string should not contain leading or trailing spaces.
You need to reduce multiple spaces between two words to a single space in the reversed string.
Follow up: For C programmers, try to solve it in-place in O(1) space.
'''
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
lst = [i for i in s.split(" ") if i]
return ' '.join(reversed(lst))
s = " the sky is blue"
ss = Solution()
r = ss.reverseWords(s)
print(r)
| [
"[email protected]"
]
| |
6659b4d8145e55d900dcabb7398db42929c560f4 | d75560d9acde4f1f6457898d8862b06ba5f8dd7b | /backend/msm_sgsjhsjh4803_de_13561/wsgi.py | 3cd1976c5b367e8dc49ef8d3516ab2cc510980f7 | []
| no_license | crowdbotics-apps/msm-sgsjhsjh4803-de-13561 | af6563f775832664041dbd8abc5d05af9d8d4a4f | 9364d828ffee0edfe68d263fce2b0a7cb2949039 | refs/heads/master | 2022-12-29T15:25:29.870944 | 2020-10-19T08:18:12 | 2020-10-19T08:18:12 | 305,263,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | """
WSGI config for msm_sgsjhsjh4803_de_13561 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'msm_sgsjhsjh4803_de_13561.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
e348b3e0dfab26e0cc1f9c6a114ae59be50476c4 | 4c8755443320f0e8fde2718aec40c49ef27ab6fe | /{{cookiecutter.repo_name}}/cookiecutter_repo/utils/loaders.py | 0d90448aa34fd2244e0f3ef816996b8e56608d99 | [
"MIT"
]
| permissive | ethman/cookiecutter-nussl | 28266f2b714607493016aa554794617e1cb431aa | 302df1bee74b13ff0e2c6725997f7b7fa26b32d5 | refs/heads/master | 2020-12-09T23:50:09.844838 | 2020-01-12T17:19:06 | 2020-01-12T17:19:06 | 233,449,725 | 0 | 0 | null | 2020-01-12T19:54:48 | 2020-01-12T19:54:47 | null | UTF-8 | Python | false | false | 586 | py | from .. import dataset, model
def load_dataset(dataset_class, dataset_folder, dataset_config):
DatasetClass = getattr(dataset, dataset_class)
dataset_instance = DatasetClass(dataset_folder, dataset_config)
return dataset_instance
def load_model(model_config):
model_class = model_config.pop('class', 'SeparationModel')
ModelClass = getattr(model, model_class)
if model_class == 'SeparationModel':
model_instance = ModelClass(model_config, extra_modules=model.extras)
else:
model_instance = ModelClass(model_config)
return model_instance | [
"[email protected]"
]
| |
837d8f52574c6bab972f540869f2bca52b2bf000 | 94c8dd4126da6e9fe9acb2d1769e1c24abe195d3 | /qiskit/circuit/library/boolean_logic/quantum_or.py | 0864affb7958edffe5050f3c8d54af82bdc515be | [
"Apache-2.0"
]
| permissive | levbishop/qiskit-terra | a75c2f96586768c12b51a117f9ccb7398b52843d | 98130dd6158d1f1474e44dd5aeacbc619174ad63 | refs/heads/master | 2023-07-19T19:00:53.483204 | 2021-04-20T16:30:16 | 2021-04-20T16:30:16 | 181,052,828 | 1 | 0 | Apache-2.0 | 2019-06-05T15:32:13 | 2019-04-12T17:20:54 | Python | UTF-8 | Python | false | false | 3,664 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Implementations of boolean logic quantum circuits."""
from typing import List, Optional
from qiskit.circuit import QuantumRegister, QuantumCircuit
from qiskit.circuit.library.standard_gates import MCXGate
class OR(QuantumCircuit):
r"""A circuit implementing the logical OR operation on a number of qubits.
For the OR operation the state :math:`|1\rangle` is interpreted as ``True``. The result
qubit is flipped, if the state of any variable qubit is ``True``. The OR is implemented using
a multi-open-controlled X gate (i.e. flips if the state is :math:`|0\rangle`) and
applying an X gate on the result qubit.
Using a list of flags, qubits can be skipped or negated.
The OR gate without special flags:
.. jupyter-execute::
:hide-code:
from qiskit.circuit.library import OR
import qiskit.tools.jupyter
circuit = OR(5)
%circuit_library_info circuit
Using flags we can negate qubits or skip them. For instance, if we have 5 qubits and want to
return ``True`` if the first qubit is ``False`` or one of the last two are ``True`` we use the
flags ``[-1, 0, 0, 1, 1]``.
.. jupyter-execute::
:hide-code:
from qiskit.circuit.library import OR
import qiskit.tools.jupyter
circuit = OR(5, flags=[-1, 0, 0, 1, 1])
%circuit_library_info circuit
"""
def __init__(self, num_variable_qubits: int, flags: Optional[List[int]] = None,
mcx_mode: str = 'noancilla') -> None:
"""Create a new logical OR circuit.
Args:
num_variable_qubits: The qubits of which the OR is computed. The result will be written
into an additional result qubit.
flags: A list of +1/0/-1 marking negations or omissions of qubits.
mcx_mode: The mode to be used to implement the multi-controlled X gate.
"""
# store num_variables_qubits and flags
self.num_variable_qubits = num_variable_qubits
self.flags = flags
# add registers
qr_variable = QuantumRegister(num_variable_qubits, name='variable')
qr_result = QuantumRegister(1, name='result')
super().__init__(qr_variable, qr_result, name='or')
# determine the control qubits: all that have a nonzero flag
flags = flags or [1] * num_variable_qubits
control_qubits = [q for q, flag in zip(qr_variable, flags) if flag != 0]
# determine the qubits that need to be flipped (if a flag is > 0)
flip_qubits = [q for q, flag in zip(qr_variable, flags) if flag > 0]
# determine the number of ancillas
self.num_ancilla_qubits = MCXGate.get_num_ancilla_qubits(len(control_qubits), mode=mcx_mode)
if self.num_ancilla_qubits > 0:
qr_ancilla = QuantumRegister(self.num_ancilla_qubits, 'ancilla')
self.add_register(qr_ancilla)
else:
qr_ancilla = []
self.x(qr_result)
if len(flip_qubits) > 0:
self.x(flip_qubits)
self.mcx(control_qubits, qr_result[:], qr_ancilla[:], mode=mcx_mode)
if len(flip_qubits) > 0:
self.x(flip_qubits)
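# Illustrative sketch (added; not from the original file): builds the OR circuit
# from the class docstring that is true when the first qubit is 0 OR either of
# the last two qubits is 1.
def _or_example_sketch():
    return OR(5, flags=[-1, 0, 0, 1, 1])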
| [
"[email protected]"
]
| |
a663a571c791506a5bbea2e874df529dbed68ebb | c75ec82316ed5322c5844912ce9c528c24360b9f | /nsd1907/py02/day01/cut_log.py | cceaf977d83d75e82696a61778603e0948c24313 | []
| no_license | MrZhangzhg/nsd2019 | a94cde22f2e4bd648bb9e56ca63827f558f3c083 | 54f6d2c7b348a69f13ad5f38f2fbdc8207528749 | refs/heads/master | 2021-08-22T17:38:27.697675 | 2020-02-22T08:36:21 | 2020-02-22T08:36:21 | 183,539,489 | 21 | 24 | null | 2020-05-17T12:07:55 | 2019-04-26T02:06:16 | HTML | UTF-8 | Python | false | false | 525 | py | import time
t9 = time.strptime('2019-05-15 09:00:00', '%Y-%m-%d %H:%M:%S')
t12 = time.strptime('2019-05-15 12:00:00', '%Y-%m-%d %H:%M:%S')
with open('mylog.txt') as fobj:
for line in fobj:
t = time.strptime(line[:19], '%Y-%m-%d %H:%M:%S')
if t > t12:
break
if t >= t9:
print(line, end='')
# with open('mylog.txt') as fobj:
# for line in fobj:
# t = time.strptime(line[:19], '%Y-%m-%d %H:%M:%S')
# if t9 <= t <= t12:
# print(line, end='')
| [
"[email protected]"
]
| |
7394010400225008bcf0ebefdea0242ca3765d3e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_96/1509.py | 985390ba9c8945569ce4096912a9a40962d7ecaf | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | from string import split
f1=open('B-large.in','r')
f2=open('out.txt','w')
t=int(f1.readline())
for i in range (t):
k=0
s=f1.readline()
data=list(map(int,s.split(' ')))
u=data[1]+0
for j in range(data[0]):
if data[j+3]==0 or data[j+3]==1:
if data[j+3]>=data[2]:
k+=1
elif data[1]==0:
if data[j+3] % 3==0 and data[j+3]//3>=data[2]:
k+=1
elif data[j+3]%3!=0 and data[j+3]//3+1>=data[2]:
k+=1
else:
if data[j+3]%3==1 and data[j+3]//3+1>=data[2]:
k+=1
elif data[j+3]%3==0 and data[j+3]//3+1==data[2] and u!=0:
u-=1
k+=1
elif data[j+3]%3==0 and data[j+3]//3>=data[2]:
k+=1
elif data[j+3]%3==2 and data[j+3]//3+2==data[2] and u!=0:
u-=1
k+=1
elif data[j+3]%3==2 and data[j+3]//3+1>=data[2]:
k+=1
f2.write ("Case #"+str(i+1)+": "+str(k)+"\n")
f1.close()
f2.close()
| [
"[email protected]"
]
| |
3a7a4e0fc74d98a3d4bb90e7220f2bca91eaa4d0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/97/usersdata/239/54617/submittedfiles/lecker.py | e3ba12e1ff6dc558621a8f2f17a217e1787cc426 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | # -*- coding: utf-8 -*-
from __future__ import division
def lecker (lista):
cont=0
    for i in range(0, len(lista), 1):
if i==0:
if lista[i]>lista[i+1]:
cont=cont+1
elif i==(len(lista)-1):
if lista[i]>lista[i-1]:
cont=cont+1
else:
if lista[i]>lista[i-1]:
if lista[i]>lista[i+1]:
cont=cont+1
if cont==1:
return True
else:
return False
n=int(input("Digite a quantidade de elementos da lista:"))
a=[]
for i in range (0,n,1):
valor=int(input("Digite o valor:"))
a.append(valor)
b=[]
for i in range (0,n,1):
valor=int(input("Digite o valor:"))
b.append(valor)
if lecker (a):
print("S")
else:
print("N")
if lecker (b):
print("S")
else:
print("N")
| [
"[email protected]"
]
| |
4ae9d4cd17ad18027fa1dffe901e6463804b40c4 | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/backend/python/typecheck/mypy/skip_field.py | 672a681eeba2e506b35d3c2f51bbadb683934354 | [
"Apache-2.0"
]
| permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 897 | py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.target_types import (
PythonSourcesGeneratorTarget,
PythonSourceTarget,
PythonTestsGeneratorTarget,
PythonTestTarget,
PythonTestUtilsGeneratorTarget,
)
from pants.engine.target import BoolField
class SkipMyPyField(BoolField):
alias = "skip_mypy"
default = False
help = "If true, don't run MyPy on this target's code."
def rules():
return [
PythonSourcesGeneratorTarget.register_plugin_field(SkipMyPyField),
PythonSourceTarget.register_plugin_field(SkipMyPyField),
PythonTestsGeneratorTarget.register_plugin_field(SkipMyPyField),
PythonTestTarget.register_plugin_field(SkipMyPyField),
PythonTestUtilsGeneratorTarget.register_plugin_field(SkipMyPyField),
]
| [
"[email protected]"
]
| |
66b654358f12a58b653f3dd74fb992717fe0bcc6 | a6b46a37bb2fc9e27ed000cb5d2e1fcef6e7527c | /python/common/expressions.py | fac15ec0ea42f9bfa0e78d51628dfc525e99d9a9 | [
"MIT"
]
| permissive | data-man-34/ad_examples | 3c4b522b64a8387aed922a6fd062114a3e96c26f | f0d5d95c443cf1cfaf293a2e76b9bff3cbfd85b7 | refs/heads/master | 2020-04-09T15:12:48.677931 | 2018-12-04T18:58:22 | 2018-12-04T18:58:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,371 | py | import tokenize
import re
import os
import numpy as np
# from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_fscore_support
"""
General Rule-parsing functions. We might use only a subset of the features available.
For some rule illustrations/examples, see test_rule_apis()
To test:
pythonw -m common.expressions
"""
# Supported datatypes
DTYPE_CATEGORICAL = 0 # categorical
DTYPE_CONTINUOUS = 1 # numerical float values
DTYPE_BINARY = 2 # categorical {'0','1'}
LABEL_VAR_INDEX = -1
UNINITIALIZED_VAR_INDEX = -2
ILLEGAL_VAR_INDEX = -3
UNKNOWN_CATEGORICAL_VALUE_INDEX = -1
DEFAULT_PREDICATE_WEIGHT = 1.0
class stack(list):
def push(self, item):
self.append(item)
def is_empty(self):
return not self
class DType(object):
def __init__(self):
self.name = None
def is_numeric(self):
pass
def is_continuous(self):
pass
class Factor(DType):
"""Stores information about the values that a categorical variable can take.
Also provides the one-hot encodings
Attributes:
__values: dict
Mapping of categorical values from string representations
to integer.
__fromindex: dict
Mapping of integers to corresponding categorical string
representations.
__onehot: dict
Mapping of categorical (integer) values to corresponding
one-hot representations.
__onehotNA: np.array
One-hot vector that will be returned when the value is
missing. This has 'nan' in all positions of the vector.
"""
def __init__(self, vals, sort=True):
"""Creates a Factor instance from the input set of values.
Args:
vals: list
An unique set of allowable values/levels for the factor.
sort: boolean, (default True)
Whether to sort the values alphabetically before assigning
them indexes. Sometimes the input order might need to be
maintained (with sort=False) e.g., if these represent
column names which occur in the specific input order.
"""
DType.__init__(self)
self.__values = {}
self.__fromindex = {}
self.__onehot = {}
tmp = [x for x in vals if x != '']
if sort:
tmp = sorted(tmp)
self.__onehotNA = np.empty(len(tmp))
self.__onehotNA.fill(np.nan)
tmphot = np.zeros(len(tmp), dtype=float)
for i in range(0, len(tmp)):
self.__values[tmp[i]] = i
self.__fromindex[i] = tmp[i]
self.__onehot[i] = np.array(tmphot) # store a new copy
self.__onehot[i][i] = 1
def is_numeric(self):
return False
def is_continuous(self):
return False
def all_values(self):
return self.__values
def index_of(self, value):
return self.__values.get(value)
def encode_to_one_hot(self, index):
"""Encode the categorical variable to one-hot vector.
Some algorithms like scikit-learn decision trees need the data
to be presented only as numeric vectors. In that case, we need
to encode all categorical features as one-hot vectors.
Args:
index: int
Index of the value
Returns: np.array
"""
ret = self.__onehot.get(index)
return self.__onehotNA if ret is None else ret
def __getitem__(self, index):
return self.__fromindex[index]
def num_values(self):
return len(self.__values)
def __repr__(self):
return 'Factor ' + repr(self.__values)
def __str__(self):
return 'Factor ' + repr(self.__values)
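# Illustrative usage sketch (added for clarity; not part of the original API).
# With sort=True the levels are ordered alphabetically, so ['red', 'green', 'blue']
# is stored as blue=0, green=1, red=2.
def _factor_usage_sketch():
    colors = Factor(['red', 'green', 'blue'])
    idx = colors.index_of('green')           # -> 1
    onehot = colors.encode_to_one_hot(idx)   # -> array([0., 1., 0.])
    return idx, onehot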
class NumericContinuous(DType):
"""Definitions for Gaussian distributed real values."""
def __init__(self, vals):
"""Initializes the mean and variance of the Gaussian variable."""
DType.__init__(self)
# Ignore NaNs
n = np.count_nonzero(~np.isnan(vals))
if n > 0:
self.mean = np.nanmean(vals)
self.variance = np.nanvar(vals)
else:
self.mean = 0
self.variance = 0
def is_numeric(self):
return True
def is_continuous(self):
return True
def __repr__(self):
return 'Continuous(mean=' + str(self.mean) + ", var=" + str(self.variance) + ")"
def __str__(self):
return 'Continuous(mean=' + str(self.mean) + ", var=" + str(self.variance) + ")"
class FeatureMetadata(object):
"""Contains all metadata related to features.
Attributes:
lblname: string
The column name of the label column.
lbldef: Factor
All permissible label values.
featurenames: Factor
All feature names stored in the same order as features.
featuredefs: list
Contains info about each feature.
"""
def __init__(self, lblname=None, lbldef=None,
featurenames=None, featuredefs=None):
self.lblname = lblname
self.lbldef = lbldef
self.featurenames = featurenames
self.featuredefs = featuredefs
def num_features(self):
return 0 if self.featuredefs is None else len(self.featuredefs)
def _tostr(self):
return "[FeatureMetadata]\nlblname: " + str(self.lblname) + "\n" + \
"lbldef: " + str(self.lbldef) + "\n" + \
"featurenames: " + str(self.featurenames) + "\n" + \
"featuredefs: " + str(self.featuredefs)
def __repr__(self):
return self._tostr()
def __str__(self):
return self._tostr()
class Expression(object):
"""Base class for any expression that needs to be parsed or evaluated."""
def evaluate(self, inst, lbl, meta):
"""Given an instance, will evaluate the expression.
The expression might return a literal or a True/False value.
"""
pass
def compile(self, meta):
"""Resolves the variable and literal bindings such that
the expression can be evaluated efficiently later.
"""
pass
def ground(self, inst, lbl, meta):
"""Grounds the expression with values from
the instance and returns a string representation.
This is useful for debugging since it makes the
evaluation transparent.
"""
pass
def expr(self, meta):
"""Returns a readable string representation of the rule that can be parsed."""
def get_variables(self):
"""Returns the set of variables bound to the expression"""
raise TypeError("Unsupported operation: get_variables()")
def __repr__(self):
return str(self)
class Term(Expression):
pass
class Literal(Term):
"""A literal.
Literals might be numeric, strings, or categorical.
If categorical, they are converted to internal (integer)
representation when compiled.
Attributes:
val: object (float/string/category)
The actual value
valindex: int
If the corresponding feature is categorical, then
the integer representation of the value is stored.
categorical: boolean
Indicates whether the literal is categorical.
rexp: Compiled regular expression
Used to remove quotes (surrounding the literal),
if required.
"""
rexp = re.compile(r" *(['\"])(.*)\1 *")
def __init__(self, val, removequotes=False):
self.val = val
self.valindex = UNKNOWN_CATEGORICAL_VALUE_INDEX
self.categorical = None
if removequotes:
m = Literal.rexp.match(val)
if m is not None:
self.val = m.group(2)
else:
pass
def evaluate(self, inst, lbl, meta):
if self.categorical is None:
raise ValueError("Undetermined whether literal '" + str(self.val) + "' is categorical or not.")
elif self.categorical:
ret = self.valindex
else:
ret = self.val
# print(str(self) + ': ' + str(ret))
return ret
def ground(self, inst, lbl, meta):
return repr(self.val)
def expr(self, meta):
if meta is None:
raise ValueError("Invalid metadata")
if self.categorical is not None and self.categorical:
return "'" + str(self.val) + "'"
return str(self.val)
def get_variables(self):
return set([]) # A literal does not contain any variable
def __str__(self):
return "Lit(" + str(self.val) + "<" + str(self.valindex) + ">" + ")"
class Var(Term):
"""Variable/Feature.
This represents the feature/label variable. Initially, the
name of the feature is stored. This gets compiled later into
the feature index such that evaluation can be faster.
Label variable is indicated by the feature index '-1'.
Before compilation, the feature index is initialized to '-2'.
After compilation, the feature index corresponding to the
variable name is looked up using the metadata.
Attributes:
name: string
The name of the feature. Usually corresponds to a
columnname on the data.
varindex: int
The index of the variable -- computed during compilation.
vartype: int (default DTYPE_CATEGORICAL)
The datatype for the variable.
"""
def __init__(self, name):
self.name = name
self.varindex = UNINITIALIZED_VAR_INDEX # uninitialized
self.vartype = DTYPE_CATEGORICAL
def evaluate(self, inst, lbl, meta):
ret = None
if self.varindex == LABEL_VAR_INDEX:
ret = lbl
elif self.vartype == DTYPE_CATEGORICAL and self.varindex >= 0:
# self.vartype is categorical
ret = int(inst[self.varindex])
elif self.vartype == DTYPE_CONTINUOUS and self.varindex >= 0:
# self.vartype is numeric continuous
ret = inst[self.varindex]
# print(str(self) + ': ' + str(ret))
return None if self.vartype == DTYPE_CATEGORICAL and ret < 0 else ret
def compile(self, meta):
self.varindex = UNINITIALIZED_VAR_INDEX # set to uninitialized first
# print('Compiling Var ' + str(self.name))
if self.name == meta.lblname:
self.varindex = LABEL_VAR_INDEX # label column
else:
idx = meta.featurenames.index_of(self.name)
# -3 = unknown
self.varindex = ILLEGAL_VAR_INDEX if idx is None else idx
if self.varindex == ILLEGAL_VAR_INDEX:
raise ValueError("Unknown variable: '%s' in expression. Allowed variables: %s or '%s'" %
(self.name, str(meta.featurenames.all_values().keys()), meta.lblname))
if self.varindex >= 0 and meta.featuredefs is not None \
and meta.featuredefs[self.varindex].is_continuous():
self.vartype = DTYPE_CONTINUOUS # DTYPE_CONTINUOUS
else:
self.vartype = DTYPE_CATEGORICAL # DTYPE_CATEGORICAL
def ground(self, inst, lbl, meta):
val = "?"
if self.varindex == LABEL_VAR_INDEX: # label
val = "'" + meta.lbldef[lbl] + "'"
elif self.varindex >= 0:
# assume that all features are continuous
val = inst[self.varindex]
return str(self.name) + "(" + repr(val) + ")"
def expr(self, meta):
if self.varindex == LABEL_VAR_INDEX: # label
return meta.lblname
elif self.varindex >= 0:
return meta.featurenames[self.varindex]
raise ValueError("Uncompiled Rule: %s" % (str(self),))
def get_variables(self):
return {self.varindex}
def __str__(self):
return "Var(" + str(self.name) + "<" + str(self.varindex) + ">)"
class Predicate(Expression):
pass
class Atom(Predicate):
pass
class BinaryPredicate(Predicate):
"""Predicate taking two inputs."""
def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
self.p1 = p1
self.p2 = p2
self.weight = weight
def compile(self, meta):
# print('Compiling ' + str(self.p1) + ' ' + str(isinstance(self.p1, Predicate)))
self.p1.compile(meta)
# print('Compiling ' + str(self.p2) + ' ' + str(isinstance(self.p2, Predicate)))
self.p2.compile(meta)
def get_variables(self):
vars = set()
vars.update(self.p1.get_variables())
vars.update(self.p2.get_variables())
return vars
def get_str_weight(self, suppress_default_weight=True):
if suppress_default_weight and self.weight == DEFAULT_PREDICATE_WEIGHT:
return ""
return "[" + str(self.weight) + "]"
class UnaryPredicate(Predicate):
"""Predicate taking one input."""
def __init__(self, p=None, weight=DEFAULT_PREDICATE_WEIGHT):
self.p = p
self.weight = weight
def compile(self, meta):
self.p.compile(meta)
def get_variables(self):
vars = set()
vars.update(self.p.get_variables())
return vars
def get_str_weight(self, suppress_default_weight=True):
if suppress_default_weight and self.weight == DEFAULT_PREDICATE_WEIGHT:
return ""
return "[" + str(self.weight) + "]"
class Cmp(BinaryPredicate):
"""Base class for evaluating comparison operators."""
def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
BinaryPredicate.__init__(self, p1=p1, p2=p2, weight=weight)
def evaluate(self, inst, lbl, meta):
e1 = self.p1.evaluate(inst, lbl, meta)
e2 = self.p2.evaluate(inst, lbl, meta)
ret = None if e1 is None or e2 is None else self.evaluateCmp(e1, e2)
# print(str(self) + ': ' + str(ret))
if ret is None:
raise ValueError('predicate value for %s unbound \n inst: %s' \
% (str(self), str(inst)))
return ret
def evaluateCmp(self, e1, e2):
raise NotImplementedError('Comparison operator not implemented.')
def compile(self, meta):
self.p1.compile(meta)
self.p2.compile(meta)
# Comparisons must be between a variable and a literal.
tvar = self.p1 if isinstance(self.p1, Var) else self.p2 if isinstance(self.p2, Var) else None
tlit = self.p1 if isinstance(self.p1, Literal) else self.p2 if isinstance(self.p2, Literal) else None
if tvar is not None and tlit is not None:
if tvar.varindex == LABEL_VAR_INDEX: # label column
tlit.categorical = True # class variables are always categorical
tlit.valindex = meta.lbldef.index_of(tlit.val)
elif tvar.varindex >= 0: # feature column
# assume that all features are continuous
tlit.categorical = False
else:
raise ValueError('Comparisons must be between a variable and a literal.')
def ground(self, inst, lbl, meta):
raise NotImplementedError('ground() not implemented.')
def __str__(self):
return "Cmp(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class CmpEq(Cmp):
"""Compares if values of two expressions are equal"""
def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
Cmp.__init__(self, p1=p1, p2=p2, weight=weight)
def evaluateCmp(self, e1, e2):
return e1 == e2
def ground(self, inst, lbl, meta):
return "" + self.p1.ground(inst, lbl, meta) + " = " + self.p2.ground(inst, lbl, meta) + ""
def expr(self, meta):
return "(" + self.p1.expr(meta) + " = " + self.p2.expr(meta) + ")" + self.get_str_weight()
def __str__(self):
return "CmpEq(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class CmpLr(Cmp):
"""Compares if e1 < e2"""
def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
Cmp.__init__(self, p1=p1, p2=p2, weight=weight)
def evaluateCmp(self, e1, e2):
return e1 < e2
def ground(self, inst, lbl, meta):
return "" + self.p1.ground(inst, lbl, meta) + " < " + self.p2.ground(inst, lbl, meta) + ""
def expr(self, meta):
return "(" + self.p1.expr(meta) + " < " + self.p2.expr(meta) + ")" + self.get_str_weight()
def __str__(self):
return "CmpLr(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class CmpLE(Cmp):
"""Compares if e1 <= e2"""
def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
Cmp.__init__(self, p1=p1, p2=p2, weight=weight)
def evaluateCmp(self, e1, e2):
return e1 <= e2
def ground(self, inst, lbl, meta):
return "" + self.p1.ground(inst, lbl, meta) + " <= " + self.p2.ground(inst, lbl, meta) + ""
def expr(self, meta):
return "(" + self.p1.expr(meta) + " <= " + self.p2.expr(meta) + ")" + self.get_str_weight()
def __str__(self):
return "CmpLE(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class CmpGr(Cmp):
"""Compares if e1 > e2"""
def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
Cmp.__init__(self, p1=p1, p2=p2, weight=weight)
def evaluateCmp(self, e1, e2):
return e1 > e2
def ground(self, inst, lbl, meta):
return "" + self.p1.ground(inst, lbl, meta) + " > " + self.p2.ground(inst, lbl, meta) + ""
def expr(self, meta):
return "(" + self.p1.expr(meta) + " > " + self.p2.expr(meta) + ")" + self.get_str_weight()
def __str__(self):
return "CmpGr(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class CmpGE(Cmp):
"""Compares if e1 >= e2"""
def __init__(self, p1=None, p2=None, weight=DEFAULT_PREDICATE_WEIGHT):
Cmp.__init__(self, p1=p1, p2=p2, weight=weight)
def evaluateCmp(self, e1, e2):
return e1 >= e2
def ground(self, inst, lbl, meta):
return "" + self.p1.ground(inst, lbl, meta) + " >= " + self.p2.ground(inst, lbl, meta) + ""
def expr(self, meta):
return "(" + self.p1.expr(meta) + " >= " + self.p2.expr(meta) + ")" + self.get_str_weight()
def __str__(self):
return "CmpGE(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class Or(BinaryPredicate):
def __init__(self, p1, p2, weight=DEFAULT_PREDICATE_WEIGHT):
BinaryPredicate.__init__(self, p1=p1, p2=p2, weight=weight)
def evaluate(self, inst, lbl, meta):
e1 = self.p1.evaluate(inst, lbl, meta)
if e1 is None:
raise ValueError('predicate value unbound for e1')
elif e1:
return True
e2 = self.p2.evaluate(inst, lbl, meta)
ret = None if e1 is None or e2 is None else e1 or e2
# print(str(self) + ': ' + str(ret))
if ret is None:
raise ValueError('predicate value unbound for e2')
return ret
def ground(self, inst, lbl, meta):
return "(" + self.p1.ground(inst, lbl, meta) + " | " + self.p2.ground(inst, lbl, meta) + ")"
def expr(self, meta):
return "(" + self.p1.expr(meta) + " | " + self.p2.expr(meta) + ")" + self.get_str_weight()
def __str__(self):
return "Or(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class And(BinaryPredicate):
def __init__(self, p1, p2, weight=DEFAULT_PREDICATE_WEIGHT):
BinaryPredicate.__init__(self, p1=p1, p2=p2, weight=weight)
def evaluate(self, inst, lbl, meta):
e1 = self.p1.evaluate(inst, lbl, meta)
if e1 is None:
raise ValueError('predicate value unbound for e1')
elif not e1:
return False
e2 = self.p2.evaluate(inst, lbl, meta)
ret = None if e1 is None or e2 is None else e1 and e2
# print(str(self) + ': ' + str(ret))
if ret is None:
raise ValueError('predicate value unbound for e2')
return ret
def ground(self, inst, lbl, meta):
return "(" + self.p1.ground(inst, lbl, meta) + " & " + self.p2.ground(inst, lbl, meta) + ")"
def expr(self, meta):
return "(" + self.p1.expr(meta) + " & " + self.p2.expr(meta) + ")" + self.get_str_weight()
def __str__(self):
return "And(" + str(self.p1) + ", " + str(self.p2) + ")" + self.get_str_weight()
class Not(UnaryPredicate):
def __init__(self, p, weight=DEFAULT_PREDICATE_WEIGHT):
UnaryPredicate.__init__(self, p=p, weight=weight)
def evaluate(self, inst, lbl, meta):
e = self.p.evaluate(inst, lbl, meta)
ret = None if e is None else not e
# print(str(self) + ': ' + str(ret))
if ret is None:
raise ValueError('predicate value unbound')
return ret
def ground(self, inst, lbl, meta):
return "~(" + self.p.ground(inst, lbl, meta) + ")"
def expr(self, meta):
return "~(" + self.p.expr(meta) + ")" + self.get_str_weight()
def __str__(self):
return "Not(" + str(self.p) + ")" + self.get_str_weight()
class RuleParser(object):
"""Methods to parse strings as Expression objects."""
# noinspection PyMethodMayBeStatic
def parse(self, s):
"""Parses string 's' and returns an Expression object.
:param s: str
:return: Predicate
"""
# A kludgy way to make it work for both Python 2.7 and 3.5+
try:
import StringIO
rdr = StringIO.StringIO(s).readline
except:
from io import StringIO
rdr = StringIO(s).readline
def precedence(op):
"""Higher value means higher precedence"""
if op == "|":
return 1
elif op == "&":
return 2
elif op == "~":
return 3
elif op == "=" or op == "<=" or op == "<" or op == ">" or op == ">=":
return 4
elif op == "": # usually as endmarker
return 0
else:
return 0
def consume_operator(astk, ostk, op):
while not ostk.is_empty():
top = ostk[len(ostk) - 1]
# print("top: %s op: %s precedence(top): %d precedence(op): %d" % (top,op,precedence(top),precedence(op)))
if op == ")" and top == "(":
ostk.pop()
break
elif op == "]" and top == "[":
# populate predicate weight
ostk.pop()
# There must be a predicate and a numeric literal on stack
if len(astk) < 2:
raise ValueError("invalid weight found")
wtlit = astk.pop()
pred = astk.pop()
if not isinstance(wtlit, Literal) or not isinstance(pred, Predicate):
raise ValueError("invalid weight format")
pred.weight = wtlit.val
astk.push(pred)
break
elif op == "]" and not top == "[":
raise ValueError("invalid ']' found")
if precedence(op) <= precedence(top):
if top == "=":
ostk.pop()
t2 = astk.pop()
t1 = astk.pop()
astk.push(CmpEq(t1, t2))
elif top == "<":
ostk.pop()
t2 = astk.pop()
t1 = astk.pop()
astk.push(CmpLr(t1, t2))
elif top == "<=":
ostk.pop()
t2 = astk.pop()
t1 = astk.pop()
astk.push(CmpLE(t1, t2))
elif top == ">":
ostk.pop()
t2 = astk.pop()
t1 = astk.pop()
astk.push(CmpGr(t1, t2))
elif top == ">=":
ostk.pop()
t2 = astk.pop()
t1 = astk.pop()
astk.push(CmpGE(t1, t2))
elif top == "~":
ostk.pop()
t1 = astk.pop()
astk.push(Not(t1))
elif top == "&":
ostk.pop()
t2 = astk.pop()
t1 = astk.pop()
astk.push(And(t1, t2))
elif top == "|":
ostk.pop()
t2 = astk.pop()
t1 = astk.pop()
astk.push(Or(t1, t2))
else:
break
else:
break
astk = stack()
ostk = stack() # operator stack
g = tokenize.generate_tokens(rdr) # tokenize the string
ret = None
for toknum, tokval, _, _, _ in g:
if toknum == tokenize.OP:
# print('OP ' + tokval + ' ' + str(toknum))
if tokval == "(": # nested predicate
ostk.push(tokval)
elif tokval == ")":
consume_operator(astk, ostk, tokval)
elif tokval == "[": # predicate weight
ostk.push(tokval)
elif tokval == "]":
consume_operator(astk, ostk, tokval)
elif tokval == "-": # handle negative numbers
ostk.push(tokval)
elif tokval in ["=", "&", "|", "~", "<=", "<", ">", ">="]:
consume_operator(astk, ostk, tokval)
ostk.push(tokval)
else:
raise SyntaxError("Illegal operator '" + tokval + "' found in rule expression")
elif toknum == tokenize.NAME:
# print('NAME ' + tokval + ' ' + str(toknum))
astk.push(Var(tokval))
elif toknum == tokenize.STRING:
# print('STR/NUM ' + tokval + ' ' + str(toknum))
astk.push(Literal(tokval, removequotes=True))
elif toknum == tokenize.NUMBER:
# print('STR/NUM ' + tokval + ' ' + str(toknum))
sign = 1
if len(ostk) > 0 and ostk[len(ostk) - 1] == "-":
sign = -1
ostk.pop()
astk.push(Literal(sign * float(tokval)))
elif toknum == tokenize.INDENT or toknum == tokenize.DEDENT:
pass
elif toknum == tokenize.ENDMARKER:
consume_operator(astk, ostk, "")
ret = None if astk.is_empty() else astk.pop()
# print(ret)
if not astk.is_empty():
print(astk)
print(ostk)
raise SyntaxError("Invalid rule syntax in " + str(s))
else:
print('UNK ' + tokval + ' ' + str(toknum))
# print("astk: %s" % (str(astk),))
# print("ostk: %s" % (str(ostk),))
return ret
def string_to_predicate(str_predicate, meta=None, parser=None):
"""Converts a string representation of rule to a Predicate object"""
parser_ = RuleParser() if parser is None else parser
predicate = parser_.parse(str_predicate)
if meta is not None:
predicate.compile(meta)
return predicate
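# Usage sketch (illustrative only; `x` and `y` stand for a dataset and its labels, and the
# helpers get_feature_meta_default()/evaluate_instances_for_predicate() are defined later in
# this module):
#   meta = get_feature_meta_default(x, y)
#   predicate = string_to_predicate("(F1 >= 0 & F2 < 2) | ~(label = 1)", meta)
#   matches = evaluate_instances_for_predicate(predicate, x, y, meta)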
class PredicateContext(object):
"""Holds predicate traversal context
Attributes:
neg: boolean
features: set of tuples
"""
def __init__(self):
self.neg = False
self.features = []
def traverse_predicate_conjunctions(predicate, context):
""" Updates traversal context recursively
Collects all the And/Cmp predicate expressions into predicate list.
Expressions other than {Cmp, And} will raise error.
:param predicate: Expression
:param context: PredicateContext
:return: None
"""
if isinstance(predicate, Cmp):
p1 = None # holds Var
p2 = None # holds Literal
if isinstance(predicate.p1, Var):
p1 = predicate.p1
elif isinstance(predicate.p2, Var):
p1 = predicate.p2
if isinstance(predicate.p1, Literal):
p2 = predicate.p1
elif isinstance(predicate.p2, Literal):
p2 = predicate.p2
if p1 is not None and p2 is not None:
context.features.append((p1, p2, predicate))
else:
raise ValueError("Unbound Var or Literal. Expected Comparison of Var and Literal.")
elif isinstance(predicate, And):
traverse_predicate_conjunctions(predicate.p1, context)
traverse_predicate_conjunctions(predicate.p2, context)
else:
raise ValueError("Expected conjunctive form, but found Or()")
return context
def conjunctive_predicate_to_list(predicate):
context = PredicateContext()
traverse_predicate_conjunctions(predicate, context)
return context.features
class ConjunctiveRule(object):
""" Represents a conjunction (And) of simple one-feature-value comparison predicates """
def __init__(self, predicates, meta, id=None):
self.predicates = predicates
self.meta = meta
# id might be required e.g., when we have to remember the node
# of the tree that this corresponds to.
self.id = id
self.support = None
self.confusion_matrix = None
def set_confusion_matrix(self, positive_indexes, y):
mask = np.array([True] * len(y))
mask[positive_indexes] = False
negative_indexes = np.where(mask)[0]
tp = np.sum(y[positive_indexes])
fp = len(positive_indexes) - tp
        fn = np.sum(y[negative_indexes])
        tn = len(negative_indexes) - fn
self.confusion_matrix = np.array([[tp, fp], [fn, tn]], dtype=np.float32)
self.support = tp * 1.0 / (tp + fp)
@staticmethod
def parse(str_rule, meta):
rule = string_to_predicate(str_rule, meta)
conjunctions = conjunctive_predicate_to_list(rule)
predicates = []
for p1, p2, predicate in conjunctions:
if not (isinstance(predicate.p1, Var) and isinstance(predicate.p2, Literal)):
raise ValueError("Conjunctive predicates must be of format: Variable = Literal")
predicates.append(predicate)
return ConjunctiveRule(predicates, meta)
def evaluate_inst(self, inst, label):
""" Checks if the instance satisfies all the predicates (i.e., 'And') """
result = True
i = 0
while result and i < len(self.predicates):
result = result and self.predicates[i].evaluate(inst, label, self.meta)
i += 1
return result
def where_satisfied(self, insts, labels):
""" Returns all indexes of insts which satisfy the rule
:param insts: np.ndarray
:param labels: np.array
:return: np.array
"""
satisfied = []
for i in range(insts.shape[0]):
if self.evaluate_inst(insts[i, :], labels[i]):
satisfied.append(i)
return np.array(satisfied, dtype=np.int32)
def _str_confusion_mat(self):
if self.confusion_matrix is None:
return 'None'
else:
return "[%s, %s]" % \
(str(list(self.confusion_matrix[0,:])), str(list(self.confusion_matrix[1,:])))
def __str__(self):
predicate_strs = []
for predicate in self.predicates:
predicate_strs.append(predicate.expr(self.meta))
return " & ".join(predicate_strs)
def __len__(self):
if self.predicates is not None:
return len(self.predicates)
return 0
def __repr__(self):
predicate_strs = []
for predicate in self.predicates:
predicate_strs.append(predicate.expr(self.meta))
return "%s%s%s" % \
("" if self.support is None else "support: %0.4f; " % self.support,
" & ".join(predicate_strs),
"" if self.confusion_matrix is None else "; %s" % self._str_confusion_mat())
def convert_feature_ranges_to_rules(ranges, meta):
""" Converts list of maps of feature-ranges to Rule objects.
Each range map in the input list will be converted to a separate Rule.
The leaf nodes of a tree-based model usually partition the feature
space into subspaces defined by corresponding feature ranges. These
feature-ranges can be represented by the ConjunctiveRule data structure.
:param ranges: list of dict
[{feature_index: (min_val, max_val), ...}, ...]
:param meta: FeatureMetadata
:return: list of ConjunctiveRule, list of strings
"""
rules = []
str_rules = []
for range_map in ranges:
predicates = []
for feature, range in range_map.items():
if np.isfinite(range[0]):
predicates.append("%s > %f" % (meta.featurenames[feature], range[0]))
if np.isfinite(range[1]):
predicates.append("%s <= %f" % (meta.featurenames[feature], range[1]))
str_rule = " & ".join(predicates)
rules.append(ConjunctiveRule.parse(str_rule, meta))
str_rules.append(str_rule)
return rules, str_rules
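# Sketch of the expected input/output (the range map below is made up for illustration and
# assumes the default F1, F2, ... feature names; infinite bounds are simply omitted from the
# generated rule string):
#   ranges = [{0: (0.5, np.inf), 1: (-np.inf, 2.0)}]
#   rules, str_rules = convert_feature_ranges_to_rules(ranges, meta)
#   # str_rules -> ["F1 > 0.500000 & F2 <= 2.000000"]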
def convert_conjunctive_rule_to_feature_ranges(rule, meta):
extents = dict()
for feature, featuredef in enumerate(meta.featuredefs):
if featuredef.is_continuous():
extents[feature] = (-np.inf, np.inf)
for predicate in rule.predicates:
feature = predicate.p1.varindex
if not meta.featuredefs[feature].is_continuous():
continue
value = predicate.p2.val
f_range = extents[feature]
if isinstance(predicate, CmpGr) or isinstance(predicate, CmpGE):
f_range = (max(f_range[0], value), f_range[1])
elif isinstance(predicate, CmpLr) or isinstance(predicate, CmpLE):
f_range = (f_range[0], min(f_range[1], value))
extents[feature] = f_range
return extents
def convert_conjunctive_rules_to_strings(rules):
return [str(rule) for rule in rules]
def convert_conjunctive_rules_to_feature_ranges(rules, meta):
ranges = [convert_conjunctive_rule_to_feature_ranges(rule, meta) for rule in rules]
return ranges
def convert_strings_to_conjunctive_rules(str_rules, meta):
rules = []
for str_rule in str_rules:
rules.append(ConjunctiveRule.parse(str_rule, meta))
return rules
def get_max_len_in_rules(rules):
return max([len(rule) for rule in rules])
def get_rule_satisfaction_matrix(x, y, rules):
""" Returns a matrix that shows which instances satisfy which rules
    Each column of the returned matrix corresponds to a rule and each row to an instance.
    If an instance satisfies a rule, the corresponding value will be 1, else 0.
    :param x: np.ndarray
    :param y: np.array
    :param rules: list
    :return: np.ndarray
        matrix with x.shape[0] rows and len(rules) columns
"""
satisfaction_matrix = np.zeros((x.shape[0], len(rules)), dtype=np.int32)
for i, rule in enumerate(rules):
idxs = rule.where_satisfied(x, y)
satisfaction_matrix[idxs, i] = 1
return satisfaction_matrix
def check_if_at_least_one_rule_satisfied(x, y, rules):
""" For each input instance, check if it satisfies at least one rule
Basically performs a disjunction of rules.
Can be applied to rules in DNF format.
:param x: np.ndarray
:param y: np.array
This could be None if unsupervised and if it is not required to evaluate any rule
:param rules: list of rules
:return: np.array
Binary indicator for each instance
"""
sat_vec = np.zeros(x.shape[0], dtype=np.int32)
for rule in rules:
idxs = rule.where_satisfied(x, y)
sat_vec[idxs] += 1
return np.minimum(sat_vec, 1)
def evaluate_ruleset(x, y, rules, average="binary"):
""" For each input instance, check if it satisfies at least one rule and computes F1 """
y_hat = check_if_at_least_one_rule_satisfied(x, y, rules)
precision, recall, f1, _ = precision_recall_fscore_support(y_true=y, y_pred=y_hat, average=average)
return precision, recall, f1
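# Usage sketch (assumes binary labels y in {0, 1} and a list of rules in DNF form, e.g. from
# convert_feature_ranges_to_rules() above):
#   precision, recall, f1 = evaluate_ruleset(x, y, rules)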
def save_strings_to_file(strs, file_path):
if file_path is None or file_path == '':
raise ValueError
with open(file_path, 'w') as f:
for s in strs:
f.write(s + os.linesep)
def load_strings_from_file(file_path):
strs = []
with open(file_path) as f:
for line in f:
line = line.strip()
if line != "":
strs.append(line)
return strs
def get_feature_meta_default(x, y, feature_names=None,
label_name='label', labels=None, featuredefs=None):
""" A simple convenience method that creates a default FeatureMetadata
In the default metadata:
1. If feature names are not provided, the columns/features of x
are assigned names F1, F2, ...
2. The class label is referred to as 'label'
3. All columns are treated as continuous numeric
In case dataset-specific names are to be assigned, create the appropriate
metadata in a similar manner as illustrated here.
"""
if feature_names is None:
f_names = Factor(["F%d" % (i+1) for i in range(x.shape[1])], sort=False)
else:
if x.shape[1] != len(feature_names):
raise ValueError("feature_names should have same length as columns in x")
f_names = Factor(feature_names, sort=False)
if featuredefs is None:
featuredefs = [NumericContinuous(x[:, i]) for i in range(x.shape[1])]
if labels is None:
labels = np.unique(y)
meta = FeatureMetadata(lblname=label_name, lbldef=Factor(labels),
featurenames=f_names, featuredefs=featuredefs)
return meta
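# Minimal sketch (synthetic data, for illustration only; features get default names F1, F2, ...):
#   x = np.random.rand(100, 2)
#   y = np.random.randint(0, 2, 100)
#   meta = get_feature_meta_default(x, y)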
def evaluate_instances_for_predicate(predicate, insts, labels, meta):
satisfied = []
for i in range(insts.shape[0]):
if predicate.evaluate(insts[i, :], labels[i], meta):
satisfied.append(i)
return np.array(satisfied, dtype=np.int32)
def test_rule_apis():
from .gen_samples import read_anomaly_dataset
x, y = read_anomaly_dataset("toy2")
y = np.asarray(y, dtype=np.int32)
meta = get_feature_meta_default(x, y)
print(meta)
parser = RuleParser()
"""
Since the default metadata names the features as F1, F2, ... and
the class label as 'label', we will refer to these by the same names
in the predicate rules.
RuleParser can parse any well-formed logical predicates such as
those in predicate_strs below.
"""
predicate_strs = [
# internal representation:
# CmpEq(Var(label<-1>), Lit(0.0<0>))
# Var(label<-1>) : <-1> means that the variable 'label' is not a regular feature
# Lit(0.0<0>) : the label '0' got changed to 0.0 because it was numeric.
# To make label '0', change the label to string.
# <0> means that '0' is at the 0-th position of the label Factor
"label = 0", # all 0 labeled
# internal representation:
# CmpEq(Var(label<-1>), Lit(1.0<1>))
"label = 1", # all 1 labeled
# internal representation:
# And(Or(Or(CmpGE(Var(F1<0>), Lit(0.0<-1>)), CmpLr(Var(F2<1>), Lit(2.0<-1>))), CmpLr(Var(F1<0>), Lit(-5.0<-1>))), CmpGr(Var(F2<1>), Lit(0.0<-1>)))
# Var(F1<0>) : feature 'F1' is the 0-th feature
# Var(F2<1>) : feature 'F2' is the 1-st feature
# Lit(0.0<-1>) : <-1> here means 0.0 is numeric, and not categorical
# ... and so on ...
"(F1 >= 0 | F2 < 2 | F1 < -5) & F2 > 0", # just an arbitrary predicate
# internal representation:
# Or(Not(CmpGE(Var(F2<1>), Lit(2.0<-1>))), CmpEq(Var(label<-1>), Lit(1.0<1>)))
"(~(F2 >= 2) | (label = 1))", # a Horn clause: (F2 >= 2) => (label = 1)
# internal representation:
# And(And(And(CmpGE(Var(F1<0>), Lit(1.0<-1>)), CmpLr(Var(F1<0>), Lit(5.0<-1>))), CmpGE(Var(F2<1>), Lit(0.0<-1>))), CmpLr(Var(F2<1>), Lit(6.0<-1>)))
"F1 >= 1 & F1 < 5 & (F2 >= 0) & (F2 < 6)", # conjunctive predicate
]
for predicate_str in predicate_strs:
predicate = parser.parse(predicate_str)
predicate.compile(meta) # bind feature indexes to feature names
matches = evaluate_instances_for_predicate(predicate, x, y, meta)
print("%s matched: %d\n repr: %s" % (predicate.expr(meta), len(matches), str(predicate)))
# the rule(s) below are conjunctive
# conjunctive_str = "(F1 >= 1) & (F1 < 5) & (F2 >= 0) & (F2 < 6)"
conjunctive_str = "F1 >= 1 & F1 < 5 & (F2 >= 0) & (F2 < 6)"
# conjunctive rules can be used with the convenience class ConjunctiveRule
rule = ConjunctiveRule.parse(conjunctive_str, meta)
idxs = rule.where_satisfied(x, y)
rule.set_confusion_matrix(idxs, y)
print(str(rule))
if __name__ == "__main__":
test_rule_apis()
| [
"[email protected]"
]
| |
1586616caf1191874f3dfdf0a908af9d390cbd3e | 54eeab2befaa4bf0d96a7bd18110900f8f32c766 | /other/sql/sqlite.py | cc06497fe5586ae73d672cbedf67aa19174a1c04 | []
| no_license | w8833531/mypython | 40239ada90426db73444ee54e6e79decc6c9fc9b | 45ed12a611efd33838766e7bd73840e6d8b73e28 | refs/heads/master | 2021-01-19T06:59:09.790525 | 2017-10-18T06:20:43 | 2017-10-18T06:20:43 | 87,513,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#Since the SQLite driver is built into the Python standard library, we can work with SQLite databases directly.
#To work with a relational database, first connect to it; a database connection is called a Connection.
#After connecting, open a cursor (Cursor); SQL statements are executed through the Cursor, and the results are then fetched.
#Import the SQLite driver
import sqlite3
try:
    # Connect to the SQLite database
    # The database file is test.db
    # If the file does not exist, it will be created automatically in the current directory:
conn = sqlite3.connect('test.db')
cursor = conn.cursor()
# cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
cursor.execute('insert into user (id, name) values(\'3\', \'Wu\')')
print cursor.rowcount
except sqlite3.Error as e:
print e
finally:
cursor.close()
conn.commit()
conn.close()
#When working with a database in Python, first import the driver for that database, then manipulate data through the Connection and Cursor objects.
#Make sure opened Connection and Cursor objects are closed properly; otherwise resources will leak.
try:
conn = sqlite3.connect('test.db')
cursor = conn.cursor()
cursor.execute('select * from user')
values = cursor.fetchall()
print values
except sqlite3.Error as e:
print e
finally:
cursor.close()
conn.close()
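# Note: the INSERT above builds the SQL string by hand for simplicity; the sqlite3 driver also
# accepts parameterized queries, which avoid quoting/escaping issues, e.g. (sketch; the values
# '4' and 'Li' are made up):
#   cursor.execute('insert into user (id, name) values (?, ?)', ('4', 'Li'))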
| [
"[email protected]"
]
| |
24f4ad0bc75271d08496072c0885072c734d3990 | 5b1ff6054c4f60e4ae7315db9f20a334bc0b7634 | /Launchkey_MK2/Colors.py | 6f5028d35ea48a5ef4fb11c613cb1206a59fc846 | []
| no_license | maratbakirov/AbletonLive9_RemoteScripts | 2869122174634c75405a965401aa97a2dae924a1 | 4a1517c206353409542e8276ebab7f36f9bbd4ef | refs/heads/master | 2021-06-05T14:38:27.959025 | 2021-05-09T11:42:10 | 2021-05-09T11:42:10 | 13,348,327 | 3 | 4 | null | 2016-10-16T13:51:11 | 2013-10-05T16:27:04 | Python | UTF-8 | Python | false | false | 4,566 | py | #Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Launchkey_MK2/Colors.py
from _Framework.ButtonElement import Color
from .consts import BLINK_LED_CHANNEL, PULSE_LED_CHANNEL
class Blink(Color):
def __init__(self, midi_value = 0, *a, **k):
super(Blink, self).__init__(midi_value, *a, **k)
def draw(self, interface):
interface.send_value(0)
interface.send_value(self.midi_value, channel=BLINK_LED_CHANNEL)
class Pulse(Color):
def __init__(self, midi_value = 0, *a, **k):
super(Pulse, self).__init__(midi_value, *a, **k)
def draw(self, interface):
interface.send_value(0)
interface.send_value(self.midi_value, channel=PULSE_LED_CHANNEL)
class Rgb:
BLACK = Color(0)
DARK_GREY = Color(1)
GREY = Color(2)
WHITE = Color(3)
RED = Color(5)
RED_BLINK = Blink(5)
RED_PULSE = Pulse(5)
RED_HALF = Color(7)
ORANGE = Color(9)
ORANGE_HALF = Color(11)
AMBER = Color(96)
AMBER_HALF = Color(14)
YELLOW = Color(13)
YELLOW_HALF = Color(15)
DARK_YELLOW = Color(17)
DARK_YELLOW_HALF = Color(19)
GREEN = Color(21)
GREEN_BLINK = Blink(21)
GREEN_PULSE = Pulse(21)
GREEN_HALF = Color(27)
MINT = Color(29)
MINT_HALF = Color(31)
LIGHT_BLUE = Color(37)
LIGHT_BLUE_HALF = Color(39)
BLUE = Color(45)
BLUE_HALF = Color(47)
DARK_BLUE = Color(49)
DARK_BLUE_HALF = Color(51)
PURPLE = Color(53)
PURPLE_HALF = Color(55)
DARK_PURPLE = Color(59)
BRIGHT_PURPLE = Color(81)
DARK_ORANGE = Color(84)
CLIP_COLOR_TABLE = {15549221: 60,
12411136: 61,
11569920: 62,
8754719: 63,
5480241: 64,
695438: 65,
31421: 66,
197631: 67,
3101346: 68,
6441901: 69,
8092539: 70,
3947580: 71,
16712965: 72,
12565097: 73,
10927616: 74,
8046132: 75,
4047616: 76,
49071: 77,
1090798: 78,
5538020: 79,
8940772: 80,
10701741: 81,
12008809: 82,
9852725: 83,
16149507: 84,
12581632: 85,
8912743: 86,
1769263: 87,
2490280: 88,
6094824: 89,
1698303: 90,
9160191: 91,
9611263: 92,
12094975: 93,
14183652: 94,
16726484: 95,
16753961: 96,
16773172: 97,
14939139: 98,
14402304: 99,
12492131: 100,
9024637: 101,
8962746: 102,
10204100: 103,
8758722: 104,
13011836: 105,
15810688: 106,
16749734: 107,
16753524: 108,
16772767: 109,
13821080: 110,
12243060: 111,
11119017: 112,
13958625: 113,
13496824: 114,
12173795: 115,
13482980: 116,
13684944: 117,
14673637: 118,
16777215: 119}
RGB_COLOR_TABLE = ((0, 0),
(1, 1973790),
(2, 8355711),
(3, 16777215),
(4, 16731212),
(5, 16711680),
(6, 5832704),
(7, 1638400),
(8, 16760172),
(9, 16733184),
(10, 5840128),
(11, 2562816),
(12, 16777036),
(13, 16776960),
(14, 5855488),
(15, 1644800),
(16, 8978252),
(17, 5570304),
(18, 1923328),
(19, 1321728),
(20, 5046092),
(21, 65280),
(22, 22784),
(23, 6400),
(24, 5046110),
(25, 65305),
(26, 22797),
(27, 6402),
(28, 5046152),
(29, 65365),
(30, 22813),
(31, 7954),
(32, 5046199),
(33, 65433),
(34, 22837),
(35, 6418),
(36, 5030911),
(37, 43519),
(38, 16722),
(39, 4121),
(40, 5015807),
(41, 22015),
(42, 7513),
(43, 2073),
(44, 5000447),
(45, 255),
(46, 89),
(47, 25),
(48, 8867071),
(49, 5505279),
(50, 1638500),
(51, 983088),
(52, 16731391),
(53, 16711935),
(54, 5832793),
(55, 1638425),
(56, 16731271),
(57, 16711764),
(58, 5832733),
(59, 2228243),
(60, 16717056),
(61, 10040576),
(62, 7950592),
(63, 4416512),
(64, 211200),
(65, 22325),
(66, 21631),
(67, 255),
(68, 17743),
(69, 2425036),
(70, 8355711),
(71, 2105376),
(72, 16711680),
(73, 12451629),
(74, 11529478),
(75, 6618889),
(76, 1084160),
(77, 65415),
(78, 43519),
(79, 11007),
(80, 4129023),
(81, 7995647),
(82, 11672189),
(83, 4202752),
(84, 16730624),
(85, 8970502),
(86, 7536405),
(87, 65280),
(88, 3931942),
(89, 5898097),
(90, 3735500),
(91, 5999359),
(92, 3232198),
(93, 8880105),
(94, 13835775),
(95, 16711773),
(96, 16744192),
(97, 12169216),
(98, 9502464),
(99, 8609031),
(100, 3746560),
(101, 1330192),
(102, 872504),
(103, 1381674),
(104, 1450074),
(105, 6896668),
(106, 11010058),
(107, 14569789),
(108, 14182940),
(109, 16769318),
(110, 10412335),
(111, 6796559),
(112, 1973808),
(113, 14483307),
(114, 8454077),
(115, 10131967),
(116, 9332479),
(117, 4210752),
(118, 7697781),
(119, 14745599),
(120, 10485760),
(121, 3473408),
(122, 1757184),
(123, 475648),
(124, 12169216),
(125, 4141312),
(126, 11755264),
(127, 4920578)) | [
"[email protected]"
]
| |
ab14c4d4a9d8c432ae24647c18b9e98e4968ece0 | 90be755a741d6c93dd59d4acef8b27b4cf93ff54 | /src/elsia/scripts/get_abs_ori.py | 8decc0c43e2f8716f8f28629c4b7ed417de7cc24 | []
| no_license | karry3775/Elsia_ws | 05aa5786a6f3f64b70c7ceafead6d72d4ca18bab | 031f8006e9a439d9947be5ed288a666f20fca3a7 | refs/heads/master | 2023-02-21T05:21:10.842475 | 2021-01-23T14:58:57 | 2021-01-23T15:21:46 | 326,032,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,405 | py | #!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
from tf.transformations import quaternion_from_euler, euler_from_quaternion
from cv_bridge import CvBridge, CvBridgeError
from nav_msgs.msg import Odometry
import cv2
import numpy as np
import math as m
# initialize the node
rospy.init_node("get_abs_ori_node")
# global variables
best_ori_estimate = 0.0
ini_angle_offset = 0.0
# create publishers
odom_pub = rospy.Publisher("/abs_orientation_odom", Odometry, queue_size=10)
image_pub = rospy.Publisher("/considered_image", Image, queue_size=10)
# global variable for whether to DEBUG or not
DEBUG = False
def wrap2Pi(theta):
wrappedUpVal = m.atan2(m.sin(theta), m.cos(theta))
return wrappedUpVal
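# e.g. wrap2Pi(3 * m.pi / 2) -> -m.pi / 2: angles are folded into the (-pi, pi] range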
def abs_ori_cb(msg):
global best_ori_estimate
try:
cv_image = CvBridge().imgmsg_to_cv2(msg, "bgr8")
# crop out the excess image
cv_image = cv_image[100:300, 100:300, :]
except CvBridgeError as e:
print("[INFO]: Error in obtaining image from CvBridge! Skipping frame!")
else:
# convert to gray
gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
# convert to edges
edges = cv2.Canny(gray, 50, 150)
cv2.imshow("edges", edges)
cv2.waitKey(1)
# convert to thresholded image
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
# extract hough lines
lines = cv2.HoughLinesP(edges, 1, m.pi/180, 2, None, 20, 1)
# list of [count, angle] pairs
cnt_ang_pair = []
# draw lines
for i in range(lines.shape[0]):
for line in lines[i]:
pt1 = (line[0], line[1])
pt2 = (line[2], line[3])
cv2.line(cv_image, pt1, pt2, (255, 0, 0), 3)
# calculate angle
ang = m.atan2(pt2[1]-pt1[1], pt2[0]-pt1[0])
cnt_ang_pair.append([1, m.degrees(ang)])
###################### show the detected lines ########################
cv2.imshow("frame", cv_image)
cv2.waitKey(1)
#######################################################################
if len(cnt_ang_pair) != 0:
# sort the cnt_ang_pair
cnt_ang_pair.sort(key=lambda x: x[1])
# bunch up the pairs based on predetermined threshold
ang_thresh_deg = 1
bunch = [cnt_ang_pair[0]]
for i in range(1, len(cnt_ang_pair)):
pairs = cnt_ang_pair[i]
if abs(pairs[1] - bunch[-1][1]) < ang_thresh_deg:
# update the value and the count
new_count = bunch[-1][0] + 1
new_value = (
(bunch[-1][1] * (new_count - 1) * 1.0) / new_count) + (pairs[1]*1.0) / new_count
bunch[-1] = [new_count, new_value]
else:
# time to append
bunch.append(pairs)
# sort bunch based on first value i.e. count
bunch.sort(key=lambda x: x[0], reverse=True)
if DEBUG:
print("The cnt_ang_pair list is: \n {} \n".format(cnt_ang_pair))
print("The bunched up list is: \n {} \n".format(bunch))
# use the first value of bunch
f_ori = m.radians(bunch[0][1]) # in degrees
f_ori1 = wrap2Pi(f_ori + m.radians(90) - ini_angle_offset)
f_ori2 = wrap2Pi(f_ori + m.radians(-90) - ini_angle_offset)
f_ori3 = wrap2Pi(f_ori + m.radians(180) - ini_angle_offset)
# we need to find which has the smallest difference
# f_ori, f_ori1 or f_ori2
if(abs(wrap2Pi(best_ori_estimate - f_ori)) < abs(wrap2Pi(best_ori_estimate - f_ori1)) and abs(wrap2Pi(best_ori_estimate - f_ori)) < abs(wrap2Pi(best_ori_estimate - f_ori2)) and abs(wrap2Pi(best_ori_estimate - f_ori)) < abs(wrap2Pi(best_ori_estimate - f_ori3))):
best_ori_estimate_temp = f_ori
elif(abs(wrap2Pi(best_ori_estimate - f_ori1)) < abs(wrap2Pi(best_ori_estimate - f_ori)) and abs(wrap2Pi(best_ori_estimate - f_ori1)) < abs(wrap2Pi(best_ori_estimate - f_ori2)) and abs(wrap2Pi(best_ori_estimate - f_ori1)) < abs(wrap2Pi(best_ori_estimate - f_ori3))):
best_ori_estimate_temp = f_ori1
elif(abs(wrap2Pi(best_ori_estimate - f_ori2)) < abs(wrap2Pi(best_ori_estimate - f_ori)) and abs(wrap2Pi(best_ori_estimate - f_ori2)) < abs(wrap2Pi(best_ori_estimate - f_ori1)) and abs(wrap2Pi(best_ori_estimate - f_ori2)) < abs(wrap2Pi(best_ori_estimate - f_ori3))):
best_ori_estimate_temp = f_ori2
else:
best_ori_estimate_temp = f_ori3
            # keep the candidate closest to the previous estimate; best_ori_estimate is stored in radians
best_ori_estimate = best_ori_estimate_temp
if DEBUG:
print("best ori estimate: {} deg".format(
m.degrees(best_ori_estimate)))
# to debug lets plot the best_ori_estimate in the image
pt1 = [200, 200]
pt2 = [200, 200]
line_angle = best_ori_estimate
pt2[0] = int(pt2[0] + 200*m.cos(line_angle))
pt2[1] = int(pt2[1] + 200*m.sin(line_angle))
cv2.line(cv_image, (pt1[0], pt1[1]),
(pt2[0], pt2[1]), (0, 0, 255), 3)
# publish abs odometry for yaw
# create euler angles
roll = 0
pitch = 0
yaw = -best_ori_estimate
# convert to quaternion
q = quaternion_from_euler(roll, pitch, yaw)
# create a odom message
odom_msg = Odometry()
odom_msg.pose.pose.orientation.x = q[0]
odom_msg.pose.pose.orientation.y = q[1]
odom_msg.pose.pose.orientation.z = q[2]
odom_msg.pose.pose.orientation.w = q[3]
odom_msg.header.frame_id = "odom"
odom_msg.header.stamp = rospy.Time().now()
odom_pub.publish(odom_msg)
rosimg = CvBridge().cv2_to_imgmsg(cv_image, "bgr8")
image_pub.publish(rosimg)
if __name__ == "__main__":
try:
abs_ori_sub = rospy.Subscriber(
"/stereo/left_upward/image_rect", Image, abs_ori_cb)
rospy.spin()
except rospy.ROSInterruptException:
pass
| [
"[email protected]"
]
| |
b1dde0477b45dffe82a9f680f72b5dc5f910eee9 | 3eb4d64a8bb0bc240a2ef189724f4d51b5275eac | /heltour/tournament/migrations/0106_auto_20161031_0546.py | 059d9943ff0cb31240b7a8a561df84ba822d9f3b | [
"MIT"
]
| permissive | brucemubayiwa/heltour | c01cc88be7f86dce8246f619d7aa2da37e0e0ac2 | fa4e9b06343acaf6a8a99337860e1ad433e68f6b | refs/heads/master | 2021-01-23T19:59:04.099215 | 2017-09-06T03:34:31 | 2017-09-06T03:34:31 | 102,840,526 | 1 | 0 | null | 2017-09-08T08:53:30 | 2017-09-08T08:53:30 | null | UTF-8 | Python | false | false | 2,839 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-31 05:46
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import select2.fields
class Migration(migrations.Migration):
dependencies = [
('tournament', '0105_seasonplayer_final_rating'),
]
operations = [
migrations.AlterField(
model_name='alternateassignment',
name='player',
field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
),
migrations.AlterField(
model_name='availabletime',
name='player',
field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
),
migrations.AlterField(
model_name='gamenomination',
name='nominating_player',
field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
),
migrations.AlterField(
model_name='leaguemoderator',
name='player',
field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
),
migrations.AlterField(
model_name='playeravailability',
name='player',
field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
),
migrations.AlterField(
model_name='playerbye',
name='player',
field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
),
migrations.AlterField(
model_name='playerlateregistration',
name='player',
field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
),
migrations.AlterField(
model_name='playerwithdrawl',
name='player',
field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
),
migrations.AlterField(
model_name='seasonplayer',
name='player',
field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
),
migrations.AlterField(
model_name='seasonprizewinner',
name='player',
field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
),
migrations.AlterField(
model_name='teammember',
name='player',
field=select2.fields.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Player'),
),
]
| [
"[email protected]"
]
| |
3ec6bfaea601759fd9ce090e2468cd49049e454d | 88cfeb8f7076450e7a38d31ab2d11883c1818c8d | /net/dpn92.py | bee4297159590c50e4ca40b1570569426a17eb3b | []
| no_license | ZQPei/Alibaba_Cloud_German_AI_Challenge_for_Earth_Observation | 4e5a127c12e0c02ed1914ab000a131e1a7f7d844 | c2efb32763af0a56a3a7ecb9d83c0744f71d5c14 | refs/heads/master | 2020-04-26T04:31:57.731178 | 2019-02-17T01:10:55 | 2019-02-17T01:10:55 | 173,305,034 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,563 | py | '''Dual Path Networks in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
super(Bottleneck, self).__init__()
self.out_planes = out_planes
self.dense_depth = dense_depth
self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
self.bn2 = nn.BatchNorm2d(in_planes)
self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(out_planes+dense_depth)
self.shortcut = nn.Sequential()
if first_layer:
self.shortcut = nn.Sequential(
nn.Conv2d(last_planes, out_planes+dense_depth, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_planes+dense_depth)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
x = self.shortcut(x)
d = self.out_planes
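        # Dual path: the first `d` channels follow the residual path (element-wise add with the
        # shortcut), while the remaining `dense_depth` channels are concatenated densely.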
out = torch.cat([x[:,:d,:,:]+out[:,:d,:,:], x[:,d:,:,:], out[:,d:,:,:]], 1)
out = F.relu(out)
return out
class DPN(nn.Module):
def __init__(self, cfg):
super(DPN, self).__init__()
in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']
self.conv1 = nn.Conv2d(10, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.last_planes = 64
self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
self.linear = nn.Linear(out_planes[3]+(num_blocks[3]+1)*dense_depth[3], 17)
def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for i,stride in enumerate(strides):
layers.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i==0))
self.last_planes = out_planes + (i+2) * dense_depth
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def DPN26():
cfg = {
'in_planes': (96,192,384,768),
'out_planes': (256,512,1024,2048),
'num_blocks': (2,2,2,2),
'dense_depth': (16,32,24,128)
}
return DPN(cfg)
def DPN92():
cfg = {
'in_planes': (96,192,384,768),
'out_planes': (256,512,1024,2048),
'num_blocks': (3,4,20,3),
'dense_depth': (16,32,24,128)
}
return DPN(cfg)
def test():
net = DPN92()
    x = torch.randn(1, 10, 32, 32)  # match conv1 above, which expects 10 input channels
y = net(x)
print(y)
# test()
| [
"[email protected]"
]
| |
9262d9b3881e896a97b190c2ea16eeea43d24d9c | 958c19436632b41b43c9462337d13e836935a9da | /E01_python_for_data_analysis/04_NumPy/0403_numpy_cal.py | 24af0cf6e7c79bf551a52bc51df3c822da19b676 | []
| no_license | Vincent105/ML | 4752b2a99c124e01e40e383a0177fb5d82115cb6 | fa926caabf83628b3fb7d74cee02a3e923a917f7 | refs/heads/master | 2020-12-29T18:21:50.144711 | 2020-10-12T09:56:41 | 2020-10-12T09:56:41 | 238,697,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | import numpy as np
arr = np.array([[1., 2., 3.], [4., 5., 6.]])
print(arr)
print(arr * arr)
print(arr * arr - arr)
# Arithmetic operations between an array and a scalar broadcast the scalar to every element:
print(1 / arr)
print(arr * 0.5)
# Comparisons between equal-sized arrays produce boolean arrays:
arr2 = np.array([[0., 4., 1.], [7., 2., 12.]])
print(arr2)
print(arr2 > arr)
| [
"[email protected]"
]
| |
b352068896dbae835d20da90ab54de2d4f34fec9 | d2eb7bd335175edd844a3e6c1c633ee0dc2dbb25 | /contests_atcoder/arc017/arc017_c.py | 80b806e3389e7dfd81e012229a4a9723cc08f1d5 | [
"BSD-2-Clause"
]
| permissive | stdiorion/competitive-programming | 5020a12b85f1e691ceb0cacd021606a9dc58b72c | e7cf8ef923ccefad39a1727ca94c610d650fcb76 | refs/heads/main | 2023-03-27T01:13:42.691586 | 2021-03-08T08:05:53 | 2021-03-08T08:05:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from bisect import bisect_left, bisect_right
n, x = map(int, input().split())
w = [int(input()) for _ in range(n)]
pt1 = w[:16]
pt2 = w[16:]
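# Meet-in-the-middle: enumerate subset sums of each half (at most 2**16 per half), then for
# every sum from the first half count matching complements in the sorted second half via
# binary search.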
w1 = []
for bit in range(1 << len(pt1)):
weight = 0
for i in range(len(pt1)):
if (bit >> i) & 1:
weight += pt1[i]
w1.append(weight)
if not len(pt2):
print(w1.count(x))
exit()
w2 = []
for bit in range(1 << len(pt2)):
weight = 0
for i in range(len(pt2)):
if (bit >> i) & 1:
weight += pt2[i]
w2.append(weight)
ans = 0
w1.sort()
w2.sort()
i2 = 0
for weight1 in w1:
ans += bisect_right(w2, x - weight1) - bisect_left(w2, x - weight1)
print(ans) | [
"[email protected]"
]
| |
23241518e94ae0d5c41c03ff56152a117f302c17 | d7ec67a5ba315103fa6a6bae6dc045f1fecf7add | /docs_master_tensorflow/keras/tf_dqn_simple_master/dqn_agent.py | d0dc2cccfa0c1fbf14d21175a9b41c3605ff96e2 | []
| no_license | munezou/PycharmProject | cc62f5e4278ced387233a50647e8197e009cc7b4 | 26126c02cfa0dc4c0db726f2f2cabb162511a5b5 | refs/heads/master | 2023-03-07T23:44:29.106624 | 2023-01-23T16:16:08 | 2023-01-23T16:16:08 | 218,804,126 | 2 | 1 | null | 2023-02-28T23:58:22 | 2019-10-31T15:57:22 | Jupyter Notebook | UTF-8 | Python | false | false | 4,247 | py | from collections import deque
import os
import numpy as np
import tensorflow as tf
class DQNAgent:
"""
Multi Layer Perceptron with Experience Replay
"""
def __init__(self, enable_actions, environment_name):
# parameters
self.name = os.path.splitext(os.path.basename(__file__))[0]
self.environment_name = environment_name
self.enable_actions = enable_actions
self.n_actions = len(self.enable_actions)
self.minibatch_size = 32
self.replay_memory_size = 1000
self.learning_rate = 0.001
self.discount_factor = 0.9
self.exploration = 0.1
self.model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models")
self.model_name = "{}.ckpt".format(self.environment_name)
# replay memory
self.D = deque(maxlen=self.replay_memory_size)
# model
self.init_model()
# variables
self.current_loss = 0.0
def init_model(self):
# input layer (8 x 8)
self.x = tf.placeholder(tf.float32, [None, 8, 8])
# flatten (64)
x_flat = tf.reshape(self.x, [-1, 64])
# fully connected layer (32)
W_fc1 = tf.Variable(tf.truncated_normal([64, 64], stddev=0.01))
b_fc1 = tf.Variable(tf.zeros([64]))
h_fc1 = tf.nn.relu(tf.matmul(x_flat, W_fc1) + b_fc1)
# output layer (n_actions)
W_out = tf.Variable(tf.truncated_normal([64, self.n_actions], stddev=0.01))
b_out = tf.Variable(tf.zeros([self.n_actions]))
self.y = tf.matmul(h_fc1, W_out) + b_out
# loss function
self.y_ = tf.placeholder(tf.float32, [None, self.n_actions])
self.loss = tf.reduce_mean(tf.square(self.y_ - self.y))
# train operation
optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
self.training = optimizer.minimize(self.loss)
# saver
self.saver = tf.train.Saver()
# session
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
def Q_values(self, state):
# Q(state, action) of all actions
return self.sess.run(self.y, feed_dict={self.x: [state]})[0]
def select_action(self, state, epsilon):
if np.random.rand() <= epsilon:
# random
return np.random.choice(self.enable_actions)
else:
# max_action Q(state, action)
return self.enable_actions[np.argmax(self.Q_values(state))]
def store_experience(self, state, action, reward, state_1, terminal):
self.D.append((state, action, reward, state_1, terminal))
def experience_replay(self):
state_minibatch = []
y_minibatch = []
# sample random minibatch
minibatch_size = min(len(self.D), self.minibatch_size)
minibatch_indexes = np.random.randint(0, len(self.D), minibatch_size)
for j in minibatch_indexes:
state_j, action_j, reward_j, state_j_1, terminal = self.D[j]
action_j_index = self.enable_actions.index(action_j)
y_j = self.Q_values(state_j)
if terminal:
y_j[action_j_index] = reward_j
else:
# reward_j + gamma * max_action' Q(state', action')
y_j[action_j_index] = reward_j + self.discount_factor * np.max(self.Q_values(state_j_1)) # NOQA
state_minibatch.append(state_j)
y_minibatch.append(y_j)
# training
self.sess.run(self.training, feed_dict={self.x: state_minibatch, self.y_: y_minibatch})
# for log
self.current_loss = self.sess.run(self.loss, feed_dict={self.x: state_minibatch, self.y_: y_minibatch})
def load_model(self, model_path=None):
if model_path:
# load from model_path
self.saver.restore(self.sess, model_path)
else:
# load from checkpoint
checkpoint = tf.train.get_checkpoint_state(self.model_dir)
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
def save_model(self):
self.saver.save(self.sess, os.path.join(self.model_dir, self.model_name))
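# Rough usage sketch (the environment and the state/reward variables are assumptions, not part
# of this file):
#   agent = DQNAgent(enable_actions=(0, 1, 2), environment_name="demo")
#   action = agent.select_action(state, agent.exploration)
#   agent.store_experience(state, action, reward, next_state, terminal)
#   agent.experience_replay()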
| [
"[email protected]"
]
| |
42bcc717daa52c76b623b77adb64ac1e50d8fe60 | b57d337ddbe946c113b2228a0c167db787fd69a1 | /scr/py00033SpiderDeath.py | 6fd5b9134c2358a8544c5ef441100d8e4da50196 | []
| no_license | aademchenko/ToEE | ebf6432a75538ae95803b61c6624e65b5cdc53a1 | dcfd5d2de48b9d9031021d9e04819b309d71c59e | refs/heads/master | 2020-04-06T13:56:27.443772 | 2018-11-14T09:35:57 | 2018-11-14T09:35:57 | 157,520,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | from toee import *
from utilities import *
from combat_standard_routines import *
def san_dying( attachee, triggerer ):
if should_modify_CR( attachee ):
modify_CR( attachee, get_av_level() )
if (attachee.map == 5069):
game.global_vars[3] = game.global_vars[3] + 1
if (game.party_alignment == LAWFUL_NEUTRAL or game.party_alignment == CHAOTIC_NEUTRAL or game.party_alignment == TRUE_NEUTRAL or game.party_alignment == LAWFUL_EVIL or game.party_alignment == CHAOTIC_EVIL or game.party_alignment == NEUTRAL_EVIL):
ring = attachee.item_find( 3000 )
ring.destroy()
elif (attachee.map == 5002):
if (game.party_alignment == LAWFUL_GOOD or game.party_alignment == CHAOTIC_GOOD or game.party_alignment == NEUTRAL_GOOD or game.party_alignment == LAWFUL_EVIL or game.party_alignment == CHAOTIC_EVIL or game.party_alignment == NEUTRAL_EVIL):
ring = attachee.item_find( 3000 )
ring.destroy()
elif (attachee.map == 5003):
if (game.party_alignment == LAWFUL_GOOD or game.party_alignment == CHAOTIC_GOOD or game.party_alignment == NEUTRAL_GOOD or game.party_alignment == LAWFUL_NEUTRAL or game.party_alignment == CHAOTIC_NEUTRAL or game.party_alignment == TRUE_NEUTRAL):
ring = attachee.item_find( 3000 )
ring.destroy()
return RUN_DEFAULT
| [
"[email protected]"
]
| |
fa428df271c1a095589ea4dda94bbd27ca4f7705 | 06870667821f26b0c8c96b52321938df58fd91f6 | /parking_scrapers/scrapers/new_haven.py | 85e9236cddfb6c481a2d0bfc60ccfb3c43b84610 | []
| no_license | jmcarp/open-parking-spaces | 69244962a316fe6bd3273ba6837bfe8d0f1f4b8e | 5f855a1b25c9109f15af26e1fb3b4ecbd3ef5845 | refs/heads/master | 2023-01-24T11:43:53.641262 | 2020-11-30T19:00:46 | 2020-11-30T19:00:46 | 312,906,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | import re
from typing import Iterator
import lxml.html
import requests
from base import LotSpaces, Scraper
class NewHavenScraper(Scraper):
"""Scrape New Haven html.
https://parknewhaven.com
"""
HTML_URL = "https://parknewhaven.com"
TIMEOUT = 5
SPACES_PATTERN = re.compile(r"(.*?):\s+(\d+)% \((\d+) available\)", re.IGNORECASE)
name = "new_haven"
def fetch_spaces(self) -> Iterator[LotSpaces]:
response = requests.get(
self.HTML_URL,
headers={"User-Agent": "open-parking-spaces"},
timeout=self.TIMEOUT,
)
response.raise_for_status()
doc = lxml.html.fromstring(response.content)
links = doc.xpath(
'//div[contains(@class, "tickr")]//a[contains(@class, "tickrlink")]'
)
for link in links:
match = self.SPACES_PATTERN.search(link.text_content())
assert match is not None
lot, percent, spaces = match.groups()
yield LotSpaces(
lot=lot,
spaces=int(spaces),
url=link.attrib["href"],
)
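# Example usage (sketch; requires network access to parknewhaven.com):
#   for lot in NewHavenScraper().fetch_spaces():
#       print(lot.lot, lot.spaces, lot.url)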
| [
"[email protected]"
]
| |
a31b322b32555a927b3a63f5092900042142b843 | 27398b2a8ed409354d6a36c5e1d2089dad45b4ac | /backend/common/decapod_common/models/properties.py | 2a7dbbf75e03a2cf644b94bf0f4bf491dda45988 | [
"Apache-2.0"
]
| permissive | amar266/ceph-lcm | e0d6c1f825f5ac07d2926bfbe6871e760b904340 | 6b23ffd5b581d2a1743c0d430f135261b7459e38 | refs/heads/master | 2021-04-15T04:41:55.950583 | 2018-03-23T12:51:26 | 2018-03-23T12:51:26 | 126,484,605 | 0 | 0 | null | 2018-03-23T12:50:28 | 2018-03-23T12:50:27 | null | UTF-8 | Python | false | false | 3,449 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains special property descriptors."""
import enum
import importlib
class Property:
SENTINEL = object()
class ChoicesProperty(Property):
def __init__(self, attr_name, choices):
self.choices = choices
self.attr_name = attr_name
def __get__(self, instance, owner):
value = getattr(instance, self.attr_name, self.SENTINEL)
if value is self.SENTINEL:
raise AttributeError()
return value
def __set__(self, instance, value):
choices = self.choices
if callable(choices) and type(choices) is not enum.EnumMeta:
choices = choices()
try:
if value in choices:
setattr(instance, self.attr_name, value)
return
except TypeError:
pass
raise ValueError("Unknown error")
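# Usage sketch (the model class below is hypothetical, not part of this module):
#   class ClusterModel:
#       configuration = ChoicesProperty("_configuration", ["new", "ready", "deleted"])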
class ModelProperty(Property):
@classmethod
def get_value_id(cls, value):
if hasattr(value, "model_id"):
return value.model_id
if isinstance(value, dict):
return value.get("_id", value.get("id"))
if value is None:
return None
return str(value)
@classmethod
def get_model(cls, klass, model_id):
return klass.find_by_model_id(model_id)
def __init__(self, model_class_name, id_attribute):
self.model_class_name = model_class_name
self.id_attribute = id_attribute
self.instance_attribute = id_attribute + "_instance"
def __get__(self, instance, owner):
value = instance.__dict__.get(self.instance_attribute, self.SENTINEL)
if value is not self.SENTINEL:
return value
model_id = instance.__dict__.get(self.id_attribute)
model = self.get_model(self.get_class(), model_id)
instance.__dict__[self.instance_attribute] = model
return model
def __set__(self, instance, value):
value_id = self.get_value_id(value)
instance.__dict__[self.id_attribute] = value_id
instance.__dict__[self.instance_attribute] = self.SENTINEL
def get_class(self):
module, obj_name = self.model_class_name.rsplit(".", 1)
module = importlib.import_module(module)
klass = getattr(module, obj_name)
return klass
class ModelListProperty(ModelProperty):
@classmethod
def get_value_id(cls, value):
return [super(ModelListProperty, cls).get_value_id(item)
for item in value]
@classmethod
def get_model(cls, klass, model_id):
query = {
"model_id": {"$in": model_id},
"is_latest": True
}
models = []
for item in klass.list_raw(query):
model = klass()
model.update_from_db_document(item)
models.append(model)
return models
| [
"[email protected]"
]
| |
215a011898e29aea78aa8531f6aadbd936358259 | d68c9105c03bef9dce2e438b5b91c2bdd0d856e2 | /[9095] 1, 2, 3 더하기.py | 308b6ff62e407f5494c58d595b9839b3addcf2e6 | []
| no_license | newfull5/Baekjoon-Online-Judge | 2a2dd1080af234551ecab6277968fedeb170a1f4 | 00d04f6c21080e3ad7c0fb06ca311f2324a591c0 | refs/heads/master | 2023-06-29T21:05:07.539911 | 2021-07-16T09:23:46 | 2021-07-16T09:23:46 | 267,557,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | def Reculsive(n):
global answer
if n >=3:
Reculsive(n-3)
if n >=2:
Reculsive(n-2)
if n >=1:
Reculsive(n-1)
if n == 0:
answer += 1
return
for _ in range(int(input())):
answer = 0
Reculsive(int(input()))
print(answer)
| [
"[email protected]"
]
| |
fcf3fe369d825fc8f70166e86d6154d98a1eccfa | 23bc3e2bc6b2b9e3fd19f738d4767d09bec590b5 | /CourseWork/Labs/lab3/vivek_pygame_base_template.py | e880efac680ed5ff5a5856816fdf28423d8e2bb4 | []
| no_license | vivekVells/GameDesignProgramming | 4e683114bf487d2ea4c5c1c4a2b7a3375e8be8e7 | bee0fbc4d0a8d0e4001d6c9c9b35fea6b74da1f9 | refs/heads/master | 2020-03-27T13:49:52.159394 | 2018-12-12T09:37:01 | 2018-12-12T09:37:01 | 146,630,596 | 0 | 0 | null | 2018-12-12T08:32:11 | 2018-08-29T16:49:28 | Python | UTF-8 | Python | false | false | 2,516 | py | """
Draw a simple house using pygame's basic shape-drawing primitives.
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
Explanation video: http://youtu.be/vRB_983kUMc
"""
import pygame
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
pygame.init()
# Set the width and height of the screen [width, height]
size = (700, 500)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Vivek's 1st House via PyGame")
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while not done:
# --- Main event loop
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
# --- Game logic should go here
# --- Drawing code should go here
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
# rect(screen, GREEN, [x,y,breadth, length], 0)
# polygon(screen, BLACK, [[midx, midy], [leftx, lefty], [rightx, righty]], 5)
# drawing house
pygame.draw.rect(screen, RED, [100, 200, 200, 200], 0)
# drawing chimney
pygame.draw.rect(screen, BLACK, [125, 140, 20, 60], 0)
# drawing roof
pygame.draw.polygon(screen, WHITE, [[200, 100], [100, 200], [300, 200]], 0)
pygame.draw.polygon(screen, BLACK, [[200, 100], [100, 200], [300, 200]], 3)
# drawing window
pygame.draw.rect(screen, GREEN, [125, 250, 10, 30], 0)
pygame.draw.rect(screen, GREEN, [175, 250, 10, 30], 0)
pygame.draw.rect(screen, GREEN, [225, 250, 10, 30], 0)
pygame.draw.rect(screen, GREEN, [275, 250, 10, 30], 0)
# drawing the door
pygame.draw.rect(screen, BLACK, [190, 350, 20, 50], 0)
BLUE = (0, 0, 255)
BOARD_X = 50
BOARD_Y = 350
BOARD_LENGTH = 150
BOARD_WIDTH = 70
BOARD_COLOR_FILL = 0
pygame.draw.rect(screen, BLUE, [BOARD_X, BOARD_Y, BOARD_LENGTH, BOARD_WIDTH], BOARD_COLOR_FILL)
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 60 frames per second
clock.tick(60)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
| [
"[email protected]"
]
| |
4bb3df61f7e8707d0f5b6dc0a372e300a836a1f0 | d5e4d88e4124ab2387bac64e7d7b76ff37793bf6 | /011/problem11.py | 072127ab96c86257506ca23cee758a4aa9743be4 | []
| no_license | grawinkel/ProjectEuler | 1ae5572eec92e4307183e8b30222ffa39ef4bbce | b470dd4219c769587769c9a70ec3bae5d3ca1166 | refs/heads/master | 2021-05-26T20:01:03.410567 | 2012-10-05T16:58:48 | 2012-10-05T16:58:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,421 | py | # To change this template, choose Tools | Templates
# and open the template in the editor.
__author__="meatz"
__date__ ="$01.08.2010 14:10:38$"
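# Project Euler problem 11: find the greatest product of four adjacent numbers
# (in any of the eight directions) in the 20x20 grid read from data.txt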
m = []
max = 0
maxa,maxb = 0,0
def nw(a,b):
global max,maxa,maxb
prod = int(m[a][b]) * int(m[a-1][b-1]) * int(m[a-2][b-2]) * int(m[a-3][b-3])
if (prod > max):
max = prod
maxa = a
maxb = b
def n(a,b):
global max,maxa,maxb
prod = int(m[a][b]) * int(m[a-1][b]) * int(m[a-2][b]) * int(m[a-3][b])
if (prod > max):
max = prod
maxa = a
maxb = b
def sw(a,b):
global max,maxa,maxb
prod = int(m[a][b]) * int(m[a+1][b-1]) * int(m[a+2][b-2]) * int(m[a+3][b-3])
if (prod > max):
max = prod
maxa = a
maxb = b
def w(a,b):
global max,maxa,maxb
prod = int(m[a][b]) * int(m[a][b-1]) * int(m[a][b-2]) * int(m[a][b-3])
if (prod > max):
max = prod
maxa = a
maxb = b
def s(a,b):
global max,maxa,maxb
prod = int(m[a][b]) * int(m[a+1][b]) * int(m[a+2][b]) * int(m[a+3][b])
if (prod > max):
max = prod
maxa = a
maxb = b
def se(a,b):
global max,maxa,maxb
prod = int(m[a][b]) * int(m[a+1][b+1]) * int(m[a+2][b+2]) * int(m[a+3][b+3])
if (prod > max):
max = prod
maxa = a
maxb = b
def ne(a,b):
global max,maxa,maxb
prod = int(m[a][b]) * int(m[a-1][b+1]) * int(m[a-2][b+2]) * int(m[a-3][b+3])
if (prod > max):
max = prod
maxa = a
maxb = b
def e(a,b):
global max,maxa,maxb
prod = int(m[a][b]) * int(m[a][b+1]) * int(m[a][b+2]) * int(m[a][b+3])
if (prod > max):
max = prod
maxa = a
maxb = b
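# scan every cell and test each direction that stays inside the 20x20 grid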
def run(m):
for a in range(20):
for b in range(20):
if (a-3>=0):
n(a,b)
if (a+3<=19):
s(a,b)
if (b-3>=0): #check the west
w(a,b)
if (a-3>=0):
nw(a,b)
if (a+3<=19):
sw(a,b)
if (b+3<20): #check the east
e(a,b)
if (a-3>=0):
ne(a,b)
if (a+3<20):
se(a,b)
if __name__ == "__main__":
f = open("data.txt","r")
for x in f.readlines():
m.append(x.split(" "))
run(m)
    print(max)
| [
"[email protected]"
]
| |
b290f6c4c523dba303d7efb6b9edbfc26d01ce6b | 4d0bbeb8ab52f7e450aff20056f7509e12751258 | /lists/migrations/0003_list.py | da0266eb470c2bba6c9bd9b11f8ba74b47076401 | []
| no_license | chicocheco/tdd_book | f7c9246dcb4eb5327704c72f655bf6e187b28849 | 574b1082aa523c7434f50e0c4cbdf5777ddf50ef | refs/heads/master | 2022-05-02T17:44:27.217329 | 2020-03-13T18:57:22 | 2020-03-13T18:57:22 | 197,633,503 | 0 | 0 | null | 2022-04-22T22:19:12 | 2019-07-18T17:56:43 | JavaScript | UTF-8 | Python | false | false | 441 | py | # Generated by Django 2.2.3 on 2019-08-08 07:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lists', '0002_item_text'),
]
operations = [
migrations.CreateModel(
name='List',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
| [
"[email protected]"
]
| |
74500f2dd0b8c53a83c336ef4540ba2e49d79f58 | 5ca1893df92150683d386ba61f849a8a20e80f0a | /RSRvenv/lib/python3.5/site-packages/polcart/__init__.py | e72aa4dbaf2557af7d2da9b78431fe7fe7b21272 | []
| no_license | JaredJRoss/RSR | a5340a087b7e19f5c9c8a47d8b322e2384ae8152 | 6601afbab963f095f939ba4ca07cc07c7257e271 | refs/heads/master | 2021-07-10T18:26:28.128522 | 2018-01-08T20:24:20 | 2018-01-08T20:24:20 | 104,166,288 | 0 | 6 | null | 2017-11-27T23:35:25 | 2017-09-20T04:38:26 | Python | UTF-8 | Python | false | false | 23 | py | from .polcart import *
| [
"[email protected]"
]
| |
8e54379c9e0e2512323873740a307b5ac6552d0b | de79ece8981f0fd241bcea578e4a534a1213397e | /spirl/configs/few_shot_imitation_learning/kitchen/hierarchical_cl_gc_demo_slide_demo_trained_vae/conf.py | 1f2fcbafcc3b7ab14bb8c70bf240ee9d69987572 | [
"BSD-3-Clause"
]
| permissive | ahmeda14960/fist | 3ee684cd7da0bb531d791321f1af09adad386ab4 | baf2b0bfed12a9bc0db9a099abeefad1ef618d1c | refs/heads/master | 2023-08-02T01:35:29.983633 | 2021-09-13T20:07:28 | 2021-09-13T20:07:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | import os
from spirl.models.closed_loop_spirl_mdl import GoalClSPiRLMdl
from spirl.components.logger import Logger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.kitchen import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.kitchen.src.kitchen_data_loader import KitchenStateSeqDataset
current_dir = os.path.dirname(os.path.realpath(__file__))
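# few-shot demonstration data for the kitchen microwave/kettle/hinge/slide task sequence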
fewshot_dataset = KitchenStateSeqDataset(
data_path='data/kitchen/kitchen-demo-microwave_kettle_hinge_slide.hdf5',
subseq_len=10,
)
env = AttrDict(
task_list = ['microwave', 'kettle', 'slide cabinet', 'hinge cabinet']
)
contra_model_cf = AttrDict(
state_dimension=data_spec.state_dim,
hidden_size=128,
feature_size=32,
)
configuration = {
'model': GoalClSPiRLMdl,
'logger': Logger,
'data_dir': '.',
'epoch_cycles_train': 1,
'evaluator': TopOfNSequenceEvaluator,
'top_of_n_eval': 100,
'top_comp_metric': 'mse',
'batch_size': 128,
'num_epochs': 50,
'fewshot_data': fewshot_dataset,
'fewshot_batch_size': 128,
'contra_config': contra_model_cf,
'contra_ckpt': './experiments/contrastive/kitchen/exact-mixed-all/exact_model.pt',
'finetune_vae': True
}
configuration = AttrDict(configuration)
model_config = AttrDict(
state_dim=data_spec.state_dim,
action_dim=data_spec.n_actions,
n_rollout_steps=10,
kl_div_weight=5e-4,
nz_enc=128,
nz_mid=128,
n_processing_layers=5,
cond_decode=True,
# checkpt_path=f'{os.environ["EXP_DIR"]}/skill_prior_learning/kitchen/hierarchical_cl_gc_no_slide'
)
# Dataset
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config.dataset_spec['dataset_path'] = './data/kitchen/kitchen-mixed-no-slide.hdf5'
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1 # flat last action from seq gets cropped
| [
"[email protected]"
]
| |
00213373c71f2901f04b9c3f250dfd0d591ee90b | bfd41fc543f6dbfc821341522cf8e7a9d2e34ce8 | /venv/lib/python2.7/site-packages/astroid/scoped_nodes.py | f9ec7b774f86c4821ff457b8eb19100ab3217d62 | []
| no_license | MaraKovalcik/Flask | 783243560ead637a381f76d3893da2b212eff898 | 1ff8413f3551b051f8e6c76db6cf402fc7428188 | refs/heads/master | 2021-01-22T09:09:16.165734 | 2015-02-24T16:57:14 | 2015-02-24T16:57:14 | 31,268,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,465 | py | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""This module contains the classes for "scoped" node, i.e. which are opening a
new local scope in the language definition : Module, Class, Function (and
Lambda, GenExpr, DictComp and SetComp to some extent).
"""
from __future__ import with_statement
__doctype__ = "restructuredtext en"
import sys
from itertools import chain
try:
from io import BytesIO
except ImportError:
from cStringIO import StringIO as BytesIO
import six
from logilab.common.compat import builtins
from logilab.common.decorators import cached, cachedproperty
from astroid.exceptions import NotFoundError, \
AstroidBuildingException, InferenceError
from astroid.node_classes import Const, DelName, DelAttr, \
Dict, From, List, Pass, Raise, Return, Tuple, Yield, YieldFrom, \
LookupMixIn, const_factory as cf, unpack_infer, Name, CallFunc
from astroid.bases import NodeNG, InferenceContext, Instance,\
YES, Generator, UnboundMethod, BoundMethod, _infer_stmts, \
BUILTINS
from astroid.mixins import FilterStmtsMixin
from astroid.bases import Statement
from astroid.manager import AstroidManager
ITER_METHODS = ('__iter__', '__getitem__')
PY3K = sys.version_info >= (3, 0)
def remove_nodes(func, cls):
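    """Wrap *func* so that nodes of type *cls* are filtered from its result; raise NotFoundError when nothing is left."""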
def wrapper(*args, **kwargs):
nodes = [n for n in func(*args, **kwargs) if not isinstance(n, cls)]
if not nodes:
raise NotFoundError()
return nodes
return wrapper
def function_to_method(n, klass):
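    """Wrap the Function node *n* as a bound/unbound method of *klass*; staticmethods and non-functions are returned unchanged."""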
if isinstance(n, Function):
if n.type == 'classmethod':
return BoundMethod(n, klass)
if n.type != 'staticmethod':
return UnboundMethod(n)
return n
def std_special_attributes(self, name, add_locals=True):
if add_locals:
locals = self.locals
else:
locals = {}
if name == '__name__':
return [cf(self.name)] + locals.get(name, [])
if name == '__doc__':
return [cf(self.doc)] + locals.get(name, [])
if name == '__dict__':
return [Dict()] + locals.get(name, [])
raise NotFoundError(name)
MANAGER = AstroidManager()
def builtin_lookup(name):
"""lookup a name into the builtin module
return the list of matching statements and the astroid for the builtin
module
"""
builtin_astroid = MANAGER.ast_from_module(builtins)
if name == '__dict__':
return builtin_astroid, ()
try:
stmts = builtin_astroid.locals[name]
except KeyError:
stmts = ()
return builtin_astroid, stmts
# TODO move this Mixin to mixins.py; problem: 'Function' in _scope_lookup
class LocalsDictNodeNG(LookupMixIn, NodeNG):
""" this class provides locals handling common to Module, Function
and Class nodes, including a dict like interface for direct access
to locals information
"""
# attributes below are set by the builder module or by raw factories
# dictionary of locals with name as key and node defining the local as
# value
def qname(self):
"""return the 'qualified' name of the node, eg module.name,
module.class.name ...
"""
if self.parent is None:
return self.name
return '%s.%s' % (self.parent.frame().qname(), self.name)
def frame(self):
"""return the first parent frame node (i.e. Module, Function or Class)
"""
return self
def scope(self):
"""return the first node defining a new scope (i.e. Module,
Function, Class, Lambda but also GenExpr, DictComp and SetComp)
"""
return self
def _scope_lookup(self, node, name, offset=0):
"""XXX method for interfacing the scope lookup"""
try:
stmts = node._filter_stmts(self.locals[name], self, offset)
except KeyError:
stmts = ()
if stmts:
return self, stmts
if self.parent: # i.e. not Module
# nested scope: if parent scope is a function, that's fine
# else jump to the module
pscope = self.parent.scope()
if not pscope.is_function:
pscope = pscope.root()
return pscope.scope_lookup(node, name)
return builtin_lookup(name) # Module
def set_local(self, name, stmt):
"""define <name> in locals (<stmt> is the node defining the name)
if the node is a Module node (i.e. has globals), add the name to
globals
if the name is already defined, ignore it
"""
#assert not stmt in self.locals.get(name, ()), (self, stmt)
self.locals.setdefault(name, []).append(stmt)
__setitem__ = set_local
def _append_node(self, child):
"""append a child, linking it in the tree"""
self.body.append(child)
child.parent = self
def add_local_node(self, child_node, name=None):
"""append a child which should alter locals to the given node"""
if name != '__class__':
# add __class__ node as a child will cause infinite recursion later!
self._append_node(child_node)
self.set_local(name or child_node.name, child_node)
def __getitem__(self, item):
"""method from the `dict` interface returning the first node
associated with the given name in the locals dictionary
:type item: str
:param item: the name of the locally defined object
:raises KeyError: if the name is not defined
"""
return self.locals[item][0]
def __iter__(self):
"""method from the `dict` interface returning an iterator on
`self.keys()`
"""
return iter(self.keys())
def keys(self):
"""method from the `dict` interface returning a tuple containing
locally defined names
"""
return list(self.locals.keys())
def values(self):
"""method from the `dict` interface returning a tuple containing
locally defined nodes which are instance of `Function` or `Class`
"""
return [self[key] for key in self.keys()]
def items(self):
"""method from the `dict` interface returning a list of tuple
containing each locally defined name with its associated node,
which is an instance of `Function` or `Class`
"""
return list(zip(self.keys(), self.values()))
def __contains__(self, name):
return name in self.locals
has_key = __contains__
# Module #####################################################################
class Module(LocalsDictNodeNG):
_astroid_fields = ('body',)
fromlineno = 0
lineno = 0
# attributes below are set by the builder module or by raw factories
# the file from which as been extracted the astroid representation. It may
# be None if the representation has been built from a built-in module
file = None
# Alternatively, if built from a string/bytes, this can be set
file_bytes = None
# encoding of python source file, so we can get unicode out of it (python2
# only)
file_encoding = None
# the module name
name = None
# boolean for astroid built from source (i.e. ast)
pure_python = None
# boolean for package module
package = None
# dictionary of globals with name as key and node defining the global
# as value
globals = None
# Future imports
future_imports = None
# names of python special attributes (handled by getattr impl.)
special_attributes = set(('__name__', '__doc__', '__file__', '__path__',
'__dict__'))
# names of module attributes available through the global scope
scope_attrs = set(('__name__', '__doc__', '__file__', '__path__'))
def __init__(self, name, doc, pure_python=True):
self.name = name
self.doc = doc
self.pure_python = pure_python
self.locals = self.globals = {}
self.body = []
self.future_imports = set()
@cachedproperty
def file_stream(self):
if self.file_bytes is not None:
return BytesIO(self.file_bytes)
if self.file is not None:
return open(self.file, 'rb')
return None
def block_range(self, lineno):
"""return block line numbers.
start from the beginning whatever the given lineno
"""
return self.fromlineno, self.tolineno
def scope_lookup(self, node, name, offset=0):
if name in self.scope_attrs and not name in self.locals:
try:
return self, self.getattr(name)
except NotFoundError:
return self, ()
return self._scope_lookup(node, name, offset)
def pytype(self):
return '%s.module' % BUILTINS
def display_type(self):
return 'Module'
def getattr(self, name, context=None, ignore_locals=False):
if name in self.special_attributes:
if name == '__file__':
return [cf(self.file)] + self.locals.get(name, [])
if name == '__path__' and self.package:
return [List()] + self.locals.get(name, [])
return std_special_attributes(self, name)
if not ignore_locals and name in self.locals:
return self.locals[name]
if self.package:
try:
return [self.import_module(name, relative_only=True)]
except AstroidBuildingException:
raise NotFoundError(name)
except SyntaxError:
raise NotFoundError(name)
except Exception:# XXX pylint tests never pass here; do we need it?
import traceback
traceback.print_exc()
raise NotFoundError(name)
getattr = remove_nodes(getattr, DelName)
def igetattr(self, name, context=None):
"""inferred getattr"""
# set lookup name since this is necessary to infer on import nodes for
# instance
if not context:
context = InferenceContext()
try:
return _infer_stmts(self.getattr(name, context), context, frame=self, lookupname=name)
except NotFoundError:
raise InferenceError(name)
def fully_defined(self):
"""return True if this module has been built from a .py file
and so contains a complete representation including the code
"""
return self.file is not None and self.file.endswith('.py')
def statement(self):
"""return the first parent node marked as statement node
consider a module as a statement...
"""
return self
def previous_sibling(self):
"""module has no sibling"""
return
def next_sibling(self):
"""module has no sibling"""
return
if sys.version_info < (2, 8):
@cachedproperty
def _absolute_import_activated(self):
for stmt in self.locals.get('absolute_import', ()):
if isinstance(stmt, From) and stmt.modname == '__future__':
return True
return False
else:
_absolute_import_activated = True
def absolute_import_activated(self):
return self._absolute_import_activated
def import_module(self, modname, relative_only=False, level=None):
"""import the given module considering self as context"""
if relative_only and level is None:
level = 0
absmodname = self.relative_to_absolute_name(modname, level)
try:
return MANAGER.ast_from_module_name(absmodname)
except AstroidBuildingException:
# we only want to import a sub module or package of this module,
# skip here
if relative_only:
raise
return MANAGER.ast_from_module_name(modname)
def relative_to_absolute_name(self, modname, level):
"""return the absolute module name for a relative import.
The relative import can be implicit or explicit.
"""
# XXX this returns non sens when called on an absolute import
# like 'pylint.checkers.astroid.utils'
# XXX doesn't return absolute name if self.name isn't absolute name
if self.absolute_import_activated() and level is None:
return modname
if level:
if self.package:
level = level - 1
package_name = self.name.rsplit('.', level)[0]
elif self.package:
package_name = self.name
else:
package_name = self.name.rsplit('.', 1)[0]
if package_name:
if not modname:
return package_name
return '%s.%s' % (package_name, modname)
return modname
def wildcard_import_names(self):
"""return the list of imported names when this module is 'wildcard
imported'
It doesn't include the '__builtins__' name which is added by the
current CPython implementation of wildcard imports.
"""
# take advantage of a living module if it exists
try:
living = sys.modules[self.name]
except KeyError:
pass
else:
try:
return living.__all__
except AttributeError:
return [name for name in living.__dict__.keys()
if not name.startswith('_')]
# else lookup the astroid
#
# We separate the different steps of lookup in try/excepts
# to avoid catching too many Exceptions
default = [name for name in self.keys() if not name.startswith('_')]
try:
all = self['__all__']
except KeyError:
return default
try:
explicit = next(all.assigned_stmts())
except InferenceError:
return default
except AttributeError:
# not an assignment node
# XXX infer?
return default
# Try our best to detect the exported name.
infered = []
try:
explicit = next(explicit.infer())
except InferenceError:
return default
if not isinstance(explicit, (Tuple, List)):
return default
str_const = lambda node: (isinstance(node, Const) and
isinstance(node.value, six.string_types))
for node in explicit.elts:
if str_const(node):
infered.append(node.value)
else:
try:
infered_node = next(node.infer())
except InferenceError:
continue
if str_const(infered_node):
infered.append(infered_node.value)
return infered
class ComprehensionScope(LocalsDictNodeNG):
def frame(self):
return self.parent.frame()
scope_lookup = LocalsDictNodeNG._scope_lookup
class GenExpr(ComprehensionScope):
_astroid_fields = ('elt', 'generators')
def __init__(self):
self.locals = {}
self.elt = None
self.generators = []
class DictComp(ComprehensionScope):
_astroid_fields = ('key', 'value', 'generators')
def __init__(self):
self.locals = {}
self.key = None
self.value = None
self.generators = []
class SetComp(ComprehensionScope):
_astroid_fields = ('elt', 'generators')
def __init__(self):
self.locals = {}
self.elt = None
self.generators = []
class _ListComp(NodeNG):
"""class representing a ListComp node"""
_astroid_fields = ('elt', 'generators')
elt = None
generators = None
if sys.version_info >= (3, 0):
class ListComp(_ListComp, ComprehensionScope):
"""class representing a ListComp node"""
def __init__(self):
self.locals = {}
else:
class ListComp(_ListComp):
"""class representing a ListComp node"""
# Function ###################################################################
def _infer_decorator_callchain(node):
""" Detect decorator call chaining and see if the
end result is a static or a classmethod.
"""
current = node
while True:
if isinstance(current, CallFunc):
try:
current = next(current.func.infer())
except InferenceError:
return
elif isinstance(current, Function):
if not current.parent:
return
try:
# TODO: We don't handle multiple inference results right now,
# because there's no flow to reason when the return
# is what we are looking for, a static or a class method.
result = next(current.infer_call_result(current.parent))
if current is result:
# This will lead to an infinite loop, where a decorator
# returns itself.
return
except (StopIteration, InferenceError):
return
if isinstance(result, (Function, CallFunc)):
current = result
else:
if isinstance(result, Instance):
result = result._proxied
if isinstance(result, Class):
if (result.name == 'classmethod' and
result.root().name == BUILTINS):
return 'classmethod'
elif (result.name == 'staticmethod' and
result.root().name == BUILTINS):
return 'staticmethod'
else:
return
else:
# We aren't interested in anything else returned,
# so go back to the function type inference.
return
else:
return
def _function_type(self):
"""
Function type, possible values are:
method, function, staticmethod, classmethod.
"""
# Can't infer that this node is decorated
# with a subclass of `classmethod` where `type` is first set,
# so do it here.
if self.decorators:
for node in self.decorators.nodes:
if isinstance(node, CallFunc):
_type = _infer_decorator_callchain(node)
if _type is None:
continue
else:
return _type
if not isinstance(node, Name):
continue
try:
for infered in node.infer():
if not isinstance(infered, Class):
continue
for ancestor in infered.ancestors():
if isinstance(ancestor, Class):
if (ancestor.name == 'classmethod' and
ancestor.root().name == BUILTINS):
return 'classmethod'
elif (ancestor.name == 'staticmethod' and
ancestor.root().name == BUILTINS):
return 'staticmethod'
except InferenceError:
pass
return self._type
class Lambda(LocalsDictNodeNG, FilterStmtsMixin):
_astroid_fields = ('args', 'body',)
name = '<lambda>'
# function's type, 'function' | 'method' | 'staticmethod' | 'classmethod'
type = 'function'
def __init__(self):
self.locals = {}
self.args = []
self.body = []
def pytype(self):
if 'method' in self.type:
return '%s.instancemethod' % BUILTINS
return '%s.function' % BUILTINS
def display_type(self):
if 'method' in self.type:
return 'Method'
return 'Function'
def callable(self):
return True
def argnames(self):
"""return a list of argument names"""
if self.args.args: # maybe None with builtin functions
names = _rec_get_names(self.args.args)
else:
names = []
if self.args.vararg:
names.append(self.args.vararg)
if self.args.kwarg:
names.append(self.args.kwarg)
return names
def infer_call_result(self, caller, context=None):
"""infer what a function is returning when called"""
return self.body.infer(context)
def scope_lookup(self, node, name, offset=0):
if node in self.args.defaults or node in self.args.kw_defaults:
frame = self.parent.frame()
# line offset to avoid that def func(f=func) resolve the default
# value to the defined function
offset = -1
else:
# check this is not used in function decorators
frame = self
return frame._scope_lookup(node, name, offset)
class Function(Statement, Lambda):
if PY3K:
_astroid_fields = ('decorators', 'args', 'body', 'returns')
returns = None
else:
_astroid_fields = ('decorators', 'args', 'body')
special_attributes = set(('__name__', '__doc__', '__dict__'))
is_function = True
# attributes below are set by the builder module or by raw factories
blockstart_tolineno = None
decorators = None
_type = "function"
type = cachedproperty(_function_type)
def __init__(self, name, doc):
self.locals = {}
self.args = []
self.body = []
self.name = name
self.doc = doc
self.extra_decorators = []
self.instance_attrs = {}
@cachedproperty
def fromlineno(self):
# lineno is the line number of the first decorator, we want the def
# statement lineno
lineno = self.lineno
if self.decorators is not None:
lineno += sum(node.tolineno - node.lineno + 1
for node in self.decorators.nodes)
return lineno
@cachedproperty
def blockstart_tolineno(self):
return self.args.tolineno
def block_range(self, lineno):
"""return block line numbers.
start from the "def" position whatever the given lineno
"""
return self.fromlineno, self.tolineno
def getattr(self, name, context=None):
"""this method doesn't look in the instance_attrs dictionary since it's
done by an Instance proxy at inference time.
"""
if name == '__module__':
return [cf(self.root().qname())]
if name in self.instance_attrs:
return self.instance_attrs[name]
return std_special_attributes(self, name, False)
def is_method(self):
"""return true if the function node should be considered as a method"""
# check we are defined in a Class, because this is usually expected
# (e.g. pylint...) when is_method() return True
return self.type != 'function' and isinstance(self.parent.frame(), Class)
def decoratornames(self):
"""return a list of decorator qualified names"""
result = set()
decoratornodes = []
if self.decorators is not None:
decoratornodes += self.decorators.nodes
decoratornodes += self.extra_decorators
for decnode in decoratornodes:
for infnode in decnode.infer():
result.add(infnode.qname())
return result
decoratornames = cached(decoratornames)
def is_bound(self):
"""return true if the function is bound to an Instance or a class"""
return self.type == 'classmethod'
def is_abstract(self, pass_is_abstract=True):
"""Returns True if the method is abstract.
A method is considered abstract if
- the only statement is 'raise NotImplementedError', or
- the only statement is 'pass' and pass_is_abstract is True, or
- the method is annotated with abc.astractproperty/abc.abstractmethod
"""
if self.decorators:
for node in self.decorators.nodes:
try:
infered = next(node.infer())
except InferenceError:
continue
if infered and infered.qname() in ('abc.abstractproperty',
'abc.abstractmethod'):
return True
for child_node in self.body:
if isinstance(child_node, Raise):
if child_node.raises_not_implemented():
return True
if pass_is_abstract and isinstance(child_node, Pass):
return True
return False
# empty function is the same as function with a single "pass" statement
if pass_is_abstract:
return True
def is_generator(self):
"""return true if this is a generator function"""
# XXX should be flagged, not computed
return next(self.nodes_of_class((Yield, YieldFrom),
skip_klass=(Function, Lambda)), False)
def infer_call_result(self, caller, context=None):
"""infer what a function is returning when called"""
if self.is_generator():
yield Generator()
return
# This is really a gigantic hack to work around metaclass generators
# that return transient class-generating functions. Pylint's AST structure
# cannot handle a base class object that is only used for calling __new__,
# but does not contribute to the inheritance structure itself. We inject
# a fake class into the hierarchy here for several well-known metaclass
# generators, and filter it out later.
if (self.name == 'with_metaclass' and
len(self.args.args) == 1 and
self.args.vararg is not None):
metaclass = next(caller.args[0].infer(context))
if isinstance(metaclass, Class):
c = Class('temporary_class', None)
c.hide = True
c.parent = self
c.bases = [next(b.infer(context)) for b in caller.args[1:]]
c._metaclass = metaclass
yield c
return
returns = self.nodes_of_class(Return, skip_klass=Function)
for returnnode in returns:
if returnnode.value is None:
yield Const(None)
else:
try:
for infered in returnnode.value.infer(context):
yield infered
except InferenceError:
yield YES
def _rec_get_names(args, names=None):
"""return a list of all argument names"""
if names is None:
names = []
for arg in args:
if isinstance(arg, Tuple):
_rec_get_names(arg.elts, names)
else:
names.append(arg.name)
return names
# Class ######################################################################
def _is_metaclass(klass, seen=None):
""" Return if the given class can be
used as a metaclass.
"""
if klass.name == 'type':
return True
if seen is None:
seen = set()
for base in klass.bases:
try:
for baseobj in base.infer():
if baseobj in seen:
continue
else:
seen.add(baseobj)
if isinstance(baseobj, Instance):
# not abstract
return False
if baseobj is YES:
continue
if baseobj is klass:
continue
if not isinstance(baseobj, Class):
continue
if baseobj._type == 'metaclass':
return True
if _is_metaclass(baseobj, seen):
return True
except InferenceError:
continue
return False
def _class_type(klass, ancestors=None):
"""return a Class node type to differ metaclass, interface and exception
from 'regular' classes
"""
# XXX we have to store ancestors in case we have a ancestor loop
if klass._type is not None:
return klass._type
if _is_metaclass(klass):
klass._type = 'metaclass'
elif klass.name.endswith('Interface'):
klass._type = 'interface'
elif klass.name.endswith('Exception'):
klass._type = 'exception'
else:
if ancestors is None:
ancestors = set()
if klass in ancestors:
# XXX we are in loop ancestors, and have found no type
klass._type = 'class'
return 'class'
ancestors.add(klass)
for base in klass.ancestors(recurs=False):
name = _class_type(base, ancestors)
if name != 'class':
if name == 'metaclass' and not _is_metaclass(klass):
# don't propagate it if the current class
# can't be a metaclass
continue
klass._type = base.type
break
if klass._type is None:
klass._type = 'class'
return klass._type
def _iface_hdlr(iface_node):
"""a handler function used by interfaces to handle suspicious
interface nodes
"""
return True
class Class(Statement, LocalsDictNodeNG, FilterStmtsMixin):
# some of the attributes below are set by the builder module or
# by a raw factories
# a dictionary of class instances attributes
_astroid_fields = ('decorators', 'bases', 'body') # name
decorators = None
special_attributes = set(('__name__', '__doc__', '__dict__', '__module__',
'__bases__', '__mro__', '__subclasses__'))
blockstart_tolineno = None
_type = None
_metaclass_hack = False
hide = False
type = property(_class_type,
doc="class'type, possible values are 'class' | "
"'metaclass' | 'interface' | 'exception'")
def __init__(self, name, doc):
self.instance_attrs = {}
self.locals = {}
self.bases = []
self.body = []
self.name = name
self.doc = doc
def _newstyle_impl(self, context=None):
if context is None:
context = InferenceContext()
if self._newstyle is not None:
return self._newstyle
for base in self.ancestors(recurs=False, context=context):
if base._newstyle_impl(context):
self._newstyle = True
break
klass = self._explicit_metaclass()
# could be any callable, we'd need to infer the result of klass(name,
# bases, dict). punt if it's not a class node.
if klass is not None and isinstance(klass, Class):
self._newstyle = klass._newstyle_impl(context)
if self._newstyle is None:
self._newstyle = False
return self._newstyle
_newstyle = None
newstyle = property(_newstyle_impl,
doc="boolean indicating if it's a new style class"
"or not")
@cachedproperty
def blockstart_tolineno(self):
if self.bases:
return self.bases[-1].tolineno
else:
return self.fromlineno
def block_range(self, lineno):
"""return block line numbers.
start from the "class" position whatever the given lineno
"""
return self.fromlineno, self.tolineno
def pytype(self):
if self.newstyle:
return '%s.type' % BUILTINS
return '%s.classobj' % BUILTINS
def display_type(self):
return 'Class'
def callable(self):
return True
def is_subtype_of(self, type_name, context=None):
if self.qname() == type_name:
return True
for anc in self.ancestors(context=context):
if anc.qname() == type_name:
return True
def infer_call_result(self, caller, context=None):
"""infer what a class is returning when called"""
if self.is_subtype_of('%s.type' % (BUILTINS,), context) and len(caller.args) == 3:
name_node = next(caller.args[0].infer(context))
if (isinstance(name_node, Const) and
isinstance(name_node.value, six.string_types)):
name = name_node.value
else:
yield YES
return
result = Class(name, None)
bases = next(caller.args[1].infer(context))
if isinstance(bases, (Tuple, List)):
result.bases = bases.itered()
else:
# There is currently no AST node that can represent an 'unknown'
# node (YES is not an AST node), therefore we simply return YES here
# although we know at least the name of the class.
yield YES
return
result.parent = caller.parent
yield result
else:
yield Instance(self)
def scope_lookup(self, node, name, offset=0):
if node in self.bases:
frame = self.parent.frame()
# line offset to avoid that class A(A) resolve the ancestor to
# the defined class
offset = -1
else:
frame = self
return frame._scope_lookup(node, name, offset)
# list of parent class as a list of string (i.e. names as they appear
# in the class definition) XXX bw compat
def basenames(self):
return [bnode.as_string() for bnode in self.bases]
basenames = property(basenames)
def ancestors(self, recurs=True, context=None):
"""return an iterator on the node base classes in a prefixed
depth first order
:param recurs:
boolean indicating if it should recurse or return direct
ancestors only
"""
# FIXME: should be possible to choose the resolution order
# FIXME: inference make infinite loops possible here
yielded = set([self])
if context is None:
context = InferenceContext()
if sys.version_info[0] >= 3:
if not self.bases and self.qname() != 'builtins.object':
yield builtin_lookup("object")[1][0]
return
for stmt in self.bases:
try:
for baseobj in stmt.infer(context):
if not isinstance(baseobj, Class):
if isinstance(baseobj, Instance):
baseobj = baseobj._proxied
else:
# duh ?
continue
if not baseobj.hide:
if baseobj in yielded:
continue # cf xxx above
yielded.add(baseobj)
yield baseobj
if recurs:
for grandpa in baseobj.ancestors(recurs=True,
context=context):
if grandpa in yielded:
continue # cf xxx above
yielded.add(grandpa)
yield grandpa
except InferenceError:
# XXX log error ?
continue
def local_attr_ancestors(self, name, context=None):
"""return an iterator on astroid representation of parent classes
which have <name> defined in their locals
"""
for astroid in self.ancestors(context=context):
if name in astroid:
yield astroid
def instance_attr_ancestors(self, name, context=None):
"""return an iterator on astroid representation of parent classes
which have <name> defined in their instance attribute dictionary
"""
for astroid in self.ancestors(context=context):
if name in astroid.instance_attrs:
yield astroid
def has_base(self, node):
return node in self.bases
def local_attr(self, name, context=None):
"""return the list of assign node associated to name in this class
locals or in its parents
:raises `NotFoundError`:
if no attribute with this name has been find in this class or
its parent classes
"""
try:
return self.locals[name]
except KeyError:
# get if from the first parent implementing it if any
for class_node in self.local_attr_ancestors(name, context):
return class_node.locals[name]
raise NotFoundError(name)
local_attr = remove_nodes(local_attr, DelAttr)
def instance_attr(self, name, context=None):
"""return the astroid nodes associated to name in this class instance
attributes dictionary and in its parents
:raises `NotFoundError`:
if no attribute with this name has been find in this class or
its parent classes
"""
# Return a copy, so we don't modify self.instance_attrs,
# which could lead to infinite loop.
values = list(self.instance_attrs.get(name, []))
# get all values from parents
for class_node in self.instance_attr_ancestors(name, context):
values += class_node.instance_attrs[name]
if not values:
raise NotFoundError(name)
return values
instance_attr = remove_nodes(instance_attr, DelAttr)
def instanciate_class(self):
"""return Instance of Class node, else return self"""
return Instance(self)
def getattr(self, name, context=None):
"""this method doesn't look in the instance_attrs dictionary since it's
done by an Instance proxy at inference time.
It may return a YES object if the attribute has not been actually
found but a __getattr__ or __getattribute__ method is defined
"""
values = self.locals.get(name, [])
if name in self.special_attributes:
if name == '__module__':
return [cf(self.root().qname())] + values
# FIXME: do we really need the actual list of ancestors?
# returning [Tuple()] + values don't break any test
# this is ticket http://www.logilab.org/ticket/52785
# XXX need proper meta class handling + MRO implementation
if name == '__bases__' or (name == '__mro__' and self.newstyle):
node = Tuple()
node.items = self.ancestors(recurs=True, context=context)
return [node] + values
return std_special_attributes(self, name)
# don't modify the list in self.locals!
values = list(values)
for classnode in self.ancestors(recurs=True, context=context):
values += classnode.locals.get(name, [])
if not values:
raise NotFoundError(name)
return values
def igetattr(self, name, context=None):
"""inferred getattr, need special treatment in class to handle
descriptors
"""
# set lookup name since this is necessary to infer on import nodes for
# instance
if not context:
context = InferenceContext()
try:
for infered in _infer_stmts(self.getattr(name, context), context,
frame=self, lookupname=name):
# yield YES object instead of descriptors when necessary
if not isinstance(infered, Const) and isinstance(infered, Instance):
try:
infered._proxied.getattr('__get__', context)
except NotFoundError:
yield infered
else:
yield YES
else:
yield function_to_method(infered, self)
except NotFoundError:
if not name.startswith('__') and self.has_dynamic_getattr(context):
# class handle some dynamic attributes, return a YES object
yield YES
else:
raise InferenceError(name)
def has_dynamic_getattr(self, context=None):
"""return True if the class has a custom __getattr__ or
__getattribute__ method
"""
# need to explicitly handle optparse.Values (setattr is not detected)
if self.name == 'Values' and self.root().name == 'optparse':
return True
try:
self.getattr('__getattr__', context)
return True
except NotFoundError:
#if self.newstyle: XXX cause an infinite recursion error
try:
getattribute = self.getattr('__getattribute__', context)[0]
if getattribute.root().name != BUILTINS:
# class has a custom __getattribute__ defined
return True
except NotFoundError:
pass
return False
def methods(self):
"""return an iterator on all methods defined in the class and
its ancestors
"""
done = {}
for astroid in chain(iter((self,)), self.ancestors()):
for meth in astroid.mymethods():
if meth.name in done:
continue
done[meth.name] = None
yield meth
def mymethods(self):
"""return an iterator on all methods defined in the class"""
for member in self.values():
if isinstance(member, Function):
yield member
def interfaces(self, herited=True, handler_func=_iface_hdlr):
"""return an iterator on interfaces implemented by the given
class node
"""
# FIXME: what if __implements__ = (MyIFace, MyParent.__implements__)...
try:
implements = Instance(self).getattr('__implements__')[0]
except NotFoundError:
return
if not herited and not implements.frame() is self:
return
found = set()
missing = False
for iface in unpack_infer(implements):
if iface is YES:
missing = True
continue
if not iface in found and handler_func(iface):
found.add(iface)
yield iface
if missing:
raise InferenceError()
_metaclass = None
def _explicit_metaclass(self):
""" Return the explicit defined metaclass
for the current class.
An explicit defined metaclass is defined
either by passing the ``metaclass`` keyword argument
in the class definition line (Python 3) or (Python 2) by
having a ``__metaclass__`` class attribute, or if there are
no explicit bases but there is a global ``__metaclass__`` variable.
"""
for base in self.bases:
try:
for baseobj in base.infer():
if isinstance(baseobj, Class) and baseobj.hide:
self._metaclass = baseobj._metaclass
self._metaclass_hack = True
break
except InferenceError:
pass
if self._metaclass:
# Expects this from Py3k TreeRebuilder
try:
return next(node for node in self._metaclass.infer()
if node is not YES)
except (InferenceError, StopIteration):
return None
if sys.version_info >= (3, ):
return None
if '__metaclass__' in self.locals:
assignment = self.locals['__metaclass__'][-1]
elif self.bases:
return None
elif '__metaclass__' in self.root().locals:
assignments = [ass for ass in self.root().locals['__metaclass__']
if ass.lineno < self.lineno]
if not assignments:
return None
assignment = assignments[-1]
else:
return None
try:
infered = next(assignment.infer())
except InferenceError:
return
if infered is YES: # don't expose this
return None
return infered
def metaclass(self):
""" Return the metaclass of this class.
If this class does not define explicitly a metaclass,
then the first defined metaclass in ancestors will be used
instead.
"""
klass = self._explicit_metaclass()
if klass is None:
for parent in self.ancestors():
klass = parent.metaclass()
if klass is not None:
break
return klass
def has_metaclass_hack(self):
return self._metaclass_hack
def _islots(self):
""" Return an iterator with the inferred slots. """
if '__slots__' not in self.locals:
return
for slots in self.igetattr('__slots__'):
# check if __slots__ is a valid type
for meth in ITER_METHODS:
try:
slots.getattr(meth)
break
except NotFoundError:
continue
else:
continue
if isinstance(slots, Const):
# a string. Ignore the following checks,
# but yield the node, only if it has a value
if slots.value:
yield slots
continue
if not hasattr(slots, 'itered'):
# we can't obtain the values, maybe a .deque?
continue
if isinstance(slots, Dict):
values = [item[0] for item in slots.items]
else:
values = slots.itered()
if values is YES:
continue
for elt in values:
try:
for infered in elt.infer():
if infered is YES:
continue
if (not isinstance(infered, Const) or
not isinstance(infered.value, str)):
continue
if not infered.value:
continue
yield infered
except InferenceError:
continue
# Cached, because inferring them all the time is expensive
@cached
def slots(self):
""" Return all the slots for this node. """
return list(self._islots())
| [
"[email protected]"
]
| |
ae48ce85c8caa8b2632e5bbc58f086388955ab75 | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/application_command/application_command/tests/test__validate_version.py | 0311466ec90e46c18abaa78702c11bd7846f90a8 | [
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 1,003 | py | import vampytest
from ..fields import validate_version
def test__validate_version__0():
"""
Tests whether `validate_version` works as intended.
Case: passing.
"""
version = 202302260011
for input_value, expected_output in (
(version, version),
(str(version), version),
):
output = validate_version(input_value)
vampytest.assert_eq(output, expected_output)
def test__validate_version__1():
"""
Tests whether `validate_version` works as intended.
Case: `ValueError`.
"""
for input_value in (
'-1',
-1,
):
with vampytest.assert_raises(AssertionError, ValueError):
validate_version(input_value)
def test__validate_version__2():
"""
Tests whether `validate_version` works as intended.
Case: `TypeError`.
"""
for input_value in (
12.6,
):
with vampytest.assert_raises(TypeError):
validate_version(input_value)
| [
"[email protected]"
]
| |
c65f10f40c7746b6a0f8b226efa07085cf5a26f6 | 3634703ad8685c9bc5d73edf148b7b8722356c0e | /Algorithm/programmers/pg_2016년.py | 872b394b834701e55c74ca2098cf27d1a25d7d18 | []
| no_license | chj3748/TIL | 23d88f97ebc8b1e3a06bb93752dfd2d331d01fd8 | 40a4e524c28945c95f059b0dee598abb686abe04 | refs/heads/master | 2022-02-26T16:43:56.964719 | 2022-02-14T04:43:20 | 2022-02-14T04:43:20 | 235,233,054 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | # math | programmers 2016년
# github.com/chj3748
import sys
def input():
return sys.stdin.readline().rstrip()
def solution(a, b):
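    # prefix sums: months[m] ends up holding the number of days in 2016 before month m (leap year, so February has 29 days)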
months = [0, 0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for i in range(1, 14):
months[i] += months[i - 1]
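    # January 1st 2016 was a Friday, so an offset of 1 maps to 'FRI'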
    weeks = ['THU', 'FRI', 'SAT', 'SUN', 'MON', 'TUE', 'WED']
return weeks[(months[a] + b) % 7] | [
"[email protected]"
]
| |
b27b059c477b45152d67c266b8bde14dfdbcfe93 | e122ab31559f7551e4bc4dff6dfa7f7dbbd10168 | /jaqs/__init__.py | 0be750ea380b5ec64652ff6b426589ec22e928c8 | [
"Apache-2.0"
]
| permissive | WayneWan413/JAQS | ffb909d6d550451552697358735ec5dd74975b2d | e7362fc261f49dd7a4353c9a9a3f98d6ef9a78b4 | refs/heads/master | 2021-08-30T10:30:20.675837 | 2017-12-17T14:14:59 | 2017-12-17T14:14:59 | 113,726,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | # encoding: utf-8
"""
JAQS
~~~~
Open source quantitative research&trading framework.
copyright: (c) 2017 quantOS-org.
license: Apache 2.0, see LICENSE for details.
"""
import os
__version__ = '0.6.6'
SOURCE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
| [
"[email protected]"
]
| |
d3e1cb323db751ac2050493151ddde48bb868a90 | 566638e179b0add891e1d5c8900d35ae531af6dc | /alembic_simplelis/versions/6487bfd4c8aa_renamed_columns.py | 6cd0406fe6bfe944447113a2432ef47fb6ff8af3 | []
| no_license | likit/querystud | 9b023a45adfdbf6dc8a3a2f97fefb82b765c8690 | 1702c09ff6931b2cd94d0b55ef42f244c503a68a | refs/heads/master | 2020-03-25T19:25:40.412824 | 2018-08-09T18:08:48 | 2018-08-09T18:08:48 | 144,082,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,273 | py | """renamed columns
Revision ID: 6487bfd4c8aa
Revises: 8c08809abb09
Create Date: 2018-08-09 15:54:28.683879
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '6487bfd4c8aa'
down_revision = '8c08809abb09'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('labs', sa.Column('reportDate', sa.Date(), nullable=True))
op.add_column('labs', sa.Column('reportTime', sa.Time(), nullable=True))
op.alter_column('labs', 'recvDate',
existing_type=sa.DATE(),
nullable=False)
op.alter_column('labs', 'recvTime',
existing_type=postgresql.TIME(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('labs', 'recvTime',
existing_type=postgresql.TIME(),
nullable=True)
op.alter_column('labs', 'recvDate',
existing_type=sa.DATE(),
nullable=True)
op.drop_column('labs', 'reportTime')
op.drop_column('labs', 'reportDate')
# ### end Alembic commands ###
| [
"[email protected]"
]
| |
61c4329bc9311c20d6ca2fdca35994a57d850ee5 | 38fff7bdefd8d62a740d51329b50d0e1e49258bb | /projects/oscrypto/fuzz_keys.py | 891da04cd3fd9abfb86f3c556edf67a2c729e495 | [
"Apache-2.0"
]
| permissive | google/oss-fuzz | 026384c2ada61ef68b147548e830f60730c5e738 | f0275421f84b8f80ee767fb9230134ac97cb687b | refs/heads/master | 2023-08-31T23:30:28.157702 | 2023-08-31T21:49:30 | 2023-08-31T21:49:30 | 63,809,205 | 9,438 | 2,315 | Apache-2.0 | 2023-09-14T20:32:19 | 2016-07-20T19:39:50 | Shell | UTF-8 | Python | false | false | 938 | py | #!/usr/bin/python3
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import atheris
from oscrypto import keys
def TestOneInput(data):
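    # try to parse the fuzz input as a PKCS#12 container; expected parsing failures are ignored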
try:
keys.parse_pkcs12(data, b'123')
except ValueError:
pass
except OSError:
pass
def main():
atheris.instrument_all()
atheris.Setup(sys.argv, TestOneInput, enable_python_coverage=True)
atheris.Fuzz()
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
50bee84349089e1aa4828f68a88a6d8a89dfdf41 | 568d7d17d09adeeffe54a1864cd896b13988960c | /month01/day07/exercise05.py | 3493424818067ec8a5a6e612d415a224bd930150 | [
"Apache-2.0"
]
| permissive | Amiao-miao/all-codes | e2d1971dfd4cecaaa291ddf710999f2fc4d8995f | ec50036d42d40086cac5fddf6baf4de18ac91e55 | refs/heads/main | 2023-02-24T10:36:27.414153 | 2021-02-01T10:51:55 | 2021-02-01T10:51:55 | 334,908,634 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | dict_travel_info = {
"北京": {
"景区": ["长城", "故宫"],
"美食": ["烤鸭", "豆汁焦圈", "炸酱面"]
},
"四川": {
"景区": ["九寨沟", "峨眉山"],
"美食": ["火锅", "兔头"]
}
}
# 1. Print the first scenic spot of Beijing
print(dict_travel_info["北京"]["景区"][0])
# Print the second food of Sichuan
print(dict_travel_info["四川"]["美食"][1])
# 2. All cities (one per line)
for key in dict_travel_info:
print(key)
# 3. All of Beijing's foods (one per line)
for i in dict_travel_info["北京"]["美食"]:
print(i)
# 4. Print all foods of every city (one per line)
for value in dict_travel_info.values():
for v in value["美食"]:
print(v) | [
"[email protected]"
]
| |
f6d7fbdef5cdaedb0fe2f8536b75a1173aca58fe | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/containerservice/azure-mgmt-containerservice/generated_samples/maintenance_configurations_create_update.py | c0abfbc1bab5cf537fc774e2f3b60eed6869983b | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
]
| permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,900 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.containerservice import ContainerServiceClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-containerservice
# USAGE
python maintenance_configurations_create_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ContainerServiceClient(
credential=DefaultAzureCredential(),
subscription_id="subid1",
)
response = client.maintenance_configurations.create_or_update(
resource_group_name="rg1",
resource_name="clustername1",
config_name="default",
parameters={
"properties": {
"notAllowedTime": [{"end": "2020-11-30T12:00:00Z", "start": "2020-11-26T03:00:00Z"}],
"timeInWeek": [{"day": "Monday", "hourSlots": [1, 2]}],
}
},
)
print(response)
# x-ms-original-file: specification/containerservice/resource-manager/Microsoft.ContainerService/aks/stable/2023-07-01/examples/MaintenanceConfigurationsCreate_Update.json
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
d993401850d52d98db8b268955eeb445554951db | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_371/ch47_2020_10_04_13_40_50_371443.py | 3cc3cadd86d947006768c00c032c11e851f56842 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | def estritamente_crescente(lista):
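    # keep only the values greater than the last value kept, building a strictly increasing ("estritamente crescente") list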
    numero_atual = 0
    numero_anterior = 0
    i = 0
    nova_lista = []
    while len(lista) > i:
        numero_atual = lista[i]
        if numero_atual > numero_anterior:
            numero_anterior = numero_atual
            nova_lista.append(numero_atual)
            i += 1
        else:
            i += 1
return nova_lista | [
"[email protected]"
]
| |
e73ea00412857d8bc51a1b6f7dd676d32b152336 | 1c6a29a7dcd62470d594d5e42dbea9ff79cc47f5 | /shade/_heat/utils.py | 24cb0b07115e1da1eb535444aa86612c446b7d0a | [
"Apache-2.0"
]
| permissive | major/shade | a1691a3e3311f1b87f4a31c3a26929ddc2541b7a | 0ced9b5a7568dd8e4a33b6627f636639bcbbd8a3 | refs/heads/master | 2023-06-07T17:15:47.089102 | 2020-06-01T22:59:14 | 2020-06-01T22:59:14 | 54,499,600 | 0 | 0 | Apache-2.0 | 2023-06-01T21:26:36 | 2016-03-22T18:34:14 | Python | UTF-8 | Python | false | false | 1,842 | py | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
from six.moves.urllib import error
from six.moves.urllib import parse
from six.moves.urllib import request
from shade import exc
def base_url_for_url(url):
parsed = parse.urlparse(url)
parsed_dir = os.path.dirname(parsed.path)
return parse.urljoin(url, parsed_dir)
def normalise_file_path_to_url(path):
if parse.urlparse(path).scheme:
return path
path = os.path.abspath(path)
return parse.urljoin('file:', request.pathname2url(path))
def read_url_content(url):
try:
# TODO(mordred) Use requests
content = request.urlopen(url).read()
except error.URLError:
raise exc.OpenStackCloudException(
'Could not fetch contents for %s' % url)
if content:
try:
content.decode('utf-8')
except ValueError:
content = base64.encodestring(content)
return content
def resource_nested_identifier(rsrc):
nested_link = [l for l in rsrc.links or []
if l.get('rel') == 'nested']
if nested_link:
nested_href = nested_link[0].get('href')
nested_identifier = nested_href.split("/")[-2:]
return "/".join(nested_identifier)
| [
"[email protected]"
]
| |
633f67db56b3fc27c70671b9cff7a90c51faa754 | 96538cc3eee3d73d429f3476d0e895be95d695e3 | /worker/db/redisdb.py | e7b69ffd0713371d1380120d397a1485debac7fe | []
| no_license | FashtimeDotCom/distributed-spider | d9555670216e68d4ff031e466cbf3529d080a534 | 33292f098403fa73239e0c7353e4cc5918be981b | refs/heads/master | 2020-03-22T11:43:14.796426 | 2018-07-06T10:51:48 | 2018-07-06T11:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,874 | py | # -*- coding: utf-8 -*-
'''
Created on 2016-11-16 16:25
---------
@summary: helper class for operating on a Redis database
---------
@author: Boris
'''
import sys
sys.path.append('../')
import init
import redis
import utils.tools as tools
from utils.log import log
IP = tools.get_conf_value('config.conf', 'redis', 'ip')
PORT = int(tools.get_conf_value('config.conf', 'redis', 'port'))
DB = int(tools.get_conf_value('config.conf', 'redis', 'db'))
USER_PASS = tools.get_conf_value('config.conf', 'redis', 'user_pass')
class Singleton(object):
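    # classic __new__-based singleton: every instantiation returns the same shared instance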
def __new__(cls, *args, **kwargs):
if not hasattr(cls,'_inst'):
cls._inst=super(Singleton,cls).__new__(cls, *args, **kwargs)
return cls._inst
class RedisDB():
def __init__(self, ip = IP, port = PORT, db = DB, user_pass = USER_PASS):
# super(RedisDB, self).__init__()
if not hasattr(self,'_redis'):
try:
                self._redis = redis.Redis(host = ip, port = port, db = db, password = user_pass, decode_responses=True) # the default Redis port is 6379
                self._pipe = self._redis.pipeline(transaction=True) # by default redis-py opens (borrows from the pool) and closes (returns) a connection for every request; a pipeline lets a single request carry several commands, and by default a pipeline executes atomically.
except Exception as e:
raise
else:
                log.debug('connected to redis database ip:%s port:%s' % (ip, port))
def sadd(self, table, values):
'''
        @summary: store data in an unordered set, with deduplication
        ---------
        @param table:
        @param values: value(s); accepts a list or a single value
        ---------
        @result: returns 0 if the value already exists, otherwise inserts it and returns 1; batch adds return None
'''
if isinstance(values, list):
self._pipe.multi()
for value in values:
self._pipe.sadd(table, value)
self._pipe.execute()
else:
return self._redis.sadd(table, values)
def zadd(self, table, values, prioritys = 0):
'''
        @summary: store data in a sorted set, with deduplication (existing values are updated)
        ---------
        @param table:
        @param values: value(s); accepts a list or a single value
        @param prioritys: priority; double, accepts a list or a single value. Entries are sorted by this field, smaller values first. Optional, defaults to 0.
        ---------
        @result: returns 0 if the value already exists, otherwise inserts it and returns 1; batch adds return None
'''
if isinstance(values, list):
if not isinstance(prioritys, list):
prioritys = [prioritys] * len(values)
else:
                assert len(values) == len(prioritys), 'values and prioritys must correspond one to one'
self._pipe.multi()
for value, priority in zip(values, prioritys):
self._pipe.zadd(table, value, priority)
self._pipe.execute()
else:
return self._redis.zadd(table, values, prioritys)
def zget(self, table, count = 0, is_pop = True):
'''
        @summary: fetch data from the sorted set
        ---------
        @param table:
        @param count: number of items to fetch
        @param is_pop: whether to delete the fetched items from the set afterwards, default True
        ---------
        @result: a list
'''
        start_pos = 0  # inclusive
        end_pos = 0 if count == 0 else count - 1  # inclusive
        self._pipe.multi()  # mark the start of the transaction, see http://www.runoob.com/redis/redis-transactions.html
        self._pipe.zrange(table, start_pos, end_pos)  # read the values
        if is_pop: self._pipe.zremrangebyrank(table, start_pos, end_pos)  # delete them
        results = self._pipe.execute()[0]  # the zrange output; a second element (the zremrangebyrank count) is only queued when is_pop is True
return results
def zget_count(self, table, priority_min = None, priority_max = None):
'''
        @summary: get the number of entries in the table
        ---------
        @param table:
        @param priority_min: lower bound of the priority range (inclusive)
        @param priority_max: upper bound of the priority range (inclusive)
        ---------
        @result: the number of entries (restricted to the priority range when both bounds are given)
'''
if priority_min != None and priority_max != None:
return self._redis.zcount(table, priority_min, priority_max)
else:
return self._redis.zcard(table)
def lpush(self, table, values):
if isinstance(values, list):
self._pipe.multi()
for value in values:
self._pipe.rpush(table, value)
self._pipe.execute()
else:
return self._redis.rpush(table, values)
def lpop(self, table, count = 1):
'''
        @summary: pop up to `count` items from the head of the list
        ---------
        @param table:
        @param count: maximum number of items to pop
        ---------
        @result: a list of the popped items
'''
datas = []
count = count if count <= self.lget_count(table) else self.lget_count(table)
if count:
if count > 1:
self._pipe.multi()
while count:
data = self._pipe.lpop(table)
count -= 1
datas = self._pipe.execute()
else:
datas.append(self._redis.lpop(table))
return datas
def lget_count(self, table):
return self._redis.llen(table)
def clear(self, table):
self._redis.delete(table)
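# --- Illustrative sketch (not part of the original module) -------------------
# The zadd/zget docstrings above describe a priority-queue pattern: values are
# stored in a sorted set where a smaller score means a higher priority, and
# zget pops the lowest-scored entries first in a single pipelined (atomic)
# request. The helper below is a minimal demonstration of that pattern using
# only methods defined in RedisDB; the table name 'demo_task_urls' is made up
# for the example.
def _priority_queue_demo(db, urls, priority=1):
    # enqueue every url with the same priority (lower score = popped earlier)
    db.zadd('demo_task_urls', list(urls), priority)
    # pop up to 10 of the highest-priority urls; is_pop=True removes them
    return db.zget('demo_task_urls', count=10, is_pop=True)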
if __name__ == '__main__':
db = RedisDB()
# data = {
# "url": "http://www.icm9.com/",
# "status": 0,
# "remark": {
# "spider_depth": 3,
# "website_name": "早间新闻",
# "website_position": 23,
# "website_domain": "icm9.com",
# "website_url": "http://www.icm9.com/"
# },
# "depth": 0,
# "_id": "5b15f33d53446530acf20539",
# "site_id": 1,
# "retry_times": 0
# }
# print(db.sadd('25:25', data))
# print(db.zadd('26:26', [data]))
# # print(db.sadd('1', 1))
db.zadd('news_urls', '1', 1)
db.zadd('news_urls', '2', 1)
db.zadd('news_urls', '3', 2)
count = db.zget_count('news_urls', 2, 2)
print(count)
# print(type(data[0]))
# db.clear('name')
# import time
# start = time.time()
# # for i in range(10000):
# # db.zadd('test6', i)
# db.zadd('test7', list(range(10000)), [1])
# print(time.time() - start)
# db.zadd('test3', '1', 5)
# db.zadd('test3', '2', 6)
# db.zadd('test3', '3', 4)
data = db.zget('news_urls', 2)
print(data)
| [
"[email protected]"
]
| |
9792a5b3135bd29aa5e53b9ae8901a638fa9d8f1 | c6759b857e55991fea3ef0b465dbcee53fa38714 | /tools/nntool/nntool/quantization/multiplicative/quantizers/default_mult.py | 0385ebd178ce814eee4883e29eda75e12c0747cf | [
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
]
| permissive | GreenWaves-Technologies/gap_sdk | 1b343bba97b7a5ce62a24162bd72eef5cc67e269 | 3fea306d52ee33f923f2423c5a75d9eb1c07e904 | refs/heads/master | 2023-09-01T14:38:34.270427 | 2023-08-10T09:04:44 | 2023-08-10T09:04:44 | 133,324,605 | 145 | 96 | Apache-2.0 | 2023-08-27T19:03:52 | 2018-05-14T07:50:29 | C | UTF-8 | Python | false | false | 1,696 | py | # Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import numpy as np
from nntool.quantization.qtype_constraint import MatchAll
from nntool.quantization.quantizers.no_change_mixin import NoChangeMixin
from nntool.quantization.unified_quantization_handler import (in_qs_constraint,
needs_stats,
out_qs_constraint,
params_type)
from ..mult_quantization_handler import MultQuantizionHandler
LOG = logging.getLogger('nntool.' + __name__)
@params_type('__default__')
@in_qs_constraint(MatchAll({'dtype': set([np.int8, np.int16, np.uint8, np.uint16])}))
@out_qs_constraint(MatchAll({'dtype': set([np.int8, np.int16, np.uint8, np.uint16])}))
@needs_stats(False)
class NoChangeMult(MultQuantizionHandler, NoChangeMixin):
@classmethod
def _quantize(cls, params, in_qs, stats, **kwargs):
return cls._handle(params, in_qs, stats, 'scaled', **kwargs)
| [
"[email protected]"
]
| |
c64212fbcaa3ae1e49feb372c55e74cd6e4024a0 | 26bcead33b5ae529883f940d34c8ed6e1f1571af | /src/wordnet/__init__.py | 4e32b07df0478eabe63805e0922d9d3093bec5ff | []
| no_license | julieweeds/WordNet | a6ef1c7c757f60108c73ad02cfd43e902c2094c1 | 69c353c2cc0b6e5527f016a317e59bd43b8ceb45 | refs/heads/master | 2020-12-25T19:26:01.102329 | 2015-06-09T15:02:27 | 2015-06-09T15:02:27 | 9,606,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | __author__ = 'Julie'
| [
"[email protected]"
]
| |
488399fdb504ae50e47833dff927ee9e6ba0f590 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/effects/JRTeleportEffect.py | 8882e9e00c07ab5214be47edb6f53d4ae176d94a | []
| no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,337 | py | # File: J (Python 2.4)
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from EffectController import EffectController
from PooledEffect import PooledEffect
import random
class JRTeleportEffect(PooledEffect, EffectController):
card2Scale = 32.0
cardScale = 32.0
def __init__(self, parent = None):
PooledEffect.__init__(self)
EffectController.__init__(self)
if parent is not None:
self.reparentTo(parent)
if not JRTeleportEffect.particleDummy:
JRTeleportEffect.particleDummy = render.attachNewNode(ModelNode('JRTeleportEffectParticleDummy'))
JRTeleportEffect.particleDummy.setColorScaleOff()
JRTeleportEffect.particleDummy.setLightOff()
JRTeleportEffect.particleDummy.setFogOff()
JRTeleportEffect.particleDummy.setDepthWrite(0)
JRTeleportEffect.particleDummy.setBin('fixed', 40)
self.effectScale = 1.0
self.duration = 3.0
self.radius = 1.0
model = loader.loadModel('models/effects/particleMaps')
self.card = model.find('**/particleEvilSmoke')
self.card2 = model.find('**/particleWhiteSmoke')
self.f = ParticleEffect.ParticleEffect('JRTeleportEffect')
self.f.reparentTo(self)
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('ZSpinParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('SphereVolumeEmitter')
self.p1 = Particles.Particles('particles-2')
self.p1.setFactory('ZSpinParticleFactory')
self.p1.setRenderer('SpriteParticleRenderer')
self.p1.setEmitter('SphereVolumeEmitter')
self.f.addParticles(self.p0)
self.f.addParticles(self.p1)
f1 = ForceGroup.ForceGroup('Noise')
force1 = LinearNoiseForce(0.5, 0)
force1.setVectorMasks(0, 1, 1)
force1.setActive(1)
f1.addForce(force1)
self.f.addForceGroup(f1)
self.p0.setPoolSize(256)
self.p0.setBirthRate(0.050000000000000003)
self.p0.setLitterSize(24)
self.p0.setLitterSpread(8)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.factory.setLifespanBase(1.25)
self.p0.factory.setLifespanSpread(0.5)
self.p0.factory.setMassBase(4.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.factory.setInitialAngle(0.0)
self.p0.factory.setInitialAngleSpread(90.0)
self.p0.factory.enableAngularVelocity(1)
self.p0.factory.setAngularVelocity(500.0)
self.p0.factory.setAngularVelocitySpread(100.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
self.p0.renderer.setUserAlpha(1.0)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(1)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne)
self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 0.59999999999999998, Vec4(1.0, 1.0, 0.20000000000000001, 1.0), Vec4(0.80000000000000004, 0.59999999999999998, 0.25, 0.75), 1)
self.p0.renderer.getColorInterpolationManager().addLinear(0.59999999999999998, 1.0, Vec4(0.80000000000000004, 0.59999999999999998, 0.25, 0.75), Vec4(0.5, 0.25, 0.0, 0.0), 1)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, -5.0))
self.p1.setPoolSize(150)
self.p1.setBirthRate(0.01)
self.p1.setLitterSize(3)
self.p1.setLitterSpread(0)
self.p1.setSystemLifespan(0.0)
self.p1.setLocalVelocityFlag(1)
self.p1.setSystemGrowsOlderFlag(0)
self.p1.factory.setLifespanBase(1.0)
self.p1.factory.setLifespanSpread(0.0)
self.p1.factory.setMassBase(1.0)
self.p1.factory.setMassSpread(0.0)
self.p1.factory.setTerminalVelocityBase(400.0)
self.p1.factory.setTerminalVelocitySpread(0.0)
self.p1.factory.setInitialAngle(0.0)
self.p1.factory.setInitialAngleSpread(45.0)
self.p1.factory.enableAngularVelocity(0)
self.p1.factory.setFinalAngle(360.0)
self.p1.factory.setFinalAngleSpread(0.0)
self.p1.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
self.p1.renderer.setUserAlpha(1.0)
self.p1.renderer.setFromNode(self.card2)
self.p1.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p1.renderer.setXScaleFlag(1)
self.p1.renderer.setYScaleFlag(1)
self.p1.renderer.setAnimAngleFlag(1)
self.p1.renderer.setNonanimatedTheta(0.0)
self.p1.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p1.renderer.setAlphaDisable(0)
self.p1.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne)
self.p1.renderer.getColorInterpolationManager().addLinear(0.0, 0.5, Vec4(1.0, 1.0, 0.20000000000000001, 1.0), Vec4(0.80000000000000004, 0.59999999999999998, 0.25, 0.75), 1)
self.p1.renderer.getColorInterpolationManager().addLinear(0.5, 1.0, Vec4(0.80000000000000004, 0.59999999999999998, 0.25, 0.75), Vec4(0.5, 0.25, 0.0, 0.5), 1)
self.p1.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p1.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
self.p1.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
self.setEffectScale(self.effectScale)
def createTrack(self):
self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.10000000000000001), Func(self.p0.clearToInitial), Func(self.p1.setBirthRate, 0.02), Func(self.p1.clearToInitial), Func(self.f.start, self, self.particleDummy))
self.endEffect = Sequence(Func(self.p0.setBirthRate, 100), Func(self.p1.setBirthRate, 100), Wait(2.0), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(self.duration), self.endEffect)
def reSize(self, t):
if self.p1:
self.p1.emitter.setRadius(self.radius * t)
def setEffectScale(self, scale):
self.effectScale = scale
if self.p0:
self.p0.renderer.setInitialXScale(0.025000000000000001 * self.effectScale * self.cardScale)
self.p0.renderer.setFinalXScale(0.014999999999999999 * self.effectScale * self.cardScale)
self.p0.renderer.setInitialYScale(0.014999999999999999 * self.effectScale * self.cardScale)
self.p0.renderer.setFinalYScale(0.050000000000000003 * self.effectScale * self.cardScale)
self.p0.emitter.setAmplitude(self.effectScale)
self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, -1.5) * self.effectScale)
self.p0.emitter.setRadius(self.effectScale)
if self.p1:
self.p1.renderer.setInitialXScale(0.014999999999999999 * self.effectScale * self.cardScale)
self.p1.renderer.setFinalXScale(0.029999999999999999 * self.effectScale * self.cardScale)
self.p1.renderer.setInitialYScale(0.014999999999999999 * self.effectScale * self.cardScale)
self.p1.renderer.setFinalYScale(0.050000000000000003 * self.effectScale * self.cardScale)
self.p1.emitter.setAmplitude(self.effectScale)
self.p1.emitter.setOffsetForce(Vec3(0.0, 0.0, -3.0) * self.effectScale)
self.p1.emitter.setRadius(self.effectScale * 1.25)
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
| [
"[email protected]"
]
| |
8e28e993c80f61da18a42c1591380ee8d5027018 | 94d5ef47d3244950a0308c754e0aa55dca6f2a0e | /migrations/versions/e19ce0373a4f_made_degrees_and_personal_info_a_a_one_.py | 1cdef1768726407a573115deba478c710260bcc0 | []
| no_license | MUMT-IT/mis2018 | 9cbc7191cdc1bcd7e0c2de1e0586d8bd7b26002e | 69fabc0b16abfeba44173caa93d4f63fa79033fd | refs/heads/master | 2023-08-31T16:00:51.717449 | 2023-08-31T11:30:13 | 2023-08-31T11:30:13 | 115,810,883 | 5 | 5 | null | 2023-09-14T10:08:35 | 2017-12-30T17:06:00 | HTML | UTF-8 | Python | false | false | 2,377 | py | """made degrees and personal info a a one-to-many relationship
Revision ID: e19ce0373a4f
Revises: 7d048ab06595
Create Date: 2021-03-04 09:41:21.032186
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e19ce0373a4f'
down_revision = '7d048ab06595'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('eduqa_degrees',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('eduqa_programs',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('degree_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['degree_id'], ['eduqa_degrees.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('eduqa_curriculums',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('program_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['program_id'], ['eduqa_programs.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column(u'staff_edu_degree', sa.Column('personal_info_id', sa.Integer(), nullable=True))
op.add_column(u'staff_edu_degree', sa.Column('received_date', sa.Date(), nullable=True))
op.create_foreign_key(None, 'staff_edu_degree', 'staff_personal_info', ['personal_info_id'], ['id'])
op.drop_constraint(u'staff_personal_info_highest_degree_id_fkey', 'staff_personal_info', type_='foreignkey')
op.drop_column(u'staff_personal_info', 'highest_degree_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(u'staff_personal_info', sa.Column('highest_degree_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key(u'staff_personal_info_highest_degree_id_fkey', 'staff_personal_info', 'staff_edu_degree', ['highest_degree_id'], ['id'])
op.drop_constraint(None, 'staff_edu_degree', type_='foreignkey')
op.drop_column(u'staff_edu_degree', 'received_date')
op.drop_column(u'staff_edu_degree', 'personal_info_id')
op.drop_table('eduqa_curriculums')
op.drop_table('eduqa_programs')
op.drop_table('eduqa_degrees')
# ### end Alembic commands ###
| [
"[email protected]"
]
| |
d5de15191b5b65b8672942b474be1928172186e4 | 4806d5b15b45e30be16c857faaacaa278eba09d3 | /parse/parse_gansu_worker.py | a675f10751af6f727dfa48302b11fee243b2bc58 | []
| no_license | osenlin/gsxt_spider | 9ebc90b778ca1fe31f3339ddeccdac66808a1d96 | 6e75fe9802620284e8c19c6f23a9a90911b39016 | refs/heads/master | 2020-03-20T01:10:50.348913 | 2018-06-12T12:34:53 | 2018-06-12T12:34:53 | 137,068,844 | 5 | 9 | null | null | null | null | UTF-8 | Python | false | false | 28,729 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: [email protected]
@license: Apache Licence
@file: parse_base_worker.py
@time: 2017/2/3 17:32
"""
import traceback
from pyquery import PyQuery
from base.parse_base_worker import ParseBaseWorker
from common import util
from common.annual_field import *
from common.global_field import Model
from common.gsxt_field import *
# todo parsing is broken for 甘肃亚盛股份实业(集团)有限公司兴盛分公司: some attributes are skipped during parsing
class GsxtParseGanSuWorker(ParseBaseWorker):
def __init__(self, **kwargs):
ParseBaseWorker.__init__(self, **kwargs)
    # Basic information
def get_base_info(self, base_info):
page = self.get_crawl_page(base_info)
res = PyQuery(page, parser='html').find('.info_name').items()
base_info_dict = {}
money = util.get_match_value("toDecimal6\('", "'\);", page)
for item in res:
item_content = item.text().replace('•', '')
item_content = item_content.replace(':', ':')
part = item_content.split(':', 1)
k = GsModel.format_base_model(part[0].replace(' ', ''))
base_info_dict[k] = part[1].strip()
base_info_dict[GsModel.PERIOD] = u"{0}至{1}". \
format(base_info_dict.get(GsModel.PERIOD_FROM), base_info_dict.get(GsModel.PERIOD_TO))
reg_unit = base_info_dict.get(GsModel.REGISTERED_CAPITAL)
if reg_unit is not None:
base_info_dict[GsModel.REGISTERED_CAPITAL] = money + reg_unit
        # Shareholder information
try:
shareholder_info_dict = self.get_inline_shareholder_info(page)
except ValueError:
self.log.error('company:{0},error-part:shareholder_info_dict,error-info:{1}'.format(
base_info.get('company', u''), traceback.format_exc()))
shareholder_info_dict = {}
base_info_dict.update(shareholder_info_dict)
        # Change records
try:
change_info_dict = self.get_inline_change_info(page)
except ValueError:
self.log.error('company:{0},error-part:change_info_dict,error-info:{1}'.format(
base_info.get('company', u''), traceback.format_exc()))
change_info_dict = {}
base_info_dict.update(change_info_dict)
return base_info_dict
    # Shareholder information
@staticmethod
def get_inline_shareholder_info(page):
shareholder_info_dict = {}
shareholder_list = []
trs = PyQuery(page, parser='html').find('#gd_JSTab').find('tr').items()
for tr in trs:
tds = tr.find('td')
if tds is None or len(tds) < 2:
continue
share_model = {
GsModel.ShareholderInformation.SHAREHOLDER_NAME: tds.eq(1).text().replace(u'\\t', u''),
GsModel.ShareholderInformation.SUBSCRIPTION_AMOUNT: util.get_amount_with_unit(tds.eq(2).text()),
GsModel.ShareholderInformation.PAIED_AMOUNT: util.get_amount_with_unit(tds.eq(3).text()),
GsModel.ShareholderInformation.SUBSCRIPTION_DETAIL:
[{
GsModel.ShareholderInformation.SUBSCRIPTION_TYPE: tds.eq(4).text(),
GsModel.ShareholderInformation.SUBSCRIPTION_TIME: tds.eq(6).text(),
GsModel.ShareholderInformation.SUBSCRIPTION_PUBLISH_TIME: tds.eq(7).text(),
}],
GsModel.ShareholderInformation.PAIED_DETAIL:
[{
GsModel.ShareholderInformation.PAIED_TYPE: tds.eq(8).text(),
GsModel.ShareholderInformation.PAIED_TIME: tds.eq(10).text(),
GsModel.ShareholderInformation.PAIED_PUBLISH_TIME: tds.eq(11).text()
}]
}
shareholder_list.append(share_model)
if len(shareholder_list) > 0:
shareholder_info_dict[GsModel.SHAREHOLDER_INFORMATION] = shareholder_list
return shareholder_info_dict
    # Change records
@staticmethod
def get_inline_change_info(page):
change_info_dict = {}
change_records_list = []
trs = PyQuery(page, parser='html').find('#changeTab').find('tr').items()
for tr in trs:
tds = tr.find('td')
if len(tds) < 2:
continue
change_model = {
GsModel.ChangeRecords.CHANGE_ITEM: tds.eq(1).text(),
                    # strip the redundant wording
GsModel.ChangeRecords.BEFORE_CONTENT: util.format_content(tds.eq(2).text()),
GsModel.ChangeRecords.AFTER_CONTENT: util.format_content(tds.eq(3).text()),
                    # date formatting
GsModel.ChangeRecords.CHANGE_DATE: tds.eq(4).text()
}
change_records_list.append(change_model)
if len(change_records_list) > 0:
change_info_dict[GsModel.CHANGERECORDS] = change_records_list
return change_info_dict
    # Key personnel
def get_key_person_info(self, key_person_info):
"""
        :param key_person_info: the page-store dict, which contains the two page lists 'list' and 'detail'
            (at least one of the two must be present, otherwise the packet is treated as invalid);
            'list' normally holds the paginated listing pages while 'detail' holds the detail pages of list items.
            For the exact structure see the mongodb page store or the Model comments in common/global_field.py.
            Key personnel are normally stored in the 'list' pages, because they have no list/detail structure and need no 'detail' pages.
        :return: a dict following the gongshang (business registration) schema
"""
key_person_info_dict = {}
page = self.get_crawl_page(key_person_info)
items = PyQuery(page, parser='html').find('#per270').items()
key_person_list = []
for item in items:
spans = item.find('span')
if len(spans) < 2:
continue
key_person = {
GsModel.KeyPerson.KEY_PERSON_NAME: spans.eq(0).text().strip(),
GsModel.KeyPerson.KEY_PERSON_POSITION: spans.eq(1).text().strip()}
key_person_list.append(key_person)
if len(key_person_list) > 0:
key_person_info_dict[GsModel.KEY_PERSON] = key_person_list
return key_person_info_dict
    # Branch offices
def get_branch_info(self, branch_info):
branch_info_dict = {}
page = self.get_crawl_page(branch_info)
items = PyQuery(page, parser='html').find('#fzjg308').items()
branch_list = []
for item in items:
spans = item.find('span')
if len(spans) < 7:
continue
branch_model = {
GsModel.Branch.COMPAY_NAME: spans.eq(0).text(),
GsModel.Branch.CODE: spans.eq(3).text(),
                GsModel.Branch.REGISTERED_ADDRESS: spans.eq(6).text()  # to be confirmed
}
branch_list.append(branch_model)
if len(branch_list) > 0:
branch_info_dict[GsModel.BRANCH] = branch_list
return branch_info_dict
    # Capital contribution information
def get_contributive_info(self, con_info):
con_info_dict = {}
part_a_con = {}
part_b_con = {}
pages_list = self.get_crawl_page(con_info, True)
        # note on the for/else below: the else suite runs whenever the inner loop finishes without a break (see the for/else sketch after this class)
for page in pages_list:
trs = PyQuery(page.get(u'text', u''), parser='html').find('#invTab').find('tr').items()
'''
            note (original intent): if <tr> rows are found under #invTab, the loop below handles them (branch 1)
'''
for tr in trs:
tds = tr.find('td')
if len(tds) < 2:
continue
con_model = {
GsModel.ContributorInformation.SHAREHOLDER_NAME: tds.eq(1).text(),
GsModel.ContributorInformation.SHAREHOLDER_TYPE: tds.eq(2).text(),
GsModel.ContributorInformation.CERTIFICATE_TYPE: tds.eq(3).text(),
GsModel.ContributorInformation.CERTIFICATE_NO: tds.eq(4).text()
}
part_a_con[tds.eq(1).text().strip()] = con_model
else:
'''
                note (original intent): if no rows were found under #invTab, fall back to the <tr> rows under #tzrPageTab (branch 2); since the inner loop has no break, this else suite in fact always runs
'''
trs = PyQuery(page.get('text', ''), parser='html').find('#tzrPageTab').find('tr').items()
for tr in trs:
tds = tr.find('td')
if len(tds) < 2:
continue
con_model = {
GsModel.ContributorInformation.SHAREHOLDER_NAME: tds.eq(1).text(),
GsModel.ContributorInformation.SHAREHOLDER_TYPE: tds.eq(2).text()
}
part_a_con[con_model[GsModel.ContributorInformation.SHAREHOLDER_NAME]] = con_model
pages_detail = self.get_crawl_page(con_info, True, Model.type_detail)
if pages_detail is not None:
for page in pages_detail:
tables = PyQuery(page.get(u'text', u''), parser='html').find('.detailsList').items()
shareholder_name, sub_model = self._get_sharehold_detail(tables)
                shareholder_name = shareholder_name.replace(u'.', u'')
part_b_con[shareholder_name] = sub_model
con_list = []
for k_list, v_list in part_a_con.items():
v_list.update(part_b_con.get(k_list, {}))
con_list.append(v_list)
if len(con_list) > 0:
con_info_dict[GsModel.CONTRIBUTOR_INFORMATION] = con_list
return con_info_dict
    # Liquidation information
def get_liquidation_info(self, liquidation_info):
return {}
def get_chattel_mortgage_info_detail(self, onclick, detail_list):
result = dict()
if onclick is None or onclick.strip() == '':
return result
temp_list = onclick.split(u'\'')
if temp_list is None or len(temp_list) < 2:
return result
temp_list = temp_list[1].split(u'\'')
if temp_list is None or len(temp_list) <= 0:
return result
morreg_id = temp_list[0]
        # iterate over every crawled detail page
for detail in detail_list:
url = detail.get('url')
if not isinstance(url, basestring):
continue
if morreg_id not in url:
continue
text = detail.get('text')
if not isinstance(text, basestring) or text.strip() == u'':
continue
table_list = PyQuery(text, parser='html').find('.detailsList')
if table_list is None or table_list.length < 5:
raise FieldMissError
            # chattel mortgage registration information
td_list = table_list.eq(0).find('td')
cm_dict = dict()
result[GsModel.ChattelMortgageInfo.ChattelDetail.CHATTEL_MORTGAGE] = cm_dict
cm_dict[GsModel.ChattelMortgageInfo.ChattelDetail.ChattelMortgage.REGISTER_NUM] = td_list.eq(0).text()
cm_dict[GsModel.ChattelMortgageInfo.ChattelDetail.ChattelMortgage.REGISTER_DATE] = td_list.eq(1).text()
cm_dict[GsModel.ChattelMortgageInfo.ChattelDetail.ChattelMortgage.REGISTER_OFFICE] = td_list.eq(2).text()
            # mortgagee overview information
tr_list = table_list.eq(1).find('tr').items()
mps_list = list()
result[GsModel.ChattelMortgageInfo.ChattelDetail.MORTGAGE_PERSON_STATUS] = mps_list
for tr in tr_list:
td_list = tr.find('td')
if td_list is None or td_list.length < 5:
continue
item = dict()
item[GsModel.ChattelMortgageInfo.ChattelDetail.MortgagePersonStatus.MORTGAGE_PERSON_NAME] = td_list.eq(
1).text()
item[GsModel.ChattelMortgageInfo.ChattelDetail.MortgagePersonStatus.CERTIFICATE_TYPE] = td_list.eq(
2).text()
item[GsModel.ChattelMortgageInfo.ChattelDetail.MortgagePersonStatus.CERTIFICATE_NUM] = td_list.eq(
3).text()
item[GsModel.ChattelMortgageInfo.ChattelDetail.MortgagePersonStatus.ADDRESS] = td_list.eq(4).text()
mps_list.append(item)
            # secured claim overview information
td_list = table_list.eq(2).find('td')
gps_dict = dict()
result[GsModel.ChattelMortgageInfo.ChattelDetail.GUARANTEED_PERSON_STATUS] = gps_dict
gps_dict[GsModel.ChattelMortgageInfo.ChattelDetail.GuaranteedPersonStatus.KIND] = td_list.eq(0).text()
gps_dict[GsModel.ChattelMortgageInfo.ChattelDetail.GuaranteedPersonStatus.AMOUNT] = td_list.eq(1).text()
gps_dict[GsModel.ChattelMortgageInfo.ChattelDetail.GuaranteedPersonStatus.SCOPE] = td_list.eq(2).text()
gps_dict[GsModel.ChattelMortgageInfo.ChattelDetail.GuaranteedPersonStatus.PERIOD] = td_list.eq(3).text()
gps_dict[GsModel.ChattelMortgageInfo.ChattelDetail.GuaranteedPersonStatus.REMARK] = td_list.eq(4).text()
            # collateral overview information
tr_list = table_list.eq(3).find('tr').items()
gs_list = list()
result[GsModel.ChattelMortgageInfo.ChattelDetail.GUARANTEE_STATUS] = gs_list
for tr in tr_list:
td_list = tr.find('td')
if td_list is None or td_list.length < 5:
continue
item = dict()
item[GsModel.ChattelMortgageInfo.ChattelDetail.GuaranteeStatus.NAME] = td_list.eq(
1).text()
item[GsModel.ChattelMortgageInfo.ChattelDetail.GuaranteeStatus.AFFILIATION] = td_list.eq(
2).text()
item[GsModel.ChattelMortgageInfo.ChattelDetail.GuaranteeStatus.SITUATION] = td_list.eq(
3).text()
item[GsModel.ChattelMortgageInfo.ChattelDetail.GuaranteeStatus.REMARK] = td_list.eq(4).text()
gs_list.append(item)
            # change information
tr_list = table_list.eq(4).find('tr').items()
change_list = list()
result[GsModel.ChattelMortgageInfo.ChattelDetail.CHANGE_INFO] = change_list
for tr in tr_list:
td_list = tr.find('td')
if td_list is None or td_list.length < 3:
continue
item = dict()
item[GsModel.ChattelMortgageInfo.ChattelDetail.ChangeInfo.CHANGE_DATE] = td_list.eq(
1).text()
item[GsModel.ChattelMortgageInfo.ChattelDetail.ChangeInfo.CHANGE_CONTENT] = td_list.eq(
2).text()
break
return result
    # Chattel mortgage registration information
def get_chattel_mortgage_info(self, chattel_mortgage_info):
chattel_mortgage_info_dict = dict()
result_list = list()
        # record the list even when the table is empty
chattel_mortgage_info_dict[GsModel.CHATTEL_MORTGAGE_INFO] = result_list
detail_list = self.get_crawl_page(chattel_mortgage_info, multi=True, part=Model.type_detail)
page_text = self.get_crawl_page(chattel_mortgage_info)
if page_text is None:
return chattel_mortgage_info_dict
jq = PyQuery(page_text, parser='html')
move_tab = jq.find("#moveTab")
tr_list = move_tab.find('tr').items()
for tr in tr_list:
td_list = tr.find('td')
if td_list.length < 8:
continue
item = dict()
item[GsModel.ChattelMortgageInfo.REGISTER_NUM] = td_list.eq(1).text()
item[GsModel.ChattelMortgageInfo.REGISTER_DATE] = td_list.eq(2).text()
item[GsModel.ChattelMortgageInfo.REGISTER_OFFICE] = td_list.eq(3).text()
item[GsModel.ChattelMortgageInfo.CREDIT_AMOUNT] = util.get_amount_with_unit(td_list.eq(4).text())
item[GsModel.ChattelMortgageInfo.STATUS] = td_list.eq(5).text()
item[GsModel.ChattelMortgageInfo.PUBLISH_DATE] = td_list.eq(6).text()
item[GsModel.ChattelMortgageInfo.CHATTEL_DETAIL] = self.get_chattel_mortgage_info_detail(
td_list.eq(7).find('a').attr('onclick'), detail_list)
if len(item) > 0:
result_list.append(item)
return chattel_mortgage_info_dict
    # Inclusion in the abnormal business operations list
def get_abnormal_operation_info(self, abnormal_operation_info):
abnormal_operation_info_dict = dict()
result_list = list()
        # record the list even when the table is empty
abnormal_operation_info_dict[GsModel.ABNORMAL_OPERATION_INFO] = result_list
page_text = self.get_crawl_page(abnormal_operation_info)
if page_text is None:
return abnormal_operation_info_dict
jq = PyQuery(page_text, parser='html')
move_tab = jq.find("#excpTab")
tr_list = move_tab.find('tr').items()
for tr in tr_list:
td_list = tr.find('td')
if td_list.length < 7:
continue
item = dict()
item[GsModel.AbnormalOperationInfo.ENROL_REASON] = td_list.eq(1).text()
item[GsModel.AbnormalOperationInfo.ENROL_DATE] = td_list.eq(2).text()
item[GsModel.AbnormalOperationInfo.ENROL_DECIDE_OFFICE] = td_list.eq(3).text()
item[GsModel.AbnormalOperationInfo.REMOVE_REASON] = td_list.eq(4).text()
item[GsModel.AbnormalOperationInfo.REMOVE_DATE] = td_list.eq(5).text()
item[GsModel.AbnormalOperationInfo.REMOVE_DECIDE_OFFICE] = td_list.eq(6).text()
if len(item) > 0:
result_list.append(item)
return abnormal_operation_info_dict
    # Equity pledge registration information - detail
def get_equity_pledged_info_detail(self, onclick, detail_list):
return {}
    # Equity pledge registration information
def get_equity_pledged_info(self, equity_pledged_info):
equity_pledged_info_dict = dict()
result_list = list()
        # record the list even when the table is empty
equity_pledged_info_dict[GsModel.EQUITY_PLEDGED_INFO] = result_list
detail_list = self.get_crawl_page(equity_pledged_info, multi=True, part=Model.type_detail)
page_text = self.get_crawl_page(equity_pledged_info)
if page_text is None:
return equity_pledged_info_dict
jq = PyQuery(page_text, parser='html')
move_tab = jq.find("#stockTab")
tr_list = move_tab.find('tr').items()
for tr in tr_list:
td_list = tr.find('td')
if td_list.length < 11:
continue
item = dict()
item[GsModel.EquityPledgedInfo.REGISTER_NUM] = td_list.eq(1).text()
item[GsModel.EquityPledgedInfo.MORTGAGOR] = td_list.eq(2).text()
item[GsModel.EquityPledgedInfo.MORTGAGOR_NUM] = td_list.eq(3).text()
item[GsModel.EquityPledgedInfo.PLEDGE_STOCK_AMOUNT] = util.get_amount_with_unit(td_list.eq(4).text())
item[GsModel.EquityPledgedInfo.PLEDGEE] = td_list.eq(5).text()
item[GsModel.EquityPledgedInfo.PLEDGEE_NUM] = td_list.eq(6).text()
item[GsModel.EquityPledgedInfo.REGISTER_DATE] = td_list.eq(7).text()
item[GsModel.EquityPledgedInfo.STATUS] = td_list.eq(8).text()
item[GsModel.EquityPledgedInfo.PUBLISH_DATE] = td_list.eq(9).text()
item[GsModel.EquityPledgedInfo.EQUITY_PLEDGED_DETAIL] = self.get_equity_pledged_info_detail(
td_list.eq(10).find('a').attr('onclick'), detail_list)
if len(item) > 0:
result_list.append(item)
return equity_pledged_info_dict
    # Annual report information
def get_annual_info(self, annual_item_list):
return ParseGanSuAnnual(annual_item_list, self.log).get_result()
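# --- Illustrative sketch (not part of the original module) -------------------
# get_contributive_info above relies on Python's for/else: the else suite of a
# for loop runs whenever the loop finishes without hitting a break, including
# when the iterable is empty, so a loop without a break always reaches it.
# A minimal self-contained demonstration of that rule:
def _for_else_demo(rows):
    for row in rows:
        if row is None:
            break  # a break is the only thing that skips the else suite
    else:
        return 'loop ended without break (true even for an empty iterable)'
    return 'loop was interrupted by a break'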
class ParseGanSuAnnual(object):
def __init__(self, annual_item_list, log):
self.annual_info_dict = {}
if not isinstance(annual_item_list, list) or len(annual_item_list) <= 0:
return
self.log = log
self.annual_item_list = annual_item_list
        # dispatch the parsing
self.dispatch()
def dispatch(self):
if self.annual_item_list is None:
raise IndexError("未抓取到相关网页,或者抓取网页失败")
if len(self.annual_item_list) <= 0:
return {}
annual_item = self.annual_item_list[0]
page = annual_item.get(u'text', u'')
if page is None or len(page) == 0:
return {}
py_all = PyQuery(page, parser='html')
        # basic information
info = py_all.find('.info_name').items()
annual_base_info = self.get_annual_base_info(info)
self.annual_info_dict.update(annual_base_info)
        # annual report: enterprise asset status information
tds = py_all.find('#zczkId')
asset_model = self.get_annual_asset_info(tds, py_all)
self.annual_info_dict[AnnualReports.ENTERPRISE_ASSET_STATUS_INFORMATION] = asset_model
divs = py_all.find('.webStyle.anchebotLine').items()
for div in divs:
            # websites / web shops
if u'网址' in div.text():
py_websites = div.find('#webInfo').items()
lst_websites = self.get_annual_web_site_info(py_websites)
self.annual_info_dict[AnnualReports.WEBSITES] = lst_websites
            # outbound investment
elif u'注册号' in div.text():
py_inv_company = div.find('#webInfo').items()
lst_inv = self.get_annual_inv_info(py_inv_company)
self.annual_info_dict[AnnualReports.INVESTED_COMPANIES] = lst_inv
        # outbound guarantees
py_share_hold = py_all.find('#dBaoAnrepTab').find('tr').items()
lst_out_guaranty = self.get_annual_out_guarantee_info(py_share_hold)
self.annual_info_dict[AnnualReports.OUT_GUARANTEE_INFO] = lst_out_guaranty
        # shareholder capital contribution information
py_share_hold = py_all.find('#gdczAnrepTab').find('tr').items()
lst_share_hold = self.get_annual_share_hold_info(py_share_hold)
self.annual_info_dict[AnnualReports.SHAREHOLDER_INFORMATION] = lst_share_hold
        # equity changes
py_edit_shareholding_change = py_all.find('#gqAlertTab').find('tr').items()
lst_edit_shareholding_change = self.get_annual_edit_shareholding_change(py_edit_shareholding_change)
self.annual_info_dict[AnnualReports.EDIT_SHAREHOLDING_CHANGE_INFOS] = lst_edit_shareholding_change
        # modification records
py_edit_change = py_all.find('#modifyTab').find('tr').items()
lst_edit_change = self.get_annual_edit_change(py_edit_change)
self.annual_info_dict[AnnualReports.EDIT_CHANGE_INFOS] = lst_edit_change
    # annual report: basic information
@staticmethod
def get_annual_base_info(py_items):
annual_base_info_dict = {}
for item in py_items:
item_content = item.text().replace(u'•', u'').replace(u':', u':')
part = item_content.split(u':', 2)
k = AnnualReports.format_base_model(part[0].strip())
annual_base_info_dict[k] = part[1].strip()
return annual_base_info_dict
    # annual report: website information
@staticmethod
def get_annual_web_site_info(py_websites):
lst_web = []
for py_web in py_websites:
py_items = py_web.find('p').items()
web_item = {}
for item in py_items:
if len(item.find('span')) == 1:
web_item[AnnualReports.WebSites.NAME] = item.text()
else:
item_content = item.text().replace(u'·', u'')
part = item_content.split(u':', 2)
k = AnnualReports.format_website_model(part[0].strip())
web_item[k] = part[1].strip()
lst_web.append(web_item)
return lst_web
    # annual report: enterprise asset status information
@staticmethod
def get_annual_asset_info(table_body, py_all):
if len(table_body) <= 0:
return {}
model = {}
lst_value = table_body.find('td').text().split(' ')
lst_title = table_body.find('th').text().split(' ')
if lst_title[0] == '':
ent_body = py_all.find('#entZczk')
lst_value = ent_body.find('td').text().split(' ')
lst_title = ent_body.find('th').text().split(' ')
map_title_value = zip(lst_title, lst_value)
for k_title, v_value in map_title_value:
k = AnnualReports.format_asset_model(k_title)
model[k] = v_value
return model
    # annual report: outbound investment information
@staticmethod
def get_annual_inv_info(py_inv_company):
lst_inv = []
for py_inv_item in py_inv_company:
inv_item = {}
ps_items = py_inv_item.find('p').items()
for item in ps_items:
if len(item.find('span')) == 1:
inv_item[AnnualReports.InvestedCompanies.COMPANY_NAME] = item.text()
else:
item_content = item.text().replace(u'·', u'')
part = item_content.split(u':', 2)
inv_item[AnnualReports.InvestedCompanies.CODE] = part[1].strip()
lst_inv.append(inv_item)
return lst_inv
    # annual report: outbound guarantee parsing method
@staticmethod
def get_annual_out_guarantee_info(py_items):
lst = []
for trs in py_items:
tds = trs.find('td')
if tds.text() == '':
continue
out_guarantee_model = {
AnnualReports.OutGuaranteeInfo.CREDITOR: tds.eq(1).text(),
AnnualReports.OutGuaranteeInfo.OBLIGOR: tds.eq(2).text(),
AnnualReports.OutGuaranteeInfo.DEBT_TYPE: tds.eq(3).text(),
AnnualReports.OutGuaranteeInfo.DEBT_AMOUNT: tds.eq(4).text(),
AnnualReports.OutGuaranteeInfo.PERFORMANCE_PERIOD: tds.eq(5).text(),
AnnualReports.OutGuaranteeInfo.GUARANTEE_PERIOD: tds.eq(6).text(),
AnnualReports.OutGuaranteeInfo.GUARANTEE_TYPE: tds.eq(7).text()
}
lst.append(out_guarantee_model)
return lst
    # annual report: shareholder contribution information (client-side pagination)
@staticmethod
def get_annual_share_hold_info(gdcz_item):
lst = []
for trs in gdcz_item:
tds = trs.find('td')
if tds.text() == '':
continue
share_model = {
AnnualReports.ShareholderInformation.SHAREHOLDER_NAME: tds.eq(1).text(),
AnnualReports.ShareholderInformation.SUBSCRIPTION_AMOUNT: util.get_amount_with_unit(tds.eq(2).text()),
AnnualReports.ShareholderInformation.SUBSCRIPTION_TIME: tds.eq(3).text(),
AnnualReports.ShareholderInformation.SUBSCRIPTION_TYPE: tds.eq(4).text(),
AnnualReports.ShareholderInformation.PAIED_AMOUNT: util.get_amount_with_unit(tds.eq(5).text()),
AnnualReports.ShareholderInformation.PAIED_TIME: tds.eq(6).text(),
AnnualReports.ShareholderInformation.PAIED_TYPE: tds.eq(7).text()
}
lst.append(share_model)
return lst
    # annual report: equity change parsing method
@staticmethod
def get_annual_edit_shareholding_change(py_items):
lst = []
for trs in py_items:
tds = trs.find('td')
if tds.text() == '':
continue
change_model = {
AnnualReports.EditShareholdingChangeInfos.SHAREHOLDER_NAME: tds.eq(1).text(),
AnnualReports.EditShareholdingChangeInfos.BEFORE_CONTENT: tds.eq(2).text(),
AnnualReports.EditShareholdingChangeInfos.AFTER_CONTENT: tds.eq(3).text(),
AnnualReports.EditShareholdingChangeInfos.CHANGE_DATE: tds.eq(4).text()
}
lst.append(change_model)
return lst
def get_result(self):
return self.annual_info_dict
    # annual report: modification records
@staticmethod
def get_annual_edit_change(py_items):
lst = []
for trs in py_items:
tds = trs.find('td')
if tds.text().strip() == u'':
continue
edit_model = {
AnnualReports.EditChangeInfos.CHANGE_ITEM: tds.eq(1).text(),
AnnualReports.EditChangeInfos.BEFORE_CONTENT: tds.eq(2).text(),
AnnualReports.EditChangeInfos.AFTER_CONTENT: tds.eq(3).text(),
AnnualReports.EditChangeInfos.CHANGE_DATE: tds.eq(4).text()
}
lst.append(edit_model)
return lst
| [
"[email protected]"
]
| |
3cce5f338f335566330177c42858bb6eb0baddd8 | 80e6e31054fe9105d2c26be7aac53c4cd6a4a33f | /scripts/spyder/amac_company.py | a1708762c46b6687a0c4ff41381e2b7ae1bca9d9 | []
| no_license | alionishere/learn_python | 8a7f6dc7d754a357d4cb720f4bc0d5c3e6e5e895 | 832b8e0579da0b7ab37e815be10204f8de1ad22d | refs/heads/master | 2021-06-24T11:02:05.111027 | 2021-06-23T08:47:06 | 2021-06-23T08:47:06 | 223,834,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,079 | py | # -*- coding: utf-8 -*-
import requests,io,sys,time
from bs4 import BeautifulSoup
import json,urllib
import cx_Oracle
from lxml import etree
import threading
from queue import Queue
def conndb():
username="kingstar"
userpwd="kingstar"
host="10.29.7.211"
port=1521
dbname="siddc01"
dsn=cx_Oracle.makedsn(host, port, dbname)
db=cx_Oracle.connect(username, userpwd, dsn)
return db
def ExecDB(sql):
db=conndb()
cursor = db.cursor()
cursor.execute(sql)
db.commit()
cursor.close()
db.close()
#return result
def GetJson(url):
payload = {}
headers = {'content-type': 'application/json'}
ret = requests.post(url, data=json.dumps(payload), headers=headers)
ret.encoding = 'utf-8'
ret = ret.text
text = json.loads(ret)
return text
def getHtml(url):
b = False
while not b:
try:
page=urllib.request.urlopen(url)
html=page.read().decode(encoding='utf-8',errors='strict')
page.close()
b = True
except :
pass
return html
def ListTemp(lists):
if lists:
member = lists[0].xpath('string(.)').replace('\n','').replace(' ','').replace(' ','')
else :
member = 'None'
return member
def Getcontent(url):
text2 = GetJson(url)
content = text2['content']
return content
def Gettxt(text):
txt=' '.join(text.split())
return txt
def Trysql(sql):
try:
sql = ExecDB(sql.encode("GB18030"))
#print(sql)
except:
#print("sql:",sql)
pass
def Truncate():
truncate_1 = "truncate table amac_smjj_company"
truncate_1 = ExecDB(truncate_1)
def GetQ():
url = 'http://gs.amac.org.cn/amac-infodisc/api/pof/manager?rand=0.3904312621164139&page=0&size=20'
global q
q = Queue()
text = GetJson(url)
n = text['totalPages']
for i in range(n):
url2 = 'http://gs.amac.org.cn/amac-infodisc/api/pof/manager?rand=0.3904312621164139&page='+str(i)+'&size=20'
q.put(url2)
return q
def Thread():
threads=[]
for code in range(20):
thread=threading.Thread(target=Work)
threads.append(thread)
for t in threads:
        t.start()  # start a thread
for t in threads:
        t.join()  # wait for each thread to finish
def Work():
while not q.empty():
print(q.qsize())
url3 = q.get()
try :
txt = GetJson(url3)
print(txt)
m = txt['numberOfElements']
content = Getcontent(url3)
for x in range(m):
dicts = content[x]
managerName =str(dicts['managerName'])
registerNo =str(dicts['registerNo'])
registerAddress =str(dicts['registerAddress'])
officeAddress =str(dicts['officeAddress'])
sql = "insert into amac_smjj_company(managerName,registerNo,registerAddress,officeAddress)"\
"values('"+managerName+"','"+registerNo+"','"+registerAddress+"','"+officeAddress+"')"
# #print(sql)
sql = Trysql(sql)
# html_doc1 = getHtml(fundurl)
# html = etree.HTML(html_doc1)
# jjmc = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[1]/td[2]'))).strip().replace('\'','\'\'')
# jjbm = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[2]/td[2]'))).strip()
# clsj = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[3]/td[2]'))).strip().replace('-','')
# basj = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[4]/td[2]'))).strip().replace('-','')
# jjbajd = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[5]/td[2]'))).strip().replace('-','')
# jjlx = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[6]/td[2]'))).strip()
# bz = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[7]/td[2]'))).strip()
# jjglr = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[8]/td[2]'))).strip()
# gllx = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[9]/td[2]'))).strip()
# tgr = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[10]/td[2]'))).strip()
# yzzt = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[11]/td[2]'))).strip()
# jjxxzhgxsj = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[12]/td[2]'))).strip().replace('-','')
# jjxhtbts = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[13]/td[2]'))).strip()
# dyyb = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[15]/td[2]'))).strip()
# bnb = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[16]/td[2]'))).strip()
# nb = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[17]/td[2]'))).strip()
# jb = str(ListTemp(html.xpath('/html/body/div/div[2]/div/table/tbody/tr[18]/td[2]'))).strip()
# sql2 = "insert into amac_smjj_fund(jjmc,jjbm,clsj,basj,jjbajd,jjlx,bz,jjglr,gllx,tgr,"\
# "yzzt,jjxxzhgxsj,jjxhtbts,dyyb,bnb,nb,jb) "\
# "values('"+jjmc+"','"+jjbm+"','"+clsj+"','"+basj+"','"+jjbajd+"','"+jjlx+"','"+bz\
# +"','"+jjglr+"','"+gllx+"','"+tgr+"','"+yzzt+"','"+jjxxzhgxsj+"','"+jjxhtbts+"','"\
# +dyyb+"','"+bnb+"','"+nb+"','"+jb+"')"
# #print(sql2)
# sql2 = Trysql(sql2)
# html_doc2 = getHtml(managerurl)
# html2 = etree.HTML(html_doc2)
# jgcxxx = str(Gettxt(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[1]/td[2]')))).strip().replace('\'','\'\'')
# jjglrch = str(ListTemp(html2.xpath('//*[@id="complaint2"]'))).replace(' ','').strip().replace('\'','\'\'')
# jjglrzh = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[4]/td[2]'))).strip().replace('\'','\'\'')
# djbh = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[5]/td[2]'))).strip()
# zzjgdm = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[6]/td[2]'))).strip()
# djsj = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[7]/td[2]'))).strip().replace('-','')
# clsj = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[7]/td[4]'))).strip().replace('-','')
# zcdz = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[8]/td[2]'))).strip()
# bgdz = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[9]/td[2]'))).strip()
# zczb = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[10]/td[2]'))).strip()
# sjzb = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[10]/td[4]'))).strip()
# qyxz = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[11]/td[2]'))).strip()
# zczbsjbl = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[11]/td[4]'))).strip()
# gljjzylb = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[12]/td[2]'))).strip()
# sqqtywlx = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[12]/td[4]'))).strip()
# ygrs = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[13]/td[2]'))).strip()
# jgwz = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[13]/td[4]'))).strip()
# sfwhy = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[15]/td[2]'))).strip()
# if sfwhy == '是' :
# dqhylx = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[16]/td[2]'))).strip()
# rhsj = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[16]/td[4]'))).strip().replace('-','')
# flyjszt = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[18]/td[2]'))).strip()
# if flyjszt == '办结':
# lsswsmc = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[19]/td[2]'))).strip().replace('\'','\'\'')
# lsxm = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[20]/td[2]'))).strip()
# fddbr = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[22]/td[2]'))).strip()
# sfycyzg = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[23]/td[2]'))).strip()
# zgqdfs = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[23]/td[4]'))).strip()
# gzll = str(Gettxt(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[24]/td[2]')))).strip().replace('\'','\'\'')
# ggqk = str(Gettxt(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[25]/td[2]')))).strip().replace('\'','\'\'')
# jgxxzhgxsj = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[30]/td[2]'))).strip().replace('-','')
# tbtsxx = str(ListTemp(html2.xpath('//*[@id="specialInfos"]'))).strip()
# else :
# lsswsmc = ''
# lsxm = ''
# fddbr = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[20]/td[2]'))).strip()
# sfycyzg = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[21]/td[2]'))).strip()
# zgqdfs = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[21]/td[4]'))).strip()
# gzll = str(Gettxt(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[22]/td[2]')))).strip().replace('\'','\'\'')
# ggqk = str(Gettxt(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[23]/td[2]')))).strip().replace('\'','\'\'')
# jgxxzhgxsj = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[28]/td[2]'))).strip().replace('-','')
# tbtsxx = str(ListTemp(html2.xpath('//*[@id="specialInfos"]'))).strip()
# else:
# dqhylx = ''
# rhsj = ''
# flyjszt = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[17]/td[2]'))).strip()
# if flyjszt == '办结' :
# lsswsmc = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[18]/td[2]'))).strip().replace('\'','\'\'')
# lsxm = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[19]/td[2]'))).strip()
# fddbr = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[21]/td[2]'))).strip()
# sfycyzg = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[22]/td[2]'))).strip()
# zgqdfs = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[22]/td[4]'))).strip()
# gzll = str(Gettxt(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[23]/td[2]')))).strip().replace('\'','\'\'')
# ggqk = str(Gettxt(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[24]/td[2]')))).strip().replace('\'','\'\'')
# jgxxzhgxsj = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[29]/td[2]'))).strip().replace('-','')
# tbtsxx = str(ListTemp(html2.xpath('//*[@id="specialInfos"]'))).strip()
# else :
# lsswsmc = ''
# lsxm = ''
# fddbr = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[19]/td[2]'))).strip()
# sfycyzg = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[20]/td[2]'))).strip()
# zgqdfs = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[20]/td[4]'))).strip()
# gzll = str(Gettxt(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[21]/td[2]')))).strip().replace('\'','\'\'')
# ggqk = str(Gettxt(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[22]/td[2]')))).strip().replace('\'','\'\'')
# jgxxzhgxsj = str(ListTemp(html2.xpath('/html/body/div/div[2]/div/table/tbody/tr[27]/td[2]'))).strip().replace('-','')
# tbtsxx = str(ListTemp(html2.xpath('//*[@id="specialInfos"]'))).strip()
# sql3 = "declare \n"\
# " gzll_v clob; \n"\
# " ggqk_v clob; \n"\
# "begin \n"\
# " gzll_v := '"+gzll+"';\n"\
# " ggqk_v := '"+ggqk+"';\n"\
# " insert into amac_smjj_manager (jgcxxx,jjglrch,jjglrzh,djbh,zzjgdm,djsj,clsj,zcdz,bgdz,zczb,sjzb,qyxz,zczbsjbl,"\
# "gljjzylb,sqqtywlx,ygrs,jgwz,sfwhy,dqhylx,rhsj,flyjszt,lsswsmc,lsxm,fddbr,sfycyzg,zgqdfs,gzll,ggqk,jgxxzhgxsj,tbtsxx) "\
# "values('"+jgcxxx+"','"+jjglrch+"','"+jjglrzh+"','"+djbh+"','"+zzjgdm+"','"+djsj+"','"+clsj+"','"+zcdz+"','"\
# +bgdz+"','"+zczb+"','"+sjzb+"','"+qyxz+"','"+zczbsjbl+"','"+gljjzylb+"','"+sqqtywlx+"','"+ygrs+"','"+jgwz+"','"\
# +sfwhy+"','"+dqhylx+"','"+rhsj+"','"+flyjszt+"','"+lsswsmc+"','"+lsxm+"','"+fddbr+"','"+sfycyzg+"','"+zgqdfs\
# +"',gzll_v,ggqk_v,'"+jgxxzhgxsj+"','"+tbtsxx+"');\n"\
# "end;"
# sql3 = Trysql(sql3)
except :
#print("ERR: "+url3+"\n")
q.put(url3)
def main():
Truncate()
GetQ()
#Thread()
Work()
if __name__ == '__main__':
start = time.time()
print(start)
main()
end = time.time()
print(end)
m, s = divmod(end-start, 60)
h, m = divmod(m, 60)
print("运行时长:%02d:%02d:%02d" % (h, m, s))
| [
"[email protected]"
]
| |
f5cb07d8502e20680654e1356fdb1f556e65edf0 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/ad_group_extension_setting_service/client.py | 751f45c7d71aa3e7c322a037ec62396c7f357549 | [
"Apache-2.0"
]
| permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,859 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v6.enums.types import extension_setting_device
from google.ads.googleads.v6.enums.types import extension_type
from google.ads.googleads.v6.resources.types import ad_group_extension_setting
from google.ads.googleads.v6.services.types import ad_group_extension_setting_service
from google.rpc import status_pb2 as status # type: ignore
from .transports.base import AdGroupExtensionSettingServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AdGroupExtensionSettingServiceGrpcTransport
class AdGroupExtensionSettingServiceClientMeta(type):
"""Metaclass for the AdGroupExtensionSettingService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[AdGroupExtensionSettingServiceTransport]]
_transport_registry['grpc'] = AdGroupExtensionSettingServiceGrpcTransport
def get_transport_class(cls,
label: str = None,
) -> Type[AdGroupExtensionSettingServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AdGroupExtensionSettingServiceClient(metaclass=AdGroupExtensionSettingServiceClientMeta):
"""Service to manage ad group extension settings."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
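    # Illustrative note (not part of the generated client): per the docstring
    # and regex above, the conversion maps, for example,
    #   "googleads.googleapis.com"         -> "googleads.mtls.googleapis.com"
    #   "googleads.sandbox.googleapis.com" -> "googleads.mtls.sandbox.googleapis.com"
    # while endpoints that are already mTLS, or that are not *.googleapis.com
    # hosts, are returned unchanged.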
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupExtensionSettingServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AdGroupExtensionSettingServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
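    # Illustrative usage sketch (not part of the generated code; assumes a valid
    # service account key file named "service-account.json" exists):
    #
    #   client = AdGroupExtensionSettingServiceClient.from_service_account_file(
    #       "service-account.json")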
@property
def transport(self) -> AdGroupExtensionSettingServiceTransport:
"""Return the transport used by the client instance.
Returns:
AdGroupExtensionSettingServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def ad_group_path(customer_id: str,ad_group_id: str,) -> str:
"""Return a fully-qualified ad_group string."""
return "customers/{customer_id}/adGroups/{ad_group_id}".format(customer_id=customer_id, ad_group_id=ad_group_id, )
@staticmethod
def parse_ad_group_path(path: str) -> Dict[str,str]:
"""Parse a ad_group path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/adGroups/(?P<ad_group_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def ad_group_extension_setting_path(customer_id: str,ad_group_id: str,extension_type: str,) -> str:
"""Return a fully-qualified ad_group_extension_setting string."""
return "customers/{customer_id}/adGroupExtensionSettings/{ad_group_id}~{extension_type}".format(customer_id=customer_id, ad_group_id=ad_group_id, extension_type=extension_type, )
@staticmethod
def parse_ad_group_extension_setting_path(path: str) -> Dict[str,str]:
"""Parse a ad_group_extension_setting path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/adGroupExtensionSettings/(?P<ad_group_id>.+?)~(?P<extension_type>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def extension_feed_item_path(customer_id: str,feed_item_id: str,) -> str:
"""Return a fully-qualified extension_feed_item string."""
return "customers/{customer_id}/extensionFeedItems/{feed_item_id}".format(customer_id=customer_id, feed_item_id=feed_item_id, )
@staticmethod
def parse_extension_feed_item_path(path: str) -> Dict[str,str]:
"""Parse a extension_feed_item path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/extensionFeedItems/(?P<feed_item_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, AdGroupExtensionSettingServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the ad group extension setting service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AdGroupExtensionSettingServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AdGroupExtensionSettingServiceTransport):
# transport is a AdGroupExtensionSettingServiceTransport instance.
if credentials:
raise ValueError('When providing a transport instance, '
'provide its credentials directly.')
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AdGroupExtensionSettingServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_ad_group_extension_setting(self,
request: ad_group_extension_setting_service.GetAdGroupExtensionSettingRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_extension_setting.AdGroupExtensionSetting:
r"""Returns the requested ad group extension setting in
full detail.
Args:
request (:class:`google.ads.googleads.v6.services.types.GetAdGroupExtensionSettingRequest`):
The request object. Request message for
[AdGroupExtensionSettingService.GetAdGroupExtensionSetting][google.ads.googleads.v6.services.AdGroupExtensionSettingService.GetAdGroupExtensionSetting].
resource_name (:class:`str`):
Required. The resource name of the ad
group extension setting to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.resources.types.AdGroupExtensionSetting:
An ad group extension setting.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a ad_group_extension_setting_service.GetAdGroupExtensionSettingRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_group_extension_setting_service.GetAdGroupExtensionSettingRequest):
request = ad_group_extension_setting_service.GetAdGroupExtensionSettingRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_ad_group_extension_setting]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('resource_name', request.resource_name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def mutate_ad_group_extension_settings(self,
request: ad_group_extension_setting_service.MutateAdGroupExtensionSettingsRequest = None,
*,
customer_id: str = None,
operations: Sequence[ad_group_extension_setting_service.AdGroupExtensionSettingOperation] = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ad_group_extension_setting_service.MutateAdGroupExtensionSettingsResponse:
r"""Creates, updates, or removes ad group extension
settings. Operation statuses are returned.
Args:
request (:class:`google.ads.googleads.v6.services.types.MutateAdGroupExtensionSettingsRequest`):
The request object. Request message for
[AdGroupExtensionSettingService.MutateAdGroupExtensionSettings][google.ads.googleads.v6.services.AdGroupExtensionSettingService.MutateAdGroupExtensionSettings].
customer_id (:class:`str`):
Required. The ID of the customer
whose ad group extension settings are
being modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (:class:`Sequence[google.ads.googleads.v6.services.types.AdGroupExtensionSettingOperation]`):
Required. The list of operations to
perform on individual ad group extension
settings.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.services.types.MutateAdGroupExtensionSettingsResponse:
Response message for an ad group
extension setting mutate.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([customer_id, operations]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a ad_group_extension_setting_service.MutateAdGroupExtensionSettingsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, ad_group_extension_setting_service.MutateAdGroupExtensionSettingsRequest):
request = ad_group_extension_setting_service.MutateAdGroupExtensionSettingsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.mutate_ad_group_extension_settings]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('customer_id', request.customer_id),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
__all__ = (
'AdGroupExtensionSettingServiceClient',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
0f97f6497d711f09d33f01461d992e7caa12c186 | ed32eb1eb0a328a4ffe89e178fc4987470f333cd | /module/multi_process/multi_process_data_share_queue.py | 5a0bf787d49a21a07716851ed7ecdbf5bd202769 | []
| no_license | xiaoyaojjian/py_learn | c6f5bdf31bcebf29dd914e81e6be9305a61265cc | 95e494ea823d2074a05c1c2a49595002a1576093 | refs/heads/master | 2020-12-05T23:22:11.017066 | 2016-09-08T01:13:08 | 2016-09-08T01:13:08 | 67,654,055 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | """
Use the Queue from the multiprocessing module to share data between processes.
"""
from multiprocessing import Process, Queue
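# multiprocessing.Queue is process-safe: objects put() in one process are pickled and
# sent over a pipe, so get() in another process sees them. That is what lets the parent
# and the five worker processes below exchange data without sharing memory directly.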
def fun(q, n):
q.put(['hi, ', n])
if __name__ == '__main__':
q = Queue()
q.put('Ao')
for i in range(5):
p = Process(target=fun, args=(q, i))
p.start()
    # q.get() blocks once the queue is empty, so read exactly the six items
    # that were put (one by the parent, five by the child processes).
    for _ in range(6):
        print(q.get()) | [
"[email protected]"
]
| |
a811d153e337706d515599bbb07ff549b3e288e1 | b0f45a16f34ff84e217ff20cc06f1e8280459504 | /antgo/measures/matting_task.py | b4d89c343881b9e68b17ca02028d5a8540f7ccae | []
| no_license | zhaoqike/antgo | c41dd4b8bc3e969f6008a6c17f0b44d0fe4a8eae | c8a62b2567f62db15f26c75dcc2191cb69f392ab | refs/heads/master | 2021-07-18T17:37:58.652112 | 2017-09-12T01:19:15 | 2017-09-12T01:19:15 | 102,823,416 | 0 | 0 | null | 2017-09-12T08:04:20 | 2017-09-08T05:57:28 | Python | UTF-8 | Python | false | false | 3,153 | py | # encoding=utf-8
# @Time : 17-7-25
# @File : matting_task.py
# @Author :
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import numpy as np
from antgo.task.task import *
from antgo.measures.base import *
from antgo.dataflow.common import *
from antgo.measures.error import *
class AntSADMatting(AntMeasure):
def __init__(self, task):
super(AntSADMatting, self).__init__(task, 'MATTING-SAD')
assert (task.task_type == 'MATTING')
self.is_support_rank = True
def eva(self, data, label):
if label is not None:
data = zip(data, label)
count = 0
sad = 0.0
for predict, gt in data:
assert(len(predict.shape) == 2)
assert(len(gt.shape) == 2)
sad += np.sum(np.abs(predict - gt))
count += 1
val = sad / count
return {'statistic':{'name':self.name, 'value':[{'name':self.name, 'value': val, 'type': 'SCALAR'}]}}
class AntMSEMatting(AntMeasure):
def __init__(self, task):
super(AntMSEMatting, self).__init__(task, 'MATTING-MSE')
assert (task.task_type == 'MATTING')
self.is_support_rank = True
def eva(self, data, label):
if label is not None:
data = zip(data, label)
count = 0
res = 0.0
for predict, gt in data:
assert(len(predict.shape) == 2)
assert(len(gt.shape) == 2)
res += mse(gt, predict)
count += 1
val = res / count
return {'statistic': {'name': self.name, 'value': [{'name': self.name, 'value': val, 'type': 'SCALAR'}]}}
class AntGradientMatting(AntMeasure):
def __init__(self, task):
# paper: Christoph Rhemann, etc. A Perceptually Motivated Online Benchmark for Image Matting
super(AntGradientMatting, self).__init__(task, 'MATTING-GRADIENT')
assert (task.task_type == 'MATTING')
# delta = 1.4, q = 2
self.is_support_rank = True
def eva(self, data, label):
if label is not None:
data = zip(data, label)
count = 0
res = 0.0
for predict, gt in data:
assert(len(predict.shape) == 2)
assert(len(gt.shape) == 2)
predict_grad = scipy.ndimage.filters.gaussian_filter(predict, 1.4, order=1)
gt_grad = scipy.ndimage.filters.gaussian_filter(gt, 1.4, order=1)
res += np.sum(np.power(predict_grad - gt_grad, 2))
count += 1
val = res / count
return {'statistic': {'name': self.name, 'value': [{'name': self.name, 'value': val, 'type': 'SCALAR'}]}}
class AntConnectivityMatting(AntMeasure):
def __init__(self, task):
# paper: Christoph Rhemann, etc. A Perceptually Motivated Online Benchmark for Image Matting
super(AntConnectivityMatting, self).__init__(task, 'MATTING-CONNECTIVITY')
assert (task.task_type == 'MATTING')
# theta=0.15, p=1
self.is_support_rank = True
def eva(self, data, label):
if label is not None:
data = zip(data, label)
count = 0
res = 0.0
for predict, gt in data:
assert(len(predict.shape) == 2)
assert(len(gt.shape) == 2)
count += 1
val = 0.0
return {'statistic': {'name': self.name, 'value': [{'name': self.name, 'value': val, 'type': 'SCALAR'}]}}
| [
"[email protected]"
]
| |
33f8e8c7bd58dedd1a60ff330d66f61e776030ca | 17e1fef277dce4113f5d4f34107ba9f4b25d6646 | /env/bin/s3multiput | f5e975662b4c63d861ea8958d11063454b5b4f2c | []
| no_license | morenopc/botoS3-storages-test-project | 278a597d9c79b8b2c79b9e4b8bbe953e4c079270 | 4672fb00d0866e019d04ec6cd0a27bf3a049b0e1 | refs/heads/master | 2021-01-23T22:15:24.813838 | 2012-10-23T17:49:44 | 2012-10-23T17:49:44 | 6,357,058 | 0 | 1 | null | 2020-07-25T21:42:25 | 2012-10-23T17:46:06 | Python | UTF-8 | Python | false | false | 12,970 | #!/home/moreno/projects/django/botoS3-storages-test-project/env/bin/python
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# multipart portions copyright Fabian Topfstedt
# https://gist.github.com/924094
import math
import mimetypes
from multiprocessing import Pool
import getopt, sys, os
import boto
from boto.exception import S3ResponseError
from boto.s3.connection import S3Connection
from filechunkio import FileChunkIO
usage_string = """
SYNOPSIS
s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
-b/--bucket <bucket_name> [-c/--callback <num_cb>]
[-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
[-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>]
[-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced] path
Where
access_key - Your AWS Access Key ID. If not supplied, boto will
use the value of the environment variable
AWS_ACCESS_KEY_ID
secret_key - Your AWS Secret Access Key. If not supplied, boto
will use the value of the environment variable
AWS_SECRET_ACCESS_KEY
bucket_name - The name of the S3 bucket the file(s) should be
copied to.
path - A path to a directory or file that represents the items
to be uploaded. If the path points to an individual file,
that file will be uploaded to the specified bucket. If the
path points to a directory, s3_it will recursively traverse
the directory and upload all files to the specified bucket.
debug_level - 0 means no debug output (default), 1 means normal
debug output from boto, and 2 means boto debug output
plus request/response output from httplib
ignore_dirs - a comma-separated list of directory names that will
be ignored and not uploaded to S3.
num_cb - The number of progress callbacks to display. The default
is zero which means no callbacks. If you supplied a value
of "-c 10" for example, the progress callback would be
called 10 times for each file transferred.
prefix - A file path prefix that will be stripped from the full
path of the file when determining the key name in S3.
For example, if the full path of a file is:
/home/foo/bar/fie.baz
and the prefix is specified as "-p /home/foo/" the
resulting key name in S3 will be:
/bar/fie.baz
The prefix must end in a trailing separator and if it
does not then one will be added.
key_prefix - A prefix to be added to the S3 key name, after any
stripping of the file path is done based on the
"-p/--prefix" option.
reduced - Use Reduced Redundancy storage
grant - A canned ACL policy that will be granted on each file
transferred to S3. The value of provided must be one
of the "canned" ACL policies supported by S3:
private|public-read|public-read-write|authenticated-read
no_overwrite - No files will be overwritten on S3, if the file/key
exists on s3 it will be kept. This is useful for
resuming interrupted transfers. Note this is not a
sync, even if the file has been updated locally if
the key exists on s3 the file on s3 will not be
updated.
If the -n option is provided, no files will be transferred to S3 but
informational messages will be printed about what would happen.
"""
def usage():
print usage_string
sys.exit()
def submit_cb(bytes_so_far, total_bytes):
print '%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes)
def get_key_name(fullpath, prefix, key_prefix):
key_name = fullpath[len(prefix):]
l = key_name.split(os.sep)
return key_prefix + '/'.join(l)
def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
source_path, offset, bytes, debug, cb, num_cb, amount_of_retries=10):
if debug == 1:
print "_upload_part(%s, %s, %s)" % (source_path, offset, bytes)
"""
Uploads a part with retries.
"""
def _upload(retries_left=amount_of_retries):
try:
if debug == 1:
print 'Start uploading part #%d ...' % part_num
conn = S3Connection(aws_key, aws_secret)
conn.debug = debug
bucket = conn.get_bucket(bucketname)
for mp in bucket.get_all_multipart_uploads():
if mp.id == multipart_id:
with FileChunkIO(source_path, 'r', offset=offset,
bytes=bytes) as fp:
mp.upload_part_from_file(fp=fp, part_num=part_num, cb=cb, num_cb=num_cb)
break
except Exception, exc:
if retries_left:
_upload(retries_left=retries_left - 1)
else:
print 'Failed uploading part #%d' % part_num
raise exc
else:
if debug == 1:
print '... Uploaded part #%d' % part_num
_upload()
def upload(bucketname, aws_key, aws_secret, source_path, keyname,
reduced, debug, cb, num_cb,
acl='private', headers={}, guess_mimetype=True, parallel_processes=4):
"""
Parallel multipart upload.
"""
conn = S3Connection(aws_key, aws_secret)
conn.debug = debug
bucket = conn.get_bucket(bucketname)
if guess_mimetype:
mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
headers.update({'Content-Type': mtype})
mp = bucket.initiate_multipart_upload(keyname, headers=headers, reduced_redundancy=reduced)
source_size = os.stat(source_path).st_size
bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
5242880)
chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
pool = Pool(processes=parallel_processes)
for i in range(chunk_amount):
offset = i * bytes_per_chunk
remaining_bytes = source_size - offset
bytes = min([bytes_per_chunk, remaining_bytes])
part_num = i + 1
pool.apply_async(_upload_part, [bucketname, aws_key, aws_secret, mp.id,
part_num, source_path, offset, bytes, debug, cb, num_cb])
pool.close()
pool.join()
if len(mp.get_all_parts()) == chunk_amount:
mp.complete_upload()
key = bucket.get_key(keyname)
key.set_acl(acl)
else:
mp.cancel_upload()
def main():
# default values
aws_access_key_id = None
aws_secret_access_key = None
bucket_name = ''
ignore_dirs = []
total = 0
debug = 0
cb = None
num_cb = 0
quiet = False
no_op = False
prefix = '/'
key_prefix = ''
grant = None
no_overwrite = False
reduced = False
try:
opts, args = getopt.getopt(sys.argv[1:], 'a:b:c::d:g:hi:k:np:qs:wr',
['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet', 'secret_key=',
'no_overwrite', 'reduced'])
except:
usage()
# parse opts
for o, a in opts:
if o in ('-h', '--help'):
usage()
if o in ('-a', '--access_key'):
aws_access_key_id = a
if o in ('-b', '--bucket'):
bucket_name = a
if o in ('-c', '--callback'):
num_cb = int(a)
cb = submit_cb
if o in ('-d', '--debug'):
debug = int(a)
if o in ('-g', '--grant'):
grant = a
if o in ('-i', '--ignore'):
ignore_dirs = a.split(',')
if o in ('-n', '--no_op'):
no_op = True
        if o in ('-w', '--no_overwrite'):
no_overwrite = True
if o in ('-p', '--prefix'):
prefix = a
if prefix[-1] != os.sep:
prefix = prefix + os.sep
if o in ('-k', '--key_prefix'):
key_prefix = a
if o in ('-q', '--quiet'):
quiet = True
if o in ('-s', '--secret_key'):
aws_secret_access_key = a
if o in ('-r', '--reduced'):
reduced = True
if len(args) != 1:
usage()
path = os.path.expanduser(args[0])
path = os.path.expandvars(path)
path = os.path.abspath(path)
if not bucket_name:
print "bucket name is required!"
usage()
c = boto.connect_s3(aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key)
c.debug = debug
b = c.get_bucket(bucket_name)
# upload a directory of files recursively
if os.path.isdir(path):
if no_overwrite:
if not quiet:
print 'Getting list of existing keys to check against'
keys = []
for key in b.list(get_key_name(path, prefix, key_prefix)):
keys.append(key.name)
for root, dirs, files in os.walk(path):
for ignore in ignore_dirs:
if ignore in dirs:
dirs.remove(ignore)
for file in files:
fullpath = os.path.join(root, file)
key_name = get_key_name(fullpath, prefix, key_prefix)
copy_file = True
if no_overwrite:
if key_name in keys:
copy_file = False
if not quiet:
print 'Skipping %s as it exists in s3' % file
if copy_file:
if not quiet:
print 'Copying %s to %s/%s' % (file, bucket_name, key_name)
if not no_op:
if os.stat(fullpath).st_size == 0:
# 0-byte files don't work and also don't need multipart upload
k = b.new_key(key_name)
k.set_contents_from_filename(fullpath, cb=cb, num_cb=num_cb,
policy=grant, reduced_redundancy=reduced)
else:
upload(bucket_name, aws_access_key_id,
aws_secret_access_key, fullpath, key_name,
reduced, debug, cb, num_cb, grant or 'private')
total += 1
# upload a single file
elif os.path.isfile(path):
key_name = get_key_name(os.path.abspath(path), prefix, key_prefix)
copy_file = True
if no_overwrite:
if b.get_key(key_name):
copy_file = False
if not quiet:
print 'Skipping %s as it exists in s3' % path
if copy_file:
if not quiet:
print 'Copying %s to %s/%s' % (path, bucket_name, key_name)
if not no_op:
if os.stat(path).st_size == 0:
# 0-byte files don't work and also don't need multipart upload
k = b.new_key(key_name)
k.set_contents_from_filename(path, cb=cb, num_cb=num_cb, policy=grant,
reduced_redundancy=reduced)
else:
upload(bucket_name, aws_access_key_id,
aws_secret_access_key, path, key_name,
reduced, debug, cb, num_cb, grant or 'private')
if __name__ == "__main__":
main() | [
"[email protected]"
]
| ||
4b64ead8aaa5f3622333594515050ea8272d1336 | c39e19e8fada4df5bf8999f93a470fc5db0b8ea7 | /tensorflow/python/keras/distribute/keras_stateful_lstm_model_correctness_test.py | 4802c8d07d7c1f2aa5807fb9066c48b3319404fb | [
"Apache-2.0"
]
| permissive | ivomarb/tensorflow | 6bb05bc6dbaa8e59b43d00a8216bb0b8cb766080 | df2fbb89588065fca2c6e5fcfba7d8c2b4378591 | refs/heads/master | 2020-06-26T05:03:06.321649 | 2019-07-29T20:30:03 | 2019-07-29T21:40:30 | 199,530,704 | 1 | 0 | Apache-2.0 | 2019-07-29T21:45:39 | 2019-07-29T21:45:38 | null | UTF-8 | Python | false | false | 4,359 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateful tf.keras LSTM models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test
from tensorflow.python.keras.distribute import keras_correctness_test_base
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
def strategies_for_stateful_embedding_model():
"""Returns TPUStrategy with single core device assignment."""
return [
strategy_combinations.tpu_strategy_one_core,
strategy_combinations.tpu_strategy_one_step_one_core
]
def test_combinations_for_stateful_embedding_model():
return (combinations.combine(
distribution=strategies_for_stateful_embedding_model(),
mode='graph',
use_numpy=False,
use_validation_data=False,
run_distributed=[True, False]))
class DistributionStrategyStatefulLstmModelCorrectnessTest(
keras_correctness_test_base
.TestDistributionStrategyEmbeddingModelCorrectnessBase):
def get_model(self,
max_words=10,
initial_weights=None,
distribution=None,
run_distributed=None,
input_shapes=None):
del input_shapes
batch_size = keras_correctness_test_base._GLOBAL_BATCH_SIZE
with keras_correctness_test_base.MaybeDistributionScope(distribution):
word_ids = keras.layers.Input(
shape=(max_words,),
batch_size=batch_size,
dtype=np.int32,
name='words')
word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids)
lstm_embed = keras.layers.LSTM(
units=4, return_sequences=False, stateful=True)(
word_embed)
preds = keras.layers.Dense(2, activation='softmax')(lstm_embed)
model = keras.Model(inputs=[word_ids], outputs=[preds])
if initial_weights:
model.set_weights(initial_weights)
optimizer_fn = gradient_descent_keras.SGD
model.compile(
optimizer=optimizer_fn(learning_rate=0.1),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
return model
# TODO(jhseu): Disabled to fix b/130808953. Need to investigate why it
# doesn't work and enable for DistributionStrategy more generally.
@combinations.generate(test_combinations_for_stateful_embedding_model())
def disabled_test_stateful_lstm_model_correctness(
self, distribution, use_numpy, use_validation_data, run_distributed):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
is_stateful_model=True,
run_distributed=run_distributed)
@combinations.generate(
combinations.times(
keras_correctness_test_base.test_combinations_with_tpu_strategies(),
combinations.combine(run_distributed=[True, False])))
def test_incorrectly_use_multiple_cores_for_stateful_lstm_model(
self, distribution, use_numpy, use_validation_data, run_distributed):
with self.assertRaisesRegexp(
ValueError,
'Single core must be used for computation on stateful models. Consider '
'adding `device_assignment` parameter to TPUStrategy'):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
is_stateful_model=True,
run_distributed=run_distributed)
if __name__ == '__main__':
test.main()
| [
"[email protected]"
]
| |
94e94f0a49146bdb6be636a8ec08afefab19692d | 34652a47355a8dbe9200db229a1bbc62619de364 | /Matlibplots/dataset/weighted_moving_average.py | ba1c72dd6c5721cf2c82d983bfe57dd402757fc0 | []
| no_license | btrif/Python_dev_repo | df34ab7066eab662a5c11467d390e067ab5bf0f8 | b4c81010a1476721cabc2621b17d92fead9314b4 | refs/heads/master | 2020-04-02T13:34:11.655162 | 2019-11-10T11:08:23 | 2019-11-10T11:08:23 | 154,487,015 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | # Created by Bogdan Trif on 05-02-2018 , 11:25 AM.
import numpy as np
import matplotlib.pyplot as plt
#first generate some datapoint for a randomly sampled noisy sinewave
x = np.random.random(1000)*10
noise = np.random.normal(scale=0.3,size=len(x))
y = np.sin(x) + noise
#plot the data
plt.plot(x,y,'ro',alpha=0.3,ms=4,label='data')
plt.xlabel('Time')
plt.ylabel('Intensity')
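# The function below is effectively a Gaussian kernel smoother: for each bin center it
# computes a weighted average of all y values, with weights drawn from a Gaussian
# centered on that bin, so nearby samples dominate and the noisy sine is smoothed out.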
def weighted_moving_average(x,y,step_size=0.05,width=1):
bin_centers = np.arange(np.min(x),np.max(x)-0.5*step_size,step_size)+0.5*step_size
bin_avg = np.zeros(len(bin_centers))
#We're going to weight with a Gaussian function
def gaussian(x,amp=1,mean=0,sigma=1):
return amp*np.exp(-(x-mean)**2/(2*sigma**2))
for index in range(0,len(bin_centers)):
bin_center = bin_centers[index]
weights = gaussian(x,mean=bin_center,sigma=width)
bin_avg[index] = np.average(y,weights=weights)
return (bin_centers,bin_avg)
#plot the moving average
bins, average = weighted_moving_average(x,y)
plt.plot(bins, average,label='moving average')
plt.grid(which='both')
plt.show() | [
"[email protected]"
]
| |
104faf0976a57398e08a2092df8011c01c40ff5a | 2e5dbb3b851a3e96d715bc50c54b2dbe84b52a7d | /dl/lecture01/furniture/download_furniture.py | bf49967dc44eb0f6705f470a5f97d465ab403096 | []
| no_license | devforfu/fastai_courses | b3116ec93cef2174e661c1d1884a33d8510f08a5 | 82ee6e299c805f10e224c6a3473ac75ffbfdada4 | refs/heads/master | 2020-04-02T07:04:23.766567 | 2018-12-21T16:49:04 | 2018-12-21T16:49:04 | 154,180,455 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,327 | py | import os
import json
import argparse
from io import BytesIO
from pathlib import Path
from dataclasses import dataclass, asdict
from functools import partial
import configparser
from multiprocessing import Pool, cpu_count
import requests
import numpy as np
import pandas as pd
from PIL import Image
from fastai.core import partition
from projects.logger import get_logger
PATH = Path.home()/'data'/'furniture'
IMAGES = PATH/'images'
TRAIN_IMAGES = IMAGES/'train'
VALID_IMAGES = IMAGES/'valid'
TEST_IMAGES = IMAGES/'test'
LABELS = PATH/'labels.csv'
HEADERS = {'User-Agent': 'Python3'}
RANDOM_STATE = 1
np.random.seed(RANDOM_STATE)
log = get_logger()
def main():
args = parse_args()
name = args.subset
path = IMAGES/name
os.makedirs(path, exist_ok=True)
json_file = PATH/f'{name}.json'
index_file = PATH/f'{name}_index.csv'
prepare_url_index(json_file, index_file, pct=args.pct)
log.info(f'Downloading {args.pct:2.2%} of {json_file}...')
index = pd.read_csv(index_file)
info = download(index, path, args.chunk_size, args.proxy)
info.to_pickle(IMAGES/f'{name}_info.pickle')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--subset',
default='train', choices=['train', 'validation', 'test'],
help='Subset to download'
)
parser.add_argument(
'--pct',
default=0.1, type=float,
help='Percent of images to download'
)
parser.add_argument(
'--chunk-size',
default=1000, type=int,
help='Number of images to download per multi-threaded pool run'
)
parser.add_argument(
'--proxy',
default=None,
help='proxy configuration (if required)'
)
args = parser.parse_args()
if args.proxy is not None:
conf = configparser.ConfigParser()
conf.read(args.proxy)
proxy = dict(conf['proxy'])
url = 'socks5://{username}:{password}@{host}:{port}'.format(**proxy)
args.proxy = {'http': url, 'https': url}
return args
def prepare_url_index(json_file, index_file, pct=0.1):
"""Saves meta-information about images into CSV file.
Args:
json_file: Path to JSON file with dataset information.
index_file: Path to CSV file to save image URL, label ID, and image ID
pct: Percentage of dataset to take.
"""
with json_file.open() as file:
content = json.load(file)
images = content['images']
if 'annotations' in content:
labels = content['annotations']
else:
labels = [
{'image_id': img['image_id'], 'label_id': 0}
for img in images]
urls = [img['url'][0] for img in images]
records = pd.DataFrame([
{'url': url, **lbl}
for url, lbl in zip(urls, labels)])
if pct is not None and pct < 1:
pct = max(0.0, min(pct, 1.0))
subsets = []
for key, group in records.groupby('label_id'):
size = int(len(group) * pct)
subsets.extend(group.sample(size, random_state=RANDOM_STATE).to_dict('records'))
records = pd.DataFrame(subsets)
records.to_csv(index_file, index=None)
@dataclass
class ImageInfo:
path: Path
label_id: int
image_id: int
url: str
failed: bool = False
def download(index, path, chunk_size: int=1000, proxy: dict=None):
"""Downloads images with URLs from index dataframe."""
n_cpu = cpu_count()
worker = partial(download_single, path, proxy)
queue = index.to_dict('records')
meta = []
with Pool(n_cpu) as pool:
chunks = partition(queue, chunk_size)
n_chunks = len(chunks)
for i, chunk in enumerate(chunks):
log.info('Downloading chunk %d of %d' % (i+1, n_chunks))
data = [x for x in pool.imap_unordered(worker, chunk) if not x.failed]
meta.extend([asdict(info) for info in data])
return pd.DataFrame(meta)
def download_single(folder, proxy, info):
url = info['url']
img_name = str(info['image_id']) + '.jpg'
path = folder/img_name
result = {
'label_id': info['label_id'],
'image_id': info['image_id'],
'path': path,
'url': url}
if path.exists():
return ImageInfo(**result)
error, msg = True, ''
try:
r = requests.get(
url, allow_redirects=True, timeout=60,
headers=HEADERS, proxies=proxy)
r.raise_for_status()
error = False
except requests.HTTPError:
msg = 'HTTP error'
except requests.ConnectionError:
msg = 'Connection error'
except requests.Timeout:
msg = 'Waiting response too long'
except Exception as e:
msg = str(e)[:80]
if error:
log.warning('%s: %s', msg, url)
return ImageInfo(failed=True, **result)
try:
pil_image = Image.open(BytesIO(r.content)).convert('RGB')
pil_image.save(path, format='JPEG', quality=90)
except Exception as e:
log.warning('Cannot create PIL Image: %s', str(e))
return ImageInfo(failed=True, **result)
if os.stat(path).st_size <= 0:
        log.warning('Saved image file is empty: %s', path)
return ImageInfo(failed=True, **result)
return ImageInfo(**result)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
9a7ac17a45a71f1de7afea23e28e8af49840222c | 645aa520f2eff7e6001574e57c986aba129e4dd3 | /tests/test_visualize_pathways.py | 608a46c9346eadcb1ae18f44d046049486192b9e | [
"Apache-2.0"
]
| permissive | google/transitfeed | 08c4ecfb6872b6c0dc409d9a35b32ef515e30253 | 104b5a5b339c62a94c1579d7209a41c7c0833e35 | refs/heads/master | 2023-09-05T03:08:17.640950 | 2022-05-23T16:23:53 | 2022-05-23T16:23:53 | 24,061,376 | 680 | 299 | Apache-2.0 | 2022-09-28T09:02:50 | 2014-09-15T15:16:32 | Python | UTF-8 | Python | false | false | 649 | py | import os.path
import unittest
import visualize_pathways
def get_file_contents(filename):
with open(filename, 'rb') as f:
return f.read()
class TestVisualizePathways(unittest.TestCase):
def test_gtfs_to_graphviz(self):
testdata_dir = os.path.join(os.path.dirname(__file__),
'data/au-sydney-entrances')
golden_data = get_file_contents(
os.path.join(testdata_dir, 'au-sydney-entrances.dot'))
reader = visualize_pathways.GtfsReader(testdata_dir)
self.assertEqual(
str(visualize_pathways.gtfs_to_graphviz(reader)),
golden_data)
| [
"[email protected]"
]
| |
24e60d5e6ab4bd5e2bb4c8fbc73fed26abb5cbe7 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part003815.py | 5e5bdd4608ad234dd7dc490f41749f64838b0581 | []
| no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher66552(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher66552._instance is None:
CommutativeMatcher66552._instance = CommutativeMatcher66552()
return CommutativeMatcher66552._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 66551
return
yield
from collections import deque | [
"[email protected]"
]
| |
425ae4bad5ec2bf6ae6e55096f9b329ab59d9a73 | 022b22d343e2c3d89a865c2b5d684e82c692771e | /frontend_docker/project/main/views.py | 753ec13fe01e0c17192f09af50c9bdade4d1cc2f | [
"MIT"
]
| permissive | jessequinn/hbsis | f4050f5f0850001bc3284ce2c94266ccb00a4c70 | 149b8c41c75732dcbcc23e667831fdb42cab786e | refs/heads/master | 2022-12-18T01:13:27.354613 | 2019-02-08T10:27:35 | 2019-02-08T10:27:35 | 169,249,120 | 0 | 0 | MIT | 2022-12-08T01:35:31 | 2019-02-05T13:54:21 | JavaScript | UTF-8 | Python | false | false | 5,545 | py | import datetime
import json
import pytz
import urllib.request
from flask import render_template, request, flash, Blueprint, redirect, url_for
from flask_login import login_required, current_user
from project import app, db
from project.models import WeatherRegistration
from .forms import WeatherRegistrationForm
main_blueprint = Blueprint(
'main', __name__,
template_folder='templates'
)
def datetimefilter(value, format="%A"):
'''
Datetime filter for Jinja. Formats date to US/Eastern from the UTC value.
:param value: input value
:param format: format of return date. default day of week.
:return: formatted date
'''
value = datetime.datetime.fromtimestamp(value)
tz = pytz.timezone('US/Eastern')
utc = pytz.timezone('UTC')
value = utc.localize(value, is_dst=None).astimezone(pytz.utc)
local_dt = value.astimezone(tz)
return local_dt.strftime(format)
app.jinja_env.filters['datetimefilter'] = datetimefilter
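# With the filter registered as 'datetimefilter', templates rendered by this app can
# apply it to a UTC timestamp directly, e.g. (illustrative usage, not taken from the
# project templates): {{ item.dt | datetimefilter }} for the weekday name, or
# {{ item.dt | datetimefilter("%Y-%m-%d %H:%M") }} for an explicit format.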
@main_blueprint.route('/', methods=['GET', 'POST'])
@login_required
def home():
'''
Main page after login. Contains a search form for city weather forecast.
:return: rendered template
'''
weatherRegistrations = db.session.query(WeatherRegistration).filter_by(user_id=current_user.id).all()
with urllib.request.urlopen('http://localhost:5050/countries') as url:
data = json.loads(url.read().decode())
error = None
form = WeatherRegistrationForm(request.form)
form.country.choices = [(c, c) for c in data['data']] # dyanmically produce countries
if request.method == 'POST':
if form.validate_on_submit():
if form.city.data != '':
with urllib.request.urlopen(
'http://localhost:5050/' + form.country.data.upper() + '/' + form.city.data.capitalize()) as url:
ids = json.loads(url.read().decode())
if not ids['data']:
error = 'No data exists for ' + form.city.data.capitalize() + '!'
return render_template('index.html', form=form, error=error, user=current_user, weatherRegistrations=weatherRegistrations)
else:
if any(ids['data'][0]['id'] == wr.city_id for wr in weatherRegistrations):
error = form.city.data.capitalize() + ' has already been registered.'
return render_template('index.html', form=form, error=error, user=current_user,
weatherRegistrations=weatherRegistrations)
else:
new_weatherregistration = WeatherRegistration(form.city.data, ids['data'][0]['id'],
form.country.data, current_user.id)
db.session.add(new_weatherregistration)
failed = False
try:
db.session.commit()
except Exception as e:
db.session.rollback()
db.session.flush()
failed = True
print(e)
if failed:
error = 'Error with registration.'
return render_template('index.html', form=form, error=error, user=current_user,
weatherRegistrations=weatherRegistrations)
else:
flash(form.city.data.capitalize() + ' was registered successfully.')
return redirect(url_for('main.home'))
else:
error = 'Enter a city name!'
return render_template('index.html', form=form, error=error, user=current_user,
weatherRegistrations=weatherRegistrations)
@main_blueprint.route('/forecast<id>')
@login_required
def forecast(id):
'''
5 day forecast page.
:param id: city id
:return: rendered template
'''
with urllib.request.urlopen(
'http://api.openweathermap.org/data/2.5/forecast/daily?id=' + id + '&cnt=5&APPID=eb8b1a9405e659b2ffc78f0a520b1a46&units=metric') as url:
data = json.loads(url.read().decode())
return render_template('forecast.html', data=data)
@main_blueprint.route('/remove<id>')
@login_required
def remove(id):
'''
Function simply removes city from list of cities.
:param id: city id
:return: rendered template
'''
with urllib.request.urlopen('http://localhost:5050/countries') as url:
data = json.loads(url.read().decode())
form = WeatherRegistrationForm(request.form)
form.country.choices = [(c, c) for c in data['data']] # dyanmically produce countries
db.session.query(WeatherRegistration).filter_by(id=id).delete()
failed = False
try:
db.session.commit()
except Exception as e:
db.session.rollback()
db.session.flush()
failed = True
print(e)
if failed:
error = 'Could not remove registration.'
weatherRegistrations = db.session.query(WeatherRegistration).filter_by(user_id=current_user.id).all()
return render_template('index.html', form=form, error=error, user=current_user,
weatherRegistrations=weatherRegistrations)
else:
flash('Registration was removed successfully.')
return redirect(url_for('main.home'))
| [
"[email protected]"
]
| |
bc27b8fa61132158c9004b0c3d96302cee57c123 | c9fde4576216a22e8d5711bbe97adda1aafa2f08 | /model-optimizer/mo/front/caffe/extractor.py | 8f6115655b5973294584661bca6889d30733e4aa | [
"Apache-2.0"
]
| permissive | dliang0406/dldt | c703d6a837de3f996528fc8a9543f9530b23342c | d9b10abcebafe8b10ba81e09e433de7a366c072c | refs/heads/2018 | 2020-04-03T08:24:47.723353 | 2018-10-29T07:58:05 | 2018-10-29T07:58:05 | 155,132,108 | 3 | 1 | Apache-2.0 | 2019-10-10T08:39:46 | 2018-10-29T01:03:54 | C++ | UTF-8 | Python | false | false | 5,994 | py | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.caffe.extractors.batchnorm import batch_norm_ext
from mo.front.caffe.extractors.concat import concat_ext
from mo.front.caffe.extractors.convolution import convolution_ext
from mo.front.caffe.extractors.deconvolution import deconvolution_ext
from mo.front.caffe.extractors.eltwise import eltwise_ext
from mo.front.caffe.extractors.flatten import flatten_ext
from mo.front.caffe.extractors.inner_product import inner_product_ext
from mo.front.caffe.extractors.input import global_input_ext, input_ext
from mo.front.caffe.extractors.lrn import lrn_ext
from mo.front.caffe.extractors.native_caffe import native_caffe_node_extractor
from mo.front.caffe.extractors.permute import permute_ext
from mo.front.caffe.extractors.pooling import pooling_ext
from mo.front.caffe.extractors.power import power_ext
from mo.front.caffe.extractors.relu import relu_ext
from mo.front.caffe.extractors.reshape import reshape_ext
from mo.front.caffe.extractors.roipooling import roipooling_ext
from mo.front.caffe.extractors.scale import scale_ext
from mo.front.caffe.extractors.slice import slice_ext
from mo.front.caffe.extractors.softmax import softmax_ext
from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.front.common.register_custom_ops import extension_op_extractor
from mo.front.extractor import CaffePythonFrontExtractorOp, FrontExtractorOp
from mo.graph.graph import Node
from mo.ops.op import Op
from mo.utils.error import Error
from mo.utils.utils import refer_to_faq_msg
def node_pb_arg(pb_extractor):
return lambda node: pb_extractor(node.pb, node.model_pb)
"""
Keys are names that appear as layer names in .prototxt.
Full list is available here: http://caffe.berkeleyvision.org/tutorial/layers.html
"""
caffe_type_extractors = {
# Data Layers
'input': node_pb_arg(input_ext),
'globalinput': node_pb_arg(global_input_ext),
# Common Layers
'innerproduct': node_pb_arg(inner_product_ext),
'inner_product': node_pb_arg(inner_product_ext),
'dropout': node_pb_arg(lambda _, __: dict(op='Dropout', infer=copy_shape_infer)),
# Vision Layers
'convolution': node_pb_arg(convolution_ext),
'deconvolution': node_pb_arg(deconvolution_ext),
'pooling': node_pb_arg(pooling_ext),
# Normalization Layers
'batchnorm': node_pb_arg(batch_norm_ext),
'lrn': node_pb_arg(lrn_ext),
# Activation Layers
'power': node_pb_arg(power_ext),
'relu': node_pb_arg(relu_ext),
'scale': node_pb_arg(scale_ext),
# Utility Layers
'concat': node_pb_arg(concat_ext),
'eltwise': node_pb_arg(eltwise_ext),
'flatten': node_pb_arg(flatten_ext),
'reshape': node_pb_arg(reshape_ext),
'slice': node_pb_arg(slice_ext),
'softmax': node_pb_arg(softmax_ext),
# Custom, implemented in IE, SSD-specific
'permute': node_pb_arg(permute_ext),
# Custom, implemented in IE, Fast-RCNN-specific
'roipooling': node_pb_arg(roipooling_ext),
}
def common_caffe_fields(node: Node) -> dict:
if node.has_valid('op') and node.op == 'Identity':
return {}
pb = node.pb if node.pb else node
layer_type = pb.type
if isinstance(layer_type, int):
layer_type = pb.LayerType.DESCRIPTOR.values_by_number[layer_type].name
layer_type = str(layer_type)
return {
'kind': 'op',
'name': pb.name,
'type': layer_type,
'op': layer_type,
# generic code relies on op; it should be overridden by specific op extractor
'infer': None,
'precision': 'FP32' # TODO use real precision derived from the model
}
def caffe_extractor(node: Node, lowered_keys_map: dict) -> (bool, dict):
if node.has_valid('op') and node.op == 'Identity':
return True, {}
result = common_caffe_fields(node)
supported = False
name = None
layer_type = result['type'].lower()
if layer_type in lowered_keys_map:
layer_type = lowered_keys_map[layer_type]
assert layer_type in caffe_type_extractors
name = layer_type
if name: # it is either standard or registered via CustomLayersMapping.xml
attrs = caffe_type_extractors[name](node)
# intentionally as Python registry if not found returns None
if attrs is not None:
result.update(attrs)
supported = True
if not supported:
raise Error('Found custom layer "{}". Model Optimizer does not support this layer. '.format(node.id) +
'Please, implement extension. ' +
refer_to_faq_msg(45))
if 'infer' not in result or not result['infer']:
result.update(native_caffe_node_extractor(node))
phase_attr = check_phase(node)
result.update(phase_attr)
return supported, result
def check_phase(node: Node):
if node.has_valid('pb') and hasattr(node.pb, 'include'):
for i in node.pb.include:
if hasattr(i, 'phase'):
return {'phase': i.phase}
return {}
def register_caffe_python_extractor(op: Op, name: str = None):
if not name and hasattr(op, 'op'):
name = op.op
if not name:
raise Error("Can not register Op {}. Please, call function 'register_caffe_python_extractor'"
"with parameter 'name' .".format(op),
refer_to_faq_msg(87))
CaffePythonFrontExtractorOp.registered_ops[name] = lambda node: extension_op_extractor(node, op)
| [
"[email protected]"
]
| |
a0364d6e371684383b61216b8d9e5677beb97814 | b394bb6bd3e8848688b525f55e82962f152c1bb3 | /demos/upload/linear_systems/Complexity of Mat-Mat multiplication and LU.py | f4e79703fc521e02f24bfee84b294558fbd90ad9 | []
| no_license | lukeolson/cs450-f20-demos | 02c2431d7696348cf9ca1ab67bdd5c44a97ac38b | 040e7dfa15c68f7f426cf69655cb600926f9f626 | refs/heads/master | 2023-01-22T19:12:33.394521 | 2020-12-03T19:48:18 | 2020-12-03T19:48:18 | 288,542,898 | 5 | 10 | null | 2020-10-05T19:39:07 | 2020-08-18T19:13:52 | null | UTF-8 | Python | false | false | 1,025 | py | #!/usr/bin/env python
# coding: utf-8
# # Relative cost of matrix operations
# In[1]:
import numpy as np
import scipy.linalg as spla
import scipy as sp
import matplotlib.pyplot as pt
from time import time
np.alterdot()
# In[2]:
n_values = (10**np.linspace(1, 3.25, 15)).astype(np.int32)
n_values
# In[3]:
def mat_mul(A):
return A.dot(A)
for name, f in [
("mat_mul", mat_mul),
("lu", spla.lu_factor),
]:
times = []
print("----->", name)
for n in n_values:
print(n)
A = np.random.randn(n, n)
start_time = time()
f(A)
times.append(time() - start_time)
pt.plot(n_values, times, label=name)
pt.grid()
pt.legend(loc="best")
pt.xlabel("Matrix size $n$")
pt.ylabel("Wall time [s]")
# * The faster algorithms make the slower ones look bad. But... it's all relative.
# * Is there a better way of plotting this?
# * Can we see the asymptotic cost ($O(n^3)$) of these algorithms from the plot?
# In[3]:
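# A sketch of one possible answer to the questions above. It reuses the `n_values`
# array and the `times`/`name` values left over from the last pass of the loop
# (i.e. the "lu" timings); on log-log axes an O(n^3) method shows up as a straight
# line of slope 3, so a scaled n^3 reference curve makes the asymptotic cost visible.
ref = times[-1] * (n_values / n_values[-1])**3.0
pt.figure()
pt.loglog(n_values, times, "o-", label=name)
pt.loglog(n_values, ref, "--", label="$O(n^3)$ reference")
pt.grid()
pt.legend(loc="best")
pt.xlabel("Matrix size $n$")
pt.ylabel("Wall time [s]")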
| [
"[email protected]"
]
| |
90afc3a58e3e9c99e3416d2d843ca5e084f3e87a | a2b6bc9bdd2bdbe5871edb613065dd2397175cb3 | /medium/Rotate List.py | 8ad51c14cc944ec9af60a8ec92c7c8d4a1311263 | []
| no_license | Asunqingwen/LeetCode | ed8d2043a31f86e9e256123439388d7d223269be | b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee | refs/heads/master | 2022-09-26T01:46:59.790316 | 2022-09-01T08:20:37 | 2022-09-01T08:20:37 | 95,668,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,931 | py | # -*- coding: utf-8 -*-
# @Time : 2019/10/29 0029 10:26
# @Author : 没有蜡笔的小新
# @E-mail : [email protected]
# @FileName: Rotate List.py
# @Software: PyCharm
# @Blog :https://blog.csdn.net/Asunqingwen
# @GitHub :https://github.com/Asunqingwen
"""
Given a linked list, rotate the list to the right by k places, where k is non-negative.
Example 1:
Input: 1->2->3->4->5->NULL, k = 2
Output: 4->5->1->2->3->NULL
Explanation:
rotate 1 steps to the right: 5->1->2->3->4->NULL
rotate 2 steps to the right: 4->5->1->2->3->NULL
Example 2:
Input: 0->1->2->NULL, k = 4
Output: 2->0->1->NULL
Explanation:
rotate 1 steps to the right: 2->0->1->NULL
rotate 2 steps to the right: 1->2->0->NULL
rotate 3 steps to the right: 0->1->2->NULL
rotate 4 steps to the right: 2->0->1->NULL
"""
import json
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def stringToIntegerList(input):
return json.loads(input)
def stringToListNode(input):
input = input.split(',')
dummyRoot = ListNode(0)
ptr = dummyRoot
for number in input:
ptr.next = ListNode(int(number))
ptr = ptr.next
ptr = dummyRoot.next
return ptr
def listNodeToString(node):
if not node:
return "[]"
result = ""
while node:
result += str(node.val) + ", "
node = node.next
return "[" + result[:-2] + "]"
def rotateRight(head: ListNode, k: int) -> ListNode:
p1 = head
length = 0
while p1:
length += 1
p1 = p1.next
if length <= 1 or k == 0:
return head
k %= length
p1, p2 = head, head
for i in range(k):
p2 = p2.next
for i in range(length - k):
if not p1.next:
p1.next = head
if not p2.next:
p2.next = head
p1 = p1.next
p2 = p2.next
head = p1
for i in range(length - 1):
p1 = p1.next
p1.next = None
return head
if __name__ == '__main__':
input = "1,2"
k = 0
head = stringToListNode(input)
result = rotateRight(head, k)
result = listNodeToString(result)
print(result)
| [
"[email protected]"
]
| |
f3815f70c32f2896f4449435b1bacfddcf8375c9 | 39e647e9ec8524a1cee90ef15f37a3d3bbf8ac43 | /poet/trunk/pythonLibs/Django-1.3/tests/modeltests/proxy_models/models.py | 30f14bc931b97f11cff5cbcf7a99bf9d15829bc7 | [
"BSD-3-Clause"
]
| permissive | AgileAdaptiveTools/POETTools | 85158f043e73b430c1d19a172b75e028a15c2018 | 60244865dd850a3e7346f9c6c3daf74ea1b02448 | refs/heads/master | 2021-01-18T14:46:08.025574 | 2013-01-28T19:18:11 | 2013-01-28T19:18:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,229 | py | """
By specifying the 'proxy' Meta attribute, model subclasses can specify that
they will take data directly from the table of their base class rather
than using a new table of their own. This allows them to act as simple proxies,
providing a modified interface to the data from the base class.
"""
from django.contrib.contenttypes.models import ContentType
from django.db import models
# A couple of managers for testing manager overriding in proxy model cases.
class PersonManager(models.Manager):
def get_query_set(self):
return super(PersonManager, self).get_query_set().exclude(name="fred")
class SubManager(models.Manager):
def get_query_set(self):
return super(SubManager, self).get_query_set().exclude(name="wilma")
class Person(models.Model):
"""
A simple concrete base class.
"""
name = models.CharField(max_length=50)
objects = PersonManager()
def __unicode__(self):
return self.name
class Abstract(models.Model):
"""
A simple abstract base class, to be used for error checking.
"""
data = models.CharField(max_length=10)
class Meta:
abstract = True
class MyPerson(Person):
"""
A proxy subclass; it should not get a new table. Overrides the default
manager.
"""
class Meta:
proxy = True
ordering = ["name"]
objects = SubManager()
other = PersonManager()
def has_special_name(self):
return self.name.lower() == "special"
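# A minimal usage sketch (illustrative only, not part of the original test fixtures):
# because MyPerson is a proxy, it reads and writes the same table as Person, so a row
# created through the proxy is visible through the base model:
#
#     >>> MyPerson.objects.create(name="Bazza del Frob")
#     >>> Person.objects.get(name="Bazza del Frob")
#     <Person: Bazza del Frob>
#
# while each model keeps its own default manager (Person.objects hides "fred",
# MyPerson.objects hides "wilma").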
class ManagerMixin(models.Model):
excluder = SubManager()
class Meta:
abstract = True
class OtherPerson(Person, ManagerMixin):
"""
A class with the default manager from Person, plus a secondary manager.
"""
class Meta:
proxy = True
ordering = ["name"]
class StatusPerson(MyPerson):
"""
A non-proxy subclass of a proxy; it should get a new table.
"""
status = models.CharField(max_length=80)
# We can even have proxies of proxies (and subclass of those).
class MyPersonProxy(MyPerson):
class Meta:
proxy = True
class LowerStatusPerson(MyPersonProxy):
status = models.CharField(max_length=80)
class User(models.Model):
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class UserProxy(User):
class Meta:
proxy = True
class UserProxyProxy(UserProxy):
class Meta:
proxy = True
# We can still use `select_related()` to include related models in our querysets.
class Country(models.Model):
name = models.CharField(max_length=50)
class State(models.Model):
name = models.CharField(max_length=50)
country = models.ForeignKey(Country)
def __unicode__(self):
return self.name
class StateProxy(State):
class Meta:
proxy = True
# Proxy models still work with filters (on related fields)
# and select_related, even when mixed with model inheritance
class BaseUser(models.Model):
name = models.CharField(max_length=255)
class TrackerUser(BaseUser):
status = models.CharField(max_length=50)
class ProxyTrackerUser(TrackerUser):
class Meta:
proxy = True
class Issue(models.Model):
summary = models.CharField(max_length=255)
assignee = models.ForeignKey(TrackerUser)
def __unicode__(self):
return ':'.join((self.__class__.__name__,self.summary,))
class Bug(Issue):
version = models.CharField(max_length=50)
reporter = models.ForeignKey(BaseUser)
class ProxyBug(Bug):
"""
Proxy of an inherited class
"""
class Meta:
proxy = True
class ProxyProxyBug(ProxyBug):
"""
A proxy of a proxy model with a related field
"""
class Meta:
proxy = True
class Improvement(Issue):
"""
A model that has a relation to a proxy model
or to a proxy of a proxy model
"""
version = models.CharField(max_length=50)
reporter = models.ForeignKey(ProxyTrackerUser)
associated_bug = models.ForeignKey(ProxyProxyBug)
class ProxyImprovement(Improvement):
class Meta:
proxy = True | [
"[email protected]"
]
| |
fd8aa3ea52ca26a7bff1f7c7d6f9d22f8f4d59b7 | c038d06c31de0919d70c04f517f7490146ff80df | /train_nn.py | b8a8c4cc52d03dcfcdb4e339de6e584971a4eca1 | []
| no_license | KWAN-YWAN/gtd-analytics | 235df79f9b95b1734928cd2a9b4d54c5bf3f88e8 | 10fd7fa2965bb0efcc2396d86d3998afbc0fe7c8 | refs/heads/master | 2020-03-28T18:13:39.976015 | 2018-07-11T21:56:29 | 2018-07-11T21:56:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,579 | py | #!/usr/bin/env python
import numpy as np
import pandas as pd
# Plotter library
import seaborn as sns
import matplotlib.pyplot as plt
# Own libraries
import data_preparation as prep
from nn import Nn
from sklearn import preprocessing
def filter_data(df):
# Filter for only kidnapping data (1st, 2nd or 3rd attack type)
kidnap_cats = [5, 6]
df = df[df.attacktype1.isin(kidnap_cats) | df.attacktype2.isin(kidnap_cats) | df.attacktype3.isin(
kidnap_cats) | df.ishostkid == 1]
# Drop attacktype columns. They aren't needed anymore
df = df.drop(['attacktype1', 'attacktype2', 'attacktype3', 'ishostkid'], axis=1)
# Filter also broken data from our classes
df = df[df.hostkidoutcome.notnull()]
# Filter data for NaN nreleased or value -99
df = df[df.nreleased.notnull()]
df = df[df.nreleased != -99]
# Filter also data where nhostkid is lower than nreleased
df = df[df.nhostkid >= df.nreleased]
return df
def augmentate_data(df):
# Add an ID group for gname to the DataFrame
df['gname_id'], _ = prep.str_to_index_arr(df['gname'])
# Add a normalisation for how many of the hostage victims survived
df['nreleased_p'] = np.divide(df.nreleased, df.nhostkid)
# Add a column of died hostages
df['ndied'] = np.subtract(df.nhostkid, df.nreleased)
# Drop all string columns and keep only numeric ones.
df = df._get_numeric_data()
return df
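# Note (illustrative alternative, assuming prep.str_to_index_arr simply maps each
# distinct group name to an integer code): pandas provides the same mapping natively:
#
#     df['gname_id'] = pd.factorize(df['gname'])[0]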
def handle_NaN_in_data(df):
from sklearn.preprocessing import Imputer
fill_NaN = Imputer(missing_values='NaN', strategy='mean', axis=0)
imputed_df = pd.DataFrame(fill_NaN.fit_transform(df))
imputed_df.columns = df.columns
imputed_df.index = df.index
df = imputed_df
return df
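# Note: sklearn.preprocessing.Imputer was removed in newer scikit-learn releases.
# A roughly equivalent sketch for current versions (not used in this script):
#
#     from sklearn.impute import SimpleImputer
#     fill_NaN = SimpleImputer(missing_values=np.nan, strategy='mean')
#     imputed_df = pd.DataFrame(fill_NaN.fit_transform(df), columns=df.columns, index=df.index)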
def set_NaN_to_value(df, value):
return df.replace(np.nan, value)
def set_unknown_to_NaN(df, unknown_values):
    # DataFrame.replace returns a new frame; reassign it or the call has no effect.
    for unknown_value in unknown_values:
        df = df.replace(unknown_value, np.nan)
    return df
def visualize_data(df, path='', suffix=''):
# First: a plot about number of kidnapped persons
sns.set(style="darkgrid", color_codes=True)
g1 = sns.jointplot(
'iyear',
'nhostkid',
data=df,
kind="reg",
color='r',
size=7,
xlim=[1970, 2016]
)
g1.set_axis_labels('Years', 'Number of kidnapped victims')
g1.savefig(path + 'interaction-iyear_nhostkid' + suffix + '.png')
g1.savefig(path + 'interaction-iyear_nhostkid' + suffix + '.pdf')
# Outcomes vs percentage of released victims
g2 = sns.violinplot(
x='hostkidoutcome',
y='nreleased_p',
data=df,
hue='ransom'
)
g2.figure.savefig(path + 'interaction-hostkidoutcome_nreleased_p' + suffix + '.png')
g2.figure.savefig(path + 'interaction-hostkidoutcome_nreleased_p' + suffix + '.pdf')
### Correlation
corr = df.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
g3 = sns.heatmap(
corr,
mask=mask,
cmap=cmap,
vmax=.3,
center=0,
square=True,
linewidths=.5,
cbar_kws={"shrink": .5}
)
g3.figure.savefig(path + 'correlation_full' + suffix + '.png')
g3.figure.savefig(path + 'correlation_full' + suffix + '.pdf')
def train(X, Y):
    # Fit a decision tree on the data passed in (not the global X_train/Y_train)
    # and return the fitted model so the caller can actually use it.
    from sklearn.tree import DecisionTreeClassifier
    model = DecisionTreeClassifier()
    model.fit(X, Y)
    return model
def train_svm(X, y, C=1.0):
'''
Trains the SVM with X as input and y as output data
Input:
- X: Input vector with features
- y: Output vector with one label column
- C: SVM regularisation parameter
'''
from sklearn.svm import SVC
svm_model = SVC(kernel='linear', C=C, decision_function_shape='ovr')
svm_model.fit(X, y)
return svm_model
def predict_svm(model, X, y):
Z = model.predict(X)
return Z
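# A minimal usage sketch for the SVM helpers (illustrative only; the toy features and
# labels below are made up and are not part of the GTD pipeline):
#
#     X_toy = np.array([[0.0, 0.1], [0.9, 1.0], [0.1, 0.2], [1.0, 0.8]])
#     y_toy = np.array([0, 1, 0, 1])
#     svm_model = train_svm(X_toy, y_toy, C=1.0)
#     predictions = predict_svm(svm_model, X_toy, y_toy)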
if __name__ == "__main__":
#####
# The purpose of our classifier is to predict the hostkidoutcome category and a percentage of released persons.
    # Y: hostkidoutcome, nreleased_p
# X: extended, iyear, gname_id, nhostkid, ndays, ransom, ransompaid, ishostkid
#####
### Data filtering
# Read data and exclude cols
# @Snippet: To exclude: lambda x: x not in ["eventid","imonth","iday", "attacktype2","claims2","claimmode2","claimmode3","gname2"]
df = prep.read_data('globalterrorismdb_0617dist.csv',
usecols=['nreleased', 'attacktype1', 'attacktype2', 'attacktype3', 'extended', 'iyear', 'gname',
'nhostkid', 'nhours', 'ndays', 'ransom', 'ransompaid', 'ransompaidus', 'ishostkid',
'hostkidoutcome'])
df = filter_data(df)
df = augmentate_data(df)
    # Some attributes use -9 or -99 as sentinel values for unknown data; replace them with NaNs as well.
df = set_unknown_to_NaN(df, [-9, -99])
    # Several columns still contain NaNs for missing data. Here they are simply replaced with a sentinel value of -1; handle_NaN_in_data() offers an Imputer-based mean fill as an alternative.
df = set_NaN_to_value(df, -1)
head = df.head()
print(df.head())
# Plot data
visualize_data(df, path="plots/")
print('Resulting columns for training: \n{}\n'.format(df.columns))
    # Scale every column by its column maximum. Note that with the -1 sentinels
    # introduced above this is not a strict 0-1 normalisation.
    x = df.values
    x_normed = x / x.max(axis=0)
    # Keep the original column labels (head is a DataFrame, not a list of column names).
    df = pd.DataFrame(columns=df.columns, data=x_normed)
print(df)
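    # An alternative scaling sketch using the already-imported sklearn preprocessing
    # module (not used here): MinMaxScaler maps every column to [0, 1] even when
    # negative sentinel values are present.
    #
    #     scaler = preprocessing.MinMaxScaler()
    #     df = pd.DataFrame(scaler.fit_transform(df), columns=df.columns)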
    ### Split the data into train, validation and test sets by random sampling. Note that sample(replace=True) draws with replacement, so the subsets can overlap.
train = df.sample(frac=0.6, replace=True)
validation = df.sample(frac=0.2, replace=True)
test = df.sample(frac=0.2, replace=True)
labels = ['hostkidoutcome', 'nreleased_p']
X_train, Y_train, Y_train_columns = prep.separate_labels(train, labels)
X_validation, Y_validation, Y_validation_columns = prep.separate_labels(validation, labels)
X_test, Y_test, Y_test_columns = prep.separate_labels(test, labels)
nn = Nn()
nn.create_model()
nn.load_model_from_json()
# nn.train(x=X_train.values,
# y=Y_train.values,
# validation_data=(X_validation.values, Y_validation.values))
# nn.persist_model()
score = nn.evaluate(x=X_test, y=Y_test)
print("Achieved Score:", score)
| [
"[email protected]"
]
|