blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 5..283) | content_id (stringlengths 40..40) | detected_licenses (sequencelengths 0..41) | license_type (stringclasses 2 values) | repo_name (stringlengths 7..96) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringclasses 58 values) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64 12.7k..662M ⌀) | star_events_count (int64 0..35.5k) | fork_events_count (int64 0..20.6k) | gha_license_id (stringclasses 11 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses 43 values) | src_encoding (stringclasses 9 values) | language (stringclasses 1 value) | is_vendor (bool 2 classes) | is_generated (bool 2 classes) | length_bytes (int64 7..5.88M) | extension (stringclasses 30 values) | content (stringlengths 7..5.88M) | authors (sequencelengths 1..1) | author (stringlengths 0..73) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fb5256f395872aca41064d5ee9447b8859b5241b | ef39649d83c23836d292646804c07a3155e8c4ee | /Backend/venv/bin/flask | 509dfd1d611e974f3b66f5ebc41ea1004af46d98 | [] | no_license | raghunami25/BlackCoffer | 8bacbc3a74cd24a68877a36baa91b6532edecff0 | ea281761588a5a0a175aba8fa4f6a2be92a36ebc | refs/heads/master | 2023-01-11T13:48:43.618390 | 2019-09-13T09:03:33 | 2019-09-13T09:03:33 | 208,227,069 | 1 | 0 | null | 2023-01-04T10:21:55 | 2019-09-13T08:42:56 | Python | UTF-8 | Python | false | false | 245 | #!/home/surya/Nammo/Fyle/BlackCoffer/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
5ffb78dea82fbf5e5125c452331e3215bd09369a | add72f4d6f9f7af1f437d19213c14efb218b2194 | /icekit/plugins/oembed_with_caption/migrations/0005_auto_20161027_1711.py | 83af409390c568532497d0a54c31641f11b4be87 | [
"MIT"
] | permissive | ic-labs/django-icekit | 6abe859f97c709fcf51207b54778501b50436ff7 | c507ea5b1864303732c53ad7c5800571fca5fa94 | refs/heads/develop | 2022-08-08T21:26:04.144852 | 2018-01-08T02:55:17 | 2018-01-08T02:55:17 | 65,470,395 | 53 | 12 | MIT | 2022-07-06T19:59:39 | 2016-08-11T13:11:02 | Python | UTF-8 | Python | false | false | 455 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_plugins_oembed_with_caption', '0004_auto_20160919_2008'),
]
operations = [
migrations.AlterModelTable(
name='oembedwithcaptionitem',
table='contentitem_icekit_plugins_oembed_with_caption_oembedwithcaptionitem',
),
]
| [
"[email protected]"
] | |
bc4f97973c2b183ee4bb255f93b004ae2b56cbcf | 638b207f3c7706cb0cb9dd1d6cf112ab91f69837 | /0x0A-python-inheritance/7-base_geometry.py | 18d56edea9c72397b21c587f702ce8bb3d21f34c | [] | no_license | NasserAbuchaibe/holbertonschool-higher_level_programming | c30a066dfd4525e936b4121f930c3a63e6d911d6 | 5b0c11423e11bd9201cc057775c099eb0259f305 | refs/heads/master | 2022-12-16T17:15:57.775143 | 2020-09-25T03:00:56 | 2020-09-25T03:00:56 | 259,379,453 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | #!/usr/bin/python3
"""module for class BaseGeometry
"""
class BaseGeometry():
"""empty class
"""
def area(self):
""" method area
"""
raise Exception("area() is not implemented")
def integer_validator(self, name, value):
"""method integer_validator
Arguments:
name (str): name
value (int) number int > 0
"""
if type(value) is not int:
raise TypeError("{} must be an integer".format(name))
if value <= 0:
raise ValueError("{} must be greater than 0".format(name))
| [
"[email protected]"
] | |
41cf3a733890a7482521f814229a4cfca5474ca3 | f72f2bfff534ba2b05f843689b7318650053484c | /dstat-0.7.2/plugins/dstat_vz_io.py | 9cb5146feb88633f0445dcae2daef54ca58aab7f | [] | no_license | mutanthost/code | bb7d631c470ae92ef9f7a8eaee2b3524f8de223a | cafe25512f117fe18d212a4af94a1ac90140cfe0 | refs/heads/master | 2016-09-13T23:35:33.480275 | 2016-05-24T17:56:49 | 2016-05-24T17:56:49 | 58,413,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,795 | py | U2FsdGVkX184PdpgI6m5MCn6D5AB2b9xaQSmv6Q3GLpoIoTRV8gPX92Db476+mCm
XlLolgIVZqMJ3h/WPlGQxBWe77MxbzOXx0zsTRt2P5o8UIjiayQWCOl0ACZtrfkW
c6lT3U6Vzm9rbD6AH/NC47QVM0ISMHBK/Uyf5QnobMlt4HcejX3iFdNRo4/6ufAn
TX0JZD4jvAHesWhhmE3t4uE+gxHuU0eiUX8tie6ZVXAcOeEkGAG5cxjoIVWpYrSo
bPcXd3I5F8/0Ga7OIGCNrYH5FNDIwPZowunYdDxAVS3ZI3P33lFPtjahDgmVy1Et
C/Mwue7pkKtAbnYhmwSVoBsx1sleuwSjvYWat5FQwEuHkJfUMS4C58NU04lZWYYa
8zu/T6w1Z9gsYA4AUam2sQobd1edrrLpnCGq0uJhLniYi2FTivK5DrZEpLb5zQx2
x+slbki50tYrrPCmWH1/AhWA8jGrITWkiHIrxN01hiNHrmoZ6PnK5ZLyEnxghuaw
BNzBo8ROj/8qOG7UwBaheehMeYejTrBezgmzRY3do6f2CT6086K7rpUS6Hes1QDt
lbgbE7+ufRJuxI+dVogXo8OgXp99FdX6JhkiXWaVbMmkTlorIdKFSYTkiJDV0+gI
SVvoXuoxu9Ix+Etp8Q15QIwqOG2P9yYPHNC3ADXc9dk/sdO6O0PG+slFuWddf+8y
ncyJgs4hKLDM7h5dF7LY61KvdgO0FQ3ah6OTNbN8icXzOHgaNdaJuHBLcnmIraAb
6sBpHxX1O7e395AjDajbhgMwsT3uW67g6xYxOjGkWLXMupYm4Jm41jU8LjtmMh16
gOGZ7KoXGVb7xDNl4PoQueebXC1JElDnBpeL9cstmPoI9JquOd5blcL1YfzyaiZy
9faxTSR18p/JOOQyg2vpbMapv2Bc3hp+ksuuWI537ySj1LasgGifH5nKXNfwTiGE
Aym+/nfJTgY9z0gG8T74PBfEjXNBgk+7niAh4whMd4iXkXqp1mJFTrQUDTnCRdr8
pIeKE3OA1YgZybPe1UhB2emqWfikwOksa9knCuy6ewyxsVYqF/0tZF5TL/yf1LTI
y+WSBoQxOPFUbuUZ4Y4evVfQEsdTwjfvgv4gZCzzVWwIsugJQVwiavJZwoVkQGud
G/CTcn09DbhCeKoD9+k3U5sRNXgjvwRz+FwdAQwateLg1ETOiHwwVdnFNmRNTHzh
fMlkuaSavQmB6+IB4Dyby7xjOFctuWZFPjzF0kU9/zam1kucvbltx3dEiNDcLZXt
0RqvClfu4iTeahVCuF9PMCGpZ5IrMNHlaxYjYo4DUNveb4h1nQCT3zkwX62lGCa+
/L3kSblI0/Wb8kKinVpmKbq45rZ5k2YPt368OPlI2HLTWX4omkxQI5K20t5ihHqa
kIPHwwsVBPn0BccX05cDtVFVqxsYjjShiGGd5fHsXHgcW4ROdo/ITCJUN0rSRRrU
wgcmdyY7S0Ak//QWjEucLl2ch5SlfYOEBnYeIp00uDzH8xWmzn6shjcIxhephozK
iAjuvOF6kz1A48O77L/+qdc6+mE+KiKZIlxzcTSRvfC0xB3MkjWFj9m16ve5JlXZ
77CE/ev6Hi7OOao45Ubl31vGe23MEP28NbQeCwpTrw4Tscj87D1KpuXKHkos//gE
J6OVnD1HOApDAtG5L5wOgKv0hjx+RHTwNDIxXXu/GZBKMQKeoVOMHP4fpgE8lvFY
CFWJ4t90pYT8EP1PWReIYh86gt73ihl9K+9HVXm0EDDhxAQGDiHH3VYtdxbG+sRL
GzRDpwxiw+QWfg92lW8Zlc2WM0DSuPoaIpkYN7eHrgSlhk7zAQhgIHxY6UoGKxlq
GO91PQgDVIDxJLr6h63Mj4WOINXmqhX6gUi5+ASseuq+Nzf40NQg3EJICUGFnVVl
pGEYMmXiXM4vIlmJwoxVZgN8DFNdwddmfYuAHcvKujGw5xfad/vb2nM8TyONZamT
JHZa+1uOwfu3zxvoTEv0u8OcdQPHsuBL++jeIu5wY2Y5pFe5ld0Zknn9phAXKtr3
BTCYaibkABtaehxVlrgebHrZdltaBiDaVLQEI6A3vp3U3VYebuxXa9eFhl1scsep
4zUYVyhqMZbz8nMkF2GH+Kx159y+vr+ppzWc25apTmwf3POGtUvo0hRvdm6ooCbX
7mzL0XVOKhGAkre1wakXuloF2BxIqKsHnK9vhS/GHw3LUwih3wRwX7uyVz0h3jh/
qEqj2amMCpwqf2O73vRWJQbxWMkUZOmyzGRpirumhzQnghPw2UPhuCjEDYXSuXlc
xdPL1Kzo+TxCdsj6efXRii5x1URGBgvkspA1gJO9TqrARso2uhYW2xBFQ0ZaHeqf
FxtfP2mlAAOZj4vX013bkc3PQCEnXexSlrlzGdzEyaDXzMTYaKZEx8G8KcBwG819
AxBnlvv3ONgxTzSI6dea0IrmUUKlVkQY2kFpI/X3yB0TrzyWKeIqPxBph94QTtos
Fxm5MG5cwZPG4IuckdR/II0DfVpca6Zl2odySEvLYFcehlUSGu4J0lCKHYJiD2oc
bswb+EVvK2kI3elN2yscU2dxlL66FloUICArwAqJaXfbsl5zfJdj5o+4KyqDYnEn
O/mLp81sTUk8LAMHBDrcFGq1gaUtZ5nl4ejZTfo/7k1CCzRcGGTR3h6NQ8QN3YUS
dB/uZuc6o7RY5KzzzUBq2Q+W5ihUyGGHCGkTrHomjhF8he8lL+GXiUo7DOoHONk/
pP8fkgyXpflWOwKTgVNM8C16pNAQQQ/ucf0/aNfG1kFhalm6TRSJNx+rxHu80jpN
V46SDo3mir62FOXaZcpr/NsBXxyGtelkllSYL5ycw2+0l9IZTRLUNBr4K5LtJcIA
jAnqaAmlap6Yh0sZPqLSlvfq77QTmiliElUekT9XCA4MyySmTACdoaGCGlaKyTh7
KkdrqhtLeoDlwhDZL75hQu0+6T2kmxs2E2/cr2Cn3Cfs8R6PvmZ9KoOnNHkQ/qPV
5h0RXE5PhsGx6IgM9OXj79Os95DLkcwNngPtJI91ZDSPfou4+vuPCpD8bhjwdsl3
07OX9S6SNHChnnSaWweNLMvdUw3B74mr66I8/VheONbZq8pMTaD8MDfFN0hViwm9
a0FVsd6HbZPyyGxTYbVRRgaeRLpjPOlsfcDTFPcs2K3m6fTW0AWuEGgYY3nkN5EB
mw4aUDTCNgH/9buKqexkmTkVJZ2zRPMH+0Aa/jAeW0CTOEFsCFAoNeX+sI/1Ylza
bo1z0+o2TNy5o9ObsW4vfCyo+du65Kqv1AV20/9Ik5p4I96pH2UJmLaWhjuszJdZ
VkWAJiVapwF8MY4d60TxGSeN9bqCiXybeFNcnzGbaoRPoccWPUpd7WpOokY5ucg7
2dkrILuh+MhMOMbkmzU9QMiq3zX7NMSLTSD5odaGIm9sJaRKsGd8aXIK+V7rJw5a
JdtmkJeq/f2AMec2cCaJ8McJI9CPd3p8uOQbTS4xakJxS2vdRfi95SoOvFNoR5Jq
klvDW8cDgZSGBZZbjkN0Q9Z6P98xkeAhVp/AgD/rA33mdu9OQUS80e2qcWYC3wP9
TkzE1dUDr/YhzjFioPMyuW90Gqw1FeLmcCaHoy3ACUe3AB5XZ3wvjOWMr9QO05Iu
VH64erTVTfX+wFAXuDlNP3bQL24HLZ8usbPqYwkv+gzr2YcrHsUHgp7Hrc59yR1H
6ow06s/w0pVojIWT+mpYTw==
| [
"[email protected]"
] | |
131f15db6f1850e4f757b5e40c066d267a5a2a0d | f6eae1f571f6e3f77e1669b8086a6ceaaca2b33b | /Testing/simple.py | 0637ad4054ba070f1382fba6da7fff79161b22b8 | [] | no_license | lienpo/Testing | 8ff2449196d2d7288b6db8ccb629ed2fd148a95c | ba02cbd077b1eb7372bfce1733366953ac267b30 | refs/heads/master | 2020-05-27T03:52:03.343704 | 2019-05-24T19:01:01 | 2019-05-24T19:01:01 | 188,473,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | def test_sum():
assert sum([1, 1]) == 2,"Should be equal"
if __name__ == "__main__":
test_sum()
print("Everything passed.")
| [
"[email protected]"
] | |
b3e449fece578cc14dfa7b173f03674ec9e2e84d | 54b5f37b749eae599efc27bb35c59084979ad9bf | /horas.py | 3b077d337532f0cce0a7f522bfa27325c508ac3c | [] | no_license | evicente71/evicente_pyton | e936c1907c8d58a142492e7d4805351fe38879d3 | e96783a66bfd1cb547b6f9489893623044ab65b5 | refs/heads/main | 2023-08-31T05:19:59.883324 | 2021-10-19T13:21:53 | 2021-10-19T13:21:53 | 418,476,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | # Ejemplo de horas
# Import the required dependencies
from datetime import datetime, date, time, timedelta
import calendar
ahora = datetime.now() # Get the current date and time
print("Fecha y Hora:", ahora) # Show date and time
print("Fecha y Hora UTC:",ahora.utcnow()) # Show UTC date/time
print("Horas:")
hora1 = time(10, 5, 0) # Assign 10h 5m 0s
print("\tHora1:", hora1)
hora2 = time(23, 15, 0) # Assign 23h 15m 0s
print("\tHora2:", hora2)
# Compare the two times
print("\tHora1 < Hora2:", hora1 < hora2) # True
| [
"[email protected]"
] | |
88687473125577bdd5826e65ccc0029abb2e2d0b | 717bd68ba0a3950967a63c7008afeb68202ba6e6 | /src/problem_015.py | 414186e8f2c2d6b08e4be57f924d35073b380f5f | [] | no_license | Brobin/project-euler | e868d19c5c42c0cbecee3c393910a4e8a8c5b017 | 2ae67001098355515f1d03d3a4b714618f067d5f | refs/heads/master | 2020-04-06T04:04:27.668896 | 2016-02-05T04:56:37 | 2016-02-05T04:56:37 | 20,501,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 771 | py | '''
Starting in the top left corner of a 2x2 grid, and
only being able to move to the right and down,
there are exactly 6 routes to the bottom right corner.
How many such routes are there through a 20x20 grid?
'''
def lattice_paths(size):
# cube represents the diagonal through the center
# of the lattice. Each cube point is the starting
# point of one path
cube = [1] * size
# iterate down to the corner
for x in range(size):
# iterate to the sides
for y in range(x):
# sum up all of the surrounding paths
cube[y] = cube[y] + cube[y - 1]
# add those paths to the middle
cube[x] = 2 * cube[x - 1]
# return the bottom right corner
return cube[size - 1]
print(lattice_paths(20))
| [
"[email protected]"
] | |
4bc452b8972147eb52635db5795a7e70a4c2408f | ceada1c46e6540f9066f7111f6a0387c24722f5c | /niuke/二叉搜索树的第k个节点.py | d66a699081827d3f65f5765dc16382f163c2044f | [] | no_license | w5802021/leet_niuke | 556c966791e6a5e9a1a8eec61f67973aec9e56ca | 199f2b62101480b963e776c07c275b789c20a413 | refs/heads/master | 2020-06-24T03:30:19.983671 | 2019-12-08T15:46:26 | 2019-12-08T15:46:26 | 180,187,921 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | class Solution:
    # Return the corresponding TreeNode
def KthNode(self, pRoot, k):
# write code here
res = []
def dfs(root,k,res):
if not root:
return
dfs(root.left,k,res)
            # If res holds fewer than k nodes, the first k values have not been collected yet
if len(res) < k:
res.append(root)
else:
return
dfs(root.right,k,res)
if k == 0:
return None
else:
dfs(pRoot,k,res)
if len(res) < k:
return None
return res[-1]
| [
"[email protected]"
] | |
5340f89341539527bcc8ec87238a910bce65cc5b | bdab0a18fce50d28452effe434098f46574cf6d5 | /nets/ssd_vgg_512.py | 717e10ae7689be939b70affa78f5eb91b83173f1 | [] | no_license | ten2net/SSD-Tensorflow | 10f74940f5f88bf95dd7717a41a3d9cdafc7a811 | 2356dd52e1ae16d8f0309a68683e9dfe06e688a9 | refs/heads/master | 2021-01-14T08:03:34.433617 | 2017-02-13T17:14:54 | 2017-02-13T17:14:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,721 | py | # Copyright 2016 Paul Balanca. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Definition of 512 VGG-based SSD network.
This model was initially introduced in:
SSD: Single Shot MultiBox Detector
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed,
Cheng-Yang Fu, Alexander C. Berg
https://arxiv.org/abs/1512.02325
Two variants of the model are defined: the 300x300 and 512x512 models, the
latter obtaining a slightly better accuracy on Pascal VOC.
Usage:
with slim.arg_scope(ssd_vgg.ssd_vgg()):
outputs, end_points = ssd_vgg.ssd_vgg(inputs)
@@ssd_vgg
"""
import math
from collections import namedtuple
import numpy as np
import tensorflow as tf
from nets import custom_layers
from nets import ssd_common
slim = tf.contrib.slim
# =========================================================================== #
# SSD class definition.
# =========================================================================== #
SSDParams = namedtuple('SSDParameters', ['img_shape',
'num_classes',
'feat_layers',
'feat_shapes',
'anchor_size_bounds',
'anchor_sizes',
'anchor_ratios',
'anchor_steps',
'anchor_offset',
'normalizations',
'prior_scaling'
])
class SSDNet(object):
"""Implementation of the SSD VGG-based 512 network.
The default features layers with 512x512 image input are:
conv4 ==> 64 x 64
conv7 ==> 32 x 32
conv8 ==> 16 x 16
conv9 ==> 8 x 8
conv10 ==> 4 x 4
conv11 ==> 2 x 2
conv12 ==> 1 x 1
The default image size used to train this network is 512x512.
"""
default_params = SSDParams(
img_shape=(512, 512),
num_classes=21,
feat_layers=['block4', 'block7', 'block8', 'block9', 'block10', 'block11', 'block12'],
feat_shapes=[(64, 64), (32, 32), (16, 16), (8, 8), (4, 4), (2, 2), (1, 1)],
anchor_size_bounds=[0.10, 0.90],
anchor_sizes=[(20.48, 51.2),
(51.2, 133.12),
(133.12, 215.04),
(215.04, 296.96),
(296.96, 378.88),
(378.88, 460.8),
(460.8, 542.72)],
anchor_ratios=[[2, .5],
[2, .5, 3, 1./3],
[2, .5, 3, 1./3],
[2, .5, 3, 1./3],
[2, .5, 3, 1./3],
[2, .5],
[2, .5]],
anchor_steps=[8, 16, 32, 64, 128, 256, 512],
anchor_offset=0.5,
normalizations=[20, -1, -1, -1, -1, -1, -1],
prior_scaling=[0.1, 0.1, 0.2, 0.2]
)
def __init__(self, params=None):
"""Init the SSD net with some parameters. Use the default ones
if none provided.
"""
if isinstance(params, SSDParams):
self.params = params
else:
self.params = SSDNet.default_params
# ======================================================================= #
def net(self, inputs,
is_training=True,
update_feat_shapes=True,
dropout_keep_prob=0.5,
prediction_fn=slim.softmax,
reuse=None,
scope='ssd_512_vgg'):
"""Network definition.
"""
r = ssd_net(inputs,
num_classes=self.params.num_classes,
feat_layers=self.params.feat_layers,
anchor_sizes=self.params.anchor_sizes,
anchor_ratios=self.params.anchor_ratios,
normalizations=self.params.normalizations,
is_training=is_training,
dropout_keep_prob=dropout_keep_prob,
prediction_fn=prediction_fn,
reuse=reuse,
scope=scope)
# Update feature shapes (try at least!)
if update_feat_shapes:
shapes = ssd_feat_shapes_from_net(r[0], self.params.feat_shapes)
self.params = self.params._replace(feat_shapes=shapes)
return r
def arg_scope(self, weight_decay=0.0005):
"""Network arg_scope.
"""
return ssd_arg_scope(weight_decay)
def arg_scope_caffe(self, caffe_scope):
"""Caffe arg_scope used for weights importing.
"""
return ssd_arg_scope_caffe(caffe_scope)
# ======================================================================= #
def anchors(self, img_shape, dtype=np.float32):
"""Compute the default anchor boxes, given an image shape.
"""
return ssd_anchors_all_layers(img_shape,
self.params.feat_shapes,
self.params.anchor_sizes,
self.params.anchor_ratios,
self.params.anchor_steps,
self.params.anchor_offset,
dtype)
def bboxes_encode(self, labels, bboxes, anchors,
scope='ssd_bboxes_encode'):
"""Encode labels and bounding boxes.
"""
return ssd_common.tf_ssd_bboxes_encode(
labels, bboxes, anchors,
matching_threshold=0.5,
prior_scaling=self.params.prior_scaling,
scope=scope)
def bboxes_decode(self, feat_localizations, anchors,
scope='ssd_bboxes_decode'):
"""Encode labels and bounding boxes.
"""
return ssd_common.tf_ssd_bboxes_decode(
feat_localizations, anchors,
prior_scaling=self.params.prior_scaling,
scope=scope)
def losses(self, logits, localisations,
gclasses, glocalisations, gscores,
label_smoothing=0.,
scope='ssd_losses'):
"""Define the SSD network losses.
"""
ssd_losses(logits, localisations,
gclasses, glocalisations, gscores,
label_smoothing,
scope=scope)
# =========================================================================== #
# SSD tools...
# =========================================================================== #
def layer_shape(layer):
"""Returns the dimensions of a 4D layer tensor.
Args:
layer: A 4-D Tensor of shape `[height, width, channels]`.
Returns:
Dimensions that are statically known are python integers,
otherwise they are integer scalar tensors.
"""
if layer.get_shape().is_fully_defined():
return layer.get_shape().as_list()
else:
static_shape = layer.get_shape().with_rank(4).as_list()
dynamic_shape = tf.unstack(tf.shape(layer), 3)
return [s if s is not None else d
for s, d in zip(static_shape, dynamic_shape)]
def ssd_size_bounds_to_values(size_bounds,
n_feat_layers,
img_shape=(512, 512)):
"""Compute the reference sizes of the anchor boxes from relative bounds.
The absolute values are measured in pixels, based on the network
default size (512 pixels).
This function follows the computation performed in the original
implementation of SSD in Caffe.
Return:
list of list containing the absolute sizes at each scale. For each scale,
the ratios only apply to the first value.
"""
assert img_shape[0] == img_shape[1]
img_size = img_shape[0]
min_ratio = int(size_bounds[0] * 100)
max_ratio = int(size_bounds[1] * 100)
step = int(math.floor((max_ratio - min_ratio) / (n_feat_layers - 2)))
# Start with the following smallest sizes.
sizes = [[img_size * 0.04, img_size * 0.1]]
for ratio in range(min_ratio, max_ratio + 1, step):
sizes.append((img_size * ratio / 100.,
img_size * (ratio + step) / 100.))
return sizes
def ssd_feat_shapes_from_net(predictions, default_shapes=None):
"""Try to obtain the feature shapes from the prediction layers.
Return:
list of feature shapes. Default values if predictions shape not fully
determined.
"""
feat_shapes = []
for l in predictions:
shape = l.get_shape().as_list()[1:4]
if None in shape:
return default_shapes
else:
feat_shapes.append(shape)
return feat_shapes
def ssd_anchor_one_layer(img_shape,
feat_shape,
sizes,
ratios,
step,
offset=0.5,
dtype=np.float32):
"""Computer SSD default anchor boxes for one feature layer.
Determine the relative position grid of the centers, and the relative
width and height.
Arguments:
feat_shape: Feature shape, used for computing relative position grids;
size: Absolute reference sizes;
ratios: Ratios to use on these features;
img_shape: Image shape, used for computing height, width relatively to the
former;
offset: Grid offset.
Return:
y, x, h, w: Relative x and y grids, and height and width.
"""
# Compute the position grid: simple way.
# y, x = np.mgrid[0:feat_shape[0], 0:feat_shape[1]]
# y = (y.astype(dtype) + offset) / feat_shape[0]
# x = (x.astype(dtype) + offset) / feat_shape[1]
# Weird SSD-Caffe computation using steps values...
y, x = np.mgrid[0:feat_shape[0], 0:feat_shape[1]]
y = (y.astype(dtype) + offset) * step / img_shape[0]
x = (x.astype(dtype) + offset) * step / img_shape[1]
# Expand dims to support easy broadcasting.
y = np.expand_dims(y, axis=-1)
x = np.expand_dims(x, axis=-1)
# Compute relative height and width.
# Tries to follow the original implementation of SSD for the order.
num_anchors = len(sizes) + len(ratios)
h = np.zeros((num_anchors, ), dtype=dtype)
w = np.zeros((num_anchors, ), dtype=dtype)
# Add first anchor boxes with ratio=1.
h[0] = sizes[0] / img_shape[0]
w[0] = sizes[0] / img_shape[1]
di = 1
if len(sizes) > 1:
h[1] = math.sqrt(sizes[0] * sizes[1]) / img_shape[0]
w[1] = math.sqrt(sizes[0] * sizes[1]) / img_shape[1]
di += 1
for i, r in enumerate(ratios):
h[i+di] = sizes[0] / img_shape[0] / math.sqrt(r)
w[i+di] = sizes[0] / img_shape[1] * math.sqrt(r)
return y, x, h, w
def ssd_anchors_all_layers(img_shape,
layers_shape,
anchor_sizes,
anchor_ratios,
anchor_steps,
offset=0.5,
dtype=np.float32):
"""Compute anchor boxes for all feature layers.
"""
layers_anchors = []
for i, s in enumerate(layers_shape):
anchor_bboxes = ssd_anchor_one_layer(img_shape, s,
anchor_sizes[i],
anchor_ratios[i],
anchor_steps[i],
offset=offset, dtype=dtype)
layers_anchors.append(anchor_bboxes)
return layers_anchors
# =========================================================================== #
# Functional definition of VGG-based SSD 512.
# =========================================================================== #
def ssd_multibox_layer(inputs,
num_classes,
sizes,
ratios=[1],
normalization=-1,
bn_normalization=False):
"""Construct a multibox layer, return a class and localization predictions.
"""
net = inputs
if normalization > 0:
net = custom_layers.l2_normalization(net, scaling=True)
# Number of anchors.
num_anchors = len(sizes) + len(ratios)
# Location.
num_loc_pred = num_anchors * 4
loc_pred = slim.conv2d(net, num_loc_pred, [3, 3], scope='conv_loc')
loc_pred = tf.reshape(loc_pred, tf.concat(0, [loc_pred.get_shape()[:-1],
[num_anchors],
[4]]))
# Class prediction.
num_cls_pred = num_anchors * num_classes
cls_pred = slim.conv2d(net, num_cls_pred, [3, 3], scope='conv_cls')
cls_pred = tf.reshape(cls_pred, tf.concat(0, [cls_pred.get_shape()[:-1],
[num_anchors],
[num_classes]]))
return cls_pred, loc_pred
def ssd_net(inputs,
num_classes=21,
feat_layers=SSDNet.default_params.feat_layers,
anchor_sizes=SSDNet.default_params.anchor_sizes,
anchor_ratios=SSDNet.default_params.anchor_ratios,
normalizations=SSDNet.default_params.normalizations,
is_training=True,
dropout_keep_prob=0.5,
prediction_fn=slim.softmax,
reuse=None,
scope='ssd_512_vgg'):
"""SSD net definition.
"""
# End_points collect relevant activations for external use.
end_points = {}
with tf.variable_scope(scope, 'ssd_512_vgg', [inputs], reuse=reuse):
# Original VGG-16 blocks.
net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
end_points['block1'] = net
net = slim.max_pool2d(net, [2, 2], scope='pool1')
# Block 2.
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
end_points['block2'] = net
net = slim.max_pool2d(net, [2, 2], scope='pool2')
# Block 3.
net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
end_points['block3'] = net
net = slim.max_pool2d(net, [2, 2], scope='pool3')
# Block 4.
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
end_points['block4'] = net
net = slim.max_pool2d(net, [2, 2], scope='pool4')
# Block 5.
net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
end_points['block5'] = net
net = slim.max_pool2d(net, [3, 3], 1, scope='pool5')
# Additional SSD blocks.
# Block 6: let's dilate the hell out of it!
net = slim.conv2d(net, 1024, [3, 3], rate=6, scope='conv6')
end_points['block6'] = net
# Block 7: 1x1 conv. Because the fuck.
net = slim.conv2d(net, 1024, [1, 1], scope='conv7')
end_points['block7'] = net
# Block 8/9/10/11: 1x1 and 3x3 convolutions stride 2 (except lasts).
end_point = 'block8'
with tf.variable_scope(end_point):
net = slim.conv2d(net, 256, [1, 1], scope='conv1x1')
net = slim.conv2d(net, 512, [3, 3], stride=2, scope='conv3x3')
end_points[end_point] = net
end_point = 'block9'
with tf.variable_scope(end_point):
net = slim.conv2d(net, 128, [1, 1], scope='conv1x1')
net = slim.conv2d(net, 256, [3, 3], stride=2, scope='conv3x3')
end_points[end_point] = net
end_point = 'block10'
with tf.variable_scope(end_point):
net = slim.conv2d(net, 128, [1, 1], scope='conv1x1')
net = slim.conv2d(net, 256, [3, 3], stride=2, scope='conv3x3')
end_points[end_point] = net
end_point = 'block11'
with tf.variable_scope(end_point):
net = slim.conv2d(net, 128, [1, 1], scope='conv1x1')
net = slim.conv2d(net, 256, [3, 3], stride=2, scope='conv3x3')
end_points[end_point] = net
end_point = 'block12'
with tf.variable_scope(end_point):
net = slim.conv2d(net, 128, [1, 1], scope='conv1x1')
net = slim.conv2d(net, 256, [4, 4], scope='conv4x4')
# Fix padding to match Caffe version (pad=1).
pad_shape = [(i-j) for i, j in zip(layer_shape(net), [0, 1, 1, 0])]
net = tf.slice(net, [0, 0, 0, 0], pad_shape, name='caffe_pad')
end_points[end_point] = net
# Prediction and localisations layers.
predictions = []
logits = []
localisations = []
for i, layer in enumerate(feat_layers):
with tf.variable_scope(layer + '_box'):
p, l = ssd_multibox_layer(end_points[layer],
num_classes,
anchor_sizes[i],
anchor_ratios[i],
normalizations[i])
predictions.append(prediction_fn(p))
logits.append(p)
localisations.append(l)
return predictions, localisations, logits, end_points
ssd_net.default_image_size = 512
def ssd_arg_scope(weight_decay=0.0005):
"""Defines the VGG arg scope.
Args:
weight_decay: The l2 regularization coefficient.
Returns:
An arg_scope.
"""
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer):
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
padding='SAME') as sc:
return sc
# =========================================================================== #
# Caffe scope: importing weights at initialization.
# =========================================================================== #
def ssd_arg_scope_caffe(caffe_scope):
"""Caffe scope definition.
Args:
caffe_scope: Caffe scope object with loaded weights.
Returns:
An arg_scope.
"""
# Default network arg scope.
with slim.arg_scope([slim.conv2d],
activation_fn=tf.nn.relu,
weights_initializer=caffe_scope.conv_weights_init(),
biases_initializer=caffe_scope.conv_biases_init()):
with slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.relu):
with slim.arg_scope([custom_layers.l2_normalization],
scale_initializer=caffe_scope.l2_norm_scale_init()):
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
padding='SAME') as sc:
return sc
# =========================================================================== #
# SSD loss function.
# =========================================================================== #
def ssd_losses(logits, localisations,
gclasses, glocalisations, gscores,
match_threshold=0.5,
negative_ratio=3.,
alpha=1.,
label_smoothing=0.,
scope='ssd_losses'):
"""Loss functions for training the SSD 512 VGG network.
This function defines the different loss components of the SSD, and
adds them to the TF loss collection.
Arguments:
logits: (list of) predictions logits Tensors;
localisations: (list of) localisations Tensors;
gclasses: (list of) groundtruth labels Tensors;
glocalisations: (list of) groundtruth localisations Tensors;
gscores: (list of) groundtruth score Tensors;
"""
# Some debugging...
# for i in range(len(gclasses)):
# print(localisations[i].get_shape())
# print(logits[i].get_shape())
# print(gclasses[i].get_shape())
# print(glocalisations[i].get_shape())
# print()
with tf.name_scope(scope):
l_cross = []
l_loc = []
for i in range(len(logits)):
with tf.name_scope('block_%i' % i):
# Determine weights Tensor.
pmask = tf.cast(gclasses[i] > 0, logits[i].dtype)
n_positives = tf.reduce_sum(pmask)
n_entries = np.prod(gclasses[i].get_shape().as_list())
# r_positive = n_positives / n_entries
# Select some random negative entries.
r_negative = negative_ratio * n_positives / (n_entries - n_positives)
nmask = tf.random_uniform(gclasses[i].get_shape(),
dtype=logits[i].dtype)
nmask = nmask * (1. - pmask)
nmask = tf.cast(nmask > 1. - r_negative, logits[i].dtype)
# Add cross-entropy loss.
with tf.name_scope('cross_entropy'):
# Weights Tensor: positive mask + random negative.
weights = pmask + nmask
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits[i],
gclasses[i])
loss = tf.contrib.losses.compute_weighted_loss(loss, weights)
l_cross.append(loss)
# Add localization loss: smooth L1, L2, ...
with tf.name_scope('localization'):
# Weights Tensor: positive mask + random negative.
weights = alpha * pmask
loss = custom_layers.abs_smooth(localisations[i] - glocalisations[i])
loss = tf.contrib.losses.compute_weighted_loss(loss, weights)
l_loc.append(loss)
# Total losses in summaries...
with tf.name_scope('total'):
tf.summary.scalar('cross_entropy', tf.add_n(l_cross))
tf.summary.scalar('localization', tf.add_n(l_loc))
| [
"[email protected]"
] | |
0ed10a4fdbba97828fd516d62cfa7fa6108c241b | bac81c11edee1a173aa23f1dd4c114c5a1711689 | /Python CSC/Lesson 2 Functions/Functional.py | 388f305217a2997de6bf006b33f8875ee9d41ba0 | [] | no_license | Lexkane/PythonPractice | 8db9a22edc7532427f315a65a8ed40f14d242ef3 | 4aeb59459972d47b6e48ed264935cbae0ffc623d | refs/heads/master | 2020-05-17T21:33:50.870795 | 2019-05-11T21:04:24 | 2019-05-11T21:04:24 | 183,975,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py |
def main():
pass
print(list(map(lambda x,n:x**n,[2,3],range(1,8))))
print(list(filter(lambda x:x%2!=0 , range(10))))
xs=[0,None,[],{}, set(), "", 42]
print(list(filter(None,xs)))
list(zip ("abc", range(3),[42j,42j,42j]))
list (zip("abc",range(10))) | [
"[email protected]"
] | |
171516479537b87aea43fa5da805040c94a1b2c2 | 6fbd43a7db4dd1f665214dfe7f911307c8fb01c9 | /weekday03/test05.py | fdb33b9e66ec5d6a47650ca12a4f7d9c2ed14fa0 | [] | no_license | shevaalorma/kingdom | 27452781e4612fb35568534e8a7713fd3f74b2be | 61c7244f07a97d7932c64939d5a5561318837e20 | refs/heads/master | 2020-03-19T03:59:22.030669 | 2018-07-10T03:03:27 | 2018-07-10T03:03:27 | 135,783,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | from functools import partial
class StaticMethod:
def __init__(self,fn):
print('Init')
self.fn = fn
def __get__(self, instance, owner):
return self.fn
class A:
@StaticMethod
def foo(): # add = StaticMethod(add)
print('StaticMethod')
partial
f = A.foo
f() | [
"[email protected]"
] | |
759fcedbbbe5d0a2f46ab6ce18f33149c16f9d8f | 35c2f7ca8f3d1ca86fdc2a47615b381ad5e444ee | /edaproj/home/views.py | 20cd556ae0e10e5acc44406805b9bffd959ef5cc | [] | no_license | pxellos/django-proj | c35fe761f2ef041ac5f0b94ce9d2df1198994577 | 1818edceb63ee23b7cdf5afe19613349b030ce8b | refs/heads/master | 2023-05-28T00:27:34.205069 | 2021-05-26T03:27:46 | 2021-05-26T03:27:46 | 370,728,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | from django.shortcuts import render
from io import StringIO
import pandas as pd
from matplotlib import pyplot as plt
# %matplotlib inline
coin = pd.read_csv('./BitCoin.csv')
# coin = pd.read_csv(r'C:\Users\PJH\Downloads\BitCoin.csv') # Windows Test
def return_graph():
coin_date = (coin['Date'] >= '2016-06-01') & (coin['Date'] <= '2017-06-30')
coin_open = coin['Open']
    coin_result = coin[coin_date & coin_open.notna()]  # combine the date mask with non-null Open values; '&' against a raw float column would raise a TypeError
coin_result = coin_result.sort_values('Date')
fig = plt.figure(figsize=(20, 10))
plt.plot(coin_result['Date'], coin_result['Open'], '#f2a900')
plt.xlabel('Date')
plt.ylabel('Price')
plt.title('Bitcoin Moving Average')
imgdata = StringIO()
fig.savefig(imgdata, format='svg')
imgdata.seek(0)
data = imgdata.getvalue()
return data
# Create your views here.
def home(request):
res = {'graph': None}
res['graph'] = return_graph()
return render(request, 'coin.html', res)
def index(request):
# name = 'Michael'
nums = [1, 2, 3, 4, 5]
# return HttpResponse("<h1>Hello World</h1>")
return render(request, 'index.html', {"my_list": nums})
| [
"[email protected]"
] | |
2fabf625cda7adc230917713268cf0523500ca01 | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /trigger_memory_leak3_mcs/interreplay_4_r_1/openflow_replay_config.py | 3c7bb3d35cba1c8bd06933ed12feeff467946190 | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import OpenFlowReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose openflow.discovery topology host_tracker sts.util.socket_mux.pox_monkeypatcher openflow.of_01 --address=__address__ --port=__port__ --max_connections=15', label='c1', address='127.0.0.1', cwd='pox')],
topology_class=MeshTopology,
topology_params="num_switches=2",
patch_panel_class=BufferedPatchPanel,
multiplex_sockets=True,
kill_controllers_on_exit=True)
control_flow = OpenFlowReplayer(simulation_config, "experiments/trigger_memory_leak3_mcs/interreplay_4_r_1/events.trace")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'InvariantChecker.check_liveness'
# Bug signature: "c1"
| [
"[email protected]"
] | |
40feebc94523ed391e727d1854687666cdb185c2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03036/s983230464.py | ab9f77dcec2f02dbb198df4527c176cbe8bba200 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | r, d, x0 = map(int, input().split())
x = [x0]
for i in range(10):
x.append(r*x[-1] - d)
print(*x[1:], sep='\n') | [
"[email protected]"
] | |
fd27214edfa11213e5ec499df86c25b0627dacb8 | 844f311c6732e25bbe9966ee6186d6f97cf5b297 | /pset6/mario/more/mario.py | 1784e40c2ff25292b19a5d5135962ac2c94308a6 | [] | no_license | guiartbp/Small-Projects | 266ff332293c9bd85255e6ee16a6f663dc4e219b | eb7296e1e59d4af0b7b44030c0d70ec1e171ddfc | refs/heads/main | 2023-02-05T00:45:25.400351 | 2020-12-22T01:19:17 | 2020-12-22T01:19:17 | 323,385,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | from cs50 import get_int
# While infinity, but if height = between 1 and 8: GO
while(True):
h = get_int("Height: ")
if h > 0 and h < 9:
break
for i in range(h):
# Cr = Column left
for cl in range(h):
if cl < (h - i - 1):
print(" ", end="")
else:
print("#", end="")
# space between the two columns
print(" ", end="")
# Cr = Column Right
for p in range(i + 1):
print("#", end="")
print() | [
"[email protected]"
] | |
2041f79c3054f2224d54f880420444fd083e597c | 461741abcc70f55424705a82e5f50a952ff14416 | /venv/Scripts/pip3-script.py | bf7abf8e0dcc24649644c21a6a167e615e7b8e8b | [] | no_license | Kedo-Aleksei/Lottery-SimpleModel--5-36 | 17ef593f8cf51b1c3a08478033083d96f5decf03 | 93cad5c3afc7ffba1de1adddf5dc19819090e071 | refs/heads/master | 2022-01-05T07:04:45.504928 | 2019-06-17T09:53:32 | 2019-06-17T09:53:32 | 181,031,326 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 456 | py | #!"C:\Образование\НИУ ВШЭ\2 курс\Тервер\Проект\venv\Scripts\python.exe" -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
b6333d75f1ec9e0b596b6d898492bae4e3331409 | 1b5404b8099de74d4e39e0a41b1d04c61defa8d4 | /multiserver_old_versions/multiserver_project/node_setup.py | 975af922d502ddcf19b474bb6c69ea743018c3cb | [] | no_license | ipeterov/random-stuff | 5d07bdcfdcb145d06ed89095f2ad34b70ff0f0bd | dbb38d42331f636919fd149b23783e02ee2c9afb | refs/heads/master | 2023-05-14T00:41:51.122251 | 2023-05-04T12:10:26 | 2023-05-04T12:10:26 | 206,028,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,878 | py | from invoke import run
import os
import shutil
def insert_if_not_in(filename, line, index):
if not line.endswith('\n'):
line = line + '\n'
with open(filename, 'r') as f:
contents = [item for item in f.readlines() if item.strip()]
if line not in contents:
contents.insert(index, line)
contents = "".join(contents)
with open(filename, 'w') as f:
print(contents)
f.write(contents)
def copy_create_dirs(src, dst):
try:
shutil.copyfile(src, dst)
except FileNotFoundError:
os.mkdir(os.path.dirname(dst))
shutil.copyfile(src, dst)
run('chown {} {} -R'.format(username, os.path.dirname(dst)))
REQUIRED_PACKAGES = ['virtualenv', 'boto3', 'virtualenv-api']
DEFAULT_INSTALL_PATH = '/home/{}/.multiserver'
DEFAULT_SCREEN_NAME = 'multiserver_node'
username = input('Username [{}]: '.format(os.getlogin()))
if not username:
username = os.getlogin()
default_install_path = DEFAULT_INSTALL_PATH.format(username)
install_path = input('Installation path [{}]: '.format(default_install_path))
if not install_path:
install_path = default_install_path
if os.path.exists(install_path):
if input('Path already exists. Remove all files there and reinstall or abort installation? ([y]/else): ') in ('', 'y'):
reinstall = True
else:
raise Exception('Installation aborted.')
else:
    reinstall = False
env_path = os.path.join(install_path, 'venv')
env_python_path = os.path.join(env_path, 'bin/python')
autolaunch = input('Add node autolaunch? ([y]/else): ') in ('', 'y')
if autolaunch:
screen_name = input('Screen name? [{}]: '.format(DEFAULT_SCREEN_NAME))
if not screen_name:
screen_name = DEFAULT_SCREEN_NAME
node_py_path = os.path.join(install_path, 'src/node.py')
autolaunch_line = "su - {username} -c 'screen -dmS {screen_name} {python_path} {node_py_path}'".format(
username=username,
screen_name=screen_name,
python_path=env_python_path,
node_py_path=node_py_path
)
sure = input('Are you sure? (y/else): ')
if sure == 'y':
# Install requirements
for package in REQUIRED_PACKAGES:
run('pip3 install {}'.format(package))
# Delete old installation if reinstalling
if reinstall:
shutil.rmtree(install_path)
# Create virtualenv
run('virtualenv {} --system-site-packages'.format(env_path))
# Make folder structure
shutil.copytree('./', os.path.join(install_path, 'src'))
copy_create_dirs('node_config.json', '/var/multiserver/node_config.json')
# Make user owner of the folder
run('chown {} {} -R'.format(username, install_path))
run('chown {} {} -R'.format(username, '/var/multiserver/node_config.json'))
# Add autolaunch to rc.local
if autolaunch:
insert_if_not_in('/etc/rc.local', autolaunch_line, -1)
| [
"[email protected]"
] | |
55bb701f486a84a99fae91898715761db554530a | 4b3ebc561198acaa82edf571d788594542ff7037 | /json_to_db/porter.py | e21eec8709eff07ce5eed4bc2d7fe7064b27df09 | [] | no_license | idf-archive/InteractionPlatformReader | cabc41d8b8b7525326b71424d6fdf3cda0427df6 | fe7ec81831487fe273a2d4219a57231a46857aeb | refs/heads/master | 2020-12-24T16:49:58.921342 | 2014-04-29T09:06:53 | 2014-04-29T09:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,408 | py | # -*- coding: UTF-8 -*-
import codecs
import datetime
from settings import *
import json
import os
from json_to_db.models.models import *
__author__ = 'Danyang'
DATETIME_MASK = "%Y-%m-%d %H:%M:%S.0"
def store_json():
for root, _, files in os.walk(DATA_PATH):
for f in files:
file_path = os.path.join(root, f)
with codecs.open(file_path, "r", encoding="utf-8") as f:
content = f.read()
content = json.loads(content)
for item in content:
# parsing Stock
stock_code = item["stockcode"]
stock_type = item.get("stocktype", "S")
try:
stock = Stock.get(Stock.stock_code==stock_code)
except DoesNotExist:
stock = Stock.create(stock_code=stock_code,
stock_type=stock_type
)
# parsing Speculator
is_guest = bool(item["q_isguest"])
name = item["q_name"]
try:
speculator = Speculator.get(Speculator.name==name)
except DoesNotExist:
speculator = Speculator.create(name=name,
is_guest=is_guest
)
# parsing Question
id = item["q_id"]
try:
timestamp = datetime.datetime.strptime(item["q_date"], DATETIME_MASK)
except ValueError:
timestamp = 0
content = item["q_content"]
q_is_close_comment = bool(item.get("q_isclosecomment", 1))
c_is_close_comment = bool(item.get("c_isclosecomment", 1))
q_is_close_appraise = bool(item.get("q_iscloseappraise", 1))
c_is_close_appraise = bool(item.get("c_iscloseappraise", 1))
is_canceled = bool(item["hasCancel"])
score = item.get("score", -999)
stock_code = stock
speculator_name = speculator
try:
question = Question.get(Question.id==id)
except DoesNotExist:
question = Question.create(id=id,
datetime=timestamp,
content=content,
q_is_close_comment=q_is_close_comment,
c_is_close_comment=c_is_close_comment,
q_is_close_appraise=q_is_close_appraise,
c_is_close_appraise=c_is_close_appraise,
is_canceled=is_canceled,
score=score,
stock_code=stock_code,
speculator_name=speculator_name,
)
item_list = [stock, speculator, question]
if "reply" in item:
for reply in item["reply"]:
# parsing Management
name = reply["r_name"]
office_name = reply["r_officename"]
stock_code = stock
try:
management = Management.get(Management.office_name==office_name)
except DoesNotExist:
management = Management.create(name=name,
office_name=office_name,
stock_code=stock_code)
# parsing Reply
id = reply["r_id"]
try:
timestamp = datetime.datetime.strptime(reply["r_date"], DATETIME_MASK)
except ValueError:
timestamp = 0
content = reply["r_content"]
is_check = bool(reply["isCheck"])
question_id = question
management_param = management
try:
reply = Reply.get(Reply.id==id)
except DoesNotExist:
reply = Reply.create(id=id,
datetime=timestamp,
content=content,
is_check=is_check,
question_id = question_id,
management=management_param,
)
item_list.append(management)
item_list.append(reply)
print "saved to db ..."
for item in item_list:
print item
| [
"[email protected]"
] | |
fde47544cd83c4a3a8de1a507dac810290dec3d9 | 19f69de9389bc63c7f0d7f7a542e4f15c7fefe35 | /pontoon/base/migrations/0129_translation_active.py | 3a9639ff5d820a7e12dafb677e1afeffb571abf2 | [
"BSD-3-Clause"
] | permissive | skade/pontoon | 9c8ffbf71494f204953d799eb6c9e1f4ae3730e7 | ffd97d95633b26eee2eacbc886f5d15dc8d0aa00 | refs/heads/master | 2020-05-26T05:29:26.696838 | 2019-05-27T19:22:01 | 2019-05-27T19:22:01 | 188,121,695 | 1 | 1 | BSD-3-Clause | 2019-05-25T19:27:01 | 2019-05-22T22:23:42 | Python | UTF-8 | Python | false | false | 1,161 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-29 10:58
from __future__ import unicode_literals
from django.db import connection, migrations, models
from django.db.utils import ProgrammingError
entity_document_update_trigger_drop_sql = '''
DROP TRIGGER base_translation_entity_document_update ON "base_translation";
DROP FUNCTION base_translation_entity_document_update();
'''
def drop_entity_document(apps, schema):
with connection.cursor() as cursor:
try:
cursor.execute(entity_document_update_trigger_drop_sql)
except ProgrammingError:
pass
class Migration(migrations.Migration):
dependencies = [
('base', '0128_pontoon-intro_to_system_project'),
]
operations = [
migrations.RemoveField(
model_name='translation',
name='entity_document',
),
migrations.AddField(
model_name='translation',
name='active',
field=models.BooleanField(default=False),
),
migrations.RunPython(
drop_entity_document,
migrations.RunPython.noop,
),
]
| [
"[email protected]"
] | |
0596eeb734042b7270de7d6fae22a459170135a9 | 836ac3cb6624db2cf6119397cb710c1ade60eb67 | /LoL/competition/migrations/0025_auto_20150513_1010.py | ce4d6cde5ab13803fa72e44d5a3d7ff31f05e88f | [] | no_license | Muahahas/DjangoLoL | db29a89bbc5131c432553473ef8ff6ce715d99d1 | 28d3d93037d1f43749dbddb82540a377d1607cbe | refs/heads/master | 2016-09-05T20:55:41.247099 | 2015-06-08T16:14:42 | 2015-06-08T16:14:42 | 35,156,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('competition', '0024_auto_20150513_0812'),
]
operations = [
migrations.AddField(
model_name='jornada',
name='league',
field=models.ForeignKey(default=1, to='competition.Lliga'),
preserve_default=False,
),
migrations.AddField(
model_name='partida',
name='jornada',
field=models.ForeignKey(default=1, to='competition.Jornada'),
preserve_default=False,
),
migrations.AlterField(
model_name='jornada',
name='date',
field=models.DateTimeField(default=datetime.datetime(2015, 5, 13, 10, 9, 40, 670718, tzinfo=utc)),
preserve_default=True,
),
]
| [
"[email protected]"
] | |
f9672608662a8d4884e7ffa564f1f7b131b68e15 | 9ddc88fe2f8be8fa32b205d6f6ef5d4f9c8da1c0 | /env/lib/python2.7/site-packages/django_markup/markup.py | 10a383b92437af6408d212abb36218687de0229b | [] | no_license | manishbalyan/view_the_review | 3b64f080a92b18e9c5e7663a5aa5d0ba4974f624 | 04055cbb6e55847f2a4c11755fe6ec8dc7445994 | refs/heads/master | 2021-06-06T21:02:40.180796 | 2019-10-29T09:08:01 | 2019-10-29T09:08:01 | 51,379,483 | 1 | 0 | null | 2021-04-16T20:08:04 | 2016-02-09T16:25:52 | HTML | UTF-8 | Python | false | false | 3,534 | py | import six
from django.conf import settings
from django_markup.defaults import DEFAULT_MARKUP_FILTER, DEFAULT_MARKUP_CHOICES
class MarkupFormatter(object):
def __init__(self, load_defaults=True):
self.filter_list = {}
if load_defaults:
filter_list = getattr(settings, 'MARKUP_FILTER', DEFAULT_MARKUP_FILTER)
for filter_name, filter_class in six.iteritems(filter_list):
self.register(filter_name, filter_class)
def _get_filter_title(self, filter_name):
"""
Returns the human readable title of a given filter_name. If no title
attribute is set, the filter_name is used, where underscores are
replaced with whitespaces and the first character of each word is
uppercased. Example:
        >>> MarkupFormatter._get_filter_title('markdown')
        'Markdown'
        >>> MarkupFormatter._get_filter_title('a_cool_filter_name')
'A Cool Filter Name'
"""
title = getattr(self.filter_list[filter_name], 'title', None)
if not title:
title = ' '.join([w.title() for w in filter_name.split('_')])
return title
def choices(self):
"""
Returns the filter list as a tuple. Useful for model choices.
"""
choice_list = getattr(settings, 'MARKUP_CHOICES', DEFAULT_MARKUP_CHOICES)
return [(f, self._get_filter_title(f)) for f in choice_list]
def register(self, filter_name, filter_class):
"""
Register a new filter for use
"""
self.filter_list[filter_name] = filter_class
def update(self, filter_name, filter_class):
"""
Yep, this is the same as register, it just sounds better.
"""
self.filter_list[filter_name] = filter_class
def unregister(self, filter_name):
"""
Unregister a filter from the filter list
"""
if filter_name in self.filter_list:
self.filter_list.pop(filter_name)
def flush(self):
"""
Flushes the filter list.
"""
self.filter_list = {}
def __call__(self, text, filter_name=None, **kwargs):
"""
Applies text-to-HTML conversion to a string, and returns the
HTML.
TODO: `filter` should either be a filter_name or a filter class.
"""
filter_fallback = getattr(settings, 'MARKUP_FILTER_FALLBACK', False)
if not filter_name and filter_fallback:
filter_name = filter_fallback
# Check that the filter_name is a registered markup filter
if filter_name not in self.filter_list:
raise ValueError("'%s' is not a registered markup filter. Registered filters are: %s." %
(filter_name, ', '.join(six.iterkeys(self.filter_list))))
filter_class = self.filter_list[filter_name]
# Read global filter settings and apply it
filter_kwargs = {}
filter_settings = getattr(settings, 'MARKUP_SETTINGS', {})
if filter_name in filter_settings:
filter_kwargs.update(filter_settings[filter_name])
filter_kwargs.update(**kwargs)
# Apply the filter on text
return filter_class().render(text, **filter_kwargs)
# Unless you need to have multiple instances of MarkupFormatter lying
# around, or want to subclass it, the easiest way to use it is to
# import this instance.
#
# Note if you create a new instance of MarkupFormatter(), the built
# in filters are not assigned.
formatter = MarkupFormatter()
| [
"[email protected]"
] | |
f77e11b2a196085582e71c8ff10f4ecf39cf10f5 | 4e02eefa71196aac8d62a61e3d698b1d1257a523 | /mn52图库/mn52图库网.py | 734fb17d82de4e4dfd0995e5840080103c82b0f0 | [] | no_license | onism7/spider | e7723f9cc8727184b0edf468c8821b57a80af501 | 5a0fe16f367876ab5f63aa7737a9e0a0efdb3b09 | refs/heads/爬虫学习 | 2023-04-04T23:59:02.385924 | 2020-07-05T15:10:08 | 2020-07-05T15:10:08 | 268,724,369 | 1 | 0 | null | 2021-03-30T12:10:27 | 2020-06-02T06:54:24 | null | UTF-8 | Python | false | false | 3,375 | py | import requests
import datetime
from urllib.request import urlretrieve
from lxml import etree
BASE_URL = "https://www.mn52.com"
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36"
}
def menu():
print("""
(1)==> 需要输入:文件储存路径,例如 D:/image
下载的图片都会保存在这个文件夹
(2)==> 图片类别 Number
性感美女 ==>[ 1 ]
清纯美女 ==>[ 2 ]
韩国美女 ==>[ 3 ]
欧美图片 ==>[ 4 ]
美女明星 ==>[ 5 ]
(3)==> 下载的起始页和末尾页的页码数
起始页[ startPage ]
末尾页[ endPage(不包含末尾页)]
""")
def downloadImage(url, path):
response = requests.get(url, headers=headers)
content = response.text
html = etree.HTML(content)
    src = html.xpath("//div[@id='originalpic']/img/@src")  # real image URLs
firstsrc = src[0]
    filetype = firstsrc[-4:]  # get the image type (jpg, png) by slicing
for img in src:
img_name = img[-12:-4]
if str(img[1:4]) == "img":
imgUrl = BASE_URL + img
else:
imgUrl = "https:" + img
save_path = path + "/" + str(img_name) + str(filetype)
urlretrieve(imgUrl, save_path)
print("图片下载成功!", imgUrl)
def get_url(typeNum):
type = ""
if typeNum == 1:
type = "xingganmeinv"
elif typeNum == 2:
type = "meihuoxiezhen"
elif typeNum == 3:
type = "rihanmeinv"
elif typeNum == 4:
type = "jingyannenmo"
elif typeNum == 5:
type = "meinvmingxing"
url1 = "https://www.mn52.com/"
url2 = "/list_"
url = url1 + type + url2 + str(typeNum) + "_"
return url
def main():
path = input("1.输入文件存储路径(例如 D:/image):")
typeNum = int(input("2.请输入下载分类: "))
urlList = get_url(typeNum)
startPage = int(input("3.请输入起始页:"))
endPage = int(input("4.请输入末尾页:"))
print("== 精彩资源即将开始 ==")
startTime = datetime.datetime.now()
for n in range(startPage, endPage):
reurl = urlList + str(n) + ".html"
response = requests.get(reurl, headers=headers)
content = response.text
html = etree.HTML(content)
detail_src = ["https://" + i[2:] for i in
                      html.xpath("//div[@class='row']//div[@class='item-box']/a/@href")]  # collect the detail-page URLs
for url in detail_src:
indexImg = detail_src.index(url)
try:
print("开始下载组图:", url)
starttime = datetime.datetime.now()
downloadImage(url, path)
endtime = datetime.datetime.now()
print(" 下载成功,耗时:" + str(endtime - starttime))
print(" =========> 第" + str(indexImg + 1) + "组图片下载完成 <=========")
except Exception as e:
print(e)
print("===========================> 第" + str(n) +
"个图片列表下载完成 <===========================")
endTime = datetime.datetime.now()
print("下载成功,耗时::" + str(endTime - startTime))
if __name__ == '__main__':
    menu()  # show the menu
main()
| [
"[email protected]"
] | |
b978ca26239a1672e47da67f0bee8ea859d00d47 | a5916365215616a02dcb61356ee5d6cacdac651c | /activate_connection.py | 053151dcac03ce6d33b45803c84c9fbeb3072fb5 | [] | no_license | CarlosLabrado/apDemo | bcbde2cad69c88fd425da1d721e2cdbc75101bb7 | f165e4f2c966fa72e673913b00747952b355d9e0 | refs/heads/master | 2020-03-16T11:23:10.679992 | 2018-05-09T18:39:43 | 2018-05-09T18:39:43 | 132,647,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,210 | py | """
Activate a connection by name
"""
import NetworkManager
import sys
# Find the connection
name = 'petrologap'
connections = NetworkManager.Settings.ListConnections()
connections = dict([(x.GetSettings()['connection']['id'], x) for x in connections])
conn = connections[name]
# Find a suitable device
ctype = conn.GetSettings()['connection']['type']
if ctype == 'vpn':
for dev in NetworkManager.NetworkManager.GetDevices():
if dev.State == NetworkManager.NM_DEVICE_STATE_ACTIVATED and dev.Managed:
break
else:
print("No active, managed device found")
sys.exit(1)
else:
dtype = {
'802-11-wireless': NetworkManager.NM_DEVICE_TYPE_WIFI,
'802-3-ethernet': NetworkManager.NM_DEVICE_TYPE_ETHERNET,
'gsm': NetworkManager.NM_DEVICE_TYPE_MODEM,
}.get(ctype, ctype)
devices = NetworkManager.NetworkManager.GetDevices()
for dev in devices:
if dev.DeviceType == dtype and dev.State == NetworkManager.NM_DEVICE_STATE_DISCONNECTED:
break
else:
print("No suitable and available %s device found" % ctype)
sys.exit(1)
# And connect
NetworkManager.NetworkManager.ActivateConnection(conn, dev, "/")
| [
"[email protected]"
] | |
449fb354d312bb16c3dc53450b164082563bc347 | 463ef05fb7e82173b3657f6ec2425ad0da50ec0b | /Week3/Lecture3_HW1.py | ebde32f0d0571982f39c9dc2660da01bd245e086 | [] | no_license | lranchev/Python_course | 7cf7abcb34fa21301c9c13405eedd596aa50ca9c | 3175ea92d8651ced0c73377ead0f79f5e25a2162 | refs/heads/master | 2021-01-01T17:35:42.856323 | 2019-12-09T01:57:58 | 2019-12-09T01:57:58 | 98,109,566 | 0 | 0 | null | 2017-07-23T17:47:36 | 2017-07-23T16:07:25 | null | UTF-8 | Python | false | false | 2,006 | py | """
Task 1: Find the average price of a product from a text file.
Download these two files (catalog_sample.csv) and (catalog_full.csv). They are a real product catalog from a well-known
sporting goods manufacturer, with descriptions and prices (the prices are arbitrary); the difference between the two
files is the number of items: catalog_sample has only 200 items, while catalog_full has over 60000. The structure of
both files is identical.
catalog_sample.csv catalog_full.csv
Write a program that finds the average price of all items in the file. The structure of the CSV files is: catalog number,
product name, product colors (separated by / if there is more than one), the activity the item is intended for, the item's
group, the gender/age group the item is intended for, and the price. The data delimiter is , (comma) and the decimal mark is . (period).
"""
total = []
with open('C:/Users/lranchev.BOS-WPTSD/Desktop/catalog_sample.csv') as f:
for idx, line in enumerate(f):
#print(line.split(",")[6],end="")
#print("*"*6)
h = float ((line.split(",")[6]).rstrip("\n"))
total.append(h)
print(round(sum(total)/len(total),2))
with open('C:/Users/lranchev.BOS-WPTSD/Desktop/catalog_full.csv') as f2:
for line in f2:
total.append(float ((line.split(",")[6]).rstrip("\n")))
print (round(sum(total)/len(total),2))
| [
"[email protected]"
] | |
e94a362ffe6b5364326e85d38385c797d72a6313 | d37f9d4d5f0ea06f89cb72395c4cfcd3743b892d | /venv/bin/pip | bb6e0a2555ee26f5e29cb00b92830f2b65212a5d | [] | no_license | wsl3/flask_demo_first | 204fed36638bd6b4fe0cb87069bef652dff48178 | 85e953ada9da6a4d281327a8640318f24bf6e845 | refs/heads/master | 2020-03-28T17:07:08.543975 | 2018-09-14T08:36:12 | 2018-09-14T08:36:12 | 148,759,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | #!/home/wsl/桌面/flask/flask_app/venv/bin/python -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | ||
b11a5d4a6faff7f1cbc991c6052bdd53f8b88a4c | 064dcc7be116da92e1bad8eb93905b205a405876 | /lpthw/ex18.py | c26bd51c979d8bc867e94f83659a175b370916cb | [] | no_license | mikelei8291/LearnPython | 085195722bc36efa91add2e98bc00a8c44081b06 | 9f775610e31e0359d706c440b223889487130f9f | refs/heads/master | 2020-05-21T04:46:46.130874 | 2018-05-28T11:27:09 | 2018-05-28T11:27:09 | 47,876,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | def printTwo(*args):
arg1, arg2 = args
print(f"Last Name: {arg1}, First Name: {arg2}")
def printTwo2(arg1, arg2):
print(f"Last Name: {arg1}, First Name: {arg2}")
def printOne(arg1):
print(f"Who are they? {arg1}")
def printNone():
print("All done!")
printTwo("Kafuu", "Chino")
printTwo2("Yuuki", "Asuna")
printOne("They are my waifus!")
printNone()
| [
"[email protected]"
] | |
d203144735e596d25f4f13b06a2066392418a23d | 488c3bd4d528cdb835a5f519040f0e040de04c67 | /interface_frame/case_test_common.py | f70f1c63728703c15d8bff20459597368a381107 | [] | no_license | shadongdong2019/Interface_GUI_0622 | c354ba1d0d6ac95c4970411636174b0150f38c23 | 46dfda2bd8d24178f6dee01bcff1b99e2d5a05aa | refs/heads/master | 2022-11-19T07:53:38.994303 | 2020-06-28T09:04:57 | 2020-06-28T09:04:57 | 274,085,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,643 | py | import sys
import datetime
import random
import string
from interface_frame.basic_config import log
import os
import unittest
import ddt
from interface_frame.basic_config.common.CaseIsPass import CaseIsPass
from interface_frame.basic_config.common.interface_run import InterfaceRun
from interface_frame.basic_config.common.deal_response_data import DealResData
from interface_frame.basic_config.HTMLTestRunner import HTMLTestRunner
from copy import deepcopy
import json
import pprint
from interface_frame.basic_config.common.cmp_res_req import CmpReqRes
import time
import logging
from interface_frame.basic_config.utils.operation_cfg import OperationCFG
from interface_frame.basic_config.get_data.common_param_dic import CommonParamDict
from interface_frame.basic_config.get_data.dependCase import DependCase
from interface_frame.basic_config.utils.operation_excel import OperationExcel
from interface_frame.basic_config.utils.operation_json import OperationJson
mylog = logging.getLogger(__file__)
baseDir = os.path.dirname(os.path.abspath(__name__))
run_config = os.path.join(baseDir, 'Interface_Auto_GUI_20200619/static/write_config/run.json')  # config file written after a file upload
ope_json = OperationJson(run_config)
pro_config = ope_json.get_data_for_key("configFile")  # project config file path
pro_case = ope_json.get_data_for_key("caseFile")  # project test case file path
pro_rep_path = ope_json.get_data_for_key("report_path")  # project test report storage path
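# Editor's note: the lookups above assume a run.json shaped roughly like the dictionary
# below; the values are illustrative only (the real file is written by the upload step).
_EXAMPLE_RUN_JSON = {
    "configFile": "static/write_config/project.cfg",  # project .cfg consumed by OperationCFG
    "caseFile": "static/upload/cases.xlsx",           # uploaded Excel file with test cases
    "report_path": "static/reports/",                 # directory for generated HTML reports
}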
ope_cfg = OperationCFG(pro_config,"my_case_file")
option_dict = ope_cfg.get_config_dict()
if pro_case:
filename = pro_case
else:
filename = option_dict["case_filepath"]
if pro_rep_path:
reportpath = pro_rep_path
else:
reportpath = option_dict["report_path"]
sheetid_http = int(option_dict["case_sheetid"])
start = int(option_dict["case_start_rownum"])
end = int(option_dict["case_end_rownum"])
cpd = CommonParamDict(**option_dict)
data_http = cpd.deal_param()
@ddt.ddt
class CaseRun(unittest.TestCase):
@classmethod
def setUpClass(self):
pass
@classmethod
def tearDownClass(self):
pass
def setUp(self):
self.interface_run = InterfaceRun()
self.deal_res_data = DealResData()
self.op_excel = OperationExcel(**option_dict)
self.method_req = "post"
self.crr = CmpReqRes(**option_dict)
self.cp = CaseIsPass(**option_dict)
def tearDown(self):
pass
@ddt.data(*data_http)
def test_apply_community(self,data_dict):
'''
        Test data = {0}
:param data_dict:
:return:
'''
pp = pprint.PrettyPrinter(indent=4)
        # get the list of parameters that must not be sent with the request
        no_request_list = cpd.param.get_param_no_request_list()
        no_request_dict = {}  # holds the parameters excluded from the request
        # deep-copy the parameter dict so the original test data stays untouched
        req_data_dict = deepcopy(data_dict)
        if str(req_data_dict.get("IsDepend", "")).lower() == "yes":  # run the dependent test case first if required
dep_case = DependCase(req_data_dict,option_dict)
update_data = dep_case.get_dep_data()
for data in update_data.keys():
req_data_dict[data] = update_data[data]
if req_data_dict.get("Requrl", None):
url = req_data_dict.pop("Requrl")
else:
url = option_dict["Requrl"]
for param in no_request_list:
no_request_dict[param] = req_data_dict.pop(param)
req_s_time = time.time()
ori_res = self.interface_run.main_request(self.method_req, url, req_data_dict)
req_e_time = time.time()
hs = req_e_time -req_s_time
row_num = self.op_excel.get_row_num_for_value(no_request_dict.get("CaseID"))
try:
res = ori_res.json()
except Exception as e:
res = ori_res.text
pp.pprint("{}用例执行详情如下:".format(option_dict.get("interface_name","")))
pp.pprint("{}执行测试用例编号:[{}]".format(option_dict.get("interface_name",""),no_request_dict["CaseID"]))
pp.pprint("{}测试目的:{}".format(option_dict.get("interface_name",""),no_request_dict["TestTarget"]))
pp.pprint("{}用例描述:{}".format(option_dict.get("interface_name",""),no_request_dict["CaseDesc"]))
pp.pprint("{}地址:{}".format(option_dict.get("interface_name",""),url))
pp.pprint("{}预期返回值={}".format(option_dict.get("interface_name",""),no_request_dict["ExpectValue"]))
pp.pprint("{}预期回调状态值={}".format(option_dict.get("interface_name",""),no_request_dict["ExpCallbackFlag"]))
pp.pprint("******************************************************************************")
pp.pprint("请求参数={}".format(json.dumps(req_data_dict, ensure_ascii=False)))
pp.pprint("******************************************************************************")
pp.pprint("{}响应返回数据共<{}>条".format(option_dict.get("interface_name",""),len(res.get("data",""))))
pp.pprint("{}响应结果={}".format(option_dict.get("interface_name",""),res))
pp.pprint("{}响应耗时:{}".format(option_dict.get("interface_name",""),hs))
kargs = {
"no_request_dict":no_request_dict,
"option_dict":option_dict,
"expect":no_request_dict["ExpectValue"],
"res":ori_res,
"req":req_data_dict,
"partnerID":req_data_dict.get("partnerID"),
"partnerKey":req_data_dict.get("partnerKey"),
"expCallbackFlag":no_request_dict["ExpCallbackFlag"],
"no_verify_filed":option_dict.get("no_verify_filed",None) #数据库中无需验证字段
}
start = time.time()
verify_res = self.crr.verify_is_pass(**kargs)
end =time.time()
hs = end -start
pp.pprint("{}响应结果验证耗时:{}".format(option_dict.get("interface_name",""),hs))
is_pass = self.cp.case_is_pass(**verify_res)
try:
evidenceNo = res.get("evidenceNo")
except:
evidenceNo = ""
#self.op_excel.writer_data(row_num, 15, evidenceNo)
        self.assertTrue(is_pass, "test case execution did not pass")
def main():
    test_report_name = option_dict.get("test_report_name", '')  # test report name
cr =CaseRun()
run_file = sys.argv[0]
run_file_name = os.path.basename(os.path.splitext(run_file)[0])
rand_str = ''.join(random.sample((string.ascii_letters + string.digits), 5))
data_str = datetime.datetime.now().strftime('%Y%m%d')
if test_report_name:
report_name = test_report_name+"_"+datetime.datetime.now().strftime('%Y%m%d%H%M%S')+'.html'
else:
report_name = run_file_name + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.html'
report_path = os.path.join("{}/{}/".format(reportpath,data_str),report_name)
path = os.path.join("{}/{}/".format(reportpath,data_str))
if not os.path.exists(path):
os.makedirs(path)
fp = open(report_path,'wb')
suite = unittest.TestLoader().loadTestsFromTestCase(CaseRun)
    title = '{}-{}-{} test report ({})'.format(option_dict.get("project_name"), option_dict.get("run_environment"), option_dict.get("interface_name"), option_dict.get("call_method"))
    description = "{0} test cases - verify that requests with valid parameters succeed and requests with invalid parameters fail".format(option_dict.get("interface_name", ""))
runner = HTMLTestRunner.HTMLTestRunner(stream=fp,title=title,description=description,verbosity=2)
runner.run(suite)
return report_path | [
"[email protected]"
] | |
71a5e4543f83872dee9a35232ec524ee06e31f75 | 80bc43f0f5311889d4902a97cf144677fe83119d | /order/migrations/0002_order_user.py | 95d4f4872106d692697b9d2b8f41f52b7f80a225 | [] | no_license | hunghhdev/django_shoppingsite | f9a7d0a83ae826d8e28212750b0e0e3dd8ba0b29 | 252f2393965c8ac4345b024695ae8352091495f4 | refs/heads/master | 2020-09-07T05:38:50.681981 | 2019-11-10T16:37:34 | 2019-11-10T16:37:34 | 220,672,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | # Generated by Django 2.2.7 on 2019-11-10 03:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('order', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='order',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
b54c37b8fe031009eeb8aab7a2da0991fc512ecd | f8841ef9797227ac93d8809e2e59aca0d7f3ab76 | /src/relational_erm/graph_ops/representations.py | fb1e1ee16d3d0026fdd48104afa3dc4873e5ab1f | [
"MIT"
] | permissive | wooden-spoon/relational-ERM | 1f2fa0b76442384f90851412d36c7cb3911577f3 | 28b16da9fb64852a0302d143d4857728bd08e2eb | refs/heads/master | 2021-06-22T19:11:03.721247 | 2020-10-17T01:26:48 | 2020-10-17T01:26:48 | 136,656,022 | 15 | 2 | MIT | 2018-10-23T00:04:52 | 2018-06-08T19:00:55 | Python | UTF-8 | Python | false | false | 7,584 | py | import numpy as np
class PackedAdjacencyList:
""" A structure representing a packed adjacency list. """
def __init__(self, neighbours, weights, offsets, lengths, vertex_index):
""" Initialize a new packed adjacency list
Parameters
----------
neighbours: an array representing the list of all neighbours.
weights: an array of weights for each edge.
offsets: the offset into the neighbours array for each vertex.
lengths: the lengths of the subarray for the given vertex.
vertex_index: an index mapping the current vertex index to the
vertices in the full network.
"""
self.neighbours = neighbours
self.weights = weights
self.offsets = offsets
self.lengths = lengths
self.vertex_index = vertex_index
def get_neighbours(self, vertex):
""" Get the list of neighbours of a given vertex.
Parameters
----------
vertex: the vertex for which to get the neighbours.
"""
offset = self.offsets[vertex]
length = self.lengths[vertex]
return self.neighbours[offset:offset + length]
def __len__(self):
return len(self.lengths)
def redundant_edge_list_to_adj_list(edge_list, weights):
""" Converts a redundant edge list to an adjacency list
Parameters
----------
edge_list: A numpy ndarray of dimension 2 representing the set of edges
weights: A numpy ndarray of dimension 1 representing the weights for each edge
Returns
-------
a dictionary of lists representing the adjacency list description of the graph.
"""
el, w = np.copy(edge_list), np.copy(weights)
# sort edge list
asort = el[:, 0].argsort()
el = el[asort]
w = w[asort]
verts = np.unique(el[:, 0])
neighbour_dict = {}
last_index = 0
for idx, user in enumerate(verts):
next_index = np.searchsorted(el[:, 0], user + 1)
neighbours = el[last_index:next_index, 1]
asort = neighbours.argsort()
neighbour_dict[user] = (neighbours[asort], w[last_index:next_index][asort])
last_index = next_index
return neighbour_dict
def create_packed_adjacency_list(adjacency_list):
""" Creates a packed adjacency list from a given adjacency list in the dictionary representation.
Note that keys in the adjacency list are required to be contiguous from 0 to the number
of vertices - 1.
Parameters
----------
adjacency_list: The adjacency list represented as a dictionary, where keys are vertices,
and items are given by pairs of arrays representing the neighbours, and the corresponding
weight associated with the connection to that neighbour.
Returns
-------
packed_adjacency_list: A PackedAdjacencyList which represents the same graph.
"""
num_vertex = len(adjacency_list)
lengths = np.empty(num_vertex, dtype=np.int32)
offsets = np.zeros(num_vertex, dtype=np.int32)
neighbours_lists = []
weights_lists = []
for i in range(num_vertex):
neighbours_i, weights_i = adjacency_list[i]
neighbours_lists.append(neighbours_i)
weights_lists.append(weights_i)
lengths[i] = len(neighbours_i)
neighbours = np.concatenate(neighbours_lists)
weights = np.concatenate(weights_lists)
np.cumsum(lengths[:-1], out=offsets[1:])
return PackedAdjacencyList(neighbours, weights, offsets, lengths, np.arange(num_vertex))
def create_packed_adjacency_from_redundant_edge_list(redundant_edge_list):
""" Creates a packed adjacency list from the given edge list.
Parameters
----------
redundant_edge_list: a two dimensional array containing the edge list.
Returns
-------
packed_adjacency_list: the packed adjacency list corresponding to the given edge list.
"""
idx = np.lexsort((redundant_edge_list[:, 1], redundant_edge_list[:, 0]))
redundant_edge_list = redundant_edge_list[idx, :]
vertices, counts = np.unique(redundant_edge_list[:, 0], return_counts=True)
if vertices[0] != 0 or np.any(np.diff(vertices) != 1):
raise ValueError("Source vertices do not form a contiguous range!")
neighbours = np.require(redundant_edge_list[:, 1], dtype=np.int32, requirements='C')
lengths = counts.astype(np.int32, copy=False)
offsets = np.empty_like(lengths)
np.cumsum(lengths[:-1], out=offsets[1:])
offsets[0] = 0
return PackedAdjacencyList(neighbours, None, offsets, lengths, np.arange(len(vertices), dtype=np.int32))
def adj_list_to_red_edge_list(adj_list):
""" Converts an adjacency list to a redundant edge list.
Params
------
adjacency_list: The adjacency list represented as a dictionary
of pairs of arrays, representing the neighbours and the weights.
Returns
-------
edge_list: A two-dimensional arrays representing a redundant edge list.
w: A one-dimensional array representing the weight associated with each edge.
"""
el_one_list = []
el_two_list = []
w_list = []
for vert, neighbours in adj_list.items():
el_two, w = neighbours
el_one = np.repeat(vert, el_two.shape[0])
el_one_list += [el_one]
el_two_list += [el_two]
w_list += [w]
el_one = np.concatenate(el_one_list)
el_two = np.concatenate(el_two_list)
el = np.stack([el_one, el_two], 1)
w = np.concatenate(w_list)
return el, w
def packed_adj_list_to_red_edge_list(packed_adj_list: PackedAdjacencyList):
""" Converts a packed adjacency list to a redundant edge list.
Params
------
packed_adj_list: the adjacency list to convert
Returns
-------
edge_list: A two-dimensional arrays representing a redundant edge list.
weights: A one-dimensional array representing the weight associated with each edge.
"""
edge_list = np.empty((len(packed_adj_list.neighbours), 2), dtype=packed_adj_list.neighbours.dtype)
weights = np.copy(packed_adj_list.weights)
edge_list[:, 0] = np.repeat(np.arange(len(packed_adj_list.lengths), dtype=packed_adj_list.lengths.dtype),
packed_adj_list.lengths)
edge_list[:, 1] = packed_adj_list.neighbours
return edge_list, weights
def adj_list_to_edge_list(adj_list):
"""
Takes an adjacency list corresponding to an undirected graph and returns the edge list
:param adj_list:
:return:
"""
red_el, w = adj_list_to_red_edge_list(adj_list)
c_maj = red_el[:, 0] <= red_el[:, 1]
return red_el[c_maj], w[c_maj]
def edge_list_to_adj_list(edge_list, weights):
el, w = edge_list_to_red_edge_list(edge_list, weights)
return redundant_edge_list_to_adj_list(el, w)
def edge_list_to_red_edge_list(edge_list, weights):
el_flip = np.stack([edge_list[:, 1], edge_list[:, 0]], axis=1)
no_diag = (el_flip[:,0] != el_flip[:,1])
return np.concatenate([edge_list, el_flip[no_diag]]), \
np.concatenate([weights, weights[no_diag]])
def red_edge_list_to_edge_list(red_edge_list, weights):
c_maj = (red_edge_list[:, 0] <= red_edge_list[:, 1])
return red_edge_list[c_maj], weights[c_maj]
def directed_to_undirected(edge_list, weights):
rel, rw = edge_list_to_red_edge_list(edge_list, weights)
return red_edge_list_to_edge_list(rel, rw)
def relabel(edge_list):
shape = edge_list.shape
vertex_index, edge_list = np.unique(edge_list, return_inverse=True)
edge_list = edge_list.astype(np.int32).reshape(shape)
return edge_list, vertex_index
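if __name__ == "__main__":
    # Editor's addition: a minimal usage sketch of the packed adjacency helpers above.
    # The tiny graph (edges 0-1 and 1-2, stored redundantly in both directions) is
    # illustrative only and not part of the original module.
    example_edges = np.array([[0, 1], [1, 0], [1, 2], [2, 1]], dtype=np.int32)
    packed = create_packed_adjacency_from_redundant_edge_list(example_edges)
    print(len(packed))               # 3 vertices
    print(packed.get_neighbours(1))  # neighbours of vertex 1 -> [0 2]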
| [
"[email protected]"
] | |
ba124c491bb2b0fbd5712f597bdf3bac9771c5a7 | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/quantum/azure-mgmt-quantum/azure/mgmt/quantum/operations/_operations.py | 9224ca652280eb5f68e29745dbd5478dcc0a744a | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 6,787 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2022-01-10-preview")
) # type: Literal["2022-01-10-preview"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.Quantum/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.quantum.AzureQuantumManagementClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Returns list of operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.quantum.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-01-10-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.OperationsList]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationsList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.Quantum/operations"} # type: ignore
| [
"[email protected]"
] | |
0e8c881aaed5016ab3c6761d67750f472b705cc8 | dad9e8465ec25b77085938bc79e92f943724c996 | /portfolio_manager/asgi.py | 33b5d6b66c9583216bafab0aed4689a4a2f69497 | [] | no_license | NduatiMichael/Refresher | d05e3a28475fdbefe2afef67fd95efa8c61649e0 | 85fc7922389a898d1860ec722470a3a41340d115 | refs/heads/master | 2023-08-06T05:03:05.692680 | 2021-10-04T17:53:55 | 2021-10-04T17:53:55 | 120,303,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
ASGI config for portfolio_manager project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portfolio_manager.settings')
application = get_asgi_application()
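if __name__ == "__main__":
    # Editor's addition: a hedged convenience entry point, not part of the standard
    # Django template. It assumes uvicorn is installed; any ASGI server (daphne,
    # hypercorn, ...) could serve `application` instead.
    import uvicorn
    uvicorn.run("portfolio_manager.asgi:application", host="127.0.0.1", port=8000)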
| [
"[email protected]"
] | |
320d55796863b529981e95d96ec68c610f73cca5 | f4a1d28efca2957bff52bf3d8c4d04934e04b1e5 | /new.py | 0dead7a0883953b530576c100165634d3bd7c34a | [] | no_license | Shubham-7777/RPG-battle-Script | 3bd8b3a97a5263da51e0da821670b8a3cd9b9620 | e4bfbdc58bc6944f79d0d941142d391c60adf987 | refs/heads/main | 2022-12-26T05:42:35.081774 | 2020-09-29T18:07:00 | 2020-09-29T18:07:00 | 299,699,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | """def function_1():
string = (input("Enter the string:--"))
return (string[2:] + string[:2])
print(function_1())
"""
"""total = 0
input_1 =int(input("Value1:- "))
for i in range(input_1):
print(input_1, end="")
for i in range(input_1):
print(i, end= " ")
i -= 1"""
"""list_1= [1,2,3,4,5,6,78,89,9]
for i in list_1:
print(i)"""
a = 10
b = "test"
# c = a + b  # would raise TypeError: unsupported operand type(s) for +: 'int' and 'str'
# print(a, b)
# print(a + b)
print("16" + "15")       # string concatenation, prints 1615
print("abc", 16, "xyz")  # print() converts each argument, prints: abc 16 xyz
"""a = "abc" + str(5)
print(a)
b = ""
print(type(b))
"""
a = "abc"
while len(a) < 10:
a += "z"
print(a)
print(a) | [
"[email protected]"
] | |
c5b0c79c4b0e172b32a6846cee75be1163fb410a | d9da23de1d61b1e4c4df9ff4826679a9f9872229 | /zks-on-ec2/nifi-lambda-health.py | 2481a6d321c1abceb09312ecfe0922e813481143 | [] | no_license | chadgeary/nifi | c6e88b51d4975aced4775906572da0ada1236227 | 19a41d01cad9d90997a2cd0cda6873b1a00bcd2c | refs/heads/master | 2022-08-06T15:54:11.282142 | 2022-07-31T17:00:08 | 2022-07-31T17:00:08 | 280,558,749 | 48 | 14 | null | 2022-07-31T17:00:09 | 2020-07-18T01:32:11 | HCL | UTF-8 | Python | false | false | 3,037 | py | import boto3
import json
import os
from urllib.request import urlopen
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def lambda_handler(event, context):
# Admin Certificate
s3 = boto3.client('s3')
with open('/tmp/admin_cert.pem', 'wb') as data:
s3.download_fileobj(os.environ['PREFIX'] + '-bucket-' + os.environ['SUFFIX'],'nifi/certificates/admin/admin_cert.pem', data)
with open('/tmp/private_key.key', 'wb') as data:
s3.download_fileobj(os.environ['PREFIX'] + '-bucket-' + os.environ['SUFFIX'],'nifi/certificates/admin/private_key.key', data)
# Get key's secret via ssm
ssm = boto3.client("ssm", region_name=os.environ["REGION"])
ssm_secret = ssm.get_parameter(
Name=os.environ["PREFIX"] + "-nifi-secret-" + os.environ["SUFFIX"],
WithDecryption=True,
)
secret = ssm_secret["Parameter"]["Value"]
# EC2 instances
ec2 = boto3.client('ec2')
http = urllib3.PoolManager(cert_reqs='CERT_NONE', cert_file='/tmp/admin_cert.pem', key_file='/tmp/private_key.key', key_password=secret)
cluster_filter = [
{
'Name': 'tag:Cluster',
'Values': [os.environ['PREFIX'] + '_' + os.environ['SUFFIX']]
},
{
'Name': 'instance-state-name',
'Values': ['running']
}
]
response = ec2.describe_instances(Filters=cluster_filter)
# Autoscaling group(s)
asg = boto3.client('autoscaling')
health = []
try:
for reservation in response['Reservations']:
if len(reservation['Instances']) < 1:
print("No instances, skipping")
else:
for instance in reservation['Instances']:
for interface in instance['NetworkInterfaces']:
try:
health_check = http.request('GET', 'https://' + interface['PrivateDnsName'] + ':' + os.environ['WEB_PORT'] + '/nifi', preload_content=False)
health.append({instance['InstanceId']: health_check.status})
except:
print(instance['InstanceId'] + ': UNHEALTHY')
set_health = asg.set_instance_health(
HealthStatus='Unhealthy',
InstanceId=instance['InstanceId'],
ShouldRespectGracePeriod=True
)
else:
print(instance['InstanceId'] + ': HEALTHY')
set_health = asg.set_instance_health(
HealthStatus='Healthy',
InstanceId=instance['InstanceId'],
ShouldRespectGracePeriod=True
)
except Exception as e:
print(e)
return {
'statusCode': 200,
'body': json.dumps(health)
}
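if __name__ == "__main__":
    # Editor's addition: a hedged sketch for invoking the handler locally. The values
    # below are placeholders for the PREFIX/SUFFIX/REGION/WEB_PORT variables read above;
    # valid AWS credentials are still required for the boto3 calls.
    os.environ.setdefault('PREFIX', 'nifi')
    os.environ.setdefault('SUFFIX', 'dev')
    os.environ.setdefault('REGION', 'us-east-1')
    os.environ.setdefault('WEB_PORT', '9443')
    print(lambda_handler({}, None))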
| [
"[email protected]"
] | |
8894a136041742f5fae71434f45afaf7a9d03a45 | 362497ca35d4d0169bccba877385e7d43f125c18 | /superlists/lists/views.py | 5b1efe1f2cd81f16fefb1f2df2d197bcafc25627 | [] | no_license | magda-zielinska/tdd-testing-goat | f8c1e8324baac88a5ad80ce63bbd84890b6e5b67 | 82e3ef3d6f35fbcb657014e14498e1e54949f1ff | refs/heads/master | 2022-12-18T12:00:40.812435 | 2020-09-30T14:36:41 | 2020-09-30T14:36:41 | 290,494,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | from django.shortcuts import render, redirect
from lists.models import Item, List
# Create your views here.
def home_page(request):
return render(request, 'home.html')
def view_list(request, list_id):
list_ = List.objects.get(id=list_id)
return render(request, 'list.html', {'list': list_})
def new_list(request):
list_ = List.objects.create()
Item.objects.create(text=request.POST['item_text'], list=list_)
return redirect(f'/lists/{list_.id}/')
def add_item(request, list_id):
list_ = List.objects.get(id=list_id)
Item.objects.create(text=request.POST['item_text'], list=list_)
return redirect(f'/lists/{list_.id}/')
| [
"[email protected]"
] | |
0ced895849e84aea28c363efdaef9d3edb2ab3d1 | 79c0358277a5f6ae231d89ee4476cb1facd00e50 | /extra/programming/language/perl/perl-Locale-Msgfmt/actions.py | 91ba6d0ec89084e095613756d5ec19e58da716b3 | [] | no_license | mrust1/PisiLinux | a139dbc9f8d3d61ebec38d08f36dfa6eafff7107 | a2014b6912df50ad22da5b2f3d21bf01cbd8e192 | refs/heads/master | 2020-12-11T03:42:50.309869 | 2014-10-05T14:05:17 | 2014-10-05T14:05:17 | 24,826,519 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import perlmodules
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
perlmodules.configure()
def build():
perlmodules.make()
def check():
perlmodules.make("test")
def install():
perlmodules.install()
pisitools.dodoc("README")
| [
"[email protected]"
] | |
0979f7abc461333386441095097c1bf74d2fbb03 | 7ce508666a3bf63c512552883fe67738c7c9f4de | /data/foldScript.py | 08f92e6924465770b98845abc1ac0eb14cc2c32c | [] | no_license | tobiasvandriessel/tensorFloh | bac6d50f64f0122fd7a2b18c66e185e6e87c7dac | 59b1fd9f0cb6bd70e8304588ae1b52fc1675efea | refs/heads/master | 2020-03-06T16:53:55.917205 | 2018-04-15T22:28:04 | 2018-04-15T22:28:04 | 126,981,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,314 | py | import os
from random import randint
os.chdir("./UCF-101/")
if not os.path.exists("./folds"):
os.makedirs("./folds")
os.makedirs("./folds/1")
os.makedirs("./folds/2")
os.makedirs("./folds/3")
os.makedirs("./folds/4")
os.makedirs("./folds/5")
li = os.listdir()
print("dirs: ")
print(li)
setBrushes = [[], [], [], [], []]
setCuts = [[], [], [], [], []]
setJumps = [[], [], [], [], []]
setLunges = [[], [], [], [], []]
setPushes = [[], [], [], [], []]
for folder in li:
if not folder == "folds":
os.chdir(folder)
print(folder)
#print(os.listdir())
for fil in os.listdir():
if folder == "BrushingTeeth":
setBrushes[randint(0,4)].append(fil)
elif folder == "CuttingInKitchen":
setCuts[randint(0,4)].append(fil)
elif folder == "JumpingJack":
setJumps[randint(0,4)].append(fil)
elif folder == "Lunges":
setLunges[randint(0,4)].append(fil)
elif folder == "WallPushups":
setPushes[randint(0,4)].append(fil)
os.chdir("../")
#print(setBrushes[0])
def getSmallestSet(sets):
elems = 10000
indx = 10
for idx, s in enumerate(sets):
if len(s) < elems:
elems = len(s)
indx = idx
return indx
for s in setBrushes:
while len(s) > ((131/5) + 0.8):
print("length was: " + str(len(s)))
elem = s.pop()
setBrushes[getSmallestSet(setBrushes)].append(elem)
print("length is now: " + str(len(s)))
for s in setCuts:
while len(s) > ((110/5) + 0.8):
print("length was: " + str(len(s)))
elem = s.pop()
setCuts[getSmallestSet(setCuts)].append(elem)
print("length is now: " + str(len(s)))
for s in setJumps:
while len(s) > ((123/5) + 0.8):
print("length was: " + str(len(s)))
elem = s.pop()
setJumps[getSmallestSet(setJumps)].append(elem)
print("length is now: " + str(len(s)))
for s in setLunges:
while len(s) > ((127/5) + 0.8):
print("length was: " + str(len(s)))
elem = s.pop()
setLunges[getSmallestSet(setLunges)].append(elem)
print("length is now: " + str(len(s)))
for s in setPushes:
while len(s) > ((130/5) + 0.8):
print("length was: " + str(len(s)))
elem = s.pop()
setPushes[getSmallestSet(setPushes)].append(elem)
print("length is now: " + str(len(s)))
print("set Brushes lengths: ")
for s in setBrushes:
print(len(s))
print("set Cuts lengths: ")
for s in setCuts:
print(len(s))
print("set Cuts lengths: ")
for s in setJumps:
print(len(s))
print("set Jumps lengths: ")
for s in setLunges:
print(len(s))
print("set Lunges lengths: ")
for s in setPushes:
print(len(s))
#"BrushingTeeth":
#"CuttingInKitchen":
#"JumpingJack":
#"Lunges":
#"WallPushups":
writecmds = []
for idx, s in enumerate(setBrushes):
for name in s:
#print(name)
#break
writecmds.append("move BrushingTeeth\\" + name + " folds\\" + str(idx+1) + "\\" + name + "\n")
for idx, s in enumerate(setCuts):
for name in s:
#print(name)
#break
writecmds.append("move CuttingInKitchen\\" + name + " folds\\" + str(idx+1) + "\\" + name + "\n")
for idx, s in enumerate(setJumps):
for name in s:
#print(name)
#break
writecmds.append("move JumpingJack\\" + name + " folds\\" + str(idx+1) + "\\" + name + "\n")
for idx, s in enumerate(setLunges):
for name in s:
#print(name)
#break
writecmds.append("move Lunges\\" + name + " folds\\" + str(idx+1) + "\\" + name + "\n")
for idx, s in enumerate(setPushes):
for name in s:
#print(name)
#break
writecmds.append("move WallPushups\\" + name + " folds\\" + str(idx+1) + "\\" + name + "\n")
f = open('movecmds.txt', 'w')
for cmd in writecmds:
f.write(cmd)
f.close()
print(os.getcwd())
#f = []
#for(dirpath, dirnames, filenames) in os.walk("./UCF-101"):
# if dirnames == [] and os.path.dirname(dirpath) != "folds":
# print("dir empty: " + dirpath)
# print("dirname: " + os.path.dirname(dirpath + "\\"))
# print("current work dir: " + os.getcwd())
# f.extend(filenames)
#print(f)
| [
"[email protected]"
] | |
dc858694dd73c9328f8a113ebcfb10b52c7ad873 | 78255b237a5b8f375bf0bb6c8ddbf061a1c2a491 | /metadata-ingestion/src/datahub/ingestion/source/lookml.py | 70622d8bac51a9581d814f68fd02cb143026b8cd | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] | permissive | devopstoday11/datahub | deec629419a73e0e1510f1a36f9f5930ad5cdff6 | bb73c467dafaf05f29e3341883f8cba0ef09f501 | refs/heads/master | 2023-08-05T02:11:32.252610 | 2021-09-18T16:43:15 | 2021-09-18T16:43:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,445 | py | import glob
import importlib
import itertools
import logging
import pathlib
import re
import sys
from dataclasses import dataclass
from dataclasses import field as dataclass_field
from dataclasses import replace
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type
import pydantic
from looker_sdk.error import SDKError
from looker_sdk.sdk.api31.methods import Looker31SDK
from looker_sdk.sdk.api31.models import DBConnection
from pydantic import root_validator, validator
from datahub.ingestion.source.looker_common import (
LookerCommonConfig,
LookerUtil,
LookerViewId,
ViewField,
ViewFieldType,
)
from datahub.metadata.schema_classes import DatasetPropertiesClass
from datahub.utilities.sql_parser import SQLParser
if sys.version_info >= (3, 7):
import lkml
else:
raise ModuleNotFoundError("The lookml plugin requires Python 3.7 or newer.")
import datahub.emitter.mce_builder as builder
from datahub.configuration import ConfigModel
from datahub.configuration.common import AllowDenyPattern, ConfigurationError
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.ingestion.source.looker import LookerAPI, LookerAPIConfig
from datahub.metadata.com.linkedin.pegasus2avro.common import BrowsePaths, Status
from datahub.metadata.com.linkedin.pegasus2avro.dataset import (
DatasetLineageTypeClass,
UpstreamClass,
UpstreamLineage,
)
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import DatasetSnapshot
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
assert sys.version_info[1] >= 7 # needed for mypy
logger = logging.getLogger(__name__)
def _get_bigquery_definition(
looker_connection: DBConnection,
) -> Tuple[str, Optional[str], Optional[str]]:
logger.info(looker_connection)
platform = "bigquery"
# bigquery project ids are returned in the host field
db = looker_connection.host
schema = looker_connection.database
return (platform, db, schema)
def _get_generic_definition(
looker_connection: DBConnection, platform: Optional[str] = None
) -> Tuple[str, Optional[str], Optional[str]]:
if platform is None:
# We extract the platform from the dialect name
dialect_name = looker_connection.dialect_name
assert dialect_name is not None
# generally the first part of the dialect name before _ is the name of the platform
# versions are encoded as numbers and can be removed
# e.g. spark1 or hive2 or druid_18
platform = re.sub(r"[0-9]+", "", dialect_name.split("_")[0])
assert (
platform is not None
), f"Failed to extract a valid platform from connection {looker_connection}"
db = looker_connection.database
schema = looker_connection.schema # ok for this to be None
return (platform, db, schema)
class LookerConnectionDefinition(ConfigModel):
platform: str
default_db: str
default_schema: Optional[str] # Optional since some sources are two-level only
@validator("*")
def lower_everything(cls, v):
"""We lower case all strings passed in to avoid casing issues later"""
if v is not None:
return v.lower()
@classmethod
def from_looker_connection(
cls, looker_connection: DBConnection
) -> "LookerConnectionDefinition":
"""Dialect definitions are here: https://docs.looker.com/setup-and-management/database-config"""
extractors: Dict[str, Any] = {
"^bigquery": _get_bigquery_definition,
".*": _get_generic_definition,
}
if looker_connection.dialect_name is not None:
for extractor_pattern, extracting_function in extractors.items():
if re.match(extractor_pattern, looker_connection.dialect_name):
(platform, db, schema) = extracting_function(looker_connection)
return cls(platform=platform, default_db=db, default_schema=schema)
raise ConfigurationError(
f"Could not find an appropriate platform for looker_connection: {looker_connection.name} with dialect: {looker_connection.dialect_name}"
)
else:
raise ConfigurationError(
f"Unable to fetch a fully filled out connection for {looker_connection.name}. Please check your API permissions."
)
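if __name__ == "__main__":
    # Editor's addition: a hedged illustration of the dialect-to-platform mapping above.
    # The DBConnection values are made up; a real connection object would come from the
    # Looker API rather than being constructed by hand.
    _example_conn = DBConnection(
        name="warehouse",
        dialect_name="snowflake",
        database="analytics",
        schema="public",
    )
    print(LookerConnectionDefinition.from_looker_connection(_example_conn))
    # expected: platform='snowflake' default_db='analytics' default_schema='public'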
class LookMLSourceConfig(LookerCommonConfig):
base_folder: pydantic.DirectoryPath
connection_to_platform_map: Optional[Dict[str, LookerConnectionDefinition]]
model_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
view_pattern: AllowDenyPattern = AllowDenyPattern.allow_all()
parse_table_names_from_sql: bool = False
sql_parser: str = "datahub.utilities.sql_parser.DefaultSQLParser"
api: Optional[LookerAPIConfig]
project_name: Optional[str]
@validator("connection_to_platform_map", pre=True)
def convert_string_to_connection_def(cls, conn_map):
# Previous version of config supported strings in connection map. This upconverts strings to ConnectionMap
for key in conn_map:
if isinstance(conn_map[key], str):
platform = conn_map[key]
if "." in platform:
platform_db_split = conn_map[key].split(".")
connection = LookerConnectionDefinition(
platform=platform_db_split[0],
default_db=platform_db_split[1],
default_schema="",
)
conn_map[key] = connection
else:
logger.warning(
f"Connection map for {key} provides platform {platform} but does not provide a default database name. This might result in failed resolution"
)
conn_map[key] = LookerConnectionDefinition(
platform=platform, default_db="", default_schema=""
)
return conn_map
@root_validator()
def check_either_connection_map_or_connection_provided(cls, values):
"""Validate that we must either have a connection map or an api credential"""
if not values.get("connection_to_platform_map", {}) and not values.get(
"api", {}
):
raise ConfigurationError(
"Neither api not connection_to_platform_map config was found. LookML source requires either api credentials for Looker or a map of connection names to platform identifiers to work correctly"
)
return values
@root_validator()
def check_either_project_name_or_api_provided(cls, values):
"""Validate that we must either have a project name or an api credential to fetch project names"""
if not values.get("project_name") and not values.get("api"):
raise ConfigurationError(
"Neither project_name not an API credential was found. LookML source requires either api credentials for Looker or a project_name to accurately name views and models."
)
return values
@dataclass
class LookMLSourceReport(SourceReport):
models_scanned: int = 0
views_scanned: int = 0
explores_scanned: int = 0
filtered_models: List[str] = dataclass_field(default_factory=list)
filtered_views: List[str] = dataclass_field(default_factory=list)
filtered_explores: List[str] = dataclass_field(default_factory=list)
def report_models_scanned(self) -> None:
self.models_scanned += 1
def report_views_scanned(self) -> None:
self.views_scanned += 1
def report_explores_scanned(self) -> None:
self.explores_scanned += 1
def report_models_dropped(self, model: str) -> None:
self.filtered_models.append(model)
def report_views_dropped(self, view: str) -> None:
self.filtered_views.append(view)
def report_explores_dropped(self, explore: str) -> None:
self.filtered_explores.append(explore)
@dataclass
class LookerModel:
connection: str
includes: List[str]
explores: List[dict]
resolved_includes: List[str]
@staticmethod
def from_looker_dict(
looker_model_dict: dict,
base_folder: str,
path: str,
reporter: LookMLSourceReport,
) -> "LookerModel":
connection = looker_model_dict["connection"]
includes = looker_model_dict.get("includes", [])
resolved_includes = LookerModel.resolve_includes(
includes, base_folder, path, reporter
)
explores = looker_model_dict.get("explores", [])
return LookerModel(
connection=connection,
includes=includes,
resolved_includes=resolved_includes,
explores=explores,
)
@staticmethod
def resolve_includes(
includes: List[str], base_folder: str, path: str, reporter: LookMLSourceReport
) -> List[str]:
"""Resolve ``include`` statements in LookML model files to a list of ``.lkml`` files.
For rules on how LookML ``include`` statements are written, see
https://docs.looker.com/data-modeling/getting-started/ide-folders#wildcard_examples
"""
resolved = []
for inc in includes:
# Filter out dashboards - we get those through the looker source.
if (
inc.endswith(".dashboard")
or inc.endswith(".dashboard.lookml")
or inc.endswith(".dashboard.lkml")
):
logger.debug(f"include '{inc}' is a dashboard, skipping it")
continue
# Massage the looker include into a valid glob wildcard expression
if inc.startswith("/"):
glob_expr = f"{base_folder}{inc}"
else:
# Need to handle a relative path.
glob_expr = str(pathlib.Path(path).parent / inc)
# "**" matches an arbitrary number of directories in LookML
outputs = sorted(
glob.glob(glob_expr, recursive=True)
+ glob.glob(f"{glob_expr}.lkml", recursive=True)
)
if "*" not in inc and not outputs:
reporter.report_failure(path, f"cannot resolve include {inc}")
elif not outputs:
reporter.report_failure(
path, f"did not resolve anything for wildcard include {inc}"
)
resolved.extend(outputs)
return resolved
@dataclass
class LookerViewFile:
absolute_file_path: str
connection: Optional[str]
includes: List[str]
resolved_includes: List[str]
views: List[Dict]
raw_file_content: str
@staticmethod
def from_looker_dict(
absolute_file_path: str,
looker_view_file_dict: dict,
base_folder: str,
raw_file_content: str,
reporter: LookMLSourceReport,
) -> "LookerViewFile":
includes = looker_view_file_dict.get("includes", [])
resolved_includes = LookerModel.resolve_includes(
includes, base_folder, absolute_file_path, reporter
)
logger.info(
f"resolved_includes for {absolute_file_path} is {resolved_includes}"
)
views = looker_view_file_dict.get("views", [])
return LookerViewFile(
absolute_file_path=absolute_file_path,
connection=None,
includes=includes,
resolved_includes=resolved_includes,
views=views,
raw_file_content=raw_file_content,
)
class LookerViewFileLoader:
"""
Loads the looker viewfile at a :path and caches the LookerViewFile in memory
This is to avoid reloading the same file off of disk many times during the recursive include resolution process
"""
def __init__(self, base_folder: str, reporter: LookMLSourceReport) -> None:
self.viewfile_cache: Dict[str, LookerViewFile] = {}
self._base_folder = base_folder
self.reporter = reporter
def is_view_seen(self, path: str) -> bool:
return path in self.viewfile_cache
def _load_viewfile(
self, path: str, reporter: LookMLSourceReport
) -> Optional[LookerViewFile]:
if self.is_view_seen(path):
return self.viewfile_cache[path]
try:
with open(path, "r") as file:
raw_file_content = file.read()
except Exception as e:
self.reporter.report_failure(path, f"failed to load view file: {e}")
return None
try:
with open(path, "r") as file:
logger.info(f"Loading file {path}")
parsed = lkml.load(file)
looker_viewfile = LookerViewFile.from_looker_dict(
absolute_file_path=path,
looker_view_file_dict=parsed,
base_folder=self._base_folder,
raw_file_content=raw_file_content,
reporter=reporter,
)
logger.debug(f"adding viewfile for path {path} to the cache")
self.viewfile_cache[path] = looker_viewfile
return looker_viewfile
except Exception as e:
self.reporter.report_failure(path, f"failed to load view file: {e}")
return None
def load_viewfile(
self,
path: str,
connection: LookerConnectionDefinition,
reporter: LookMLSourceReport,
) -> Optional[LookerViewFile]:
viewfile = self._load_viewfile(path, reporter)
if viewfile is None:
return None
return replace(viewfile, connection=connection)
@dataclass
class LookerView:
id: LookerViewId
absolute_file_path: str
connection: LookerConnectionDefinition
sql_table_names: List[str]
fields: List[ViewField]
raw_file_content: str
@classmethod
def _import_sql_parser_cls(cls, sql_parser_path: str) -> Type[SQLParser]:
assert "." in sql_parser_path, "sql_parser-path must contain a ."
module_name, cls_name = sql_parser_path.rsplit(".", 1)
import sys
logger.info(sys.path)
parser_cls = getattr(importlib.import_module(module_name), cls_name)
if not issubclass(parser_cls, SQLParser):
raise ValueError(f"must be derived from {SQLParser}; got {parser_cls}")
return parser_cls
@classmethod
def _get_sql_table_names(cls, sql: str, sql_parser_path: str) -> List[str]:
parser_cls = cls._import_sql_parser_cls(sql_parser_path)
sql_table_names: List[str] = parser_cls(sql).get_tables()
# Remove quotes from table names
sql_table_names = [t.replace('"', "") for t in sql_table_names]
sql_table_names = [t.replace("`", "") for t in sql_table_names]
return sql_table_names
@classmethod
def _get_fields(
cls, field_list: List[Dict], type_cls: ViewFieldType
) -> List[ViewField]:
fields = []
for field_dict in field_list:
is_primary_key = field_dict.get("primary_key", "no") == "yes"
name = field_dict["name"]
native_type = field_dict.get("type", "string")
description = field_dict.get("description", "")
field = ViewField(
name=name,
type=native_type,
description=description,
is_primary_key=is_primary_key,
field_type=type_cls,
)
fields.append(field)
return fields
@classmethod
def from_looker_dict(
cls,
project_name: str,
model_name: str,
looker_view: dict,
connection: LookerConnectionDefinition,
looker_viewfile: LookerViewFile,
looker_viewfile_loader: LookerViewFileLoader,
reporter: LookMLSourceReport,
parse_table_names_from_sql: bool = False,
sql_parser_path: str = "datahub.utilities.sql_parser.DefaultSQLParser",
) -> Optional["LookerView"]:
view_name = looker_view["name"]
logger.debug(f"Handling view {view_name} in model {model_name}")
# The sql_table_name might be defined in another view and this view is extending that view,
# so we resolve this field while taking that into account.
sql_table_name: Optional[str] = LookerView.get_including_extends(
view_name=view_name,
looker_view=looker_view,
connection=connection,
looker_viewfile=looker_viewfile,
looker_viewfile_loader=looker_viewfile_loader,
field="sql_table_name",
reporter=reporter,
)
# Some sql_table_name fields contain quotes like: optimizely."group", just remove the quotes
sql_table_name = (
sql_table_name.replace('"', "").replace("`", "")
if sql_table_name is not None
else None
)
derived_table = looker_view.get("derived_table", None)
dimensions = cls._get_fields(
looker_view.get("dimensions", []), ViewFieldType.DIMENSION
)
dimension_groups = cls._get_fields(
looker_view.get("dimension_groups", []), ViewFieldType.DIMENSION_GROUP
)
measures = cls._get_fields(
looker_view.get("measures", []), ViewFieldType.MEASURE
)
fields: List[ViewField] = dimensions + dimension_groups + measures
# Parse SQL from derived tables to extract dependencies
if derived_table is not None:
sql_table_names = []
if parse_table_names_from_sql and "sql" in derived_table:
logger.debug(
f"Parsing sql from derived table section of view: {view_name}"
)
# Get the list of tables in the query
sql_table_names = cls._get_sql_table_names(
derived_table["sql"], sql_parser_path
)
return LookerView(
id=LookerViewId(
project_name=project_name,
model_name=model_name,
view_name=view_name,
),
absolute_file_path=looker_viewfile.absolute_file_path,
connection=connection,
sql_table_names=sql_table_names,
fields=fields,
raw_file_content=looker_viewfile.raw_file_content,
)
# If not a derived table, then this view essentially wraps an existing
# object in the database.
if sql_table_name is not None:
# If sql_table_name is set, there is a single dependency in the view, on the sql_table_name.
sql_table_names = [sql_table_name]
else:
# Otherwise, default to the view name as per the docs:
# https://docs.looker.com/reference/view-params/sql_table_name-for-view
sql_table_names = [view_name]
output_looker_view = LookerView(
id=LookerViewId(
project_name=project_name, model_name=model_name, view_name=view_name
),
absolute_file_path=looker_viewfile.absolute_file_path,
sql_table_names=sql_table_names,
connection=connection,
fields=fields,
raw_file_content=looker_viewfile.raw_file_content,
)
return output_looker_view
@classmethod
def resolve_extends_view_name(
cls,
connection: LookerConnectionDefinition,
looker_viewfile: LookerViewFile,
looker_viewfile_loader: LookerViewFileLoader,
target_view_name: str,
reporter: LookMLSourceReport,
) -> Optional[dict]:
# The view could live in the same file.
for raw_view in looker_viewfile.views:
raw_view_name = raw_view["name"]
if raw_view_name == target_view_name:
return raw_view
# Or it could live in one of the included files. We do not know which file the base view
# lives in, so we try them all!
for include in looker_viewfile.resolved_includes:
included_looker_viewfile = looker_viewfile_loader.load_viewfile(
include, connection, reporter
)
if not included_looker_viewfile:
logger.warning(
f"unable to load {include} (included from {looker_viewfile.absolute_file_path})"
)
continue
for raw_view in included_looker_viewfile.views:
raw_view_name = raw_view["name"]
# Make sure to skip loading view we are currently trying to resolve
if raw_view_name == target_view_name:
return raw_view
return None
@classmethod
def get_including_extends(
cls,
view_name: str,
looker_view: dict,
connection: LookerConnectionDefinition,
looker_viewfile: LookerViewFile,
looker_viewfile_loader: LookerViewFileLoader,
field: str,
reporter: LookMLSourceReport,
) -> Optional[Any]:
extends = list(
itertools.chain.from_iterable(
looker_view.get("extends", looker_view.get("extends__all", []))
)
)
# First, check the current view.
if field in looker_view:
return looker_view[field]
# Then, check the views this extends, following Looker's precedence rules.
for extend in reversed(extends):
assert extend != view_name, "a view cannot extend itself"
extend_view = LookerView.resolve_extends_view_name(
connection, looker_viewfile, looker_viewfile_loader, extend, reporter
)
if not extend_view:
raise NameError(
f"failed to resolve extends view {extend} in view {view_name} of file {looker_viewfile.absolute_file_path}"
)
if field in extend_view:
return extend_view[field]
return None
class LookMLSource(Source):
source_config: LookMLSourceConfig
reporter: LookMLSourceReport
looker_client: Optional[Looker31SDK] = None
def __init__(self, config: LookMLSourceConfig, ctx: PipelineContext):
super().__init__(ctx)
self.source_config = config
self.reporter = LookMLSourceReport()
if self.source_config.api:
looker_api = LookerAPI(self.source_config.api)
self.looker_client = looker_api.get_client()
try:
self.looker_client.all_connections()
except SDKError:
raise ValueError(
"Failed to retrieve connections from looker client. Please check to ensure that you have manage_models permission enabled on this API key."
)
@classmethod
def create(cls, config_dict, ctx):
config = LookMLSourceConfig.parse_obj(config_dict)
return cls(config, ctx)
def _load_model(self, path: str) -> LookerModel:
with open(path, "r") as file:
logger.info(f"Loading file {path}")
parsed = lkml.load(file)
looker_model = LookerModel.from_looker_dict(
parsed, str(self.source_config.base_folder), path, self.reporter
)
return looker_model
def _platform_names_have_2_parts(self, platform: str) -> bool:
if platform in ["hive", "mysql"]:
return True
else:
return False
def _generate_fully_qualified_name(
self, sql_table_name: str, connection_def: LookerConnectionDefinition
) -> str:
"""Returns a fully qualified dataset name, resolved through a connection definition.
Input sql_table_name can be in three forms: table, db.table, db.schema.table"""
# TODO: This function should be extracted out into a Platform specific naming class since name translations are required across all connectors
# Bigquery has "project.db.table" which can be mapped to db.schema.table form
# All other relational db's follow "db.schema.table"
# With the exception of mysql, hive which are "db.table"
# first detect which one we have
parts = len(sql_table_name.split("."))
if parts == 3:
# fully qualified
return sql_table_name.lower()
if parts == 1:
# Bare table form
if self._platform_names_have_2_parts(connection_def.platform):
dataset_name = f"{connection_def.default_db}.{sql_table_name}"
else:
dataset_name = f"{connection_def.default_db}.{connection_def.default_schema}.{sql_table_name}"
return dataset_name
if parts == 2:
# if this is a 2 part platform, we are fine
if self._platform_names_have_2_parts(connection_def.platform):
return sql_table_name
# otherwise we attach the default top-level container
dataset_name = f"{connection_def.default_db}.{sql_table_name}"
return dataset_name
self.reporter.report_warning(
key=sql_table_name, reason=f"{sql_table_name} has more than 3 parts."
)
return sql_table_name.lower()
def _construct_datalineage_urn(
self, sql_table_name: str, looker_view: LookerView
) -> str:
logger.debug(f"sql_table_name={sql_table_name}")
connection_def: LookerConnectionDefinition = looker_view.connection
# Check if table name matches cascading derived tables pattern
# derived tables can be referred to using aliases that look like table_name.SQL_TABLE_NAME
# See https://docs.looker.com/data-modeling/learning-lookml/derived-tables#syntax_for_referencing_a_derived_table
if re.fullmatch(r"\w+\.SQL_TABLE_NAME", sql_table_name):
sql_table_name = sql_table_name.lower().split(".")[0]
# upstream dataset is a looker view based on current view id's project and model
view_id = LookerViewId(
project_name=looker_view.id.project_name,
model_name=looker_view.id.model_name,
view_name=sql_table_name,
)
return view_id.get_urn(self.source_config)
# Ensure sql_table_name is in canonical form (add in db, schema names)
sql_table_name = self._generate_fully_qualified_name(
sql_table_name, connection_def
)
return builder.make_dataset_urn(
connection_def.platform, sql_table_name.lower(), self.source_config.env
)
def _get_connection_def_based_on_connection_string(
self, connection: str
) -> Optional[LookerConnectionDefinition]:
if self.source_config.connection_to_platform_map is None:
self.source_config.connection_to_platform_map = {}
assert self.source_config.connection_to_platform_map is not None
if connection in self.source_config.connection_to_platform_map:
return self.source_config.connection_to_platform_map[connection]
elif self.looker_client:
looker_connection: Optional[DBConnection] = None
try:
looker_connection = self.looker_client.connection(connection)
except SDKError:
logger.error(f"Failed to retrieve connection {connection} from Looker")
if looker_connection:
try:
connection_def: LookerConnectionDefinition = (
LookerConnectionDefinition.from_looker_connection(
looker_connection
)
)
# Populate the cache (using the config map) to avoid calling looker again for this connection
self.source_config.connection_to_platform_map[
connection
] = connection_def
return connection_def
except ConfigurationError:
self.reporter.report_warning(
f"connection-{connection}",
"Failed to load connection from Looker",
)
return None
def _get_upstream_lineage(self, looker_view: LookerView) -> UpstreamLineage:
upstreams = []
for sql_table_name in looker_view.sql_table_names:
sql_table_name = sql_table_name.replace('"', "").replace("`", "")
upstream = UpstreamClass(
dataset=self._construct_datalineage_urn(sql_table_name, looker_view),
type=DatasetLineageTypeClass.VIEW,
)
upstreams.append(upstream)
upstream_lineage = UpstreamLineage(upstreams=upstreams)
return upstream_lineage
def _get_custom_properties(self, looker_view: LookerView) -> DatasetPropertiesClass:
custom_properties = {
"looker.file.content": looker_view.raw_file_content[
0:512000
], # grab a limited slice of characters from the file
"looker.file.path": str(
pathlib.Path(looker_view.absolute_file_path).resolve()
).replace(str(self.source_config.base_folder.resolve()), ""),
}
dataset_props = DatasetPropertiesClass(customProperties=custom_properties)
return dataset_props
def _build_dataset_mce(self, looker_view: LookerView) -> MetadataChangeEvent:
"""
Creates MetadataChangeEvent for the dataset, creating upstream lineage links
"""
logger.debug(f"looker_view = {looker_view.id}")
dataset_snapshot = DatasetSnapshot(
urn=looker_view.id.get_urn(self.source_config),
aspects=[], # we append to this list later on
)
browse_paths = BrowsePaths(
paths=[looker_view.id.get_browse_path(self.source_config)]
)
dataset_snapshot.aspects.append(browse_paths)
dataset_snapshot.aspects.append(Status(removed=False))
dataset_snapshot.aspects.append(self._get_upstream_lineage(looker_view))
dataset_snapshot.aspects.append(
LookerUtil._get_schema(
self.source_config.platform_name,
looker_view.id.view_name,
looker_view.fields,
self.reporter,
)
)
dataset_snapshot.aspects.append(self._get_custom_properties(looker_view))
mce = MetadataChangeEvent(proposedSnapshot=dataset_snapshot)
return mce
def get_project_name(self, model_name: str) -> str:
if self.source_config.project_name is not None:
return self.source_config.project_name
assert (
self.looker_client is not None
), "Failed to find a configured Looker API client"
try:
model = self.looker_client.lookml_model(model_name, "project_name")
assert (
model.project_name is not None
), f"Failed to find a project name for model {model_name}"
return model.project_name
except SDKError:
raise ValueError(
f"Could not locate a project name for model {model_name}. Consider configuring a static project name in your config file"
)
def get_workunits(self) -> Iterable[MetadataWorkUnit]: # noqa: C901
viewfile_loader = LookerViewFileLoader(
str(self.source_config.base_folder), self.reporter
)
# some views can be mentioned by multiple 'include' statements, so this set is used to prevent
# creating duplicate MCE messages
processed_view_files: Set[str] = set()
# The ** means "this directory and all subdirectories", and hence should
# include all the files we want.
model_files = sorted(self.source_config.base_folder.glob("**/*.model.lkml"))
model_suffix_len = len(".model")
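        # e.g. a file "<base_folder>/analytics/web.model.lkml" has stem "web.model",
        # so stripping the ".model" suffix below yields the model name "web"
        # (the path is illustrative, not from a real project).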
for file_path in model_files:
self.reporter.report_models_scanned()
model_name = file_path.stem[0:-model_suffix_len]
if not self.source_config.model_pattern.allowed(model_name):
self.reporter.report_models_dropped(model_name)
continue
try:
logger.debug(f"Attempting to load model: {file_path}")
model = self._load_model(str(file_path))
except Exception as e:
self.reporter.report_warning(
model_name, f"unable to load Looker model at {file_path}: {repr(e)}"
)
continue
assert model.connection is not None
connectionDefinition = self._get_connection_def_based_on_connection_string(
model.connection
)
if connectionDefinition is None:
self.reporter.report_warning(
f"model-{model_name}",
f"Failed to load connection {model.connection}. Check your API key permissions.",
)
self.reporter.report_models_dropped(model_name)
continue
project_name = self.get_project_name(model_name)
for include in model.resolved_includes:
if include in processed_view_files:
logger.debug(f"view '{include}' already processed, skipping it")
continue
logger.debug(f"Attempting to load view file: {include}")
looker_viewfile = viewfile_loader.load_viewfile(
include, connectionDefinition, self.reporter
)
if looker_viewfile is not None:
for raw_view in looker_viewfile.views:
self.reporter.report_views_scanned()
try:
maybe_looker_view = LookerView.from_looker_dict(
project_name,
model_name,
raw_view,
connectionDefinition,
looker_viewfile,
viewfile_loader,
self.reporter,
self.source_config.parse_table_names_from_sql,
self.source_config.sql_parser,
)
except Exception as e:
self.reporter.report_warning(
include,
f"unable to load Looker view {raw_view}: {repr(e)}",
)
continue
if maybe_looker_view:
if self.source_config.view_pattern.allowed(
maybe_looker_view.id.view_name
):
mce = self._build_dataset_mce(maybe_looker_view)
workunit = MetadataWorkUnit(
id=f"lookml-view-{maybe_looker_view.id}",
mce=mce,
)
self.reporter.report_workunit(workunit)
processed_view_files.add(include)
yield workunit
else:
self.reporter.report_views_dropped(
str(maybe_looker_view.id)
)
if (
self.source_config.tag_measures_and_dimensions
and self.reporter.workunits_produced != 0
):
# Emit tag MCEs for measures and dimensions:
for tag_mce in LookerUtil.get_tag_mces():
workunit = MetadataWorkUnit(
id=f"tag-{tag_mce.proposedSnapshot.urn}", mce=tag_mce
)
self.reporter.report_workunit(workunit)
yield workunit
def get_report(self):
return self.reporter
def close(self):
pass
| [
"[email protected]"
] | |
27ad39c799c6746bc8a377f36a87b781ccee6888 | 8370c38514717924c43c289d6524824c4dd24dcf | /Setting.py | b03c5a7153535522d7068cdc6fd7a6589a6b6e2a | [
"MIT"
] | permissive | pliuya/Stimuli_Control_Gel | 631d594a1f7cf5781221cf37f7c9a3577203b496 | cf104a1f89622711f234602914b1155c7b850cf5 | refs/heads/master | 2020-08-21T05:07:56.931493 | 2019-10-18T20:16:07 | 2019-10-18T20:16:07 | 216,100,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Define global variables and initialization
@author: yaliu
"""
def init():
    # five cases: 1. collapse with copper  2. swell with EDTA
# 3. collapse with copper + 2 fibers
# 4. swell with EDTA + 2 fibers
# 5. collapse with copper + four fibers
global CASELABEL, GEL, WALL, BINDINGSITE, COPPER, EDTA
global COMPLEX_BIND, COMPLEX_COPPER, COMPLEX_EDTA, FIBER_TETHER
global FIBER_JOINT, FIBER_JOINT2,FIBER_HEAD1, FIBER_HEAD2, FIBER_HEAD3, FIBER_HEAD4
global timestep, res, num_free_site, num_free_copper, num_free_edta, num_complex_site
global num_complex_copper, num_complex_edta, height_fiber, gyration_fiber
global test_count
    CASELABEL = 1
#read file to array
GEL = []
WALL = []
BINDINGSITE = []
COPPER = []
EDTA = []
COMPLEX_BIND = []
COMPLEX_COPPER = []
COMPLEX_EDTA = []
FIBER_TETHER = []
FIBER_JOINT = []
FIBER_JOINT2 = []
FIBER_HEAD1 = []
FIBER_HEAD2 = []
FIBER_HEAD3 = []
FIBER_HEAD4 = []
#global parameter
timestep = 0
res = [0,0] #gel surface height: mean & std
num_free_site = 0
num_free_copper = 0
num_free_edta = 0
num_complex_site = 0
num_complex_copper = 0
num_complex_edta = 0
height_fiber = 0.
gyration_fiber = 0.
test_count = 0
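# Typical use (illustrative, not taken from the rest of the code base):
#   import Setting
#   Setting.init()           # must run once so the globals above exist
#   Setting.CASELABEL = 3    # pick one of the five cases listed in init()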
| [
"[email protected]"
] | |
2d05391d98727f31638d2a70be4ab101f1c9850c | 37ee3c86323fed9794e3d815c3814b89a211e334 | /test/test_static_share.py | ad97cc3eda2dfc6e04b5856476ccc7d498e40d7e | [
"Apache-2.0"
] | permissive | ajunlonglive/unit | 87c264c17bea28638ffec3fec60144dee22b7d30 | 5c20d43effd950a92ec48c085342a868aaaab353 | refs/heads/master | 2023-08-22T17:57:56.780278 | 2021-10-05T11:43:05 | 2021-10-05T11:43:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,180 | py | import os
from pathlib import Path
import pytest
from unit.applications.proto import TestApplicationProto
class TestStaticShare(TestApplicationProto):
prerequisites = {}
@pytest.fixture(autouse=True)
def setup_method_fixture(self, temp_dir):
os.makedirs(temp_dir + '/assets/dir')
os.makedirs(temp_dir + '/assets/dir2')
Path(temp_dir + '/assets/dir/file').write_text('1')
Path(temp_dir + '/assets/dir2/file2').write_text('2')
assert 'success' in self.conf(
{
"listeners": {"*:7080": {"pass": "routes"}},
"routes": [{"action": {"share": temp_dir + "/assets$uri"}}],
"applications": {},
}
)
def action_update(self, conf):
assert 'success' in self.conf(conf, 'routes/0/action')
def test_share_array(self, temp_dir):
assert self.get(url='/dir/file')['body'] == '1'
assert self.get(url='/dir2/file2')['body'] == '2'
self.action_update({"share": [temp_dir + "/assets/dir$uri"]})
assert self.get(url='/file')['body'] == '1'
assert self.get(url='/file2')['status'] == 404
self.action_update(
{
"share": [
temp_dir + "/assets/dir$uri",
temp_dir + "/assets/dir2$uri",
]
}
)
assert self.get(url='/file')['body'] == '1'
assert self.get(url='/file2')['body'] == '2'
self.action_update(
{
"share": [
temp_dir + "/assets/dir2$uri",
temp_dir + "/assets/dir3$uri",
]
}
)
assert self.get(url='/file')['status'] == 404
assert self.get(url='/file2')['body'] == '2'
def test_share_array_fallback(self):
self.action_update(
{"share": ["/blah", "/blah2"], "fallback": {"return": 201}}
)
assert self.get()['status'] == 201
def test_share_array_invalid(self):
assert 'error' in self.conf({"share": []}, 'routes/0/action')
assert 'error' in self.conf({"share": {}}, 'routes/0/action')
| [
"[email protected]"
] | |
e9c1975065292c75a662024b9d569c90eba00a10 | 6b37797de1f389704c92da1462b742ed260285ee | /api/serializers.py | 0a4d34808d965685189d0e4efb92e10653830e7e | [] | no_license | ajithkjames/vidtube | 709cdeadf313c16735f3a5a95da3a0bb8deb6c90 | 1db30fe31e930b9e7f6ead8f4f7758f0f2746ede | refs/heads/master | 2021-07-07T12:52:13.046322 | 2017-10-06T04:49:18 | 2017-10-06T04:49:18 | 105,968,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | from rest_framework import serializers
from account.models import User
from core.models import Video, Comment
class BaseUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username','first_name','phone')
class VideoSerializer(serializers.ModelSerializer):
uploader = BaseUserSerializer(read_only=True)
class Meta:
model = Video
fields = ('id','uploader','title','description','category','views','file','thumbnail','created_at','updated_at')
class CommentSerializer(serializers.ModelSerializer):
author = BaseUserSerializer(read_only=True)
class Meta:
model = Comment
fields = ('id', 'author','content','video','created_at','updated_at')
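# Usage sketch (assumed, not part of the original app):
#   serializer = VideoSerializer(Video.objects.all(), many=True)
#   serializer.data  # list of dicts, each with a nested read-only "uploader"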
| [
"[email protected]"
] | |
298141bfd37bdb92693844f788815247060f6abc | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/ipv6/ipv6_nat_icmpv6.py | f30bfbedeb7de54fbebe97afe953cb1ad22fbb62 | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 1,217 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class Icmpv6(A10BaseClass):
"""Class Description::
ICMPv6 configuration for IPv6 NAT.
Class icmpv6 supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param respond_to_ping: {"default": 0, "optional": true, "type": "number", "description": "Respond to ICMPv6 echo requests to NAT pool IPs (default: disabled)", "format": "flag"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/ipv6/nat/icmpv6`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "icmpv6"
self.a10_url="/axapi/v3/ipv6/nat/icmpv6"
self.DeviceProxy = ""
self.respond_to_ping = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
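# Usage sketch (assumed, not part of the generated SDK file): keyword arguments
# are simply copied onto the instance by the loop above, e.g.
#   icmpv6 = Icmpv6(respond_to_ping=1)
#   icmpv6.a10_url   # "/axapi/v3/ipv6/nat/icmpv6"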
| [
"[email protected]"
] | |
e4db737e71d8903948b594df12d7a37b01966c0f | 736a9c3abf669281e6ae34d0d81a532758b42a8a | /main.py | 2bad10cf55d93dd74dd4c3cc5b27502959b7faae | [] | no_license | devlat/ssubstr | 6f2d4f1bb39fdeb4aaa4cce7a420a6efc3f0f435 | eb8ac4c01e872ccbce81c3a577b46539e2b891fd | refs/heads/master | 2020-06-12T20:32:00.124464 | 2016-12-07T05:03:31 | 2016-12-07T05:03:31 | 75,753,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 738 | py | # -*- coding: utf-8 -*-
import os, re, time;
timeAmount = time.time();
DIRECTORY_SEPARATOR = '/';
rootCatalogPath = "C:/puphpet";
absolutePath = '';
catalog = os.listdir(rootCatalogPath);
def listDir(path = '', absPath = ''):
if (not absPath):
absolutePath = rootCatalogPath + DIRECTORY_SEPARATOR + path;
else:
absolutePath = absPath + path + DIRECTORY_SEPARATOR;
for item in os.listdir(absolutePath):
if (os.path.isdir(absolutePath + item)):
# print absolutePath + item;
# recursive call
listDir(item, absolutePath);
elif (os.path.isfile(absolutePath + item)):
            # open and immediately close the file: the substring search this
            # script is meant to perform is not implemented yet, and closing
            # avoids leaking file descriptors during the directory walk
            file = open(absolutePath + item, 'r');
            file.close();
listDir();
| [
"[email protected]"
] | |
00c327ecf3fbbca418e830d3ba400c06e7e73b9d | 3360e6bddc614a67e4e3a72cf6894d54fc11e704 | /experimental/migrate_from_trac_wiki_to_zwiki.py | f53265e06a7e21b297bb84fa5a1d9485aa80f302 | [
"MIT"
] | permissive | rui/ZWiki | 71d18ce90fcfb21b81824fb3a7ffe9f840f14461 | 32465a3b735cfa06513fe861df09f963f8c0d1e8 | refs/heads/master | 2021-01-09T06:19:15.729790 | 2011-10-05T06:12:12 | 2011-10-05T06:12:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,263 | py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
This script assists you migrate data from Trac Wiki to ZWiki.
NOTICE: it supports SQLite3 database backend only.
File migrate_from_trac_wiki_to_zwiki_conf.py *MUST* contains following variables:
- trac_db_path
fullpath of your trac wiki database file, i.e., "/path/to/trac-wiki-instance/db/trac.db"
- trac_wiki_attachments_path
fullpath of your trac wiki attachements folder, i.e., "/Users/lee/backups/enjoy-series/attachments/wiki"
- zwiki_pages_path
fullpath of your zwiki instance' pages folder, i.e., /path/to/zwiki/pages"
- zwiki_host
i.e., "127.0.0.1:8080"
"""
import httplib
import os
import shutil
import urllib
import web
import tracwiki2markdown
import migrate_from_trac_wiki_to_zwiki_conf as conf
osp = os.path
PWD = osp.dirname(osp.realpath(__file__))
db = web.database(dbn="sqlite", db=conf.trac_db_path)
def get_page_file_or_dir_fullpath_by_req_path(req_path):
if not req_path.endswith("/"):
return "%s.md" % osp.join(conf.zwiki_pages_path, req_path)
else:
return osp.join(conf.zwiki_pages_path, req_path)
def quote_plus_page_name(page_name):
return "/".join([urllib.quote_plus(i) for i in page_name.split("/")])
def create_page(req_path, content):
fixed_req_path = urllib.unquote(req_path.strip()).replace(" ", "-").lower()
content = web.utils.safestr(content)
content = tracwiki2markdown.tracwiki2markdown(content)
fixed_req_path = web.utils.safestr(fixed_req_path)
params = urllib.urlencode({'content': content})
conn = httplib.HTTPConnection(conf.zwiki_host)
conn.request("POST", "/%s?action=edit" % fixed_req_path, params)
response = conn.getresponse()
if response.status == httplib.NOT_FOUND:
print 'response.status: NOT_FOUND'
exit(-1)
try:
assert response.status == httplib.MOVED_PERMANENTLY
assert response.reason == "Moved Permanently"
except AssertionError:
print "create `%s` failed" % req_path
raise AssertionError
data = response.read()
assert data == 'None'
conn.close()
def create_attachments(page_name):
page_name = quote_plus_page_name(web.utils.safestr(page_name))
attaches_fullpath = osp.join(conf.trac_wiki_attachments_path, page_name)
# print "attaches_fullpath:", attaches_fullpath
# print
if not osp.exists(attaches_fullpath):
print "warning: `%s` not found" % attaches_fullpath
return
fixed_page_name = urllib.unquote(page_name.strip()).replace(" ", "-").lower()
save_to = osp.join(conf.zwiki_pages_path, fixed_page_name)
parent = osp.dirname(save_to)
if page_name.count("/") > 0:
if not osp.exists(parent):
os.makedirs(parent)
attaches = os.listdir(attaches_fullpath)
attaches = [i for i in attaches if not i.startswith(".")]
for i in attaches:
src = osp.join(attaches_fullpath, i)
if not osp.isfile(src):
continue
page_file_fullpath = get_page_file_or_dir_fullpath_by_req_path(fixed_page_name)
if osp.isfile(page_file_fullpath):
dst = osp.join(parent, i)
else:
dst = page_file_fullpath
# print "copy"
# print "\tsrc: ", src
# print "\tdst: ", dst
# print
shutil.copy(src, dst)
def get_page_latest_rev_by_name(name):
name = web.utils.safeunicode(name)
sql = 'select name, text, time from wiki where name = $name order by time desc limit 1'
# sql = 'select name, text from wiki where version = (select max(version) from wiki where name = $name);'
vars = {"name" : name}
records = db.query(sql, vars=vars)
for record in records:
return record
def create_page_and_attachments_by_name(name):
page = get_page_latest_rev_by_name(name)
create_page(urllib.unquote(page["name"]), page["text"])
create_attachments(page["name"])
def main():
total = 0
step = 100
offset = 0
sql = 'select DISTINCT name from wiki limit $limit offset $offset'
vars = {
'limit' : step,
'offset' : offset
}
records = list(db.query(sql, vars=vars))
    while len(records) == 100:
total += len(records)
for record in records:
create_page_and_attachments_by_name(record["name"])
vars["offset"] = vars["offset"] + 100
records = list(db.query(sql, vars=vars))
if len(records) < 100:
total += len(records)
for record in records:
create_page_and_attachments_by_name(record["name"])
print "total:", total
def test():
name = 'System-Management/Plan9/Installing-Plan9-on-Qemu'
print "page_name:", name
page = get_page_latest_rev_by_name(name)
create_page(urllib.unquote(page["name"]), page["text"])
create_attachments(page["name"])
def test2():
name = 'note/系统管理/代理'
print "page_name:", name
page = get_page_latest_rev_by_name(name)
content = page["text"]
content = tracwiki2markdown.tracwiki2markdown(content)
with open('/tmp/t.html', 'w') as f:
f.write(web.utils.safestr(content))
if __name__ == "__main__":
# test()
# test2()
main() | [
"[email protected]"
] | |
4e6f14bdba8d44f680c7fb3039ea126d7db726e3 | 777b5c266360b29b6d4af916726abd5d364b74a1 | /mypy_stubs/django/middleware/__init__.pyi | 6a86f4f5c737e7744b9f2b8bce826f5e5872a917 | [] | no_license | uryyyyyyy/django-graphql | 44d08afc3e44514270d1d5c183caa9d1c1cf3f88 | f3d6513d2325a8e675e47500cc71d8ef56c01537 | refs/heads/master | 2021-06-10T11:11:45.110271 | 2019-02-28T07:39:54 | 2019-02-28T07:39:54 | 172,325,424 | 0 | 0 | null | 2021-04-20T17:56:57 | 2019-02-24T10:44:31 | Python | UTF-8 | Python | false | false | 120 | pyi | # Stubs for django.middleware (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
| [
"[email protected]"
] | |
624036dd50c74e94bad01c146235445383a5bb19 | 0d56166b9c0652245c930ea8740a1749cdfb3f78 | /src/cns/web/sav/server.py | 4ee70ccdc12e876f03e9af674e87b5e50505b8f7 | [] | no_license | IngDiazPichinao/web-python | 1790715df5145636cbaf1dff888249f9e978d41e | f8243e5eb2ce62b21bd32c65daf39e6b7d4bc09e | refs/heads/master | 2021-01-19T21:41:50.841646 | 2017-04-19T02:28:16 | 2017-04-19T02:28:16 | 88,691,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | """ Server for "Aserradero Produccion".
"""
import os.path as ospath
from flask import Blueprint
from cns.web import LoadingTemplate
templates = []
def main():
bp = Blueprint('sav', 'production',
static_folder='static',
template_folder='templates')
bp.root_path = ospath.abspath(ospath.dirname(__file__))
from .views import home, inventory, orders
bp.add_url_rule('/', view_func=home, methods=['GET'])
bp.add_url_rule('/inventory', view_func=inventory, methods=['GET'])
bp.add_url_rule('/orders', view_func=orders, methods=['GET'])
templates.append(LoadingTemplate(bp, url_namespace='sav'))
# Externalized as entry point (setuptools)
main()
| [
"[email protected]"
] | |
4e8cd91a7772409f47a6a79c8414112b873ef94b | 597a0b638861d68703d56066e282f2d4adf6a87a | /add_question.py | 1a9ba3d68a2920f8747cf4d318fccccb3c40f49f | [] | no_license | phyzzmat/Intelenter | 1afdb780e33f699f4c20c4312529358ae1131192 | b9f98ad1fc20d3a176d84ce0d8ca0a345a1e6970 | refs/heads/master | 2023-04-05T10:32:09.381452 | 2021-04-14T22:18:38 | 2021-04-14T22:18:38 | 183,944,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,191 | py | from telegram.ext import MessageHandler, Filters, CommandHandler, ConversationHandler
from database import *
def begin(bot, update, chat_data):
update.message.reply_text(
"Превосходно! Теперь напишите номер комнаты."
)
return "room_choice"
def choose_room(bot, update, chat_data):
chat_data["room_to_add"] = update.message.text
room = session.query(Room).filter_by(number=update.message.text).first()
print(room, update.message.text)
if room:
chat_data["password_to_add"] = room.password
update.message.reply_text(
"Комната с таким именем уже существует. Хотите зайти в нее? "
"Введите пароль!"
)
return "password_to_existing_room"
else:
update.message.reply_text(
"Такой комнаты еще нет. Придумайте пароль для нее."
)
return "enter_password"
def password_to_existing_room(bot, update, chat_data):
if update.message.text != chat_data["password_to_add"]:
update.message.reply_text(
"К сожалению, введённый пароль неверен. "
"Панель администратора закрыта."
)
return ConversationHandler.END
update.message.reply_text(
"Пароль верный. Теперь напишите через запятую тему вопроса и стоимость."
)
return "enter_topic_and_points"
def enter_statement(bot, update, chat_data):
update.message.reply_text(
"Хорошо. Осталось только ввести ответ."
)
chat_data["statement_to_add"] = update.message.text
return "enter_answer"
def enter_answer(bot, update, chat_data):
update.message.reply_text(
"Спасибо! Вопрос добавлен. Добавим следующий? Для выхода напишите /stop. "
"Или напишите через запятую тему вопроса и стоимость."
)
chat_data["answer_to_add"] = update.message.text
new_question = Question(room=chat_data["room_to_add"],
statement=chat_data["statement_to_add"],
answer=chat_data["answer_to_add"],
points=chat_data["points_to_add"],
topic=chat_data["topic_to_add"])
session.add(new_question)
session.commit()
return "enter_topic_and_points"
def enter_topic_and_points(bot, update, chat_data):
data = update.message.text.split(',')
try:
topic, points = ','.join(data[:-1]), int(data[-1])
except Exception:
update.message.reply_text(
"Произошла ошибка. Мы уже работаем над ее устранением."
)
return "enter_topic_and_points"
else:
chat_data["topic_to_add"] = topic
chat_data["points_to_add"] = points
update.message.reply_text(
"Великолепно. Теперь введите условие вопроса."
)
return "enter_statement"
def enter_password(bot, update, chat_data):
txt = update.message.text
chat_data["pass_to_add"] = txt
update.message.reply_text("Введите количество игроков.")
return "create_new_room"
def create_new_room(bot, update, chat_data):
txt = update.message.text
if txt.isdigit():
chat_data["part_to_add"] = int(txt)
new_room = Room(password=chat_data["pass_to_add"],
number=chat_data["room_to_add"],
participants=chat_data["part_to_add"]
)
session.add(new_room)
session.commit()
update.message.reply_text(
"Комната успешно создана. Теперь напишите через запятую тему вопроса и стоимость."
)
return "enter_topic_and_points"
else:
update.message.reply_text(
"Целое натуральное, плес."
)
return "create_new_room"
def stop(bot, update):
update.message.reply_text(
"Работа завершена."
)
return ConversationHandler.END
def add_admin_commands(dp):
admin_mode = ConversationHandler(
entry_points=[CommandHandler('admin', begin, pass_chat_data=1)],
states={
"room_choice":
[MessageHandler(Filters.text,
choose_room,
pass_chat_data=1)],
"password_to_existing_room":
[MessageHandler(Filters.text,
password_to_existing_room,
pass_chat_data=1)],
"create_new_room":
[MessageHandler(Filters.text,
create_new_room,
pass_chat_data=1)],
"enter_topic_and_points":
[MessageHandler(Filters.text,
enter_topic_and_points,
pass_chat_data=1)],
"enter_statement":
[MessageHandler(Filters.text,
enter_statement,
pass_chat_data=1)],
"enter_answer":
[MessageHandler(Filters.text,
enter_answer,
pass_chat_data=1)],
"enter_password":
[MessageHandler(Filters.text,
enter_password,
pass_chat_data=1)],
},
fallbacks=[CommandHandler('stop', stop),
CommandHandler('play', stop)]
)
dp.add_handler(admin_mode)
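# Wiring sketch (assumed, not shown in this module): the main bot script creates
# an Updater and passes its dispatcher here, e.g.
#   updater = Updater(BOT_TOKEN)   # BOT_TOKEN is a placeholder
#   add_admin_commands(updater.dispatcher)
#   updater.start_polling()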
| [
"[email protected]"
] | |
5d07e9d4a3249a230656ce28c7fb767d15333d17 | ee11207d53b34750393e62fa812694481e685f36 | /API_for_bid/settings.py | 6224d9b1e4f6f0da0a6136c74786a2424d54b38f | [] | no_license | Stilet58/API_for_bid | 774bc09001a23da81743b070890e5920f23e8c93 | 4af2aba7d3c77af12b4749482bc01ceccaf4aab3 | refs/heads/master | 2021-01-23T21:45:14.053113 | 2017-09-16T18:13:57 | 2017-09-16T18:13:57 | 102,902,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,557 | py | """
Django settings for API_for_bid project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_+z%x38cr@!=t&0a-6(-&cij3r3=2*f+9(xq*05ey#arj11z6('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_filters',
'api_organization',
'api_partners',
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ['rest_framework.permissions.DjangoModelPermissions'],
'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend'],
'PAGE_SIZE': 10
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'API_for_bid.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'API_for_bid.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'D:/PythonProject/API_for_bid/db_for_api.db',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'translations', 'locale'),
)
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
4e94fcc2f1d636d64238651724555ae9717124ca | fb205b95682b5102e0f7ff3bd185f2643439e3f6 | /ceef/asgi.py | bf2257dcfdffd698096ad8b832e4448b6cc5499a | [] | no_license | MircaGheorghe/ceef | 06b9a3d3b9fd3fdfb1c66fb7333af361d2246ce8 | a773430fe5139aa0735020fe49689b5ab970bc87 | refs/heads/master | 2021-03-17T16:48:26.695267 | 2020-04-30T19:05:22 | 2020-04-30T19:05:22 | 247,003,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
ASGI config for ceef project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ceef.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
ebdd81befa265da37bfc4e6814701a860792770f | 40164f2cc6098741128bb7f9d03ea4e1965bcbbb | /core/models.py | ebbad292a3faa2f6fe6fd2f705e7f974dcee5997 | [] | no_license | iamanx17/Employee-management-system | c0d9d87bfecaca65e85dc54a92977b293df7435a | e96b2a45cbc488ec24d42c70aa736e2657e294f7 | refs/heads/main | 2023-07-11T13:53:01.856699 | 2021-08-12T08:32:25 | 2021-08-12T08:32:25 | 395,250,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here.
class department(models.Model):
id=models.AutoField(primary_key=True)
emp_dpt=models.CharField(max_length=300, help_text='Enter the department of the employee', unique=True)
user=models.ForeignKey(User, on_delete=models.CASCADE)
    updated_on=models.DateTimeField(auto_now=True)  # refreshed on every save, as the field name implies
created_on=models.DateTimeField(default=timezone.now)
def __str__(self):
return self.emp_dpt
class emp_data(models.Model):
EmpId=models.AutoField(primary_key=True)
EmpName=models.CharField(max_length=250, help_text='Enter the name of the employee')
Age=models.IntegerField(help_text='Enter the age of the employee')
DeptName=models.ForeignKey(department, on_delete=models.CASCADE)
class Meta:
ordering=['EmpId']
def __str__(self):
return self.EmpName +" "+ str(self.DeptName) | [
"[email protected]"
] | |
c267b0465c31042b4a006ec06f33a4d3b82b87d2 | 5605dc75d01da96e050253c4c80995a8cd894769 | /news/migrations/0002_auto_20210901_1151.py | d6176710581a9cbb7bb3d8a0978904f4ccded444 | [] | no_license | Izzy-M/tribune | d02435a4144da6a92fd7f5338644ede133b264bf | 32905864edeab95baf2c4f77d094aa792fceaf00 | refs/heads/master | 2023-07-22T20:49:17.265888 | 2021-09-02T13:15:45 | 2021-09-02T13:15:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # Generated by Django 3.2.6 on 2021-09-01 08:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tagname', models.CharField(max_length=30)),
],
),
migrations.AlterModelOptions(
name='editor',
options={'ordering': ['fname']},
),
]
| [
"[email protected]"
] | |
80fb4f5df316b000d7e7bcf245913540d3394c25 | 12cff52c0cb2eb85099a9eeba89e0f00ba93eb15 | /order_test.py | 6ac3e59ffae06c74999079c44fff8308993e3c71 | [] | no_license | RNogales94/usersnack | 6d1c5cd431c72c1f01af21687965cdf5c59445f8 | 1980e823ab04fead7fcafa7b62aa02375464c759 | refs/heads/master | 2021-04-08T14:35:34.156365 | 2020-03-23T00:03:35 | 2020-03-23T00:03:35 | 248,784,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,727 | py | from order import Order
from pizza import Pizza
from extra import Extra
import pytest
@pytest.fixture
def complete_order():
pizza = Pizza(
id=5,
name="Cheese & Tomato",
price=11.90,
ingredients=["tomato", "cheese"],
img="cheesetomato.jpg"
)
peppers = Extra(name="green peppers", price=1.2)
mushrooms = Extra(name="mushrooms", price=1.2)
onion = Extra(name="onion", price=1)
order = Order(pizza=pizza, extras=[peppers, mushrooms, onion])
return order
@pytest.fixture
def no_extras_order():
pizza = Pizza(
id=5,
name="Cheese & Tomato",
price=11.90,
ingredients=["tomato", "cheese"],
img="cheesetomato.jpg"
)
order = Order(pizza=pizza)
return order
def test_class_attributes(complete_order):
assert isinstance(complete_order.pizza, Pizza)
assert isinstance(complete_order.extras, list)
assert isinstance(complete_order.extras[0], Extra)
def test_class_attributes_2(no_extras_order):
assert isinstance(no_extras_order.pizza, Pizza)
assert isinstance(no_extras_order.extras, list)
def test_sum_total_price(complete_order):
assert complete_order.get_total_price() == 15.3
def test_sum_total_price_2(no_extras_order):
assert no_extras_order.get_total_price() == 11.9
def test_serialize(complete_order):
assert isinstance(complete_order.serialize(), dict)
assert isinstance(complete_order.serialize()['pizza'], dict)
assert isinstance(complete_order.serialize()['extras'], list)
assert isinstance(complete_order.serialize()['extras'][0], dict)
| [
"[email protected]"
] | |
ccc0d45648d668c42b069072ef9074a58899534a | 784a7c9d45188550ccd8f27da294b3f28af4f833 | /ex13.py | dce1acbb9fb05200077e4c1e535b4166f4336294 | [] | no_license | seng1e/LearnPythonHardway | 231dbd93dbf1ad66a00180ed59ee191f33753269 | 91230a3e2934d85bbfecee0f8dcba4409e88dd16 | refs/heads/master | 2021-09-15T04:01:09.612551 | 2018-05-25T09:21:47 | 2018-05-25T09:21:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | from sys import argv
script, first, second, third = argv
print "The script is called:",script
print "Your first variable is:",first
print "Your second variable is:",second
print "Your third variable is:",third
flag = raw_input("(y/n?)")
print flag | [
"[email protected]"
] | |
153017496ff2c7ddc2597faa7d0fc8318c34503f | 463913a8dbdc28e5cd1f80f9723b48736753221f | /manage.py | 97e7fd2cd1bf6b82ae734f82248be0df590c853c | [] | no_license | sakshikhurana/twitter_automator | b4ff03604cb69a5e670fd47f354bf08c08885f07 | 295e69ce35590f23d2e33546a48271c986821412 | refs/heads/master | 2023-04-05T16:31:09.886201 | 2021-04-18T20:13:17 | 2021-04-18T20:13:17 | 359,241,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tweet_automator_app.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e2c012b7c4e72ad4ea14cdd8b8cc6a8f98109e55 | 1af56fde85ea1f3f538451c4b8281a6394a7fdaf | /discussions.py | 58570590619501167da4d2206f153b3e6567aaf8 | [] | no_license | leslie-alldridge/selenium | 2e18a484b92935ecc3260146ca17b4f54df766d8 | cd082a8069f92c1ebaea31623d5b5dcd2f92a1a3 | refs/heads/master | 2020-06-06T00:19:28.474281 | 2019-07-02T07:56:59 | 2019-07-02T07:56:59 | 192,585,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,788 | py | from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.action_chains import ActionChains
from time import sleep
from env import token
import requests
def sendSlack(filename='image_0.png'):
    # upload the given screenshot to Slack; the default keeps the original
    # argument-free calls working
    my_file = {
        'file': (filename, open(filename, 'rb'), 'png')
    }
    payload = {
        "filename": filename,
        "token": token,
        "channels": ['#hi'],
    }
    r = requests.post("https://slack.com/api/files.upload", params=payload, files=my_file)
    print(r.status_code)
# set up driver & get facebook
chrome_options = webdriver.ChromeOptions()
prefs = {"profile.default_content_setting_values.notifications": 2}
chrome_options.add_experimental_option("prefs", prefs)
driver = webdriver.Chrome(
ChromeDriverManager().install(), chrome_options=chrome_options)
driver.get('https://central.xero.com/s/question/0D51N00004XQXg5SAH/xero-support ')
sleep(3)
# go to lauren
authors = driver.find_elements_by_css_selector("span[data-id='005o0000002mTivAAE']")
if authors:
count = 0
# go to each post
for author in authors:
if 'Lauren' in author.text:
# scroll into view
sleep(2)
author.location_once_scrolled_into_view
# expand post if it exists
            try:
                # expand the post when a truncation link is present; if the
                # link is missing, capture the post as it is shown
                link = driver.find_element_by_link_text('Expand Post')
                link.click()
            except Exception:
                pass
            filename = 'image_' + str(count) + '.png'
            driver.save_screenshot(filename)
            count += 1
            sendSlack(filename)
else:
driver.close()
| [
"[email protected]"
] | |
edd86a1bf4e6ea2bb44a45a976f6cb3b31ad682f | 06f272bd9d7f5e86250a99d3ee476c97a6fc9fb7 | /pythonidae/lib/middleware/lang_change_dinamically.py | 2c6440e50d1d5e412ef82ec8dc4af8864d0eb937 | [
"Apache-2.0",
"CC-BY-4.0",
"CC-BY-3.0"
] | permissive | Fidesol/econmet | aab01aa96f6fa0e610b92440e71cd17c4241eb14 | 59387f04283de48106c8f80ae5ea365d3e76e350 | refs/heads/master | 2020-05-27T13:52:00.608721 | 2013-07-04T09:51:45 | 2013-07-04T09:51:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | from django.utils import translation
class LangDinamicallyMiddleware(object):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
def process_request(self, request):
try:
language = request.COOKIES['language']
        except KeyError:
language = translation.get_language_from_request(request)
translation.activate(language)
request.LANGUAGE_CODE = language
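# Activation sketch (assumed, not shown in this repository): add the dotted path
# of this class to the middleware list in settings.py and have a view set the
# "language" cookie when the user picks a language, e.g.
#   response.set_cookie('language', 'es')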
| [
"root@usuario-M61PME-S2P.(none)"
] | root@usuario-M61PME-S2P.(none) |
a8f30436c1bdfdd37f64623a3acb582e8ac1d8a8 | 8019f0df9a782b825132a328f1425fbe3028e657 | /odoo/addons/splashsync/objects/orders/delivery.py | 8cf4fdfbe5d047b4d8fe8def0110337149a5d46d | [] | no_license | p403n1x/odoo | 01b5e28eb1351c04d9045a1fb16e30de45c7929d | ce2cd03b3a9a8b5cfa5a81cf2b70ecafe5fb1ce2 | refs/heads/master | 2023-01-03T17:29:55.322847 | 2020-10-28T15:21:44 | 2020-10-28T15:21:44 | 280,176,919 | 0 | 1 | null | 2020-10-28T15:21:46 | 2020-07-16T14:33:56 | Python | UTF-8 | Python | false | false | 6,150 | py | # -*- coding: utf-8 -*-
#
# This file is part of SplashSync Project.
#
# Copyright (C) 2015-2019 Splash Sync <www.splashsync.com>
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
from collections import OrderedDict
from splashpy import const, Framework
from splashpy.componants import FieldFactory
from splashpy.helpers import ListHelper
class OrderDelivery:
"""
Access to Order Delivery Fields
"""
def buildDeliveryFields(self):
# ====================================================================#
# Delivery Qty Details (SKU)
FieldFactory.create(const.__SPL_T_VARCHAR__, "default_code", "Product SKU")
FieldFactory.inlist("delivery")
FieldFactory.microData("http://schema.org/OrderItem", "orderItemNumber")
FieldFactory.isNotTested()
# ====================================================================#
# Delivery Qty Details (Shipped)
FieldFactory.create(const.__SPL_T_INT__, "product_uom_qty", "Ordered Qty")
FieldFactory.inlist("delivery")
FieldFactory.microData("http://schema.org/OrderItem", "orderQuantity")
FieldFactory.isReadOnly().isNotTested()
# ====================================================================#
# Delivery Qty Details (Shipped)
FieldFactory.create(const.__SPL_T_INT__, "qty_delivered", "Delivered Qty")
FieldFactory.inlist("delivery")
FieldFactory.microData("http://schema.org/OrderItem", "orderItemStatus")
FieldFactory.isNotTested()
def getDeliveryFields(self, index, field_id):
"""
Get Order Delivered Details List
:param index: str
:param field_id: str
:return: None
"""
# ==================================================================== #
# Init Lines List...
lines_list = ListHelper.initOutput(self._out, "delivery", field_id)
# ==================================================================== #
# Safety Check
if lines_list is None:
return
# ==================================================================== #
# Read Lines Data
lines_values = OrderDelivery.__get_delivered_values(self.object.order_line, lines_list)
for pos in range(len(lines_values)):
ListHelper.insert(self._out, "delivery", field_id, "line-" + str(pos), lines_values[pos])
# ==================================================================== #
# Force Lines Ordering
self._out["delivery"] = OrderedDict(sorted(self._out["delivery"].items()))
self._in.__delitem__(index)
def setDeliveryFields(self, field_id, field_data):
"""
Set Order Delivered Details List
:param field_id: str
:param field_data: hash
:return: None
"""
# ==================================================================== #
# Safety Check - field_id is an Order lines List
if field_id != "delivery":
return
self._in.__delitem__(field_id)
# ==================================================================== #
# Safety Check - Received List is Valid
if not isinstance(field_data, dict):
return
# ==================================================================== #
# Walk on Received Order Lines...
for line_data in field_data.values():
# ==================================================================== #
# Detect Pointed Order Line
order_line = None
try:
for line in self.object.order_line:
if line.product_id.default_code != line_data["default_code"]:
continue
order_line = line
except:
pass
if order_line is None:
continue
# ==================================================================== #
# Load Delivered Qty
try:
qty_delivered = int(line_data["qty_delivered"])
except:
continue
# ==================================================================== #
# Compare Delivered Qty
if qty_delivered == order_line.qty_delivered:
continue
if qty_delivered > order_line.product_uom_qty:
Framework.log().warn(
"Delivered Qty is Higher than Ordered Qty for "+str(line.product_id.default_code)
)
# ==================================================================== #
# Update Delivered Qty
order_line.qty_delivered_method = 'manual'
order_line.qty_delivered_manual = qty_delivered
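    # Shape of the incoming "delivery" data handled above (reconstructed from the
    # reads on line_data; keys are the real field ids, values are illustrative):
    #   {"line-0": {"default_code": "SKU-001", "qty_delivered": "2"}, ...}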
@staticmethod
def __get_delivered_values(order_lines, field_id):
"""
Get List of Lines Values for given Field
:param order_lines: recordset
:param field_id: str
:return: dict
"""
values = []
# ====================================================================#
# Walk on Lines
for order_line in order_lines.filtered(lambda r: r.display_type is False):
# ==================================================================== #
# Linked Product ID
if field_id == "default_code":
try:
values += [str(order_line.product_id[0].default_code)]
except:
values += [None]
# ====================================================================#
# Qty Ordered | Qty Shipped/Delivered
if field_id in ['product_uom_qty', 'qty_delivered']:
values += [int(getattr(order_line, field_id))]
return values
| [
"[email protected]"
] | |
583f0191c8f51c472157e49e423dd64ff4dfe970 | 0a57dec07ce91eea2bf76bf9c736bf6ac69e3fc1 | /supportagent/datasets.py | 556a8aa8759ffbabd7cede51abf726b65e3889dd | [
"MIT"
] | permissive | emac1234/supportagent | 89aa30b520188214e8955397b768f0e467369efb | c84c1d4f8ac4f0fc73310331b9195b1ccbce89b3 | refs/heads/main | 2023-07-21T13:58:04.769418 | 2021-08-26T20:44:25 | 2021-08-26T20:44:25 | 395,435,046 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | from torch.utils.data import Dataset, DataLoader, IterableDataset
from transformers import GPT2LMHeadModel, GPT2Tokenizer, AutoModelForCausalLM, AutoTokenizer
class ZendeskDataset(IterableDataset):
def __init__(self, df):
super(ZendeskDataset, self).__init__()
self.df = df
self.tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-medium')
def __iter__(self):
for i, row in self.df.iterrows():
user_comment = row['comment'] + ' <|endoftext|> '
agent_comment = row['comment_next']
user_tokens = self.tokenizer.encode(user_comment, add_special_tokens=False)
agent_tokens = self.tokenizer.encode(agent_comment, add_special_tokens=False)
            # build next-token training pairs: the context X is the user turn plus
            # the agent reply up to position j, the target y is the agent token at j
            for j in range(len(agent_tokens)):
                if j + 1 < len(agent_tokens):
                    combined = user_tokens + agent_tokens[:j]
                    decoded = self.tokenizer.decode(combined)
                    X = self.tokenizer.encode(decoded)
                    y = agent_tokens[j]
                    if len(X) > 500 or j > 20:
                        continue
                    yield X, y
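# Consumption sketch (assumed, not part of this module): the dataset is an
# IterableDataset yielding variable-length token lists, so it is typically read
# with batch_size=1 or a custom collate_fn, e.g.
#   loader = DataLoader(ZendeskDataset(df), batch_size=1)
#   for X, y in loader:
#       ...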
| [
"[email protected]"
] | |
df83958f733803e6b2005bd1328d7706c6e16250 | b284df612709689cb021c4ff63682ce327dd52c6 | /add_footer.py | c1b507f3418c64e9432f091b8dfd97b933670f66 | [] | no_license | ankanch/kanch.blog | 040f359a0ee191373cec9399490ca61b303c9bb1 | 17385e40f12dd9e4add3af2d895ee4fcb065ae1c | refs/heads/master | 2022-08-07T14:56:52.067742 | 2022-07-27T01:57:03 | 2022-07-27T01:57:03 | 199,397,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 736 | py | #encoding:utf-8
import os
import datetime
fpath = "_posts"
footer = """{% include post_footer.md %}"""
fd = ""
with open("_includes/post_footer.md","r",encoding="utf-8",errors="ignore") as f:
    fd = f.read()
with open("_includes/post_footer.md","w",encoding="utf-8",errors="ignore") as f:
f.truncate()
fd = fd.replace("@KTIME@",str(datetime.datetime.now()))
f.write(fd)
for name in os.listdir(fpath):
    post_path = fpath + "/" + name
    with open(post_path, encoding="utf-8", errors="ignore") as f:
        fd = f.read()
    # append the footer include only when the post does not already contain it
    if fd.find(footer) == -1:
        fd += "\r\n\r\n" + footer
        with open(post_path, "w", encoding="utf-8", errors="ignore") as f:
            f.write(fd)
| [
"[email protected]"
] | |
9b0d0c61044fae44f8aaa4f59fcb9d0d06891c26 | 7daca3dfb18d5f69fd20c0045a351f059c7e3f4c | /gametreeStud1.py | 8a47ec5771bfac516980b5307a4ad9e6e2de83b8 | [] | no_license | Iskyco/gametheory-alquerque | 0d213473e3d926d928b4b328a2d75b7b7164fd27 | 37a17360b3c6e45ae13771f22a9b81411895007b | refs/heads/master | 2022-01-25T07:54:31.218752 | 2019-06-21T08:13:30 | 2019-06-21T08:13:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,183 | py | import math
from copy import deepcopy
# http://www.python-kurs.eu/deep_copy.php
# class GameTreeAlgorithms:
# Test program
def testSearch():
# read(n)
n = 9
print(n)
gs = Nimstate()
gs1 = gs.copyState()
gs2 = gs.copyState()
gs3 = gs.copyState()
eval = NimEvaluator()
print("Test minimax1")
gs.setStartState(n, False)
gs.print()
    # fill in the list (the game tree passed to minimax below)
f = minimax([])
print("minimax value:", f)
print("number of generated states:", gs.genStates)
print("stateHistory:", gs.stateHistory)
# Minimax method:
# the game tree is passed as a nested list structure
def minimax(l):
if (type(l) == list):
a = -100
n = len(l)
print(range(n))
for i in range(n):
print(i)
f = - minimax(l[i])
if f > a:
a = f
return a
else:
return l
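# Worked example (added for illustration, not part of the original exercise):
# minimax([[3, -2], [1]]) treats each inner list as an opponent subtree and
# flips signs on the way up (negamax style); the call returns 1, because the
# second subtree is worth -(-1) = 1 while the first is only worth -2.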
class Gamestate:
def setStartState(self):
raise NotImplementedError("You should have implemented this")
def getAllMoves(self):
raise NotImplementedError("You should have implemented this")
def possibleMove(self, mv):
raise NotImplementedError("You should have implemented this")
def hasNextMove(self):
raise NotImplementedError("You should have implemented this")
def getNextMove(self):
raise NotImplementedError("You should have implemented this")
def doMove(self, mv):
raise NotImplementedError("You should have implemented this")
def undoMove(self, mv):
raise NotImplementedError("You should have implemented this")
def getAllChildStates(self):
raise NotImplementedError("You should have implemented this")
def hasNextChild(self):
raise NotImplementedError("You should have implemented this")
def getNextChild(self):
raise NotImplementedError("You should have implemented this")
def getChild(self, mv):
raise NotImplementedError("You should have implemented this")
def firstPlayerToMove(self):
raise NotImplementedError("You should have implemented this")
def secondPlayerToMove(self):
raise NotImplementedError("You should have implemented this")
def isTerminal(self):
raise NotImplementedError("You should have implemented this")
def firstPlayerToWin(self):
raise NotImplementedError("You should have implemented this")
def secondPlayerToWin(self):
raise NotImplementedError("Not implemented")
def draw(self):
raise NotImplementedError("Not implemented")
def getMoveHistory(self):
raise NotImplementedError("Not implemented")
def getStateHistory(self):
raise NotImplementedError("Not implemented")
def printGameState(self):
"""
3x3 field:
+-+-+
|\|/|
+-+-+
|/|\|
+-+-+
general:
if (x+y)%2==1:
"
+-+
|/|
+-+
"
else:
"
+-+
|\|
+-+
"
one player -> x
sec player -> o
example start:
x-x-x
|\|/|
x-+-o
|/|\|
o-o-o
"""
class Evaluator:
def heuristicValue(self, gs):
raise NotImplementedError("Not implemented")
def simpleHeuristicValue(self, gs):
raise NotImplementedError("Not implemented")
def getMinValue(self):
raise NotImplementedError("Not implemented")
def getMaxValue(self):
raise NotImplementedError("Not implemented")
def exactValue(self, gs):
raise NotImplementedError("Not implemented")
def evaluate(self, gs):
raise NotImplementedError("Not implemented")
class Nimstate(Gamestate):
num = 0
lastChildMv = 0
nextChildMv = 1
firstPlayerToMove = True
history = []
root = None
stateHistory = []
genStates = 0
    def __init__(self, x=0):
        # the default argument keeps the existing no-argument Nimstate() calls valid
        self.num = x
        self.firstPlayerToMove = True
def setStartState(self, x, firstPlayer):
self.genStates = 1
self.num = x
self.lastChildMv = 0
self.nextChildMv = 1
self.firstPlayerToMove = firstPlayer
self.history = []
self.root = self
self.stateHistory.append([])
self.genStates = 1
def getAllMoves(self):
ls = []
while self.hasNextMove():
ls.append(self.getNextMove())
return ls
def possibleMove(self, mv):
if 0 < mv and mv <= 3 and mv <= self.num:
return True
else:
return False
def hasNextMove(self):
if (self.possibleMove(self.nextChildMv)):
return True
else:
return False
def getNextMove(self):
if (self.hasNextMove()):
self.lastChildMv += 1
self.nextChildMv += 1
return self.lastChildMv
else:
raise Exception(
"Invalid Argument Exception: no admissible move available")
def doMove(self, mv):
if self.possibleMove(mv):
self.genStates += 1
self.num -= mv
self.lastChildMv = 0
self.nextChildMv = 1
self.firstPlayerToMove = not self.firstPlayerToMove
self.history.append(mv)
self.stateHistory.append(deepcopy(self.history))
# print(self.stateHistory)
if not (self == self.root):
self.root.genStates += 1
self.root.stateHistory.append(deepcopy(self.history))
else:
raise Exception("Invalid Argument Exception: no possible move")
def doNextMove(self):
self.doMove(self.nextChildMv)
def undoMove(self):
if 0 < len(self.history):
mv = self.history.pop()
self.num += mv
self.lastChildMv = mv
self.nextChildMv = self.lastChildMv + 1
self.firstPlayerToMove = not self.firstPlayerToMove
else:
raise Exception("Invalid Argument Exception: history is empty")
def getAllChildStates(self):
mvList = self.getAllMoves()
childList = []
n = len(mvList)
for i in range(n):
childList.append(self.childState(mvList[i]))
return childList
def hasNextChild(self):
return self.hasNextMove()
def getNextChild(self):
mv = self.getNextMove()
return self.childState(mv)
def getChild(self, mv):
if self.possibleMove(mv):
return self.childState(mv)
else:
raise Exception("Invalid Argument Exception: no possible move")
def getFirstPlayerToMove(self):
return self.firstPlayerToMove
def secondPlayerToMove(self):
return not self.firstPlayerToMove
def isTerminal(self):
if (self.num == 0):
return True
else:
return False
    def firstPlayerToWin(self):
        # firstPlayerToMove is a boolean attribute here (not a method): the side
        # to move at a terminal position has lost, the other side has won
        return not self.firstPlayerToMove
    def secondPlayerToWin(self):
        return self.firstPlayerToMove
def draw(self):
return False
def getMoveHistory(self):
return self.history # without copy
def getStateHistory(self):
return self.stateHistory # without copy
def copyState(self):
gs = Nimstate()
gs.num = self.num
gs.lastChildMv = self.lastChildMv
gs.nextChildMv = self.nextChildMv
gs.firstPlayerToMove = self.firstPlayerToMove
# gs.history = deepcopy(self.history) #deep copy
gs.history = self.history[:] # shallow copy
gs.stateHistory = deepcopy(self.stateHistory)
gs.root = self.root
return gs
def childState(self, mv):
if self.possibleMove(mv):
child = self.copyState()
child.doMove(mv)
return child
else:
return None
def equalState(self, other):
if not self.num == other.num:
return False
elif not self.lastChildMv == other.lastChildMv:
return False
elif not self.nextChildMv == other.nextChildMv:
return False
elif not self.firstPlayerToMove == other.firstPlayerToMove:
return False
elif not self.equalList(self.history, other.history):
return False
else:
return True
def print(self):
print(self)
print(self.num)
print(self.lastChildMv)
print(self.nextChildMv)
print(self.firstPlayerToMove)
print(self.history)
print(self.stateHistory)
print(self.root)
def equalList(self, l1, l2):
if not len(l1) == len(l2):
return False
else:
n = len(l1)
for i in range(n):
if not l1[i] == l2[i]:
return False
return True
class NimEvaluator(Evaluator):
def heuristicValue(self, gs):
if not (isinstance(gs, Nimstate)):
raise Exception("Illegal Argument")
else:
if (gs.num % 4 == 0):
return -1
else:
return 1
def getMinValue(self):
return -1
def getMaxValue(self):
return 1
def exactValue(self, gs):
if not (isinstance(gs, Nimstate)):
raise Exception("Illegal Argument")
else:
if (gs.num % 4 == 0):
return -1
else:
return 1
def evaluate(self, gs):
if not (isinstance(gs, Nimstate)):
raise Exception("Illegal Argument")
else:
if (gs.num % 4 == 0):
return -1
else:
return 1
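# Remark on the evaluator (added note): with moves of 1-3 sticks per turn, the
# positions where num % 4 == 0 are exactly the losing positions for the player
# to move (the opponent can always restore a multiple of four), which is why
# heuristicValue, exactValue and evaluate all return -1 precisely in that case.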
| [
"[email protected]"
] | |
af0122fdf356d91ade9f53317f9a2cc248a69bd9 | f59267e2f6568826328d2d7475bebe23fdd6898d | /manage.py | cecda98108f79215012c0a8cba3cf047dc1c0eb8 | [] | no_license | rayjc/ml_recommendation_site | b675619331a525b16b56745259e20d56ece46c44 | 6624e41d6a9beac9ffbf9c7dd7c2ce3bc176d28d | refs/heads/master | 2023-05-02T20:08:49.182933 | 2020-12-07T18:31:37 | 2020-12-07T18:31:37 | 213,557,522 | 0 | 0 | null | 2023-04-21T20:40:36 | 2019-10-08T05:41:23 | Python | UTF-8 | Python | false | false | 642 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ml_recommendation_site.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1feb53821646047cc378125217335fbcf2ba8bc4 | 41e612ae49b15d001c45e040729a5af86a5d648b | /questions/migrations/0001_initial.py | 2daf92589867667cc345676a9f4baf298fd9ddc1 | [] | no_license | manavmarya/QuizApp | 2de888c8230f3f794e6ee04100f19e50f247a6de | 31cac727361ea97167cc411e7d739e4efc7434a4 | refs/heads/master | 2023-06-17T02:54:54.846645 | 2021-07-01T21:31:05 | 2021-07-01T21:31:05 | 381,826,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | # Generated by Django 3.2.5 on 2021-07-01 16:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.TextField(max_length=1000)),
('correct', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.TextField(max_length=1000)),
],
),
migrations.CreateModel(
name='SubQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('attempted_Choice', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='questions.choice')),
('question', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='questions.question')),
],
),
]
| [
"[email protected]"
] | |
a93cdfe30853d6076619e0be3a16abb33c98114c | 9534e4c3574e40fa4f8d628bf2319884c9808ab9 | /Lib/site-packages/scrapy/linkextractors/lxmlhtml.py | dec44e7133792ee35f6840ac50d700dc6205229e | [] | no_license | jhfwb/Web-spiders | f394b95dcc7b30a36e3eb71b11345fa8988f40d7 | 5eda21e66f65dd6f7f79e56441073bdcb7f18bdf | refs/heads/master | 2023-06-17T21:03:31.631157 | 2021-07-17T01:55:55 | 2021-07-17T01:55:55 | 270,164,582 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,179 | py | """
Link extractor based on lxml.html
"""
import six
from six.moves.urllib.parse import urljoin
import lxml.etree as etree
from w3lib.html import strip_html5_whitespace
from w3lib.url import canonicalize_url
from scrapy.link import Link
from scrapy.utils.misc import arg_to_iter, rel_has_nofollow
from scrapy.utils.python import unique as unique_list, to_native_str
from scrapy.utils.response import get_base_url
from scrapy.linkextractors import FilteringLinkExtractor
# from lxml/webScrapySystem/lxml/html/__init__.py
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
_collect_string_content = etree.XPath("string()")
def _nons(tag):
if isinstance(tag, six.string_types):
if tag[0] == '{' and tag[1:len(XHTML_NAMESPACE)+1] == XHTML_NAMESPACE:
return tag.split('}')[-1]
return tag
class LxmlParserLinkExtractor(object):
def __init__(self, tag="a", attr="href", process=None, unique=False,
strip=True, canonicalized=False):
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
self.strip = strip
if canonicalized:
self.link_key = lambda link: link.url
else:
self.link_key = lambda link: canonicalize_url(link.url,
keep_fragments=True)
def _iter_links(self, document):
for el in document.iter(etree.Element):
if not self.scan_tag(_nons(el.tag)):
continue
attribs = el.attrib
for attrib in attribs:
if not self.scan_attr(attrib):
continue
yield (el, attrib, attribs[attrib])
def _extract_links(self, selector, response_url, response_encoding, base_url):
links = []
# hacky way to get the underlying lxml parsed document
for el, attr, attr_val in self._iter_links(selector.root):
# pseudo lxml.html.HtmlElement.make_links_absolute(base_url)
try:
if self.strip:
attr_val = strip_html5_whitespace(attr_val)
attr_val = urljoin(base_url, attr_val)
except ValueError:
continue # skipping bogus links
else:
url = self.process_attr(attr_val)
if url is None:
continue
url = to_native_str(url, encoding=response_encoding)
# to fix relative links after process_value
url = urljoin(response_url, url)
link = Link(url, _collect_string_content(el) or u'',
nofollow=rel_has_nofollow(el.get('rel')))
links.append(link)
return self._deduplicate_if_needed(links)
def extract_links(self, response):
base_url = get_base_url(response)
return self._extract_links(response.selector, response.url, response.encoding, base_url)
def _process_links(self, links):
""" Normalize and filter extracted links
        The subclass should override it if necessary
"""
return self._deduplicate_if_needed(links)
def _deduplicate_if_needed(self, links):
if self.unique:
return unique_list(links, key=self.link_key)
return links
class LxmlLinkExtractor(FilteringLinkExtractor):
def __init__(self, allow=(), deny=(), allow_domains=(), deny_domains=(), restrict_xpaths=(),
tags=('a', 'area'), attrs=('href',), canonicalize=False,
unique=True, process_value=None, deny_extensions=None, restrict_css=(),
strip=True, restrict_text=None):
tags, attrs = set(arg_to_iter(tags)), set(arg_to_iter(attrs))
tag_func = lambda x: x in tags
attr_func = lambda x: x in attrs
lx = LxmlParserLinkExtractor(
tag=tag_func,
attr=attr_func,
unique=unique,
process=process_value,
strip=strip,
canonicalized=canonicalize
)
super(LxmlLinkExtractor, self).__init__(lx, allow=allow, deny=deny,
allow_domains=allow_domains, deny_domains=deny_domains,
restrict_xpaths=restrict_xpaths, restrict_css=restrict_css,
canonicalize=canonicalize, deny_extensions=deny_extensions,
restrict_text=restrict_text)
def extract_links(self, response):
base_url = get_base_url(response)
if self.restrict_xpaths:
docs = [subdoc
for x in self.restrict_xpaths
for subdoc in response.xpath(x)]
else:
docs = [response.selector]
all_links = []
for doc in docs:
links = self._extract_links(doc, response.url, response.encoding, base_url)
all_links.extend(self._process_links(links))
return unique_list(all_links)
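# --- Hedged usage sketch (added for illustration, not part of the upstream module). ---
# It assumes scrapy is installed (the imports above already require it); the URL and
# HTML below are made-up sample data, not taken from any real site.
if __name__ == '__main__':
    from scrapy.http import HtmlResponse
    sample_html = (b'<html><body>'
                   b'<a href="/page-a">A</a>'
                   b'<a href="http://example.com/page-b" rel="nofollow">B</a>'
                   b'</body></html>')
    sample_response = HtmlResponse(url='http://example.com/', body=sample_html, encoding='utf-8')
    extractor = LxmlLinkExtractor(allow=(r'example\.com',))
    for extracted in extractor.extract_links(sample_response):
        # prints each absolute URL plus whether rel="nofollow" was set on the anchor
        print(extracted.url, extracted.nofollow)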
| [
"[email protected]"
] | |
b223f3fad4b07da7b457e440ef74dc44861f3850 | bf8d073661092bd4e7883831d0134b112208860d | /app/views/contexts/view_submitted_response_context.py | 2c6805d893da9506fd4ddcb7c6527dca31926ea7 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | pricem14pc/eq-questionnaire-runner | 182ea7fb8f5ff821d5e05032111e5db000da3d22 | 5412ef4c2cb2008a32b426362a5d2dc386caf7cc | refs/heads/master | 2022-03-10T01:48:07.729071 | 2022-03-01T16:46:45 | 2022-03-01T16:46:45 | 245,265,984 | 0 | 0 | MIT | 2022-03-01T16:46:46 | 2020-03-05T20:55:31 | Python | UTF-8 | Python | false | false | 2,376 | py | from datetime import datetime
from typing import Union
from flask import url_for
from flask_babel import lazy_gettext
from app.data_models import QuestionnaireStore
from app.globals import has_view_submitted_response_expired
from app.questionnaire.questionnaire_schema import QuestionnaireSchema
from app.views.contexts.submission_metadata_context import (
build_submission_metadata_context,
)
from app.views.contexts.summary_context import SummaryContext
def build_view_submitted_response_context(
language: str,
schema: QuestionnaireSchema,
questionnaire_store: QuestionnaireStore,
survey_type: str,
) -> dict[str, Union[str, datetime, dict]]:
view_submitted_response_expired = has_view_submitted_response_expired(
questionnaire_store.submitted_at # type: ignore
)
if survey_type == "social":
submitted_text = lazy_gettext("Answers submitted.")
elif trad_as := questionnaire_store.metadata.get("trad_as"):
submitted_text = lazy_gettext(
"Answers submitted for <span>{ru_name}</span> ({trad_as})"
).format(ru_name=questionnaire_store.metadata["ru_name"], trad_as=trad_as)
else:
submitted_text = lazy_gettext(
"Answers submitted for <span>{ru_name}</span>"
).format(ru_name=questionnaire_store.metadata["ru_name"])
metadata = build_submission_metadata_context(
survey_type,
questionnaire_store.submitted_at, # type: ignore
questionnaire_store.metadata["tx_id"],
)
context = {
"hide_sign_out_button": True,
"view_submitted_response": {
"expired": view_submitted_response_expired,
},
"metadata": metadata,
"submitted_text": submitted_text,
}
if not view_submitted_response_expired:
summary_context = SummaryContext(
language=language,
schema=schema,
answer_store=questionnaire_store.answer_store,
list_store=questionnaire_store.list_store,
progress_store=questionnaire_store.progress_store,
metadata=questionnaire_store.metadata, # type: ignore
response_metadata=questionnaire_store.response_metadata,
)
context["summary"] = summary_context()
context["pdf_url"] = url_for("post_submission.get_view_submitted_response_pdf")
return context
| [
"[email protected]"
] | |
a52099362e29f69da827e35d09f602bc40f74d78 | 2ab9b6eea4dab3267311824d151daf7a04c0f1e0 | /python/src/bool_func.py | 12ad50c894e59da4d62b9bdbbfae98d297a5e949 | [] | no_license | gutelfuldead/SpyDR | 12ec3f4b31131579f6f6c58068602fa66332a4bb | e1cb47e3a38825f75d51140aec5a8434421d94e8 | refs/heads/master | 2020-06-13T19:11:16.094388 | 2020-05-02T06:39:31 | 2020-05-02T06:39:31 | 75,565,133 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | import numpy as np
def int_to_bool(data):
'''
    Convert each integer in `data` to zero-padded 8-bit binary, returning one int8 array element per bit
'''
a = np.zeros(8,dtype=np.int8)
bin_data = np.zeros(len(data)*8,dtype=np.int8)
# data = data.astype(dtype=np.int8)
for i in range(0,len(data)):
# formats data in 8bit binary without the 0b prefix
a = format(data[i],'b').zfill(8)
for j in range(0,len(a)):
bin_data[i*len(a) + j] = a[j]
return bin_data
def pack_bits(data):
'''
    Pack a bit array into bytes, prepending one element that records how many bits of the final byte are significant (len(data) % 8)
'''
nbits = len(data)
rem = nbits % 8
    nbytes = nbits // 8  # integer division keeps the np.empty shape an int
if rem:
nbytes += 1
packed = np.empty(1+nbytes, dtype=np.uint8)
packed[0] = rem
packed[1:] = np.packbits(data)
return packed
def unpack_bits(data_packed):
'''
data_packed == packed data w/ extra bits
Strips the excess bits when unpacking
'''
rem = data_packed[0]
data_packed = data_packed.astype(dtype=np.uint8)
unpacked = np.unpackbits(data_packed[1:])
if rem:
unpacked = unpacked[:-(8-rem)]
return unpacked
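# --- Hedged usage sketch (not from the original repo): round-trip the helpers above. ---
# The payload below is arbitrary sample data; it checks that pack_bits/unpack_bits are
# inverses even when the bit count is not a multiple of 8.
if __name__ == '__main__':
    sample_bytes = [3, 250, 17]                 # plain Python ints, one byte each
    bits = int_to_bool(sample_bytes)            # 24 bits, one array element per bit
    partial = bits[:20]                         # deliberately not a multiple of 8
    restored = unpack_bits(pack_bits(partial))
    assert np.array_equal(restored, partial)
    print("pack/unpack round-trip ok")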
| [
"[email protected]"
] | |
263c61c905fe391c934fe02ba676cdb55bc51944 | 19290d1c61af6fd22af3c2b4f79e86c6f483146f | /admin_users/views.py | 005742bc29dc4b881ef09e0c4836dfa742bbdd5b | [] | no_license | fhjd/Html | bea6621ad28543bc70c806660069843b2f13e129 | 5170471d7722fbc1464c68fc0099fd0c1978e012 | refs/heads/master | 2020-03-22T04:41:17.912132 | 2018-07-04T06:55:07 | 2018-07-04T06:55:07 | 139,514,000 | 0 | 0 | null | 2018-07-03T01:44:56 | 2018-07-03T01:44:56 | null | UTF-8 | Python | false | false | 4,389 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from django.shortcuts import render
from all_models.models import *
# Create your views here.
def add_staff_view(request):
if request.method == 'GET':
department = DepartmentInfo.objects.all()
role = UserRole.objects.all()
print '飞鸟:天空不曾留下我的痕迹,但是我已飞过'
return render(request, 'add_staff.html', {'department': department, 'role': role})
elif request.method == 'POST':
print type(request.POST)
try:
department = request.POST.get('department', '')
staff_department = DepartmentInfo.objects.get(department_name=department)
role_name = request.POST.get('role_name', '')
staff_power = UserRole.objects.get(role_name=role_name)
print '谁家玉笛暗飞声'
name = request.POST.get('name', '')
age = request.POST.get('age', '')
gender = request.POST.get('gender', '')
education = request.POST.get('education', '')
            # landline phone
landline = request.POST.get('landline','')
            # payroll card number
pcn = request.POST.get('pcn')
            # ID card number
            ic = request.POST.get('ic', '')  # default to empty string like the other fields
if UserInfo.objects.filter(user_idnum=ic):
return HttpResponse('身份证已经登记')
            # operator
operator = request.POST.get('operator', '')
account = request.POST.get('account', '')
password = request.POST.get('password', '')
            # ethnicity
ethnic = request.POST.get('ethnic', '')
marriage = request.POST.get('marriage', '')
            # phone number
pn = request.POST.get('pn', '')
address = request.POST.get('address', '')
hobby = request.POST.get('hobby', '')
email = request.POST.get('email', '')
        except (DepartmentInfo.DoesNotExist, UserRole.DoesNotExist):
return HttpResponse('员工重复或注册信息不足')
        # Check for duplicate staff by ID card number
# UserInfo.objects.get(user_idnum=ic)
        # Create the staff record
staff = UserInfo.objects.create(
department=staff_department,
role=staff_power,
user_name=name,
user_age=age,
user_sex=gender,
user_mobile=pn,
user_address=address,
user_idnum=ic,
user_tel=landline,
user_num=account,
user_pw=password,
user_nation=ethnic,
user_bankcard=pcn,
is_married=marriage,
is_used='',
user_diploma=education,
user_addman=operator,
user_intest=hobby,
user_email=email,
)
return HttpResponse('员工信息,添加成功')
else:
return HttpResponse('添加失败')
# Add a department
def add_department_view(request):
if request.method == 'GET':
return render(request, 'add_department.html')
elif request.method == 'POST':
department_name = request.POST.get('department_name', '')
description = request.POST.get('description', '')
try:
DepartmentInfo.objects.get(department_name=department_name)
return HttpResponse('当前部门已存在,无需创建')
except DepartmentInfo.DoesNotExist:
DepartmentInfo.objects.create(department_name=department_name, department_desc=description)
return HttpResponse('恭喜,新部门创建成功')
else:
return HttpResponse('创建失败')
def add_role_power_view(request):
if request.method == 'GET':
return render(request, 'add_role_power.html')
elif request.method == 'POST':
role_name = request.POST.get('role', '')
role_power = request.POST.get('power', '')
try:
UserRole.objects.get(role_name=role_name)
return HttpResponse('角色名已存在')
except UserRole.DoesNotExist:
UserRole.objects.create(role_name=role_name, role_power=role_power)
return HttpResponse('新建角色成功')
else:
return HttpResponse('创建角色失败')
| [
"[email protected]"
] | |
2dc430655fdf92a4c6ed943c90c8ae1b76be9a56 | 3efdd75877daa68036a007790577874cb09c1861 | /sqlconnection.py | 96ae7eb6f5e80efb285f37fd173010a88c9fd61b | [] | no_license | urvishjarvis1/corona_virus_spread_canada_dashboard | 24d854e435d81ccf6f6895422ef8d3eed34fbf61 | 3943756dc2d234bc9651566ef7ed8be6c9b58e5f | refs/heads/master | 2023-08-04T18:35:29.709714 | 2021-09-06T18:00:34 | 2021-09-06T18:00:34 | 283,587,205 | 0 | 0 | null | 2020-08-10T04:24:40 | 2020-07-29T19:39:55 | PHP | UTF-8 | Python | false | false | 2,577 | py | import pymysql as sql
connection = sql.connect(host="localhost",user="root",passwd="",database="coronavirusvisualization")
cursor = connection.cursor()
region_table= """ CREATE TABLE IF NOT EXISTS region_table ( reg_id INT(20) PRIMARY KEY AUTO_INCREMENT, region_name CHAR(200))"""
cursor.execute(region_table)
atlantictabel = """ CREATE TABLE IF NOT EXISTS atlantic
( case_id INT(20) PRIMARY KEY,
episodeweek INT(20),
gender CHAR(200),
agegroup INT(20),
occupation INT(20),
asymptomatic INT(20),
onsetweekofsym INT(20),
onsetyearofsym INT(20),
hospitalstatus INT(20),
recovered INT(20),
recoveryweek INT(20),
recoveryyear INT(20),
transmission INT(20),
reg_id INT(20),FOREIGN KEY (reg_id) REFERENCES region_table(reg_id)) """
cursor.execute(atlantictabel)
atlantictabel = """ CREATE TABLE IF NOT EXISTS quebec
( case_id INT(20) PRIMARY KEY,
episodeweek INT(20),
gender CHAR(200),
agegroup INT(20),
occupation INT(20),
asymptomatic INT(20),
onsetweekofsym INT(20),
onsetyearofsym INT(20),
hospitalstatus INT(20),
recovered INT(20),
recoveryweek INT(20),
recoveryyear INT(20),
transmission INT(20),
reg_id INT(20),FOREIGN KEY (reg_id) REFERENCES region_table(reg_id)) """
cursor.execute(atlantictabel)
atlantictabel = """ CREATE TABLE IF NOT EXISTS ontario
( case_id INT(20) PRIMARY KEY,
episodeweek INT(20),
gender CHAR(200),
agegroup INT(20),
occupation INT(20),
asymptomatic INT(20),
onsetweekofsym INT(20),
onsetyearofsym INT(20),
hospitalstatus INT(20),
recovered INT(20),
recoveryweek INT(20),
recoveryyear INT(20),
transmission INT(20),
reg_id INT(20),FOREIGN KEY (reg_id) REFERENCES region_table(reg_id)) """
cursor.execute(atlantictabel)
atlantictabel = """ CREATE TABLE IF NOT EXISTS prairies
( case_id INT(20) PRIMARY KEY,
episodeweek INT(20),
gender CHAR(200),
agegroup INT(20),
occupation INT(20),
asymptomatic INT(20),
onsetweekofsym INT(20),
onsetyearofsym INT(20),
hospitalstatus INT(20),
recovered INT(20),
recoveryweek INT(20),
recoveryyear INT(20),
transmission INT(20),
reg_id INT(20),FOREIGN KEY (reg_id) REFERENCES region_table(reg_id)) """
cursor.execute(atlantictabel)
atlantictabel = """ CREATE TABLE IF NOT EXISTS britishcolumbia
( case_id INT(20) PRIMARY KEY,
episodeweek INT(20),
gender CHAR(200),
agegroup INT(20),
occupation INT(20),
asymptomatic INT(20),
onsetweekofsym INT(20),
onsetyearofsym INT(20),
hospitalstatus INT(20),
recovered INT(20),
recoveryweek INT(20),
recoveryyear INT(20),
transmission INT(20),
reg_id INT(20),FOREIGN KEY (reg_id) REFERENCES region_table(reg_id)) """
cursor.execute(atlantictabel)
connection.close()
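# --- Hedged refactor sketch (not executed above): the five regional tables share the same
# schema, so the DDL could be generated in one loop instead of five copies. Table names and
# column definitions are taken verbatim from the statements above.
REGION_TABLE_NAMES = ["atlantic", "quebec", "ontario", "prairies", "britishcolumbia"]
REGION_TABLE_COLUMNS = """case_id INT(20) PRIMARY KEY,
    episodeweek INT(20), gender CHAR(200), agegroup INT(20), occupation INT(20),
    asymptomatic INT(20), onsetweekofsym INT(20), onsetyearofsym INT(20),
    hospitalstatus INT(20), recovered INT(20), recoveryweek INT(20), recoveryyear INT(20),
    transmission INT(20), reg_id INT(20),
    FOREIGN KEY (reg_id) REFERENCES region_table(reg_id)"""
def create_region_tables(active_cursor):
    # helper is only defined here as a sketch; the script above already created the tables
    for table_name in REGION_TABLE_NAMES:
        active_cursor.execute("CREATE TABLE IF NOT EXISTS {0} ({1})".format(table_name, REGION_TABLE_COLUMNS))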
| [
"[email protected]"
] | |
52e9f1586a2c393b503419d6e70d777a14b1d627 | 69885cca3f70d8d220e7c4a0e89aee6b92b0ce98 | /setup.py | fb7e0a7dcb976521977cb715107073721583583e | [] | no_license | nerdynewt/pycrawl | fe23d762890c831f27e7f6530f076d42543d0f50 | 3187a6a77990e06a3f13d5509877ff9d17ca773b | refs/heads/master | 2022-11-18T12:14:11.355981 | 2020-07-19T11:59:05 | 2020-07-19T11:59:05 | 278,623,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,320 | py | # import atexit
from setuptools import setup
# from setuptools.command.install import install
# def _post_install():
# import reportinator.reconfig
# reportinator.reconfig.main(first_install=True)
# print('POST INSTALL')
# class new_install(install):
# def __init__(self, *args, **kwargs):
# super(new_install, self).__init__(*args, **kwargs)
# atexit.register(_post_install)
setup(name='pycrawl',
version='0.1',
description='Discover Personal Websites by Crawling the Internet',
url='http://github.com/nerdynewt/pycrawl',
author='Vishnu Namboodiri K S',
author_email='[email protected]',
license='GPL v3.0',
packages=['pycrawl'],
# cmdclass={
# 'install': new_install,
# },
# package_data={
# "reportinator": ["layouts/*.cls"],
# "reportinator": ["scripts/make-my-report.py"],
# },
entry_points={
"console_scripts": [
"pycrawl = pycrawl.main:main",
]
},
# install_requires=[
# 'matplotlib',
# 'numpy',
# 'ruamel.yaml',
# 'doi2bib',
# 'pandas',
# 'pyyaml',
# 'configurator',
# ],
include_package_data=True,
zip_safe=False)
| [
"[email protected]"
] | |
68ff5bfcf2e81316fe6283668599d1fbf60927b3 | 5a4601e104681c79a87ca7216cac38a33828e378 | /exctract_logs_image/extracter.py | 4447dfeed4354097f5cfa2d81757694f1acb2bc5 | [] | no_license | alexku7/kub-logging | 44b6d4f78e3b1b4ad9a5fe67431e583fc8fd90a5 | 6db56eb13f8284256116ea2a50f11efc8bf385f8 | refs/heads/master | 2021-01-04T09:22:27.647667 | 2020-02-15T16:29:56 | 2020-02-15T16:29:56 | 240,486,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,515 | py | #!/usr/bin/python
import pymongo
import json
from datetime import datetime, timedelta
import glob
import os
import tempfile
import shutil
import time
mongo_host=os.getenv("MONGO_HOST")
mongo_port=os.getenv("MONGO_PORT")
mongo_db=os.getenv("MONGO_DB")
mongo_coll=os.getenv("MONGO_COLL")
days_to_subtract=7
root_folder="/etc/kub-logs/"
from bson.json_util import dumps
def create_mongoDB_conn():
mongo_client= pymongo.MongoClient("mongodb://" + mongo_host + ":" + mongo_port +"/")
db = mongo_client[mongo_db]
global coll
coll = db[mongo_coll]
def extract_logs(cycle,days):
print ("start select")
if cycle:
folder = tempfile.mkdtemp() + "/"
else:
folder=root_folder
target_date = datetime.utcnow() - timedelta(days)
print (target_date)
logs= coll.find({"time": {"$gt": target_date }})
for x in logs:
my_date=x["time"]
my_name=x["tailed_path"]
short_date="{:%Y-%m-%d}".format(my_date)
my_name=short_date + "-" + my_name
f = open(folder + my_name, "a")
f.write(x["message"]+"\n")
f.close()
print(coll.count())
print (target_date)
files = glob.glob(folder+"*")
if cycle:
for f in files:
shutil.copy(f,root_folder)
os.remove(f)
files = glob.glob(root_folder+"*")
for f in files:
os.remove(f)
create_mongoDB_conn()
extract_logs(False,days_to_subtract)
while True:
time.sleep(120)
extract_logs(True,1)
| [
"root@d-test1-vm.vsh3r3mjf0cu5fdrckdsojpxmg.ax.internal.cloudapp.net"
] | root@d-test1-vm.vsh3r3mjf0cu5fdrckdsojpxmg.ax.internal.cloudapp.net |
7684602647aa6795243e0676cf2e42d193ebe73f | 598e992f80c5647cbd19519e7a11b683870c24a4 | /trainimagenet.py | 7e2d111f40df0b19607f7379beef3a7629d248ce | [
"MIT"
] | permissive | CQUlearningsystemgroup/LearningToBinarize | 23b5109d3148e28ffbf6deab70fc1da66295939c | 1ecad897145af65ff52323bf2ec64a2154dc87d6 | refs/heads/main | 2023-06-23T06:25:40.055453 | 2021-07-17T13:42:36 | 2021-07-17T13:42:36 | 324,091,902 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,541 | py | import os
import sys
import shutil
import numpy as np
import time, datetime
import torch
import random
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.utils.data.distributed
import torchvision
#sys.path.append("../")
from utils import *
import utils_loss
from torchvision import datasets, transforms
from torch.autograd import Variable
# from birealnet import birealnet18
from Models import birealnetimagenet
parser = argparse.ArgumentParser("birealnet")
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--epochs', type=int, default=120, help='num of training epochs')
parser.add_argument('--learning_rate', type=float, default=0.1, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=0, help='weight decay')
parser.add_argument('--save', type=str, default='./Results', help='path for saving trained models')
parser.add_argument('--data', default='', metavar='DIR', help='path to dataset')
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
parser.add_argument('-j', '--workers', default=20, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--print_interval', type=int, default=10, help='number of times to print')
args = parser.parse_args()
CLASSES = 1000
# use_meta = 'MuitFC'
use_meta = 'Conv'
# use_meta = 'NoMeta'
if not os.path.exists('log'):
os.mkdir('log')
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join('log/log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
def main():
if not torch.cuda.is_available():
sys.exit(1)
start_t = time.time()
cudnn.benchmark = True
cudnn.enabled=True
logging.info("args = %s", args)
# load model
model = birealnetimagenet.birealnet18()
logging.info(model)
model = nn.DataParallel(model).cuda()
# teacher model
model_teacher = torchvision.models.resnet18(pretrained=False)
model_teacher.load_state_dict(torch.load('./resnet18.pth'))
logging.info(model_teacher)
model_teacher = nn.DataParallel(model_teacher).cuda()
model_teacher.eval()
    # meta_net: collect its parameters and create a separate optimizer for them
meta_net_param = []
for pname, p in model.named_parameters():
# print(pname)
if pname.find('meta_net') >= 0:
meta_net_param.append(p)
meta_net_param_id = list(map(id, meta_net_param))
meta_optimizer = torch.optim.Adam([{'params': meta_net_param}], lr=0.001, weight_decay=0)
meta_scheduler = torch.optim.lr_scheduler.MultiStepLR(meta_optimizer, [70, 90, 100, 110], gamma=0.1)
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
criterion_smooth = criterion_smooth.cuda()
criterion_kd = utils_loss.DistillationLoss().cuda()
criterion_meta = Metaloss().cuda()
all_parameters = model.parameters()
weight_parameters = []
for pname, p in model.named_parameters():
if p.ndimension() == 4 or pname=='classifier.0.weight' or pname == 'classifier.0.bias':
weight_parameters.append(p)
weight_parameters_id = list(map(id, weight_parameters))
other_parameters = list(filter(lambda p: id(p) not in weight_parameters_id, all_parameters))
other_parameters = list(filter(lambda p: id(p) not in meta_net_param_id, other_parameters)) #
optimizer = torch.optim.SGD(
[{'params': other_parameters},
{'params': weight_parameters, 'weight_decay': args.weight_decay}],
lr=args.learning_rate, momentum=args.momentum)
# scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda step : (1.0-step/args.epochs), last_epoch=-1)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [70, 90, 100, 110], gamma=0.1)
start_epoch = 0
best_top1_acc= 0
checkpoint_tar = os.path.join(args.save, 'checkpoint.pth.tar')
if os.path.exists(checkpoint_tar):
logging.info('loading checkpoint {} ..........'.format(checkpoint_tar))
checkpoint = torch.load(checkpoint_tar)
start_epoch = checkpoint['epoch']
best_top1_acc = checkpoint['best_top1_acc']
model.load_state_dict(checkpoint['state_dict'], strict=False)
logging.info("loaded checkpoint {} epoch = {}" .format(checkpoint_tar, checkpoint['epoch']))
# adjust the learning rate according to the checkpoint
for epoch in range(start_epoch):
scheduler.step()
# load training data
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# data augmentation
crop_scale = 0.08
lighting_param = 0.1
train_transforms = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(crop_scale, 1.0)),
Lighting(lighting_param),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize])
train_dataset = datasets.ImageFolder(
traindir,
transform=train_transforms)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
# load validation data
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
# train the model
epoch = start_epoch
while epoch < args.epochs:
train_obj, train_top1_acc, train_top5_acc = train(epoch, train_loader, model, model_teacher, criterion_kd, optimizer, scheduler,
meta_optimizer, meta_scheduler, criterion_meta)
valid_obj, valid_top1_acc, valid_top5_acc = validate(epoch, val_loader, model, criterion, args, criterion_meta)
is_best = False
if valid_top1_acc > best_top1_acc:
best_top1_acc = valid_top1_acc
is_best = True
save_checkpoint({
'epoch': epoch,
'state_dict': model.state_dict(),
'best_top1_acc': best_top1_acc,
'optimizer' : optimizer.state_dict(),
}, is_best, args.save)
epoch += 1
training_time = (time.time() - start_t) / 3600
print('total training time = {} hours. best acc: {}'.format(training_time, best_top1_acc))
def train(epoch, train_loader, model, model_teacher, criterion, optimizer, scheduler, meta_optim=None, meta_scheduler=None, criterion_meta=None ):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
model.train()
model_teacher.eval()
end = time.time()
scheduler.step()
if use_meta != 'NoMeta':
meta_scheduler.step()
for param_group in optimizer.param_groups:
cur_lr = param_group['lr']
if use_meta != 'NoMeta':
for param_group in meta_optim.param_groups:
metacur_lr = param_group['lr']
print('epoch: %d meta learning_rate: %e' % (epoch, metacur_lr ))
print('epoch: %d base learning_rate: %e' % (epoch, cur_lr))
for i, (images, target) in enumerate(train_loader):
data_time.update(time.time() - end)
images = images.cuda()
target = target.cuda()
        # compute output
logits = model(images)
logits_teacher = model_teacher(images).detach()
loss, _ = criterion(logits, logits_teacher, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(logits, target, topk=(1, 5))
n = images.size(0)
losses.update(loss.item(), n) #accumulated loss
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
# compute gradient and do SGD step
optimizer.zero_grad()
if use_meta != 'NoMeta':
meta_optim.zero_grad()
loss.backward()
optimizer.step()
if use_meta != 'NoMeta':
meta_optim.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_interval == 0:
progress.display(i)
return losses.avg, top1.avg, top5.avg
def validate(epoch, val_loader, model, criterion, args, criterion_meta):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluation mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
images = images.cuda()
target = target.cuda()
# compute output
logits = model(images)
loss = criterion(logits, target)
# lossB = criterion_meta(lossB)
# loss = loss + 0.05 * lossB
# measure accuracy and record loss
pred1, pred5 = accuracy(logits, target, topk=(1, 5))
n = images.size(0)
losses.update(loss.item(), n)
top1.update(pred1[0], n)
top5.update(pred5[0], n)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_interval == 0:
progress.display(i)
print(' * acc@1 {top1.avg:.3f} acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return losses.avg, top1.avg, top5.avg
if __name__ == '__main__':
main()
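# Hedged launch example (paths are placeholders, not verified): the script expects an
# ImageNet-style folder with train/ and val/ subdirectories (see --data above) and a
# teacher checkpoint at ./resnet18.pth; it resumes from <save>/checkpoint.pth.tar if present.
#   python trainimagenet.py --data /path/to/imagenet --batch_size 256 --epochs 120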
| [
"[email protected]"
] | |
e1008c7b9631d092ae8d8ab8b61a8fb6a036519f | cacf6bc83af7e428c6e02f9b7b84b0a715a98bff | /snippets/07 - Case study - air quality data79.py | e512b9a0a8626218fbd7c8266177ac5b5d3430fe | [
"MIT"
] | permissive | MattGyverLee/pyling | 499070b0c523bceea9e734a84e835e5f13183d57 | c1a3c0284ae84851290c2cd04a70f0065e0a1270 | refs/heads/master | 2021-05-21T16:51:05.654468 | 2020-04-03T14:34:43 | 2020-04-03T14:34:43 | 252,724,715 | 0 | 0 | MIT | 2020-04-03T12:23:56 | 2020-04-03T12:23:55 | null | UTF-8 | Python | false | false | 91 | py | data_weekend_BETR801 = data_weekend['BETR801'].unstack(level=0)
data_weekend_BETR801.head() | [
"[email protected]"
] | |
33ad32b64bf844ab2fb287a1270660038778971d | 4af7f6aa50439f48bf38db916028def6944f5723 | /rcp/rcp.py | d0a278cedbf29cdc1a481f27d2c64ca6d15107f9 | [] | no_license | azcoigreach/rcp | 0af4ee207c1774ba226845930d52fd0ab32b0cc5 | 1a1ab257e72818e50c5cf81af261b3ed747e447c | refs/heads/master | 2021-01-22T23:52:59.119092 | 2017-03-05T20:21:53 | 2017-03-05T20:21:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | from bs4 import BeautifulSoup
import csv
import argparse
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
parser = argparse.ArgumentParser()
parser.add_argument("url", help="The url of the polling data.")
parser.add_argument('-o', "--output", nargs="?", help="The output file name. Defaults to output.csv",
default="output.csv")
args = parser.parse_args()
def main():
response = urlopen(args.url)
soup = BeautifulSoup(response, 'html.parser')
full_poll = soup.find("div", {"id": 'polling-data-full'})
rows = full_poll.find('table', {"class": 'data'})
p = []
for row in rows:
cols = row.find_all(['th', 'td'])
cols = [ele.text.strip() for ele in cols]
p.append([ele for ele in cols])
with open(args.output, "w") as f:
writer = csv.writer(f)
writer.writerows(p)
if __name__ == '__main__':
main()
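# Hedged usage example (the URL is a placeholder, not a verified page): the script expects a
# RealClearPolitics-style page containing a div with id "polling-data-full".
#   python rcp.py "https://www.realclearpolitics.com/epolls/<race>.html" -o polls.csv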
| [
"[email protected]"
] | |
75946774543e16858e0e6c0777a06572373d41fd | 9650906d13ceff79351accb5e74fc3ce69f553cb | /functionPrediction/funcPredict.py | 973021128021c04fe91b1cfa36c2732800f2ce02 | [] | no_license | orangeYao/metaPlatformModules | f7798251611c3603531d2506317835c97ff399a8 | 53c6fbc1d9d7ad1e398933c3de6c474b298197c4 | refs/heads/master | 2021-01-18T16:37:13.373276 | 2020-09-06T23:11:57 | 2020-09-06T23:11:57 | 73,464,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,549 | py | import numpy as np
import pandas as pd
import sys, os
import csv
import time
import scipy
from scipy import linalg
from pandas import Series
sys.path.append(os.path.join(os.path.dirname(__file__),'../../../'))
from NetworkAnalysisPackage.database import Raw_Data
from NetworkAnalysisPackage.parameter import path2abundance
from NetworkAnalysisPackage.parameter import path2sampleInfo
from NetworkAnalysisPackage.parameter import numOfTrimmedData
from NetworkAnalysisPackage.preprocessing import filterByNumOfZeros
from numpy.linalg import inv
from NetworkAnalysisPackage.functions3 import conjugateGradient
from NetworkAnalysisPackage.functions3 import diagonalMatrix
from NetworkAnalysisPackage.functions3 import calculateMatrixA
from NetworkAnalysisPackage.functions3 import nodeLabelBias
from optparse import OptionParser
parser = OptionParser()
def funcPredict(filenameCorr, filenameB):
if filenameCorr != None:
corrcoef = pd.read_csv(filenameCorr,index_col=0)
else:
data = Raw_Data(path2abundance)
info = Raw_Data(path2sampleInfo)
df1 = filterByNumOfZeros(data.df, numOfTrimmedData)
corrcoef = diagonalMatrix(df1)
##1. to find A = I - L = I -(D-W) = I - D + W
dimension = corrcoef.shape[0]
np_a = calculateMatrixA(corrcoef, dimension)
##2. to find b from input list
if filenameB != None:
with open(filenameB, 'r') as f:
gene_list = [int(line.rstrip('\n')) for line in f]
else:
gene_list = corrcoef.index.tolist()[20:25] #simulate input list temporarily
nega_gene_list = []
label_bias_b = nodeLabelBias(gene_list, nega_gene_list, dimension, corrcoef.index)
    ##3. to find x = inv(A)*b, i.e. solve A*x = b for the label-propagation scores
if np.linalg.det(np_a) != 0:
label_prop_score2 = np.linalg.solve(np_a, label_bias_b)
df_out = pd.DataFrame(label_prop_score2,index = corrcoef.index.tolist())
return df_out
else:
print "pseudo inverse of A is used instead"
label_prop_score2 = np.linalg.pinv(np_a).dot(label_bias_b)
df_out = pd.DataFrame(label_prop_score2,index = corrcoef.index.tolist())
return df_out
if __name__ == "__main__":
parser.add_option("-c", "--fileCorr", dest="filenameCorr", help="open correlation file", metavar="FILE")
parser.add_option("-b", "--fileB", dest="filenameB", help="open functional file", metavar="FILE")
parser.add_option("-o", "--fileOut", dest="filenameOut", help="output file name", metavar="FILE", default="sampleOut.csv")
(options, args) = parser.parse_args()
df_out = funcPredict(options.filenameCorr, options.filenameB)
print df_out
df_out.to_csv(options.filenameOut)
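# Hedged usage example (file names are illustrative): -c is a precomputed correlation matrix
# CSV, -b a newline-separated list of seed gene ids, -o the output scores CSV.
#   python funcPredict.py -c corrcoef.csv -b seed_genes.txt -o scores.csv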
| [
"[email protected]"
] | |
cdaaaa9355251aee17ac84f8757eb7d40502dae4 | 3b2281080c37bc53896ef99867786c6650ddd492 | /main.py | 4e6e31c4b5ba6b4e77da9eebc1e2b978bd201c44 | [] | no_license | dongzhuoyao/gd | ceabbb113a3b6e175589fcde9cf4aa8f9c29980c | 58966bb121fd4dbab3993e25b331eefa44a416a1 | refs/heads/master | 2021-01-22T23:05:29.244013 | 2017-05-30T04:55:20 | 2017-05-30T04:55:20 | 92,799,324 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,140 | py | from load_mnist import read
from random import shuffle,randint
from collections import deque
import numpy as np
import math
import pickle,random
batch_size = 128
validate_interval = 4
#0.001,128 perfect!
momentum_rho = 0.9
backtracing_step = 0.94
bb_lower_bound = 1e-10
bb_upper_bound = 1e20
bb_sigma = 0.93
bb_reseted_step_size = 0.0001
bb_M = 18
bb_M_deque = deque(maxlen=bb_M)
train_imgs,train_labels,val_imgs,val_labels = read(dataset="training")
test_imgs,test_labels = read(dataset="testing")
print("training_imgs: {}".format(len(train_imgs)))
print("validation_imgs: {}".format(len(val_imgs)))
print("testing_imgs: {}".format(len(test_imgs)))
train_batch_num = int(math.floor(len(train_imgs)/batch_size))
val_batch_num = int(math.floor(len(val_imgs)/batch_size))
test_batch_num = int(math.floor(len(test_imgs)/batch_size))
print("train_batch_num: {}".format(train_batch_num))
print("val_batch_num: {}".format(val_batch_num))
print("test_batch_num: {}".format(test_batch_num))
w = np.random.normal(0,0.01,(785,))
g_history = np.zeros((785,))
w_history = np.zeros((785,))
alpha_history = 0
###SVG
svg_gradient_history = np.zeros((train_batch_num,785))
###SVRG
svrg_w_history = np.zeros((785,))
svrg_m = batch_size
import os
#1,modify name,2,modify method
output_name = os.path.join("result","SAGA-backtracking-l2")
optimize_method ="svg"
regulation_type = "l2"
regulation = 1e-4
step_size = 0.01
record_terminal = 1
epoch_num = 30
effective_pass = 25
#sgd,sag,saga,svrg,
def optimize(epoch_index,batch_index_one_epoch, total_batch_num, xs, ys):
#exponential_decay
loss = calculate_loss_with_regulation(xs, ys, w)
#sgd(xs,ys,epoch_index,step_size_strategy = "backtracking")
sag(epoch_index,batch_index_one_epoch, total_batch_num, xs, ys, sub_mod="saga",step_size_strategy="backtracking")
#sgd(xs, ys, epoch_index,step_size_strategy="backtracking")
#svrg(batch_index_one_epoch, total_batch_num, xs, ys, lr=0.01)
#momentum(xs, ys, epoch_index,nesterov=True, step_size_strategy="backtracking")
return loss
def calculate_grad_with_regularization000(xs, ys, w):
gradient = 0
for i in range(batch_size):
gradient += (-np.exp(-ys[i] * np.inner(w, xs[i])) * (ys[i] * xs[i]) / (1 + np.exp(-ys[i] * np.inner(w, xs[i]))))
gradient /= batch_size
if regulation_type=="l1":
gradient += regulation * np.sign(w)
elif regulation_type=="l2":
gradient += regulation * w
else:
print("error regulation type")
exit()
return gradient
def calculate_grad_with_regularization(xs, ys, w):
new_w = w.reshape(785,1)
e = np.exp(np.multiply(-ys,np.dot(xs,new_w)))
left = np.divide(e,1+e)
left_more = np.transpose(np.multiply(-ys,left))
gradient = np.dot(left_more,xs)/batch_size
gradient = np.transpose(gradient)
gradient = np.squeeze(gradient)
if regulation_type == "l1":
gradient += regulation * np.sign(w)
elif regulation_type == "l2":
gradient += regulation * w
else:
print("error regulation type")
exit()
return gradient
def calculate_loss_with_regulation(xs, ys, w):
new_w = w.reshape(785, 1)
aa = -ys
bb = np.dot(xs,new_w)
#print("aver: {}".format(np.average(np.exp(np.multiply(aa,bb)))))
loss = np.log(1+np.exp(np.multiply(aa,bb)))
#print("loss: {}".format(loss))
loss = np.average(loss)
if regulation_type=="l1":
loss += regulation * np.linalg.norm(w, 1)
elif regulation_type=="l2":
loss += regulation * np.linalg.norm(w, 2)
else:
print("error regulation type")
exit()
return loss
def calculate_loss_with_regulation00000(xs, ys, w):
# calculate loss
loss = 0
for i in range(batch_size):
loss += np.log(1 + np.exp(-ys[i] * np.inner(w, xs[i])))
loss /= batch_size
if regulation_type=="l1":
loss += regulation * np.linalg.norm(w, 1)
elif regulation_type=="l2":
loss += regulation * np.linalg.norm(w, 2)
else:
print("error regulation type")
exit()
return loss
def svrg(batch_index_one_epoch,total_batch_num,xs,ys,lr=0.01):
global w,g_history,w_history,alpha_history,svrg_w_history
_tilde_w = svrg_w_history
_tilde_mu = 0
for ii in range(total_batch_num):
current_xs = train_imgs[ii * batch_size:(ii + 1) * batch_size]
current_ys = train_labels[ii * batch_size:(ii + 1) * batch_size]
_tilde_mu +=calculate_grad_with_regularization(current_xs,current_ys,w)
_tilde_mu /= total_batch_num
_w_old = _tilde_w
for i in range(svrg_m):
f_s = randint(0,total_batch_num-1)
#print np.inner(_w_old, xs[_i_t])
current_xs = train_imgs[f_s*batch_size:(f_s+1)*batch_size]
current_ys = train_labels[f_s * batch_size:(f_s + 1) * batch_size]
_w = _w_old -lr*(
calculate_grad_with_regularization(current_xs,current_ys,_w_old)-calculate_grad_with_regularization(current_xs,current_ys,_tilde_w)+_tilde_mu
)
_w_old = _w
#_w += regulation * np.sign(_w)
w = _w
svrg_w_history = _w
def sag(epoch_index,batch_index_one_epoch,total_batch_num,xs,ys,step_size_strategy = "backtracking",sub_mod="sag"):
global w,g_history,w_history,alpha_history
current_g = calculate_grad_with_regularization(xs, ys, w)
if sub_mod=="sag":
g = (current_g - svg_gradient_history[batch_index_one_epoch,:])/total_batch_num
elif sub_mod=="saga":
g = (current_g - svg_gradient_history[batch_index_one_epoch, :])
else:
print("invalid svg method: {}".format(sub_mod))
exit()
for ii in range(total_batch_num):
g += svg_gradient_history[ii,:]/total_batch_num
#backtracking line search
cur_sz = 1
    # <<Optimization theory and methods - nonlinear programming by Wenyu Sun, Ya-xiang Yuan>>, Backtracking line search
if step_size_strategy =="bb":
cur_loss = calculate_loss_with_regulation(xs, ys, w)
if alpha_history == 0:
alpha = 1/bb_reseted_step_size
else:
alpha = alpha_history
#if alpha < bb_lower_bound or alpha > bb_upper_bound:
# alpha = 1/bb_reseted_step_size
if alpha < bb_lower_bound:
alpha = bb_lower_bound
if alpha > bb_upper_bound:
alpha = bb_upper_bound
#print("bb step size out of boundry,reset step size to {}".format(bb_reseted_step_size))
bb_M_deque.append(cur_loss)
tmp_list = list(bb_M_deque)
tmp_list.sort()
max_M = tmp_list[-1]
while calculate_loss_with_regulation(xs, ys, w-alpha*g) > max_M - 0.5*alpha*np.inner(g, g):
#update alpha
#current_sigma = random.uniform(bb_sigma1, bb_sigma2)
alpha = bb_sigma* alpha
bb_step = alpha
#print("bb_step: {}".format(bb_step))
w += (-bb_step*g)
#calculate new alpha
current_yk = calculate_grad_with_regularization(xs, ys, w) - g
alpha_history = - (alpha*np.inner(g,g))/np.inner(g,current_yk)
elif step_size_strategy=="backtracking":
while calculate_loss_with_regulation(xs, ys, w-cur_sz*g) > calculate_loss_with_regulation(xs, ys, w) -cur_sz*math.pow(np.linalg.norm(g, 2), 2)/2:
cur_sz = cur_sz * backtracing_step
#print cur_sz
elif step_size_strategy == "exponential_decay":
cur_sz = 1 * math.pow(0.9, epoch_index / 3)
else:
cur_sz = step_size
w_history = np.copy(w)
w += (-cur_sz*g)
g_history = np.copy(g)
def sgd(xs,ys,epoch_index,step_size_strategy = "backtracking",momentum=0):
global w,g_history,w_history,alpha_history
g = calculate_grad_with_regularization(xs, ys, w)
#backtracking line search
cur_sz = 1
    # <<Optimization theory and methods - nonlinear programming by Wenyu Sun, Ya-xiang Yuan>>, Backtracking line search
if step_size_strategy =="bb":
cur_loss = calculate_loss_with_regulation(xs, ys, w)
if alpha_history == 0:
alpha = 1/bb_reseted_step_size
else:
alpha = alpha_history
#print("bb step size out of boundry,reset step size to {}".format(bb_reseted_step_size))
bb_M_deque.append(cur_loss)
tmp_list = list(bb_M_deque)
tmp_list.sort()
max_M = tmp_list[-1]
i=0
while calculate_loss_with_regulation(xs, ys, w-alpha*g) > max_M - 0.5*alpha*np.inner(g, g):
#update alpha
#current_sigma = random.uniform(bb_sigma1, bb_sigma2)
i += 1
alpha = bb_sigma* alpha
#print i
bb_step = alpha
#print("bb_step: {}".format(bb_step))
w += (-bb_step*g)
#calculate new alpha
current_yk = calculate_grad_with_regularization(xs, ys, w) - g
alpha_history = - (alpha*np.inner(g,g))/np.inner(g,current_yk)
if alpha_history < bb_lower_bound:
alpha_history = bb_lower_bound
if alpha_history > bb_upper_bound:
alpha_history = bb_upper_bound
elif step_size_strategy=="backtracking":
while calculate_loss_with_regulation(xs, ys, w-cur_sz*g) > calculate_loss_with_regulation(xs, ys, w) -cur_sz*math.pow(np.linalg.norm(g, 2), 2)/2:
cur_sz = cur_sz * backtracing_step
#print cur_sz
elif step_size_strategy=="exponential_decay":
cur_sz = 1*math.pow(0.9,epoch_index/3)
else:
cur_sz = step_size
#print("epoch num:{} ,current lr: {}".format(epoch_index,cur_sz))
w_history = np.copy(w)
w += (-cur_sz*g)
g_history = np.copy(g)
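# Hedged illustration (not called by the training loop): the Barzilai-Borwein step used in
# sgd() above. With step s_k = -alpha*g_k and y_k = g_{k+1} - g_k, the BB1 step size
# <s_k, s_k>/<s_k, y_k> reduces to -alpha*<g_k, g_k>/<g_k, y_k>, which is exactly the
# alpha_history update computed after each weight update.
def _bb_step_size_demo(alpha, g_old, g_new):
    y_k = g_new - g_old
    return -(alpha * np.inner(g_old, g_old)) / np.inner(g_old, y_k)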
def momentum(xs, ys,epoch_index, nesterov=False,step_size_strategy = "backtracking"):
global g_history,w
# subgradient
gradient = calculate_grad_with_regularization(xs, ys, w)
# backtracking line search
cur_sz = 1
if step_size_strategy=="backtracking":
while 1:
            # <<Optimization theory and methods - nonlinear programming by Wenyu Sun, Ya-xiang Yuan>>, P108, Backtracking line search
if calculate_loss_with_regulation(xs, ys, w - cur_sz * gradient) > calculate_loss_with_regulation(xs, ys, w) - cur_sz * math.pow(np.linalg.norm(gradient, 2), 2) / 2:
cur_sz = cur_sz * backtracing_step
else:
break
# print("step size: {}".format(cur_sz))
elif step_size_strategy=="exponential_decay":
cur_sz = 1*math.pow(0.9,epoch_index/3)
else:
cur_sz = step_size
if nesterov:
to_be_added = (momentum_rho * g_history - cur_sz * calculate_grad_with_regularization(xs, ys, w + momentum_rho * g_history))
else:
to_be_added = (momentum_rho * g_history - cur_sz * gradient)
w += to_be_added
g_history = to_be_added
def predict(x):
f = np.inner(w,x)
if f > 0:
return 1
else:
return -1
def do_predict():
hit = 0
#do testing
for te in range(len(test_imgs)):
cur_result = predict(test_imgs[te])
if cur_result == test_labels[te]:
hit += 1
acc =hit*1.0/len(test_imgs)
print("test accuracy: {}".format(acc))
return acc
val_index = 0
result_np = np.zeros(shape=(6,effective_pass+1))
min_loss = 999
best_acc = 0
matplot_index = 0
for ep in range(epoch_num):
#shuffle(train_imgs)
batch_index = 0
for bn in range(train_batch_num):
batch_index += 1
loss = optimize(ep,bn,train_batch_num,train_imgs[bn * batch_size:(bn + 1) * batch_size], train_labels[bn * batch_size:(bn + 1) * batch_size])
if batch_index%100 == 0:
print("epoch {}, batch-{}-loss: {}".format(ep,bn,loss))
if matplot_index > effective_pass:
print("best acc: {}, min loss: {}".format(best_acc, min_loss))
# with open(output_name, 'wb') as fp:
# pickle.dump(result_dict, fp)
np.save(output_name, result_np)
exit()
print("cur: {}".format(matplot_index))
x_coordinate = matplot_index
matplot_index += 1
#train loss
result_np[0, x_coordinate] = x_coordinate
result_np[1, x_coordinate] = loss
validate_loss = optimize(ep,val_index,val_batch_num,val_imgs[val_index * batch_size:(val_index + 1) * batch_size],
val_labels[val_index * batch_size:(val_index + 1) * batch_size])
val_index = (val_index + 1 + val_batch_num) % val_batch_num
#validate loss
result_np[2, x_coordinate] = x_coordinate
result_np[3, x_coordinate] = validate_loss
print("******************epoch {}, batch-{}-validation loss: {}".format(ep, batch_index, validate_loss))
acc = do_predict()
#acc
result_np[4, x_coordinate] = x_coordinate
result_np[5, x_coordinate] = acc
if acc > best_acc:
best_acc = acc
min_loss = loss
| [
"[email protected]"
] | |
1deaad4805c969abcc631f4486a694f70fdfc0ff | ab9f89cf240bf40d433664cee9d1b787dfbf1e62 | /examples/simpleapp3/db.py | 79d9cfc048f6732bc3fe16c88b2648e15b390f31 | [] | no_license | JoshuaEnglish/rng2html | cb1a3d7f488cba14b43dda779f0146354765246d | e7b5cbc79782b2717f71e8e230f939671f90ff2b | refs/heads/master | 2021-01-20T00:20:07.506544 | 2017-04-26T14:45:13 | 2017-04-26T14:45:13 | 89,109,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import os
from lxml import etree
input_dir = os.path.join(os.path.split(__file__)[0], 'data')
if not os.path.exists(input_dir):
raise ValueError("Cannot find input path %s" % input_dir)
ADDRESSES = []
def get_addresses():
"""Return a list of dictionaries"""
doc = etree.parse(os.path.join(input_dir, "addressbook.xml"))
res = []
for card in doc.iter('card'):
res.append({'name': card.findtext('name'),
'email': card.findtext('email')})
return res
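# Hedged note (structure inferred from the element names used above): data/addressbook.xml
# is expected to look roughly like
#   <addressbook><card><name>...</name><email>...</email></card>...</addressbook>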
| [
"[email protected]"
] | |
76851f30920121808f27135f6b1e43c1ff41d451 | 2a1b8a671aceda6bc446f8ce26400aa84fa444a6 | /Packs/OpenCVE/Integrations/OpenCVE/OpenCVE_test.py | 9d13ee06875d5d4f6cf6158796358b6fe2f7090d | [
"MIT"
] | permissive | demisto/content | 6d4722d46f0ff0beea2748e9f7de585bf91a78b4 | 890def5a0e0ae8d6eaa538148249ddbc851dbb6b | refs/heads/master | 2023-09-04T00:02:25.618032 | 2023-09-03T21:56:22 | 2023-09-03T21:56:22 | 60,525,392 | 1,023 | 1,921 | MIT | 2023-09-14T20:55:24 | 2016-06-06T12:17:02 | Python | UTF-8 | Python | false | false | 17,051 | py | import pytest
from OpenCVE import *
from CommonServerPython import *
OPEN_CVE = OpenCVE(tlp="red")
CLIENT = Client(server_url='https://www.opencve.io/api/', verify=False, proxy=False, auth=False)
def util_load_json(path: str):
with open(path, encoding='utf-8') as f:
return json.loads(f.read())
def assert_nested_dicts_equal(input, expected):
"""Asserts a complex indicator structure from XSOAR (after `to_context()`)
Args:
input (dict): Input
expected (dict): Expected output
"""
if isinstance(input, dict) and isinstance(expected, dict):
assert set(input.keys()) == set(expected.keys()), "Keys in dictionaries are not equal."
for key in input:
assert_nested_dicts_equal(input[key], expected[key])
elif isinstance(input, list) and isinstance(expected, list):
try:
for node1, node2 in zip(sorted(input), sorted(expected)):
assert_nested_dicts_equal(node1, node2)
except TypeError:
sorted_list1 = sorted(input, key=lambda x: sorted(x.items()))
sorted_list2 = sorted(expected, key=lambda x: sorted(x.items()))
for node1, node2 in zip(sorted_list1, sorted_list2):
assert_nested_dicts_equal(node1, node2)
else:
assert input == expected, "Values in dictionaries are not equal."
test_cases = [
# Test case 1: Empty input nodes list
([], []),
# Test case 2: One node with no CPE matches
([{"children": [], "cpe_match": []}], []),
# Test case 3: One node with a vulnerable CPE match
([{"children": [], "cpe_match": [{"cpe23Uri": "cpe:2.3:a:vendor:product:version:*:*:*:*:*:*:*", "vulnerable": True}]}],
["cpe:2.3:a:vendor:product:version:*:*:*:*:*:*:*"]),
# Test case 4: Multiple nodes with multiple CPE matches
([
{"children": [], "cpe_match": [{"cpe23Uri": "cpe:2.3:a:vendor1:product1:1.0:*:*:*:*:*:*", "vulnerable": True}]},
{"children": [], "cpe_match": [{"cpe23Uri": "cpe:2.3:a:vendor1:product1:2.0:*:*:*:*:*:*", "vulnerable": True}]},
{"children": [], "cpe_match": [{"cpe23Uri": "cpe:2.3:a:vendor2:product2:3.0:*:*:*:*:*:*", "vulnerable": True}]}
],
[
"cpe:2.3:a:vendor1:product1:1.0:*:*:*:*:*:*",
"cpe:2.3:a:vendor1:product1:2.0:*:*:*:*:*:*",
"cpe:2.3:a:vendor2:product2:3.0:*:*:*:*:*:*"
]),
# Node with children
(
[{
"children": [
{
"children": [], "operator": "OR",
"cpe_match": [
{
"cpe23Uri": "cpe:2.3:o:siemens:sppa-t3000_ses3000_firmware:*:*:*:*:*:*:*:*",
"cpe_name": [],
"vulnerable": True
}
]
},
{
"children": [], "operator": "OR",
"cpe_match": [
{
"cpe23Uri": "cpe:2.3:h:siemens:sppa-t3000_ses3000:-:*:*:*:*:*:*:*",
"cpe_name": [],
"vulnerable": False
}
]
}
],
"operator": "AND", "cpe_match": []
}],
[
"cpe:2.3:o:siemens:sppa-t3000_ses3000_firmware:*:*:*:*:*:*:*:*"
]
),
# Real CVE test (CVE-2019-0708)
([
{"children": [], "operator": "OR", "cpe_match": [
{"cpe23Uri": "cpe:2.3:o:microsoft:windows_vista:-:sp2:*:*:*:*:*:*", "cpe_name": [], "vulnerable": True},
{"cpe23Uri": "cpe:2.3:o:microsoft:windows_server_2008:r2:sp1:*:*:*:*:x64:*", "cpe_name": [], "vulnerable": True},
{"cpe23Uri": "cpe:2.3:o:microsoft:windows_server_2008:r2:sp1:*:*:*:*:itanium:*", "vulnerable": True},
{"cpe23Uri": "cpe:2.3:o:microsoft:windows_server_2008:-:sp2:*:*:*:*:*:*", "cpe_name": [], "vulnerable": True},
{"cpe23Uri": "cpe:2.3:o:microsoft:windows_xp:-:sp2:*:*:professional:*:x64:*", "cpe_name": [], "vulnerable": True},
{"cpe23Uri": "cpe:2.3:o:microsoft:windows_xp:-:sp3:*:*:*:*:x86:*", "cpe_name": [], "vulnerable": True},
{"cpe23Uri": "cpe:2.3:o:microsoft:windows_server_2003:-:sp2:*:*:*:*:x86:*", "cpe_name": [], "vulnerable": True},
{"cpe23Uri": "cpe:2.3:o:microsoft:windows_server_2003:-:sp2:*:*:*:*:x64:*", "cpe_name": [], "vulnerable": True},
{"cpe23Uri": "cpe:2.3:o:microsoft:windows_server_2003:r2:sp2:*:*:*:*:*:*", "cpe_name": [], "vulnerable": True},
{"cpe23Uri": "cpe:2.3:o:microsoft:windows_7:-:sp1:*:*:*:*:*:*", "cpe_name": [], "vulnerable": True}
]
}
],
[
"cpe:2.3:o:microsoft:windows_server_2008:-:sp2:*:*:*:*:*:*",
"cpe:2.3:o:microsoft:windows_server_2003:-:sp2:*:*:*:*:x86:*",
"cpe:2.3:o:microsoft:windows_server_2008:r2:sp1:*:*:*:*:x64:*",
"cpe:2.3:o:microsoft:windows_xp:-:sp3:*:*:*:*:x86:*",
"cpe:2.3:o:microsoft:windows_7:-:sp1:*:*:*:*:*:*",
"cpe:2.3:o:microsoft:windows_server_2003:-:sp2:*:*:*:*:x64:*",
"cpe:2.3:o:microsoft:windows_server_2008:r2:sp1:*:*:*:*:itanium:*",
"cpe:2.3:o:microsoft:windows_xp:-:sp2:*:*:professional:*:x64:*",
"cpe:2.3:o:microsoft:windows_server_2003:r2:sp2:*:*:*:*:*:*",
"cpe:2.3:o:microsoft:windows_vista:-:sp2:*:*:*:*:*:*"
]
),
]
@pytest.mark.parametrize("nodes, expected", test_cases)
def test_parse_cpes(nodes, expected):
cpes = [cpe.cpe for cpe in parse_cpes(nodes)]
assert sorted(cpes) == sorted(expected)
@pytest.mark.parametrize("response, expected",
[(util_load_json('test_data/CVE-2019-0708.json'),
{'ID': 'CVE-2019-0708',
'CVSS Score': 9.8,
'Published': '2019-05-16T19:29:00Z',
'Modified': '2021-06-03T18:15:00Z',
'Description': "A remote code execution vulnerability exists in Remote Desktop Services formerly known as Terminal Services when an unauthenticated attacker connects to the target system using RDP and sends specially crafted requests, aka 'Remote Desktop Services Remote Code Execution Vulnerability'." # noqa: E501
}
)
]
)
def test_cve_to_warroom(response, expected):
parsed_cve = parse_cve(OPEN_CVE, response)
warroom_output = cve_to_warroom(parsed_cve)
assert warroom_output == expected
@pytest.mark.parametrize("input, expected", [("CVE-2021-44228", True),
("CVEM-2021-44228", False)])
def test_valid_cve_format(input, expected):
assert valid_cve_format(input) == expected
@pytest.mark.parametrize("response, expected", [(util_load_json('test_data/CVE-2019-0708.json'),
(util_load_json('test_data/parsed_Cve.json')))])
def test_parse_cve(response, expected):
parsed_cve = parse_cve(OPEN_CVE, response)
parsed_cve_relationships = [json.dumps(relationship.to_context()) for relationship in parsed_cve['fields']['relationships']]
expected_relationships = [json.dumps(relationship) for relationship in expected['fields']['relationships']]
assert sorted(parsed_cve_relationships) == sorted(expected_relationships)
assert parsed_cve['fields']['cvssvector'] == expected['fields']['cvssvector']
assert parsed_cve['fields']['cvssscore'] == expected['fields']['cvssscore']
@pytest.mark.parametrize("input, expected", [(util_load_json('test_data/CVE-2019-0708.json'),
["Windows vista", "Windows 7", "Windows server 2008",
"Windows server 2003", "Windows xp", "Microsoft", "CWE-416"])])
def test_parse_tags(input, expected):
relationships, tags = parse_tags(vendors=input['vendors'], cve_id=input['id'], cwes=input['cwes'])
assert sorted(tags) == sorted(expected)
@pytest.mark.parametrize("input, expected", [([{'value': 'CVE-2019-0708'}, {'value': 'CVE-2019-0708'}],
[{'value': 'CVE-2019-0708'}])])
def test_dedupe_cves(input, expected):
assert dedupe_cves(input) == expected
@pytest.mark.parametrize("input, expected", [(util_load_json('test_data/CVE-2019-0708.json'),
util_load_json('test_data/indicator.json'))])
def test_cve_to_indicator(input, expected):
parsed_cve = parse_cve(OPEN_CVE, input)
indicator = cve_to_indicator(ocve=OPEN_CVE, cve=parsed_cve)
assert_nested_dicts_equal(indicator.to_context(), expected)
@pytest.mark.parametrize("response, expected, args, mock_url",
[(util_load_json('test_data/reports_response.json'),
CommandResults(outputs=[{'id': 'KLMHU9EB4N8C',
'created_at': '2023-08-02T09:52:47Z',
'details': ['microsoft']},
{'id': 'NZSBGGBLW4TH',
'created_at': '2023-08-02T07:11:31Z',
'details': ['microsoft']}],
outputs_prefix='OpenCVE.Reports'),
{},
'https://www.opencve.io/api/reports'),
(util_load_json('test_data/single_report_response.json'),
CommandResults(outputs=util_load_json('test_data/single_report_response.json'),
outputs_prefix='OpenCVE.Reports.KLMHU9EB4N8C'),
{'report_id': 'KLMHU9EB4N8C'},
'https://www.opencve.io/api/reports/KLMHU9EB4N8C')])
def test_get_reports_command(response, expected, args, mock_url, requests_mock):
requests_mock.get(mock_url, json=response)
result = get_reports_command(CLIENT, args=args)
assert result.outputs == expected.outputs
assert result.outputs_prefix == expected.outputs_prefix
@pytest.mark.parametrize("response, args, expected, mock_url",
[(util_load_json('test_data/vendors_specific_vendor.json'),
{'vendor_name': 'paloaltonetworks'},
CommandResults(outputs=util_load_json('test_data/vendors_specific_vendor.json'),
outputs_prefix='OpenCVE.paloaltonetworks'),
'https://www.opencve.io/api/vendors/paloaltonetworks'),
(util_load_json('test_data/vendors.json'),
{'search': 'search', 'letter': 'a', 'page': 1},
CommandResults(outputs=util_load_json('test_data/vendors.json'),
outputs_prefix='OpenCVE.Vendors'),
'https://www.opencve.io/api/vendors')])
def test_get_vendors_command(response, args, expected, mock_url, requests_mock):
requests_mock.get(mock_url, json=response)
result = get_vendors_command(CLIENT, args=args)
assert result.outputs == expected.outputs
assert result.outputs_prefix == expected.outputs_prefix
@pytest.mark.parametrize("response, expected, mock_url",
[(util_load_json('test_data/my_vendors.json'),
CommandResults(outputs=util_load_json('test_data/my_vendors.json'),
outputs_prefix='OpenCVE.myVendors'),
'https://www.opencve.io/api/account/subscriptions/vendors')])
def test_get_my_vendors_command(response, expected, mock_url, requests_mock):
requests_mock.get(mock_url, json=response)
result = get_my_vendors_command(CLIENT)
assert result.outputs == expected.outputs
assert result.outputs_prefix == expected.outputs_prefix
@pytest.mark.parametrize("response, expected, mock_url",
[(util_load_json('test_data/my_products.json'),
CommandResults(outputs=util_load_json('test_data/my_products.json'),
outputs_prefix='OpenCVE.myProducts'),
'https://www.opencve.io/api/account/subscriptions/products')])
def test_get_my_products_command(response, expected, mock_url, requests_mock):
requests_mock.get(mock_url, json=response)
result = get_my_products_command(CLIENT)
assert result.outputs == expected.outputs
assert result.outputs_prefix == expected.outputs_prefix
# Tests that the method returns the correct value when the input needle is a key in self.maps
@pytest.mark.parametrize("input, expected", [('HIGH', 'High (H)'), ('REQUIRED', 'Required (R)'),
('TEMPORARY_FIX', 'Temporary Fix (T)'), ('Unknown_key', 'Unknown_key')])
def test_existing_key(input, expected):
obj = OpenCVE('white')
assert obj._map(input) == expected
@pytest.mark.parametrize("expected", [(['CVE-2021-28478', 'CVE-2021-26418'])])
def test_cve_latest_command(expected, requests_mock):
requests_mock.get('https://www.opencve.io/api/reports',
json=util_load_json('test_data/reports_response.json'))
requests_mock.get('https://www.opencve.io/api/reports/KLMHU9EB4N8C/alerts',
json=util_load_json('test_data/KLMHU9EB4N8C_alerts.json'))
requests_mock.get('https://www.opencve.io/api/reports/NZSBGGBLW4TH/alerts',
json=util_load_json('test_data/NZSBGGBLW4TH_alerts.json'))
requests_mock.get('https://www.opencve.io/api/cve/CVE-2021-28478',
json=util_load_json('test_data/CVE-2021-28478.json'))
requests_mock.get('https://www.opencve.io/api/cve/CVE-2021-26418',
json=util_load_json('test_data/CVE-2021-26418.json'))
result = cve_latest_command(CLIENT, OPEN_CVE, {'last_run': '2023-08-01T02:00:00'})
cves = [cve["value"] for cve in result.outputs]
assert all(cve in expected for cve in cves)
@pytest.mark.parametrize("args, mock_url, mock_json",
[({'report_id': 'KLMHU9EB4N8C',
'alert_id': '475fde88-00dc-4024-9499-8197e334dfe7'},
'https://www.opencve.io/api/reports/KLMHU9EB4N8C/alerts/475fde88-00dc-4024-9499-8197e334dfe7',
util_load_json(
'test_data/alert_475fde88-00dc-4024-9499-8197e334dfe7.json')),
({'report_id': 'KLMHU9EB4N8C'},
'https://www.opencve.io/api/reports/KLMHU9EB4N8C/alerts',
util_load_json('test_data/alerts_KLMHU9EB4N8C.json'))])
def test_get_alerts_command(args, mock_url, mock_json, requests_mock):
requests_mock.get(mock_url, json=mock_json)
alerts = get_alerts_command(CLIENT, args)
assert alerts.outputs == mock_json
def test_get_alert_failed_commad():
with pytest.raises(SystemExit):
get_alerts_command(CLIENT, {})
@pytest.mark.parametrize("args, mock_url, mock_json, expected",
[({'cve': 'CVE-2021-26418'}, 'https://www.opencve.io/api/cve/CVE-2021-26418',
util_load_json('test_data/CVE-2021-26418.json'),
util_load_json('test_data/get_cve_command_outputs.json'))])
def test_get_cve_command(args, mock_url, mock_json, expected, requests_mock):
requests_mock.get(mock_url, json=mock_json)
cve = get_cve_command(CLIENT, OPEN_CVE, args)
assert cve[0].outputs == expected
@pytest.mark.parametrize("response, expected", [({}, 'ok')])
def test_invalid_command_raises_error(mocker, requests_mock, response, expected):
mocker.patch.object(demisto, 'params', return_value={'url': 'https://www.opencve.io',
'insecure': False,
'proxy': False,
'tlp_color': 'RED',
'credentials': {'identifier': 'user',
'password': 'pass'}})
mocker.patch.object(demisto, 'command', return_value='test-module')
mocker.patch.object(demisto, 'results')
requests_mock.get('https://www.opencve.io/api/account/subscriptions/vendors', json=response)
main()
results = demisto.results.call_args[0]
assert results[0] == expected
def test_failed_request(mocker):
mocker.patch.object(demisto, "error")
with pytest.raises(Exception):
module_test_command(CLIENT)
| [
"[email protected]"
] | |
10258e113de5bf2fbe71add2fb02fc94a4d2821c | 21817098ce721b14b651bd1e828b93bdc49aed8b | /consumers/models/weather.py | 52f5c8dd59cd1196e5a2b4df9a075f6507b428dd | [] | no_license | mariamathewc/Data-Streaming-Kafka | 0e442c98f0acd75e2ae55b860f9b6cd9f06c0317 | 541d3635f794e00310c1052ced11a234ddd9a52d | refs/heads/master | 2023-03-12T04:57:10.935586 | 2021-02-28T03:50:05 | 2021-02-28T03:50:05 | 340,139,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | """Contains functionality related to Weather"""
import json
import logging
logger = logging.getLogger(__name__)
class Weather:
"""Defines the Weather model"""
def __init__(self):
"""Creates the weather model"""
self.temperature = 70.0
self.status = "sunny"
def process_message(self, message):
"""Handles incoming weather data"""
#logger.info("weather process_message is incomplete - skipping")
#
#
# TODO: Process incoming weather messages. Set the temperature and status.
#
#
logger.info("processing weather logger")
try:
value = json.loads(json.dumps(message.value()))
self.temperature = value.get("temperature")
self.status = value.get("status")
except Exception as e:
logger.debug("erro while processed weather message")
logger.debug(
"weather is now %sf and %s", self.temperature, self.status.replace("_", " ")
)
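# Illustrative example of the payload this model expects (an assumption for
# clarity -- the exact field names come from whatever the producer emits,
# which is not shown in this file):
#
#   message.value() -> {"temperature": 68.3, "status": "partly_cloudy"}
#
# After process_message(), weather.temperature == 68.3 and
# weather.status == "partly_cloudy", logged as
# "weather is now 68.3f and partly cloudy".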
| [
"[email protected]"
] | |
3eebba4bd76e487d2c272eb7227087b8e5a81049 | cf587606e449151b23c6c89ff34842ced4f9039a | /apps/common/jwt/urls.py | 34936dcd6c7d833e2af77255f1e21302006c6f7d | [] | no_license | NoCLin/django-backplane | 2400faac8775d1ec24b1d30b180e02fcc9182b71 | df714e0fb7fc96a258f69da3137451d726886ee5 | refs/heads/master | 2020-06-20T14:08:24.180368 | 2019-07-09T11:24:10 | 2019-07-09T11:24:10 | 197,146,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | from django.conf.urls import url
from rest_framework_jwt.serializers import JSONWebTokenSerializer, VerifyJSONWebTokenSerializer, \
RefreshJSONWebTokenSerializer
from rest_framework_jwt.views import JSONWebTokenAPIView
# Subclass the JWT views only to override their docstrings (used by Swagger for the endpoint descriptions).
class ObtainJSONWebToken(JSONWebTokenAPIView):
"""
    Obtain a JWT with a username and password.
"""
serializer_class = JSONWebTokenSerializer
class VerifyJSONWebToken(JSONWebTokenAPIView):
"""
    Verify a JWT.
"""
serializer_class = VerifyJSONWebTokenSerializer
class RefreshJSONWebToken(JSONWebTokenAPIView):
"""
    Refresh a JWT.
"""
serializer_class = RefreshJSONWebTokenSerializer
obtain_jwt_token = ObtainJSONWebToken.as_view()
refresh_jwt_token = RefreshJSONWebToken.as_view()
verify_jwt_token = VerifyJSONWebToken.as_view()
urlpatterns = [
url(r'^api/auth/jwt/login/$', obtain_jwt_token, name='login token'),
url(r'^api/auth/jwt/verify/$', verify_jwt_token, name='verify token'),
url(r'^api/auth/jwt/refresh/$', refresh_jwt_token, name='refresh token'),
]
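# Illustrative request flow (a sketch only; the payload field names follow
# django-rest-framework-jwt defaults and are assumptions, not taken from this
# project's settings):
#
#   POST /api/auth/jwt/login/    {"username": "...", "password": "..."}  ->  {"token": "<jwt>"}
#   POST /api/auth/jwt/verify/   {"token": "<jwt>"}                      ->  {"token": "<jwt>"}
#   POST /api/auth/jwt/refresh/  {"token": "<jwt>"}                      ->  {"token": "<new jwt>"}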
| [
"[email protected]"
] | |
720061c7fb2366a8990af603a11bf4f929a956f2 | 61d08e23fbb62e16f7bd9d43673b1cf4e0558c37 | /miraScripts/mayaTools/lighting_tool/OF/lighting_UI/assign_hair_shader.py | 5fe0b842a5cd0906e239f5fa9ecf35de98a83c7d | [] | no_license | jonntd/mira | 1a4b1f17a71cfefd20c96e0384af2d1fdff813e8 | 270f55ef5d4fecca7368887f489310f5e5094a92 | refs/heads/master | 2021-08-31T12:08:14.795480 | 2017-12-21T08:02:06 | 2017-12-21T08:02:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,731 | py | # coding utf-8
# __author__ = "heshuai"
# description=""" """
from Qt.QtWidgets import *
from Qt.QtCore import *
from Qt.QtGui import *
import maya.cmds as mc
import pymel.core as pm
import public_ctrls
import os
from get_parent_dir import get_parent_dir
class AssignHairShader(QDialog):
def __init__(self, parent=None):
super(AssignHairShader, self).__init__(parent)
# y_pos = public_ctrls.get_maya_main_win_pos()[1] + (public_ctrls.get_maya_main_win_size()[1])/4
# self.move(public_ctrls.get_maya_main_win_pos()[0], y_pos)
self.setWindowTitle('Assign Hair Shader')
self.parent_dir = get_parent_dir()
self.resize(500, 300)
main_layout = QVBoxLayout(self)
label_layout = QHBoxLayout()
label = QLabel()
        label.setText('<font color="#00FF00" size=4><b>These hairs have no shader</b> </font>')
self.update_btn = QToolButton()
self.update_btn.setIcon(QIcon(os.path.join(self.parent_dir, 'icons', 'button_icons', 'update.png')))
self.update_btn.setStyleSheet('QToolButton{background: transparent}')
label_layout.addWidget(label)
label_layout.addWidget(self.update_btn)
self.list_widget = QListWidget()
self.list_widget.setSelectionMode(QListWidget.ExtendedSelection)
self.list_widget.setSortingEnabled(True)
self.list_widget.setSpacing(1)
button_layout = QHBoxLayout()
self.check_box = QCheckBox('Maya')
        self.diselect_all_btn = QPushButton('Deselect All')
self.diselect_all_btn.setStyleSheet('QPushButton{color:#CCCCCC; background-color: #222222}')
self.select_shader_btn = QPushButton('Select Shader')
self.select_shader_btn.setStyleSheet('QPushButton{color:#CCCCCC; background-color: #222222}')
self.assign_btn = QPushButton('Assign')
self.assign_btn.setStyleSheet('QPushButton{color:#CCCCCC; background-color: #222222}')
button_layout.addWidget(self.check_box)
button_layout.addStretch()
button_layout.addWidget(self.diselect_all_btn)
button_layout.addWidget(self.select_shader_btn)
button_layout.addWidget(self.assign_btn)
main_layout.addLayout(label_layout)
main_layout.addWidget(self.list_widget)
main_layout.addLayout(button_layout)
self.init_settings()
self.set_background()
self.set_signals()
def init_settings(self):
all_shave_hair = mc.ls(type='shaveHair') + mc.ls(type='hairSystem')
no_shader_hair = [hair for hair in all_shave_hair if not pm.PyNode(hair).aiHairShader.connections()]
for hair in no_shader_hair:
item = QListWidgetItem(hair)
item.setIcon(QIcon(os.path.join(self.parent_dir, 'icons/main_icons', 'shaveShader.png')))
self.list_widget.addItem(item)
def set_signals(self):
self.list_widget.itemSelectionChanged.connect(self.set_select)
self.update_btn.clicked.connect(self.update)
self.diselect_all_btn.clicked.connect(self.diselect_all)
self.select_shader_btn.clicked.connect(self.select_shader)
self.assign_btn.clicked.connect(self.assign_shader)
def set_background(self):
image_path = os.path.join(self.parent_dir, 'icons', 'background_icons', 'tx.png')
self.image = QImage(image_path)
palette = QPalette()
palette.setBrush(QPalette.Background, QBrush(self.image.scaled(self.size(), Qt.IgnoreAspectRatio, Qt.SmoothTransformation)))
self.setPalette(palette)
def resizeEvent(self, event):
palette = QPalette()
palette.setBrush(QPalette.Background, QBrush(self.image.scaled(event.size(), Qt.IgnoreAspectRatio, Qt.SmoothTransformation)))
self.setPalette(palette)
def set_select(self):
for item in self.list_widget.selectedItems():
mc.select(str(item.text()), add=1)
def diselect_all(self):
for i in xrange(self.list_widget.count()):
self.list_widget.item(i).setSelected(False)
def update(self):
self.list_widget.clear()
self.init_settings()
def select_shader(self):
self.sender().setStyleSheet('QPushButton{color: #00FF00; font-size: 15px; background-color: #300000}')
select_shader = pm.ls(sl=1)
if len(select_shader) == 1 and select_shader[0].type() in pm.listNodeTypes('shader'):
self.select_shader_btn.setText(select_shader[0].name())
def get_maya_select_list(self):
selected_objects = []
if pm.ls(sl=1):
for i in pm.ls(sl=1):
if i.type() in ['shaveHair', 'hairSystem']:
selected_objects.append(i)
else:
if i.type() == 'transform':
children = pm.ls(i, ap=1, dag=1, lf=1)
for child in children:
if child.type() in ['shaveHair', 'hairSystem']:
selected_objects.append(child)
return selected_objects
def get_list_widget_items(self):
return [str(item.text()) for item in self.list_widget.selectedItems()]
def assign_shader(self):
if self.check_box.isChecked():
shave_hairs = list(set(self.get_maya_select_list()))
else:
shave_hairs = self.get_list_widget_items()
shave_hairs = list(set(shave_hairs))
if shave_hairs:
if self.select_shader_btn.text() != 'Select Shader':
shader = pm.PyNode(str(self.select_shader_btn.text()))
for hair in shave_hairs:
pm.PyNode(hair).aiOverrideHair.set(1)
shader.outColor >> pm.PyNode(hair).aiHairShader
print "[OF] info %s.outColor --------> %s.aiHairShader" % (str(self.select_shader_btn.text()), hair)
mc.confirmDialog(title='Confirm', message='connect successful', button='OK', cancelButton='OK', icon='information')
else:
mc.confirmDialog(title='Confirm', message='Please select a shader', button='OK', cancelButton='OK',icon='information')
print "[OF] info: Please select a shader"
else:
mc.confirmDialog(title='Confirm', message='Please select at least one hair', button='OK', cancelButton='OK',icon='information')
print "[OF] info: Please select at least one shave hair"
self.update()
def mousePressEvent(self, event):
if event.button() == Qt.RightButton:
self.close()
def run():
global ahs
try:
ahs.close()
ahs.deleteLater()
    except Exception:
        pass
ahs = AssignHairShader(public_ctrls.get_maya_win())
ahs.show()
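# Illustrative usage (a sketch, assuming this module is importable from Maya's
# script editor; the import name below is a placeholder for however the tool
# is exposed on your script path):
#
#   import assign_hair_shader
#   assign_hair_shader.run()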
| [
"[email protected]"
] | |
f8d06f82a47a63864faac262ec17df4e666de00a | 45832e9851e2bd25a4d0f7429cda8fee43394039 | /lib/scroll_bgmanager.py | 3fcfa771193971012007555d47359ea7a2825e38 | [] | no_license | pdevine/subterraneman | 116853817459ee041490b140d2a230c72143910a | d10bf8dee241110a227d69d9d4b82d833d2731a6 | refs/heads/master | 2021-01-23T02:59:16.738577 | 2015-03-13T17:01:01 | 2015-03-13T17:01:01 | 32,167,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,720 | py | # Authour: Shandy Brown <[email protected]>
# License: LGPL
# Version: 0.1
#Import Modules
import os, pygame, operator, math
from copy import copy
from pygame.locals import *
import data
from logging import debug as log_debug
DEBUG = 1
def vectorSum( v1, v2 ):
return [ v1[0]+v2[0], v1[1]+v2[1] ]
#-----------------------------------------------------------------------------
class BackgroundManager:
def __init__(self, screen, background=None):
self.screen = screen
if not background:
self.image = data.pngs['game_background']
else:
self.image = background
self.rect = self.image.get_rect()
self.offset = [0,0]
self.dirty = 0
self.newlyExposedArea = [ None, None ]
screenRect = self.screen.get_rect()
self.srcRect = Rect( self.offset[0], self.offset[1],
screenRect.width, screenRect.height )
#----------------------------------------------------------------------
def BlitSelf( self, surface ):
"""This is called when self.dirty is true and surface is usually
the main pygame display surface"""
self.srcRect.topleft = self.offset[0], self.offset[1]
surface.blit( self.image, (0,0), self.srcRect )
#----------------------------------------------------------------------
def GetBgSurface(self, drawToSurface, dirtyRect):
"""This is the function that is normally passed to
RenderUpdates.clear as the "bgd" argument. It gets passed
the surface which we should draw to, and the dirty part of
that surface, which should be cleared"""
#copy the dirtyRect
srcRect = dirtyRect.move(0,0)
#move the srcRect to my current offset
srcRect.topleft = vectorSum(srcRect.topleft, self.offset)
#blit to the target surface
drawToSurface.blit( self.image, dirtyRect, srcRect )
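    # Illustrative use with a pygame sprite group (a sketch; the variable names
    # are placeholders, not taken from this module):
    #
    #   bgMgr = BackgroundManager(screen)
    #   sprites = pygame.sprite.RenderUpdates()
    #   sprites.clear(screen, bgMgr.GetBgSurface)   # erase last frame's sprites
    #   dirty = sprites.draw(screen)
    #   pygame.display.update(dirty)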
#----------------------------------------------------------------------
def GetBackground(self):
return self.image
#----------------------------------------------------------------------
def RectIsOutOfBounds( self, physRect ):
"""Returns a list indicating which parts of the physical rect
are out of bounds. An empty result means the physical rect
is not out of bounds."""
result = []
if physRect.left > self.rect.right:
result.append( 'right' )
if physRect.top > self.rect.bottom:
result.append( 'bottom' )
if physRect.right < self.rect.left:
result.append( 'left' )
if physRect.bottom < self.rect.top:
result.append( 'top' )
return result
#----------------------------------------------------------------------
def GetDisplayCenter( self, physRect ):
return (physRect.centerx - self.offset[0],
physRect.centery - self.offset[1] )
#----------------------------------------------------------------------
def GetOffsetScreenRect( self ):
screenRect = self.screen.get_rect().move( self.offset[0],
self.offset[1] )
return screenRect
#----------------------------------------------------------------------
def SpriteIsVisible( self, physRect ):
screenRect = self.GetOffsetScreenRect()
return screenRect.colliderect( physRect )
#----------------------------------------------------------------------
def CalculateNewlyExposedArea( self, oldOffset ):
"""the newly exposed area is (at most) two rectangles:
the exposed Y rect and the exposed X block
note: a new algorithm will be needed if the screen
scrolls more than screenRect.width in one step"""
#oScreenRect is the rect with the dimensions of the viewport
oScreenRect = self.GetOffsetScreenRect()
xBlockWidth = abs( self.offset[0] - oldOffset[0] )
if oldOffset[0] < self.offset[0]:
#new area exposed to the left
xPos = oScreenRect.right - xBlockWidth
else:
#new area exposed to the right
xPos = self.offset[0]
xBlock = Rect( xPos,
self.offset[1],
xBlockWidth,
oScreenRect.height )
yBlockHeight = abs( self.offset[1] - oldOffset[1] )
if oldOffset[1] < self.offset[1]:
#new area exposed to the top
yPos = oScreenRect.bottom - yBlockHeight
else:
#new area exposed to the bottom
yPos = self.offset[1]
yBlock = Rect( self.offset[0],
yPos,
oScreenRect.width,
yBlockHeight )
self.newlyExposedArea = [ xBlock, yBlock ]
#----------------------------------------------------------------------
def NotifyPlayerSpritePos( self, physRect ):
"""Takes the rect of the sprite that the player is controlling
(the player's avatar) and determines if we need to scroll
the background
By default, the center has 100 pixels of grace."""
s_o = self.offset
oldOffset = copy( s_o )
#oScreenRect is the rect with the dimensions of the viewport
oScreenRect = self.GetOffsetScreenRect()
self.dirty = 0
#take the avatar's absolute position and get the on-screen pos
avatarLeft = int( physRect.left )
avatarRight = int( physRect.right )
avatarTop = int( physRect.top )
avatarBottom = int( physRect.bottom )
#when in the center, the player can move 100 pixels in any
#direction without scrolling happening. This saves some
#processing and gives the game a pleasant feel
leftScrollTrig = oScreenRect.centerx - 100
rightScrollTrig = oScreenRect.centerx + 100
topScrollTrig = oScreenRect.centery - 100
bottomScrollTrig = oScreenRect.centery + 100
#minXOffset = self.rect.right
minXOffset = 0
maxXOffset = self.rect.right - oScreenRect.width
minYOffset = 0
maxYOffset = self.rect.bottom - oScreenRect.height
if avatarRight > rightScrollTrig \
and s_o[0] < maxXOffset:
s_o[0] = min( maxXOffset,
s_o[0] + avatarRight - rightScrollTrig )
self.dirty = 1
elif avatarLeft < leftScrollTrig \
and s_o[0] > minXOffset:
s_o[0] = max( minXOffset,
s_o[0] + avatarLeft - leftScrollTrig )
self.dirty = 1
if avatarBottom > bottomScrollTrig \
and s_o[1] < maxYOffset:
s_o[1] = min( maxYOffset,
s_o[1] + avatarBottom-bottomScrollTrig )
self.dirty = 1
elif avatarTop < topScrollTrig \
and s_o[1] > minYOffset:
s_o[1] = max( minYOffset,
s_o[1] + avatarTop - topScrollTrig )
self.dirty = 1
if self.dirty:
#log_debug( 'bgmanager: self was dirty' )
self.CalculateNewlyExposedArea( oldOffset )
return self.GetDisplayCenter( physRect )
#-----------------------------------------------------------------------------
class SeamedBackgroundManager(BackgroundManager):
def __init__(self, screen):
BackgroundManager.__init__(self, screen)
# seams are a list of lines (rects, really) where the current
# background joins with another background
        # to set a One-Way seam, have it exist in part of the preceding
        # background but not exist in any part of the successive one
self.seams = [
[ 'backgr','backgr2', Rect( 2980,699, 900, 2 ) ],
[ 'backgr2','backgr3', Rect( 2979,800, 2, 900 ) ]
]
self.bgPositions = {
'backgr': [0,0],
'backgr2': [2980,700],
'backgr3': [-900,800],
}
self.backgrounds = {
'backgr': [self.image,self.rect],
}
self.currentBg = 'backgr'
#----------------------------------------------------------------------
def LoadBackground( self, fname ):
if fname in self.backgrounds:
return
img = data.pngs[fname]
rect = img.get_rect()
rect.move_ip( *self.bgPositions[fname] )
self.backgrounds[ fname ] = [img, rect]
#print 'just loaded', img, rect
#----------------------------------------------------------------------
def BlitSelf( self, surface ):
"""This is called when self.dirty is true and surface is usually
the main pygame display surface"""
screenRect = self.GetOffsetScreenRect()
for name, bg in self.backgrounds.items():
bgImg = bg[0]
bgRect = bg[1]
if screenRect.colliderect( bgRect ):
clipRect = screenRect.clip( bgRect )
x = clipRect.x - self.offset[0]
y = clipRect.y - self.offset[1]
clipRect.x -= bgRect.x
clipRect.y -= bgRect.y
surface.blit( bgImg, (x,y), clipRect )
if DEBUG:
for seam in self.seams:
img = pygame.Surface( [seam[2][2], seam[2][3]])
img.fill( (255,0,0) )
x,y = seam[2][0], seam[2][1]
x -= self.offset[0]
y -= self.offset[1]
surface.blit( img, (x,y) )
#----------------------------------------------------------------------
def GetBgSurface(self, drawToSurface, clearRect):
"""This is the function that is normally passed to
RenderUpdates.clear as the "bgd" argument. It gets passed
the surface which we should draw to, and the rect which should
be cleared"""
#The clearRect portion of drawToSurface has junk on it.
#We are expected to paint over the junk with the background.
#The background could be part of any of the stitched bgs, or
#it could be outside the bounds and therefore it should be
#black.
screenRect = self.GetOffsetScreenRect()
#copy the clearRect and move the rectToClear to
#my current offset
rectToClear = clearRect.move(screenRect.x,screenRect.y)
if not rectToClear.colliderect( screenRect ):
#print 'dont need to clear'
return
for name, bg in self.backgrounds.items():
bgImg = bg[0]
bgRect = bg[1]
if rectToClear.colliderect( bgRect ):
#intersect is the intersection of the screenRect
#(remember that screenRect is in the physical
# frame of reference, not the screen F.O.R.)
#and the particular background we're painting
intersect = rectToClear.clip( bgRect )
#we don't want to blit the clipped bg
#img section to the entire clear rect for cases
#when the clearRect crosses the boundary of
#two backgrs - it would cause the last blitted
#bg to write overtop of the preceeding
clearArea = clearRect.clip( intersect.move(-screenRect.x, -screenRect.y))
#next, shift the intersect so that we grab
#the correct segment of the background image
intersect.x -= bgRect.x
intersect.y -= bgRect.y
drawToSurface.blit( bgImg, clearArea, intersect)
#----------------------------------------------------------------------
def RectIsOutOfBounds( self, physRect, debug=0 ):
#if debug:
#print "In the debug version", debug
#first, find the seams that intersect with the current screen
collideSeams = self.GetSeamsThatCurrentlyCollide( debug )
#get the background rects that are possibly onscreen
#TODO: this implementation is overzealous. fix.
intersectingRects = []
for img, rect in self.backgrounds.values():
intersectingRects.append( rect )
#assume it's all out of bounds at first
result = ['top', 'right', 'bottom', 'left']
def tryRemove( theList, item ):
try:
theList.remove( item )
except ValueError:
#list item was already removed
pass
#if debug:
#print 'collideseams: ', collideSeams
#print 'inters. rects: ', intersectingRects
for bgRect in intersectingRects:
#if debug:
#print bgRect, physRect
if len(result) == 0:
break
if bgRect.collidepoint( physRect.topleft ):
tryRemove( result, 'top' )
tryRemove( result, 'left' )
if bgRect.collidepoint( physRect.topright ):
tryRemove( result, 'top' )
tryRemove( result, 'right' )
if bgRect.collidepoint( physRect.bottomright ):
#if debug:
#print 'removing bottom'
tryRemove( result, 'bottom' )
tryRemove( result, 'right' )
if bgRect.collidepoint( physRect.bottomleft):
#if debug:
#print 'removing bottom'
tryRemove( result, 'bottom' )
tryRemove( result, 'left' )
#print 'bounds out: ', result
#print 'bottom still in? ', 'bottom' in result
return result
#----------------------------------------------------------------------
def GetSeamsThatCurrentlyCollide( self, debug=0 ):
# will also load the seam
screenRect = self.GetOffsetScreenRect()
collideSeams = []
for sList in self.seams:
fnameA, fnameB, rect = sList[0], sList[1], sList[2]
#if debug:
#print 'selfscreengetrect', self.screen.get_rect()
#print 'offsets', self.offset[0], self.offset[1]
#print 'screenrect', screenRect
#print 'self.seams[x] rect', rect
if screenRect.colliderect( rect ):
#print 'they did collide'
collideSeams += [fnameA, fnameB]
self.LoadBackground( fnameA )
self.LoadBackground( fnameB )
return collideSeams
#----------------------------------------------------------------------
def GetOffsetBounds( self ):
"""We want the furthest we can set our offset. We only have a
background image for a certain area, we don't want our screen
to go beyond that area."""
#TODO: analyse the efficiency of this function
r = self.backgrounds[self.currentBg][1]
screenRect = self.GetOffsetScreenRect()
#first, find the seams that intersect with the current screen
collideSeams = self.GetSeamsThatCurrentlyCollide()
class OffsetBounds:
minX = r.x
maxX = r.right - screenRect.width
minY = r.y
maxY = r.bottom - screenRect.height
offsetBounds = OffsetBounds()
#if there's only one visible background, just return
if not collideSeams:
return offsetBounds
accessibleRects = []
for fname in collideSeams:
accessibleRects.append( self.backgrounds[fname][1] )
unionRect = r.unionall( accessibleRects )
offsetBounds.minX = unionRect.x
offsetBounds.maxX = unionRect.right - screenRect.width
offsetBounds.minY = unionRect.y
offsetBounds.maxY = unionRect.bottom - screenRect.height
return offsetBounds
#----------------------------------------------------------------------
def NotifyPlayerSpritePos( self, physRect ):
"""Takes the rect of the sprite that the player is controlling
(the player's avatar) and determines if we need to scroll
the background
By default, the center has 100 pixels of grace."""
#set the currently inhabited background
for fname, sList in self.backgrounds.items():
if sList[1].contains( physRect ):
self.currentBg = fname
break
oldOffset = copy( self.offset )
#screenRect is the rect with the dimensions of the viewport
screenRect = self.GetOffsetScreenRect()
self.dirty = 0
#when in the center, the player can move 100 pixels in any
#direction without scrolling happening. This saves some
#processing and gives the game a pleasant feel
leftScrollTrig = screenRect.centerx - 100
rightScrollTrig = screenRect.centerx + 100
topScrollTrig = screenRect.centery - 100
bottomScrollTrig = screenRect.centery + 100
offsetBounds = self.GetOffsetBounds()
if physRect.right > rightScrollTrig \
and self.offset[0] != offsetBounds.maxX:
self.offset[0] = min( offsetBounds.maxX,
self.offset[0]+ physRect.right - rightScrollTrig )
self.dirty = 1
elif physRect.left < leftScrollTrig \
and self.offset[0] != offsetBounds.minX:
self.offset[0] = max( offsetBounds.minX,
self.offset[0]+ physRect.left - leftScrollTrig )
self.dirty = 1
if physRect.bottom > bottomScrollTrig \
and self.offset[1] != offsetBounds.maxY:
self.offset[1] = min( offsetBounds.maxY,
self.offset[1]+ physRect.bottom-bottomScrollTrig )
#if self.offset[1] == 400:
#print 'just set to 400'
#print 'offsetBounds.maxY', offsetBounds.maxY
#print 'physrect.bottom', physRect.bottom
#print 'bottomScrollTrig', bottomScrollTrig
#print '2nd ard to min', self.offset[1]+ physRect.bottom-bottomScrollTrig
self.dirty = 1
elif physRect.top < topScrollTrig \
and self.offset[1] != offsetBounds.minY:
self.offset[1] = max( offsetBounds.minY,
self.offset[1]+ physRect.top - topScrollTrig )
self.dirty = 1
if self.dirty:
self.CalculateNewlyExposedArea( oldOffset )
return self.GetDisplayCenter( physRect )
| [
"[email protected]"
] | |
8989405097b40e4d8b37f0c364064761c06942de | eef243e450cea7e91bac2f71f0bfd45a00c6f12c | /.history/worker_master_20210128031450.py | 01354a9e68cabfdac6caf5de3156d585f3843a35 | [] | no_license | hoaf13/nlp-chatbot-lol | 910ab2ea3b62d5219901050271fc1a1340e46a2f | 18cb64efa9d6b4cafe1015f1cd94f4409271ef56 | refs/heads/master | 2023-05-08T04:17:19.450718 | 2021-02-02T02:37:38 | 2021-02-02T02:37:38 | 332,535,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | import redis
red = redis.StrictRedis(host='localhost',port=6379,db=0)
queue = list()
def str_to_bool(str):
if str == b'False':
return False
if str == b'True':
return True
return None
while True:
# check supplier product status
is_new = str_to_bool(red.get("is_new_product_worker1"))
if is_new:
taken_product = red.get('product_worker1')
queue.append(taken_product)
red.set("new_product_worker1", str(False))
    # publish the product to the consumer
if red.get("is_new_product_worker2") is None:
red.set("is_new_product_worker2", str(False))
if red.get("is_new_product_worker3") is None:
red.set("is_new_product_worker3", str(False))
is_used = not str_to_bool(red.get("is_new_product_worker2"))
if is_used:
if len(queue) == 0:
continue
        taken_product = red.get("product_worker2")
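# NOTE: this .history snapshot ends mid-branch. A hedged sketch of how the
# hand-off could continue (the key names mirror the worker1 pattern above but
# are assumptions, since the finished version is not part of this snapshot):
#
#   taken_product = queue.pop(0)
#   red.set("product_worker2", taken_product)
#   red.set("is_new_product_worker2", str(True))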
| [
"[email protected]"
] | |
310c12edeafe2c62c72077c3bf701b34385c631d | db27d4b3d6f846074646f7e7ee6ea40fd11ca0f5 | /2020/3-16-企业微信/QYWX_APP/WXBizMsgCrypt.py | fb3fa24daeca51900ed1fff168d2cd1cf9ea67fb | [] | no_license | LuckDIY/Programer_Log | 95ccd44c38754dad439738be0690661a7acac1ef | 4a6bb5815d895e176d17e7a1ce53aa6ba5b774f2 | refs/heads/master | 2022-04-24T18:01:15.417673 | 2020-04-23T10:19:55 | 2020-04-23T10:19:55 | 258,235,185 | 1 | 0 | null | 2020-04-23T14:39:57 | 2020-04-23T14:39:56 | null | UTF-8 | Python | false | false | 13,827 | py | #!/usr/bin/env python
#-*- encoding:utf-8 -*-
""" 对企业微信发送给企业后台的消息加解密示例代码.
@copyright: Copyright (c) 1998-2014 Tencent Inc.
"""
# ------------------------------------------------------------------------
import base64
import string
import random
import hashlib
import time
from Crypto.Cipher import AES
import xml.etree.cElementTree as ET
import sys
import ierror
class FormatException(Exception):
pass
def throw_exception(message, exception_class=FormatException):
"""my define raise exception function"""
raise exception_class(message)
def generateNonce(digits = 16):
""" 随机生成16位字符串
@return: 16位字符串
"""
rule = string.ascii_lowercase + string.digits
str = random.sample(rule, digits)
return "".join(str)
class SHA1:
"""计算企业微信的消息签名接口"""
def getSHA1(self, token, timestamp, nonce, encrypt):
"""用SHA1算法生成安全签名
@param token: 票据
@param timestamp: 时间戳
@param encrypt: 密文
@param nonce: 随机字符串
@return: 安全签名
"""
try:
sortlist = [token, timestamp, nonce, encrypt]
sortlist.sort()
sha = hashlib.sha1()
sha.update("".join(sortlist).encode('utf-8'))
return ierror.WXBizMsgCrypt_OK, sha.hexdigest()
except Exception as e:
print(e)
return ierror.WXBizMsgCrypt_ComputeSignature_Error, None
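# Worked illustration of getSHA1 (the values are made up): with token="t",
# timestamp="1", nonce="n", encrypt="e", the sorted list is ["1", "e", "n", "t"],
# so the signature is hashlib.sha1("1ent".encode("utf-8")).hexdigest().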
class XMLParse:
"""提供提取消息格式中的密文及生成回复消息格式的接口"""
# xml消息模板
AES_TEXT_RESPONSE_TEMPLATE = '<xml>'+\
'<Encrypt><![CDATA[%(msg_encrypt)s]]></Encrypt>'+\
'<MsgSignature><![CDATA[%(msg_signaturet)s]]></MsgSignature>'+\
'<TimeStamp>%(timestamp)s</TimeStamp>'+\
'<Nonce><![CDATA[%(nonce)s]]></Nonce>'+\
'</xml>'
def extract(self, xmltext):
"""提取出xml数据包中的加密消息
@param xmltext: 待提取的xml字符串
@return: 提取出的加密消息字符串
"""
try:
xml_tree = ET.fromstring(xmltext)
encrypt = xml_tree.find("Encrypt")
return ierror.WXBizMsgCrypt_OK, encrypt.text
except Exception as e:
print(e)
return ierror.WXBizMsgCrypt_ParseXml_Error,None,None
def generate(self, encrypt, signature, timestamp, nonce):
"""生成xml消息
@param encrypt: 加密后的消息密文
@param signature: 安全签名
@param timestamp: 时间戳
@param nonce: 随机字符串
@return: 生成的xml字符串
"""
resp_dict = {
'msg_encrypt' : encrypt,
'msg_signaturet': signature,
'timestamp' : timestamp,
'nonce' : nonce,
}
resp_xml = self.AES_TEXT_RESPONSE_TEMPLATE % resp_dict
return resp_xml
class ResponseMessage():
    # Convert a python dict into the reply XML formats; the templates are below
"""
text_response = {
'to_user':'',
'from_user':'',
'timestamp':'',
'type':'text',
'content':'',
}
voice_response= {
'to_user':'',
'from_user':'',
'timestamp':'',
'type':'voice',
'media_id':''
}
image_response= {
'to_user':'',
'from_user':'',
'timestamp':'',
'type':'image',
'data':[
{'media_id':''}
]
}
video_response= {
'to_user':'',
'from_user':'',
'timestamp':'',
'type':'video',
'media_id':'',
'title':'',
'description':'',
}
article_response= {
'to_user':'',
'from_user':'',
'timestamp':'',
'type':'news',
'data':[
{'title':'',
'description':'',
'pic_url':'',
'url':'',
}
]
}
"""
BASIC_RESPONSE_FIELDS = '<ToUserName><![CDATA[%(to_user)s]]></ToUserName>'+\
'<FromUserName><![CDATA[%(from_user)s]]></FromUserName>'+\
'<CreateTime>%(timestamp)s</CreateTime>'+\
'<MsgType><![CDATA[%(type)s]]></MsgType>'
TEXT_RESPONSE_FIELD = "<Content><![CDATA[%(content)s]]></Content>"
VOICE_RESPONSE_FIELD = "<Voice><![CDATA[%(media_id)s]]></Voice>"
IMAGE_RESPONSE_FIELD = "<MediaId><![CDATA[%(media_id)s]]></MediaId>"
VIDEO_RESPONSE_FIELD = '<Video>'+\
'<MediaId><![CDATA[%(media_id)s]]></MediaId>' +\
'<Title><![CDATA[%(title)s]]></Title>'+\
'<Description><![CDATA[%(description)s]]></Description>'+\
'</Video>'
ARTICLE_RESPONSE_FIELD = '<items>'+\
'<Title><![CDATA[%(title)s]]></Title>'+\
'<Description><![CDATA[%(description)s]]></Description>'+\
'<PicUrl><![CDATA[%(pic_url)s]]></PicUrl>' +\
'<Url><![CDATA[%(url)s]]></Url>'+\
'</items>'
def __init__(self,data_dict):
if 'timestamp' not in data_dict:
data_dict['timestamp'] = str(int(time.time()))
self.data = data_dict
@property
def xml(self):
basic = self.BASIC_RESPONSE_FIELDS % self.data
# text message
if self.data['type'] == 'text':
return '<xml>' + basic + self.TEXT_RESPONSE_FIELD % self.data + '</xml>'
# image message
elif self.data['type'] == 'image':
tmp = ''
for d in self.data['data']:
tmp = tmp + self.IMAGE_RESPONSE_FIELD % d
return '<xml>' + basic + '<Image>' +tmp+ '</Image></xml>'
# voice message
elif self.data['type'] == 'voice':
return '<xml>' + basic + self.VOICE_RESPONSE_FIELD % self.data + '</xml>'
# video message
elif self.data['type'] == 'video':
return '<xml>' + basic + self.VIDEO_RESPONSE_FIELD % self.data + '</xml>'
# news message
elif self.data['type'] == 'news':
tmp = ''
for d in self.data['data']:
tmp = tmp + self.ARTICLE_RESPONSE_FIELD % d
count = "<ArticleCount>"+str(len(self.data['data']))+"</ArticleCount>"
return '<xml>' + basic + count + '<Articles>' +tmp+ '</Articles></xml>'
else:
return None
class PKCS7Encoder():
"""提供基于PKCS7算法的加解密接口"""
block_size = 32
def encode(self, text):
""" 对需要加密的明文进行填充补位
@param text: 需要进行填充补位操作的明文
@return: 补齐明文字符串
"""
text_length = len(text)
        # Number of padding bytes required
amount_to_pad = self.block_size - (text_length % self.block_size)
if amount_to_pad == 0:
amount_to_pad = self.block_size
        # The character used for padding
pad = chr(amount_to_pad)
if type(text) == bytes:
return text + amount_to_pad * amount_to_pad.to_bytes(1,'big')
return text + pad * amount_to_pad
def decode(self, decrypted):
"""删除解密后明文的补位字符
@param decrypted: 解密后的明文
@return: 删除补位字符后的明文
"""
pad = decrypted[-1]
if pad<1 or pad >32:
pad = 0
return decrypted[:-pad]
class Prpcrypt(object):
"""提供接收和推送给企业微信消息的加解密接口"""
def __init__(self,key):
#self.key = base64.b64decode(key+"=")
self.key = key
# 设置加解密模式为AES的CBC模式
self.mode = AES.MODE_CBC
def encrypt(self,text,receiveid):
"""对明文进行加密
@param text: 需要加密的明文
@return: 加密得到的字符串
"""
# 16位随机字符串添加到明文开头
text_bytes = text.encode('utf8')
text = generateNonce().encode('utf8') + int.to_bytes(len(text_bytes),4,byteorder='big') + text_bytes + receiveid.encode('utf8')
# 使用自定义的填充方式对明文进行补位填充
pkcs7 = PKCS7Encoder()
text = pkcs7.encode(text)
# 加密
cryptor = AES.new(self.key,self.mode,self.key[:16])
try:
ciphertext = cryptor.encrypt(text)
# 使用BASE64对加密后的字符串进行编码
return ierror.WXBizMsgCrypt_OK, base64.b64encode(ciphertext).decode('utf8')
except Exception as e:
print(e)
return ierror.WXBizMsgCrypt_EncryptAES_Error,None
def decrypt(self,text,receiveid):
"""对解密后的明文进行补位删除
@param text: 密文
@return: 删除填充补位后的明文
"""
try:
cryptor = AES.new(self.key,self.mode,self.key[:16])
# 使用BASE64对密文进行解码,然后AES-CBC解密
plain_text = cryptor.decrypt(base64.b64decode(text))
except Exception as e:
print(e)
return ierror.WXBizMsgCrypt_DecryptAES_Error,None
try:
#pad = plain_text[-1]
# 去掉补位字符串
pkcs7 = PKCS7Encoder()
plain_text = pkcs7.decode(plain_text)
xml_len = int.from_bytes(plain_text[16:20],byteorder='big')
xml_content = plain_text[20 : 20 + xml_len].decode('utf-8')
from_receiveid = plain_text[20 + xml_len:].decode('utf-8')
except Exception as e:
print(e)
return ierror.WXBizMsgCrypt_IllegalBuffer,None
if from_receiveid != receiveid:
return ierror.WXBizMsgCrypt_ValidateCorpid_Error,None
return 0,xml_content
class WXBizMsgCrypt(object):
    # Constructor
def __init__(self,sToken,sEncodingAESKey,sReceiveId):
try:
self.key = base64.b64decode(sEncodingAESKey+"=")
assert len(self.key) == 32
except:
throw_exception("[error]: EncodingAESKey unvalid !", FormatException)
# return ierror.WXBizMsgCrypt_IllegalAesKey,None
self.m_sToken = sToken
self.m_sReceiveId = sReceiveId
    #Verify the callback URL
    #@param sMsgSignature: signature string, the msg_signature URL parameter
    #@param sTimeStamp: timestamp, the timestamp URL parameter
    #@param sNonce: random string, the nonce URL parameter
    #@param sEchoStr: random string, the echostr URL parameter
    #@param sReplyEchoStr: the decrypted echostr, valid when the return code is 0
    #@return: 0 on success, otherwise the corresponding error code
def VerifyURL(self, sMsgSignature, sTimeStamp, sNonce, sEchoStr):
sha1 = SHA1()
ret,signature = sha1.getSHA1(self.m_sToken, sTimeStamp, sNonce, sEchoStr)
if ret != 0:
return ret, None
if not signature == sMsgSignature:
return ierror.WXBizMsgCrypt_ValidateSignature_Error, None
pc = Prpcrypt(self.key)
ret,sReplyEchoStr = pc.decrypt(sEchoStr,self.m_sReceiveId)
return ret,sReplyEchoStr
def EncryptMsg(self, sReplyMsg, sNonce, timestamp = None):
        #Encrypt and package the reply that the enterprise sends back to the user
        #@param sReplyMsg: the reply message, an XML-formatted string
        #@param sTimeStamp: timestamp; may be generated locally or taken from the URL parameter; the current time is used when None
        #@param sNonce: random string; may be generated locally or taken from the URL parameter
        #sEncryptMsg: the encrypted reply that can be returned to the user, an XML string containing msg_signature, timestamp, nonce and encrypt
        #return: 0 and sEncryptMsg on success, otherwise the error code and None
pc = Prpcrypt(self.key)
ret,encrypt = pc.encrypt(sReplyMsg, self.m_sReceiveId)
if ret != 0:
return ret,None
if timestamp is None:
timestamp = str(int(time.time()))
# 生成安全签名
sha1 = SHA1()
ret,signature = sha1.getSHA1(self.m_sToken, timestamp, sNonce, encrypt)
if ret != 0:
return ret,None
xmlParse = XMLParse()
return ret,xmlParse.generate(encrypt, signature, timestamp, sNonce)
def DecryptMsg(self, sPostData, sMsgSignature, sTimeStamp, sNonce):
        # Verify the authenticity of the message and return the decrypted plaintext
        # @param sMsgSignature: signature string, the msg_signature URL parameter
        # @param sTimeStamp: timestamp, the timestamp URL parameter
        # @param sNonce: random string, the nonce URL parameter
        # @param sPostData: the ciphertext, i.e. the body of the POST request
        # xml_content: the decrypted plaintext, valid when the return code is 0
        # @return: 0 on success, otherwise the corresponding error code
        # Verify the security signature
xmlParse = XMLParse()
ret,encrypt = xmlParse.extract(sPostData)
if ret != 0:
return ret, None
sha1 = SHA1()
ret,signature = sha1.getSHA1(self.m_sToken, sTimeStamp, sNonce, encrypt)
if ret != 0:
return ret, None
if not signature == sMsgSignature:
return ierror.WXBizMsgCrypt_ValidateSignature_Error, None
pc = Prpcrypt(self.key)
ret,xml_content = pc.decrypt(encrypt,self.m_sReceiveId)
return ret,xml_content
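# Illustrative end-to-end usage (a sketch; the token, AES key and corp id
# values are placeholders):
#
#   wxcpt = WXBizMsgCrypt(sToken, sEncodingAESKey, sCorpId)
#   ret, sEchoStr = wxcpt.VerifyURL(msg_signature, timestamp, nonce, echostr)
#   ret, sMsg     = wxcpt.DecryptMsg(post_data, msg_signature, timestamp, nonce)
#   ret, sEncrypt = wxcpt.EncryptMsg(reply_xml, nonce)
#   # each call returns (0, result) on success or (error_code, None) on failure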
| [
"[email protected]"
] | |
5175266ea0a9201c574f0e0e99e425c541148de7 | 1191d49562881bd5a76ed285da572dd9e27a2270 | /util/rand_mat.py | 32160d98b848a6470e599289ac16e91becb97371 | [] | no_license | jardelcarvalho/ufsj_computacao-paralela_tp1 | 28ebb6608d5dbd110033ee976979a7cdbbd47e0c | b781cb207ab96b140f3f839db12b680d7e376e4a | refs/heads/master | 2020-08-24T19:02:01.403138 | 2019-11-01T05:31:54 | 2019-11-01T05:31:54 | 216,887,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | import sys
import random
p = [4, 9]
# matrix sizes must be multiples of q (Fox algorithm condition)
mult = 1
for q in list(map(lambda x: int(x ** .5), p)):
mult *= q
path = sys.argv[1]
dim = mult
while (dim * 2) ** 2 / 10 ** 6 < 10:
dim *= 2
print(dim)
with open(path + '/' + str(dim) + 'x' + str(dim) + '.txt', 'w') as f:
f.write(str(dim) + '\n')
for i in range(dim):
for j in range(dim):
if i != j:
f.write(str(random.randint(0, 100)))
else:
f.write('0')
if j == dim - 1:
f.write('\n')
continue
f.write(' ')
| [
"[email protected]"
] | |
9be9dbbde398f9702ac9b6070fcd6309a017b8b4 | 0957c3eb9bcc98ee864f33e8757ff7ad21341b47 | /pserver/gcloudstorage/events.py | ec967a22f3eed7834fbb1c2fd0ab23b49fd3f4f5 | [] | no_license | pyrenees/pserver.gcloudstorage | dc646a968e2fffc2dbb42892c1827b500e5d2f7d | 6d6f8f516d1bf535a58331ea22c322787aa66a6b | refs/heads/master | 2021-01-17T18:08:03.275519 | 2017-04-25T15:35:53 | 2017-04-25T15:35:53 | 71,178,626 | 0 | 1 | null | 2017-04-25T15:35:53 | 2016-10-17T20:31:43 | Python | UTF-8 | Python | false | false | 472 | py | # -*- encoding: utf-8 -*-
from pserver.gcloudstorage.interfaces import IInitialGCloudUpload
from pserver.gcloudstorage.interfaces import IFinishGCloudUpload
from zope.interface import implementer
from zope.interface.interfaces import ObjectEvent
@implementer(IInitialGCloudUpload)
class InitialGCloudUpload(ObjectEvent):
"""An object has been created"""
@implementer(IFinishGCloudUpload)
class FinishGCloudUpload(ObjectEvent):
"""An object has been created"""
| [
"[email protected]"
] | |
dceca5dce5afeb9649602b8fb173d5d72f947096 | 5eb3bcfd16561a195e7e25ec45e03bca0fe46a65 | /basic/hello.py | eb88a5a6ea77d46d998426889b096b15689ab459 | [] | no_license | aswin191993/googlepython | 6eda6a5ca91e3b667b3443ce64e3f350bc17a394 | 47080eaa6f8208dfe042282ccd5025fd9b4e37f7 | refs/heads/master | 2021-01-01T19:46:44.540031 | 2015-02-03T08:28:58 | 2015-02-03T08:28:58 | 30,180,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""A tiny Python program to check that Python is working.
Try running this program from the command line like this:
python hello.py
python hello.py Alice
That should print:
Hello World -or- Hello Alice
Try changing the 'Hello' to 'Howdy' and run again.
Once you have that working, you're ready for class -- you can edit
and run Python code; now you just need to learn Python!
"""
import sys
# Define a main() function that prints a little greeting.
def main():
# Get the name from the command line, using 'World' as a fallback.
if len(sys.argv) >= 2:
name = sys.argv[1]
else:
name = 'World'
print 'Howdy', name
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
606b617720c725a5c2d30fc3448864ac159a19c0 | 42d93fd16fb7c46b4317daaa2f988b138ff88b37 | /ExampleOfPythonCookBook/ex_1_4.py | 72d1054942c0b0485f2bff9bf0082172c6a02a2a | [] | no_license | zhan-ge/CodePy | 6aa8af9f79bac9c4e0a0eae45ffe0318248536d6 | 969160d01fb7cc93845d153690528748e2b72388 | refs/heads/master | 2021-01-10T01:42:08.561432 | 2017-02-09T06:16:55 | 2017-02-09T06:16:55 | 36,662,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | # -*- coding: utf-8 -*-
"""
Project: find the N largest or smallest elements
Purpose:
Version:
Author: ZG
Date: 15/6/8
"""
import heapq
if __name__ == '__main__':
nums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]
    print heapq.nlargest(3, nums) # [42, 37, 23] the three largest values
    print heapq.nsmallest(3, nums) # [-4, 2, 1] the three smallest values
portfolio = [
{'name': 'IBM', 'shares': 100, 'price': 91.1},
{'name': 'APPL', 'shares': 50, 'price': 12.4},
{'name': 'FB', 'shares': 10, 'price': 66.9},
{'name': 'HPQ', 'shares': 200, 'price': 23.3},
{'name': 'YHOO', 'shares': 45, 'price': 87.6},
{'name': 'ACME', 'shares': 80, 'price': 21.2},
]
    cheap = heapq.nsmallest(3, portfolio, key=lambda s: s['price']) # key= ranks the dicts; take the 3 cheapest and the 3 most expensive
expensive = heapq.nlargest(3, portfolio, key=lambda s: s['price'])
print cheap
print expensive | [
"[email protected]"
] | |
9db88f4cface810f0f50d597991b8c455ea86482 | dc91d74daa0909d75b5512a1f0f7f32ded97cc85 | /TuSimple/test.py | 4cb26ef72c623bc822541c0388eb32f7a6269155 | [
"MIT"
] | permissive | Ghustwb/PINet_new | a7cb5a2b413b26a26042e7496e64961d6d5f2d47 | 584618363c48436a8889d9d3e8aee59ab8445d18 | refs/heads/master | 2022-12-23T10:26:40.201272 | 2020-08-31T08:13:13 | 2020-08-31T08:13:13 | 292,445,075 | 1 | 0 | MIT | 2020-09-03T02:27:09 | 2020-09-03T02:27:08 | null | UTF-8 | Python | false | false | 19,616 | py | #############################################################################################################
##
## Source code for testing
##
#############################################################################################################
import cv2
import json
import torch
#import sys
#sys.path.append('/home/kym/research/autonomous_car_vision/lanedection/code/')
#import util
import agent
import numpy as np
from copy import deepcopy
from data_loader import Generator
import time
from parameters import Parameters
import util
from tqdm import tqdm
import csaps
p = Parameters()
###############################################################
##
## Testing
##
###############################################################
def Testing():
print('Testing')
#########################################################################
## Get dataset
#########################################################################
print("Get dataset")
loader = Generator()
##############################
## Get agent and model
##############################
print('Get agent')
if p.model_path == "":
lane_agent = agent.Agent()
else:
lane_agent = agent.Agent()
lane_agent.load_weights(804, "tensor(0.5786)")
#lane_agent.load_weights(2152, "tensor(1.9907)")
##############################
## Check GPU
##############################
print('Setup GPU mode')
if torch.cuda.is_available():
lane_agent.cuda()
##############################
## testing
##############################
print('Testing loop')
lane_agent.evaluate_mode()
if p.mode == 0 : # check model with test data
for _, _, _, test_image in loader.Generate():
_, _, ti = test(lane_agent, np.array([test_image]))
cv2.imshow("test", ti[0])
cv2.waitKey(0)
elif p.mode == 1: # check model with video
cap = cv2.VideoCapture("/home/kym/research/autonomous_car_vision/lane_detection/code/Tusimple/git_version/LocalDataset_Day.mp4")
while(cap.isOpened()):
ret, frame = cap.read()
torch.cuda.synchronize()
prevTime = time.time()
frame = cv2.resize(frame, (512,256))/255.0
frame = np.rollaxis(frame, axis=2, start=0)
_, _, ti = test(lane_agent, np.array([frame]))
curTime = time.time()
sec = curTime - prevTime
fps = 1/(sec)
s = "FPS : "+ str(fps)
ti[0] = cv2.resize(ti[0], (1280,800))
cv2.putText(ti[0], s, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))
cv2.imshow('frame',ti[0])
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
elif p.mode == 2: # check model with a picture
test_image = cv2.imread(p.test_root_url+"clips/0530/1492720840345996040_0/20.jpg")
test_image = cv2.resize(test_image, (512,256))/255.0
test_image = np.rollaxis(test_image, axis=2, start=0)
_, _, ti = test(lane_agent, np.array([test_image]))
cv2.imshow("test", ti[0])
cv2.waitKey(0)
elif p.mode == 3: #evaluation
print("evaluate")
evaluation(loader, lane_agent)
############################################################################
## evaluate on the test dataset
############################################################################
def evaluation(loader, lane_agent, index= -1, thresh = p.threshold_point, name = None):
result_data = deepcopy(loader.test_data)
progressbar = tqdm(range(loader.size_test//4))
for test_image, target_h, ratio_w, ratio_h, testset_index, gt in loader.Generate_Test():
x, y, _ = test(lane_agent, test_image, thresh, index)
x_ = []
y_ = []
for i, j in zip(x, y):
temp_x, temp_y = util.convert_to_original_size(i, j, ratio_w, ratio_h)
x_.append(temp_x)
y_.append(temp_y)
#x_, y_ = find_target(x_, y_, target_h, ratio_w, ratio_h)
x_, y_ = fitting(x_, y_, target_h, ratio_w, ratio_h)
result_data = write_result_json(result_data, x_, y_, testset_index)
#util.visualize_points_origin_size(x_[0], y_[0], test_image[0], ratio_w, ratio_h)
#print(gt.shape)
#util.visualize_points_origin_size(gt[0], y_[0], test_image[0], ratio_w, ratio_h)
progressbar.update(1)
progressbar.close()
if name == None:
save_result(result_data, "test_result.json")
else:
save_result(result_data, name)
############################################################################
## linear interpolation for fixed y value on the test dataset
############################################################################
def find_target(x, y, target_h, ratio_w, ratio_h):
# find exact points on target_h
out_x = []
out_y = []
x_size = p.x_size/ratio_w
y_size = p.y_size/ratio_h
count = 0
for x_batch, y_batch in zip(x,y):
predict_x_batch = []
predict_y_batch = []
for i, j in zip(x_batch, y_batch):
min_y = min(j)
max_y = max(j)
temp_x = []
temp_y = []
for h in target_h[count]:
temp_y.append(h)
if h < min_y:
temp_x.append(-2)
elif min_y <= h and h <= max_y:
for k in range(len(j)-1):
if j[k] >= h and h >= j[k+1]:
#linear regression
if i[k] < i[k+1]:
temp_x.append(int(i[k+1] - float(abs(j[k+1] - h))*abs(i[k+1]-i[k])/abs(j[k+1]+0.0001 - j[k])))
else:
temp_x.append(int(i[k+1] + float(abs(j[k+1] - h))*abs(i[k+1]-i[k])/abs(j[k+1]+0.0001 - j[k])))
break
else:
if i[0] < i[1]:
l = int(i[1] - float(-j[1] + h)*abs(i[1]-i[0])/abs(j[1]+0.0001 - j[0]))
if l > x_size or l < 0 :
temp_x.append(-2)
else:
temp_x.append(l)
else:
l = int(i[1] + float(-j[1] + h)*abs(i[1]-i[0])/abs(j[1]+0.0001 - j[0]))
if l > x_size or l < 0 :
temp_x.append(-2)
else:
temp_x.append(l)
predict_x_batch.append(temp_x)
predict_y_batch.append(temp_y)
out_x.append(predict_x_batch)
out_y.append(predict_y_batch)
count += 1
return out_x, out_y
def fitting(x, y, target_h, ratio_w, ratio_h):
out_x = []
out_y = []
count = 0
x_size = p.x_size/ratio_w
y_size = p.y_size/ratio_h
for x_batch, y_batch in zip(x,y):
predict_x_batch = []
predict_y_batch = []
for i, j in zip(x_batch, y_batch):
min_y = min(j)
max_y = max(j)
temp_x = []
temp_y = []
jj = []
pre = -100
for temp in j[::-1]:
if temp > pre:
jj.append(temp)
pre = temp
else:
jj.append(pre+0.00001)
pre = pre+0.00001
#sp = csaps.UnivariateCubicSmoothingSpline(jj, i[::-1], smooth=0.01)
sp = csaps.CubicSmoothingSpline(jj, i[::-1], smooth=0.0001)
last = 0
last_second = 0
last_y = 0
last_second_y = 0
for h in target_h[count]:
temp_y.append(h)
if h < min_y:
temp_x.append(-2)
elif min_y <= h and h <= max_y:
temp_x.append( sp([h])[0] )
last = temp_x[-1]
last_y = temp_y[-1]
if len(temp_x)<2:
last_second = temp_x[-1]
last_second_y = temp_y[-1]
else:
last_second = temp_x[-2]
last_second_y = temp_y[-2]
else:
if last < last_second:
l = int(last_second - float(-last_second_y + h)*abs(last_second-last)/abs(last_second_y+0.0001 - last_y))
if l > x_size or l < 0 :
temp_x.append(-2)
else:
temp_x.append(l)
'''
last = temp_x[-1]
last_y = temp_y[-1]
if len(temp_x)<2:
last_second = temp_x[-1]
last_second_y = temp_y[-1]
else:
last_second = temp_x[-2]
last_second_y = temp_y[-2]
'''
else:
l = int(last_second + float(-last_second_y + h)*abs(last_second-last)/abs(last_second_y+0.0001 - last_y))
if l > x_size or l < 0 :
temp_x.append(-2)
else:
temp_x.append(l)
'''
last = temp_x[-1]
last_y = temp_y[-1]
if len(temp_x)<2:
last_second = temp_x[-1]
last_second_y = temp_y[-1]
else:
last_second = temp_x[-2]
last_second_y = temp_y[-2]
'''
#temp_x.append(-2)
#temp_x.append( sp([h])[0] )
predict_x_batch.append(temp_x)
#predict_x_batch.append(sp(range(100, 590, 10)))
predict_y_batch.append(temp_y)
out_x.append(predict_x_batch)
out_y.append(predict_y_batch)
count += 1
return out_x, out_y
############################################################################
## write result
############################################################################
def write_result_json(result_data, x, y, testset_index):
for index, batch_idx in enumerate(testset_index):
for i in x[index]:
result_data[batch_idx]['lanes'].append(i)
result_data[batch_idx]['run_time'] = 1
return result_data
############################################################################
## save result by json form
############################################################################
def save_result(result_data, fname):
with open(fname, 'w') as make_file:
for i in result_data:
json.dump(i, make_file, separators=(',', ': '))
make_file.write("\n")
############################################################################
## test on the input test image
############################################################################
def test(lane_agent, test_images, thresh = p.threshold_point, index= -1):
result = lane_agent.predict_lanes_test(test_images)
torch.cuda.synchronize()
confidences, offsets, instances = result[index]
#confidences = torch.sigmoid(confidences)
#confidences = 0
#for c, o, i in result:
# confidences = confidences + c
num_batch = len(test_images)
out_x = []
out_y = []
out_images = []
for i in range(num_batch):
# test on test data set
image = deepcopy(test_images[i])
image = np.rollaxis(image, axis=2, start=0)
image = np.rollaxis(image, axis=2, start=0)*255.0
image = image.astype(np.uint8).copy()
confidence = confidences[i].view(p.grid_y, p.grid_x).cpu().data.numpy()
offset = offsets[i].cpu().data.numpy()
offset = np.rollaxis(offset, axis=2, start=0)
offset = np.rollaxis(offset, axis=2, start=0)
instance = instances[i].cpu().data.numpy()
instance = np.rollaxis(instance, axis=2, start=0)
instance = np.rollaxis(instance, axis=2, start=0)
# generate point and cluster
raw_x, raw_y = generate_result(confidence, offset, instance, thresh)
# eliminate fewer points
in_x, in_y = eliminate_fewer_points(raw_x, raw_y)
# sort points along y
in_x, in_y = util.sort_along_y(in_x, in_y)
#in_x, in_y = eliminate_out(in_x, in_y, confidence, deepcopy(image))
#in_x, in_y = util.sort_along_y(in_x, in_y)
#in_x, in_y = eliminate_fewer_points(in_x, in_y)
result_image = util.draw_points(in_x, in_y, deepcopy(image))
out_x.append(in_x)
out_y.append(in_y)
out_images.append(result_image)
return out_x, out_y, out_images
############################################################################
## post processing for eliminating outliers
############################################################################
def eliminate_out(sorted_x, sorted_y, confidence, image = None):
out_x = []
out_y = []
for lane_x, lane_y in zip(sorted_x, sorted_y):
lane_x_along_y = np.array(deepcopy(lane_x))
lane_y_along_y = np.array(deepcopy(lane_y))
ind = np.argsort(lane_x_along_y, axis=0)
lane_x_along_x = np.take_along_axis(lane_x_along_y, ind, axis=0)
lane_y_along_x = np.take_along_axis(lane_y_along_y, ind, axis=0)
if lane_y_along_x[0] > lane_y_along_x[-1]: #if y of left-end point is higher than right-end
starting_points = [(lane_x_along_y[0], lane_y_along_y[0]), (lane_x_along_y[1], lane_y_along_y[1]), (lane_x_along_y[2], lane_y_along_y[2]),
(lane_x_along_x[0], lane_y_along_x[0]), (lane_x_along_x[1], lane_y_along_x[1]), (lane_x_along_x[2], lane_y_along_x[2])] # some low y, some left/right x
else:
starting_points = [(lane_x_along_y[0], lane_y_along_y[0]), (lane_x_along_y[1], lane_y_along_y[1]), (lane_x_along_y[2], lane_y_along_y[2]),
(lane_x_along_x[-1], lane_y_along_x[-1]), (lane_x_along_x[-2], lane_y_along_x[-2]), (lane_x_along_x[-3], lane_y_along_x[-3])] # some low y, some left/right x
temp_x = []
temp_y = []
for start_point in starting_points:
temp_lane_x, temp_lane_y = generate_cluster(start_point, lane_x, lane_y, image)
temp_x.append(temp_lane_x)
temp_y.append(temp_lane_y)
max_lenght_x = None
max_lenght_y = None
max_lenght = 0
for i, j in zip(temp_x, temp_y):
if len(i) > max_lenght:
max_lenght = len(i)
max_lenght_x = i
max_lenght_y = j
out_x.append(max_lenght_x)
out_y.append(max_lenght_y)
#return out_x, out_y
return sorted_x, sorted_y
############################################################################
## generate cluster
############################################################################
def generate_cluster(start_point, lane_x, lane_y, image = None):
cluster_x = [start_point[0]]
cluster_y = [start_point[1]]
point = start_point
while True:
points = util.get_closest_upper_point(lane_x, lane_y, point, 3)
max_num = -1
max_point = None
if len(points) == 0:
break
if len(points) < 3:
for i in points:
cluster_x.append(i[0])
cluster_y.append(i[1])
break
for i in points:
num, shortest = util.get_num_along_point(lane_x, lane_y, point, i, image)
if max_num < num:
max_num = num
max_point = i
total_remain = len(np.array(lane_y)[np.array(lane_y) < point[1]])
cluster_x.append(max_point[0])
cluster_y.append(max_point[1])
point = max_point
if len(points) == 1 or max_num < total_remain/5:
break
return cluster_x, cluster_y
############################################################################
## remove same value on the prediction results
############################################################################
def remove_same_point(x, y):
out_x = []
out_y = []
for lane_x, lane_y in zip(x, y):
temp_x = []
temp_y = []
for i in range(len(lane_x)):
if len(temp_x) == 0 :
temp_x.append(lane_x[i])
temp_y.append(lane_y[i])
else:
if temp_x[-1] == lane_x[i] and temp_y[-1] == lane_y[i]:
continue
else:
temp_x.append(lane_x[i])
temp_y.append(lane_y[i])
out_x.append(temp_x)
out_y.append(temp_y)
return out_x, out_y
############################################################################
## eliminate result that has fewer points than threshold
############################################################################
def eliminate_fewer_points(x, y):
# eliminate fewer points
out_x = []
out_y = []
for i, j in zip(x, y):
if len(i)>2:
out_x.append(i)
out_y.append(j)
return out_x, out_y
############################################################################
## generate raw output
############################################################################
def generate_result(confidance, offsets,instance, thresh):
mask = confidance > thresh
#print(mask)
grid = p.grid_location[mask]
offset = offsets[mask]
feature = instance[mask]
lane_feature = []
x = []
y = []
for i in range(len(grid)):
if (np.sum(feature[i]**2))>=0:
point_x = int((offset[i][0]+grid[i][0])*p.resize_ratio)
point_y = int((offset[i][1]+grid[i][1])*p.resize_ratio)
if point_x > p.x_size or point_x < 0 or point_y > p.y_size or point_y < 0:
continue
if len(lane_feature) == 0:
lane_feature.append(feature[i])
x.append([point_x])
y.append([point_y])
else:
flag = 0
index = 0
min_feature_index = -1
min_feature_dis = 10000
for feature_idx, j in enumerate(lane_feature):
dis = np.linalg.norm((feature[i] - j)**2)
if min_feature_dis > dis:
min_feature_dis = dis
min_feature_index = feature_idx
if min_feature_dis <= p.threshold_instance:
lane_feature[min_feature_index] = (lane_feature[min_feature_index]*len(x[min_feature_index]) + feature[i])/(len(x[min_feature_index])+1)
x[min_feature_index].append(point_x)
y[min_feature_index].append(point_y)
elif len(lane_feature) < 12:
lane_feature.append(feature[i])
x.append([point_x])
y.append([point_y])
return x, y
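# Note on generate_result(): each grid cell above the confidence threshold is
# decoded into an image point via its predicted offset, then greedily assigned
# to the existing lane whose running-mean instance embedding is nearest; if the
# distance exceeds p.threshold_instance a new lane is started (capped at 12
# lanes). The lane's mean embedding is updated incrementally as points join it.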
if __name__ == '__main__':
Testing()
| [
"[email protected]"
] | |
84ce5330ca71b8b80f1c9606c660fff1e3be16f3 | 6999ebec0bed0aed8b9b8d185cd5a1e6f1de8b02 | /code/StartTime/luanchtime/LuanchTime.py | 846bcad4ac0bd8697234d8284df09e04e0fa4fe6 | [
"Apache-2.0"
] | permissive | aohanyao/AndroidTest | 5855fbdd615cb0e0a0c26c120df84911f244ab84 | da9a97795d0b41ecea1e6ce6303c01b9c6a84826 | refs/heads/master | 2020-03-26T02:59:00.510846 | 2018-08-12T13:35:07 | 2018-08-12T13:35:07 | 144,434,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,881 | py | # encoding:utf-8
import csv
import os
from time import sleep
import time
class App(object):
def __init__(self):
self.result = ""
self.startTime = 0
    # Launch the app and capture the output of "am start -W"
def luanchApp(self):
cmd = "adb shell am start -W -n com.yida.cloud.client.party/.SplashActivity"
self.result = os.popen(cmd)
# 关闭app
def stopApp(self):
cmd = "adb shell am force-stop com.yida.cloud.client.party"
os.popen(cmd)
# 去获取 启动时间
def getLuanchTime(self):
for line in self.result.readlines():
if "ThisTime" in line:
self.startTime = line.split(":")[1]
return self.startTime
class Controller(object):
def __init__(self):
# 创建对像
self.app = App()
# 保存测试结果
self.allData = [("testTime", "lunchTime")]
# 总测试次数
self.testCount = 2
# 开始测试
def startProcess(self):
# 启动app
self.app.luanchApp()
# 启动耗时
cultTime = self.app.getLuanchTime()
# 当前时间
currentTime = self.getCurrentTime()
# 保存时间
self.allData.append((currentTime, cultTime))
# 睡眠一下
sleep(5)
# 停止app
self.app.stopApp()
sleep(5)
# 获取当前时间
def getCurrentTime(self):
return time.strftime("%Y-%m-%d %H:%M%S", time.localtime())
# 开始运行
def run(self):
while self.testCount > 0:
self.startProcess()
self.testCount -= 1
self.saveTestResult()
def saveTestResult(self):
csvFile = file("startTimeResult.csv", "wb")
writer = csv.writer(csvFile)
writer.writerows(self.allData)
csvFile.close()
if __name__ == "__main__":
controller = Controller()
controller.run()
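# For reference, `adb shell am start -W <package>/<activity>` normally prints a block like the
# following (the numbers below are made-up sample values, not real measurements):
#
#   Status: ok
#   Activity: com.yida.cloud.client.party/.SplashActivity
#   ThisTime: 512
#   TotalTime: 512
#   WaitTime: 530
#
# getLuanchTime() above keeps only the "ThisTime" line, i.e. the launch time reported for the
# activity that was started.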
| [
"[email protected]"
] | |
31c879c8ba1cc165a4cae8998dfb0e2df6294de1 | e6328c5076fe0f1b6819c3eacca08e1c4791199b | /088. Merge Sorted Array/88. Merge Sorted Array.py | 298b3b6ac8fa2d5db7fa23194a25b316bef5a8a1 | [] | no_license | 603lzy/LeetCode | 16e818d94282b34ac153271697b512c79fc95ef5 | 9752533bc76ce5ecb881f61e33a3bc4b20dcf666 | refs/heads/master | 2020-06-14T03:07:03.148542 | 2018-10-22T14:10:33 | 2018-10-22T14:10:33 | 75,514,162 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | class Solution(object):
    def merge(self, nums1, m, nums2, n):
        """
        :type nums1: List[int]
        :type m: int
        :type nums2: List[int]
        :type n: int
        :rtype: void Do not return anything, modify nums1 in-place instead.
        """
        nums1[m:n+m] = nums2
        nums1.sort()
        return
| [
"[email protected]"
] | |
c5bd4835d49e18d36d811280c498931e1fc1aeb4 | 6241b5c00c13130c0784371c5e4b4dc62d2adf51 | /Week_02/1.两数之和.py | a920d0f6d312376d7b1cb0aece98c67f4e88671f | [] | no_license | rosencrystal/algorithm008-class02 | ad5e9b61d14691661f444cb1a085325d66653ac2 | df02fcdac839e9c5a9b82b5f51f16adc396d6f72 | refs/heads/master | 2022-11-13T06:18:00.326104 | 2020-07-05T05:12:24 | 2020-07-05T05:12:24 | 255,079,856 | 0 | 0 | null | 2020-04-12T12:41:21 | 2020-04-12T12:41:20 | null | UTF-8 | Python | false | false | 2,253 | py | #
# @lc app=leetcode.cn id=1 lang=python3
# Method 1: straightforward approach
# [1] Two Sum
# 1. Find all the numbers smaller than target and put them into an array less_target
# 2. Take the numbers from that array one by one, subtract each from target to get a val, then
#    check whether val can be found in less_target; if it can, a result is found and returned,
#    otherwise continue until every element has been traversed
# import sys
# @lc code=start
# class Solution:
#     def twoSum(self, nums: list, target: int) -> list:
#         for i, num in enumerate(nums):
#             val = target - num
#             try:
#                 i_val = nums.index(val, i+1)
#                 if i < i_val:
#                     return [i, i_val]
#                 continue
#             except ValueError:
#                 # print('not find %d', val)
#                 continue
#         return []
# Method 2: dictionary (hash) approach
# The dictionary records the values and positions of num1 and num2, which saves the extra step
# of searching for num2's index
# @lc code=start
# class Solution:
#     def twoSum(self, nums: list, target: int) -> list:
#         hashmap={}
#         for ind,num in enumerate(nums):
#             hashmap[num] = ind
#         for i,num in enumerate(nums):
#             j = hashmap.get(target - num)
#             if j is not None and i!=j:
#                 return [i,j]
# Method 3: optimized dictionary (hash) approach
# num2 does not have to be searched in the whole dict; it only has to be looked up among the
# entries stored before num1, so a single loop is enough
# @lc code=start
class Solution:
    def twoSum(self, nums: list, target: int) -> list:
        hashmap={}
        for i,num in enumerate(nums):
            if hashmap.get(target - num) is not None:
                return [i,hashmap.get(target - num)]
            hashmap[num] = i  # this line must not come before the if above; it handles duplicate values in the list and the case target - num == num
# if __name__ == '__main__':
#     # target = 9
#     # sys.stdout.write('nums = \n')
#     line = sys.stdin.readline()[1:-2].split(',')
#     # print(line)
#     nums = [int(l) for l in line]
#     # print(nums)
#     # sys.stdout.write('target = \n')
#     target = int(sys.stdin.readline().split('\n')[0])
#     s = Solution()
#     print(s.twoSum(nums, target))
# @lc code=end
| [
"[email protected]"
] | |
719050945a17c610cd14153ab0a9999137565f54 | b18b4486c4be5e561316190fac398e132325d2b7 | /longest_palindrome.py | eb96ced6ee18b735b3898b853f8adbd80c2bcd1c | [] | no_license | nooyji/algorithm | 593b402e3b29a158e4639c703ef731e8164d8334 | 290892a8d62edc087b335e722aae20063d3359a6 | refs/heads/main | 2023-06-02T18:44:47.990935 | 2021-06-22T09:04:31 | 2021-06-22T09:04:31 | 378,690,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | def longest_palindrome(self, s: str) -> str:
    def expand(left: int, right: int) -> str:
        while left >= 0 and right <= len(s) and s[left] == s[right - 1]:
            left -= 1
            right += 1
        return s[left + 1:right - 1]

    if len(s) < 2 or s == s[::-1]:
        return s
    result = ''
    for i in range(len(s) - 1):
        result = max(result, expand(i, i + 1), expand(i, i + 2), key=len)
    return result
| [
"[email protected]"
] | |
3ac6c82a350037db4993c4067b468ec81eae4f8c | 2d2c10ffa7aa5ee35393371e7f8c13b4fab94446 | /projects/feed/rank/src/ensemble/avg-ensemble.py | a12bbdd39d04d5c936e702dd25060724929e18c6 | [] | no_license | faker2081/pikachu2 | bec83750a5ff3c7b5a26662000517df0f608c1c1 | 4f06d47c7bf79eb4e5a22648e088b3296dad3b2d | refs/heads/main | 2023-09-02T00:28:41.723277 | 2021-11-17T11:15:44 | 2021-11-17T11:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# \file avg-ensemble.py
# \author chenghuige
# \date 2019-08-27 23:38:36.572572
# \Description
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import pandas as pd
df1 = pd.read_csv(sys.argv[1])
df2 = pd.read_csv(sys.argv[2])
df1 = df1.sort_values('id')
df2 = df2.sort_values('id')
| [
"[email protected]"
] | |
d1db12354091a6a30e9af2057e828efa009ed0e7 | f94d58fbd7efed8a8c26274407d24658bbc4e7c5 | /tensorflow-tests/test_mnist1.py | 5a42b316c65dafac0fc96fd305163980043cc591 | [] | no_license | ricsanfre/tensorflow-tests | ed2d64ec655cc0a06b756d39b3257556d305379f | a30070e09ce186aa7302ba0aea81b161a45f2492 | refs/heads/main | 2023-05-06T09:45:28.925833 | 2021-05-27T16:20:42 | 2021-05-27T16:20:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,060 | py | import os.path
import tensorflow as tf
LOGDIR = "/home/ricardo/CODE/tensorflow-tests/logs/"
DATADIR = "/home/ricardo/CODE/tensorflow-tests/data/"
### MNIST EMBEDDINGS ###
mnist = tf.contrib.learn.datasets.mnist.read_data_sets(train_dir=DATADIR, one_hot=True, reshape=False, validation_size=0)
def fc_layer(input, size_in, size_out, name="fc", activation="sigmoid"):
    with tf.name_scope(name):
        w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1), name="W")
        b = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
        if activation == "sigmoid":
            act = tf.nn.sigmoid(tf.matmul(input, w) + b)
        elif activation == "softmax":
            act = tf.nn.softmax(tf.matmul(input, w)+b)
        else:
            act = tf.matmul(input, w)+b
        tf.summary.histogram("weights", w)
        tf.summary.histogram("biases", b)
        tf.summary.histogram("activations", act)
        return act


def mnist_model(learning_rate, hparam):
    tf.reset_default_graph()
    sess = tf.Session()
    # Setup placeholders, input images and labels
    x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name="x")
    y = tf.placeholder(tf.float32, shape=[None, 10], name="labels")
    tf.summary.image('input', x, 3)
    # Reshaping input images
    x_reshape = tf.reshape(x, [-1, 784])
    # Number of neurons per layer
    layer_neurons1 = 200
    layer_neurons2 = 100
    layer_neurons3 = 60
    layer_neurons4 = 30
    layer1 = fc_layer(x_reshape, 784, layer_neurons1, "layer1", "sigmoid")
    layer2 = fc_layer(layer1, layer_neurons1, layer_neurons2, "layer2", "sigmoid")
    layer3 = fc_layer(layer2, layer_neurons2, layer_neurons3, "layer3", "sigmoid")
    layer4 = fc_layer(layer3, layer_neurons3, layer_neurons4, "layer4", "sigmoid")
    logits = fc_layer(layer4, layer_neurons4, 10, "logits", "none")
    with tf.name_scope("prediction"):
        prediction = tf.nn.softmax(logits)
        tf.summary.histogram("preditions", prediction)
    with tf.name_scope("xent"):
        xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=y), name="xent")
        tf.summary.scalar("xent", xent)
    with tf.name_scope("train"):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(xent)
    with tf.name_scope("accuracy"):
        correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar("accuracy", accuracy)
    summ = tf.summary.merge_all()
    # saver = tf.train.Saver()
    # Initializing variables
    sess.run(tf.global_variables_initializer())
    # TensorBoard - Initializing
    writer = tf.summary.FileWriter(LOGDIR + hparam)
    # Display Graph in Tensor Board
    writer.add_graph(sess.graph)
    for i in range(2001):
        batch = mnist.train.next_batch(100)
        if i % 5 == 0:
            [train_accuracy, s] = sess.run([accuracy, summ], feed_dict={x: batch[0], y: batch[1]})
            writer.add_summary(s, i)
        # if i % 500 == 0:
        #     [test_accuracy, s_test] = sess.run([accuracy, summ], feed_dict={x: mnist.test.images[:1024], y: mnist.test.labels[:1024]})
        #     saver.save(sess, os.path.join(LOGDIR + "logs/", "model.ckpt"), i)
        sess.run(train_step, feed_dict={x: batch[0], y: batch[1]})


def make_hparam_string(learning_rate):
    return "lr_%.0E" % (learning_rate)


def main():
    print('Starting main')
    for learning_rate in [1E-3, 1E-4]:
        hparam = make_hparam_string(learning_rate)
        print('Starting run for %s' % hparam)
        # Actually run with the new settings
        mnist_model(learning_rate, hparam)
    print('Done training!')
    print('Run `tensorboard --logdir=%s` to see the results.' % LOGDIR)
    print('Running on mac? If you want to get rid of the dialogue asking to give '
          'network permissions to TensorBoard, you can provide this flag: '
          '--host=localhost')


if __name__ == '__main__':
    main()
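# Note: this script targets the TensorFlow 1.x API (tf.contrib, tf.Session, tf.placeholder).
# On TensorFlow 2.x it would only run through the compatibility layer, e.g. (untested sketch):
#
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()
#
# and the tf.contrib.learn MNIST loader would still have to be replaced, since tf.contrib was
# removed in 2.x.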
| [
"[email protected]"
] | |
905c0ef7a9ecf1647c322b2199e529f793019a71 | e5aa66cd24921417899344d85fbf9f5004a15c2b | /db_create.py | 3d228cded3a99c5ec819fc21c2397086ce28e37c | [] | no_license | MarsStirner/caesar | 04af208b0ace5aa61c237d33d9ca07c3730e4e77 | 74ba0b714103d3468b467975707287325d461286 | refs/heads/master | 2020-12-03T00:33:22.704537 | 2015-02-13T09:27:14 | 2015-02-13T09:27:14 | 96,041,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | # -*- coding: utf-8 -*-
#TODO: Add Migrations
#TODO: Add Auto Creation blueprints Tables
from application.app import db, app
with app.app_context():
    db.create_all() | [
"[email protected]"
] | |
d4bf4720ba5e864e43601258c0800bda5cef7f68 | 72041e3fff79e24930b15b5893c7f2268ac7e455 | /thesis/thesisApp/migrations/0008_delete_book.py | acf89f9da2fcc5f0d92584e2d8f3d71e69662a48 | [] | no_license | myyyy/thesis | 4bdf78c1943bc30a1c9df74d3592b88e33f17b69 | b65f3b2f42f6c3053c9dd0c41c9f5469a110a681 | refs/heads/master | 2021-01-10T06:58:13.372219 | 2016-03-16T14:57:05 | 2016-03-16T14:57:05 | 53,477,768 | 0 | 0 | null | 2016-03-18T07:22:59 | 2016-03-09T07:35:58 | HTML | UTF-8 | Python | false | false | 305 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    dependencies = [
        ('thesisApp', '0007_book'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Book',
        ),
    ]
| [
"[email protected]"
] | |
e4f6abf656404cad96834cc94e886dc356333681 | c2f10e271bda2c2095237f6a4fef7524fc096814 | /ex8.py | 4de7440d680cd1d063921511912e346f3c7ee952 | [] | no_license | speeser/learningpython | 3732ef379ac3863daf7f096ddc498be92b150dd9 | 30352dfc1fd8418f6937d416dcfd40010d0a665d | refs/heads/master | 2020-06-07T12:07:50.281284 | 2013-12-26T07:31:59 | 2013-12-26T07:31:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | #!/usr/bin/python
#-- coding: utf-8 --
formatter = "%r %r %r %r"
print formatter % (1, 2, 3, 4)
print formatter % ("one", "two", "three", "four")
print formatter % (True, False, False, True)
print formatter % (formatter, formatter, formatter, formatter)
print formatter % (
"I Had this thing.",
"That you could type up right.",
"But it didn't sing.",
"So I said goodnight."
)
| [
"[email protected]"
] | |
c83b7b5a535d0699dc3f91138cb2269d9e1b9364 | fc6494e30a711079474b7cb08556d63b4f56e8dd | /Chapter4_variables.py | bffbcc5b855302900ae6e40baaae137daa0424c4 | [] | no_license | wsgan001/YiwuLPN | a0b846c6cccd770b286f07f484243774d2a2f4eb | 09235855a3547924a3bfd7695879ee96de784c90 | refs/heads/master | 2023-03-19T06:52:01.971288 | 2021-03-06T06:31:26 | 2021-03-06T06:31:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,695 | py | #搜寻模型——生成变量
import pandas as pds
import networkx as nx
import numpy as np
from industryconvert import convert

edge09 = pds.read_csv("data/2009.csv",engine="python")
node09 = pds.read_csv("data/2009nodeinfo2.csv",engine="python",index_col=0)
edge10 = pds.read_csv("data/2010.csv",engine="python")
node10 = pds.read_csv("data/2010nodeinfo2.csv",engine="python",index_col=2)
dcc = np.load("data/dcc.npy")
iip = np.load("data/iip.npy")

net = nx.DiGraph()  # 2009 network
for i in edge09.index:
    start = edge09['heads'][i]
    end = edge09['tails'][i]
    net.add_edge(start,end)

codeDict = {}    # firm-code mapping, 2010 firm code : 2009 firm code
degreeDict = {}  # degree mapping, 2010 firm code : 2009 firm degree
for i in range(1,3043):
    name = node10["company"][i]
    if name in node09.index:
        c = node09["code"][name]
        codeDict[i] = c
        degreeDict[i] = net.degree(c)
    else:
        degreeDict[i] = 0

# adjacency matrix of the 2010 network (dependent variable)
adjMatrix = np.zeros([3042,3042])
for i in range(10048):
    adjMatrix[edge10["tails"][i]-1,edge10["heads"][i]-1] = 1

# direct distance computation
gd = np.zeros([3042,3042])  # geographic distance matrix
pd = np.zeros([3042,3042])  # product distance matrix
nd = np.zeros([3042,3042])  # network distance matrix
for supplier in range(1,3043):
    for buyer in range(supplier+1,3043):
        # geographic distance
        if node10["area"][supplier] == node10["area"][buyer]:
            gd[supplier-1,buyer-1] = 1
        # product distance
        supplierIndus = convert(node10["industry"][supplier])
        buyerIndus = convert(node10["industry"][buyer])
        pd[supplier-1,buyer-1] = dcc[supplierIndus,buyerIndus]  # direct consumption coefficient
        # network distance
        if (supplier in codeDict) and (buyer in codeDict):
            osc = codeDict[supplier]
            obc = codeDict[buyer]
            if nx.has_path(net,osc,obc):
                nd[supplier-1,buyer-1] = 1 / nx.shortest_path_length(net,osc,obc)
    print(supplier)

# indirect distance computation
igd = np.zeros([3042,3042])  # indirect geographic distance matrix
ipd = np.zeros([3042,3042])  # indirect product distance matrix
for supplier in range(1,3043):
    if supplier in codeDict:
        osc = codeDict[supplier]
        # the supplier's business partners
        partners = set()
        partners = set(net.successors(osc)) | set(net.predecessors(osc))
        for buyer in range(1,3043):
            if supplier != buyer:
                if buyer in codeDict:
                    obc = codeDict[buyer]
                    partners = partners - set([obc])
                l = len(partners)
                if l > 0:
                    # indirect distances
                    sigd, sipd, sind = 0,0,0
                    for p in partners:
                        # indirect geographic distance
                        if node09["area"][node09.index[p-1]] == node10["area"][buyer]:
                            sigd += 1
                        # indirect product distance
                        buyerIndus = convert(node10["industry"][buyer])
                        partnerIndus = convert(node09["industry"][node09.index[p-1]])
                        sipd += iip[partnerIndus,buyerIndus]
                    igd[supplier-1,buyer-1] = sigd / l
                    ipd[supplier-1,buyer-1] = sipd / l
    print(supplier)

# control variables
eco = pds.ExcelFile("data/统计年鉴数据.xlsx")
eco09 = pds.read_excel(eco,sheetname=0,index_col=0)

# build the variable matrices
variables = np.zeros([9250722,9])
dependent = np.zeros([9250722,1])
idx = 0
for supplier in range(1,3043):
    for buyer in range(1,3043):
        if supplier != buyer:
            dependent[idx,0] = adjMatrix[supplier-1,buyer-1]
            variables[idx,0] = degreeDict[supplier]
            variables[idx,1] = degreeDict[buyer]
            variables[idx,5] = igd[supplier-1,buyer-1]
            variables[idx,6] = ipd[supplier-1,buyer-1]
            variables[idx,8] = eco09["人均财政收入"][node10["area"][buyer]] / 1000
            if buyer > supplier:
                variables[idx,2] = gd[supplier-1,buyer-1]
                variables[idx,3] = pd[supplier-1,buyer-1]
                variables[idx,4] = nd[supplier-1,buyer-1]
            else:
                variables[idx,2] = gd[buyer-1,supplier-1]
                variables[idx,3] = pd[buyer-1,supplier-1]
                variables[idx,4] = nd[buyer-1,supplier-1]
            idx += 1
    if supplier % 100 == 0:
        print(supplier)

# save the variables
np.save("data/variables.npy",variables)
np.save("data/dependent.npy",dependent)
"[email protected]"
] | |
658da8cbd49d008a6065fcecd403e3e1a0c07bd8 | 5d57b93a18b68948cb02321b6252b7777698a288 | /cd2021-guiao-3-98388_98430/src/broker.py | 9b4a8c904aaa1676bd09d7526d1121d070076753 | [] | no_license | ricardombrodriguez/CD-PracticalClasses | 3a11113c56a52953a6e1222da95b58a7224aa1ae | 58a7a76bc1f4bf7ce2e29d4ea142f271206bf552 | refs/heads/main | 2023-06-17T02:13:22.963372 | 2021-07-19T17:18:06 | 2021-07-19T17:18:06 | 387,539,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,108 | py | """Message Broker"""
import enum
import json
import pickle
from typing import List
import socket
import selectors
import xml.etree.ElementTree as tree
class Serializer(enum.Enum):
    """Possible message serializers."""

    JSON = 0
    XML = 1
    PICKLE = 2


class Broker:
    """Implementation of a PubSub Message Broker."""

    def __init__(self):
        """Initialize broker."""
        self.canceled = False
        self._host = "localhost"
        self._port = 5000
        self.broker = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.broker.bind((self._host,self._port))
        self.sel = selectors.DefaultSelector()
        self.broker.listen()
        self.topic_consumers = {"/": []} # {topic1 : [(conn1,SerialENUM),(conn2,SerialENUM)], topic2 : [(conn3,serialENUM)] ....} -> stores the subscribers of each topic
        self.topic_message = {"/": []} # topic : last_topic_message -> stores the last message published on that topic
        self.consumers_info = {} # (consumer socket): serialization_type -> stores the serializer of each consumer
        self.producer_topics = [] # [topic1,topic2...] -> stores all of the topics produced by the producers/publishers
        self.sel.register(self.broker, selectors.EVENT_READ, self.accept)

    # connection accept and serialization type storage
    def accept(self,broker,mask):
        conn, addr = broker.accept() #accept connection
        header = conn.recv(2).decode('utf-8') #receives the 2-byte standard serialization message
        header_size = int(header.replace('f','')) #replace headers with 'f' to get the header_size of the next message
        serialization_type = str(conn.recv(header_size).decode('utf-8')) #this received message has the serialization type of the consumer
        if (serialization_type == "JSONQueue"):
            self.consumers_info[conn] = Serializer.JSON
        elif (serialization_type == "XMLQueue"):
            self.consumers_info[conn] = Serializer.XML
        elif (serialization_type == "PickleQueue"):
            self.consumers_info[conn] = Serializer.PICKLE
        self.sel.register(conn, selectors.EVENT_READ, self.handle)

    # operation handler (according to the message received by the Queue)
    def handle(self,conn,mask):
        serialization_type = self.consumers_info[conn] #get serialization type of the consumer
        header = conn.recv(4) #receive the 4-byte standard header
        if header:
            header = int(header.decode('utf-8').replace('f','')) #replace possible 'f' letters from header
            data = conn.recv(header) #receive the encoded message
            # decode message according to the consumer serialization type
            if (serialization_type == Serializer.JSON):
                operation, topic, message = self.decodeJSON(data)
            elif (serialization_type == Serializer.XML):
                operation, topic, message = self.decodeXML(data)
            elif (serialization_type == Serializer.PICKLE):
                operation, topic, message = self.decodePickle(data)
            # possible operations
            if (operation == "PUBLISH"):
                self.put_topic(topic,message)
            elif (operation == "LIST_TOPICS"):
                topic_list = self.list_topics()
                self.send(conn,"LIST_TOPICS",topic_list)
            elif (operation == "SUBSCRIBE"):
                self.subscribe(topic,conn,serialization_type)
            elif (operation == "UNSUBSCRIBE"):
                self.unsubscribe(topic,conn)
        # unsubscribe that consumer from all topics and close connection
        else:
            self.unsubscribe("",conn)
            self.sel.unregister(conn)
            conn.close()

    # send message to the consumer
    def send(self, conn, operation : str, data, topic = ""):
        # message encoding
        serialization_type = self.consumers_info[conn]
        if (serialization_type == Serializer.JSON):
            encoded_msg = self.encodeJSON(operation,topic,data)
        elif (serialization_type == Serializer.XML):
            encoded_msg = self.encodeXML(operation,topic,data)
        elif (serialization_type == Serializer.PICKLE):
            encoded_msg = self.encodePickle(operation,topic,data)
        # send header + message to the consumer (conn)
        header = str(len(encoded_msg))
        size_header = len(header)
        newheader = 'f'*(4-size_header) + header
        conn.send(newheader.encode('utf-8'))
        conn.send(encoded_msg)

    def list_topics(self) -> List[str]:
        """Returns a list of strings containing all topics."""
        return self.producer_topics

    def get_topic(self, topic):
        """Returns the currently stored value in topic."""
        if topic in self.topic_message:
            return self.topic_message[topic]
        else:
            return None

    def put_topic(self, topic, value):
        """Store in topic the value."""
        # store the value as the topic last message and add topic to producer_topics if it doesn't exist
        self.topic_message[topic] = value
        if topic not in self.producer_topics:
            self.producer_topics.append(topic)
        # create new consumer topic if it doesn't exist and migrate all consumers who are subscribed to a super topic of 'topic'
        if topic not in self.topic_consumers.keys():
            self.topic_consumers[topic] = []
            for t in self.topic_consumers.keys():
                if (topic.startswith(t)):
                    for consumer in self.list_subscriptions(t):
                        if consumer not in self.list_subscriptions(topic):
                            self.topic_consumers[topic].append(consumer)
        # send message to all the topic subscribers
        if topic in self.topic_consumers:
            for consumer in self.list_subscriptions(topic):
                self.send(consumer[0],"MESSAGE",value,topic)
        else:
            self.topic_consumers[topic] = []

    def list_subscriptions(self, topic: str) -> List[socket.socket]: #DONE
        """Provide list of subscribers to a given topic."""
        return self.topic_consumers[topic]

    def subscribe(self, topic: str, address: socket.socket, _format: Serializer = None): #DONE
        """Subscribe to topic by client in address."""
        # get consumer_info and store the information if it's not on self.consumers_info yet
        consumer_info = (address,_format)
        if address not in self.consumers_info:
            self.consumers_info[address] = _format
        # create new consumer topic if it doesn't exist and migrate all consumers who are subscribed to a super topic of 'topic'
        if topic not in self.topic_consumers.keys():
            self.topic_consumers[topic] = []
            for t in self.topic_consumers.keys():
                if (topic.startswith(t)):
                    for consumer in self.list_subscriptions(t):
                        if consumer not in self.list_subscriptions(topic):
                            self.topic_consumers[topic].append(consumer)
        self.topic_consumers[topic].append(consumer_info)
        # add connection to each (sub)topic that starts with topic
        for t in self.topic_consumers.keys():
            if (t.startswith(topic) and consumer_info not in self.topic_consumers[t]):
                self.topic_consumers[t].append(consumer_info)
        # send last topic's message to the new subscriber if it exists
        if topic in self.topic_message:
            self.send(address,"message",self.get_topic(topic),topic)

    def unsubscribe(self, topic, address):
        """Unsubscribe to topic by client in address."""
        # get consumer info
        serialization_type = self.consumers_info[address]
        consumer_info = (address,serialization_type)
        # if the consumer has unsubscribed one specific topic (also remove the consumer from its subtopics)
        if (topic != ""):
            for t in self.topic_consumers.keys():
                if (t.startswith(topic)):
                    self.topic_consumers[t].remove(consumer_info)
        # consumer has disconnected (remove it from all the existing topics)
        else:
            for t in self.topic_consumers.keys():
                if (consumer_info in self.topic_consumers[t]):
                    self.topic_consumers[t].remove(consumer_info)

    # ENCODE / DECODE ---> JSON
    def encodeJSON(self, operation, topic, data):
        message = {'operation': operation, 'topic': topic, 'data': data}
        return (json.dumps(message)).encode('utf-8')

    def decodeJSON(self,data):
        data = data.decode('utf-8')
        data = json.loads(data)
        operation = data['operation']
        topic = data['topic']
        message = data['data']
        return operation, topic, message

    def encodeXML(self,operation, topic, data):
        root = tree.Element('root')
        tree.SubElement(root,'operation').set("value",operation)
        tree.SubElement(root,'topic').set("value",topic)
        tree.SubElement(root,'data').set("value",str(data))
        return tree.tostring(root)

    def decodeXML(self,data):
        xml_tree = tree.fromstring(data.decode('utf-8'))
        operation = xml_tree.find('operation').attrib['value']
        topic = xml_tree.find('topic').attrib['value']
        message = xml_tree.find('data').attrib['value']
        return operation,topic,message

    def encodePickle(self,operation, topic, data):
        pickle_dict = {'operation': operation, 'topic': topic, 'data': data}
        return pickle.dumps(pickle_dict)

    def decodePickle(self,data):
        data = pickle.loads(data)
        operation = data['operation']
        topic = data['topic']
        message = data['data']
        return operation, topic, message

    def run(self):
        """Run until canceled."""
        while not self.canceled:
            events = self.sel.select()
            for key, mask in events:
                callback = key.data
                callback(key.fileobj, mask)
| [
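# Illustrative client-side sketch (not part of the Broker class above) of the wire format the
# accept()/handle()/send() methods expect; the queue name, topic and values are made up:
#
#   import socket, json
#   sock = socket.create_connection(("localhost", 5000))
#   # 1) announce the serializer: 2-byte length header, left-padded with 'f', then the name
#   intro = "JSONQueue".encode("utf-8")
#   sock.send(("f" * (2 - len(str(len(intro)))) + str(len(intro))).encode("utf-8"))
#   sock.send(intro)
#   # 2) every later message: 4-byte length header, left-padded with 'f', then the payload
#   payload = json.dumps({"operation": "SUBSCRIBE", "topic": "/weather", "data": ""}).encode("utf-8")
#   sock.send(("f" * (4 - len(str(len(payload)))) + str(len(payload))).encode("utf-8"))
#   sock.send(payload)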
"[email protected]"
] | |
4d7ee48edaf76f609c4fc7645802036a5c11ea83 | 53994424a9f3ce98986f50462c39108456609632 | /compiler/interpreter.py | 4ded1b50720a353ab54d986bf828c747ab916eed | [
"BSD-2-Clause"
] | permissive | ethan2-0/Slang | e87e73f118006e124ff741792133023a4ecede0f | ce0844777b550f51b84d6eba7c80f1a5acf612ce | refs/heads/master | 2020-03-22T21:03:14.678505 | 2020-01-25T02:37:43 | 2020-01-25T02:37:43 | 140,652,588 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,246 | py | from typing import List, Generic, TypeVar
import typesys
import emitter
import parser
import abc
T = TypeVar("T", bound=typesys.AbstractType, covariant=True)
# Not inheriting from abc.ABC: https://stackoverflow.com/a/48554367
class AbstractInterpreterValue(Generic[T]):
    def __init__(self, typ: T) -> None:
        self.type: T = typ

    @abc.abstractmethod
    def equals(self, other: "AbstractInterpreterValue") -> bool:
        raise NotImplementedError

    @abc.abstractmethod
    def human_representation(self) -> str:
        raise NotImplementedError


InterpreterValueAny = AbstractInterpreterValue[typesys.AbstractType]


class InterpreterValueInteger(AbstractInterpreterValue[typesys.IntType]):
    def __init__(self, typ: typesys.IntType, value: int) -> None:
        AbstractInterpreterValue.__init__(self, typ)
        self.value = value

    def equals(self, other: AbstractInterpreterValue) -> bool:
        if not isinstance(other, InterpreterValueInteger):
            return False
        return other.value == self.value

    def human_representation(self) -> str:
        return str(self.value)


class InterpreterValueBoolean(AbstractInterpreterValue[typesys.BoolType]):
    def __init__(self, typ: typesys.BoolType, value: bool) -> None:
        AbstractInterpreterValue.__init__(self, typ)
        self.value = value

    def equals(self, other: AbstractInterpreterValue) -> bool:
        if not isinstance(other, InterpreterValueBoolean):
            return False
        return other.value == self.value

    def human_representation(self) -> str:
        return "true" if self.value else "false"


class InterpreterValueNull(AbstractInterpreterValue[typesys.VoidType]):
    def __init__(self, typ: typesys.VoidType) -> None:
        AbstractInterpreterValue.__init__(self, typ)

    def equals(self, other: AbstractInterpreterValue) -> bool:
        return isinstance(other, InterpreterValueNull)

    def human_representation(self) -> str:
        return "null"


class InterpreterValueArray(AbstractInterpreterValue[typesys.ArrayType]):
    def __init__(self, typ: typesys.ArrayType, value: List[AbstractInterpreterValue]) -> None:
        AbstractInterpreterValue.__init__(self, typ)
        self.value = value

    def equals(self, other: AbstractInterpreterValue) -> bool:
        return other is self

    def human_representation(self) -> str:
        return "[%s]" % ", ".join(value.human_representation() for value in self.value)


class Interpreter:
    def __init__(self, program: "emitter.Program") -> None:
        self.program: "emitter.Program" = program
        self.null = InterpreterValueNull(self.program.types.void_type)
        self.true = InterpreterValueBoolean(self.program.types.bool_type, True)
        self.false = InterpreterValueBoolean(self.program.types.bool_type, False)
        self.dummy_scope = emitter.Scopes()

    def create_int_value(self, value: int) -> InterpreterValueInteger:
        return InterpreterValueInteger(self.program.types.int_type, value)

    def create_array_value(self, type: typesys.ArrayType, values: List[AbstractInterpreterValue]) -> InterpreterValueArray:
        return InterpreterValueArray(type, values)

    def normalize_negative(self, num: int) -> int:
        bitmask = 0xffffffffffffffff
        if num & 0x8000000000000000 != 0:
            num = (((~num) & bitmask) + 1) & bitmask
        return num

    def eval_expr(self, node: parser.Node) -> InterpreterValueAny:
        # TODO: Support referencing other static variables
        # Bitmask for 64-bit computation
        bitmask = 0xffffffffffffffff
        if node.i("number"):
            return self.create_int_value(int(node.data_strict))
        elif node.i("true"):
            return self.true
        elif node.i("false"):
            return self.false
        elif node.i("null"):
            return self.null
        elif node.of("+", "*", "^", "&", "|"):
            lhs = self.eval_expr(node[0])
            rhs = self.eval_expr(node[1])
            if not isinstance(lhs, InterpreterValueInteger) or not isinstance(rhs, InterpreterValueInteger):
                raise typesys.TypingError(node, "Attempt to perform arithmetic on something that isn't an integers")
            if node.i("+"):
                ret = lhs.value + rhs.value
            elif node.i("*"):
                ret = lhs.value * rhs.value
            elif node.i("^"):
                ret = lhs.value ^ rhs.value
            elif node.i("&"):
                ret = lhs.value & rhs.value
            elif node.i("|"):
                ret = lhs.value | rhs.value
            return self.create_int_value(ret & bitmask)
        elif node.of("and", "or"):
            lhs = self.eval_expr(node[0])
            rhs = self.eval_expr(node[1])
            if not isinstance(lhs, InterpreterValueBoolean) or not isinstance(rhs, InterpreterValueBoolean):
                raise typesys.TypingError(node, "Attempt to perform logical operation on something that isn't an integers")
            if node.i("and"):
                ret = lhs.value and rhs.value
            elif node.i("or"):
                ret = lhs.value or rhs.value
            return self.true if ret else self.false
        elif node.i("not"):
            val = self.eval_expr(node[0])
            if not isinstance(val, InterpreterValueBoolean):
                raise typesys.TypingError(node, "Attempt to perform logical operation on something that isn't an integers")
            return self.false if val.value else self.true
        elif node.of(">=", "<=", "<", ">"):
            lhs = self.eval_expr(node[0])
            rhs = self.eval_expr(node[1])
            if not isinstance(lhs, InterpreterValueInteger) or not isinstance(rhs, InterpreterValueInteger):
                raise typesys.TypingError(node, "Attempt to perform arithmetic on something that isn't an integers")
            lhs_value = self.normalize_negative(lhs.value)
            rhs_value = self.normalize_negative(rhs.value)
            if node.i(">="):
                ret = lhs_value >= rhs_value
            elif node.i("<="):
                ret = lhs_value <= rhs_value
            elif node.i("<"):
                ret = lhs_value < rhs_value
            elif node.i(">"):
                ret = lhs_value > rhs_value
            else:
                raise ValueError("This is a compiler bug")
            return self.true if ret else self.false
        elif node.of("==", "!="):
            lhs = self.eval_expr(node[0])
            rhs = self.eval_expr(node[1])
            if not lhs.type.is_assignable_to(rhs.type) and not rhs.type.is_assignable_to(lhs.type):
                raise typesys.TypingError(node, "Incomparable types: '%s' and '%s'" % (lhs.type, rhs.type))
            return self.true if lhs.equals(rhs) ^ (True if node.i("!=") else False) else self.false
        elif node.i("-") and len(node) == 2:
            lhs = self.eval_expr(node[0])
            rhs = self.eval_expr(node[1])
            if not isinstance(lhs, InterpreterValueInteger) or not isinstance(rhs, InterpreterValueInteger):
                raise typesys.TypingError(node, "Attempt to perform arithmetic on something that isn't an integers")
            return self.create_int_value((lhs.value - rhs.value) & bitmask)
        elif (node.i("-") and len(node) == 1) or node.i("~"):
            val = self.eval_expr(node[0])
            if not isinstance(val, InterpreterValueInteger):
                raise typesys.TypingError(node, "Attempt to negate something that isn't an integer")
            return self.create_int_value((-val.value if node.i("-") else ~val.value) & bitmask)
        elif node.i("/"):
            lhs = self.eval_expr(node[0])
            rhs = self.eval_expr(node[1])
            if not isinstance(lhs, InterpreterValueInteger) or not isinstance(rhs, InterpreterValueInteger):
                raise typesys.TypingError(node, "Attempt to perform arithmetic on something that isn't an integers")
            lhs_value = lhs.value
            rhs_value = rhs.value
            # Make sure, if our value is negative, we're dividing by a
            # negative rather than a very large value (due to two's
            # complement)
            lhs_value = self.normalize_negative(lhs_value)
            rhs_value = self.normalize_negative(rhs_value)
            res = lhs_value // rhs_value
            if res * rhs.value != lhs.value:
                # Python rounds toward negative infinity, whereas C
                # (and thus our language) rounds toward 0.
                if res < 0:
                    res += 1
            return self.create_int_value(res & bitmask)
        elif node.i("["):
            typ = self.program.types.decide_type(node, emitter.Scopes(), None)
            if not isinstance(typ, typesys.ArrayType):
                raise ValueError("This is a compiler bug.")
            # All of our typechecking has already been taken care of in
            # the logic in typesys there
            result_values: List[AbstractInterpreterValue] = []
            for child in node:
                result_values.append(self.eval_expr(child))
            return self.create_array_value(typ, result_values)
        elif node.i("arrinst"):
            typ = self.program.types.decide_type(node, emitter.Scopes(), None)
            if not isinstance(typ, typesys.ArrayType):
                raise ValueError("This is a compiler bug.")
            # Same thing, all of our typechecking is delegated to typesys
            array_len = self.eval_expr(node[1])
            if not isinstance(array_len, InterpreterValueInteger):
                raise typesys.TypingError(node[1], "Type of array length must be an integer")
            if array_len.value > 1024:
                node.warn("Statically creating a very large array, this will make your bytecode file very large: %d" % array_len.value)
            arrinst_result_values: List[AbstractInterpreterValue]
            if isinstance(typ.parent_type, typesys.IntType):
                arrinst_result_values = [self.create_int_value(0)] * array_len.value
            elif isinstance(typ.parent_type, typesys.BoolType):
                arrinst_result_values = [self.false] * array_len.value
            else:
                arrinst_result_values = [self.null] * array_len.value
            return self.create_array_value(typ, arrinst_result_values)
        elif node.i("#"):
            val = self.eval_expr(node[0])
            if not isinstance(val, InterpreterValueArray):
                raise typesys.TypingError(node[0], "Cannot find length of something that isn't an array")
            return self.create_int_value(len(val.value))
        elif node.i("access"):
            lhs = self.eval_expr(node[0])
            rhs = self.eval_expr(node[1])
            if not isinstance(lhs, InterpreterValueArray):
                raise typesys.TypingError(node[0], "Can't access an element of something that isn't an array")
            if not isinstance(rhs, InterpreterValueInteger):
                raise typesys.TypingError(node[1], "Array indices must be integers")
            return lhs.value[rhs.value]
        else:
            node.compile_error("Cannot evaluate expression at compile-time")
| [
"[email protected]"
] | |
0807a7286c1ebab68bf7b40f1d17f15b483f1c8f | cbbc8652087c22f3474ca5d8db619d9b4210ca95 | /com/kakao/cafe/menu/tea/lavenderTea.py | 84b9af134ac344eea5460a5d87913ccd45d55e45 | [
"MIT"
] | permissive | bamin0422/kakao-cafe | da1afa61347bc5fefd93b11bb1f329d913e679c3 | f5aca5f6eaf3637eca6af136157f53737c0bffc8 | refs/heads/master | 2021-05-22T21:53:13.170602 | 2020-04-04T19:28:54 | 2020-04-04T19:28:54 | 253,113,215 | 2 | 0 | MIT | 2020-04-04T22:48:50 | 2020-04-04T22:48:49 | null | UTF-8 | Python | false | false | 1,097 | py | from com.kakao.cafe.menu.tea.tea import Tea
class LavenderTea(Tea):
    def __init__(self):
        super().__init__()
        self.__lavenderTea = 1
        self.name = "LavenderTea"
        self.__price = 3500
        self.__water = 300

    def getName(self) -> str:
        return self.name

    def setName(self, name: str) -> None:
        self.name = name

    def getPrice(self) -> int:
        return self.__price

    def setPrice(self, price: int) -> None:
        self.__price = price

    def isIced(self) -> bool:
        return self._iced

    def setIced(self, iced: bool) -> None:
        self._iced = iced

    def getWater(self) -> int:
        return self.__water

    def setWater(self, water: int) -> None:
        self.__water = water

    def getLavenderTea(self) -> int:
        return self.__lavenderTea

    def setLavenderTea(self, lavenderTea: int) -> None:
        self.__lavenderTea = lavenderTea

    def addLavenderTea(self, amount: int) -> None:
        self.setLavenderTea(self.getLavenderTea() + amount)
        self.setPrice(self.getPrice() + amount * 500)
| [
"[email protected]"
] | |
feea2a313d9429af3bdd7dea8044c61e70a33333 | 01ae57dd2ee10e12256147ec2c8aab2b24b589b8 | /huster/server.py | 1eaf18ee0d49a110054b7b44e8db7d8d3bfe0044 | [
"MIT"
] | permissive | ZQPei/Huster | 4be331fa3d8f419b2d473ef48ab66b800e6c854b | 61e2cf7d52ec624c66e0755df2129bb906a3317b | refs/heads/master | 2020-11-23T19:51:08.290688 | 2019-12-16T13:13:00 | 2019-12-16T13:13:00 | 227,796,868 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | import os
import sys
if sys.version_info.major == 2:
    from .SimpleHTTPServerWithUpload_py2 import server
elif sys.version_info.major == 3:
    from .SimpleHTTPServerWithUpload_py3 import server
else:
    raise RuntimeError("Python version not found!")

__all__ = ['build_server', 'run_server']


def run_server(port=8088, base_dir="/"):
    if os.path.isdir(base_dir):
        os.chdir(base_dir)
    else:
        raise UserWarning("base_dir is not a rightful directory")
        os.chdir("/")
    server(port=port)


def parse_args():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", default=8000, type=int)
    parser.add_argument("--base_dir", default="/", type=str)
    return parser.parse_args()


def build_server():
    args = parse_args()
    run_server(port=args.port, base_dir=args.base_dir)


if __name__ == "__main__":
    build_server()
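# Example invocation (the port and directory values are illustrative):
#
#   python server.py --port 8000 --base_dir /srv/share
#
# build_server() parses --port and --base_dir (defined in parse_args above) and then serves
# files from that directory through the bundled SimpleHTTPServerWithUpload module.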
| [
"[email protected]"
] |