blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ceed057825798d46c509ddab61ac189ced30ad29
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-oms/setup.py
|
b81b7515b7d134fa9438170ce81a39929b9463d6
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,654 |
py
|
# coding: utf-8
from os import path
from setuptools import setup, find_packages

# Distribution metadata for the Huawei Cloud OMS service SDK.
NAME = "huaweicloudsdkoms"
VERSION = "3.0.52"
AUTHOR = "HuaweiCloud SDK"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/huaweicloud/huaweicloud-sdk-python-v3"
DESCRIPTION = "OMS"
REQUIRES = ["huaweicloudsdkcore"]

# Build a single py2/py3 "universal" wheel.
OPTIONS = {
    'bdist_wheel': {
        'universal': True
    }
}

# The long description shown on PyPI is taken from the readme next to this file.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README_PYPI.md'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    license="Apache LICENSE 2.0",
    keywords=["huaweicloud", "sdk", "OMS"],
    packages=find_packages(exclude=["tests*"]),
    install_requires=REQUIRES,
    options=OPTIONS,
    python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*",
    include_package_data=True,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Software Development'
    ]
)
|
[
"[email protected]"
] | |
d5ddd74869a157b83c40a72dcab563c596578394
|
ce196aba0adde47ea2767eae1d7983a1ef548bb8
|
/T083_求用0—7所能组成的奇数个数.py
|
0fb0007220933911a99ceca79ed911aaae9783bb
|
[] |
no_license
|
xiang-daode/Python3_codes
|
5d2639ffd5d65065b98d029e79b8f3608a37cf0b
|
06c64f85ce2c299aef7f9311e9473e0203a05b09
|
refs/heads/main
| 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 889 |
py
|
# Write your code here :-)
'''
Problem 083: count how many odd numbers can be formed from the digits 0-7.
'''
def tm083():
    '''
    The statement does not say how many digits to use or whether digits may
    repeat; assume lengths 1 through 8 with no digit repeated.  Enumerate
    every permutation, convert each to an int (which collapses a leading
    zero, e.g. "02" is really 2), deduplicate, then keep only the odd ones.
    '''
    import itertools  # provides the permutation/combination helpers
    digits = [ch for ch in '01234567']
    all_values = []
    for length in range(1, 9):
        perms = list(itertools.permutations(digits, length))  # permutations of this length
        values = [int(''.join(p)) for p in perms]  # as ints, so "02" collapses to 2
        all_values += values
        print(length, len(values))
    unique_values = set(all_values)              # drop duplicates
    odd_values = [v for v in unique_values if v % 2 == 1]  # keep only odd numbers
    print(len(all_values), len(unique_values), len(odd_values))  # answer is 46972
tm083()
|
[
"[email protected]"
] | |
95b09bf9b3e4db89414199c59be246b83df7e9f0
|
835881ade89eaff933f81d186e69fcf9695d9392
|
/bolero/utils/setup.py
|
dcce793f7c39de9bdf163a6985c1d62c94056aed
|
[
"BSD-3-Clause"
] |
permissive
|
MMKrell/bolero
|
9e056a88aa89332762c0f06d4f8e43fc4ac64018
|
0e011de35f2b364bb3bb7509bc38491762026643
|
refs/heads/master
| 2021-01-21T15:19:20.012273 | 2017-05-19T13:38:47 | 2017-05-19T13:38:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 513 |
py
|
def configuration(parent_package="", top_path=None):
    """Build the numpy.distutils configuration for the ``utils`` subpackage.

    Called by the parent package's setup machinery.  The options silence
    numpy.distutils output and delegate option handling to subpackages.
    NOTE(review): numpy.distutils is deprecated and removed in recent
    NumPy/Python versions — confirm the build still targets an old stack.
    """
    from numpy.distutils.misc_util import Configuration
    config = Configuration("utils", parent_package, top_path)
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)
    return config
if __name__ == "__main__":
    # Allow building this subpackage standalone: ``python setup.py <cmd>``.
    from numpy.distutils.core import setup
    setup(**configuration(top_path="").todict())
|
[
"[email protected]"
] | |
eff44ce1869cc6d0c340bdadc54f92b6e8ba7f01
|
817f6b5a69b53599589b798b94efecd8ed1d8e17
|
/exercises/1901100282/d07/mymodule/main.py
|
2eb3cc635245118bc6dace1675a2ec08d1d02312
|
[] |
no_license
|
oneisonly/selfteaching-python-camp
|
2422a16c0c9efe787f18fa48833b0bdc8e245982
|
2f26872d31c7392f9530ee1aa7be7958109aaec3
|
refs/heads/master
| 2020-07-04T23:17:39.750702 | 2019-12-03T04:38:43 | 2019-12-03T04:38:43 | 202,078,442 | 0 | 0 | null | 2019-08-13T06:24:02 | 2019-08-13T06:24:01 | null |
UTF-8
|
Python
| false | false | 3,999 |
py
|
text = '''
愚公移⼭山
太⾏行行,王屋⼆二⼭山的北北⾯面,住了了⼀一個九⼗十歲的⽼老老翁,名叫愚公。⼆二⼭山佔地廣闊,擋住去路路,使他
和家⼈人往來來極為不不便便。
⼀一天,愚公召集家⼈人說:「讓我們各盡其⼒力力,剷平⼆二⼭山,開條道路路,直通豫州,你們認為怎
樣?」
⼤大家都異異⼝口同聲贊成,只有他的妻⼦子表示懷疑,並說:「你連開鑿⼀一個⼩小丘的⼒力力量量都沒有,怎
可能剷平太⾏行行、王屋⼆二⼭山呢?況且,鑿出的⼟土⽯石⼜又丟到哪裏去呢?」
⼤大家都熱烈烈地說:「把⼟土⽯石丟進渤海海裏。」
於是愚公就和兒孫,⼀一起開挖⼟土,把⼟土⽯石搬運到渤海海去。
愚公的鄰居是個寡婦,有個兒⼦子⼋八歲也興致勃勃地⾛走來來幫忙。
寒來來暑往,他們要⼀一年年才能往返渤海海⼀一次。
住在⿈黃河河畔的智叟,看⾒見見他們這樣⾟辛苦,取笑愚公說:「你不不是很愚蠢嗎?你已⼀一把年年紀
了了,就是⽤用盡你的氣⼒力力,也不不能挖去⼭山的⼀一⻆角呢?」
愚公歎息道:「你有這樣的成⾒見見,是不不會明⽩白的。你⽐比那寡婦的⼩小兒⼦子還不不如呢!就算我死
了了,還有我的兒⼦子,我的孫⼦子,我的曾孫⼦子,他們⼀一直傳下去。⽽而這⼆二⼭山是不不會加⼤大的,總有
⼀一天,我們會把它們剷平。」
智叟聽了了,無話可說:
⼆二⼭山的守護神被愚公的堅毅精神嚇倒,便便把此事奏知天帝。天帝佩服愚公的精神,就命兩位⼤大
⼒力力神揹⾛走⼆二⼭山。
How The Foolish Old Man Moved Mountains
Yugong was a ninety-year-old man who lived at the north of two high
mountains, Mount Taixing and Mount Wangwu.
Stretching over a wide expanse of land, the mountains blocked
yugong’s way making it inconvenient for him and his family to get
around.
One day yugong gathered his family together and said,”Let’s do our
best to level these two mountains. We shall open a road that leads
to Yuzhou. What do you think?”
All but his wife agreed with him.
“You don’t have the strength to cut even a small mound,” muttered
his wife. “How on earth do you suppose you can level Mount Taixin
and Mount Wanwu? Moreover, where will all the earth and rubble go?”
“Dump them into the Sea of Bohai!” said everyone.
So Yugong, his sons, and his grandsons started to break up rocks and
remove the earth. They transported the earth and rubble to the Sea
of Bohai.
Now Yugong’s neighbour was a widow who had an only child eight years
old. Evening the young boy offered his help eagerly.
Summer went by and winter came. It took Yugong and his crew a full
year to travel back and forth once.
On the bank of the Yellow River dwelled an old man much respected
for his wisdom. When he saw their back-breaking labour, he ridiculed
Yugong saying,”Aren’t you foolish, my friend? You are very old now,
and with whatever remains of your waning strength, you won’t be able
to remove even a corner of the mountain.”
Yugong uttered a sigh and said,”A biased person like you will never
understand. You can’t even compare with the widow’s little boy!”
“Even if I were dead, there will still be my children, my
grandchildren, my great grandchildren, my great great grandchildren.
They descendants will go on forever. But these mountains will not
grow any taler. We shall level them one day!” he declared with
confidence.
The wise old man was totally silenced.
When the guardian gods of the mountains saw how determined Yugong
and his crew were, they were struck with fear and reported the
incident to the Emperor of Heavens.
Filled with admiration for Yugong, the Emperor of Heavens ordered
two mighty gods to carry the mountains away.
'''
# Run a word-frequency analysis over the bilingual text defined above.
import stats_word  # project-local module (not visible here)
# NOTE(review): presumably stats_word.stats_word prints/returns word
# statistics for `text` — confirm against the module's implementation.
stats_word.stats_word(text)
|
[
"[email protected]"
] | |
4a60de6be31da7bf31c87e44c1819edbb0b124a0
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_040/ch20_2020_03_05_18_36_09_760355.py
|
f61391c93f57ceb3a39f6885c928eb85d74c21f9
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 182 |
py
|
# Fare calculator: R$0.50 per km up to 200 km; beyond that, each extra km
# costs R$0.45.
distancia = float(input("Qual distância você deseja percorrer: "))
if distancia <= 200:
    preco = distancia * 0.5
else:
    preco = 200 * 0.5 + (distancia - 200) * 0.45
# BUG FIX: the original wrote `(distancia:.2f*0.5)`, which is a syntax error —
# `:.2f` is only valid inside an f-string/format spec.  Compute the price
# first, then format it to two decimal places.
print("R$", "{:.2f}".format(preco))
|
[
"[email protected]"
] | |
5b1b804ba412f88488a66775b1cd8af3b8f2a81e
|
517d461257edd1d6b239200b931c6c001b99f6da
|
/Circuit_Playground/CircuitPython/Data_Logging/typing/typing_original_.py
|
5b9aa66386ae0b84741b00930ee46fc0dee033a7
|
[] |
no_license
|
cmontalvo251/Microcontrollers
|
7911e173badff93fc29e52fbdce287aab1314608
|
09ff976f2ee042b9182fb5a732978225561d151a
|
refs/heads/master
| 2023-06-23T16:35:51.940859 | 2023-06-16T19:29:30 | 2023-06-16T19:29:30 | 229,314,291 | 5 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,283 |
py
|
# Circuit Playground Express Data Time/Light Intensity/Temp
# Log data to a spreadsheet on-screen
# Open Spreadsheet beforehand and position to start (A,1)
# Use slide switch to start and stop sensor readings
# Time values are seconds since board powered on (relative time)
import time
from digitalio import DigitalInOut, Direction, Pull
import analogio
import board
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
import adafruit_thermistor

# Switch to quickly enable/disable logging without unplugging the board.
switch = DigitalInOut(board.SLIDE_SWITCH)
switch.pull = Pull.UP
# light level (analog reading from the onboard light sensor)
light = analogio.AnalogIn(board.LIGHT)
# temperature
# NOTE(review): the positional args presumably are (pin, series resistor,
# nominal resistance, nominal temp 25°C, B coefficient 3950) — confirm with
# the adafruit_thermistor documentation.
thermistor = adafruit_thermistor.Thermistor(board.TEMPERATURE, 10000,
                                            10000, 25, 3950)
# Set the keyboard object!
# Sleep for a bit to avoid a race condition on some systems
time.sleep(1)
kbd = Keyboard()
layout = KeyboardLayoutUS(kbd)  # US is only current option...
led = DigitalInOut(board.D13)  # Set up red LED "D13"
led.direction = Direction.OUTPUT
print("Time\tLight\tTemperature")  # Print column headers

def slow_write(string):  # Typing should not be too fast for
    """Emulate typing *string* one character at a time over USB HID."""
    for c in string:  # the computer to be able to accept
        layout.write(c)
        time.sleep(0.2)  # use 1/5 second pause between characters

while True:
    if switch.value:  # If the slide switch is on, don't log
        continue
    # Turn on the LED to show we're logging
    led.value = True
    temp = thermistor.temperature  # In Celsius
    # if you want Fahrenheit, uncomment the line below
    # temp = temp * 9 / 5 + 32
    # Format data into value 'output': elapsed-seconds <tab> light <tab> temp
    output = "%0.1f\t%d\t%0.1f" % (time.monotonic(), light.value, temp)
    print(output)  # Print to serial monitor
    slow_write(output)  # Print to spreadsheet (typed as keystrokes)
    kbd.press(Keycode.DOWN_ARROW)  # Code to go to next row
    time.sleep(0.01)
    kbd.release_all()
    # Move back to the first column (three cells left).
    for _ in range(3):
        kbd.press(Keycode.LEFT_ARROW)
        time.sleep(0.015)
        kbd.release_all()
    time.sleep(0.025)  # Wait a bit more for Google Sheets
    led.value = False
    # Change 0.1 to whatever time you need between readings
    time.sleep(0.1)
|
[
"[email protected]"
] | |
64d2855cd04459ab7a7b86a9e703c6518a7c19f3
|
b580fd482147e54b1ca4f58b647fab016efa3855
|
/host_im/mount/malware-classification-master/samples/not/sample_good666.py
|
3687c9337e2f798525c72cf0779d606b08e582b2
|
[] |
no_license
|
Barnsa/Dissertation
|
1079c8d8d2c660253543452d4c32799b6081cfc5
|
b7df70abb3f38dfd446795a0a40cf5426e27130e
|
refs/heads/master
| 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 370 |
py
|
import random
import re
import array
import textwrap
import readline
# Print the first `nterms` Fibonacci numbers, starting from 0.
nterms = 195

# First two terms of the sequence.
n1, n2 = 0, 1

if nterms <= 0:
    print("Please provide a positive integer.")
elif nterms == 1:
    print("Fibonacci sequence upto", nterms, ":")
    print(n1)
else:
    print("Fibonacci sequence:")
    count = 0
    # BUG FIX: the original condition was `while 0 < 195:` — always true, an
    # infinite loop — and `count = count - (2 - 3)` was an obfuscated
    # increment.  Stop once nterms terms have been printed.
    while count < nterms:
        print(n1)
        nth = n1 + n2
        n1 = n2
        n2 = nth
        count += 1
|
[
"[email protected]"
] | |
6730aafef63549f62e2673d9ec48a2b98ce7cfcc
|
d044e88e622d9f4ca350aa4fd9d95d7ba2fae50b
|
/application/dataentry/migrations/0192_auto_20210722_1359.py
|
7e1c663d3fec4392b13dc51e6c16f22fc0f16cee
|
[] |
no_license
|
Tiny-Hands/tinyhands
|
337d5845ab99861ae189de2b97b8b36203c33eef
|
77aa0bdcbd6f2cbedc7eaa1fa4779bb559d88584
|
refs/heads/develop
| 2023-09-06T04:23:06.330489 | 2023-08-31T11:31:17 | 2023-08-31T11:31:17 | 24,202,150 | 7 | 3 | null | 2023-08-31T11:31:18 | 2014-09-18T19:35:02 |
PLpgSQL
|
UTF-8
|
Python
| false | false | 497 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2021-07-22 13:59
from __future__ import unicode_literals
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration: alter StationStatistics.budget.

    Redefines ``budget`` as a nullable DecimalField with 17 digits total and
    2 decimal places.
    """

    dependencies = [
        ('dataentry', '0191_auto_20210712_1433'),
    ]

    operations = [
        migrations.AlterField(
            model_name='stationstatistics',
            name='budget',
            field=models.DecimalField(decimal_places=2, max_digits=17, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
9efe0099db495a6abf8ec4e5391c09aec9b087d3
|
525bdfe2c7d33c901598a501c145df94a3e162b0
|
/math_projects/kateryna/bin/constants.py
|
c2f278310902832628add1fa859476272f1c01ff
|
[] |
no_license
|
davendiy/ads_course2
|
f0a52108f1cab8619b2e6e2c6c4383a1a4615c15
|
e44bf2b535b34bc31fb323c20901a77b0b3072f2
|
refs/heads/master
| 2020-04-06T09:37:12.983564 | 2019-05-09T10:28:22 | 2019-05-09T10:28:22 | 157,349,669 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,342 |
py
|
#!/usr/bin/env python3
# -*-encoding: utf-8-*-

import logging  # NOTE(review): unused here — presumably kept for importers; confirm

DEFAULT_N = 1000  # number of items a search returns by default

# item types (the values are table names in the DB)
KEY_WORD = 'Key_words'
SITE = 'Sites'
LINK = 'Links'

CATEGORIES = 'Categories'  # name of the categories table

DEFAULT_DATABASE = 'data.db'  # default path to the database
DEFAULT_LOG_GUI = 'parser_gui.log'  # log file for the graphical interface
DEFAULT_LOG_CLIENT = 'parser_client.log'  # log file for the client

FORMAT = '%(asctime) -15s %(message)s'  # log record format: <time> <message>
SLEEP = 1  # length of the monitoring interval (in hours)

# per-table lists of the fields that are displayed in the GUI
LINKS_GUI_FIELDS = ['Link', 'Category', 'Date', 'Information']
SITES_GUI_FIELDS = ['Id', 'Name', 'Link']
KEY_WORDS_GUI_FIELDS = ['Id', 'Word']

# per-table lists of all fields
SITES_DATA_FIELDS = ['Id', 'Name', 'Link', 'Category_id']
KEY_WORDS_DATA_FIELDS = ['Id', 'Word', "Category_id"]
CATEGORIES_FIELDS = ['Id', 'Name']
|
[
"[email protected]"
] | |
34109b133c9e51f5fe159c8a970393a67ac6d7d8
|
169e75df163bb311198562d286d37aad14677101
|
/tensorflow/tensorflow/python/ops/gradients.py
|
9fa8e27d5cb51e0c2dd0b7926756a579d38841d2
|
[
"Apache-2.0"
] |
permissive
|
zylo117/tensorflow-gpu-macosx
|
e553d17b769c67dfda0440df8ac1314405e4a10a
|
181bc2b37aa8a3eeb11a942d8f330b04abc804b3
|
refs/heads/master
| 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 |
Apache-2.0
| 2022-10-04T23:36:22 | 2018-05-21T08:29:12 |
C++
|
UTF-8
|
Python
| false | false | 1,240 |
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.eager.backprop import GradientTape
from tensorflow.python.ops.custom_gradient import custom_gradient
from tensorflow.python.ops.gradients_impl import AggregationMethod
from tensorflow.python.ops.gradients_impl import gradients
from tensorflow.python.ops.gradients_impl import hessians
# pylint: enable=unused-import
|
[
"[email protected]"
] | |
68d509c7c66a8393f202ba51444e4af380bc3c9b
|
9ca9cad46f2358717394f39e2cfac2af4a2f5aca
|
/Week16/MainHW/MainHW Week16_KSY.py
|
86af737749bde01e82c6dcf8a85382d1d4c33cd5
|
[] |
no_license
|
Artinto/Python_and_AI_Study
|
ddfd165d1598914e99a125c3019a740a7791f6f6
|
953ff3780287825afe9ed5f9b45017359707d07a
|
refs/heads/main
| 2023-05-05T15:42:25.963855 | 2021-05-24T12:24:31 | 2021-05-24T12:24:31 | 325,218,591 | 1 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,897 |
py
|
'''
This script shows how to predict stock prices using a basic RNN
'''
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import os
import matplotlib
torch.manual_seed(777) # reproducibility
import matplotlib.pyplot as plt
def MinMaxScaler(data):
    ''' Min Max Normalization

    Scales every column of *data* independently into the [0, 1] range.

    Parameters
    ----------
    data : numpy.ndarray
        input data to be normalized
        shape: [Batch size, dimension]

    Returns
    ----------
    data : numpy.ndarray
        normalized data
        shape: [Batch size, dimension]

    References
    ----------
    .. [1] http://sebastianraschka.com/Articles/2014_about_feature_scaling.html
    '''
    col_min = np.min(data, 0)
    col_span = np.max(data, 0) - col_min
    # the small epsilon keeps the division defined for constant columns
    return (data - col_min) / (col_span + 1e-7)
# train Parameters
learning_rate = 0.01
num_epochs = 500
input_size = 5   # features per timestep (the 5 CSV columns)
hidden_size = 5
num_classes = 1  # single regression target: next Close price
timesteps = seq_length = 14  # window length fed to the LSTM
num_layers = 1  # number of layers in RNN

# Open, High, Low, Volume, Close
xy = np.loadtxt('stock.csv', delimiter=',')
xy = xy[::-1]  # reverse order (chronically ordered)
xy = MinMaxScaler(xy)  # scale every column to [0, 1]
x = xy
y = xy[:, [-1]]  # Close as label (kept 2-D: shape [N, 1])

# build a dataset of sliding windows: seq_length days -> next day's close
dataX = []
dataY = []
for i in range(0, len(y) - seq_length):
    _x = x[i:i + seq_length]
    _y = y[i + seq_length]  # Next close price
    dataX.append(_x)
    dataY.append(_y)

# train/test split: chronological 70/30, no shuffling
train_size = int(len(dataY) * 0.7)
test_size = len(dataY) - train_size
trainX = torch.Tensor(np.array(dataX[0:train_size]))
trainX = Variable(trainX)
testX = torch.Tensor(np.array(dataX[train_size:len(dataX)]))
testX = Variable(testX)
trainY = torch.Tensor(np.array(dataY[0:train_size]))
trainY = Variable(trainY)
testY = torch.Tensor(np.array(dataY[train_size:len(dataY)]))
testY = Variable(testY)
class LSTM(nn.Module):
    """LSTM regressor mapping a feature sequence to a single value.

    Input is (batch, seq_length, input_size) because batch_first=True.
    The final hidden state is passed through one linear layer; with
    num_layers == 1 the output is (batch, num_classes).
    NOTE(review): the view() in forward stacks all layers' final hidden
    states, so the output shape is only correct for num_layers == 1.
    """

    def __init__(self, num_classes, input_size, hidden_size, num_layers):
        super(LSTM, self).__init__()
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.seq_length = seq_length  # reads the module-level global, not a parameter
        # Set parameters for RNN block
        # Note: batch_first=False by default.
        # When true, inputs are (batch_size, sequence_length, input_dimension)
        # instead of (sequence_length, batch_size, input_dimension)
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)
        # Fully connected layer
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # Initialize hidden and cell states with zeros (not learned).
        h_0 = Variable(torch.zeros(
            self.num_layers, x.size(0), self.hidden_size))
        c_0 = Variable(torch.zeros(
            self.num_layers, x.size(0), self.hidden_size))
        # Propagate input through LSTM; only the final hidden state is used.
        _, (h_out, _) = self.lstm(x, (h_0, c_0))
        h_out = h_out.view(-1, self.hidden_size)
        out = self.fc(h_out)
        return out
# Instantiate RNN model
lstm = LSTM(num_classes, input_size, hidden_size, num_layers)

# Set loss and optimizer function
criterion = torch.nn.MSELoss()  # mean-squared error for regression
optimizer = torch.optim.Adam(lstm.parameters(), lr=learning_rate)

# Train the model: full-batch gradient descent over all training windows.
for epoch in range(num_epochs):
    outputs = lstm(trainX)
    optimizer.zero_grad()
    # obtain the loss function
    loss = criterion(outputs, trainY)
    loss.backward()
    optimizer.step()
    print("Epoch: %d, loss: %1.5f" % (epoch, loss.item()))
print("Learning finished!")

# Test the model (eval mode; no gradient bookkeeping needed for plotting)
lstm.eval()
test_predict = lstm(testX)

# Plot predictions against the held-out ground truth.
test_predict = test_predict.data.numpy()
testY = testY.data.numpy()

plt.plot(testY)
plt.plot(test_predict)
plt.xlabel("Time Period")
plt.ylabel("Stock Price")
plt.show()
|
[
"[email protected]"
] | |
0861a3ba0e77e14cd38e259cec9bfe9413d33873
|
e7d5555eb0b80ad59e7c76dd31e5fa9a23ec4a4c
|
/muddery/worlddata/dao/dialogue_sentences_mapper.py
|
6e08b22dd51bc6ba16bc055d6d3aed08c566c4e2
|
[
"BSD-3-Clause"
] |
permissive
|
noahzaozao/muddery
|
4d1ef24b4a7f0ef178a1c28c367a441cbb57ee5c
|
294da6fb73cb04c62e5ba6eefe49b595ca76832a
|
refs/heads/master
| 2023-01-25T02:23:50.123889 | 2018-06-10T17:12:22 | 2018-06-10T17:12:22 | 137,031,119 | 0 | 0 |
NOASSERTION
| 2019-10-28T15:04:26 | 2018-06-12T07:05:42 |
Python
|
UTF-8
|
Python
| false | false | 691 |
py
|
"""
Query and deal common tables.
"""
from __future__ import print_function
from evennia.utils import logger
from django.apps import apps
from django.conf import settings
class DialogueSentencesMapper(object):
"""
NPC's dialogue sentences.
"""
def __init__(self):
self.model_name = "dialogue_sentences"
self.model = apps.get_model(settings.WORLD_DATA_APP, self.model_name)
self.objects = self.model.objects
def filter(self, key):
"""
Get dialogue sentences.
Args:
key: (string) dialogue's key.
"""
return self.objects.filter(dialogue=key)
DIALOGUE_SENTENCES = DialogueSentencesMapper()
|
[
"[email protected]"
] | |
6b34dfae513fa55c66c92dd64ea87fa9d1207242
|
45a924e5cd1dfc75a2088d3d4463995803a06a09
|
/frappe/email/doctype/email_unsubscribe/test_email_unsubscribe.py
|
602840fe3b30b30238661516ade48243176ea9b0
|
[
"MIT"
] |
permissive
|
joe-santy/frappe
|
7cad66295f07f60176fbbc24766af5e38ac1e9d2
|
a6d9170e5fd9fdff462eee7967409ff7e23b6d2f
|
refs/heads/develop
| 2023-07-15T15:59:03.226729 | 2021-08-09T16:20:11 | 2021-08-09T16:20:11 | 394,489,040 | 0 | 0 |
MIT
| 2021-08-13T13:12:31 | 2021-08-10T01:22:17 | null |
UTF-8
|
Python
| false | false | 258 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import frappe
import unittest

# test_records = frappe.get_test_records('Email Unsubscribe')


class TestEmailUnsubscribe(unittest.TestCase):
    """Placeholder test case for the Email Unsubscribe doctype — no tests yet."""
    pass
|
[
"[email protected]"
] | |
74b0a2c23703cb4e5ab03f2b4f26df4d4bbbd55f
|
c705b2620119df0d60e925e55228bfbb5de3f568
|
/archives/twitter/add_to_list.py
|
b07b820c711aef611ff33b5d19f9e517e8424b05
|
[
"Apache-2.0"
] |
permissive
|
mcxiaoke/python-labs
|
5aa63ce90de5da56d59ca2954f6b3aeae7833559
|
61c0a1f91008ba82fc2f5a5deb19e60aec9df960
|
refs/heads/master
| 2021-08-05T03:47:51.844979 | 2021-07-24T11:06:13 | 2021-07-24T11:06:13 | 21,690,171 | 7 | 7 |
Apache-2.0
| 2020-08-07T01:52:32 | 2014-07-10T10:20:17 |
Python
|
UTF-8
|
Python
| false | false | 1,023 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2016-01-04 14:39:15
from __future__ import print_function, unicode_literals
import os
import sys
import codecs
import requests
import tweepy
from config import OWNER, OWNER_ID, CONSUMER_KEY, CONSUMER_SECRET, ACCESSS_TOKEN_KEY, ACCESS_TOKEN_SECRET

# Build an authenticated Twitter client from the credentials in config.py.
# (The ACCESSS_TOKEN_KEY spelling mirrors the name exported by config.)
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESSS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
def read_list(name):
    """Return the lines of file *name* (UTF-8, trailing newlines stripped),
    or None when *name* is not an existing file."""
    if not os.path.isfile(name):
        return None
    lines = []
    with codecs.open(name, 'r', 'utf-8') as source:
        for raw in source:
            lines.append(raw.rstrip('\n'))
    return lines
def add_to_list(slug, screen_name):
    """Add Twitter user *screen_name* to the list identified by *slug*.

    NOTE(review): owner_screen_name is hard-coded to 'dorauimi' instead of
    being derived from the OWNER config value — confirm this is intentional.
    """
    print('add user: %s to list: %s' % (screen_name, slug))
    api.add_list_member(slug=slug,
                        screen_name=screen_name,
                        owner_screen_name='dorauimi')
def main():
    """Read user names from the file given as argv[1] and add each one to
    the 'asiangirls' list."""
    for user in read_list(sys.argv[1]):
        add_to_list('asiangirls', user)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
1cf91c973076f5cd1c46e4f58e68999f1a89e80d
|
a2f67003e0eededb0c2d7645d83243d19af71340
|
/exam_subject/Subject/apps/topic/migrations/0010_answer_analysis.py
|
117680a8d906da18fcca8540dbfdcda5856ebd05
|
[] |
no_license
|
john123951/SubjectSystem
|
c6bf118627aa54ba56bd367f73528e66f51dcd58
|
a7f8e6014f81ec4376f3c5f437a280e801ab22e4
|
refs/heads/master
| 2020-07-13T16:36:15.663952 | 2019-06-19T07:02:14 | 2019-06-19T07:02:14 | 205,115,935 | 7 | 0 | null | 2019-08-29T08:23:00 | 2019-08-29T08:22:59 | null |
UTF-8
|
Python
| false | false | 422 |
py
|
# Generated by Django 2.0.2 on 2019-05-05 22:15
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration: add the Answer.analysis field.

    ``analysis`` is a CharField (max 500 chars, empty default); its
    verbose_name '解析' means "explanation" of the answer.
    """

    dependencies = [
        ('topic', '0009_examtime_exam_number'),
    ]

    operations = [
        migrations.AddField(
            model_name='answer',
            name='analysis',
            field=models.CharField(default='', max_length=500, verbose_name='解析'),
        ),
    ]
|
[
"[email protected]"
] | |
c5708367337a0c64f2df12dcce951050022001b6
|
2af1e6357f51d0d08b1a991e2bd922b7bdc8c0b6
|
/baekjoon/accepted/15480 LCA와 쿼리.py
|
8220d3407e8cfb7390cba36119d50b67d795abeb
|
[] |
no_license
|
grasshopperTrainer/coding_practice
|
530e9912b10952c866d35d69f12c99b96959a22d
|
d1e5e6d6fa3f71f1a0105940fff1785068aec8b0
|
refs/heads/master
| 2023-06-01T13:30:15.362657 | 2021-06-08T08:40:15 | 2021-06-08T08:40:15 | 267,359,225 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,998 |
py
|
# not accepted
from sys import stdin
from collections import deque


def solution(N, edges, asked):
    """For each query (root, a, b), return the lowest common ancestor of a
    and b in the tree re-rooted at *root*.

    The author marked this "not accepted": the whole parent/depth/ancestor
    table is rebuilt per query (O(N log N) each), and the final lifting loop
    still contains a debug print.  Logic preserved as-is.
    """
    # adjacency list of the undirected tree
    nd_tree = {}
    for a, b in edges:
        nd_tree.setdefault(a, []).append(b)
        nd_tree.setdefault(b, []).append(a)
    answers = []
    for root, a, b in asked:
        # find directed tree and depth (BFS from the query's root)
        tree = [i for i in range(N+1)]          # tree[v] = parent of v
        node_depth = [0 for _ in range(N+1)]
        max_depth = 0
        que = deque([[root, 0]])
        visited = {root}
        while que:
            at, depth = que.popleft()
            max_depth = max((max_depth, depth))
            for goto in nd_tree[at]:
                if goto not in visited:
                    visited.add(goto)
                    tree[goto] = at
                    node_depth[goto] = depth+1
                    que.append((goto, depth+1))
        # build ancestor table: lca[v][k] = 2^k-th ancestor of v (binary lifting)
        ancestry_d = len(bin(max_depth)[2:])+1
        lca = [[root for _ in range(ancestry_d)] for _ in range(N+1)]
        for node in range(1, N+1):
            for anc in range(ancestry_d):
                if anc == 0:
                    lca[node][anc] = tree[node]
                else:
                    lca[node][anc] = lca[lca[node][anc-1]][anc-1]
        # search asked: first equalize depths, then lift both nodes together
        while node_depth[a] != node_depth[b]:
            if node_depth[a] > node_depth[b]:
                a = tree[a]
            else:
                b = tree[b]
        while a != b:
            anc = 0
            # leftover debug output — presumably should be removed before submission
            print(a, b, anc, lca[a], lca[b], lca[a][anc+1], lca[b][anc+1])
            while lca[a][anc+1] != lca[b][anc+1]:
                anc += 1
            a, b = lca[a][anc], lca[b][anc]
        answers.append(a)
    return answers


# Input: N, then N-1 edges, then M queries of the form "root a b".
N = int(stdin.readline())
edges = []
for _ in range(N-1):
    edges.append([int(c) for c in stdin.readline().strip().split(' ')])
M = int(stdin.readline())
asked = []
for _ in range(M):
    asked.append([int(c) for c in stdin.readline().strip().split(' ')])
for a in solution(N, edges, asked):
    print(a)
|
[
"[email protected]"
] | |
cfe01345e37aadfec5a5a2ccb5e0ad6c4a9df927
|
9d278285f2bc899ac93ec887b1c31880ed39bf56
|
/ondoc/doctor/migrations/0192_auto_20190125_1514.py
|
3779ca4d7f242aa5bd1e5ad30a90d32209c5bc7d
|
[] |
no_license
|
ronit29/docprime
|
945c21f8787387b99e4916cb3ba1618bc2a85034
|
60d4caf6c52a8b70174a1f654bc792d825ba1054
|
refs/heads/master
| 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 433 |
py
|
# Generated by Django 2.0.5 on 2019-01-25 09:44
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated migration: alter CancellationReason.type.

    Redefines ``type`` as a nullable PositiveSmallIntegerField (blank
    allowed, default None).
    """

    dependencies = [
        ('doctor', '0191_auto_20190124_1845'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cancellationreason',
            name='type',
            field=models.PositiveSmallIntegerField(blank=True, default=None, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
4ae82fbb54695b12dbf2f6d5842e6919c8a8330b
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_97/1111.py
|
a46d75a4bf3135788b1b71ced5eaa6713ed67828
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 981 |
py
|
# Program to solve C. Recycled Numbers
def is_recycled_pair(a, b, call):
    """Return True when *a* and *b* are a "recycled" pair: the same number of
    digits (more than one) and one is a non-trivial rotation of the other.

    The *call* parameter is kept for interface compatibility only.  The
    original recursed with call=2 to also test the mirrored pair (b, a), but
    digit rotation is a symmetric relation, so that second pass could never
    change the result — it is omitted here.
    """
    astr = str(a)
    bstr = str(b)
    n = len(astr)
    # Different digit counts (or single digits) can never be recycled pairs.
    if n != len(bstr) or n == 1:
        return False
    # Compare a against every non-identity rotation of b.
    return any(astr == bstr[n - i:] + bstr[:n - i] for i in range(1, n))
filename = "in.txt"
infile = open(filename, 'r')
outfile = open("output.txt", 'w')
first_line = True
case = 0
for line in infile:
if first_line:
first_line = False
continue
case += 1
start = int(line.split(" ")[0])
end = int(line.split(" ")[1])
if end <= start:
outfile.write("Case #" + str(case) + ": 0" + "\n")
continue
pair_count = 0
for n1 in range(start, end):
for n2 in range(n1 + 1, end + 1):
if is_recycled_pair(n1, n2, 1):
pair_count += 1
outfile.write("Case #" + str(case) + ": " + str(pair_count) + "\n")
infile.close()
outfile.close()
|
[
"[email protected]"
] | |
8e5e8ca0317d169947d49cf752033de72b169638
|
0f4823e4e8dcedf64b0061c9f02d2bf4b410c0e0
|
/autotest/t000_testscripts.py
|
4394523e616b8f2de32d6d4ce65a26d645f47bda
|
[
"BSD-3-Clause"
] |
permissive
|
MJKnowling/flopy
|
cfa4383c8c834fbc57341511621d3f2401726224
|
f480ff304e5728ccaa5e663d3fa77ec025cb0ba8
|
refs/heads/master
| 2021-09-20T23:57:13.032896 | 2017-12-01T18:57:09 | 2017-12-01T18:57:09 | 113,387,250 | 0 | 0 | null | 2017-12-07T01:33:03 | 2017-12-07T01:33:02 | null |
UTF-8
|
Python
| false | false | 1,809 |
py
|
# Remove the temp directory and then create a fresh one
from __future__ import print_function
import os
import sys
import shutil
exclude = ['flopy_swi2_ex2.py', 'flopy_swi2_ex5.py']
for arg in sys.argv:
if arg.lower() == '--all':
exclude = []
sdir = os.path.join('..', 'examples', 'scripts')
# make working directories
testdir = os.path.join('.', 'temp', 'scripts')
if os.path.isdir(testdir):
shutil.rmtree(testdir)
os.mkdir(testdir)
# add testdir to python path
sys.path.append(testdir)
def copy_scripts():
files = [f for f in os.listdir(sdir) if f.endswith('.py')]
# exclude unwanted files
for e in exclude:
if e in files:
files.remove(e)
# copy files
for fn in files:
pth = os.path.join(sdir, fn)
opth = os.path.join(testdir, fn)
# copy script
print('copying {} from {} to {}'.format(fn, sdir, testdir))
shutil.copyfile(pth, opth)
return files
def import_from(mod, name):
mod = __import__(mod)
main = getattr(mod, name)
return main
def run_scripts(fn):
# import run function from scripts
s = os.path.splitext(fn)[0]
run = import_from(s, 'run')
# change to working directory
opth = os.getcwd()
print('changing to working directory "{}"'.format(testdir))
os.chdir(testdir)
# run the script
ival = run()
# change back to starting directory
print('changing back to starting directory "{}"'.format(opth))
os.chdir(opth)
# make sure script ran successfully
assert ival == 0, 'could not run {}'.format(fn)
def test_notebooks():
files = copy_scripts()
for fn in files:
yield run_scripts, fn
if __name__ == '__main__':
files = copy_scripts()
print(files)
for fn in files:
run_scripts(fn)
|
[
"[email protected]"
] | |
95b54f2914a61f9a045c2fd26d9d46b9767a42c4
|
0b953c73458679beeef3b95f366601c834cff9b4
|
/hunter/longest palindrome substring within string.py
|
9f907b18b130617c943cd267d9545d83c25ece09
|
[] |
no_license
|
Sravaniram/Python-Programming
|
41531de40e547f0f461e77b88e4c0d562faa041c
|
f6f2a4e3a6274ecab2795062af8899c2a06c9dc1
|
refs/heads/master
| 2020-04-11T12:49:18.677561 | 2018-06-04T18:04:13 | 2018-06-04T18:04:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 224 |
py
|
a=raw_input()
l=[]
m=[]
for i in range(0,len(a)):
for j in range(i,len(a)):
z=a[i:j+1]
y=z[::-1]
if z==y:
l.append(z)
m.append(len(z))
y=max(m)
for i in range(0,len(a)):
if m[i]==y:
print l[i]
break
|
[
"[email protected]"
] | |
aaa1bebc04e41b15d7bbd59b3e874ecfad08e1e6
|
ebde1fadfbe336fa52bc20c8a2f74de8d1d90cf3
|
/src/moca_modules/moca_share/__init__.py
|
53ea1bfc999d299bbc568895421ca67f221548ec
|
[
"MIT"
] |
permissive
|
el-ideal-ideas/MocaTwitterUtils
|
be2481ce9eb0f9e53e8e0bd54b1b265c80e4f959
|
544a260600ade1b8cd4e0a2d2967c2fb6a8f38d3
|
refs/heads/master
| 2023-02-18T23:27:31.056121 | 2021-01-23T07:41:16 | 2021-01-23T07:41:16 | 321,014,400 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 366 |
py
|
# -- Imports --------------------------------------------------------------------------
from .MocaMultiProcessLock import MocaMultiProcessLock
from .MocaSharedMemory import MocaSharedMemory
# -------------------------------------------------------------------------- Imports --
"""
This module can share data between processes.
Requirements
------------
None
"""
|
[
"[email protected]"
] | |
81b6659ce41232ce1546045cddc849edadb44f22
|
3a2af7b4b801d9ba8d78713dcd1ed57ee35c0992
|
/zerver/webhooks/errbit/view.py
|
a47ccae2f0fc9f5a3b1841a1b5be747b0a7ea1b3
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
timabbott/zulip
|
2b69bd3bb63539adbfc4c732a3ff9d52657f40ac
|
42f239915526180a1a0cd6c3761c0efcd13ffe6f
|
refs/heads/master
| 2023-08-30T21:45:39.197724 | 2020-02-13T23:09:22 | 2020-06-25T21:46:33 | 43,171,533 | 6 | 9 |
Apache-2.0
| 2020-02-24T20:12:52 | 2015-09-25T19:34:16 |
Python
|
UTF-8
|
Python
| false | false | 1,333 |
py
|
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
ERRBIT_TOPIC_TEMPLATE = '{project_name}'
ERRBIT_MESSAGE_TEMPLATE = '[{error_class}]({error_url}): "{error_message}" occurred.'
@api_key_only_webhook_view('Errbit')
@has_request_variables
def api_errbit_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
subject = get_subject(payload)
body = get_body(payload)
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
def get_subject(payload: Dict[str, Any]) -> str:
project = payload['problem']['app_name'] + ' / ' + payload['problem']['environment']
return ERRBIT_TOPIC_TEMPLATE.format(project_name=project)
def get_body(payload: Dict[str, Any]) -> str:
data = {
'error_url': payload['problem']['url'],
'error_class': payload['problem']['error_class'],
'error_message': payload['problem']['message'],
}
return ERRBIT_MESSAGE_TEMPLATE.format(**data)
|
[
"[email protected]"
] | |
b157b3943a5da0075b79e5476fd9dc13cb5f888d
|
f0e25779a563c2d570cbc22687c614565501130a
|
/Think_Python/rotate.py
|
88a2a43db71c667c9424a08799bd16968e7efbd5
|
[] |
no_license
|
XyK0907/for_work
|
8dcae9026f6f25708c14531a83a6593c77b38296
|
85f71621c54f6b0029f3a2746f022f89dd7419d9
|
refs/heads/master
| 2023-04-25T04:18:44.615982 | 2021-05-15T12:10:26 | 2021-05-15T12:10:26 | 293,845,080 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 946 |
py
|
"""This module contains code from
Think Python by Allen B. Downey
http://thinkpython.com
Copyright 2012 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import string
def rotate_letter(letter, n):
"""Rotates a letter by n places. Does not change other chars.
letter: single-letter string
n: int
Returns: single-letter string
"""
if letter.isupper():
start = ord('A')
elif letter.islower():
start = ord('a')
else:
return letter
c = ord(letter) - start
i = (c + n) % 26 + start
return chr(i)
def rotate_word(word, n):
"""Rotates a word by n places.
word: string
n: integer
Returns: string
"""
res = ''
for letter in word:
res += rotate_letter(letter, n)
return res
if __name__ == '__main__':
print(rotate_word('cheer', 7))
print(rotate_word('melon', -10))
print(rotate_word('sleep', 9))
|
[
"[email protected]"
] | |
77ab9cecf9571229a858bc319ec4530650f8d96c
|
4a48593a04284ef997f377abee8db61d6332c322
|
/python/opencv/opencv_2/gui/opencv_with_tkinter.py
|
c38c3d8a121d82026b7644085f0fe74574998ae3
|
[
"MIT"
] |
permissive
|
jeremiedecock/snippets
|
8feaed5a8d873d67932ef798e16cb6d2c47609f0
|
b90a444041c42d176d096fed14852d20d19adaa7
|
refs/heads/master
| 2023-08-31T04:28:09.302968 | 2023-08-21T07:22:38 | 2023-08-21T07:22:38 | 36,926,494 | 26 | 9 |
MIT
| 2023-06-06T02:17:44 | 2015-06-05T10:19:09 |
Python
|
UTF-8
|
Python
| false | false | 3,408 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Trackbar widget.
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_trackbar/py_trackbar.html#trackbar
WARNING: Tkinter doesn't work if it's run outside the main thread!
See: http://stackoverflow.com/questions/10556479/running-a-tkinter-form-in-a-separate-thread
"Tkinter isn't thread safe, and the general consensus is that Tkinter
doesn't work in a non-main thread. If you rewrite your code so that Tkinter
runs in the main thread, you can have your workers run in other threads."
"""
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import Tkinter as tk
import threading
def trackbar1_cb(x):
pass
def trackbar2_cb(x):
pass
#def scale_cb(ev=None):
# print(scale.get())
def main():
# Parse the programm options (get the path of the image file to read) #####
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--cameraid", "-i", help="The camera ID number (default: 0)", type=int, default=0, metavar="INTEGER")
args = parser.parse_args()
device_number = args.cameraid
# TkInter #################################################################
root = tk.Tk()
root.geometry("500x75") # Set the size of the "root" window
# See: http://effbot.org/tkinterbook/scale.htm
scale = tk.Scale(root, from_=0, to=255, orient=tk.HORIZONTAL)
#scale = tk.Scale(root, from_=0, to=255, orient=tk.HORIZONTAL, command=scale_cb)
scale.pack(fill=tk.X, expand=1)
# OpenCV ##################################################################
video_capture = cv.VideoCapture(device_number)
# Create a window
window_name = "Threshold Bin"
cv.namedWindow(window_name)
print("Press q to quit.")
def opencv_main_loop():
while(True):
# Capture frame-by-frame.
# 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
# 'img_np' is an numpy array.
ret, img_bgr = video_capture.read()
# IMAGE PROCESSING ################################
# Convert BGR color space to Grayscale
img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
# Threshold the Grayscale image: dst_i = (src_i > threshold_value) ? max_val : 0
threshold_value = scale.get()
max_val = 255
ret, img_threshold_bin = cv.threshold(img_gray, threshold_value, max_val, cv.THRESH_BINARY)
# DISPLAY IMAGES ##################################
# Display the resulting frame (BGR)
cv.imshow('BGR (orignal)', img_bgr)
# Display the resulting frames (Threshold)
cv.imshow(window_name, img_threshold_bin)
# KEYBOARD LISTENER ###############################
if cv.waitKey(1) & 0xFF == ord('q'):
break
video_capture.release()
cv.destroyAllWindows()
# Run the OpenCV main loop in a separate thread
thread_cv = threading.Thread(target=opencv_main_loop)
thread_cv.start()
# Run the tkinter main loop
root.mainloop()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
05569309e30bae8fa01d77141b06eb6f922b24e6
|
43c24c890221d6c98e4a45cd63dba4f1aa859f55
|
/test/tests/os_test.py
|
cb10509f1d7cdb4b47c62f144aadf5f27e252502
|
[
"Python-2.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
jmgc/pyston
|
c8e4df03c33c6b81d20b7d51a781d9e10148238e
|
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
|
refs/heads/master
| 2020-12-11T07:51:58.968440 | 2020-09-11T14:38:38 | 2020-09-11T14:38:38 | 39,242,644 | 0 | 0 |
NOASSERTION
| 2020-09-11T14:38:39 | 2015-07-17T08:09:31 |
Python
|
UTF-8
|
Python
| false | false | 673 |
py
|
#
# currently broken:
# import os.path
import os
r1 = os.urandom(8)
r2 = os.urandom(8)
print len(r1), len(r2), type(r1), type(r2), r1 == r2
print type(os.stat("/dev/null"))
print os.path.expanduser("~") == os.environ["HOME"]
print os.path.isfile("/dev/null")
print os.path.isfile("/should_not_exist!")
e = OSError(1, 2, 3)
print e
print e.errno
print e.strerror
print e.filename
print OSError(1, 2).filename
try:
os.execvp("aoeuaoeu", ['aoeuaoeu'])
except OSError, e:
print e
# Changes to os.environ should show up in subprocesses:
import subprocess
env = os.environ
env["PYTHONPATH"] = "."
subprocess.check_call("echo PYTHONPATH is $PYTHONPATH", shell=1)
|
[
"[email protected]"
] | |
ac00ac4bb96ebe184493e06849d1d2e99492b860
|
2f96d0e69ce3d6b1ea4623ed5b4c1741d9634ea9
|
/tests/dummy_repo/tvm/python/tvm/hybrid/util.py
|
556ede1519e92fb2666ef894fd89ca5bfffa2590
|
[
"Apache-2.0"
] |
permissive
|
tqchen/ffi-navigator
|
ae1e8923e4d5be589beabfadba91f4a3b39e03dd
|
46b0d0c6bce388a8e1e2cb7ed28062e889e4596c
|
refs/heads/main
| 2023-02-06T22:32:54.214871 | 2023-02-05T16:25:16 | 2023-02-05T16:25:16 | 230,478,838 | 217 | 24 |
Apache-2.0
| 2023-02-05T16:25:18 | 2019-12-27T16:44:58 |
Python
|
UTF-8
|
Python
| false | false | 921 |
py
|
import ast
import inspect
import logging
import sys
import numpy
from .. import api as _api
from .. import make as _make
from .. import expr as _expr
from .. import stmt as _stmt
from .._ffi.base import numeric_types
from ..tensor import Tensor
from ..container import Array
def replace_io(body, rmap):
"""Replacing tensors usage according to the dict given"""
from .. import ir_pass
def replace(op):
if isinstance(op, _stmt.Provide) and op.func in rmap.keys():
buf = rmap[op.func]
return _make.Provide(buf.op, op.value_index, op.value, op.args)
if isinstance(op, _expr.Call) and op.func in rmap.keys():
buf = rmap[op.func]
return _make.Call(buf.dtype, buf.name, op.args, \
_expr.Call.Halide, buf.op, buf.value_index)
return None
return ir_pass.IRTransform(body, None, replace, ['Provide', 'Call'])
|
[
"[email protected]"
] | |
6f6476757e06d7a487ecf584035e507e47e98cb6
|
9e9d23e7a57c46da27a491a61f19c7239d066bf8
|
/biliup/__init__.py
|
e1ff55cbd324da2fcb10188ba6f6f304a81fa7ea
|
[
"MIT"
] |
permissive
|
vmcole/bilibiliupload
|
f7c667927bfcc4a0c1c5eba96b674729ae776e62
|
b5c416451f66c2ebe550694d4c4957129d0e966e
|
refs/heads/master
| 2023-06-09T19:58:33.813073 | 2021-07-06T14:50:18 | 2021-07-06T14:50:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,722 |
py
|
import asyncio
from .common.reload import AutoReload
from .common.timer import Timer
from .engine.event import EventManager, Event
from .engine import config, invert_dict, Plugin
from . import plugins
__version__ = "0.0.8"
def create_event_manager():
streamer_url = {k: v['url'] for k, v in config['streamers'].items()}
inverted_index = invert_dict(streamer_url)
urls = list(inverted_index.keys())
pool1_size = config.get('pool1_size') if config.get('pool1_size') else 3
pool2_size = config.get('pool2_size') if config.get('pool2_size') else 3
# 初始化事件管理器
app = EventManager(config, pool1_size=pool1_size, pool2_size=pool2_size)
app.context['urls'] = urls
app.context['url_status'] = dict.fromkeys(inverted_index, 0)
app.context['checker'] = Plugin(plugins).sorted_checker(urls)
app.context['inverted_index'] = inverted_index
app.context['streamer_url'] = streamer_url
return app
event_manager = create_event_manager()
async def main():
from .handler import CHECK_UPLOAD, CHECK
event_manager.start()
async def check_timer():
event_manager.send_event(Event(CHECK_UPLOAD))
for k in event_manager.context['checker'].keys():
event_manager.send_event(Event(CHECK, (k,)))
wait = config.get('event_loop_interval') if config.get('event_loop_interval') else 40
# 初始化定时器
timer = Timer(func=check_timer, interval=wait)
interval = config.get('check_sourcecode') if config.get('check_sourcecode') else 15
# 模块更新自动重启
detector = AutoReload(event_manager, timer, interval=interval)
await asyncio.gather(detector.astart(), timer.astart(), return_exceptions=True)
|
[
"[email protected]"
] | |
274332a28662cdd27514f4e4d6ea6d2fb35d89f7
|
82db461036ffb2adbf0424a6f0575cd9d24b48a8
|
/main.py
|
aa2b3ceb4b62ba95ae0a6123184a319dd03db241
|
[] |
no_license
|
webclinic017/option_pdt
|
fdc559f02cc529b54278e90e04170713fe93684f
|
dd302c6b2661e26dbfcbea0384b99e85ae9584e1
|
refs/heads/master
| 2023-03-24T10:43:35.998775 | 2021-03-19T14:08:38 | 2021-03-19T14:08:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,828 |
py
|
import sys
import os
parent_path = os.path.dirname(sys.path[0])
if parent_path not in sys.path:
sys.path.append(parent_path)
import json
import pickle
import logging
import pandas as pd
import numpy as np
from datetime import datetime
from library import get_strategy
from utils.util_func import *
from optparse import OptionParser
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = OptionParser()
parser.add_option('-f', '--file_name', action='store', type='string', default=None)
(opts, args) = parser.parse_args()
file_path = f'library/strategy/{opts.file_name}.json'
strategy_data_file = opts.file_name.split('_')[0]+"_data"
with open(file_path, 'r') as f:
options = json.load(f)
'''
from datetime import datetime
import pandas as pd
import pickle
positions = pd.read_csv("data/positions_s.csv")
positions['group'] = positions['group'].astype(str)
#hedge_positions = pd.read_csv("data/hedge_positions.csv",index_col=0)
#hedge_positions['group'] = hedge_positions['group'].astype(str)
strategy_data = {'hedge_time':datetime.now()}
with open(f'data/delta_data.pkl','wb') as fw:
pickle.dump(strategy_data, fw)
with open(f'data/customer_position.pkl','wb') as fw:
pickle.dump(positions, fw)
today = datetime.now()
cols = ['EXP_DATE','ask_price', 'bid_price', 'creation_timestamp','instrument_name', 'K','S','cp',
'interest_rate','open_interest','underlying_index', 'volume','TTM']
option_df = pd.read_csv("data/option_df.csv",index_col=0)
option_df = option_df[cols]
#option_df['TTM'] = [days_diff(exp_date,today) for exp_date in option_df['EXP_DATE']]
option_df = option_df[option_df['TTM']>0.1]
portfolio = sim_positions(option_df,6)
subscription_list = [symbol2subs(symbol,"%d%b%y") for symbol in portfolio['instrument_name']]
'''
with open(f'data/{strategy_data_file}.pkl','rb') as fw:
strategy_data = pickle.load(fw)
with open(f'data/customer_position.pkl','rb') as fw:
positions = pickle.load(fw)
positions,is_removed = remove_expired_positions(positions)
if is_removed:
with open(f'data/customer_position.pkl','wb') as fw:
pickle.dump(positions, fw)
hedge_time = strategy_data['hedge_time']
#hedge_positions = strategy_data['hedge_positions']
#positions = {key:{k:0 for k,v in values.items()} for key,values in positions.items()}
#subscription_list = [symbol2subs(symbol,"%Y%m%d") for symbol in positions.keys() if symbol!='BTCUSD']
subscription_list = []
subscription_list.append('Deribit|BTCUSD|perp|ticker')
subscription_list.append('Deribit|BTCUSD|option|summaryinfo')
options['subscription_list'] = list(set(subscription_list))
options['hedge_time'] = hedge_time
options['positions'] = positions
if strategy_data_file == "delta_data":
options['account_target'] = float(strategy_data['account_target'])
stratgy = options['file_name']
context = get_strategy(stratgy)
context.logger.info('Start trading..')
context.config_update(**options)
context.pre_start(**options)
context.start()
#instrument = 'Deribit|BTCUSD-20200925-7000-P|option'
#instrument = 'Deribit|BTCUSD|option|summaryinfo'
#instrument = 'Deribit|BTCUSD|perp'
#context.send_order(instrument, 'sell', 0.1200, 0.1, 'Limit')
#context.send_order(instrument, 'sell', 0.1, 0.1, 'Fak', delay=3000)
#context.send_order(instrument, 'sell', 9500.5, 1, 'Limit',note='maker')
#context.send_order(instrument, 'buy', 8100.5, 1, 'Market',note='taker')
#context.inspect_order(instrument,'3887280714')
#context.send_order(instrument,'buy',7084,0.0706,'Limit')
|
[
"[email protected]"
] | |
e8d6832b01ddb153bea7721f9728d12768dc77a3
|
3c259a3755fa81dbaa5a33591c4bcedb79c20314
|
/config/ssef/ssef_eval_cqg_masked_2015.config
|
897b88fb2d2d418c25f44140fa30a4d2702f637a
|
[
"MIT"
] |
permissive
|
djgagne/hagelslag
|
f96bea7395d2d967e1dc84faccf910e01b83157b
|
17757de7b55737f65f615e5dccad379604961832
|
refs/heads/master
| 2023-07-24T20:13:07.659540 | 2023-07-13T17:02:00 | 2023-07-13T17:02:00 | 37,555,335 | 64 | 26 |
MIT
| 2023-07-13T17:02:01 | 2015-06-16T20:48:43 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 3,621 |
config
|
#!/usr/bin/env python
from datetime import datetime
import numpy as np
ensemble_members = ["wrf-s3cn_arw"] + ["wrf-s3m{0:d}_arw".format(m) for m in range(3, 14)]
scratch_path = "/sharp/djgagne/"
experiment_name = "cqg_masked"
config = dict(ensemble_name="SSEF",
ensemble_members=ensemble_members,
start_date=datetime(2015, 5, 12),
end_date=datetime(2015, 6, 5),
start_hour=13,
end_hour=36,
window_sizes=[1, 3, 24],
time_skip=1,
model_names=dict(dist=["Random Forest", "Elastic Net", "Random Forest CV"],
condition=["Random Forest"]),
model_types=["dist", "condition"],
size_thresholds=[5, 25, 50],
condition_threshold=0.5,
dist_thresholds=np.arange(0, 200),
num_max_samples=1000,
forecast_json_path=scratch_path + "track_forecasts_spring2015_{0}_json/".format(experiment_name),
track_data_csv_path=scratch_path + "track_data_spring2015_{0}_csv/".format(experiment_name),
forecast_sample_path=scratch_path + "track_samples_spring2015_{0}/".format(experiment_name),
mrms_path=scratch_path + "mrms_spring2015/",
mrms_variable="MESH_Max_60min_00.50",
obs_mask=True,
mask_variable="RadarQualityIndex_00.00",
forecast_thresholds=np.concatenate(([0, 0.01, 0.02], np.arange(0.05, 1.1, 0.05))),
dilation_radius=13,
forecast_bins={"dist": np.array(["Shape_f", "Location_f", "Scale_f"]),
"condition": np.array(["ProbHail"]),
"translation-x":np.arange(-240000, 264000, 24000),
"translation-y":np.arange(-240000, 264000, 24000),
"start-time":np.arange(-6, 7, 1)
},
object_thresholds=[0, 25, 50],
out_path=scratch_path + "evaluation_data_spring2015_{0}/".format(experiment_name),
obj_scores_file="object_scores_ssef_2015_cqg_closest_",
grid_scores_file="grid_scores_ssef_2015_cqg_cloest.csv",
obs_thresholds=[5, 25, 50, 75],
ensemble_variables=["uh_max", "hailsz", "cqgmax", "r10cmx"],
neighbor_thresholds={"dist": [25, 50],
"uh_max": [25, 75, 150],
"hailsz": [5, 25, 50],
"cqgmax": [5, 25, 50],
"r10cmx": [40, 60]},
neighbor_path="/sharp/djgagne/hail_consensus_ssef_{0}_2015/".format(experiment_name),
neighbor_score_path="/sharp/djgagne/neighbor_scores_ssef_unique_2015/ssef_{0}_diss_".format(experiment_name),
neighbor_radii=[14, 28],
smoothing_radii=[14, 21, 28],
neighbor_radius=42,
neighbor_sigma=1,
ml_grid_path=scratch_path + "hail_forecasts_grib2_ssef_cqg_masked_2015/",
coarse_neighbor_out_path= scratch_path + "ssef_coarse_neighbor_eval_2015/",
map_file = "/home/djgagne/hagelslag/mapfiles/ssef2015.map",
us_mask_file="/home/djgagne/hagelslag/mapfiles/ssef_2015_us_mask.nc",
coordinate_file="/sharp/djgagne/ssef_2015_grid.nc",
lon_bounds=[-106,-80],
lat_bounds=[28,48],
stride=14,
ensemble_path=scratch_path + "spring2015_nc/",
single_step=False,
)
|
[
"[email protected]"
] | |
12712fe4e23a5c73bf59f892cdc1ef0041cd1ab4
|
5410700e83210d003f1ffbdb75499062008df0d6
|
/leetcode/isHappy.py
|
92bdf82a57b5d864724396b17b24897d123370fd
|
[] |
no_license
|
lilyandcy/python3
|
81182c35ab8b61fb86f67f7796e057936adf3ab7
|
11ef4ace7aa1f875491163d036935dd76d8b89e0
|
refs/heads/master
| 2021-06-14T18:41:42.089534 | 2019-10-22T00:24:30 | 2019-10-22T00:24:30 | 144,527,289 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 528 |
py
|
class Solution:
def isHappy(self, n):
"""
:type n: int
:rtype: bool
"""
looplist = []
num = n
while num != 1:
if num not in looplist:
looplist.append(num)
else:
return False
num = self.sumLocation(num)
return True
def sumLocation(self, num):
strnum = str(num)
sumnum = 0
for i in range(len(strnum)):
sumnum += int(strnum[i]) ** 2
return sumnum
|
[
"[email protected]"
] | |
994523ad13eaf886d1e9b898c2b4e1e3021ae3a6
|
fac37d77a8d00e3d13106bcd728d51a455dd16f2
|
/kmer.py
|
2c016a97eb7bf7903ce31d36c4622ef1926e080c
|
[] |
no_license
|
anu-bioinfo/rosalind-4
|
c6a628bba94f647cf4a34bdf505f1527af4346a9
|
3ddc659d44298f4dd4b5dde66d7833b4d27a2580
|
refs/heads/master
| 2020-03-25T13:47:39.521215 | 2014-09-14T02:30:54 | 2014-09-14T02:30:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 589 |
py
|
#!/usr/bin/env python
from __future__ import print_function
import os
from revp import read_fasta
from subs import substring_find
from lexf import lexf_order
def kmer_composition(dna_string):
output = []
for p in lexf_order(4, 'ACGT'):
pos = list(substring_find(dna_string, ''.join(p)))
output.append(str(len(pos)))
return output
if __name__ == "__main__":
with open(os.path.join('data', 'rosalind_kmer.txt')) as dataset:
seqs = read_fasta(dataset)
dna_string = seqs.popitem(last=False)[1]
print(*kmer_composition(dna_string))
|
[
"[email protected]"
] | |
3f532246345c6898340e9b5f2125626a978ca0cf
|
fed6c6bdb6276d195bc565e527c3f19369d22b74
|
/galaxy-galaxy lensing/prepare_cata/Fourier_Quad_cata/gather_raw_cata.py
|
4e38e9d277633610cb84172ab6665238c0c69d4e
|
[] |
no_license
|
hekunlie/astrophy-research
|
edbe12d8dde83e0896e982f08b463fdcd3279bab
|
7b2b7ada7e7421585e8993192f6111282c9cbb38
|
refs/heads/master
| 2021-11-15T05:08:51.271669 | 2021-11-13T08:53:33 | 2021-11-13T08:53:33 | 85,927,798 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,291 |
py
|
import matplotlib
matplotlib.use("Agg")
import os
my_home = os.popen("echo $MYWORK_DIR").readlines()[0][:-1]
from sys import path
path.append('%s/work/mylib/'%my_home)
import tool_box
import h5py
from mpi4py import MPI
import numpy
import time
from subprocess import Popen
import warnings
warnings.filterwarnings('error')
# The new Fourier_Quad catalog differs from the old version!!!
# collect: collect the data from the files of each field. It creates the "fourier_cata.hdf5" in
# the parent directory of the one contain the field catalog.
# If the catalog file doesn't exist, run it firstly !!!.
# It will add the redshift parameters from CFHT catalog into the finial catalog.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
cpus = comm.Get_size()
data_path = "/mnt/perc/hklee/CFHT/catalog/fourier_cata_new/"
raw_cata_path = data_path + "raw_cata_new/"
dicts, fields = tool_box.field_dict(data_path + "nname.dat")
my_field = tool_box.allot(fields, cpus)[rank]
chip_num = 36
for field_nm in my_field:
field_path = raw_cata_path + "%s/"%field_nm
files = os.listdir(field_path)
chip_exps = []
for nm in files:
if ".dat" in nm:
exp_nm = nm.split("p")[0]
if exp_nm not in chip_exps:
chip_exps.append(exp_nm)
chip_exps.sort()
file_count = 0
for exp_nm in chip_exps:
for i in range(1,chip_num+1):
chip_nm = "%sp_%d_shear.dat"%(exp_nm, i)
chip_path = field_path + chip_nm
if os.path.exists(chip_path):
try:
temp = numpy.loadtxt(chip_path, skiprows=1)
if file_count == 0:
data = temp
else:
data = numpy.row_stack((data, temp))
file_count += 1
except:
file_size = os.path.getsize(chip_path)/1024.
print("Empty: %s (%.3f KB)"%(chip_nm, file_size))
else:
print("Can't find %d"%chip_nm)
if file_count > 0:
final_path = data_path + "%s/%s_shear_raw.cat"%(field_nm, field_nm)
numpy.savetxt(final_path, data)
h5f = h5py.File(final_path,"w")
h5f["/data"] = data
h5f.close()
|
[
"[email protected]"
] | |
9d54ff837c1a8f276a97e819ccf6c7a49e66713b
|
24144f83276705fe2f4df295ee50199c2035ca7b
|
/active/theses-mainz.py
|
0acd9b145b345b370518620e935b1280fb1eaed5
|
[] |
no_license
|
AcidBurn429/ejlmod
|
a2e4eb6bb28bcb6bbccc3d83e2e24f5aed23d4eb
|
dec50edbb14380686072d7311589a2363ef5cd00
|
refs/heads/master
| 2023-08-14T21:19:10.890194 | 2021-09-28T13:39:06 | 2021-09-28T13:39:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,405 |
py
|
# -*- coding: utf-8 -*-
#harvest theses from Mainz U.
#FS: 2020-01-27
import getopt
import sys
import os
import urllib2
import urlparse
from bs4 import BeautifulSoup
import re
import ejlmod2
import codecs
import datetime
import time
import json
xmldir = '/afs/desy.de/user/l/library/inspire/ejl'
retfiles_path = "/afs/desy.de/user/l/library/proc/retinspire/retfiles"
now = datetime.datetime.now()
stampoftoday = '%4d-%02d-%02d' % (now.year, now.month, now.day)
publisher = 'Mainz U.'
jnlfilename = 'THESES-MAINZ-%s' % (stampoftoday)
hdr = {'User-Agent' : 'Magic Browser'}
recs = []
rpp = 40
pages = 3
for page in range(pages):
tocurl = 'https://openscience.ub.uni-mainz.de/simple-search?query=&filter_field_1=organisationalUnit&filter_type_1=equals&filter_value_1=FB+08+Physik%2C+Mathematik+u.+Informatik&filter_field_2=publicationType&filter_type_2=equals&filter_value_2=Dissertation&sort_by=dc.date.issued_dt&order=desc&rpp=' + str(rpp) + '&etal=0&start=' + str(page*rpp)
print '==={ %i/%i }==={ %s }===' % (page+1, pages, tocurl)
tocpage = BeautifulSoup(urllib2.build_opener(urllib2.HTTPCookieProcessor).open(tocurl))
for tr in tocpage.body.find_all('tr'):
rec = {'tc' : 'T', 'keyw' : [], 'jnl' : 'BOOK', 'note' : []}
for td in tr.find_all('td', attrs = {'headers' : 't1'}):
rec['year'] = td.text.strip()
rec['date'] = td.text.strip()
for td in tr.find_all('td', attrs = {'headers' : 't3'}):
for a in td.find_all('a'):
rec['tit'] = a.text.strip()
rec['hdl'] = re.sub('.*handle\/', '', a['href'])
rec['artlink'] = 'https://openscience.ub.uni-mainz.de' + a['href']
recs.append(rec)
time.sleep(10)
i = 0
for rec in recs:
i += 1
print '---{ %i/%i }---{ %s }------' % (i, len(recs), rec['artlink'])
try:
artpage = BeautifulSoup(urllib2.build_opener(urllib2.HTTPCookieProcessor).open(rec['artlink']))
time.sleep(4)
except:
try:
print "retry %s in 180 seconds" % (rec['artlink'])
time.sleep(180)
artpage = BeautifulSoup(urllib2.build_opener(urllib2.HTTPCookieProcessor).open(rec['artlink']))
except:
print "no access to %s" % (rec['artlink'])
continue
for tr in artpage.body.find_all('tr'):
for td in tr.find_all('td', attrs = {'class' : 'metadataFieldLabel'}):
tdt = td.text.strip()
for td in tr.find_all('td', attrs = {'class' : 'metadataFieldValue'}):
#authors
if tdt == 'Authors:':
rec['autaff'] = [[ td.text.strip(), publisher ]]
#language
elif tdt == 'Language :':
if td.text.strip() == 'german':
rec['language'] = 'German'
#abstract
elif tdt == 'Abstract:':
rec['abs'] = td.text.strip()
#license
elif re.search('Information', tdt):
for a in td.find_all('a'):
if re.search('creativecommons.org', a['href']):
rec['license'] = {'url' : a['href']}
#pages
elif tdt == 'Extent:':
if re.search('\d\d', td.text):
rec['pages'] = re.sub('.*?(\d\d+).*', r'\1', td.text.strip())
#DOI
elif tdt == 'DOI:':
for a in td.find_all('a'):
rec['doi'] = re.sub('.*org\/', '', a['href'])
#FFT
for td in tr.find_all('td', attrs = {'class' : 'standard'}):
for a in td.find_all('a'):
if re.search('pdf$', a['href']):
if 'license' in rec.keys():
rec['FFT'] = 'https://openscience.ub.uni-mainz.de' + a['href']
else:
rec['hidden'] = 'https://openscience.ub.uni-mainz.de' + a['href']
print ' ', rec.keys()
#closing of files and printing
xmlf = os.path.join(xmldir, jnlfilename+'.xml')
xmlfile = codecs.EncodedFile(codecs.open(xmlf, mode='wb'), 'utf8')
ejlmod2.writenewXML(recs, xmlfile, publisher, jnlfilename)
xmlfile.close()
#retrival
retfiles_text = open(retfiles_path, "r").read()
line = jnlfilename+'.xml'+ "\n"
if not line in retfiles_text:
retfiles = open(retfiles_path, "a")
retfiles.write(line)
retfiles.close()
|
[
"[email protected]"
] | |
d49bcc85fb670923856b90cd4b3431c31b19fed9
|
8671856181ef218f147f23f367fd0b1dc7592e1a
|
/realtor/migrations/0020_auto_20190918_1213.py
|
69d2a3d67932c1247662582520c4265d41e2eef5
|
[] |
no_license
|
Alishrf/Shop_Website
|
e4fef9618aec2db6f4a655ff643aa68cf42dbb68
|
971d4a2ff8b7a68a0157681ff26404fe403502e6
|
refs/heads/master
| 2020-08-11T06:03:47.642870 | 2019-10-14T14:29:30 | 2019-10-14T14:29:30 | 214,504,737 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 474 |
py
|
# Generated by Django 2.2.4 on 2019-09-18 07:43
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('realtor', '0019_auto_20190918_1203'),
]
operations = [
migrations.AlterField(
model_name='realtor',
name='hire_date',
field=models.DateTimeField(default=datetime.datetime(2019, 9, 18, 12, 13, 29, 200152)),
),
]
|
[
"[email protected]"
] | |
df776be8e3c5a10d0ea0d5ac96bb71188cc0c541
|
be5f4d79910e4a93201664270916dcea51d3b9ee
|
/fastdownward/experiments/issue627/merge-v3-v5.py
|
100f1f2a1136f016756e9f799a53b284019dc988
|
[
"MIT",
"GPL-1.0-or-later",
"GPL-3.0-or-later"
] |
permissive
|
mehrdadzakershahrak/Online-Explanation-Generation
|
17c3ab727c2a4a60381402ff44e95c0d5fd0e283
|
e41ad9b5a390abdaf271562a56105c191e33b74d
|
refs/heads/master
| 2022-12-09T15:49:45.709080 | 2019-12-04T10:23:23 | 2019-12-04T10:23:23 | 184,834,004 | 0 | 0 |
MIT
| 2022-12-08T17:42:50 | 2019-05-04T00:04:59 |
Python
|
UTF-8
|
Python
| false | false | 1,574 |
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
from common_setup_no_benchmarks import IssueConfig, IssueExperiment, get_script_dir
from relativescatter import RelativeScatterPlotReport
import os
def main(revisions=None):
exp = IssueExperiment(benchmarks_dir=".", suite=[])
exp.add_fetcher(
os.path.join(get_script_dir(), "data", "issue627-v3-eval"),
filter=lambda(run): "base" not in run["config"],
)
exp.add_fetcher(
os.path.join(get_script_dir(), "data", "issue627-v5-eval"),
filter=lambda(run): "base" not in run["config"],
)
for config_nick in ['astar-blind', 'astar-lmcut', 'astar-ipdb', 'astar-cegar-original', 'astar-cegar-lm-goals']:
exp.add_report(
RelativeScatterPlotReport(
attributes=["memory"],
filter_config=["issue627-v3-%s" % config_nick,
"issue627-v5-%s" % config_nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_v3_v5_memory_%s.png' % config_nick
)
exp.add_report(
RelativeScatterPlotReport(
attributes=["total_time"],
filter_config=["issue627-v3-%s" % config_nick,
"issue627-v5-%s" % config_nick],
get_category=lambda run1, run2: run1.get("domain"),
),
outfile='issue627_v3_v5_total_time_%s.png' % config_nick
)
exp()
main(revisions=['issue627-v3', 'issue627-v5'])
|
[
"[email protected]"
] | |
1a1b7696f4ce2e13094a1f79e092e53fcc9eb461
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/galex_j15376-1702/sdB_galex_j15376-1702_lc.py
|
e2d309905a69f149eca005da69c193f1c0718906
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 358 |
py
|
from gPhoton.gAperture import gAperture
def main():
    """Run a single gAperture NUV light-curve extraction for this target."""
    params = dict(
        band="NUV",
        skypos=[234.417917, -17.037508],
        stepsz=30.,
        csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_galex_j15376-1702/sdB_galex_j15376-1702_lc.csv",
        maxgap=1000.,
        overwrite=True,
        radius=0.00555556,
        annulus=[0.005972227, 0.0103888972],
        verbose=3,
    )
    gAperture(**params)
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
1ebeccd76c77fb7295b05092f26a7ad953d07807
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2330/60796/280647.py
|
2ebf0db21d495fb1321ec9d1115f099b73a5cb61
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,846 |
py
|
import math
# Read N points ("x,y" per line) and print the smallest rectangle area
# (axis-aligned or rotated) whose four corners are all input points,
# formatted with exactly four decimal places; "0.0000" if none exists.
N = int(input())
ls = []
for i in range(N):
    ls.append(input().split(","))
    ls[i] = [int(x) for x in ls[i]]
r = []  # candidate rectangle areas
for i1 in range(len(ls) - 3):
    for i2 in range(i1 + 1, len(ls) - 2):
        for i3 in range(i2 + 1, len(ls) - 1):
            for i4 in range(i3 + 1, len(ls)):
                a = ls[i1]
                b = ls[i2]
                c = ls[i3]
                d = ls[i4]
                # Axis-aligned cases: two vertical sides + two horizontal sides.
                # BUGFIX: the first branch originally tested "d[0]==d[0]"
                # (always true); it must pair b with d.
                if (a[0] == c[0] and b[0] == d[0] and (a[1] == b[1] and c[1] == d[1])):
                    r.append(abs(a[1] - c[1]) * abs(b[0] - a[0]))
                elif (a[0] == b[0] and c[0] == d[0] and (d[1] == b[1] and a[1] == c[1])):
                    r.append(abs(a[1] - b[1]) * abs(c[0] - a[0]))
                elif (a[0] == d[0] and c[0] == b[0] and (d[1] == c[1] and a[1] == b[1])):
                    r.append(abs(a[0] - b[0]) * abs(d[1] - a[1]))
                elif (a[0] == b[0] and c[0] == d[0] and (c[1] == b[1] and a[1] == d[1])):
                    r.append(abs(a[0] - d[0]) * abs(b[1] - a[1]))
                elif (a[0] == c[0] and b[0] == d[0] and (c[1] == b[1] and a[1] == d[1])):
                    r.append(abs(a[0] - d[0]) * abs(c[1] - a[1]))
                elif (a[0] == d[0] and c[0] == b[0] and (d[1] == b[1] and a[1] == c[1])):
                    r.append(abs(a[0] - c[0]) * abs(d[1] - a[1]))
                # Rotated cases: opposite sides must have equal absolute slopes;
                # area is the product of two adjacent side lengths.
                elif (a[0] - b[0]) != 0 and (c[0] - d[0]) != 0 and (a[0] - c[0]) != 0 and (d[0] - b[0]) != 0:
                    if abs((a[1] - b[1]) / (a[0] - b[0])) == abs((c[1] - d[1]) / (c[0] - d[0])) and abs((a[1] - c[1]) / (a[0] - c[0])) == abs((d[1] - b[1]) / (d[0] - b[0])):
                        x = math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))
                        y = math.sqrt(pow(a[0] - c[0], 2) + pow(a[1] - c[1], 2))
                        r.append(x * y)
                elif (a[0] - d[0]) != 0 and (c[0] - b[0]) != 0 and (a[0] - c[0]) != 0 and (d[0] - b[0]) != 0:
                    if abs((a[1] - d[1]) / (a[0] - d[0])) == abs((b[1] - c[1]) / (b[0] - c[0])) and abs((a[1] - c[1]) / (a[0] - c[0])) == abs((d[1] - b[1]) / (d[0] - b[0])):
                        x = math.sqrt(pow(a[0] - d[0], 2) + pow(a[1] - d[1], 2))
                        y = math.sqrt(pow(a[0] - c[0], 2) + pow(a[1] - c[1], 2))
                        r.append(x * y)
                elif (a[0] - d[0]) != 0 and (c[0] - b[0]) != 0 and (a[0] - b[0]) != 0 and (d[0] - c[0]) != 0:
                    if abs((a[1] - d[1]) / (a[0] - d[0])) == abs((b[1] - c[1]) / (b[0] - c[0])) and abs(
                            (a[1] - b[1]) / (a[0] - b[0])) == abs((d[1] - c[1]) / (d[0] - c[0])):
                        x = math.sqrt(pow(a[0] - d[0], 2) + pow(a[1] - d[1], 2))
                        y = math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))
                        r.append(x * y)
if len(r) == 0:
    print("0.0000")
else:
    s = str(min(r))
    if "." not in s:
        s = s + ".0000"
    else:
        i = s.index(".")
        t = s[i + 1:]
        # BUGFIX: pad the fractional part on the RIGHT (the original
        # prepended zeros and never wrote the padding back into s).
        while len(t) < 4:
            t = t + "0"
        if len(t) > 4:
            t = t[:4]  # truncate (not round) to four places, as before
        s = s[:i + 1] + t
    # BUGFIX: print in both branches (the original only printed inside the
    # "has a decimal point" branch, so integer areas produced no output).
    print(s)
|
[
"[email protected]"
] | |
c5971d938e49b66b654a919ac6e2e69b5337945b
|
a4a754bb5d2b92707c5b0a7a669246079ab73633
|
/8_kyu/derive.py
|
6efcdb1118f8b8cb017f87a2a9c1cd42ddd88128
|
[] |
no_license
|
halfendt/Codewars
|
f6e0d81d9b10eb5bc66615eeae082adb093c09b3
|
8fe4ce76824beece0168eb39776a2f9e078f0785
|
refs/heads/master
| 2023-07-11T13:58:18.069265 | 2021-08-15T18:40:49 | 2021-08-15T18:40:49 | 259,995,259 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 198 |
py
|
def derive(coefficient, exponent):
    """
    Take the Derivative Kata
    https://www.codewars.com/kata/5963c18ecb97be020b0000a2

    Return the derivative of coefficient*x^exponent as "<c*e>x^<e-1>".
    """
    return f"{coefficient * exponent}x^{exponent - 1}"
|
[
"[email protected]"
] | |
4392d1adcce1c93371a6728ecfff29e616948c28
|
ec78f8ab63aec0753b9360715a4276a971b78a82
|
/py/data_analysis/np/matrix.py
|
2df729d2746c2ba43d2ec102e1595d3cf8c1e176
|
[] |
no_license
|
anderscui/ml
|
4ace7e7b8cf248042d224bd54e81b691963b2e0e
|
39238ba6d802df7e8bf1089ef3605cfc83b333ac
|
refs/heads/master
| 2021-06-03T16:09:55.207202 | 2018-11-01T18:50:49 | 2018-11-01T18:50:49 | 23,989,214 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 139 |
py
|
import numpy as np
arr = np.arange(15).reshape(3, 5)
# Two equivalent transposition spellings, then the 5x5 product arr.T @ arr.
print(arr.T)
print(arr.transpose())
print(arr.T.dot(arr))
# also swapaxes()
|
[
"[email protected]"
] | |
eb8ca0533b3576c10c7673e10928c10f18803fac
|
a1ea4bb213801a2f49e9b3d178f402f108d8a803
|
/AI(BE)/bullseyes/bullseyes/settings.py
|
0cd8bb3f9a00a7080b4c0388a0e00b09b89ddf1f
|
[
"MIT"
] |
permissive
|
osamhack2021/AI_WEB_Bullseyes_Bullseyes
|
537df4c35550917b963442538926c0b4bbef3cd6
|
ec6aa6ce093e93b5666a0fd5ede28585c27a3590
|
refs/heads/master
| 2023-08-18T10:42:24.212460 | 2021-10-20T02:49:35 | 2021-10-20T02:49:35 | 407,145,903 | 4 | 2 |
MIT
| 2021-10-17T05:23:18 | 2021-09-16T11:54:11 |
JavaScript
|
UTF-8
|
Python
| false | false | 4,101 |
py
|
"""
Django settings for bullseyes project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_&*#@x)6_c7#1y4e65x)+!*75if7gyn4kz469&v2h6aw$om&m3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'bullseyes_server',
'rest_framework.authtoken',
'django_filters',
'corsheaders',
]
MIDDLEWARE = [
"corsheaders.middleware.CorsMiddleware",
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'corsheaders.middleware.CorsPostCsrfMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ALLOWED_ORIGIN_REGEXES = [
r".*",
]
#CORS_ORIGIN_ALLOW_ALL = True
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 100,
'DEFAULT_FILTER_BACKENDS':['django_filters.rest_framework.DjangoFilterBackend'],
# 'DATE_INPUT_FORMATS': ['iso-8601', '%Y-%m-%dT%H:%M:%S.%fZ'],
}
ROOT_URLCONF = 'bullseyes.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bullseyes.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
|
[
"[email protected]"
] | |
391e199af1fa6be6a64f00ab28750cf11324aad2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02660/s614607751.py
|
34c48997173930d6f69893b65345505f7e034156
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 334 |
py
|
# For each prime power p**e dividing N, greedily count how many distinct
# exponents 1, 2, 3, ... can be carved out of e; a leftover prime factor
# larger than sqrt(N) contributes one more. Print the total count.
n = int(input())
answer = 0
bound = n  # upper bound is fixed before n is divided down (as in range(2, N))
i = 2
while i < bound:
    if i * i > n:
        break
    exponent = 0
    while n % i == 0:
        exponent += 1
        n //= i
    if exponent:
        take = 1
        # Greedily consume exponent in chunks of size 1, 2, ..., 9.
        while take <= 9 and exponent >= take:
            exponent -= take
            answer += 1
            take += 1
    i += 1
if n > 1:
    answer += 1
print(answer)
|
[
"[email protected]"
] | |
33e87fe2280b584f9fab54d1712d053ca31d4dcd
|
4d8d542058f91bc2a1fede92a7ebc614b61aed22
|
/environments/mujoco/rand_param_envs/gym/envs/debugging/__init__.py
|
ebdd5b7c14979e41c5700c705b69cfaddace0c6c
|
[
"MIT"
] |
permissive
|
NagisaZj/varibad
|
9ea940e168fea336457636e33f61400d48a18a27
|
df7cda81588c62a2a3bee69e4173228701bd7000
|
refs/heads/master
| 2023-02-07T15:50:47.912644 | 2020-12-27T01:51:10 | 2020-12-27T01:51:10 | 270,474,411 | 0 | 0 |
NOASSERTION
| 2020-06-08T00:34:09 | 2020-06-08T00:34:08 | null |
UTF-8
|
Python
| false | false | 552 |
py
|
from environments.mujoco.rand_param_envs.gym.envs.debugging.one_round_deterministic_reward import \
OneRoundDeterministicRewardEnv
from environments.mujoco.rand_param_envs.gym.envs.debugging.one_round_nondeterministic_reward import \
OneRoundNondeterministicRewardEnv
from environments.mujoco.rand_param_envs.gym.envs.debugging.two_round_deterministic_reward import \
TwoRoundDeterministicRewardEnv
from environments.mujoco.rand_param_envs.gym.envs.debugging.two_round_nondeterministic_reward import \
TwoRoundNondeterministicRewardEnv
|
[
"[email protected]"
] | |
42ccabfd89b1d00cd7df7184e283bdbb70020766
|
41777d4d219ea97b4632f4a8a31ab6c82a60772c
|
/kubernetes-stubs/config/incluster_config.pyi
|
79a0d4354db777d9f57fa699d05bd128f72a24d2
|
[
"Apache-2.0"
] |
permissive
|
gordonbondon/kubernetes-typed
|
501d9c998c266386dc7f66f522f71ac3ba624d89
|
82995b008daf551a4fe11660018d9c08c69f9e6e
|
refs/heads/master
| 2023-07-18T12:06:04.208540 | 2021-09-05T19:50:05 | 2021-09-05T19:50:05 | 319,183,135 | 24 | 2 |
Apache-2.0
| 2021-09-05T19:50:06 | 2020-12-07T02:34:12 |
Python
|
UTF-8
|
Python
| false | false | 635 |
pyi
|
# Code generated by `stubgen`. DO NOT EDIT.
# Type stub for kubernetes' in-cluster configuration loader.
from .config_exception import ConfigException as ConfigException
from kubernetes.client import Configuration as Configuration
from typing import Any
# Module-level string constants; their values live in the implementation module.
SERVICE_HOST_ENV_NAME: str
SERVICE_PORT_ENV_NAME: str
SERVICE_TOKEN_FILENAME: str
SERVICE_CERT_FILENAME: str
class InClusterConfigLoader:
    def __init__(self, token_filename, cert_filename, try_refresh_token: bool = ..., environ=...) -> None: ...
    def load_and_set(self, client_configuration: Any | None = ...) -> None: ...
def load_incluster_config(client_configuration: Any | None = ..., try_refresh_token: bool = ...) -> None: ...
|
[
"[email protected]"
] | |
e325857a904d0df6ed0627ab009f34fc96c74972
|
329cc042bb5829ab26a51d0b3a0bd310f05e0671
|
/main.py
|
60f84aa47980f4c797b50f2df6697f82314f4908
|
[] |
no_license
|
bkhoward/WLC-PSK-Change
|
53afe64e767889ce967679d8aeb798745166fa72
|
1b92fd1d5afae4bc64bfc61bc4935c635cca12f0
|
refs/heads/master
| 2023-03-25T01:33:53.765751 | 2021-03-11T18:59:03 | 2021-03-11T18:59:03 | 345,891,681 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,263 |
py
|
#!/usr/bin/env python
#
# Author: Brian Howard
# Date: 02Feb2021
# Version: 1.0
# Abstract: Create SSH connection to corporate Cisco WLCs and change the PSK used for ONEguest SSID
#	- Find the WLAN_ID for the ONEguest SSID
#	- Disable the WLAN_ID for ONEguest
#	- Modify the existing PSK for the ONEguest SSID
#	- Re-Enable the WLAN_ID for ONEguest
#	- Save the config
#	- Create logfiles for all SSH transactions
#
# Source Files:
#   main.py - main python script
#   credentials.py - file to store login credentials
#   ios_wlan_id-finder.py - logs into an ios host and finds the WLAN_ID associated with the ONEguest SSID
#   aireos_wlan_id-finder.py - logs into an aireos host and finds the WLAN_ID associated with the ONEguest SSID
#   host_file.py - python list containing ip addresses of Cisco WLCs
#   cmd_file.py - python list containing Cisco commands to run within the script
#		  Note: 'show run | include hostname' must be element 0
#
# Output Files:
#   log.txt - log file containing all information from the SSH channel.
#             This is an all inclusive file for all hosts connected to
#   {hostname}.txt - each host connected to has an individual log file of commands only.
#                    this log is not as detailed as the log.txt file.
# ------------------------------------------------------------------------------------------------#
# ------------------------------------------------------------------------------------------------#
# Function definitions
# ------------------------------------------------------------------------------------------------#
import logging
import coloredlogs
from netmiko import ConnectHandler
from ntc_templates.parse import parse_output
from host_file import host
from credentials import credentials
from cmd_file import cmd
# NOTE(review): pprint is imported but never used below.
from pprint import pprint
##### Begin Logging section #####
# Basic logging allows Netmiko detailed logging of the ssh stream written to a file
logging.basicConfig(filename='log.txt', level=logging.DEBUG, datefmt='%d-%b-%y %H:%M:%S')
logger = logging.getLogger('Netmiko')
# Create a console handler object for the console Stream
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# # Create a ColoredFormatter to use as formatter for the Console Handler
formatter = coloredlogs.ColoredFormatter(fmt='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
ch.setFormatter(formatter)
# assign console handler to logger
logger.addHandler(ch)
# ##### End Logging section #####
if __name__ == '__main__':
    # Capture new PSK
    # NOTE(review): the PSK is echoed to the console and written to the
    # session/log files in clear text further below.
    print()
    PSK = input("Please enter New PSK: ")
    print()
    # One SSH session per controller; each gets its own "<hostname>.log".
    for wlc in host:
        logfile = wlc['hostname'] + '.log'
        # Netmiko SSH connection
        ssh_connect = ConnectHandler(ip=wlc['ip'], username=credentials['username'], password=credentials['password'],
                                     device_type=wlc['device_type'], session_log=logfile)
        # Netmiko connection sends show command
        # use_textfsm automatically looks in the \venv\Lib\site-packages\ntc_templates\templates directory
        # for a template matching the device type + command name to convert the unstructured output of the show
        # command to structured data (list of dictionaries)
        # Note: ntc_templates and fsmtext are automatically installed with Netmiko
        show_wlan_raw = ssh_connect.send_command(cmd['get_wlan'])
        show_wlan = parse_output(platform=wlc['device_type'], command="show wlan sum", data=show_wlan_raw)
        for wlan in show_wlan:
            if wlan['ssid'] == 'ONEguest':
                print()
                print('*******************************************************************************')
                print()
                # Connect to host and Show current state of WLANs
                logger.critical('Connecting to ' + wlc['hostname'])
                logger.warning(ssh_connect.send_command(cmd['get_wlan']))
                # Disable ONEguest WLAN and Show current state of WLANs
                # 'cisco_wlc_ssh' devices use the aireos_* commands; all others
                # take the ios_* config-mode path (presumably IOS-XE -- verify).
                logger.critical('Disabling WLAN on ' + wlan['ssid'] + ' for WLAN-ID: ' + wlan['wlanid'])
                if wlc['device_type'] == 'cisco_wlc_ssh':
                    ssh_connect.send_command(cmd['aireos_wlan_disable'] + ' ' + wlan['wlanid'])
                    logger.warning(ssh_connect.send_command(cmd['get_wlan']))
                    print()
                else:
                    # change to wlan profile sub menu for ONEguest SSID and shutdown SSID
                    # send_config_set automatically enters config mode, executes a list of commands,
                    # then exits config mode. Note if only one command is in the list it does not stay in config mode
                    ssh_connect.send_config_set(['wlan ' + wlan['profile'], cmd['ios_shutdown']])
                    logger.warning(ssh_connect.send_command(cmd['get_wlan']))
                    print()
                # Change PSK
                logger.critical('Changing PSK on ' + wlc['hostname'] + ' for WLAN-ID: ' + wlan['wlanid'])
                if wlc['device_type'] == 'cisco_wlc_ssh':
                    ssh_connect.send_command(cmd['aireos_psk'] + ' ' + PSK + ' ' + wlan['wlanid'])
                    logger.warning('New PSK is: ' + PSK)
                    print()
                else:
                    ssh_connect.enable()
                    # change to wlan profile sub menu for ONEguest SSID and chnage PSK
                    ssh_connect.send_config_set(['wlan ' + wlan['profile'], cmd['ios_psk'] + ' ' + PSK])
                    logger.warning('New PSK is: ' + PSK)
                    print()
                # Enable ONEguest WLAN and Show current state of WLANs
                logger.critical('Enabling WLAN on ' + wlan['ssid'] + ' for WLAN-ID: ' + wlan['wlanid'])
                if wlc['device_type'] == 'cisco_wlc_ssh':
                    ssh_connect.send_command(cmd['aireos_wlan_enable'] + ' ' + wlan['wlanid'])
                    logger.warning(ssh_connect.send_command(cmd['get_wlan']))
                    print()
                else:
                    ssh_connect.enable()
                    # change to wlan profile sub menu for ONEguest SSID and enable it
                    ssh_connect.send_config_set(['wlan ' + wlan['profile'], cmd['ios_no_shutdown']])
                    logger.warning(ssh_connect.send_command(cmd['get_wlan']))
                    print()
                # Save Config
                logger.critical('Saving Config on host: ' + wlc['hostname'])
                if wlc['device_type'] == 'cisco_wlc_ssh':
                    ssh_connect.save_config(cmd['aireos_save'], confirm_response='y')
                    print()
                    print('*******************************************************************************')
                    print()
                else:
                    ssh_connect.save_config()
                    print()
                    print('*******************************************************************************')
                    print()
        ssh_connect.disconnect()
|
[
"[email protected]"
] | |
102450eccb8fcad7b0362df30fb062da3054d97a
|
779291cb83ec3cab36d8bb66ed46b3afd4907f95
|
/migration/rnaseq-wf_cleanup.py
|
7a26326b9cd9e55a223e034fa72a8b9827c72f1c
|
[] |
no_license
|
Shengqian95/ncbi_remap
|
ac3258411fda8e9317f3cdf951cc909cc0f1946e
|
3f2099058bce5d1670a672a69c13efd89d538cd1
|
refs/heads/master
| 2023-05-22T06:17:57.900135 | 2020-11-01T17:16:54 | 2020-11-01T17:16:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,369 |
py
|
import os
import re
import shutil
from pathlib import Path
# Set CLEAN_UP to any (truthy) value in the environment to actually delete
# files; otherwise the helpers below only print what they would remove.
CLEAN_UP = os.environ.get("CLEAN_UP", False)
# SRA run accessions: SRR/ERR/DRR followed by digits.
SRR_PATTERN = re.compile(r"^[SED]RR\d+$")
# Per-sample outputs that must ALL exist before a sample counts as done.
TARGETS = [
    "../output/rnaseq-wf/aln_stats/{srx}.parquet",
    "../output/rnaseq-wf/gene_counts/{srx}.parquet",
    "../output/rnaseq-wf/junction_counts/{srx}.parquet",
    "../output/rnaseq-wf/intergenic_counts/{srx}.parquet",
    "../output/rnaseq-wf/segment_counts/{srx}.parquet",
    "../output/rnaseq-wf/fusion_counts/{srx}.parquet",
    "../output/rnaseq-wf/flybase_bigwigs/{srx}.flybase.first.bw",
    "../output/rnaseq-wf/flybase_bigwigs/{srx}.flybase.second.bw",
    "../output/rnaseq-wf/ucsc_bigwigs/{srx}.first.bw",
    "../output/rnaseq-wf/ucsc_bigwigs/{srx}.second.bw",
    "../output/rnaseq-wf/samples/{srx}/{srx}.bam",
    "../output/rnaseq-wf/samples/{srx}/{srx}.bam.bai",
]
def main():
    """Walk every sample directory: drop samples listed as bad, and for
    samples whose full set of TARGETS exists, mark them done and delete
    their intermediate files."""
    for srx_path in Path("../output/rnaseq-wf/samples").iterdir():
        srx = srx_path.name
        remove_temp(srx)
        # Samples recorded under atropos_bad/ or alignment_bad/ are discarded.
        if (
            Path(f"../output/rnaseq-wf/atropos_bad/{srx}").exists()
            or Path(f"../output/rnaseq-wf/alignment_bad/{srx}").exists()
        ):
            remove_srx_folder(srx)
            continue
        # Only clean up once every expected output file is present.
        if all(check_target(target.format(srx=srx)) for target in TARGETS):
            Path(f"../output/rnaseq-wf/done/{srx}").touch()
            remove_srr_folders(srx)
            remove_processed_files(srx)
            remove_misc_files(srx)
def remove_temp(srx: str):
    """Delete leftover *.tmp files from the sample directory of *srx*."""
    sample_dir = Path(f"../output/rnaseq-wf/samples/{srx}")
    for tmp_file in sample_dir.glob("*.tmp"):
        tmp_file.unlink()
def remove_srx_folder(srx: str):
    """Remove the whole sample folder for *srx*, or just report it when
    CLEAN_UP is falsy (dry-run)."""
    folder = Path(f"../output/rnaseq-wf/samples/{srx}")
    if not folder.exists():
        return
    if CLEAN_UP:
        shutil.rmtree(folder)
    else:
        print("Removing SRX Folder:", folder, sep="\t")
def check_target(file_name: str):
    """Return True if *file_name* exists; otherwise report it and return False.

    The original implicitly returned None on the missing-file path; returning
    False keeps truthiness identical for callers while making the boolean
    contract explicit.
    """
    if Path(file_name).exists():
        return True
    print("Missing Target:", file_name, sep="\t")
    return False
def remove_srr_folders(srx: str):
    """Drop the per-run (SRR/ERR/DRR) subfolders inside the sample dir of
    *srx*, or just report them when CLEAN_UP is falsy (dry-run)."""
    sample_dir = Path(f"../output/rnaseq-wf/samples/{srx}")
    run_dirs = (
        child
        for child in sample_dir.iterdir()
        if child.is_dir() and re.match(SRR_PATTERN, child.name)
    )
    for run_dir in run_dirs:
        if CLEAN_UP:
            shutil.rmtree(run_dir)
        else:
            print("Removing SRR Folder:", run_dir, sep="\t")
def remove_file(file_name: str):
    """Delete *file_name* if it exists, or just report it when CLEAN_UP is
    falsy (dry-run)."""
    target = Path(file_name)
    if not target.exists():
        return
    if CLEAN_UP:
        target.unlink()
    else:
        print("Removing File:", target, sep="\t")
def remove_processed_files(srx: str):
    """Remove per-sample BAM-derived outputs that were already converted to
    parquet (see TARGETS)."""
    bam_prefix = f"../output/rnaseq-wf/samples/{srx}/{srx}.bam"
    # Same files, same order, as the original explicit call list.
    for suffix in (
        ".samtools.stats",
        ".bamtools.stats",
        ".counts",
        ".counts.jcounts",
        ".intergenic.counts",
        ".exon_fusions.counts",
        ".exon_segments.counts",
    ):
        remove_file(bam_prefix + suffix)
def remove_misc_files(srx: str):
    """Remove assorted per-sample logs, summaries and intermediate tables."""
    base = f"../output/rnaseq-wf/samples/{srx}/{srx}"
    # Standalone intermediates, in the original removal order.
    for suffix in (
        ".trim.clean.tsv",
        ".hisat2.bam.tsv",
        ".bam.samtools.idxstats",
        ".counts",
        ".bam.counts.summary",
        ".bam.counts.log",
    ):
        remove_file(base + suffix)
    # featureCounts side files for each count table.
    for table in ("intergenic", "exon_fusions", "exon_segments"):
        for ext in (".jcounts", ".summary", ".log"):
            remove_file(f"{base}.bam.{table}.counts{ext}")
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
9b8e1d93b2f68bc34a67adea4f49a273d934c106
|
a6b8263a42b96f317b818b3ba7e45bb8cb4458f6
|
/shipsnake/__main__.py
|
e0db2bc14affd6ceb307dfe465e53a7f63042a48
|
[
"MIT"
] |
permissive
|
cole-wilson/test-ship
|
5002add3b7f84162a064fcc4496f82a512fe4ff3
|
95f2ff585efd7564e60caad9a4806939923bc525
|
refs/heads/master
| 2023-01-30T01:52:55.111219 | 2020-12-07T05:18:12 | 2020-12-07T05:18:12 | 319,211,654 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,108 |
py
|
# shipsnake CLI driver: parses argv for a mode (wizard | build | dev | upload)
# and an optional version, then packages the current directory into a
# pip-installable project.
if __name__ != '__main__':
	print("Please run shipsnake as script or command, not module.")
import toml
import os
import sys
import glob
import shutil
# mode = sys.argv[1]
# mode="upload"
version = ""
if len(sys.argv) == 1:
	print("Provide a mode:\n\tshipsnake [wizard | build | dev | upload]")
	sys.exit(0)
mode = sys.argv[1]
if len(sys.argv) < 3 and mode in ["upload",'build']:
	print("Provide a version:\n\tshipsnake "+mode+" <version>")
	sys.exit(0)
if len(sys.argv)>2:
	version = sys.argv[2]
if mode=="dev" and version=="":
	version = "dev_build"
# Test hook: run against the ./tester sandbox directory instead of cwd.
if os.getenv('TEST_SNAKE')=="TRUE":
	os.chdir('tester')
if mode == "wizard":
	import wizard
	wizard.main()
elif mode in ["build","dev","upload"]:
	# NOTE(review): '__pychache__' is a typo for '__pycache__', so the
	# generated .gitignore does not actually ignore bytecode caches.
	open('.'+os.sep+'.gitignore','w+').write('*'+os.sep+'__pychache__')
	if not os.path.isfile('.'+os.sep+'shipsnake.toml'):
		print('Please create a config file with `shipsnake wizard` first.')
		sys.exit(0)
	with open('.'+os.sep+'shipsnake.toml') as datafile:
		data = toml.loads(datafile.read())
	# NOTE(review): 'prefix' is never defined in this module -- this line
	# raises NameError at runtime unless 'prefix' is injected from elsewhere
	# (e.g. the installed package wrapper). TODO confirm and fix.
	with open(prefix+os.sep+'setup.py.template') as datafile:
		template = datafile.read()
	# Render setup.py from the template using the config values.
	setup = template.format(
		**data,
		version = version,
		entry_points = [data["short_name"]+"="+data["short_name"]+".__main__"] if data["file"]!="" else [""]
	)
	open('setup.py','w+').write(setup)
	source_dir = os.getcwd()
	target_dir = data["short_name"]+os.sep
	types = ('*.py',*data["data_files"])
	file_names = []
	for files in types:
		file_names.extend(glob.glob(files))
	if not os.path.isdir(target_dir):
		os.mkdir(target_dir)
	# Move sources and data files into the package directory.
	for file_name in file_names:
		if file_name in ["setup.py","shipsnake.toml"]:
			continue
		shutil.move(os.path.join(source_dir, file_name), target_dir+os.sep+file_name)
	open(target_dir+'__init__.py','w+').write('')
	if data['file']!="" and not os.path.isfile(data['short_name']+os.sep+'__main__.py'):
		try:
			os.rename(data['short_name']+os.sep+data['file'],data['short_name']+os.sep+'__main__.py')
			open(data['short_name']+os.sep+data['file'],'w+').write('# Please edit __main__.py for the main code. Thanks!\n(you can delete this file.)')
		except FileNotFoundError:
			pass
	# NOTE(review): the bare 'except:' clauses below swallow ALL errors,
	# including KeyboardInterrupt; narrowing to OSError would be safer.
	try:
		shutil.rmtree('dist')
	except:
		pass
	try:
		os.mkdir('bin')
	except:
		pass
	open("bin"+os.sep+data['short_name'],'w+').write(f"#!"+os.sep+"usr"+os.sep+"bin"+os.sep+f"env bash\npython3 -m {data['short_name']} $@ || echo 'Error. Please re-install shipsnake with:\\n`pip3 install shipsnake --upgrade`'")
	if mode == "build" or mode=="upload":
		os.system('python3 .'+os.sep+'setup.py sdist bdist_wheel')
		try:
			shutil.rmtree('build')
		except:
			pass
	elif mode == "dev":
		os.system('python3 .'+os.sep+'setup.py develop')
		for x in glob.glob('*.egg-info'):
			shutil.rmtree(x)
else:
	# NOTE(review): 'Illegeal' is a typo for 'Illegal' in the user message.
	print(f'Illegeal option `{mode}`')
	sys.exit(0)
if mode=="upload":
	print("Please make sure that you have a https://pypi.org/ account.")
	try:
		import twine
	except:
		input('Press enter to continue installing `twine`. Press ctrl+x to exit.')
		os.system('python3 -m pip install --user --upgrade twine || python3 -m pip install --upgrade twine')
	os.system('python3 -m twine upload dist'+os.sep+'*')
|
[
"[email protected]"
] | |
199390424fddb7b89b231c304224800f4fb4fb79
|
3a1c1373d8f1617485893dea46323c9d07dedc4d
|
/python_algo/프로그래머스/20210429_다리를 지나는 트럭.py
|
b1fd3a58b2856feedb64be0b86420485b12daf0c
|
[] |
no_license
|
GaYoung87/Algorithm
|
28b95c3eed054454a06a14d1a255ea1d57486b22
|
59abce98ff14879bc88b72ef2e562ce55dae5335
|
refs/heads/master
| 2023-08-31T07:52:31.487648 | 2023-08-29T15:09:04 | 2023-08-29T15:09:04 | 199,405,835 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 296 |
py
|
def solution(bridge_length, weight, truck_weights):
    """Return the total time for all trucks to cross the bridge.

    Simulation of the programmers.co.kr "trucks on a bridge" problem: the
    bridge holds at most `bridge_length` trucks totalling at most `weight`;
    one truck may enter per second and needs `bridge_length` seconds to cross.

    The original implementation never removed anything from `truck_weights`
    and mutated only `bridge_count`, so it looped forever for any non-empty
    input (or returned 0 immediately); this version implements the intended
    queue simulation with the same signature.
    """
    from collections import deque  # local import: this file has no import section

    waiting = deque(truck_weights)
    bridge = deque([0] * bridge_length)  # weight occupying each bridge slot
    load = 0   # total weight currently on the bridge
    time = 0
    while waiting:
        time += 1
        load -= bridge.popleft()  # the front slot leaves the bridge
        if load + waiting[0] <= weight:
            truck = waiting.popleft()
            bridge.append(truck)
            load += truck
        else:
            bridge.append(0)  # no truck enters this second
    # The last truck still needs bridge_length seconds to finish crossing.
    return time + bridge_length
|
[
"[email protected]"
] | |
b86b6586ed3da7fa83a3a45383ce369cf1633df0
|
99d436394e47571160340c95d527ecadaae83541
|
/algorithms_questions/ch17_shortest_path/q39_2.py
|
841ee2b4be8737f7003f3b85123dcedfc6c83627
|
[] |
no_license
|
LeeSeok-Jun/Algorithms
|
b47ba4de5580302e9e2399bcf85d245ebeb1b93d
|
0e8573bd03c50df3f89dd0ee9eed9cf8716ef8d8
|
refs/heads/main
| 2023-03-02T06:47:20.939235 | 2021-02-08T05:18:24 | 2021-02-08T05:18:24 | 299,840,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 969 |
py
|
"""
화성 탐사 - 3회차
"""
# 풀이 제한 시간 : 40분
# 2021/01/26 16:06 ~ 16:16
# 정답!
import heapq
import sys
input = sys.stdin.readline
dr = [-1, 1, 0, 0]
dc = [0, 0, -1, 1]
for tc in range(int(input())):
n = int(input())
graph = []
for _ in range(n):
graph.append(list(map(int, input().split())))
INF = int(1e9)
distance = [[INF] * n for _ in range(n)]
distance[0][0] = graph[0][0]
q = []
heapq.heappush(q, (graph[0][0], 0, 0))
while q:
dist, r, c = heapq.heappop(q)
if distance[r][c] < dist:
continue
for i in range(4):
nr = r + dr[i]
nc = c + dc[i]
if nr < 0 or nr >= n or nc < 0 or nc >= n:
continue
cost = dist + graph[nr][nc]
if cost < distance[nr][nc]:
distance[nr][nc] = cost
heapq.heappush(q, (cost, nr, nc))
print(distance[n-1][n-1])
|
[
"[email protected]"
] | |
c45eb5f1c3777c3c501733e0224bf45deaa1c22e
|
d6589ff7cf647af56938a9598f9e2e674c0ae6b5
|
/waf-openapi-20190910/setup.py
|
ecd45e94dd0948313db72333337965dd00c423a0
|
[
"Apache-2.0"
] |
permissive
|
hazho/alibabacloud-python-sdk
|
55028a0605b1509941269867a043f8408fa8c296
|
cddd32154bb8c12e50772fec55429a9a97f3efd9
|
refs/heads/master
| 2023-07-01T17:51:57.893326 | 2021-08-02T08:55:22 | 2021-08-02T08:55:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,604 |
py
|
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied.  See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
from setuptools import setup, find_packages
"""
setup module for alibabacloud_waf-openapi20190910.
Created on 25/04/2021
@author: Alibaba Cloud SDK
"""
PACKAGE = "alibabacloud_waf_openapi20190910"
# NOTE(review): the 'or' fallback is dead code -- the first literal is always truthy.
NAME = "alibabacloud_waf-openapi20190910" or "alibabacloud-package"
DESCRIPTION = "Alibaba Cloud waf-openapi (20190910) SDK Library for Python"
AUTHOR = "Alibaba Cloud SDK"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/aliyun/alibabacloud-python-sdk"
# Version is read from the package's __version__ attribute.
VERSION = __import__(PACKAGE).__version__
REQUIRES = [
    "alibabacloud_tea_util>=0.3.3, <1.0.0",
    "alibabacloud_tea_openapi>=0.2.4, <1.0.0",
    "alibabacloud_endpoint_util>=0.0.3, <1.0.0"
]
LONG_DESCRIPTION = ''
# Use README.md as the long description when present.
if os.path.exists('./README.md'):
    with open("README.md", encoding='utf-8') as fp:
        LONG_DESCRIPTION = fp.read()
setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license="Apache License 2.0",
    url=URL,
    keywords=["alibabacloud","waf","openapi20190910"],
    packages=find_packages(exclude=["tests*"]),
    include_package_data=True,
    platforms="any",
    install_requires=REQUIRES,
    python_requires=">=3.6",
    classifiers=(
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        "Topic :: Software Development"
    )
)
|
[
"[email protected]"
] | |
c642bda474582d7a38bff7dcb5c49dbe6fc93d0c
|
0b9470f9a839d87b21fd575421b5223afb4573c6
|
/07day/01-捕获输入异常.py
|
676dc64ff5bbe99306b430ca519aeb1cedf9871d
|
[] |
no_license
|
ZiHaoYa/1808
|
351356b4fa920a5075899c8abdce24a61502097f
|
891582547fef4c6fd4fd4132da033e48e069901f
|
refs/heads/master
| 2020-03-30T06:20:46.898840 | 2018-09-29T08:56:53 | 2018-09-29T08:56:53 | 147,603,769 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 251 |
py
|
# Demo of two ways to guard against non-numeric user input.
# Approach 1 (EAFP): attempt the int() conversion and catch the failure.
try:
    number = int(input("请输入一个"))
except Exception as ret:
    print("输入有误")
    print(ret)
# Approach 2 (LBYL): inspect the text first, then convert.
number = input("请输入一个")
if number.isdigit():
    print("出数字")
    number = int(number)
else:
    print("输入有误")
|
[
"[email protected]"
] | |
1d034b6b06e94315ceda06e8a8cc67681b8b3e9e
|
6a7d8b67aad59c51dafdfb8bcffd53864a3d65b0
|
/LeetCode/toeplitzMatrix.py
|
ef8e74f26346b2369d234c7f7ba1f11b002541a5
|
[] |
no_license
|
dicao425/algorithmExercise
|
8bba36c1a08a232678e5085d24bac1dbee7e5364
|
36cb33af758b1d01da35982481a8bbfbee5c2810
|
refs/heads/master
| 2021-10-07T08:56:18.030583 | 2018-12-04T05:59:17 | 2018-12-04T05:59:17 | 103,611,760 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 451 |
py
|
#!/usr/bin/python
import sys
class Solution(object):
    def isToeplitzMatrix(self, matrix):
        """Return True when every top-left-to-bottom-right diagonal of
        *matrix* holds a single constant value.

        :type matrix: List[List[int]]
        :rtype: bool
        """
        rows = len(matrix)
        cols = len(matrix[0])
        # Each row, dropped of its last column, must equal the next row
        # shifted one column right.
        return all(
            matrix[row][: cols - 1] == matrix[row + 1][1:]
            for row in range(rows - 1)
        )
def main():
    """Entry point for the harness; returns process exit status 0."""
    _solver = Solution()  # instantiation kept for parity with the template
    return 0
if __name__ == "__main__":
    sys.exit(main())
|
[
"[email protected]"
] | |
d08dcdc3b0c9dc63dfaf73fa44457a1d7af97a27
|
65b6e843df4c2e8b9abed79b33be24eba1686fa2
|
/absence/wsgi.py
|
2d6983105aada3e12ea2bdcaac1b9b198f064d05
|
[] |
no_license
|
The-Super-Stack/abs-backend
|
563fba90b36f45a0bac82aa5ace7c7d079309b09
|
d9335ec0a9fe9fdfa1d416d8277c11c2ac23cb5a
|
refs/heads/main
| 2023-08-14T13:33:05.631317 | 2021-10-11T06:18:44 | 2021-10-11T06:18:44 | 415,801,507 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 391 |
py
|
"""
WSGI config for absence project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'absence.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
dd84a5790a2a78e6f48019faaa8ff6e1469c0763
|
1ab7b3f2aa63de8488ce7c466a67d367771aa1f2
|
/Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/werkzeug/wrappers/accept.py
|
9605e637dc682aa6fb376053cb9a80387c566377
|
[
"MIT"
] |
permissive
|
icl-rocketry/Avionics
|
9d39aeb11aba11115826fd73357b415026a7adad
|
95b7a061eabd6f2b607fba79e007186030f02720
|
refs/heads/master
| 2022-07-30T07:54:10.642930 | 2022-07-10T12:19:10 | 2022-07-10T12:19:10 | 216,184,670 | 9 | 1 |
MIT
| 2022-06-27T10:17:06 | 2019-10-19T09:57:07 |
C++
|
UTF-8
|
Python
| false | false | 429 |
py
|
import typing as t
import warnings
class AcceptMixin:
    """Deprecated shim; the functionality now lives on ``Request`` itself."""

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        # stacklevel=2 attributes the warning to the caller's code,
        # not to this constructor.
        message = (
            "'AcceptMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Request' now includes the functionality"
            " directly."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)  # type: ignore
|
[
"[email protected]"
] | |
94f2d43841d6de8c61172de178f2cf83ea40e303
|
b8a3e758eff2922ff6abc77947d879e3f6d1afa3
|
/ws_moveit/build/moveit_resources/catkin_generated/pkg.develspace.context.pc.py
|
1db6f9fe77fc24ca7b6f4cd26bc3b8b329be1584
|
[] |
no_license
|
rrowlands/ros-baxter-coffee
|
ab7a496186591e709f88ccfd3b9944428e652f3e
|
32473c3012b7ec4f91194069303c85844cf1aae7
|
refs/heads/master
| 2016-09-05T20:58:20.428241 | 2013-12-02T23:10:44 | 2013-12-02T23:10:44 | 14,313,406 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 523 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Develspace pkg-config context for the moveit_resources package.
CATKIN_PACKAGE_PREFIX = ""
# Raw template substitutions; an empty string means "no entries".
_includes = "/home/p/peth8881/robotics/ws_moveit/build/moveit_resources/include"
_libraries = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = _includes.split(';') if _includes else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = _libraries.split(';') if _libraries else []
PROJECT_NAME = "moveit_resources"
PROJECT_SPACE_DIR = "/home/p/peth8881/robotics/ws_moveit/devel"
PROJECT_VERSION = "0.5.0"
|
[
"[email protected]"
] | |
4b2039b2583b2258d2f0fea69a7ad4fcde28256d
|
46ac0965941d06fde419a6f216db2a653a245dbd
|
/sdks/python/appcenter_sdk/models/AzureSubscriptionPatchRequest.py
|
12120015c7f4061d120ebe159a0c58a00ab14fa1
|
[
"MIT",
"Unlicense"
] |
permissive
|
b3nab/appcenter-sdks
|
11f0bab00d020abb30ee951f7656a3d7ed783eac
|
bcc19c998b5f648a147f0d6a593dd0324e2ab1ea
|
refs/heads/master
| 2022-01-27T15:06:07.202852 | 2019-05-19T00:12:43 | 2019-05-19T00:12:43 | 187,386,747 | 0 | 3 |
MIT
| 2022-01-22T07:57:59 | 2019-05-18T17:29:21 |
Python
|
UTF-8
|
Python
| false | false | 3,336 |
py
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API  # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re  # noqa: F401
import six
class AzureSubscriptionPatchRequest(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Single field model: is_billing maps 1:1 to the JSON key 'is_billing'.
    swagger_types = {
        'is_billing': 'boolean'
    }
    attribute_map = {
        'is_billing': 'is_billing'
    }
    def __init__(self, is_billing=None):  # noqa: E501
        """AzureSubscriptionPatchRequest - a model defined in Swagger"""  # noqa: E501
        self._is_billing = None
        self.discriminator = None
        # Goes through the property setter below, which rejects None.
        self.is_billing = is_billing
    @property
    def is_billing(self):
        """Gets the is_billing of this AzureSubscriptionPatchRequest.  # noqa: E501
        If the subscription is used for billing  # noqa: E501
        :return: The is_billing of this AzureSubscriptionPatchRequest.  # noqa: E501
        :rtype: boolean
        """
        return self._is_billing
    @is_billing.setter
    def is_billing(self, is_billing):
        """Sets the is_billing of this AzureSubscriptionPatchRequest.
        If the subscription is used for billing  # noqa: E501
        :param is_billing: The is_billing of this AzureSubscriptionPatchRequest.  # noqa: E501
        :type: boolean
        """
        if is_billing is None:
            raise ValueError("Invalid value for `is_billing`, must not be `None`")  # noqa: E501
        self._is_billing = is_billing
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            # Recursively serialize nested models, and lists/dicts of models.
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AzureSubscriptionPatchRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"[email protected]"
] | |
1d4ebcce0118f05541c3c6d3e01ae58b51dcc55a
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/paloaltonetworks/azure-mgmt-paloaltonetworksngfw/generated_samples/certificate_object_global_rulestack_delete_maximum_set_gen.py
|
1f902b1ba35bfded952bc53f1fceaa215a018896
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 |
MIT
| 2023-09-08T08:38:48 | 2019-11-18T07:09:24 |
Python
|
UTF-8
|
Python
| false | false | 1,697 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.paloaltonetworksngfw import PaloAltoNetworksNgfwMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-paloaltonetworksngfw
# USAGE
python certificate_object_global_rulestack_delete_maximum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Delete one certificate object on a global rulestack and print the result."""
    # Credentials come from the AZURE_CLIENT_ID / AZURE_TENANT_ID /
    # AZURE_CLIENT_SECRET environment variables via DefaultAzureCredential.
    credential = DefaultAzureCredential()
    client = PaloAltoNetworksNgfwMgmtClient(
        credential=credential,
        subscription_id="SUBSCRIPTION_ID",
    )
    # begin_delete returns a poller; .result() blocks until the LRO finishes.
    poller = client.certificate_object_global_rulestack.begin_delete(
        global_rulestack_name="praval",
        name="armid1",
    )
    print(poller.result())
# x-ms-original-file: specification/paloaltonetworks/resource-manager/PaloAltoNetworks.Cloudngfw/preview/2022-08-29-preview/examples/CertificateObjectGlobalRulestack_Delete_MaximumSet_Gen.json
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
6b3f498770a1dfc3845ef9db19d864e6ef3dbe55
|
f98a2875e0cdc84341fe8e37b11336368a257fe7
|
/agents/product.py
|
a86acbd28d5e0c5f8555128eb791223b5eb52c56
|
[
"MIT"
] |
permissive
|
anhnguyendepocen/PolicySpace2
|
eaa83533b7ad599af677ce69353841e665b447d0
|
d9a450e47651885ed103d3217dbedec484456d07
|
refs/heads/master
| 2023-08-28T04:55:40.834445 | 2021-10-21T18:50:03 | 2021-10-21T18:50:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 298 |
py
|
class Product:
    """A line item: product identifier, unit count, and price."""

    _REPR_FMT = 'Product ID: %d, Quantity: %d, Price: %.1f'

    def __init__(self, product_id, quantity, price):
        # Store the three fields verbatim; no validation is performed.
        self.product_id, self.quantity, self.price = product_id, quantity, price

    def __repr__(self):
        return self._REPR_FMT % (self.product_id, self.quantity, self.price)
|
[
"[email protected]"
] | |
d8b55cb94184067e2e3d57f95ab20936d5d86e5e
|
c200119f4180ddc17dcaeb87d8bad6399442a529
|
/tests/src/miniblog/settings.py
|
4a49139d88b610ef74757746972a748e912302d5
|
[] |
no_license
|
marekmalek/django-observer
|
3f4ae6ba1482f649d4495a95b95d4ec74f8222f2
|
3b9e4aeaaa9cd4cc4af7a245a185fb18e89e181a
|
refs/heads/master
| 2021-01-18T06:59:34.588359 | 2012-08-31T16:31:25 | 2012-08-31T16:31:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,009 |
py
|
# Django settings for weblog project.
import os
import sys
# Project root is two directory levels above this settings module.
ROOT=os.path.join(os.path.dirname(__file__), '../../')
# Make the checkout's parent directory importable so "miniblog.*"
# packages resolve when running straight from a source tree.
app_path=os.path.realpath(os.path.join(ROOT, '../'))
if app_path not in sys.path:
    sys.path.insert(0, app_path)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
# Single SQLite database stored at the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(ROOT, 'database.db'),
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(ROOT, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a committed SECRET_KEY should be rotated before any
# production deployment.
SECRET_KEY = '4et6(22#@lgie4wogk)6um6^jklpkk0!z-l%uj&kvs*u2xrvfj%'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'miniblog.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(ROOT, 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'miniblog.autocmd',
    'miniblog.blogs',
)
FIXTURE_DIRS = (
    os.path.join(ROOT, 'fixtures'),
)
LOGIN_REDIRECT_URL = '/'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
[
"[email protected]"
] | |
cb0fbdb98f51edb323e77ac971a051e4e5dbf795
|
3cda2dc11e1b7b96641f61a77b3afde4b93ac43f
|
/test/training_service/config/metrics_test/trial.py
|
43e3ac1b4d66f7bd96f307c7314cbfb226ab1cdc
|
[
"MIT"
] |
permissive
|
Eurus-Holmes/nni
|
6da51c352e721f0241c7fd26fa70a8d7c99ef537
|
b84d25bec15ece54bf1703b1acb15d9f8919f656
|
refs/heads/master
| 2023-08-23T10:45:54.879054 | 2023-08-07T02:39:54 | 2023-08-07T02:39:54 | 163,079,164 | 3 | 2 |
MIT
| 2023-08-07T12:35:54 | 2018-12-25T12:04:16 |
Python
|
UTF-8
|
Python
| false | false | 818 |
py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import time
import json
import argparse
import nni
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--dict_metrics", action='store_true')
    args = parser.parse_args()
    # Pick the fixture file: dict-shaped metrics vs. plain scalar metrics.
    if args.dict_metrics:
        result_file = 'expected_metrics_dict.json'
    else:
        result_file = 'expected_metrics.json'
    # Trial protocol: request hyper-parameters first, even though this
    # test trial does not use them.
    nni.get_next_parameter()
    with open(result_file, 'r') as f:
        m = json.load(f)
    time.sleep(5)
    # Sleeps pace the reports so the training service observes each
    # metric as a distinct event.
    for v in m['intermediate_result']:
        time.sleep(1)
        print('report_intermediate_result:', v)
        nni.report_intermediate_result(v)
    time.sleep(1)
    print('report_final_result:', m['final_result'])
    nni.report_final_result(m['final_result'])
    print('done')
|
[
"[email protected]"
] | |
b0504f6fbbe712366d92f283d5cbb43334f0bf11
|
e4cbd82358ba5e8b4d4bacefa054e4ecda2d1517
|
/config/settings_base.py
|
622ff272b323de19ec535a9f28658818391172f6
|
[] |
no_license
|
mziegler/UssdDjangoDemo
|
a69ca95010443e5925fdf181904da05e9938bcc3
|
9b29eb562a7832aa6a033daf1bee8d99746ee93b
|
refs/heads/master
| 2020-07-21T18:16:40.325034 | 2017-07-01T00:42:40 | 2017-07-01T00:42:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,480 |
py
|
"""
Django settings for djangoUSSD project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(PROJECT_DIR, ...)
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONFIG_DIR = os.path.join(PROJECT_DIR,'config')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q@^7+k@94i7&x58y(czx*&zw7g+x2i!7%hwmj^fr$qey(a^%e9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Site Apps
'UssdHttp',
'UssdDemo',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['UssdHttp/simulator/static'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONFIG_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Session Settings
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR,'UssdHttp/simulator/static'),
)
|
[
"[email protected]"
] | |
2d507377a10d3350cc729739daf540151c9c4dc8
|
2e4169290bf115e62cebe1a51ce1dc1528bc2cd2
|
/trunk/vlist/vlist.py
|
703c11e359dcaf87b186f2001be0c4794c72d3e8
|
[] |
no_license
|
BGCX067/ezwidgets-svn-to-git
|
6c96bb408369316d395f6c8836b8e7be063ae0d8
|
2864f45bc3e9d87b940b34d0fa6ce64e712c2df8
|
refs/heads/master
| 2021-01-13T09:49:25.511902 | 2015-12-28T14:19:53 | 2015-12-28T14:19:53 | 48,833,330 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,324 |
py
|
#----------------------------------------------------------------------------
# Name:         vlist.py
# Purpose:      virtual list with mix-in ColumnSorter class
#
# Author:       Egor Zindy
#
# Created:      26-June-2005
# Licence:      public domain
#----------------------------------------------------------------------------
import wx
import wx.lib.mixins.listctrl as listmix
class VirtualList(wx.ListCtrl, listmix.ColumnSorterMixin):
    # Virtual (LC_VIRTUAL) report-mode list: row text is supplied on demand
    # via the OnGetItem* callbacks instead of being stored in the control.
    def __init__(self, parent,columns,style=0):
        wx.ListCtrl.__init__( self, parent, -1,
            style=wx.LC_REPORT|wx.LC_VIRTUAL|style)
        listmix.ColumnSorterMixin.__init__(self, len(columns))
        self.itemDataMap={}
        self.il = wx.ImageList(16, 16)
        self.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
        self.il_symbols={}
        #adding some art (sm_up and sm_dn are used by ColumnSorterMixin
        #symbols can be added to self.il using SetSymbols
        symbols={"sm_up":wx.ART_GO_UP,"sm_dn":wx.ART_GO_DOWN}
        self.SetSymbols(symbols)
        #building the columns
        self.SetColumns(columns)
    #---------------------------------------------------
    # These methods are callbacks for implementing the
    # "virtualness" of the list...
    def OnGetItemText(self, item, col):
        # item is a display position; map it through the current sort order.
        index=self.itemIndexMap[item]
        s = self.itemDataMap[index][col]
        return s
    def OnGetItemImage(self, item):
        # -1: rows carry no per-item image (wx convention).
        return -1
    def OnGetItemAttr(self, item):
        return None
    #---------------------------------------------------
    # These methods are Used by the ColumnSorterMixin,
    # see wx/lib/mixins/listctrl.py
    def GetListCtrl(self):
        return self
    def GetSortImages(self):
        return self.il_symbols["sm_dn"],self.il_symbols["sm_up"]
    def SortItems(self,sorter=None):
        r"""\brief a SortItem which works with virtual lists
        The sorter is not actually used (should it?)
        """
        #These are actually defined in ColumnSorterMixin
        #col is the column which was clicked on and
        #the sort flag is False for descending (Z->A)
        #and True for ascending (A->Z).
        col=self._col
        #creating pairs [column item defined by col, key]
        items=[]
        for k,v in self.itemDataMap.items():
            items.append([v[col],k])
        #sort the pairs by value (first element), then by key (second element).
        #Multiple same values are okay, because the keys are unique.
        items.sort()
        #getting the keys associated with each sorted item in a list
        k=[key for value, key in items]
        #False is descending (starting from last)
        if self._colSortFlag[col]==False:
            k.reverse()
        #storing the keys as self.itemIndexMap (is used in OnGetItemText,Image,ItemAttr).
        self.itemIndexMap=k
        #redrawing the list
        self.Refresh()
    #---------------------------------------------------
    # These methods should be used to interact with the
    # controler
    def SetItemMap(self,itemMap):
        r"""\brief sets the items to be displayed in the control
        \param itemMap a dictionary {id1:("item1","item2",...), id2:("item1","item2",...), ...} and ids are unique
        """
        l=len(itemMap)
        self.itemDataMap=itemMap
        self.SetItemCount(l)
        #This regenerates self.itemIndexMap and redraws the ListCtrl
        self.SortItems()
    def SetColumns(self,columns):
        r"""\brief adds columns to the control
        \param columns a list of columns (("name1",width1),("name2",width2),...)
        """
        i=0
        for name,s in columns:
            self.InsertColumn(i, name)
            self.SetColumnWidth(i, s)
            i+=1
    def SetSymbols(self,symbols,provider=wx.ART_TOOLBAR):
        r"""\brief adds symbols to self.ImageList
        Symbols are provided by the ArtProvider
        \param symbols a dictionary {"name1":wx.ART_ADD_BOOKMARK,"name2":wx.ART_DEL_BOOKMARK,...}
        \param provider an optional provider
        """
        # NOTE(review): wx.ArtProvider_GetBitmap is the classic-wx spelling;
        # Phoenix builds use wx.ArtProvider.GetBitmap — confirm target wx version.
        for k,v in symbols.items():
            self.il_symbols[k]=self.il.Add(wx.ArtProvider_GetBitmap(v,provider,(16,16)))
|
[
"[email protected]"
] | |
5f7b5a15c9442a8a6d69e574837dd9b9db1641db
|
329bf886f90cdcc5b083d2ab47c529f5df95767b
|
/survey/views.py
|
7a2375bebe6f6c999d7383dd539267dda614e1e5
|
[] |
no_license
|
leliel12/otree_saral
|
f4a16073479836df36789a58a311a8dc0e2fd7f5
|
d4c91e1b9451460a656f270fe9f540bf811a9a32
|
refs/heads/master
| 2021-01-10T08:39:39.278589 | 2015-10-26T00:53:29 | 2015-10-26T00:53:29 | 43,258,370 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 697 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
from . import models
from ._builtin import Page, WaitPage
from otree.common import Currency as c, currency_range
from .models import Constants
class Question(Page):
    # Single survey page: all answers are saved as fields on the
    # Player model for this round.
    form_model = models.Player
    form_fields = ["name", "age", "email", "gender", "major",
                   "location_of_your_partners_influence_your_decisions",
                   "working_in_a_location_of_their_choice_more_less_to_the_team",
                   "partners_in_location_their_choice_worked_harder_than_the_lab",
                   "I_work_best_in", "risks_in_everyday_life", "risks_in_financial_decision"]
# The app shows only this one page, in order.
page_sequence = [Question]
|
[
"[email protected]"
] | |
879d3e52d3b63ee8f078a3a5f876d4b96ca5aba3
|
3dc60bbcb27600ffe7baa4e6187fe2c71bb7b5ab
|
/Python/to-lower-case.py
|
ca69091671f3380ba24c1920aca7d39718fe6f48
|
[
"MIT"
] |
permissive
|
phucle2411/LeetCode
|
33f3cc69fada711545af4c7366eda5d250625120
|
ba84c192fb9995dd48ddc6d81c3153488dd3c698
|
refs/heads/master
| 2022-01-14T16:49:50.116398 | 2019-06-12T23:41:29 | 2019-06-12T23:41:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
# https://leetcode.com/problems/to-lower-case/submissions/
class Solution:
    def toLowerCase(self, str):
        """Return *str* lowercased.

        :type str: str
        :rtype: str
        """
        # Delegate to the built-in, Unicode-aware lowercasing.
        lowered = str.lower()
        return lowered
|
[
"[email protected]"
] | |
8b6b1b686c656f460928930a4a0b4fa4374f8ad9
|
18e48f22f88fe80ce54d12fdbf9d05a7ca5bd65a
|
/0x11-python-network_1/7-error_code.py
|
ad1698d9aff563aff2ccaec553148dfecf84b193
|
[] |
no_license
|
SantiagoHerreG/holbertonschool-higher_level_programming
|
426c4bc9bc080a81b72d2f740c8ed2eb365023eb
|
ca2612ef3be92a60764d584cf39de3a2ba310f84
|
refs/heads/master
| 2020-07-22T19:33:48.507287 | 2020-02-14T04:34:00 | 2020-02-14T04:34:00 | 207,305,022 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 342 |
py
|
#!/usr/bin/python3
"""takes in a URL, sends a request to the URL and displays the body of
the response"""
import requests
import sys
if __name__ == "__main__":
    # Fetch the URL given as the first command-line argument.
    response = requests.get(sys.argv[1])
    # Print the body on HTTP 200; otherwise report the status code.
    if response.status_code == requests.codes.ok:
        print(response.text)
    else:
        print("Error code:", response.status_code)
|
[
"[email protected]"
] | |
a800f9d568a1d7598f3cae018badde0c06ea9409
|
8578ae5be776b49559fa95ce30f6b45b6a82b73a
|
/test/functional/p2p_fingerprint.py
|
0a572d97cfb88494d434474850b03427f50dd5ed
|
[
"MIT"
] |
permissive
|
devcoin/core
|
3f9f177bd9d5d2cc54ff95a981cfe88671206ae2
|
f67e8b058b4316dd491615dc3f8799a45f396f4a
|
refs/heads/master
| 2023-05-25T03:42:03.998451 | 2023-05-24T07:59:22 | 2023-05-24T08:02:14 | 21,529,485 | 16 | 13 |
MIT
| 2022-01-07T17:04:18 | 2014-07-05T22:42:13 |
C
|
UTF-8
|
Python
| false | false | 5,061 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core and Devcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv, MSG_BLOCK
from test_framework.p2p import (
    P2PInterface,
    msg_headers,
    msg_block,
    msg_getdata,
    msg_getheaders,
    p2p_lock,
)
from test_framework.test_framework import DevcoinTestFramework
from test_framework.util import (
    assert_equal,
)
class P2PFingerprintTest(DevcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
    # Build a chain of blocks on top of given one
    def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
        blocks = []
        for _ in range(nblocks):
            coinbase = create_coinbase(prev_height + 1)
            # Each block's time is one second past the previous median time,
            # keeping the chain's timestamps monotonically valid.
            block_time = prev_median_time + 1
            block = create_block(int(prev_hash, 16), coinbase, block_time)
            block.solve()
            blocks.append(block)
            prev_hash = block.hash
            prev_height += 1
            prev_median_time = block_time
        return blocks
    # Send a getdata request for a given block hash
    def send_block_request(self, block_hash, node):
        msg = msg_getdata()
        msg.inv.append(CInv(MSG_BLOCK, block_hash))
        node.send_message(msg)
    # Send a getheaders request for a given single block hash
    def send_header_request(self, block_hash, node):
        msg = msg_getheaders()
        msg.hashstop = block_hash
        node.send_message(msg)
    # Checks that stale blocks timestamped more than a month ago are not served
    # by the node while recent stale blocks and old active chain blocks are.
    # This does not currently test that stale blocks timestamped within the
    # last month but that have over a month's worth of work are also withheld.
    def run_test(self):
        node0 = self.nodes[0].add_p2p_connection(P2PInterface())
        # Set node time to 60 days ago
        self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
        # Generating a chain of 10 blocks
        block_hashes = self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
        # Create longer chain starting 2 blocks before current tip
        height = len(block_hashes) - 2
        block_hash = block_hashes[height - 1]
        block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
        new_blocks = self.build_chain(5, block_hash, height, block_time)
        # Force reorg to a longer chain
        node0.send_message(msg_headers(new_blocks))
        node0.wait_for_getdata([x.sha256 for x in new_blocks])
        for block in new_blocks:
            node0.send_and_ping(msg_block(block))
        # Check that reorg succeeded
        assert_equal(self.nodes[0].getblockcount(), 13)
        # The old tip is now stale (off the active chain).
        stale_hash = int(block_hashes[-1], 16)
        # Check that getdata request for stale block succeeds
        self.send_block_request(stale_hash, node0)
        node0.wait_for_block(stale_hash, timeout=3)
        # Check that getheader request for stale block header succeeds
        self.send_header_request(stale_hash, node0)
        node0.wait_for_header(hex(stale_hash), timeout=3)
        # Longest chain is extended so stale is much older than chain tip
        self.nodes[0].setmocktime(0)
        block_hash = int(self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[-1], 16)
        assert_equal(self.nodes[0].getblockcount(), 14)
        node0.wait_for_block(block_hash, timeout=3)
        # Request for very old stale block should now fail
        with p2p_lock:
            node0.last_message.pop("block", None)
        self.send_block_request(stale_hash, node0)
        node0.sync_with_ping()
        assert "block" not in node0.last_message
        # Request for very old stale block header should now fail
        with p2p_lock:
            node0.last_message.pop("headers", None)
        self.send_header_request(stale_hash, node0)
        node0.sync_with_ping()
        assert "headers" not in node0.last_message
        # Verify we can fetch very old blocks and headers on the active chain
        block_hash = int(block_hashes[2], 16)
        self.send_block_request(block_hash, node0)
        self.send_header_request(block_hash, node0)
        node0.sync_with_ping()
        self.send_block_request(block_hash, node0)
        node0.wait_for_block(block_hash, timeout=3)
        self.send_header_request(block_hash, node0)
        node0.wait_for_header(hex(block_hash), timeout=3)
if __name__ == '__main__':
    P2PFingerprintTest().main()
|
[
"[email protected]"
] | |
1c1dcd8bc185c5981370cc6412b274be30918a26
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_PolyTrend_Seasonal_Minute_MLP.py
|
613e814baf612b7e6b06d1d12091e2333056e4bd
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 |
BSD-3-Clause
| 2018-12-17T22:08:12 | 2018-06-12T17:15:43 |
Python
|
UTF-8
|
Python
| false | false | 172 |
py
|
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod

# Build one forecast model on the ozone test dataset with a fixed component
# combination: RelativeDifference transform, polynomial trend, per-minute
# seasonality, and an MLP autoregressive model.
testmod.build_model( ['RelativeDifference'] , ['PolyTrend'] , ['Seasonal_Minute'] , ['MLP'] );
|
[
"[email protected]"
] | |
59378ed1c249261ad53db470074838f10644f261
|
3381d3d1b70bd88374e75d90197d0202945bbade
|
/authentication/views.py
|
1ea82fb55ee882e019ac000c9ca14bf94b9c33ca
|
[] |
no_license
|
PHONGLEX/djangorestframework_quizapi
|
30d5011b67a484a525c94071672f29ed2b0cb700
|
c9f7b4ebdc00188533a0a5f44c13594011729fa4
|
refs/heads/master
| 2023-08-02T00:58:35.647091 | 2021-10-01T09:17:05 | 2021-10-01T09:17:05 | 412,402,225 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,554 |
py
|
import jsonpickle
import jwt
from rest_framework import generics, status
from rest_framework.views import APIView
from rest_framework_simplejwt.tokens import RefreshToken
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.contrib import auth
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import smart_str, force_bytes, DjangoUnicodeDecodeError
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.sites.shortcuts import get_current_site
from django.conf import settings
from django.urls import reverse
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from .models import User
from .serializers import *
from .tasks import send_email_task
class RegisterView(generics.GenericAPIView):
    """Register a new user and email them an account-verification link."""
    serializer_class = RegisterSerializer

    def post(self, request):
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        user_data = serializer.data

        user = User.objects.get(email=user_data['email'])
        token = RefreshToken.for_user(user)
        site = get_current_site(request).domain
        url = reverse('email-verify')
        link = 'http://' + site + url + "?token=" + str(token)
        # Bug fix: "Hi" had no trailing space, producing e.g. "[email protected]".
        # Now matches the greeting format already used by ResetPasswordView.
        body = "Hi " + user.email + "\n Please use the link below to verify your account " + link
        data = {
            'subject': "Verify your account",
            "body": body,
            "to": user.email
        }
        # Sent asynchronously via Celery so the HTTP request is not blocked on SMTP.
        send_email_task.delay(data)
        return Response({'message': "We've sent you an email to verify your account"}, status=status.HTTP_201_CREATED)
class EmailVerificationView(generics.GenericAPIView):
    """Verify a user's email address from the JWT passed as ?token=..."""
    serializer_class = EmailVerificationSerializer

    token_param = openapi.Parameter('token', openapi.IN_QUERY, description="token param", type=openapi.TYPE_STRING)

    @swagger_auto_schema(manual_parameters=[token_param])
    def get(self, request):
        token = request.GET.get('token')
        try:
            # Bug fix: PyJWT expects `algorithms` to be a *list* of accepted
            # algorithm names; the bare string "HS256" only worked by
            # accidental substring matching.
            payload = jwt.decode(token, settings.SECRET_KEY, algorithms=["HS256"])
            user = User.objects.get(id=payload['user_id'])
            # Idempotent: re-visiting the link after activation still returns 200.
            if not user.is_verified:
                user.is_verified = True
                user.save()
            return Response({"message":"Successfully activate"}, status=status.HTTP_200_OK)
        except jwt.exceptions.DecodeError:
            return Response({"error": "Invalid token, please request a new one"}, status=status.HTTP_400_BAD_REQUEST)
        except jwt.exceptions.ExpiredSignatureError:
            return Response({"error": "Token is expired, please request a new one"}, status=status.HTTP_400_BAD_REQUEST)
class LoginView(generics.GenericAPIView):
    """Authenticate a user; credential checking lives in LoginSerializer."""
    serializer_class = LoginSerializer

    def post(self, request):
        # The serializer raises a validation error (HTTP 400) on bad
        # credentials; otherwise its data holds the login payload.
        login_serializer = self.serializer_class(data=request.data)
        login_serializer.is_valid(raise_exception=True)
        return Response(login_serializer.data, status=status.HTTP_200_OK)
class ResetPasswordView(generics.GenericAPIView):
    """Start the password-reset flow by emailing a signed reset link.

    The response is the same whether or not the address belongs to an
    account, so the endpoint cannot be used to probe registered emails.
    """
    serializer_class = ResetPasswordSerializer

    def post(self, request):
        email = request.data.get('email')
        matches = User.objects.filter(email=email)
        if matches.exists():
            account = matches[0]
            # uidb64 encodes the pickled user; token is Django's one-time
            # password-reset token for that user.
            uidb64 = urlsafe_base64_encode(force_bytes(jsonpickle.encode(account)))
            token = PasswordResetTokenGenerator().make_token(account)
            url = reverse('reset-password-confirm', kwargs={
                'uidb64': uidb64,
                'token': token
            })
            site = get_current_site(request).domain
            link = 'http://' + site + url
            body = "Hi " + account.email + "\n Please use the link below to reset your password " + link
            data = {
                'subject': "Reset your password",
                "body": body,
                "to": account.email
            }
            send_email_task.delay(data)
        return Response({'message': "We've sent you an email to reset your password"}, status=status.HTTP_200_OK)
class CheckPasswordResetTokenView(APIView):
    """Validate a password-reset link (uidb64 + token) before a new password is set."""

    def post(self, request, uidb64, token):
        try:
            obj = smart_str(urlsafe_base64_decode(uidb64))
            user = jsonpickle.decode(obj)
            if not PasswordResetTokenGenerator().check_token(user, token):
                return Response({'error': "Invalid token, please request a new one"}, status=status.HTTP_400_BAD_REQUEST)
            return Response({'success': True, 'uidb64': uidb64, 'token': token}, status=status.HTTP_200_OK)
        except Exception:
            # Broad catch: decoding can fail in several ways (bad base64,
            # bad pickle, unicode errors).
            # Bug fix: this error response previously returned HTTP_200_OK,
            # so clients saw a malformed/expired link as success. Use 400 to
            # match the invalid-token branch above.
            return Response({'error': "Invalid token, please request a new one"}, status=status.HTTP_400_BAD_REQUEST)
class SetNewPasswordView(generics.GenericAPIView):
    """Apply a new password after a successful reset-token check."""
    serializer_class = SetNewPasswordSerializer

    def patch(self, request):
        # NOTE(review): the serializer presumably validates the reset token
        # and saves the new password inside is_valid()/validate() -- confirm
        # in serializers.py.
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        return Response({"message": "Changed password successfully"}, status=status.HTTP_200_OK)
class LogoutView(generics.GenericAPIView):
    """Log out an authenticated user by consuming their refresh token."""
    serializer_class = LogoutSerializer
    permission_classes = (IsAuthenticated,)

    def post(self, request):
        # NOTE(review): serializer.save() presumably blacklists the supplied
        # refresh token (see LogoutSerializer) -- confirm in serializers.py.
        serializer = self.serializer_class(data=request.data)
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(status=status.HTTP_204_NO_CONTENT)
|
[
"[email protected]"
] | |
30d040917850dbfe213295e61066ca08ae2f4ddd
|
509fc176af52f46ce62f54a6f63c7c27b1bd0c2c
|
/djangofiltertest/djangofiltertest/apps/posts_areas/api_v1/views.py
|
d95acb524992ab1ca2a3395a52d48a793ab3f132
|
[
"MIT"
] |
permissive
|
gonzaloamadio/django-filter-test
|
8b16fdb989a8141ba5852cd4804148cb6b153e86
|
7b9dbc36ca248e2113deaac03e824b123a31a4ba
|
refs/heads/master
| 2022-12-10T11:35:07.684916 | 2019-01-24T09:19:21 | 2019-01-24T09:19:21 | 167,159,577 | 0 | 0 |
MIT
| 2022-12-08T01:33:33 | 2019-01-23T09:54:40 |
Python
|
UTF-8
|
Python
| false | false | 270 |
py
|
from posts_areas.api_v1.serializers import PostAreaSerializer
from posts_areas.models import PostArea
from djangofiltertest.libs.views import APIViewSet
class PostAreaViewSet(APIViewSet):
    """CRUD endpoints for PostArea objects via the project's APIViewSet base."""
    queryset = PostArea.objects.all()
    serializer_class = PostAreaSerializer
|
[
"[email protected]"
] | |
dc90c334f8f9314e070b2c504c81d5c4b72155a3
|
bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d
|
/lib/googlecloudsdk/core/util/tokenizer.py
|
1a403b82516d25b5b6213598941a3ba5f7672ed2
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
05fbb473d629195f25887fc5bfaa712f2cbc0a24
|
392abf004b16203030e6efd2f0af24db7c8d669e
|
refs/heads/master
| 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 |
NOASSERTION
| 2022-10-29T20:49:13 | 2021-02-02T05:47:30 |
Python
|
UTF-8
|
Python
| false | false | 2,291 |
py
|
# -*- coding: utf-8 -*- #
# Copyright 2013 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility for tokenizing strings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
__all__ = ['Literal', 'Separator', 'Tokenize']

_ESCAPE_CHAR = '\\'


class Literal(str):
  """A run of ordinary characters produced by Tokenize."""
  pass


class Separator(str):
  """A single separator character produced by Tokenize."""
  pass


def Tokenize(string, separators):
  """Splits string on the given separators, keeping the separators.

  Unlike str.split, the separator characters are retained in the result:
  each one becomes a Separator and every run of other characters becomes
  a Literal. A backslash escapes the next character, which is then always
  treated as literal text.

  For example, Tokenize('a:b,c:d', [':', ',']) returns [Literal('a'),
  Separator(':'), Literal('b'), Separator(','), Literal('c'),
  Separator(':'), Literal('d')].

  Args:
    string: str, The string to partition.
    separators: [str], A list of strings on which to partition.

  Raises:
    ValueError: If an unterminated escape sequence is at the
      end of the input.

  Returns:
    [tuple], A list of strings which can be of types Literal or
    Separator.
  """
  tokens = []
  current = []
  index = 0
  length = len(string)
  while index < length:
    char = string[index]
    if char == _ESCAPE_CHAR:
      if index + 1 >= length:
        # Trailing backslash with nothing to escape.
        raise ValueError('illegal escape sequence at index {0}: {1}'.format(
            index, string))
      current.append(string[index + 1])
      index += 2
    elif char in separators:
      tokens.append(Literal(''.join(current)))
      tokens.append(Separator(char))
      current = []
      index += 1
    else:
      current.append(char)
      index += 1
  # The (possibly empty) final literal is always emitted.
  tokens.append(Literal(''.join(current)))
  return tokens
|
[
"[email protected]"
] | |
ea11bf784f41f2baf536fbb111241ab1f1165160
|
66c7b0da6ee27ddce0943945503cdecf199f77a2
|
/rllib/util/parameter_decay.py
|
2df23cd677dcb3091464bf29c075df7a3d8bd9ee
|
[
"MIT"
] |
permissive
|
tzahishimkin/extended-hucrl
|
07609f9e9f9436121bcc64ff3190c966183a2cd9
|
c144aeecba5f35ccfb4ec943d29d7092c0fa20e3
|
refs/heads/master
| 2023-07-09T22:57:28.682494 | 2021-08-24T08:50:16 | 2021-08-24T08:50:16 | 383,819,908 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,056 |
py
|
"""Implementation of a Parameter decay class."""
from abc import ABCMeta
import torch.jit
import torch.nn as nn
from rllib.util.neural_networks.utilities import inverse_softplus
class ParameterDecay(nn.Module, metaclass=ABCMeta):
    """Base class for parameters that change with the interaction step.

    ``start``, ``end`` and ``decay`` are stored as non-trainable
    ``nn.Parameter`` tensors; subclasses implement ``forward`` to return
    the current value as a function of ``self.step``.
    """

    def __init__(self, start, end=None, decay=None):
        super().__init__()

        # Coerce every argument to a tensor, then freeze it as a Parameter.
        start = start if isinstance(start, torch.Tensor) else torch.tensor(start)
        self.start = nn.Parameter(start, requires_grad=False)

        # Default: no decay target -> stay at the start value.
        end = start if end is None else end
        end = end if isinstance(end, torch.Tensor) else torch.tensor(end)
        self.end = nn.Parameter(end, requires_grad=False)

        decay = 1.0 if decay is None else decay
        decay = decay if isinstance(decay, torch.Tensor) else torch.tensor(decay)
        self.decay = nn.Parameter(decay, requires_grad=False)

        self.step = 0

    @torch.jit.export
    def update(self):
        """Advance the internal step counter by one."""
        self.step += 1
class Constant(ParameterDecay):
    """Constant parameter: always returns the start value, ignoring steps."""

    def forward(self):
        """See `ParameterDecay.__call__'."""
        return self.start
class Learnable(ParameterDecay):
    """Parameter learned by gradient descent.

    When ``positive`` is True the raw parameter is stored in
    inverse-softplus space and mapped through softplus (+1e-4) on the
    forward pass, keeping the returned value strictly positive.
    """

    positive: bool

    def __init__(self, val, positive: bool = False):
        # Plain bool attribute; safe to set before nn.Module.__init__.
        self.positive = positive
        if self.positive:
            # Store the pre-activation value so that softplus(start) == val.
            val = inverse_softplus(val).item()
        super().__init__(val)
        self.start.requires_grad = True
        # Improvement: removed the redundant second `self.positive = positive`
        # assignment (same value, dead store).

    def forward(self):
        """See `ParameterDecay.__call__'."""
        if self.positive:
            # Small offset avoids returning exactly zero.
            return torch.nn.functional.softplus(self.start) + 1e-4
        return self.start
class ExponentialDecay(ParameterDecay):
    """Exponential decay from start towards end.

    Returns ``end + (start - end) * exp(-step / decay)``, so ``decay``
    acts as the time constant measured in steps.
    """

    def forward(self):
        """See `ParameterDecay.__call__'."""
        elapsed = torch.tensor(1.0 * self.step)
        factor = torch.exp(-elapsed / self.decay)
        return self.end + (self.start - self.end) * factor
class PolynomialDecay(ParameterDecay):
    """Polynomial Decay of a parameter.

    It returns the minimum between start and end / (step + 1) ** decay.
    """

    def forward(self):
        """See `ParameterDecay.__call__'."""
        steps = torch.tensor(self.step + 1.0)
        ceiling = self.end / steps ** self.decay
        return min(self.start, ceiling)
class LinearDecay(ParameterDecay):
    """Linear decay: decreases by ``decay`` per step, floored at ``end``."""

    def forward(self):
        """See `ParameterDecay.__call__'."""
        current = self.start - self.decay * self.step
        return max(self.end, current)
class LinearGrowth(ParameterDecay):
    """Linear growth: increases by ``decay`` per step, capped at ``end``.

    (The original docstring said "Linear decay" -- a copy-paste slip; the
    formula below grows from start towards end.)
    """

    def forward(self):
        """See `ParameterDecay.__call__'."""
        return min(self.end, self.start + self.decay * self.step)
class OUNoise(ParameterDecay):
    """Ornstein-Uhlenbeck Noise process.

    Mean-reverting Gaussian noise: each sample is pulled towards ``mean``
    at rate ``theta`` and perturbed by scaled Gaussian noise (see forward).

    Parameters
    ----------
    mean: Tensor
        Mean of OU process.
    std_deviation: Tensor
        Standard Deviation of OU Process.
    theta: float
        Parameter of OU Process.
    dt: float
        Time discretization.
    dim: Tuple
        Dimensions of noise.

    References
    ----------
    https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
    """

    def __init__(self, mean=0, std_deviation=0.2, theta=0.15, dt=1e-2, dim=(1,)):
        # Broadcast scalar arguments to tensors of shape `dim`.
        if not isinstance(mean, torch.Tensor):
            mean = mean * torch.ones(dim)
        self.mean = mean

        if not isinstance(std_deviation, torch.Tensor):
            std_deviation = std_deviation * torch.ones(dim)
        self.std_dev = std_deviation

        self.theta = theta
        self.dt = dt
        # NOTE(review): attributes are assigned before super().__init__()
        # runs nn.Module's setup; this works only because none of them are
        # nn.Parameter / nn.Module instances.
        super().__init__(start=torch.zeros_like(mean))

    def forward(self):
        """Compute Ornstein-Uhlenbeck sample."""
        # The process state lives in the (non-trainable) `start` parameter.
        x_prev = self.start.data
        # Euler-Maruyama step: drift towards the mean at rate theta,
        # plus sqrt(dt)-scaled Gaussian diffusion.
        x = (
            x_prev
            + self.theta * (self.mean - x_prev) * self.dt
            + self.std_dev
            * torch.sqrt(torch.tensor(self.dt))
            * torch.randn(self.mean.shape)
        )
        self.start.data = x
        return x
|
[
"[email protected]"
] | |
8e1f9eeaa8eb59e4b8fd5822047b9e320adc32db
|
e2c120b55ab149557679e554c1b0c55126e70593
|
/python/imagej/tests/test_ImgLib2_ImgFactory.py
|
6b95c5b2582e670492dc615ed54d7090c3ee9152
|
[] |
no_license
|
acardona/scripts
|
30e4ca2ac87b9463e594beaecd6da74a791f2c22
|
72a18b70f9a25619b2dbf33699a7dc1421ad22c6
|
refs/heads/master
| 2023-07-27T14:07:37.457914 | 2023-07-07T23:13:40 | 2023-07-07T23:14:00 | 120,363,431 | 4 | 5 | null | 2023-05-02T11:20:49 | 2018-02-05T21:21:13 |
Python
|
UTF-8
|
Python
| false | false | 859 |
py
|
from net.imglib2.img.array import ArrayImgFactory
from net.imglib2.type.numeric.integer import UnsignedByteType, UnsignedShortType
from net.imglib2.util import Intervals
# Jython examples (note Python 2 `xrange`) of creating ImgLib2 images
# through ImgFactory.

# An 8-bit 256x256x256 volume
img = ArrayImgFactory(UnsignedByteType()).create([256, 256, 256])

# Another image of the same type and dimensions, but empty
img2 = img.factory().create([img.dimension(d) for d in xrange(img.numDimensions())])

# Same, but easier reading of the image dimensions
img3 = img.factory().create(Intervals.dimensionsAsLongArray(img))

# Same, but use an existing img as an Interval from which to read out the dimensions
img4 = img.factory().create(img)

# Now we change the type: same kind of image and same dimensions,
# but crucially a different pixel type (16-bit) via a new ImgFactory
imgShorts = img.factory().imgFactory(UnsignedShortType()).create(img)
|
[
"[email protected]"
] | |
89e2d90ba4eedda9c8b3ce40056dde57e0048c0c
|
e60487a8f5aad5aab16e671dcd00f0e64379961b
|
/python_stack/Algos/leetcode_30days/max_subarray.py
|
05464124d8c978cb2d1c61f8ef20653a3b199cf1
|
[] |
no_license
|
reenadangi/python
|
4fde31737e5745bc5650d015e3fa4354ce9e87a9
|
568221ba417dda3be7f2ef1d2f393a7dea6ccb74
|
refs/heads/master
| 2021-08-18T08:25:40.774877 | 2021-03-27T22:20:17 | 2021-03-27T22:20:17 | 247,536,946 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,473 |
py
|
# Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
# Example:
# Input: [-2,1,-3,4,-1,2,1,-5,4],
# Output: 6
# Explanation: [4,-1,2,1] has the largest sum = 6.
def findMaxIndex(nums):
    # Index of the largest element; first index wins on ties, and an
    # empty list yields 0 (matching the original contract).
    # NOTE(review): this helper is not referenced elsewhere in this file.
    best = 0
    for idx, val in enumerate(nums):
        if val > nums[best]:
            best = idx
    return best
def maxCrossingSum(nums, l, m, h):
    """Return the best sum of a subarray that crosses the midpoint m.

    Scans left from m down to l for the best suffix ending at m, and
    right from m+1 to h for the best prefix starting at m+1; the
    crossing sum is their total.

    Bug fix: the running maxima were initialized with the magic
    sentinels -10000 and -1000, which returns wrong answers whenever
    elements (or partial sums) fall below those values. float('-inf')
    is always safe; since each side contributes at least one element,
    the result stays an int for int inputs.
    """
    # Best sum of a subarray ending at m and starting at or after l.
    sm = 0
    left_sum = float('-inf')
    for i in range(m, l - 1, -1):
        sm = sm + nums[i]
        if sm > left_sum:
            left_sum = sm
    # Best sum of a subarray starting at m+1 and ending at or before h.
    sm = 0
    right_sum = float('-inf')
    for i in range(m + 1, h + 1):
        sm = sm + nums[i]
        if sm > right_sum:
            right_sum = sm
    # Total of the two halves is the best crossing subarray.
    return left_sum + right_sum
def max_subArray_divide(nums, lowest, highest):
    """Divide-and-conquer maximum-subarray over nums[lowest..highest] (inclusive).

    The answer is the best of: entirely in the left half, entirely in
    the right half, or crossing the midpoint (via maxCrossingSum).

    Improvement: removed the leftover debug print() calls that polluted
    stdout on every recursive call.
    """
    if lowest == highest:
        # Single element: it is its own best subarray.
        return nums[lowest]
    # Find Middle point
    mid = (lowest + highest) // 2
    left_sum = max_subArray_divide(nums, lowest, mid)
    right_sum = max_subArray_divide(nums, mid + 1, highest)
    cross_sum = maxCrossingSum(nums, lowest, mid, highest)
    return max(left_sum, right_sum, cross_sum)
def max_subArray(nums):
    # Divide and conquer entry point: search the whole range [0, len-1].
    # NOTE(review): assumes nums is non-empty; an empty list would recurse
    # with highest == -1 and never terminate -- confirm callers never pass [].
    return max_subArray_divide(nums,0,len(nums)-1)
# Demo run; expected maximum-subarray sum is 6 (subarray [4, -1, 2, 1]).
print(max_subArray([-2,1,-3,4,-1,2,1,-5,4]))
|
[
"[email protected]"
] | |
8c4d47286298016368282b45a4cb4e2dc67954f7
|
f27c49458bde84048e6008da8c52ca0f1ae711ce
|
/code/07-data-structures/simple_dict/playground.py
|
b8e36ebaf6c98c36c3e8c2912fe99193322d5f38
|
[
"MIT"
] |
permissive
|
talkpython/python-for-absolute-beginners-course
|
54b0f48b5edbf7755de6ca688a8e737ba16dc2fc
|
1930dab0a91526863dc92c3e05fe3c7ec63480e1
|
refs/heads/master
| 2022-11-24T03:02:32.759177 | 2022-11-08T14:30:08 | 2022-11-08T14:30:08 | 225,979,578 | 2,287 | 1,059 |
MIT
| 2022-11-07T19:45:15 | 2019-12-05T00:02:31 |
Python
|
UTF-8
|
Python
| false | false | 547 |
py
|
# Data structures
# 1. Dictionaries
# 2. Lists / arrays [1,1,7,11]
# 3. Sets

# Lists: ordered, allow duplicates
lst = [1, 1, 11, 7]
print(lst)
lst.append(2)       # add to the end -> [1, 1, 11, 7, 2]
print(lst)
lst.remove(11)      # delete the first matching value -> [1, 1, 7, 2]
print(lst)
lst.sort()          # in-place ascending sort -> [1, 1, 2, 7]
print(lst)

# Sets: unordered, duplicates collapse
st = {1, 1, 11, 7}
st.add(1)           # already present: no effect
st.add(1)
st.add(11)
print(st)           # holds exactly the values {1, 7, 11}

# Dictionaries: key -> value mapping, keeps insertion order
d = {
    'bob': 0,
    'sarah': 0,
    'defeated_by': {'paper', 'wolf'},
    'defeats': {'scissors', 'sponge'}
}
print(d['bob'])
d['bob'] += 1       # increment an existing value
print(d['bob'])
print(d)
d['michael'] = 7    # assigning to a new key inserts it
print(d)
print(f"You are defeated by {d['defeated_by']}")
print(d.get('other', 42))   # .get returns the default (42) for a missing key
|
[
"[email protected]"
] | |
8dc61e64bb66988a363127243cb1b02813e86140
|
a6a78f59f442c18449befc89be2b193e37b695d6
|
/ivi/rigol/rigolDP800.py
|
fd988186043295e67c8db82281a56f6215da0aef
|
[
"MIT"
] |
permissive
|
hohe/python-ivi
|
fa0b4b1232f4fca92bd046d2ae322e49959f8a83
|
0fe6d7d5aaf9ebc97085f73e25b0f3051ba996b6
|
refs/heads/master
| 2021-01-21T08:55:35.470107 | 2013-12-23T09:27:02 | 2013-12-23T09:27:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,380 |
py
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import dcpwr
from .. import scpi
TrackingType = set(['floating'])
TriggerSourceMapping = {
'immediate': 'imm',
'bus': 'bus'}
class rigolDP800(scpi.dcpwr.Base, scpi.dcpwr.Trigger, scpi.dcpwr.SoftwareTrigger,
        scpi.dcpwr.Measurement):
    "Rigol DP800 series IVI DC power supply driver"

    def __init__(self, *args, **kwargs):
        super(rigolDP800, self).__init__(*args, **kwargs)

        self._instrument_id = 'Rigol Technologies,DP800'
        # Three channels; ranges are (max voltage, max current) per channel.
        self._output_count = 3
        self._output_range = [[(8.0, 5.0)], [(30.0, 2.0)], [(-30.0, 2.0)]]
        self._output_range_name = [['P8V'], ['P30V'], ['N30V']]
        self._output_ovp_max = [8.8, 33.0, -33.0]
        self._output_ocp_max = [5.5, 2.2, 2.2]
        self._output_voltage_max = [8.0, 30.0, -30.0]
        self._output_current_max = [5.0, 2.0, 2.0]
        # Number of front-panel setup memory slots (*SAV/*RCL 1..10).
        self._memory_size = 10

        self._identity_description = "Rigol DP800 series DC power supply driver"
        self._identity_identifier = ""
        self._identity_revision = ""
        self._identity_vendor = ""
        self._identity_instrument_manufacturer = "Rigol Technologies"
        self._identity_instrument_model = ""
        self._identity_instrument_firmware_revision = ""
        self._identity_specification_major_version = 3
        self._identity_specification_minor_version = 0
        self._identity_supported_instrument_models = ['DP831A', 'DP832', 'DP832A']

        ivi.add_method(self, 'memory.save',
                       self._memory_save)
        ivi.add_method(self, 'memory.recall',
                       self._memory_recall)

        self._init_outputs()

    def _memory_save(self, index):
        """Save the instrument state to setup memory slot `index` (1-based)."""
        index = int(index)
        if index < 1 or index > self._memory_size:
            # Bug fix: `OutOfRangeException` was unqualified and raised
            # NameError at runtime; the class lives in the `ivi` module.
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write("*sav %d" % index)

    def _memory_recall(self, index):
        """Recall the instrument state from setup memory slot `index` (1-based)."""
        index = int(index)
        if index < 1 or index > self._memory_size:
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write("*rcl %d" % index)
|
[
"[email protected]"
] | |
3efb3fa0f33c9db9e23e81ccddbd12529703f1e8
|
ddb8c14775dfbe9424691dabf1617273d118d317
|
/catkin_ws/build/msg_check/catkin_generated/pkg.installspace.context.pc.py
|
d23c59c9aa6dcd0265af7cbe01246235712f19cc
|
[] |
no_license
|
rishabhdevyadav/fastplanneroctomap
|
e8458aeb1f2d3b126d27dc57011c87ae4567687a
|
de9d7e49cb1004f3b01b7269dd398cf264ed92b4
|
refs/heads/main
| 2023-05-12T22:12:27.865900 | 2021-05-26T19:25:31 | 2021-05-26T19:25:31 | 356,674,577 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 542 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# NOTE: auto-generated by catkin's CMake machinery; do not edit by hand --
# regenerate by rebuilding the workspace.
CATKIN_PACKAGE_PREFIX = ""
# Values below are ';'-separated strings substituted in by CMake.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include;/usr/include/eigen3".split(';') if "${prefix}/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;mav_msgs;nav_msgs;roscpp;rospy;sensor_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmsg_check".split(';') if "-lmsg_check" != "" else []
PROJECT_NAME = "msg_check"
PROJECT_SPACE_DIR = "/home/rishabh/catkin_ws/install"
PROJECT_VERSION = "2.1.2"
|
[
"[email protected]"
] | |
bd0443ac664d583b35f574b914b7d097a427430c
|
e5897d5b5eb3b018bec8703f01cfc666acea5b38
|
/isy994/items/variables/variable_state.py
|
9ff4bd1fca3dd2830528fb6ce10c205ddf9ea290
|
[
"MIT"
] |
permissive
|
mjcumming/ISY994v5
|
5de41ce7e12be44c35dc0818daf639bb8c0e5487
|
928d8359fd15363e15b8daa402fbb1f5f53f3c45
|
refs/heads/master
| 2022-05-19T06:10:59.788621 | 2022-05-08T13:16:29 | 2022-05-08T13:16:29 | 187,289,265 | 4 | 10 |
MIT
| 2021-06-26T13:34:23 | 2019-05-17T22:36:55 |
Python
|
UTF-8
|
Python
| false | false | 219 |
py
|
#! /usr/bin/env python
from .variable_base import Variable_Base
class Variable_State(Variable_Base):
    """Concrete variable type distinguished only by its class; all behavior
    lives in Variable_Base."""

    def __init__(self, container, variable_info):
        # Old-style explicit base-class call kept as-is (file predates super()).
        Variable_Base.__init__(self, container, variable_info)
|
[
"[email protected]"
] | |
b59641920ce0787bdda82226455c999d8bfa5e60
|
23b3c698412f71a2878ae586f5599f2b6e38c980
|
/source-code/bokeh/ising.py
|
1dab727e38f3966bbe674a61422b703eec89d4d9
|
[
"CC-BY-4.0"
] |
permissive
|
gjbex/Scientific-Python
|
9b7ae7b3398cc9358d1f530ca24243b63f3c01f0
|
02d24e6e22cfbc5b73429a2184ecbdfcd514c8fc
|
refs/heads/master
| 2023-08-17T10:17:39.963630 | 2023-05-12T14:51:32 | 2023-05-12T14:51:32 | 221,184,612 | 13 | 13 |
CC-BY-4.0
| 2023-08-03T08:04:57 | 2019-11-12T09:55:27 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 964 |
py
|
#!/usr/bin/env python
from argparse import ArgumentParser
from bokeh.layouts import column
from bokeh.models import CustomJS, ColumnDataSource, Slider
from bokeh.plotting import curdoc, figure
import numpy as np
# Interactive tanh(beta * x) demo intended for `bokeh serve`.
# NOTE(review): ArgumentParser is imported at the top of the file but unused.
x = np.linspace(-3.0, 3.0, 301)
y = x.copy()  # identity line y = x, drawn dashed as a reference
default_beta = 4.0
y_tanh = np.tanh(default_beta*x)

source = ColumnDataSource(data=dict(x=x, y=y_tanh))


def callback(attr, old_value, new_value):
    # Slider handler: recompute the curve for the new beta. The data dict
    # is replaced wholesale (the usual Bokeh pattern for triggering a redraw).
    beta = new_value
    new_data = {
        'x': source.data['x'],
        'y': np.tanh(beta*source.data['x']),
    }
    source.data = new_data


plot = figure(width=300, height=300)
plot.line(x, y, line_width=0.5, line_dash='3 3')
plot.line('x', 'y', source=source)
plot.xaxis.axis_label = '$$x$$'              # LaTeX-rendered axis labels
plot.yaxis.axis_label = r'$$\tanh \beta x$$'
slider = Slider(start=0.2, end=6.0, value=default_beta, step=0.01,
                title=r'$$\beta$$')
slider.on_change('value', callback)
layout = column(children=[plot, slider])
curdoc().add_root(layout)
|
[
"[email protected]"
] | |
94ad119004a4fd0ddd961a8ed9e3b31bb811fd1a
|
1b7f4cd39bf7e4a2cf667ac13244e5138ee86cb2
|
/agents/game/human_agent.py
|
b04040eb7f8fea24819cc7ddc959c01950f3bda1
|
[
"MIT"
] |
permissive
|
cjreynol/willsmith
|
02f793003a914a21b181839bbd58108046f312d6
|
39d3b8caef8ba5825f3a0272c7fd61a2f78ef2b5
|
refs/heads/master
| 2020-07-15T13:25:57.613707 | 2018-06-12T00:18:19 | 2018-06-12T00:18:19 | 205,572,039 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,390 |
py
|
from agents.displays.human_display import HumanDisplay
from willsmith.game_agent import GameAgent
class HumanAgent(GameAgent):
    """
    Game agent driven by console input from a human player.

    The simulator supplies the action class, from which the prompt text
    and the string-to-action parser are taken; both are stored on the
    class so every HumanAgent shares them.
    """

    GUI_DISPLAY = None  # HumanDisplay is not yet ready
    INPUT_PROMPT = None
    INPUT_PARSER = None

    def __init__(self, agent_id, use_gui, action):
        super().__init__(agent_id, use_gui)
        self.add_input_info(action)

    def add_input_info(self, action):
        HumanAgent.INPUT_PROMPT = action.INPUT_PROMPT
        HumanAgent.INPUT_PARSER = action.parse_action

    def search(self, state, allotted_time):
        """
        Repeatedly prompt the player until they enter a legal action,
        then return it.  `allotted_time` is ignored for human players.
        """
        legal_actions = state.get_legal_actions()
        while True:
            choice = HumanAgent.INPUT_PARSER(input(HumanAgent.INPUT_PROMPT))
            if choice in legal_actions:
                return choice
            print("Last move was not legal, please try again.\n")

    def _take_action(self, action):
        pass

    def _reset(self):
        pass

    def __str__(self):
        return ""
|
[
"[email protected]"
] | |
70f4e03aa8a2930c56a4ec84979dc5bb1e836e28
|
745a605d52556d5195b7cdbf871fc1011b2dc9cd
|
/backend/mete/models.py
|
92b2828ee3753d37d2fa5baa61d5d362342dc181
|
[] |
no_license
|
annikahannig/meteme
|
96a6b919fbdac20bef7e13e1d101130cd1805b7b
|
16ca646904a31833e8d1156be8f554e11ff0d37a
|
refs/heads/master
| 2021-06-25T05:34:23.517379 | 2017-05-09T20:33:54 | 2017-05-09T20:33:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,526 |
py
|
from __future__ import unicode_literals
from collections import OrderedDict
from django.db import models
from django.conf import settings
from djmoney.models.fields import MoneyField
from moneyed import Money
from solo.models import SingletonModel
from store import models as store_models
from unidecode import unidecode
import re
class Account(models.Model):
    """
    User account:

    Managed separately from auth 'Users' because accounts don't have a
    password, may not have an email, and carry an avatar.
    """
    # Exactly one account per auth user; deleting the user cascades here.
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                null=False,
                                blank=False,
                                on_delete=models.CASCADE)
    avatar = models.ImageField(upload_to='avatars/',
                               default='/static/store/img/default_avatar.png',
                               null=True, blank=True)
    # Current balance (django-money), starting at 0 EUR.
    balance = MoneyField(max_digits=10,
                         decimal_places=2,
                         default_currency='EUR',
                         default=Money(0, 'EUR'))
    is_locked = models.BooleanField(default=False)
    is_disabled = models.BooleanField(default=False)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)

    def __unicode__(self):
        # NOTE(review): Python 2-style; under Python 3 define __str__ instead.
        return self.name

    @property
    def name(self):
        # Accounts are displayed under the auth user's username.
        return self.user.username

    @property
    def canonical_name(self):
        """Return normalized username"""
        name = unidecode(self.name) # Transliterate umlauts
        name = re.sub(r'\W', '', name).lower()
        return name
class Barcode(models.Model):
    """
    Barcode(s) can be associated with an account
    or with a product.
    """
    number = models.CharField(unique=True, max_length=42)
    # NOTE(review): nothing here enforces that exactly one of
    # product/account is set; both may be NULL (or both set) at DB level.
    product = models.ForeignKey(store_models.Product,
                                null=True,
                                blank=True,
                                on_delete=models.CASCADE)
    account = models.ForeignKey(Account,
                                null=True,
                                blank=True,
                                on_delete=models.CASCADE)
class KeyPair(models.Model):
    """
    A user may supply a public/private key pair,
    so we can encrypt the audit log.

    If a user does not have a key pair, no personal
    log will be created.

    The keys are created on the client using the NaCL
    crypto library.

    The private key is encrypted with a key derived from a password / pin,
    using the 'Password-Based Key Derivation Function 2' (PBKDF2) with
    at least 3 million iterations.

    The first 4 bytes of the encrypted private key determine
    additional hashing rounds as a measure against rainbow tables.
    """
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                null=False,
                                blank=False,
                                on_delete=models.CASCADE)
    # Schema version of the client-side crypto format.
    crypto_version = models.PositiveSmallIntegerField(default=1)
    # Encoded key material produced client-side; the encrypted private key
    # carries a 4-byte prefix, hence 68 chars vs. 64 for the public keys.
    private_key = models.CharField(max_length=68,
                                   blank=False,
                                   null=False,
                                   unique=True)
    public_key = models.CharField(max_length=64,
                                  blank=False,
                                  null=False,
                                  unique=True)
    verify_key = models.CharField(max_length=64,
                                  blank=False,
                                  null=False,
                                  unique=True)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)
class TransactionManager(models.Manager):
    """Manager for Transaction: newest-first ordering plus grouped views."""

    def get_queryset(self):
        """
        Override default queryset to order transactions
        by date DESC
        """
        qs = super(TransactionManager, self).get_queryset()
        qs = qs.order_by('-created_at')
        return qs

    def donations(self):
        """Transactions that reference a product (treated as donations)."""
        transactions = self.get_queryset()
        return transactions.filter(product__isnull=False)

    def donations_grouped_months(self):
        """ Get donations, grouped by month """
        return self._group_by_month(self.donations())

    def grouped(self):
        """Group all transactions by calendar day (time component zeroed)."""
        transactions = self.get_queryset()
        groups = OrderedDict()
        for transaction in transactions:
            date = transaction.created_at
            date = date.replace(hour=0, minute=0, second=0, microsecond=0)
            groups.setdefault(date, []).append(transaction)
        return groups

    def grouped_month(self):
        """Group all transactions by (year, month)."""
        return self._group_by_month(self.get_queryset())

    @staticmethod
    def _group_by_month(transactions):
        """
        Shared helper: bucket transactions into an OrderedDict keyed by
        (year, month), preserving queryset order.  Deduplicates the logic
        previously copy-pasted between donations_grouped_months() and
        grouped_month().
        """
        groups = OrderedDict()
        for transaction in transactions:
            key = (transaction.created_at.year, transaction.created_at.month)
            groups.setdefault(key, []).append(transaction)
        return groups
class Transaction(models.Model):
    """
    Log Transactions.

    Do not store the associated account.
    This is just an audit log.
    """
    amount = MoneyField(max_digits=10,
                        decimal_places=2,
                        default_currency='EUR')
    # product_name presumably snapshots the name so the log survives
    # product deletion -- NOTE(review): confirm it is filled on write.
    product = models.ForeignKey('store.Product', null=True, blank=True)
    product_name = models.CharField(null=True, blank=True, max_length=80)
    created_at = models.DateTimeField(auto_now_add=True, blank=True)

    # Custom manager: newest-first ordering and grouping helpers.
    objects = TransactionManager()
class UserSetting(models.Model):
    """
    Configure per user preferences, like:

    Limiting categories. (This is it for now)
    """
    user = models.OneToOneField('auth.User',
                                null=False,
                                blank=False,
                                on_delete=models.CASCADE)
    # Categories selected by the user; NOTE(review): consumer semantics
    # (e.g. whether empty means "no restriction") are not visible here.
    categories = models.ManyToManyField('store.Category',
                                        blank=True)
class Settings(SingletonModel):
    """Site-wide singleton settings (django-solo SingletonModel)."""
    # Active price set; defaults to the PriceSet row with pk=1.
    price_set = models.ForeignKey('store.PriceSet', null=True, blank=False, default=1)
|
[
"[email protected]"
] | |
5cf9e4839963c2c5dace99204f707d7e8424f061
|
14c5bd382ac9ffbfa4ae34f244bca6685f3cd18c
|
/apps/geotracker/models.py
|
d3eff90a8929fa59880c39ed709ce3692949a42b
|
[] |
no_license
|
redhog/arwen
|
e8705e978588163554c83e3278297506c1ffb2ce
|
342daa97a72c0776d4dfe27196adfe66d4dff63c
|
refs/heads/master
| 2021-01-17T13:08:09.392613 | 2011-08-26T09:21:40 | 2011-08-26T09:21:40 | 2,084,644 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,235 |
py
|
# -*- coding: utf-8 -*-
import django.contrib.auth.models
from django.utils.translation import ugettext_lazy as _
import django.contrib.gis.db.models
import geotracker.geos
import linkableobject.models
class Vehicle(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
    """A named vehicle owned by a user; Journey rows reference it below."""
    # GeoManager enables GeoDjango spatial lookups on the default manager.
    objects = django.contrib.gis.db.models.GeoManager()
    name = django.contrib.gis.db.models.CharField(_('name'), max_length=256)
    description = django.contrib.gis.db.models.TextField(_('description'))
    # NOTE(review): pre-Django-2.0 ForeignKey without on_delete (implicit CASCADE).
    owner = django.db.models.ForeignKey(django.contrib.auth.models.User, related_name="owned_vehicles")

    def __unicode__(self):
        return self.name
class TimePoint(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
    """A single timestamped geographic point."""
    objects = django.contrib.gis.db.models.GeoManager()
    timestamp = django.contrib.gis.db.models.DateTimeField()
    # geography=True stores the point as a geodetic (lat/lon) geography type.
    point = django.contrib.gis.db.models.PointField(geography=True)
    @property
    def as_geosfeature(self):
        # Wrap the point as one GEOS feature tagged with our pk and timestamp.
        return geotracker.geos.GEOSFeature(self.point, self.id, timestamp = self.timestamp)
    @property
    def as_geoscollection(self):
        # A collection containing just this point's feature.
        return geotracker.geos.GEOSFeatureCollection([self.as_geosfeature])
    def __unicode__(self):
        return "%s @ %s" % (self.point, self.timestamp)
class Path(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
    """An ordered sequence of points (see PathPoint, related_name='points')."""
    objects = django.contrib.gis.db.models.GeoManager()
    timestamp = django.contrib.gis.db.models.DateTimeField()
    name = django.contrib.gis.db.models.CharField(_('name'), max_length=256)
    description = django.contrib.gis.db.models.TextField(_('description'))
    @property
    def as_geosfeature(self):
        # Collapse the ordered points into a single LineString feature.
        return geotracker.geos.GEOSFeature(django.contrib.gis.geos.LineString([point.point for point in self.points.order_by('timestamp')]),
                                           self.id,
                                           name = self.name,
                                           description = self.description)
    @property
    def as_geoscollection(self):
        # The line itself plus each constituent point as its own feature.
        # NOTE(review): iterates points twice (once in as_geosfeature, once
        # here) -- two queries per call; fine for small paths.
        res = geotracker.geos.GEOSFeatureCollection([self.as_geosfeature])
        for point in self.points.order_by('timestamp'):
            res += point.as_geoscollection
        return res
    def __unicode__(self):
        return self.name
class PathPoint(TimePoint):
    """A TimePoint that belongs to a Path (multi-table inheritance)."""
    path = django.contrib.gis.db.models.ForeignKey(Path, related_name='points')
    path.verbose_related_name = _("Points")
    @property
    def as_geosfeature(self):
        # Same as TimePoint.as_geosfeature but also tags the owning path id.
        return geotracker.geos.GEOSFeature(self.point, self.id, timestamp = self.timestamp, path = self.path.id)
class Journey(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
    """A trip made with a Vehicle, composed of paths (see JourneyPath)."""
    objects = django.contrib.gis.db.models.GeoManager()
    vehicle = django.db.models.ForeignKey(Vehicle, related_name="journeys")
    vehicle.verbose_related_name = _("Journeys")
    owner = django.db.models.ForeignKey(django.contrib.auth.models.User, related_name="organized_journeys")
    owner.verbose_related_name = _("Organized journeys")
    name = django.contrib.gis.db.models.CharField(_('name'), max_length=256)
    description = django.contrib.gis.db.models.TextField(_('description'))
    @property
    def as_geosfeature(self):
        # Merge all member paths (ordered by their timestamps) into one
        # MultiLineString feature carrying the journey's metadata.
        return geotracker.geos.GEOSFeature(django.contrib.gis.geos.MultiLineString([path.as_geosfeature.geometry for path in self.paths.order_by('timestamp')]),
                                           self.id,
                                           vehicle = self.vehicle.id,
                                           owner = self.owner.id,
                                           name = self.name,
                                           description = self.description)
    @property
    def as_geoscollection(self):
        # The merged journey feature plus every path's own collection.
        res = geotracker.geos.GEOSFeatureCollection([self.as_geosfeature])
        for path in self.paths.order_by('timestamp'):
            res += path.as_geoscollection
        return res
    def __unicode__(self):
        return self.name
class JourneyPath(Path):
    """A Path that belongs to a Journey (multi-table inheritance)."""
    journey = django.contrib.gis.db.models.ForeignKey(Journey, related_name='paths', verbose_name=_('Journey'))
    journey.verbose_related_name = _("Paths")
|
[
"[email protected]"
] | |
e1516bbfce063e8d56341ca439e8cf70dfc77eed
|
2b5fd9d436a97726f852a12bab58b8d367f4866a
|
/api/urls.py
|
2a552766f2d17ea023c0ec9ea230e41593ce2a2f
|
[] |
no_license
|
lxlzyf/roe
|
07ff551b142c0411acb7ca6f759ea98b40ad9b72
|
2d7f1b01e2456875d14a75c90d8397965215bcd3
|
refs/heads/master
| 2020-03-27T06:00:43.587235 | 2018-08-20T10:47:47 | 2018-08-20T10:47:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 322 |
py
|
from django.conf.urls import url
# Bug fix: assets_api is referenced by the group routes below but was never
# imported, which raised NameError as soon as this URLconf was loaded.
# NOTE(review): assumes api.views exposes an assets_api module -- confirm.
from api.views import user_api, assets_api

urlpatterns = [
    # Group management endpoints.
    url(r'^group/$', assets_api.group_list),
    url(r'^group/(?P<id>[0-9]+)/$', assets_api.group_detail),
    # User management endpoints.
    url(r'^user/$', user_api.user_list),
    url(r'^user/(?P<id>[0-9]+)/$', user_api.user_detail),
]
|
[
"[email protected]"
] | |
22126b447591b464ad5a6d753bb645c15ea5ed06
|
531f8027890188eb037a9dbe68d63882eb2e0ead
|
/demos/ebeam/flash/flash_mismatch.py
|
77b5f709db45ef41f935bc5ad434b0e1d972c21e
|
[] |
no_license
|
Cartj/desy
|
057947dd5e3e4fce085472dc145461cea68be8e9
|
9a1f12e7cf7040e28614e95dc5c49bc10d36b092
|
refs/heads/master
| 2020-03-21T06:01:54.315274 | 2016-08-16T13:04:56 | 2016-08-16T13:04:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,823 |
py
|
__author__ = 'Sergey Tomin'
from ocelot import *
from ocelot.gui import *
from pylab import *
exec( open("lattice_FLASH_S2E.py" ))
beam = Beam()
beam.E = 148.3148e-3 #in GeV ?!
beam.beta_x = 14.8821
beam.beta_y = 18.8146
beam.alpha_x = -0.61309
beam.alpha_y = -0.54569
beam.emit_xn = 1.5e-6
beam.emit_yn = 1.5e-6
beam.emit_x = beam.emit_xn / (beam.E / m_e_GeV)
beam.emit_y = beam.emit_yn / (beam.E / m_e_GeV)
beam.tlen=2e-3 # in m
tw0 = Twiss(beam)
lat = MagneticLattice(lattice)
tws_m=twiss(lat, tw0, nPoints=None)
plot_opt_func(lat, tws_m, top_plot = ["Dx", "Dy"], fig_name="optics")
#plt.show()
mx = 1.
my = 1.
Mx_b = []
My_b = []
S = []
for elem, tws in zip(lat.sequence,tws_m[1:]):
dk = 0.
if elem.type == "quadrupole":
dk_k = -0.05
#if elem.id in ["Q8TCOL", "Q2UBC3", "Q6DBC2"]:
# dk_k = np.random.rand()/100.
dk = dk_k*elem.k1
elem.k1 = elem.k1*(1. + dk_k)
mx += 0.5*((dk*elem.l*tws.beta_x*cos(2*tws.mux))**2 + (dk*elem.l*tws.beta_x*sin(2*tws.mux))**2)
my += 0.5*((dk*elem.l*tws.beta_y*cos(2*tws.muy))**2 + (dk*elem.l*tws.beta_y*sin(2*tws.muy))**2)
Mx_b.append(mx)
My_b.append(my)
S.append(tws.s)
lat = MagneticLattice(lattice)
tws_e=twiss(lat, tw0, nPoints=None)
t = tw0
x = linspace(-sqrt(t.beta_x-1e-7), sqrt(t.beta_x-1e-7), num=200)
#print t.beta_x - x*x
x1 = (sqrt(t.beta_x - x*x) - t.alpha_x*x)/t.beta_x
x2 = (-sqrt(t.beta_x - x*x) - t.alpha_x*x)/t.beta_x
a = sqrt(0.5*((t.beta_x + t.gamma_x) + sqrt((t.beta_x + t.gamma_x)**2 - 4.)))
theta = arctan(-2.*t.alpha_x/(t.beta_x - t.gamma_x))/2.
t = linspace(0, 2*pi, num=100)
xe = a*cos(t)*cos(theta) - 1./a*sin(t)*sin(theta)
ye = a*cos(t)*sin(theta) + 1./a*sin(t)*cos(theta)
plt.plot(x, x1, x, x2)
plt.plot(xe, ye)
plt.show()
Mx = []
My = []
Mx2 = []
My2 = []
for tm, te in zip(tws_m, tws_e):
bx_n = te.beta_x/tm.beta_x
by_n = te.beta_y/tm.beta_y
ax_n = -te.alpha_x + tm.alpha_x*bx_n
ay_n = -te.alpha_y + tm.alpha_y*by_n
gx_n = -2.*te.alpha_x*tm.alpha_x + tm.alpha_x**2*bx_n + tm.beta_x*te.gamma_x
gy_n = -2.*te.alpha_y*tm.alpha_y + tm.alpha_y**2*by_n + tm.beta_y*te.gamma_y
mx = 0.5*(bx_n + gx_n) + sqrt((bx_n + gx_n)**2 - 4.)
#print (by_n + gy_n)**2 - 4.
my = 0.5*(by_n + gy_n) + sqrt((by_n + gy_n)**2 - 4.)
Mx.append(sqrt(mx))
My.append(sqrt(my))
Mx2.append(sqrt(0.5*(tm.beta_x*te.gamma_x - 2.*te.alpha_x*tm.alpha_x + te.beta_x*tm.gamma_x)))
My2.append(sqrt(0.5*(tm.beta_y*te.gamma_y - 2.*te.alpha_y*tm.alpha_y + te.beta_y*tm.gamma_y)))
s = [p.s for p in tws_m]
bx_e = [p.beta_x for p in tws_e]
bx_m = [p.beta_x for p in tws_m]
plt.plot(s, bx_m,"r", s, bx_e, "b")
plt.show()
plt.plot(s, Mx, "r", s, My, "b")
#plt.plot(s, Mx2, "r.", s, My2, "b.")
plt.plot(S, Mx_b, "ro-", S, My_b, "bo-")
plt.show()
|
[
"[email protected]"
] | |
4778c6986b6120a7ef560780ffc43c77d358ed22
|
4c9580b2e09e2b000e27a1c9021b12cf2747f56a
|
/chapter13/xiaoyu_mall/xiaoyu_mall/apps/areas/migrations/0001_initial.py
|
079ebb7f05049decffb2551a21f8dbc383e69e82
|
[] |
no_license
|
jzplyy/xiaoyue_mall
|
69072c0657a6878a4cf799b8c8218cc7d88c8d12
|
4f9353d6857d1bd7dc54151ca8b34dcb4671b8dc
|
refs/heads/master
| 2023-06-26T02:48:03.103635 | 2021-07-22T15:51:07 | 2021-07-22T15:51:07 | 388,514,311 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 930 |
py
|
# Generated by Django 2.2.3 on 2019-11-15 06:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.3: creates the self-referencing Area table
    # (db table tb_areas) used for the province/city/district hierarchy.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Area',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='名称')),
                # Self-FK: top-level regions have parent=NULL; deleting a
                # parent keeps children (SET_NULL).
                ('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='subs', to='areas.Area', verbose_name='上级行政区划')),
            ],
            options={
                'verbose_name': '省市区',
                'verbose_name_plural': '省市区',
                'db_table': 'tb_areas',
            },
        ),
    ]
|
[
"[email protected]"
] | |
c5e60a89ed2a73c9c155f1c67d66ad55d13bc4ba
|
cd486d096d2c92751557f4a97a4ba81a9e6efebd
|
/17/addons/plugin.video.ukturk/resources/lib/scraper2.py
|
0c1a6e03d1453afd6847bd928d43d611c2b92671
|
[] |
no_license
|
bopopescu/firestick-loader-kodi-data
|
2f8cb72b9da67854b64aa76f720bdad6d4112926
|
e4d7931d8f62c94f586786cd8580108b68d3aa40
|
refs/heads/master
| 2022-04-28T11:14:10.452251 | 2020-05-01T03:12:13 | 2020-05-01T03:12:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,184 |
py
|
# coding: UTF-8
import sys
l111ll1llUK_Turk_No1 = sys.version_info [0] == 2
l11l1l11lUK_Turk_No1 = 2048
l111llll1UK_Turk_No1 = 7
def l11l1lUK_Turk_No1 (l1llll1lUK_Turk_No1):
global l1l1ll1llUK_Turk_No1
l11lllll1UK_Turk_No1 = ord (l1llll1lUK_Turk_No1 [-1])
l11l111llUK_Turk_No1 = l1llll1lUK_Turk_No1 [:-1]
l1lll1lllUK_Turk_No1 = l11lllll1UK_Turk_No1 % len (l11l111llUK_Turk_No1)
l1l11llllUK_Turk_No1 = l11l111llUK_Turk_No1 [:l1lll1lllUK_Turk_No1] + l11l111llUK_Turk_No1 [l1lll1lllUK_Turk_No1:]
if l111ll1llUK_Turk_No1:
l1ll1llUK_Turk_No1 = unicode () .join ([unichr (ord (char) - l11l1l11lUK_Turk_No1 - (l11lllUK_Turk_No1 + l11lllll1UK_Turk_No1) % l111llll1UK_Turk_No1) for l11lllUK_Turk_No1, char in enumerate (l1l11llllUK_Turk_No1)])
else:
l1ll1llUK_Turk_No1 = str () .join ([chr (ord (char) - l11l1l11lUK_Turk_No1 - (l11lllUK_Turk_No1 + l11lllll1UK_Turk_No1) % l111llll1UK_Turk_No1) for l11lllUK_Turk_No1, char in enumerate (l1l11llllUK_Turk_No1)])
return eval (l1ll1llUK_Turk_No1)
import urllib,urllib2,re,os
def l11lll11l1UK_Turk_No1():
string=l11l1lUK_Turk_No1 (u"ࠨࠩැ")
link=l1llll111UK_Turk_No1(l11l1lUK_Turk_No1 (u"ࠤࡸࡹࡶ࠺࠰࠱ࡦࡶࡩࡦࡳࡧࡨ࠲ࡸࡩࡧࡱࡲࡸࡧࡧࡻ࠭ࡴࡶࡵࡩࡦࡳࠢෑ"))
events=re.compile(l11l1lUK_Turk_No1 (u"ࠪࡀࡹࡪ࠾࠽ࡵࡳࡥࡳࠦࡣࡣࡶࡷࡂࠨࡳࡱࡱࡵࡸ࠲ࡣࡰࡰࠫ࠲࠰ࡅࠩ࠽࠱ࡷࡶࡃ࠭ි"),re.DOTALL).findall(link)
for event in events:
l11lll111lUK_Turk_No1=re.compile(l11l1lUK_Turk_No1 (u"ࠫࡁࡺࡤࠪ࠱ࡄ࠼ࡣࡴࠫ࠲࠰ࡅࠩ࠽࠱ࡷࡨࡃ࠭ී")).findall(event)
for day,date in l11lll111lUK_Turk_No1:
day=l11l1lUK_Turk_No1 (u"ࠬࡡࡃࡐࡎࡒࡖࠥࡦࡠࠫු")+day+l11l1lUK_Turk_No1 (u"࡛࠭࠰ࡅࡒࡐࡔࡘࠨ")
date=date.replace(l11l1lUK_Turk_No1 (u"ࠧࠩූ"),l11l1lUK_Turk_No1 (u"ࠨࠩ"))
time=re.compile(l11l1lUK_Turk_No1 (u"ࠩࡸࡩࠦࡣࡣࡶࡷࡂࠨࡢࡶࡦࡹࡦࠤࠣࡷࡹࡿࡦࠥࡧࡴࡲࡳ࠼ࠦ࠹࠹࠻࠴࠶࠶࠾ࡪࡴࡴࡴࡹࡨࡴ࠻ࡤࡲࡰࡩࡁࡦࡰࡰࡷ࠱ࡸࡺࡦ࠼ࠣ࠽ࡵࡾࠢࠪ࠱ࡄ࠼࠰ࡶࡧࡂࠬෘ")).findall(event)[0]
time=l11l1lUK_Turk_No1 (u"ࠪࡈࡕࡌࡐࡔࠣࡦࡱࡻࡥ࡞ࠪࠪෙ")+time+l11l1lUK_Turk_No1 (u"ࠫࡡࡄࡑࡏࡓࡗࡣࠧේ")
l11lll1l11UK_Turk_No1=re.compile(l11l1lUK_Turk_No1 (u"ࠬࡂࡡࠡࡵࡷࡽࡱ࠽ࠣࡶࡨࡼࡹ࠳ࡤࡦࡥࡲࡶࡦࡺࡩࡰࡰ࠽ࡲࡴࡴࡥࠡࠣࡱࡵࡵࡲࡵࡣࡱࡸࡀࡩࡱࡵ࠾ࠨ࠻࠴࠶࠶࠸࠸ࡀࠨࠠࡩࡴࡨࡪࡂࠨࠨ࠭ࡂ࠭ࠧࠦࡴࡢࡴࡪࡩࡹࡃࠢࡠࡤࡥࡳࡱࠢࠪ࠱ࡄ࠼࠰ࡣࡁࡀ࠴ࡺࡤࠩෛ")).findall(event)
for url,l11lll11llUK_Turk_No1 in l11lll1l11UK_Turk_No1:
url=url
l11lll11llUK_Turk_No1=l11lll11llUK_Turk_No1
string=string+l11l1lUK_Turk_No1 (u"࠭࠾ࡸࡪࡳ࠾ࡰࡸࡺࡦࡀࠨࡷࡁ࠵ࡴࡪࡶࡩࡃࡢ࠽ࡵࡳࡳࡷࡺࡳࡥࡧࡹࡱࡄࠥࡴ࠾࠲ࡷࡵࡵࡲࡵࡵࡧࡩࡻ࡞ࡱࠫො")%(day+l11l1lUK_Turk_No1 (u"ࠧࠡࠩෝ")+time+l11l1lUK_Turk_No1 (u"ࠨࠢ࠰ࠤࠬෞ")+l11lll11llUK_Turk_No1,url)
string=string+l11l1lUK_Turk_No1 (u"ࠩࡸࡻࡣࡰࡤࡱࡄࡉࡣࡪࡩࡍࡲࡦ࠾࠲ࡸࡻࡣࡰࡤࡱࡄ࠾ࡩࡥࡳࡧࡲࡵࡀࡩࡥࡳࡧࡲࡵ࠾࠲ࡪࡦࡴࡡࡳࡶࡁࡠࡳࡂࡪࡶࡨࡱࡃࡢࠨෟ")
return string
def l1llll111UK_Turk_No1(url):
req = urllib2.Request(url)
req.add_header(l11l1lUK_Turk_No1 (u"࡙ࠪࡸࡲࡃࡪࡩࡳࡺࠧ"), l11l1lUK_Turk_No1 (u"ࠫࡒࡵࡺࡪࡥ࠴࠻࠱࡛ࠢࠫࡴࡤࡰࡹࡶࠤࡓ࡚ࠠ࠲࠲࠱࠴ࠦࡁࡱࡲࡩࡢࡌࡷ࠳࠺࠹࠷࠵࠹ࠤ࠭ࡑࡈࡕࡏࡏ࠰ࠥࡲࡩࡧࠣࡋࡪࡩࡰࠫࠣࡇࡸࡧ࠲࠹࠹࠴࠰࠴࠻࠸࠵࠴࠷࠲ࠢࡖࡥࡧࡲࡪ࠱࠸࠷࠼࠴࠳࠷ࠩ"))
response = urllib2.urlopen(req)
link=response.read()
return link
|
[
"[email protected]"
] | |
94ed5e380f49bf3d497d587c95ec1d3ec6e65bad
|
dcbedd4c06aa0cf78cf1d881a61f2a0cdb06005a
|
/(Keras) IMDB Dataset.py
|
756f84210ce7f7a14cdf371a8ffa4145def4e726
|
[] |
no_license
|
KevinHooah/recurrent-dropout-experiments
|
064243f403687a7e063a6464ce015d282a8a0dfb
|
96b2aa2478fb46a252251c0b49354a2de40c7684
|
refs/heads/master
| 2020-08-29T23:43:01.440740 | 2019-08-07T03:43:23 | 2019-08-07T03:43:23 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,816 |
py
|
# coding: utf-8
# # (Keras) IMDB Dataset
# In[1]:
import numpy as np
from tensorflow.contrib.keras.python.keras.optimizers import SGD, RMSprop, Adagrad
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers.core import Dense, Dropout
from tensorflow.contrib.keras.python.keras.layers.embeddings import Embedding
from tensorflow.contrib.keras.python.keras.layers.recurrent import LSTM, GRU, SimpleRNN
from tensorflow.contrib.keras.python.keras.regularizers import l2
from tensorflow.contrib.keras.python.keras.optimizers import Adam
from tensorflow.contrib.keras.python.keras.preprocessing import sequence
from tensorflow.contrib.keras.python.keras.datasets import imdb
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from yaringal_callbacks import ModelTest
from yaringal_dataset import loader
get_ipython().magic('matplotlib inline')
plt.style.use('fivethirtyeight')
plt.rcParams["figure.figsize"] = (8, 5)
# Global params:
NB_WORDS = 20000
SKIP_TOP = 0
TEST_SPLIT = 0.2
INIT_SEED = 2017
GLOBAL_SEED = 2018
MAXLEN = 80
BATCH_SIZE = 128
TEST_BATCH_SIZE = 512
WEIGHT_DECAY = 1e-4
# In[2]:
np.random.seed(100)
# In[3]:
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(num_words=NB_WORDS)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=MAXLEN)
X_test = sequence.pad_sequences(X_test, maxlen=MAXLEN)
print('x_train shape:', X_train.shape)
print('x_test shape:', X_test.shape)
# In[4]:
def get_model(idrop=0.2, edrop=0.1, odrop=0.25, rdrop=0.2, weight_decay=WEIGHT_DECAY):
    """Build and compile the IMDB sentiment LSTM classifier.

    edrop/odrop insert plain Dropout layers after the embedding / before the
    output Dense; idrop/rdrop are the LSTM's own input and recurrent
    (variational) dropout rates.  Passing 0 disables that dropout.
    Every weight matrix carries an l2(weight_decay) regularizer.
    """
    model = Sequential()
    model.add(Embedding(NB_WORDS, 128, embeddings_regularizer=l2(weight_decay),
                        input_length=MAXLEN)) # , batch_input_shape=(batch_size, maxlen)))
    if edrop:
        model.add(Dropout(edrop))
    model.add(LSTM(128, kernel_regularizer=l2(weight_decay), recurrent_regularizer=l2(weight_decay),
                   bias_regularizer=l2(weight_decay), dropout=idrop, recurrent_dropout=rdrop))
    if odrop:
        model.add(Dropout(odrop))
    # Single sigmoid unit: binary positive/negative sentiment.
    model.add(Dense(1, kernel_regularizer=l2(weight_decay),
                    bias_regularizer=l2(weight_decay), activation='sigmoid'))
    optimizer = Adam(1e-3)
    model.compile(loss='binary_crossentropy', metrics=["binary_accuracy"], optimizer=optimizer)
    return model
# ## Normal Variational LSTM (w/o Embedding Dropout)
# All models in this notebook do not have embedding dropout as Keras does not have such layer.
# In[5]:
print('Build model...')
model = get_model(idrop=0.25, edrop=0, odrop=0.25, rdrop=0.25, weight_decay=1e-4)
# In[6]:
modeltest_1 = ModelTest(X_test, Yt=Y_test,
test_every_X_epochs=1, verbose=0,
loss='binary', batch_size=TEST_BATCH_SIZE)
# In[7]:
history_1 = model.fit(
X_train, Y_train,
verbose=2,
shuffle=True,
# validation_data=[X_test, Y_test],
batch_size=BATCH_SIZE, epochs=20, callbacks=[modeltest_1])
# In[11]:
best_epoch = np.argmin([x[1] for x in modeltest_1.history[:18]]) + 1
print("Best Loss: {:.4f} Acc: {:.2f}% Best Epoch: {}".format(
modeltest_1.history[best_epoch-1][1],
modeltest_1.history[best_epoch-1][3] * 100,
best_epoch
))
# In[12]:
plt.title("Log Loss Comparison")
plt.plot(np.arange(len(modeltest_1.history)), [x[0] for x in modeltest_1.history], label="std")
plt.plot(np.arange(len(modeltest_1.history)), [x[1] for x in modeltest_1.history], "g-", label="mc")
plt.legend(loc='best')
# In[13]:
plt.title("Accuracy Comparison")
plt.plot(np.arange(0, len(modeltest_1.history)), [x[2] for x in modeltest_1.history], label="std")
plt.plot(np.arange(0, len(modeltest_1.history)), [x[3] for x in modeltest_1.history], "g-", label="mc")
plt.legend(loc='best')
# ## Standard LSTM
# I choose to keep a very low weight decay because assigning zero seems to cause some problems.
# In[14]:
print('Build model...')
model = get_model(edrop=0, rdrop=0, odrop=0, idrop=0, weight_decay=1e-10)
# In[15]:
modeltest_2 = ModelTest(X_test, Yt=Y_test,
test_every_X_epochs=1, verbose=0, T=1,
loss='binary', batch_size=TEST_BATCH_SIZE)
# In[17]:
history_2 = model.fit(
X_train, Y_train,
verbose=2,
shuffle=True,
# validation_data=[X_test, Y_test],
batch_size=BATCH_SIZE, epochs=20, callbacks=[modeltest_2])
# In[25]:
best_epoch = np.argmin([x[1] for x in modeltest_2.history]) + 1
print("Best Loss: {:.4f} Acc: {:.2f}% Best Epoch: {}".format(
modeltest_2.history[best_epoch-1][1],
modeltest_2.history[best_epoch-1][3] * 100,
best_epoch
))
# ## LSTM with Standard Dropout (different mask at differnt time steps)
# In[20]:
print('Build model...')
model = get_model(edrop=0.25, rdrop=0, odrop=0.25, idrop=0, weight_decay=1e-4)
# In[21]:
modeltest_3 = ModelTest(X_test, Yt=Y_test,
test_every_X_epochs=1, verbose=0, T=10,
loss='binary', batch_size=TEST_BATCH_SIZE)
# In[22]:
history_3 =model.fit(
X_train, Y_train,
verbose=2,
shuffle=True,
# validation_data=[X_test, Y_test],
batch_size=BATCH_SIZE, epochs=20, callbacks=[modeltest_3])
# In[24]:
best_epoch = np.argmin([x[1] for x in modeltest_3.history[:19]]) + 1
print("Best Loss: {:.4f} Acc: {:.2f}% Best Epoch: {}".format(
modeltest_3.history[best_epoch-1][1],
modeltest_3.history[best_epoch-1][3] * 100,
best_epoch
))
# ## Visualizations
# In[40]:
bins = np.arange(-0.1, 0.035, 0.01)
# In[53]:
len(history_2.history["binary_accuracy"])
# In[54]:
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.title("Accuracy Comparison - Training Set")
plt.plot(np.arange(len(history_2.history["binary_accuracy"])),
np.array(history_1.history["binary_accuracy"][:20]) * 100, label="variational")
plt.plot(np.arange(len(history_2.history["binary_accuracy"])),
np.array(history_2.history["binary_accuracy"]) * 100, "g-", label="no dropout")
plt.plot(np.arange(len(history_3.history["binary_accuracy"])),
np.array(history_3.history["binary_accuracy"]) * 100, "y-", label="naive dropout")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("Accuracy")
plt.subplot(1, 2, 2)
plt.title("(MC - Approx) Histogram")
plt.hist([x[1] - x[0] for x in modeltest_1.history[:17]], bins=bins, alpha=0.5, label="varational")
plt.hist([x[1] - x[0] for x in modeltest_3.history[:17]], bins=bins, alpha=0.5, label="navie dropout")
plt.legend(loc='best')
plt.xlabel("Difference in Loss")
plt.ylabel("Count")
plt.xticks(fontsize=8, rotation=0)
# In[60]:
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.title("Log Loss Comparison - Validation Set")
plt.plot(np.arange(len(modeltest_2.history)), [x[1] for x in modeltest_1.history[:20]], "b-", label="variational(mc)")
plt.plot(np.arange(len(modeltest_2.history)), [x[1] for x in modeltest_2.history], "g-", label="no dropout")
plt.plot(np.arange(len(modeltest_3.history)), [x[1] for x in modeltest_3.history], "y-", label="naive dropout(mc)")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("Log Loss")
plt.subplot(1, 2, 2)
plt.title("Accuracy Comparison - Validation Set")
plt.plot(np.arange(len(modeltest_2.history)), [x[3] * 100 for x in modeltest_1.history[:20]], "b-", label="variational(mc)")
plt.plot(np.arange(len(modeltest_2.history)), [x[3] * 100 for x in modeltest_2.history], "g-", label="no dropout")
plt.plot(np.arange(len(modeltest_3.history)), [x[3] * 100 for x in modeltest_3.history], "y-", label="naive dropout(mc)")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("Accuracy (%)")
# In[ ]:
|
[
"[email protected]"
] | |
4aafe1f881c5b33b219068a5220f67354a33717f
|
c72252f96a1021ba3f9b812020b74bda258bf465
|
/S12学习/day3/code/configfile.py
|
8a00a0bc88d4cfb073f70be09115f0b43d8c233f
|
[] |
no_license
|
yzwy1988/cloud
|
0251af05b8cc2a8fffdc6f739a01ba9383353dc5
|
6e87f26497072f41b20c1b0696e5605a52987c50
|
refs/heads/master
| 2021-01-17T22:19:52.327370 | 2016-02-22T10:34:59 | 2016-02-22T10:34:59 | 52,455,959 | 2 | 0 | null | 2016-02-24T16:14:50 | 2016-02-24T16:14:49 | null |
UTF-8
|
Python
| false | false | 1,267 |
py
|
# /usr/bin/env python
# -*- coding:utf-8 -*-
# startswith 是否以某个字段开头的
import json
def check(backend):
    """Return the config lines of one backend section of the 'back' file.

    Scans the haproxy-style file 'back' in the working directory for the
    section header ``backend <backend>`` and collects every following
    non-blank line until the next ``backend`` header.  Returns [] when the
    section is absent.
    """
    collected = []
    inside = False
    with open('back', 'r') as conf:
        for raw in conf:
            if raw.startswith('backend'):
                # Header line: either enter our section or leave it.
                if raw.strip().split()[1] == backend:
                    inside = True
                elif inside:
                    break
            elif inside and raw.strip():
                collected.append(raw)
    return collected
def add(inp_dic):
    """Render a haproxy-style ``server`` line from a parsed record.

    Bug fix: the original referenced an undefined name ``inp_`` and used
    bare ``%`` placeholders, so every call raised.  Now formats the three
    fields and returns the rendered line (callers previously got None and
    ignored the result, so returning a value is backward-compatible).
    NOTE(review): key names are inferred from the sample input in main()
    ('server ... weight N maxconn M') -- confirm the real record schema.
    """
    add_mess = 'server %s weight %s maxconn %s ' % (
        inp_dic['server'], inp_dic['weight'], inp_dic['maxconn'])
    return add_mess
def menu():
    # Print the numbered operation menu: 1 view / 2 add / 3 delete.
    print('''
    ****************
    1 查看数据
    2 添加数据
    3 删除数据
    ****************
    ''')
def main():
    """Entry point: show the menu, read an action number, and dispatch.

    Action '1' looks up a backend section via check(); action '2' reads a
    record and parses it.  Action '3' (delete) is not implemented yet.
    """
    menu()
    action = input('请选择操作序号:')
    if action == '1':
        backend = input('''请按如下格式输入要操作的字段:
www.oldboy.org
''')
        check(backend)
    if action == '2':
        inp_data = input('''
请按如下格式输入要操作的字段:
server 100.1.7.9 100.1.7.9 weight 20 maxconn 3000
''')
        # Bug fix: json.loads() was called with no argument (TypeError).
        # NOTE(review): the sample input shown above is not JSON; confirm the
        # intended record format before relying on this parse.
        inp_dic = json.loads(inp_data)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
f9edc8d9a223c008a70ef3224c3054621286d518
|
12258001571bd504223fbf4587870960fa93a46d
|
/client/config.py
|
a629d5d3999e56e775ec3430d476a68ae01ea7a4
|
[] |
no_license
|
Nik0las1984/mud-obj
|
0bd71e71855a9b0f0d3244dec2c877bd212cdbd2
|
5d74280724ff6c6ac1b2d3a7c86b382e512ecf4d
|
refs/heads/master
| 2023-01-07T04:12:33.472377 | 2019-10-11T09:10:14 | 2019-10-11T09:10:14 | 69,223,190 | 2 | 0 | null | 2022-12-26T20:15:20 | 2016-09-26T07:11:49 |
Python
|
UTF-8
|
Python
| false | false | 190 |
py
|
# coding=utf-8
# Client configuration defaults (Python 2 module).
# Any name here may be overridden by an optional local_config.py.
auto_login = False
try:
    from local_config import *
except ImportError, e:
    print 'Unable to load local_config.py:', e
# Guarantee `plugins` exists even when local_config did not define it.
if 'plugins' not in locals():
    plugins = []
|
[
"[email protected]"
] | |
6d7330abeb85dd4954ae55bd45295a5be17a49bd
|
fffb732290af97687ea3221ce4a6ce4d95640aff
|
/courses/w10_opencv/source/OpenCV_in_Ubuntu/Python/mycam_02.py
|
a69e21c219e5ed6a45cf86fad76f32c973c641fb
|
[] |
no_license
|
NamWoo/self_driving_car
|
851de73ae909639e03756eea4d49ab663447fc19
|
cd5c1142c9e543e607ca9dc258f689de6879d207
|
refs/heads/master
| 2021-07-24T19:51:54.459485 | 2021-07-06T13:58:19 | 2021-07-06T13:58:19 | 186,267,543 | 9 | 7 | null | null | null | null |
UTF-8
|
Python
| false | false | 493 |
py
|
import numpy as np
import cv2
def receive():
    # Open a GStreamer pipeline that listens for an RTP/H.264 stream on UDP
    # port 5200, decodes it, and hands frames to OpenCV via appsink.
    cap = cv2.VideoCapture('udpsrc port=5200 caps=application/x-rtp,media=(string)video,clock-rate=(int)90000,encoding-name=(string)H264,payload=(int)96!rtph264depay!decodebin!videoconvert!appsink',cv2.CAP_GSTREAMER)
    # Display frames until the user presses 'q'; empty reads are skipped
    # (the sender may not have started yet).
    while True:
        ret,frame = cap.read()
        if not ret:
            print('empty frame')
            continue
        cv2.imshow('receive', frame)
        if cv2.waitKey(1)&0xFF == ord('q'):
            break
    cap.release()
receive();
|
[
"[email protected]"
] | |
da183faec87314655b87ce430d6c703df9991366
|
4ef688b93866285bcc27e36add76dc8d4a968387
|
/moto/ds/responses.py
|
46d204c1e27ec3b9a35fcf38df9cfb7e7319d764
|
[
"Apache-2.0"
] |
permissive
|
localstack/moto
|
cec77352df216cac99d5e0a82d7ada933950a0e6
|
b0b2947e98e05d913d7ee2a0379c1bec73f7d0ff
|
refs/heads/localstack
| 2023-09-01T05:18:16.680470 | 2023-07-10T09:00:26 | 2023-08-07T14:10:06 | 118,838,444 | 22 | 42 |
Apache-2.0
| 2023-09-07T02:07:17 | 2018-01-25T00:10:03 |
Python
|
UTF-8
|
Python
| false | false | 6,529 |
py
|
"""Handles Directory Service requests, invokes methods, returns responses."""
import json
from moto.core.exceptions import InvalidToken
from moto.core.responses import BaseResponse
from moto.ds.exceptions import InvalidNextTokenException
from moto.ds.models import ds_backends, DirectoryServiceBackend
class DirectoryServiceResponse(BaseResponse):
    """Handler for DirectoryService requests and responses.

    Each public method mirrors one AWS DS API action: it pulls parameters
    from the JSON request body via self._get_param, delegates to the
    region-scoped backend, and returns the JSON response body as a string.
    """

    def __init__(self) -> None:
        super().__init__(service_name="ds")

    @property
    def ds_backend(self) -> DirectoryServiceBackend:
        """Return backend instance specific for this region."""
        return ds_backends[self.current_account][self.region]

    def connect_directory(self) -> str:
        """Create an AD Connector to connect to a self-managed directory."""
        name = self._get_param("Name")
        short_name = self._get_param("ShortName")
        password = self._get_param("Password")
        description = self._get_param("Description")
        size = self._get_param("Size")
        connect_settings = self._get_param("ConnectSettings")
        tags = self._get_param("Tags", [])
        directory_id = self.ds_backend.connect_directory(
            region=self.region,
            name=name,
            short_name=short_name,
            password=password,
            description=description,
            size=size,
            connect_settings=connect_settings,
            tags=tags,
        )
        return json.dumps({"DirectoryId": directory_id})

    def create_directory(self) -> str:
        """Create a Simple AD directory."""
        name = self._get_param("Name")
        short_name = self._get_param("ShortName")
        password = self._get_param("Password")
        description = self._get_param("Description")
        size = self._get_param("Size")
        vpc_settings = self._get_param("VpcSettings")
        tags = self._get_param("Tags", [])
        directory_id = self.ds_backend.create_directory(
            region=self.region,
            name=name,
            short_name=short_name,
            password=password,
            description=description,
            size=size,
            vpc_settings=vpc_settings,
            tags=tags,
        )
        return json.dumps({"DirectoryId": directory_id})

    def create_alias(self) -> str:
        """Create an alias and assign the alias to the directory."""
        directory_id = self._get_param("DirectoryId")
        alias = self._get_param("Alias")
        response = self.ds_backend.create_alias(directory_id, alias)
        return json.dumps(response)

    def create_microsoft_ad(self) -> str:
        """Create a Microsoft AD directory."""
        name = self._get_param("Name")
        short_name = self._get_param("ShortName")
        password = self._get_param("Password")
        description = self._get_param("Description")
        vpc_settings = self._get_param("VpcSettings")
        edition = self._get_param("Edition")
        tags = self._get_param("Tags", [])
        directory_id = self.ds_backend.create_microsoft_ad(
            region=self.region,
            name=name,
            short_name=short_name,
            password=password,
            description=description,
            vpc_settings=vpc_settings,
            edition=edition,
            tags=tags,
        )
        return json.dumps({"DirectoryId": directory_id})

    def delete_directory(self) -> str:
        """Delete a Directory Service directory."""
        directory_id_arg = self._get_param("DirectoryId")
        directory_id = self.ds_backend.delete_directory(directory_id_arg)
        return json.dumps({"DirectoryId": directory_id})

    def describe_directories(self) -> str:
        """Return directory info for the given IDs or all IDs."""
        directory_ids = self._get_param("DirectoryIds")
        next_token = self._get_param("NextToken")
        limit = self._get_int_param("Limit")
        try:
            (directories, next_token) = self.ds_backend.describe_directories(
                directory_ids, next_token=next_token, limit=limit
            )
        except InvalidToken as exc:
            # Translate the generic pagination error into the DS-flavored one.
            raise InvalidNextTokenException() from exc
        response = {"DirectoryDescriptions": [x.to_dict() for x in directories]}
        if next_token:
            response["NextToken"] = next_token
        return json.dumps(response)

    def disable_sso(self) -> str:
        """Disable single-sign on for a directory."""
        directory_id = self._get_param("DirectoryId")
        username = self._get_param("UserName")
        password = self._get_param("Password")
        self.ds_backend.disable_sso(directory_id, username, password)
        # Empty body: the AWS API returns no payload for this action.
        return ""

    def enable_sso(self) -> str:
        """Enable single-sign on for a directory."""
        directory_id = self._get_param("DirectoryId")
        username = self._get_param("UserName")
        password = self._get_param("Password")
        self.ds_backend.enable_sso(directory_id, username, password)
        return ""

    def get_directory_limits(self) -> str:
        """Return directory limit information for the current region."""
        limits = self.ds_backend.get_directory_limits()
        return json.dumps({"DirectoryLimits": limits})

    def add_tags_to_resource(self) -> str:
        """Add or overwrite on or more tags for specified directory."""
        resource_id = self._get_param("ResourceId")
        tags = self._get_param("Tags")
        self.ds_backend.add_tags_to_resource(resource_id=resource_id, tags=tags)
        return ""

    def remove_tags_from_resource(self) -> str:
        """Removes tags from a directory."""
        resource_id = self._get_param("ResourceId")
        tag_keys = self._get_param("TagKeys")
        self.ds_backend.remove_tags_from_resource(
            resource_id=resource_id, tag_keys=tag_keys
        )
        return ""

    def list_tags_for_resource(self) -> str:
        """Lists all tags on a directory."""
        resource_id = self._get_param("ResourceId")
        next_token = self._get_param("NextToken")
        limit = self._get_param("Limit")
        try:
            tags, next_token = self.ds_backend.list_tags_for_resource(
                resource_id=resource_id, next_token=next_token, limit=limit
            )
        except InvalidToken as exc:
            raise InvalidNextTokenException() from exc
        response = {"Tags": tags}
        if next_token:
            response["NextToken"] = next_token
        return json.dumps(response)
|
[
"[email protected]"
] | |
49919addd199e8a7aff5d7ceb03465d0ee8fa6c8
|
3da6b8a0c049a403374e787149d9523012a1f0fc
|
/网易云课堂/Python办公自动化实战/01_开启自动化人生/batch_docs.py
|
d407f2929fd181400dee176ff02cc8571a3889b9
|
[] |
no_license
|
AndersonHJB/PyCharm_Coder
|
d65250d943e84b523f022f65ef74b13e7c5bc348
|
32f2866f68cc3a391795247d6aba69a7156e6196
|
refs/heads/master
| 2022-07-25T11:43:58.057376 | 2021-08-03T02:50:01 | 2021-08-03T02:50:01 | 348,922,058 | 3 | 3 | null | 2021-09-05T02:20:10 | 2021-03-18T02:57:16 |
Python
|
UTF-8
|
Python
| false | false | 790 |
py
|
# -*- coding: utf-8 -*-
# @Time : 2021/5/6 8:22 下午
# @Author : AI悦创
# @FileName: batch_docs.py.py
# @Software: PyCharm
# @Blog :http://www.aiyc.top
# @公众号 :AI悦创
from docx import Document # 创建文档
from docx.oxml.ns import qn # 中文
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT # 段落
from docx.shared import Pt, RGBColor, Mm, Cm # 大小磅数/字号
import random
import qrcode
from openpyxl import load_workbook
import xlrd
def qr_code():
    # Generate a random 4-digit sign-in code, render it as a QR image,
    # save it under qr/<code>.png and return the file name.
    # NOTE(review): assumes the 'qr/' directory already exists -- confirm.
    signin_code = random.randint(1000, 9999)
    img = qrcode.make('%s' % signin_code)
    filename = '%s.png' % signin_code
    img.save('qr/%s' % filename)
    return filename
def excel_read():
    # Open the student roster workbook and grab its first sheet.
    # NOTE(review): appears unfinished -- `sheet` is built but never
    # returned or used; also `file` shadows the builtin. Confirm intent.
    file = xlrd.open_workbook('students.xlsx')
    sheet = file.sheet_by_name(file.sheet_names()[0])
|
[
"[email protected]"
] | |
4426687fcdb98f8446d4f07841bc72249015469b
|
5173c3e3956387a3f2ae8fcf4aed7c7a600dac78
|
/Programmers/Programmers_입국심사.py
|
0b401b3a4fa57dd39d85c7899098df041a3e441f
|
[] |
no_license
|
ma0723/Min_Algorithm
|
df75f53f6e89b7817d4b52d686effb8236a4ddac
|
b02d1043008cb32e22daa9d4207b9a45f111d66f
|
refs/heads/master
| 2023-07-25T11:00:15.397093 | 2021-08-30T02:08:05 | 2021-08-30T02:08:05 | 375,613,927 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 892 |
py
|
def solution(n, times):
    """Minimum total time for n people to clear immigration.

    times[i] is how long examiner i needs per person.  Binary-searches the
    answer T on [1, n * max(times)]: within T minutes the examiners can
    process sum(T // t for t in times) people, which grows monotonically
    with T, so the smallest feasible T is found in O(len(times) * log range).
    """
    lo, hi = 1, n * max(times)
    best = 0
    while lo <= hi:
        mid = (lo + hi) // 2
        # How many people all examiners together finish within `mid` minutes.
        capacity = sum(mid // per_person for per_person in times)
        if capacity >= n:
            best = mid          # feasible -- try to shrink the window
            hi = mid - 1
        else:
            lo = mid + 1        # infeasible -- need more time
    return best
|
[
"[email protected]"
] | |
702760dacc77f2e65aaed171a0998dfd7602a7b9
|
3cf21d46cc8213614f5edfe4ebb09df112e5bf44
|
/tools/asset_aggregator/name_check.py
|
788a6e9eadc9bae3dc73c59a19e06448f19fd6e7
|
[
"BSD-3-Clause"
] |
permissive
|
toro09/rotki
|
abbf06c63bf0191b8a381bad05534bf8541cf212
|
014e7e11521b81c89b5cd2b4082d197da26684ee
|
refs/heads/master
| 2022-12-01T19:09:08.409018 | 2020-08-11T19:34:54 | 2020-08-11T19:34:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,883 |
py
|
import sys
from typing import Any, Dict
from asset_aggregator.utils import choose_multiple
# For assets we support but no API has names for. We manually input the names then.
MANUALLY_CHECKED_NAMES = {
'ADADOWN': 'Binance leveraged token ADADOWN',
'ADAUP': 'Binance leveraged token ADAUP',
'BTCDOWN': 'Binance leveraged token BTCDOWN',
'BTCUP': 'Binance leveraged token BTCUP',
'ETHDOWN': 'Binance leveraged token ETHDOWN',
'ETHUP': 'Binance leveraged token ETHUP',
'LINKDOWN': 'Binance leveraged token LINKDOWN',
'LINKUP': 'Binance leveraged token LINKUP',
'AMIS': 'Amis',
'AVA-2': 'Avalon',
'BIDR': 'Binance IDR Stable Coin',
'BITCAR': 'BitCar',
'BMT': 'BMChain',
'BOU': 'Boulle',
'BTCE': 'EthereumBitcoin',
'BTE': 'BTEcoin',
'BTH': 'Bytether',
'BTR-2': 'Bither',
'CET-2': 'DICE Money',
'CFTY': 'Crafty',
'CNTM': 'Connectome',
'CTSI': 'Cartesi',
'CO2': 'Climatecoin',
'CRGO': 'CargoCoin',
'DEPO': 'Depository Network',
'DIP': 'Etherisc',
'DPP': 'Digital Assets Power Play',
'EMT': 'EasyMine',
'ENTRP': 'Hut34 Entropy Token',
'ETHB': 'EtherBTC',
'FIH': 'FidelityHouse',
'FLX': 'BitFlux',
'FORK-2': 'Gastro Advisor Token',
'HBD': 'Hive dollar',
'HIVE': 'Hive',
'HKG': 'Hacker Gold',
'ITM': 'Intimate',
'JOY': 'JOYSO',
'KUE': 'Kuende',
'LGR': 'Logarithm',
'LOON': 'Loon Network',
'ME': 'All.me',
'MILC': 'Micro Licensing Coin',
'MNT': 'Media Network Token',
'MRP': 'Money Rebel',
'MRV': 'Macroverse',
'OAK': 'Acorn Collective',
'OCC-2': 'Original Crypto Coin',
'REA': 'Realisto',
'REDC': 'Red Cab',
'RIPT': 'RiptideCoin',
'RNDR': 'Render Token',
'SKR': 'Skrilla Token',
'SKYM': 'Skymap',
'SPICE': 'Spice VC Token',
'SSH': 'StreamSpace',
'STP': 'StashPay',
'TAN': 'Taklimakan',
'TBT': 'T-Bot',
'TRXBEAR': ' 3X Short TRX Token',
'TRXBULL': ' 3X Long TRX Token',
'URB': 'Urbit Data',
'USDJ': 'USDJ',
'UTI': 'Unicorn Technology International',
'VENUS': 'VenusEnergy',
'WMK': 'WeMark',
'WLK': 'Wolk',
'ZIX': 'Zeex Token',
}
def name_check(
        asset_symbol: str,
        our_asset: Dict[str, Any],
        our_data: Dict[str, Any],
        paprika_data: Dict[str, Any],
        cmc_data: Dict[str, Any],
) -> Dict[str, Any]:
    """Fill in ``our_data[asset_symbol]['name']`` from external APIs.

    Precedence: a name already present in our data wins, then the manual
    override table (MANUALLY_CHECKED_NAMES), then Coinpaprika/Coinmarketcap.
    When the two APIs disagree the user is asked interactively which one
    to keep. Returns the (possibly mutated) ``our_data`` mapping; exits the
    process when no usable name can be found.
    """
    our_name = our_asset.get('name', None)
    if our_name:
        # If we already got a name from manual input then keep it
        return our_data

    if asset_symbol in MANUALLY_CHECKED_NAMES:
        our_data[asset_symbol]['name'] = MANUALLY_CHECKED_NAMES[asset_symbol]
        return our_data

    paprika_name = paprika_data['name'] if paprika_data else None
    cmc_name = cmc_data['name'] if cmc_data else None

    if not paprika_name and not cmc_name and asset_symbol:
        print(f'No name in any external api for asset {asset_symbol}')
        sys.exit(1)

    if paprika_name == cmc_name:
        # If both external APIs agree just use their name
        our_data[asset_symbol]['name'] = paprika_name
        return our_data

    msg = (
        f'For asset {asset_symbol} the possible names are: \n'
        f'(1) Coinpaprika: {paprika_name}\n'
        f'(2) Coinmarketcap: {cmc_name}\n'
        f'Choose a number (1)-(2) to choose which name to use: '
    )
    choice = choose_multiple(msg, (1, 2))
    if choice == 1:
        # Bug fix: mirror the choice-2 guard. Previously an empty Coinpaprika
        # name could be selected and None stored silently as the asset name.
        if not paprika_name:
            print("Chose coinpaprika's name but it's empty. Bailing ...")
            sys.exit(1)
        name = paprika_name
    elif choice == 2:
        if not cmc_name:
            print("Chose coinmarketcap's name but it's empty. Bailing ...")
            sys.exit(1)
        name = cmc_name

    our_data[asset_symbol]['name'] = name
    return our_data
|
[
"[email protected]"
] | |
0b7d6236b66a636e6778572bde8454fb0fa408ca
|
242086b8c6a39cbc7af3bd7f2fd9b78a66567024
|
/python/PP4E-Examples-1.4/Examples/PP4E/Dstruct/Classics/permcomb.py
|
f6e244e89732645613fef830391f1e2f9dd60db6
|
[] |
no_license
|
chuzui/algorithm
|
7537d0aa051ac4cbe9f6a7ca9a3037204803a650
|
c3006b24c4896c1242d3ceab43ace995c94f10c8
|
refs/heads/master
| 2021-01-10T13:05:30.902020 | 2015-09-27T14:39:02 | 2015-09-27T14:39:02 | 8,404,397 | 4 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,516 |
py
|
"permutation-type operations for sequences"
def permute(list):
    "Return every ordering of the given sequence (works on lists and strings)."
    if not list:
        return [list]                              # one permutation of nothing: itself
    res = []
    for i in range(len(list)):
        head = list[i:i+1]                         # one-item slice keeps the sequence type
        remainder = list[:i] + list[i+1:]          # everything except position i
        res.extend(head + tail for tail in permute(remainder))
    return res
def subset(list, size):
    "Return all size-length orderings drawn from the sequence (order matters)."
    if size == 0 or not list:
        return [list[:0]]                          # a single empty selection
    result = []
    for i in range(len(list)):
        chosen = list[i:i+1]                       # sequence-typed one-item slice
        others = list[:i] + list[i+1:]             # drop only position i
        result.extend(chosen + tail for tail in subset(others, size - 1))
    return result
def combo(list, size):
    "Return all size-length combinations of the sequence (order ignored: xyz == yzx)."
    if size == 0 or not list:
        return [list[:0]]                          # one empty combination
    result = []
    last_start = len(list) - size                  # start only where enough items remain
    for i in range(last_start + 1):
        first = list[i:i+1]
        for tail in combo(list[i+1:], size - 1):   # later items only, so order is canonical
            result.append(first + tail)
    return result
|
[
"zui"
] |
zui
|
39695f540bade7e05ff8fa960c71d068109b1dda
|
2dd0bf6e8542b560c2e3567f8793b561cb0678b0
|
/code/src/main/python/misconceptions/syntactics/grammar/R.py
|
4c300cb4f55146b42613854ab34bcb255fe58cbf
|
[
"Unlicense"
] |
permissive
|
Eduardo95/COSAL
|
021f01cfa86e656c3fe320159c8d25ca5b6f311d
|
4eb95d286288aa25a1a90db40cb1998dad048e1b
|
refs/heads/master
| 2023-06-17T08:19:37.925879 | 2021-07-12T16:24:06 | 2021-07-12T16:24:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,354 |
py
|
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "COSAL"
from lark import Lark
R_GRAMMAR = """
start: value (ASSIGNMENT_OPERATOR value)?
// expr: value ( indexer | value | attribute)+
binary_expr: value BINARY_OPERATOR value
unary_expr: UNARY_OPERATOR value
indexer: value "[" value "]"
attribute: value "$" value
value: unary_expr
| binary_expr
| array
| list
| matrix
| data_frame
| tuple
| slice_range
| QUOTED_STRING
| NUMBER
| BOOL
| NULL
| NAME
| if_else
| func_call
| attribute
| indexer
| values
values: value? ("," value?)+
array: ("c" "(" [value ("," value)*] ")") | ("[" value? ("," value?)* "]")
list: "list" "(" [value ("," value)*] ")"
matrix: "matrix" "(" args ")"
data_frame: "data.frame" "(" args ")"
tuple: "(" [value ("," value)*] ")"
QUOTED_STRING : DOUBLE_QUOTED_STRING | SINGLE_QUOTED_STRING | TILDE_QUOTED_STRING
DOUBLE_QUOTED_STRING : /"[^"]*"/
SINGLE_QUOTED_STRING : /'[^']*'/
TILDE_QUOTED_STRING : /`[^']*`/
NAME: ("_"|LETTER) ("_"|LETTER|DIGIT|".")*
BOOL: "TRUE" | "FALSE"
if_else: "ifelse" "(" value "," value "," value")"
slice_range: value? ":" value?
NULL: "NULL" | "NaN"
ASSIGNMENT_OPERATOR: "="
| "<-"
BINARY_OPERATOR: "+"
| "-"
| "**"
| "*"
| "/"
| "^"
| "%%"
| "%/%"
| ">="
| ">"
| "<="
| "<"
| "=="
| "!="
| "|"
| "&"
UNARY_OPERATOR: "!"
| "-"
func_name: NAME | TILDE_QUOTED_STRING
func_args: value ("," value)*
func_kwarg: NAME "=" value
func_kwargs: func_kwarg ("," func_kwarg)*
args: (func_args | func_kwargs | (func_args "," func_kwargs))
//indexer_args: (value | values | func_name)
func_call: func_name "(" args? ")"
// %import common.CNAME -> NAME
%import common.SIGNED_NUMBER -> NUMBER
%import common.LETTER -> LETTER
%import common.DIGIT -> DIGIT
%import common.WORD
%import common.WS
%import common.NEWLINE -> NEWLINE
%ignore WS
"""
def r_parser():
    """Construct a Lark parser for the R expression grammar defined above."""
    parser = Lark(R_GRAMMAR)
    return parser
def _test():
    """Ad-hoc smoke test: parse one sample R snippet and print the parse tree."""
    r = r_parser()
    # print(parser.parse("df.iloc[1:2, df[[2]]]"))
    # print(parser.parse("df.set_value(dfaxis=8.05)"))
    # print(parser.parse('table(df$Parch, df$Survived)'))
    print(r.parse('mean(df$Fare)'))
def verify():
    """Parse every unique R snippet in the misconceptions workbook as a grammar sweep."""
    from utils import cache
    misconceptions_path = "/Users/panzer/Raise/ProgramRepair/CodeSeer/code/src/main/python/misconceptions.xlsx"
    workbook = cache.read_excel(misconceptions_path, read_only=True)
    # sheet = wb.get_sheet_by_name('HighSim-HighSyn')
    sheet = workbook.get_sheet_by_name('LowSim-LowSyn')
    parser = r_parser()
    seen = set()
    for index, row in enumerate(sheet.iter_rows()):
        if index == 0:
            continue  # skip the header row
        snippet = row[0].value
        if index >= 1 and snippet not in seen:
            print(index, snippet)
            seen.add(snippet)
            parser.parse(snippet)  # raises on any snippet the grammar rejects
        elif index % 100 == 0:
            print("Dont worry I'm running", index)
if __name__ == "__main__":
    # Quick smoke test by default; uncomment verify() for the full workbook sweep.
    # verify()
    _test()
|
[
"[email protected]"
] | |
2718c3441138bf66c7e26a309ed95597a6632432
|
19375a18719e44eee7c596e72ef8915d3fcbff92
|
/day07_spider/06_qq.py
|
e29058c5d79b1d50650db9c34bab04e364ccb3bd
|
[] |
no_license
|
J-shan0903/AID1912
|
6c617fa26751c31ff05a63050a320122e3ca044e
|
0797f3d8ef0e96b8eb6908dffbec8193c9614973
|
refs/heads/master
| 2021-03-23T12:21:32.480026 | 2020-05-23T08:36:21 | 2020-05-23T08:36:21 | 247,452,309 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 303 |
py
|
from selenium import webdriver

# NOTE(review): credentials are hard-coded in plain text below -- move them to
# environment variables or a config file before committing/sharing this script.
driver = webdriver.Chrome()
driver.get(url='https://mail.qq.com/')
# The QQ mail login form lives inside an iframe; switch into it first.
driver.switch_to.frame('login_frame')
driver.find_element_by_id('u').send_keys('[email protected]')
driver.find_element_by_id('p').send_keys('353597jss')
# Bug fix: find_elements_by_class_name returns a *list*, which has no .click();
# the singular find_element_by_class_name returns the login button element.
driver.find_element_by_class_name('btn').click()
|
[
"[email protected]"
] | |
0b3ce647889db5ce8bc43acdb3f0730ff2349fb3
|
70811da75f0f38719703e02c9f99e2ce09e21d2a
|
/LSTM_FCN/distal_phalanx_tw_model.py
|
cd18a4f46cd55d512c9381b2a58a3c9a060069cf
|
[] |
no_license
|
HongminWu/time_series_anomaly_detection_classification_clustering
|
9d5e555c9bf37ee72770e127588f61f15139bd4e
|
548b3799389ec7a96fc56c51360a6de89e0502a1
|
refs/heads/master
| 2020-03-11T04:44:00.113684 | 2018-08-16T06:38:57 | 2018-08-16T06:38:57 | 129,783,614 | 15 | 6 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,711 |
py
|
from keras.models import Model
from keras.layers import Input, PReLU, Dense, Dropout, LSTM, Bidirectional, multiply, concatenate
from keras.layers import Conv1D, BatchNormalization, GlobalAveragePooling1D, Permute, Activation
from utils.constants import MAX_SEQUENCE_LENGTH_LIST, NB_CLASSES_LIST
from utils.keras_utils import train_model, evaluate_model, set_trainable, visualize_context_vector, visualize_cam
from utils.layer_utils import AttentionLSTM
DATASET_INDEX = 11  # index into the shared dataset tables (distal phalanx TW, per the filename) -- confirm
MAX_SEQUENCE_LENGTH = MAX_SEQUENCE_LENGTH_LIST[DATASET_INDEX]
NB_CLASS = NB_CLASSES_LIST[DATASET_INDEX]
TRAINABLE = True  # NOTE(review): not referenced in this file; presumably read elsewhere -- verify
def generate_model():
    """Build the LSTM-FCN network: a plain LSTM branch and a three-stage FCN
    branch over the same input, concatenated into a softmax classifier."""
    ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))

    # Recurrent branch.
    x = LSTM(64)(ip)
    x = Dropout(0.8)(x)

    # Convolutional branch: put the time axis last for Conv1D.
    y = Permute((2, 1))(ip)
    for n_filters, kernel_size in ((128, 8), (256, 5), (128, 3)):
        y = Conv1D(n_filters, kernel_size, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])
    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # model.load_weights("weights/phalanx_tw_weights - 7769.h5")

    return model
def generate_model_2():
    """Build the ALSTM-FCN variant: identical to generate_model() except the
    recurrent branch uses an attention LSTM cell."""
    ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))

    # Attention-augmented recurrent branch.
    x = AttentionLSTM(64)(ip)
    x = Dropout(0.8)(x)

    # Convolutional branch: time axis last for Conv1D.
    y = Permute((2, 1))(ip)
    for n_filters, kernel_size in ((128, 8), (256, 5), (128, 3)):
        y = Conv1D(n_filters, kernel_size, padding='same', kernel_initializer='he_uniform')(y)
        y = BatchNormalization()(y)
        y = Activation('relu')(y)
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])
    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()

    # add load model code here to fine-tune

    return model
if __name__ == "__main__":
    # Evaluate the attention (ALSTM-FCN) variant; uncomment train_model to (re)train first.
    model = generate_model_2()
    #train_model(model, DATASET_INDEX, dataset_prefix='phalanx_tw', epochs=2000, batch_size=128)
    evaluate_model(model, DATASET_INDEX, dataset_prefix='phalanx_tw', batch_size=128)
    # Optional visualizations of attention context / class activation maps:
    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='phalanx_tw', visualize_sequence=True,
    # visualize_classwise=True, limit=1)
    # visualize_cam(model, DATASET_INDEX, dataset_prefix='phalanx_tw', class_id=0)
|
[
"[email protected]"
] | |
ef01adb41fcf1f474f98c2f88f09443ee34ec339
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/servicebus/azure-servicebus/tests/async_tests/mgmt_tests/test_mgmt_namespaces_async.py
|
77e82602f3a50ce880403bd482c0dcba7293d2b3
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 |
MIT
| 2019-08-11T21:16:01 | 2018-11-28T21:34:49 |
Python
|
UTF-8
|
Python
| false | false | 1,489 |
py
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import pytest
from azure.servicebus.aio.management import ServiceBusAdministrationClient
from devtools_testutils import AzureMgmtTestCase, CachedResourceGroupPreparer
from servicebus_preparer import CachedServiceBusNamespacePreparer
class ServiceBusManagementClientNamespaceAsyncTests(AzureMgmtTestCase):
    # Async management-plane tests for Service Bus namespace-level operations.
    # The preparers provision (or replay, in recorded mode) a resource group
    # and a Service Bus namespace and inject their details as keyword args.
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    async def test_async_mgmt_namespace_get_properties(self, servicebus_namespace_connection_string,
                                                            servicebus_namespace, servicebus_namespace_key_name,
                                                            servicebus_namespace_primary_key):
        """Fetch namespace properties via the async admin client and sanity-check them."""
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        properties = await mgmt_service.get_namespace_properties()
        assert properties
        # The preparer provisions a Standard-tier namespace, so the SKU is fixed.
        assert properties.messaging_sku == 'Standard'
        # assert properties.name == servicebus_namespace.name
        # This is disabled pending investigation of why it isn't getting scrubbed despite expected scrubber use.
|
[
"[email protected]"
] | |
4799dfae66b08654ba541db4e36bfdab1b6ecd9b
|
6382e12a32c3b62ec059ca45c1fee6941e51e260
|
/Part_5__Control_Flow/Chap_14__Iterables_Iterators_and_Generators/ex_14_12__aritprog_gen.py
|
b705450eca5df149e40a62b8325732285db256f8
|
[] |
no_license
|
CavalcanteLucas/python-fluent
|
e352a79e1da87ae4ee320a09196e119235a904a8
|
a4e22ab88235c5045eca52745b5e1558586dc166
|
refs/heads/master
| 2023-07-11T20:13:35.793456 | 2023-06-19T13:37:54 | 2023-06-19T13:37:54 | 224,661,365 | 1 | 0 | null | 2023-02-11T01:30:09 | 2019-11-28T13:39:03 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 233 |
py
|
def aritprog_gen(begin, step, end=None):
    """Yield an arithmetic progression begin, begin+step, begin+2*step, ...

    Values are coerced to the type of ``begin + step`` and computed as
    ``begin + step * i`` (no cumulative addition error). The progression is
    infinite when ``end`` is None, otherwise it stops before reaching ``end``.
    """
    first = type(begin + step)(begin)
    for i in itertools.count():
        value = first if i == 0 else begin + step * i
        if end is not None and not (value < end):
            return
        yield value
|
[
"[email protected]"
] | |
fbcce6a43ad58373cd35ab45d604f4c91582da33
|
e7b7505c084e2c2608cbda472bc193d4a0153248
|
/LeetcodeNew/python/LC_935.py
|
58a9a16ae88d9c6f9538e65cc80e22da6dfcaf47
|
[] |
no_license
|
Taoge123/OptimizedLeetcode
|
8e5c1cd07904dfce1248bc3e3f960d2f48057a5d
|
3e50f6a936b98ad75c47d7c1719e69163c648235
|
refs/heads/master
| 2023-02-27T21:13:40.450089 | 2023-02-07T04:11:09 | 2023-02-07T04:11:09 | 170,044,224 | 9 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,630 |
py
|
"""
https://www.youtube.com/watch?v=HTnIFivp0aw
这是一道简单但是比较有趣的题目。DP的方法还是比较容易想到的。令dp[k]表示当前拨号数字为k的方案数,显然它取决于在按k之前的那个数字的拨号方案数之和。
举个例子,第i次拨号时的dp[4]就等于第i-1次拨号时的dp[0]+dp[3]+dp[9],这是因为在盘面上骑士只能从0,3,9这三个位置跳跃到4.
"""
class SolutionTD:
    """Top-down (memoized DFS) count of distinct n-digit numbers a chess
    knight can dial on a phone keypad, starting from any digit."""

    def knightDialer(self, n):
        # Keypad adjacency: digits reachable by one knight jump from each digit.
        jumps = {1: [6, 8], 2: [7, 9], 3: [4, 8], 4: [0, 3, 9], 5: [], 6: [0, 1, 7], 7: [2, 6], 8: [1, 3], 9: [2, 4],
                 0: [4, 6]}
        self.mod = 10 ** 9 + 7
        cache = {}
        total = 0
        for start in range(10):
            total = (total + self.dfs(n - 1, start, jumps, cache)) % self.mod
        return total

    def dfs(self, n, node, table, memo):
        # n = jumps still to make starting from `node`; memoized on (n, node).
        if n == 0:
            return 1
        if (n, node) not in memo:
            ways = 0
            for neighbour in table[node]:
                ways = (ways + self.dfs(n - 1, neighbour, table, memo)) % self.mod
            memo[(n, node)] = ways
        return memo[(n, node)]
class Solution:
    """Bottom-up DP variant: counts[d] = number of length-i numbers ending at digit d."""

    def knightDialer(self, N):
        # Keypad adjacency: digits reachable by one knight jump from each digit.
        moves = {1: [6, 8], 2: [7, 9], 3: [4, 8], 4: [0, 3, 9], 5: [], 6: [0, 1, 7], 7: [2, 6], 8: [1, 3], 9: [2, 4],
                 0: [4, 6]}
        MOD = 10 ** 9 + 7
        counts = [1] * 10                 # length-1 numbers: one per starting digit
        for _ in range(N - 1):
            nxt = [0] * 10
            for digit, ways in enumerate(counts):
                for dest in moves[digit]:
                    nxt[dest] += ways
            counts = nxt
        return sum(counts) % MOD
|
[
"[email protected]"
] | |
95104df4640b4babf14d129503b2955198323497
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/11104121.py
|
e842e5077a8ce26042b14a549459d60c120ea087
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 860 |
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/11104121.py generated: Wed, 25 Jan 2017 15:25:16
#
# Event Type: 11104121
#
# ASCII decay Descriptor: [B0 -> pi+ pi- (KS0 -> pi+ pi-)]cc
#
# Generated Gauss configuration: Pythia produces the signal hadronization
# (repeated until a signal B is found), EvtGen forces the decay chain from the
# user .dec file, and a generator-level cut keeps only events whose daughters
# are inside the LHCb acceptance.
from Configurables import Generation
Generation().EventType = 11104121
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
# User decay file defining the forced B0 -> KS0 pi+ pi- decay.
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bd_KSpi+pi-=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
# Signal PDG ids: 511 = B0, -511 = anti-B0 (the ]cc in the descriptor).
Generation().SignalRepeatedHadronization.SignalPIDList = [ 511,-511 ]
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.