Dataset schema (column: type, observed range):
- blob_id: string (length 40)
- directory_id: string (length 40)
- path: string (3–281 chars)
- content_id: string (length 40)
- detected_licenses: list (0–57 items)
- license_type: string (2 classes)
- repo_name: string (6–116 chars)
- snapshot_id: string (length 40)
- revision_id: string (length 40)
- branch_name: string (313 classes)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (18.2k–668M, nullable)
- star_events_count: int64 (0–102k)
- fork_events_count: int64 (0–38.2k)
- gha_license_id: string (17 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (107 classes)
- src_encoding: string (20 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (4–6.02M)
- extension: string (78 classes)
- content: string (2–6.02M chars)
- authors: list (1 item)
- author: string (0–175 chars)

---
blob_id: 03bcb52f8522d03c2c26b48ede99705b3fae114d | content_id: b2d5ddcd02369153ebda0db8f0b7c9b73bd1dfc0
directory_id: b2cab3b5a4780ff2974a4b081ba1f070284e939c
path: /backend/reports/tests/test_api.py | extension: py | length_bytes: 11,849
repo_name: leonardocouy/100aedes | branch_name: refs/heads/master
snapshot_id: a6535e6c6174e2143afa51ff9b407123859845c2 | revision_id: 90c81b29fe6cbefe7d3c181bacf6e0dfdb57f889
detected_licenses: [] | license_type: no_license
visit_date: 2021-03-16T10:29:15.253489 | revision_date: 2017-01-31T16:18:04 | committer_date: 2017-01-31T16:18:04
github_id: 65,765,297 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import json
from django.forms import model_to_dict
from django.urls import reverse
from model_mommy import mommy
from rest_framework import status
from rest_framework.test import APITestCase
from backend.accounts.tests.test_api import get_jwt_token
from ..models import Report
class UserAuthReportApiTest(APITestCase):
def setUp(self):
self.user = mommy.make_recipe('backend.core.user')
self.login = self.client.login(username=self.user.username, password='leo')
self.report = mommy.make_one(Report, user=self.user)
self.jwt_authorization = get_jwt_token(self.user)
def test_user_is_authenticated(self):
"""
Check if user is authenticated
"""
self.assertTrue(self.login)
def test_user_can_use_api(self):
"""
User can use report endpoints
"""
urls = (reverse('report-list'), reverse('report-detail', kwargs={'pk': self.report.pk}))
with self.subTest():
for expected in urls:
response = self.client.get(expected, HTTP_AUTHORIZATION=self.jwt_authorization)
self.assertEqual(200, response.status_code)
class UserAuthErrorsReportApiTest(APITestCase):
def setUp(self):
self.report = mommy.make_one(Report)
def test_user_cant_use_api(self):
"""
Check if user has a INVALID JWT TOKEN authorizated else must return status code 401 NOT AUTHORIZATED
"""
urls = (reverse('report-list'), reverse('report-detail', kwargs={'pk': self.report.pk}))
with self.subTest():
for expected in urls:
response = self.client.get(expected)
self.assertEqual(401, response.status_code)
class CreateReportApiTest(APITestCase):
def setUp(self):
city = mommy.make_recipe('backend.core.city')
user = mommy.make_recipe('backend.core.user')
report = mommy.prepare_one(Report, city=city, user=user, _fill_optional=True)
jwt_authorization = get_jwt_token(user)
self.data = model_to_dict(report)
self.resp = self.client.post(reverse('report-list'), self.data, HTTP_AUTHORIZATION=jwt_authorization)
def test_create_report(self):
"""
        POST /api/v1/reports/ must return status code 201 CREATED and
        return the created data
"""
response_data = json.loads(self.resp.content.decode('utf8'))
expected_fields = {'user', 'city', 'number'}
with self.subTest():
for expected_field in expected_fields:
                self.assertEqual(self.data[expected_field], response_data[expected_field])
self.assertEqual(self.resp.status_code, status.HTTP_201_CREATED)
def test_save_report(self):
"""
Check if report has been saved
"""
self.assertTrue(Report.objects.exists())
class CreateInvalidReportApiTest(APITestCase):
def setUp(self):
user = mommy.make_recipe('backend.core.user')
jwt_authorization = get_jwt_token(user)
data = {'description': 'Invalid report'}
self.resp = self.client.post(reverse('report-list'), data, HTTP_AUTHORIZATION=jwt_authorization)
def test_create_invalid_report(self):
"""
POST invalid report at /api/v1/reports/
Must return status code 400 BAD REQUEST
"""
self.assertEqual(400, self.resp.status_code)
def test_has_errors(self):
"""
Context must return errors
"""
self.assertTrue(self.resp.data.serializer.errors)
def test_save_invalid_report(self):
"""
Check if invalid report has not been saved
"""
self.assertFalse(Report.objects.exists())
class ReadReportApiTest(APITestCase):
def setUp(self):
group = mommy.make_recipe('backend.core.group', name='Agente')
user = mommy.make_recipe('backend.core.user')
user2 = mommy.make_recipe('backend.core.user', username='test_user2')
super_user = mommy.make_recipe('backend.core.user', username='superuser', is_superuser=True)
        # Create an agent user in the 'Agente' group
agent_user = mommy.make_recipe('backend.core.user', username='test_agent')
agent_user.groups.add(group)
agent_user.save()
self.jwt_authorizations = {
'user_1': get_jwt_token(user),
'user_2': get_jwt_token(user2),
'agent_user': get_jwt_token(agent_user),
'super_user': get_jwt_token(super_user)
}
self.report1 = mommy.make_one(Report, description='have things here', user=user, status=1)
self.report2 = mommy.make_one(Report, description='have two things here', user=user, status=2)
self.report_user2 = mommy.make_one(Report, description='I am user 2', user=user2, status=1)
def test_read_report_list(self):
"""
GET at /api/v1/reports/
Must return status code 200 OK and
check if reports are being shown
"""
response = self.client.get(reverse('report-list'), HTTP_AUTHORIZATION=self.jwt_authorizations['user_1'])
self.assertEqual(200, response.status_code)
self.assertContains(response, self.report1.description)
self.assertContains(response, self.report2.description)
def test_can_read_only_own_reports(self):
"""
GET at /api/v1/reports/
Must return status code 200 OK and
certify that only own reports are being shown
"""
response = self.client.get(reverse('report-list'), HTTP_AUTHORIZATION=self.jwt_authorizations['user_2'])
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['user'], self.report_user2.user.pk)
def test_agent_can_read_all_reports(self):
"""
GET at /api/v1/reports/
Must return all reports if user is superuser or agent
and check if all reports are being shown
"""
users_token = [self.jwt_authorizations['agent_user'], self.jwt_authorizations['super_user']]
with self.subTest():
for expected in users_token:
response = self.client.get(reverse('report-list'),
HTTP_AUTHORIZATION=expected)
self.assertEqual(len(response.data), 3)
self.assertContains(response, self.report1.description)
self.assertContains(response, self.report2.description)
self.assertContains(response, self.report_user2.description)
class ReadReportDetailErrorsApiTest(APITestCase):
def setUp(self):
user = mommy.make_recipe('backend.core.user')
self.jwt_authorization = get_jwt_token(user)
def test_read_invalid_report(self):
"""
GET invalid report_id at /api/v1/reports/report_id
Must return status code 404 NOT FOUND
"""
response = self.client.get(reverse('report-detail', kwargs={'pk': 0}),
HTTP_AUTHORIZATION=self.jwt_authorization)
self.assertEqual(404, response.status_code)
def test_not_read_other_report_details(self):
"""
        A user can't read details of reports that aren't his
GET at /api/v1/reports/
Must return status code 403 FORBIDDEN
"""
other_user = mommy.make_recipe('backend.core.user', username='leonardo2')
report = mommy.make_one(Report, user=other_user)
response = self.client.get(reverse('report-detail', kwargs={'pk': report.pk}),
HTTP_AUTHORIZATION=self.jwt_authorization)
self.assertEqual(403, response.status_code)
class UpdateDeleteReportApiTest(APITestCase):
def setUp(self):
group = mommy.make_recipe('backend.core.group', name='Agente')
user = mommy.make_recipe('backend.core.user')
super_user = mommy.make_recipe('backend.core.user', username='superuser', is_superuser=True)
        # Create an agent user in the 'Agente' group
agent_user = mommy.make_recipe('backend.core.user', username='test_agent')
agent_user.groups.add(group)
agent_user.save()
self.jwt_authorizations = {
'user': get_jwt_token(user),
'agent_user': get_jwt_token(agent_user),
'super_user': get_jwt_token(super_user)
}
self.report = mommy.make_one(Report, city__name='Bom Despacho', user=user)
def test_agent_can_update_any_reports(self):
"""
PATCH at /api/v1/reports/
Should update a report made by another user
but user must be a superuser or agent
"""
self.report.description = 'Changed'
data = {'description': 'Changed'}
users_token = [self.jwt_authorizations['agent_user'], self.jwt_authorizations['super_user']]
with self.subTest():
for expected in users_token:
response = self.client.patch(reverse('report-detail', kwargs={'pk': self.report.pk}), data,
HTTP_AUTHORIZATION=expected)
response_data = json.loads(response.content.decode('utf8'))
self.assertEqual(200, response.status_code)
self.assertEqual(response_data['description'], 'Changed')
def test_agent_can_delete_any_reports(self):
"""
A super-user or agent can DELETE User's reports at /api/v1/reports/report_id/
Must return status code 204 NO CONTENT
"""
response = self.client.delete(reverse('report-detail', kwargs={'pk': self.report.pk}),
HTTP_AUTHORIZATION=self.jwt_authorizations['agent_user'])
self.assertEqual(204, response.status_code)
class UpdateDeleteInvalidReportApiTest(APITestCase):
def setUp(self):
self.user = mommy.make_recipe('backend.core.user')
self.report = mommy.make_one(Report, city__name='Bom Despacho', user=self.user, _fill_optional=True)
self.report.city = None
self.data = model_to_dict(self.report)
        # Create an agent user in the 'Agente' group
group = mommy.make_recipe('backend.core.group', name='Agente')
self.agent_user = mommy.make_recipe('backend.core.user', username='test_agent')
self.agent_user.groups.add(group)
self.agent_user.save()
def test_user_cant_update(self):
"""
        A user can't update a report at /api/v1/reports/report_id/
Must return status code 403 FORBIDDEN
"""
jwt_authorization = get_jwt_token(self.user)
resp = self.client.put(reverse('report-detail', kwargs={'pk': self.report.pk}), self.data,
HTTP_AUTHORIZATION=jwt_authorization)
self.assertEqual(403, resp.status_code)
def test_user_cant_delete(self):
"""
        A user can't delete a report at /api/v1/reports/report_id/
Must return status code 403 FORBIDDEN
"""
jwt_authorization = get_jwt_token(self.user)
resp = self.client.delete(reverse('report-detail', kwargs={'pk': self.report.pk}),
HTTP_AUTHORIZATION=jwt_authorization)
self.assertEqual(403, resp.status_code)
def test_update_invalid_report(self):
"""
PUT invalid report at /api/v1/reports/report_id/
Must return status code 400 BAD REQUEST with ERRORS
        and check that the Report has not changed
"""
jwt_authorization = get_jwt_token(self.agent_user)
resp = self.client.put(reverse('report-detail', kwargs={'pk': self.report.pk}), self.data,
HTTP_AUTHORIZATION=jwt_authorization)
self.assertEqual(400, resp.status_code)
self.assertTrue(resp.data.serializer.errors)
self.assertTrue(Report.objects.get(pk=self.report.pk))
authors: ["[email protected]"] | author: ""
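The tests above rely on a get_jwt_token helper imported from backend.accounts.tests.test_api. As a rough sketch only (not the repository's actual code), such a helper is commonly built on django-rest-framework-jwt's configurable handlers; everything below is an assumption based on that library:

from rest_framework_jwt.settings import api_settings

def get_jwt_token(user):
    # Encode a JWT for the user and prefix it for the HTTP_AUTHORIZATION header
    payload = api_settings.JWT_PAYLOAD_HANDLER(user)
    return 'JWT ' + api_settings.JWT_ENCODE_HANDLER(payload)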
---
blob_id: d15d78dd5befb823ae36b970032deb40772ed1b2 | content_id: 59f96e7a03113c256d7bed50ccd53d8bd4b6095e
directory_id: 8940481119114c545c8b01146e3aaa53b4da9150
path: /event_two.py | extension: py | length_bytes: 12,930
repo_name: Malcolm1998/CS412T1C4 | branch_name: refs/heads/master
snapshot_id: 837791f02fe45ed124bc572370c242a6797d9466 | revision_id: 7924b4a3b8227140b4a79454c717c91535faf172
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2020-09-19T23:10:12.844333 | revision_date: 2019-12-06T06:54:06 | committer_date: 2019-12-06T06:54:06
github_id: 224,319,754 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import signal
import rospy
import smach
import smach_ros
import math
import time
import cv2
from math import tanh
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image
import numpy as np
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Joy
from kobuki_msgs.msg import Sound
import traceback
import sys
sys.path.insert(1, '/home/malcolm/Documents/CMPUT_412/Competition/CS412T1C4/shapeTesting')
import v2
global shutdown_requested
global checked
global previous_shape
class RotateLeft(smach.State):
def __init__(self, callbacks):
smach.State.__init__(self, outcomes=['done2', 'follow', 'success2'])
self.callbacks = callbacks
self.twist = Twist()
self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
def execute(self, userdata):
global shutdown_requested
global checked
while not shutdown_requested:
target_heading = (self.callbacks.bot_odom_heading + 80) % 360
            val = turn(self, target_heading)
            if val is not None:
                return val
# turning = True
# previous_difference = None
# while turning:
# if shutdown_requested:
# return 'done2'
# difference = minimum_angle_between_headings(target_heading, self.callbacks.bot_odom_heading)
#
# if previous_difference is None:
# self.twist.angular.z = 0.4
# self.cmd_vel_pub.publish(self.twist)
# else:
# if difference < 1:
# turning = False
# self.twist.angular.z = 0
# self.cmd_vel_pub.publish(self.twist)
# else:
# self.twist.angular.z = 0.4
# self.cmd_vel_pub.publish(self.twist)
#
# if previous_difference != difference:
# previous_difference = difference
if checked:
return 'success2'
else:
return 'follow'
return 'done2'
class Follow(smach.State):
def __init__(self, callbacks):
smach.State.__init__(self, outcomes=['done2', 'stop'])
self.callbacks = callbacks
self.prev_error = None
self.Kp = 1.0 / 50.0
self.Ki = 1.0 / 50.0
self.Kd = 1.0 / 50.0
self.speed = 0.8
self.twist = Twist()
self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
def execute(self, userdata):
global shutdown_requested
while not shutdown_requested:
if self.callbacks.line_white_mask is not None and self.callbacks.red_mask is not None:
bottom_white_mask = self.callbacks.line_white_mask.copy()
bottom_red_mask = self.callbacks.red_mask.copy()
# Check if a long red strip has been detected
h = self.callbacks.secondary_h
w = self.callbacks.secondary_w
search_top = 3 * h / 4
search_bot = h
bottom_white_mask[0:search_top, 0:w] = 0
bottom_white_mask[search_bot:h, 0:w] = 0
bottom_red_mask[h*1/2:h, 0:w] = 0
red_pixel_count = cv2.sumElems(bottom_red_mask)[0] / 255
white_pixel_count = cv2.sumElems(bottom_white_mask)[0] / 255
if white_pixel_count < 10:
print("No white found")
print(white_pixel_count)
return 'stop'
RM = cv2.moments(bottom_red_mask)
if RM['m00'] > 0:
ry = int(RM['m01'] / RM['m00'])
if red_pixel_count > 500 and ry > 100:
print(red_pixel_count)
print(ry)
print("red found")
return 'stop'
# If there is no significant red line, follow white line
WM = cv2.moments(bottom_white_mask)
if WM['m00'] > 0:
cx = int(WM['m10'] / WM['m00'])
cy = int(WM['m01'] / WM['m00'])
# BEGIN CONTROL
if self.prev_error is None:
error = cx - w / 2
rotation = -(self.Kp * float(error))
self.prev_error = error
else:
error = cx - w / 2
rotation = -(self.Kp * float(error) + self.Kd * (error - self.prev_error))
self.prev_error = error
self.twist.linear.x = self.speed
self.twist.angular.z = rotation
self.cmd_vel_pub.publish(self.twist)
# END CONTROL
return 'done2'
class Stop(smach.State):
def __init__(self, callbacks):
smach.State.__init__(self, outcomes=['done2', 'check', 'rotate_left'])
self.callbacks = callbacks
self.twist = Twist()
self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
self.prev_error = None
self.Kp = 1.0 / 150.0
self.Ki = 1.0 / 150.0
self.Kd = 1.0 / 150.0
self.speed = 0.8
def execute(self, userdata):
global shutdown_requested
global checked
if not checked:
distance = 0.1
else:
print("THE DISTANCE IS 0.6")
distance = 1.2
while self.callbacks.bot_odom_position is None:
time.sleep(1)
sp = self.callbacks.bot_odom_position
ep = sp
start = time.time()
while math.sqrt((sp.x - ep.x) ** 2 + (sp.y - ep.y) ** 2) < distance:
#print(str(math.sqrt((sp.x - ep.x) ** 2 + (sp.y - ep.y) ** 2)) + " "+str(distance))
if shutdown_requested:
return 'done2'
h = self.callbacks.secondary_h
w = self.callbacks.secondary_w
search_top = 3 * h / 4
search_bot = h
bottom_white_mask = self.callbacks.line_white_mask.copy()
bottom_white_mask[0:search_top, 0:w] = 0
bottom_white_mask[search_bot:h, 0:w] = 0
M = cv2.moments(bottom_white_mask)
if M['m00'] > 10:
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
# BEGIN CONTROL
if self.prev_error is None:
error = cx - w / 2
rotation = -(self.Kp * float(error))
self.prev_error = error
else:
error = cx - w / 2
rotation = -(self.Kp * float(error) + self.Kd * (error - self.prev_error))
self.prev_error = error
self.twist.linear.x = self.speed
self.twist.angular.z = rotation
self.cmd_vel_pub.publish(self.twist)
# END CONTROL
ep = self.callbacks.bot_odom_position
else:
self.twist.linear.x = 2.0
self.cmd_vel_pub.publish(self.twist)
if time.time() - start > 0.7:
print("break")
break
self.twist.linear.x = 0
self.twist.angular.z = 0
self.cmd_vel_pub.publish(self.twist)
if checked:
return 'rotate_left'
else:
return 'check'
class Check(smach.State):
def __init__(self, callbacks):
smach.State.__init__(self, outcomes=['done2', 'rotate_180'])
self.callbacks = callbacks
self.twist = Twist()
self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
self.sound_pub = rospy.Publisher('/mobile_base/commands/sound', Sound, queue_size=1)
def execute(self, userdata):
global shutdown_requested
global checked
global previous_shape
while not shutdown_requested:
if self.callbacks.line_white_mask is not None and self.callbacks.red_mask is not None:
symbol_red_mask = self.callbacks.symbol_red_mask.copy()
symbol_green_mask = self.callbacks.symbol_green_mask.copy()
h = self.callbacks.main_h
w = self.callbacks.main_w
symbol_red_mask[0:h / 4, 0:w] = 0
symbol_red_mask[3 * h / 4:h, 0:w] = 0
symbol_green_mask[0:h / 4, 0:w] = 0
symbol_green_mask[3 * h / 4:h, 0:w] = 0
count = v2.count_objects(symbol_green_mask)
count += v2.count_objects(symbol_red_mask)
for i in range(int(count)):
self.sound_pub.publish(1)
time.sleep(1)
checked = True
#previous_shape = detect_shape.detect_shape(symbol_green_mask, h, w)
try:
previous_shape = v2.shapeDetection('green', 1)
print("green shape detected:" + previous_shape)
except Exception as e:
print(e)
traceback.print_exc()
return 'rotate_180'
        return 'done2'
class Rotate180(smach.State):
def __init__(self, callbacks):
smach.State.__init__(self, outcomes=['done2', 'follow'])
self.callbacks = callbacks
self.twist = Twist()
self.cmd_vel_pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=1)
def execute(self, userdata):
global shutdown_requested
while not shutdown_requested:
target_heading = (self.callbacks.bot_odom_heading + 180) % 360
turning = True
previous_difference = None
while turning:
if shutdown_requested:
return 'done2'
difference = minimum_angle_between_headings(target_heading, self.callbacks.bot_odom_heading)
if previous_difference is None:
self.twist.angular.z = 0.4
self.cmd_vel_pub.publish(self.twist)
else:
if difference < 1:
turning = False
self.twist.angular.z = 0
self.cmd_vel_pub.publish(self.twist)
else:
self.twist.angular.z = 0.4
self.cmd_vel_pub.publish(self.twist)
if previous_difference != difference:
previous_difference = difference
return 'follow'
return 'done2'
def minimum_angle_between_headings(a, b):
heading_difference = a - b
if heading_difference < 0:
heading_difference += 360
if heading_difference > 180:
heading_difference = b - a
if heading_difference < 0:
heading_difference += 360
return heading_difference
def turn(classObj, target_heading):
global shutdown_requested
turning = True
previous_difference = None
turnRate = 0.4
while turning:
if shutdown_requested:
return 'done2'
difference = minimum_angle_between_headings(target_heading, classObj.callbacks.bot_odom_heading)
if previous_difference is None:
classObj.twist.angular.z = turnRate
classObj.cmd_vel_pub.publish(classObj.twist)
else:
if difference < 1:
turning = False
classObj.twist.angular.z = 0
classObj.cmd_vel_pub.publish(classObj.twist)
else:
classObj.twist.angular.z = turnRate
classObj.cmd_vel_pub.publish(classObj.twist)
if previous_difference != difference:
previous_difference = difference
def get_state_machine(callbacks):
global checked
checked = False
sm_event_2 = smach.StateMachine(outcomes=['DONE2', 'SUCCESS2'])
with sm_event_2:
smach.StateMachine.add('ROTATE_LEFT', RotateLeft(callbacks),
transitions={'done2': 'DONE2', 'follow': 'FOLLOW', 'success2': 'SUCCESS2'})
smach.StateMachine.add('FOLLOW', Follow(callbacks),
transitions={'done2': 'DONE2', 'stop': 'STOP'})
smach.StateMachine.add('STOP', Stop(callbacks),
transitions={'done2': 'DONE2', 'check': 'CHECK', 'rotate_left': 'ROTATE_LEFT'})
smach.StateMachine.add('CHECK', Check(callbacks),
transitions={'done2': 'DONE2', 'rotate_180': 'ROTATE_180'})
smach.StateMachine.add('ROTATE_180', Rotate180(callbacks),
transitions={'done2': 'DONE2', 'follow': 'FOLLOW'})
return sm_event_2
authors: ["[email protected]"] | author: ""
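Follow and Stop above steer by proportional control on the centroid of a thresholded camera mask. A minimal self-contained sketch of that idea (synthetic mask; the gain matches the Follow state, everything else is illustrative):

import cv2
import numpy as np

Kp = 1.0 / 50.0                              # proportional gain, as in Follow
mask = np.zeros((480, 640), dtype=np.uint8)  # synthetic binary "white line" mask
mask[360:480, 290:330] = 255                 # pretend the line sits left of center

M = cv2.moments(mask)
if M['m00'] > 0:                             # any line pixels found?
    cx = int(M['m10'] / M['m00'])            # centroid column of the line
    error = cx - mask.shape[1] / 2           # pixel offset from image center
    rotation = -Kp * float(error)            # steer against the offset
    print('angular.z =', rotation)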
---
blob_id: 23360d6f5e4397081856508eee1b3434081d5f72 | content_id: 5a57bb949b4acca06601302e59cb1bab6d24e2ea
directory_id: d3325fb51a99a8fb25a5bef8f61dc943333417a1
path: /neuron_models/experiments/MLI_exp_current_param_sweep.py | extension: py | length_bytes: 2,177
repo_name: blennon/research | branch_name: refs/heads/master
snapshot_id: e26ccfb5a33543f72e84d96655b69f857b4ff422 | revision_id: 6579a4d9636332267d0f26d8d4c8226e4fecf85d
detected_licenses: [] | license_type: no_license
visit_date: 2022-09-18T02:53:11.056480 | revision_date: 2015-02-10T17:37:54 | committer_date: 2015-02-10T17:37:54
github_id: 7,845,366 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
'''
This script performs a grid search for the current parameters that best match the data
from Hausser and Clark (1997)
'''
import datetime
import os
import gc
import multiprocessing
import itertools
from brian import *
import sys
from neuron_models import *
import cPickle
import time
set_global_preferences(useweave=True, usenewpropagate=True, usecodegen=True, usecodegenweave=True)
defaultclock.dt = .25*ms
from scipy.stats import skew
def isi_mean_and_std(monitor):
'''
    compute the mean, standard deviation and skew of interspike intervals
of a group of neurons
'''
isi = []
for n_ind, times in monitor.spiketimes.iteritems():
isi += list(diff(times)*1000)
return mean(isi), var(isi)**.5, skew(isi)
def run_net((k,theta)):
seed(os.getpid())
print os.getpid()
reinit()
reinit_default_clock()
clear(True)
gc.collect()
T = 6000
N_MLI = 1
MLI = MLIGroup(N_MLI)
@network_operation(Clock(dt=defaultclock.dt))
def random_current():
MLI.I = (k + exponential(theta,size=len(MLI))) * nA
# Monitor
MS_MLI = SpikeMonitor(MLI)
MR_MLI = PopulationRateMonitor(MLI,bin=1*ms)
MISI_MLI = ISIHistogramMonitor(MLI,bins=arange(0,162,2)*ms)
start = time.time()
run(T*msecond)
print time.time() - start
    mli_mean, mli_std, mli_skew = isi_mean_and_std(MS_MLI)
    return k,theta,mean(MR_MLI.rate), mli_std/mli_mean, mli_skew
if __name__ == "__main__":
pool = multiprocessing.Pool(6)
params = []
for k in linspace(.016,.018,25):
for theta in linspace(.009,.011,25):
if k+theta < .029 and k+theta > .02:
params.append((k,theta))
print len(params)
results = pool.map(run_net, params)
    out_dir = '/home/bill/research/data/neuron_models/molecular_layer/mli_exp_current_param_sweep/%s/'%datetime.datetime.now().isoformat()
os.makedirs(out_dir)
with open(out_dir+'results.txt','w') as outf:
outf.write('\t'.join(['k','theta','mli_mean_firing_rate','mli_cv', 'mli_skew'])+'\n')
for r in results:
outf.write('\t'.join(map(str,r))+'\n')
authors: ["[email protected]"] | author: ""
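The sweep scores each (k, theta) pair with the ISI statistics from isi_mean_and_std; the coefficient of variation reported in the results table is the ISI standard deviation divided by the ISI mean. A standalone sketch of the same computation on synthetic spike times (all numbers illustrative):

import numpy as np
from scipy.stats import skew

times = np.sort(np.random.uniform(0, 6.0, size=200))  # 200 spikes over 6 s
isi = np.diff(times) * 1000                           # interspike intervals in ms
cv = isi.std() / isi.mean()                           # coefficient of variation
print(isi.mean(), cv, skew(isi))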
---
blob_id: b91c18f436e31c5115be02df8c6f787bb7876279 | content_id: 4fc04d9644521c33d7ea5270a4e0a09fc913ebdb
directory_id: 189b4a297123678100da31ea7fbe6abef5902d60
path: /200_mise_en_base/parse_texte.py | extension: py | length_bytes: 18,311
repo_name: michelbl/loi | branch_name: refs/heads/master
snapshot_id: 24c4d32b6caa5c5288c99722c4f38f36eee24c5e | revision_id: 73d67754b3daad69fd8318ccb7ef18ae39509184
detected_licenses: [] | license_type: no_license
visit_date: 2020-06-11T08:27:57.763320 | revision_date: 2017-01-10T15:32:58 | committer_date: 2017-01-10T15:32:58
github_id: 77,166,025 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import os
import sys
import xml.etree.ElementTree as ElementTree
import collections
from collections import Counter
import psycopg2
racine_legi = '/home/michel/legi_plat/'
racine_jorf = '/home/michel/jorf_plat/'
class FrozenDict(collections.Mapping):
"""Mike Graham http://stackoverflow.com/questions/2703599/what-would-a-frozen-dict-be"""
def __init__(self, *args, **kwargs):
self._d = dict(*args, **kwargs)
self._hash = None
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
def __getitem__(self, key):
return self._d[key]
def __hash__(self):
# It would have been simpler and maybe more obvious to
# use hash(tuple(sorted(self._d.iteritems()))) from this discussion
# so far, but this solution is O(n). I don't know what kind of
# n we are going to run into, but sometimes it's hard to resist the
# urge to optimize when it will gain improved algorithmic performance.
if self._hash is None:
self._hash = 0
for pair in self.items():
self._hash ^= hash(pair)
return self._hash
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, str(self._d))
def __str__(self):
return self.__repr__()
def parse_struct(contenu):
valeurs = {}
TEXTELR = ElementTree.fromstring(contenu)
assert TEXTELR.tag == 'TEXTELR'
#
META = TEXTELR[0]
assert META.tag == 'META'
##
META_COMMUN = META[0]
assert META_COMMUN.tag == 'META_COMMUN'
###
ID = META_COMMUN[0]
assert ID.tag == 'ID'
valeurs['ID'] = ID.text
id_eli_present = 0
valeurs['ID_ELI'] = ''
if META_COMMUN[1].tag == 'ID_ELI':
id_eli_present = 1
ID_ELI = META_COMMUN[1]
valeurs['ID_ELI'] = ID_ELI.text
eli_alias_present = 0
valeurs['ID_ELI_ALIAS'] = ''
if META_COMMUN[1 + id_eli_present].tag == 'ELI_ALIAS':
eli_alias_present = 1
ELI_ALIAS = META_COMMUN[1 + id_eli_present]
assert len(list(ELI_ALIAS)) == 1
ID_ELI_ALIAS = ELI_ALIAS[0]
assert ID_ELI_ALIAS.tag == 'ID_ELI_ALIAS'
valeurs['ID_ELI_ALIAS'] = ID_ELI_ALIAS.text
ANCIEN_ID = META_COMMUN[1 + id_eli_present + eli_alias_present]
assert ANCIEN_ID.tag == 'ANCIEN_ID'
valeurs['ANCIEN_ID'] = ANCIEN_ID.text
ORIGINE = META_COMMUN[2 + id_eli_present + eli_alias_present]
assert ORIGINE.tag == 'ORIGINE'
valeurs['ORIGINE'] = ORIGINE.text
URL = META_COMMUN[3 + id_eli_present + eli_alias_present]
assert URL.tag == 'URL'
valeurs['URL'] = URL.text
NATURE = META_COMMUN[4 + id_eli_present + eli_alias_present]
assert NATURE.tag == 'NATURE'
valeurs['NATURE'] = NATURE.text
##
META_SPEC = META[1]
assert META_SPEC.tag == 'META_SPEC'
###
META_TEXTE_CHRONICLE = META_SPEC[0]
assert META_TEXTE_CHRONICLE.tag == 'META_TEXTE_CHRONICLE'
####
CID = META_TEXTE_CHRONICLE[0]
assert CID.tag == 'CID'
valeurs['CID'] = CID.text
NUM = META_TEXTE_CHRONICLE[1]
assert NUM.tag == 'NUM'
valeurs['NUM'] = NUM.text
NUM_SEQUENCE = META_TEXTE_CHRONICLE[2]
assert NUM_SEQUENCE.tag == 'NUM_SEQUENCE'
valeurs['NUM_SEQUENCE'] = NUM_SEQUENCE.text
NOR = META_TEXTE_CHRONICLE[3]
assert NOR.tag == 'NOR'
valeurs['NOR'] = NOR.text
DATE_PUBLI = META_TEXTE_CHRONICLE[4]
assert DATE_PUBLI.tag == 'DATE_PUBLI'
valeurs['DATE_PUBLI'] = DATE_PUBLI.text
DATE_TEXTE = META_TEXTE_CHRONICLE[5]
assert DATE_TEXTE.tag == 'DATE_TEXTE'
valeurs['DATE_TEXTE'] = DATE_TEXTE.text
der_modif_present = 0
valeurs['DERNIERE_MODIFICATION'] = ''
if META_TEXTE_CHRONICLE[6].tag == 'DERNIERE_MODIFICATION':
der_modif_present = 1
DERNIERE_MODIFICATION = META_TEXTE_CHRONICLE[6]
valeurs['DERNIERE_MODIFICATION'] = DERNIERE_MODIFICATION.text
versions_a_venir_present = 0
VERSIONS_A_VENIR_data = []
if META_TEXTE_CHRONICLE[6 + der_modif_present].tag == 'VERSIONS_A_VENIR':
versions_a_venir_present = 1
VERSIONS_A_VENIR = META_TEXTE_CHRONICLE[6 + der_modif_present]
#####
for VERSION_A_VENIR in VERSIONS_A_VENIR:
assert VERSION_A_VENIR.tag == 'VERSION_A_VENIR'
VERSIONS_A_VENIR_data.append(VERSION_A_VENIR.text)
valeurs['VERSIONS_A_VENIR'] = tuple(VERSIONS_A_VENIR_data)
ORIGINE_PUBLI = META_TEXTE_CHRONICLE[6 + der_modif_present + versions_a_venir_present]
assert ORIGINE_PUBLI.tag == 'ORIGINE_PUBLI'
valeurs['ORIGINE_PUBLI'] = ORIGINE_PUBLI.text
PAGE_DEB_PUBLI = META_TEXTE_CHRONICLE[7 + der_modif_present + versions_a_venir_present]
assert PAGE_DEB_PUBLI.tag == 'PAGE_DEB_PUBLI'
valeurs['PAGE_DEB_PUBLI'] = PAGE_DEB_PUBLI.text
PAGE_FIN_PUBLI = META_TEXTE_CHRONICLE[8 + der_modif_present + versions_a_venir_present]
assert PAGE_FIN_PUBLI.tag == 'PAGE_FIN_PUBLI'
valeurs['PAGE_FIN_PUBLI'] = PAGE_FIN_PUBLI.text
#
VERSIONS = TEXTELR[1]
assert VERSIONS.tag == 'VERSIONS'
##
VERSIONS_data = []
for VERSION in VERSIONS:
assert VERSION.tag == 'VERSION'
VERSION_data = {}
VERSION_data['etat'] = VERSION.attrib['etat']
###
LIEN_TXT = VERSION[0]
assert LIEN_TXT.tag == 'LIEN_TXT'
VERSION_data['debut'] = LIEN_TXT.attrib['debut']
VERSION_data['fin'] = LIEN_TXT.attrib['fin']
VERSION_data['id_'] = LIEN_TXT.attrib['id']
VERSION_data['num'] = LIEN_TXT.attrib['num']
VERSIONS_data.append(FrozenDict(VERSION_data))
valeurs['VERSIONS'] = frozenset(VERSIONS_data)
#
STRUCT = TEXTELR[2]
assert STRUCT.tag == 'STRUCT'
##
LIENS_ART_data = []
LIENS_SECTION_TA_data = []
for LIEN in STRUCT:
if LIEN.tag == 'LIEN_ART':
LIEN_ART_data = {}
LIEN_ART_data['debut'] = LIEN.attrib['debut']
LIEN_ART_data['etat'] = LIEN.attrib['etat']
LIEN_ART_data['fin'] = LIEN.attrib['fin']
LIEN_ART_data['id_'] = LIEN.attrib['id']
LIEN_ART_data['num'] = LIEN.attrib['num']
LIEN_ART_data['origine'] = LIEN.attrib['origine']
LIENS_ART_data.append(FrozenDict(LIEN_ART_data))
elif LIEN.tag == 'LIEN_SECTION_TA':
LIEN_SECTION_TA_data = {}
LIEN_SECTION_TA_data['cid'] = LIEN.attrib['cid']
LIEN_SECTION_TA_data['debut'] = LIEN.attrib['debut']
LIEN_SECTION_TA_data['etat'] = LIEN.attrib['etat']
LIEN_SECTION_TA_data['fin'] = LIEN.attrib['fin']
LIEN_SECTION_TA_data['id_'] = LIEN.attrib['id']
LIEN_SECTION_TA_data['niv'] = LIEN.attrib['niv']
LIEN_SECTION_TA_data['url'] = LIEN.attrib['url']
LIENS_SECTION_TA_data.append(FrozenDict(LIEN_SECTION_TA_data))
else:
raise ValueError(LIEN.tag)
valeurs['LIENS_ART'] = tuple(LIENS_ART_data)
valeurs['LIENS_SECTION_TA'] = tuple(LIENS_SECTION_TA_data)
return valeurs
def parse_version(contenu):
valeurs = {}
TEXTE_VERSION = ElementTree.fromstring(contenu)
assert TEXTE_VERSION.tag == 'TEXTE_VERSION'
#
META = TEXTE_VERSION[0]
assert META.tag == 'META'
##
META_COMMUN = META[0]
assert META_COMMUN.tag == 'META_COMMUN'
###
ID = META_COMMUN[0]
assert ID.tag == 'ID'
valeurs['ID'] = ID.text
id_eli_present = 0
valeurs['ID_ELI'] = ''
if META_COMMUN[1].tag == 'ID_ELI':
id_eli_present = 1
ID_ELI = META_COMMUN[1]
valeurs['ID_ELI'] = ID_ELI.text
eli_alias_present = 0
valeurs['ID_ELI_ALIAS'] = ''
if META_COMMUN[1 + id_eli_present].tag == 'ELI_ALIAS':
eli_alias_present = 1
ELI_ALIAS = META_COMMUN[1 + id_eli_present]
assert len(list(ELI_ALIAS)) == 1
ID_ELI_ALIAS = ELI_ALIAS[0]
assert ID_ELI_ALIAS.tag == 'ID_ELI_ALIAS'
valeurs['ID_ELI_ALIAS'] = ID_ELI_ALIAS.text
ANCIEN_ID = META_COMMUN[1 + id_eli_present + eli_alias_present]
assert ANCIEN_ID.tag == 'ANCIEN_ID'
valeurs['ANCIEN_ID'] = ANCIEN_ID.text
ORIGINE = META_COMMUN[2 + id_eli_present + eli_alias_present]
assert ORIGINE.tag == 'ORIGINE'
valeurs['ORIGINE'] = ORIGINE.text
URL = META_COMMUN[3 + id_eli_present + eli_alias_present]
assert URL.tag == 'URL'
valeurs['URL'] = URL.text
NATURE = META_COMMUN[4 + id_eli_present + eli_alias_present]
assert NATURE.tag == 'NATURE'
valeurs['NATURE'] = NATURE.text
##
META_SPEC = META[1]
assert META_SPEC.tag == 'META_SPEC'
###
META_TEXTE_CHRONICLE = META_SPEC[0]
assert META_TEXTE_CHRONICLE.tag == 'META_TEXTE_CHRONICLE'
####
CID = META_TEXTE_CHRONICLE[0]
assert CID.tag == 'CID'
valeurs['CID'] = CID.text
NUM = META_TEXTE_CHRONICLE[1]
assert NUM.tag == 'NUM'
valeurs['NUM'] = NUM.text
NUM_SEQUENCE = META_TEXTE_CHRONICLE[2]
assert NUM_SEQUENCE.tag == 'NUM_SEQUENCE'
valeurs['NUM_SEQUENCE'] = NUM_SEQUENCE.text
NOR = META_TEXTE_CHRONICLE[3]
assert NOR.tag == 'NOR'
valeurs['NOR'] = NOR.text
DATE_PUBLI = META_TEXTE_CHRONICLE[4]
assert DATE_PUBLI.tag == 'DATE_PUBLI'
valeurs['DATE_PUBLI'] = DATE_PUBLI.text
DATE_TEXTE = META_TEXTE_CHRONICLE[5]
assert DATE_TEXTE.tag == 'DATE_TEXTE'
valeurs['DATE_TEXTE'] = DATE_TEXTE.text
der_modif_present = 0
valeurs['DERNIERE_MODIFICATION'] = ''
if META_TEXTE_CHRONICLE[6].tag == 'DERNIERE_MODIFICATION':
der_modif_present = 1
DERNIERE_MODIFICATION = META_TEXTE_CHRONICLE[6]
valeurs['DERNIERE_MODIFICATION'] = DERNIERE_MODIFICATION.text
versions_a_venir_present = 0
VERSIONS_A_VENIR_data = []
if META_TEXTE_CHRONICLE[6 + der_modif_present].tag == 'VERSIONS_A_VENIR':
versions_a_venir_present = 1
VERSIONS_A_VENIR = META_TEXTE_CHRONICLE[6 + der_modif_present]
#####
for VERSION_A_VENIR in VERSIONS_A_VENIR:
assert VERSION_A_VENIR.tag == 'VERSION_A_VENIR'
VERSIONS_A_VENIR_data.append(VERSION_A_VENIR.text)
valeurs['VERSIONS_A_VENIR'] = tuple(VERSIONS_A_VENIR_data)
ORIGINE_PUBLI = META_TEXTE_CHRONICLE[6 + der_modif_present + versions_a_venir_present]
assert ORIGINE_PUBLI.tag == 'ORIGINE_PUBLI'
valeurs['ORIGINE_PUBLI'] = ORIGINE_PUBLI.text
PAGE_DEB_PUBLI = META_TEXTE_CHRONICLE[7 + der_modif_present + versions_a_venir_present]
assert PAGE_DEB_PUBLI.tag == 'PAGE_DEB_PUBLI'
valeurs['PAGE_DEB_PUBLI'] = PAGE_DEB_PUBLI.text
PAGE_FIN_PUBLI = META_TEXTE_CHRONICLE[8 + der_modif_present + versions_a_venir_present]
assert PAGE_FIN_PUBLI.tag == 'PAGE_FIN_PUBLI'
valeurs['PAGE_FIN_PUBLI'] = PAGE_FIN_PUBLI.text
###
META_TEXTE_VERSION = META_SPEC[1]
assert META_TEXTE_VERSION.tag == 'META_TEXTE_VERSION'
####
TITRE = META_TEXTE_VERSION[0]
assert TITRE.tag == 'TITRE'
valeurs['TITRE'] = TITRE.text
TITREFULL = META_TEXTE_VERSION[1]
assert TITREFULL.tag == 'TITREFULL'
valeurs['TITREFULL'] = TITREFULL.text
etat_present = 0
valeurs['ETAT'] = ''
if META_TEXTE_VERSION[2].tag == 'ETAT':
etat_present = 1
ETAT = META_TEXTE_VERSION[2]
valeurs['ETAT'] = ETAT.text
DATE_DEBUT = META_TEXTE_VERSION[2 + etat_present]
assert DATE_DEBUT.tag == 'DATE_DEBUT'
valeurs['DATE_DEBUT'] = DATE_DEBUT.text
DATE_FIN = META_TEXTE_VERSION[3 + etat_present]
assert DATE_FIN.tag == 'DATE_FIN'
valeurs['DATE_FIN'] = DATE_FIN.text
AUTORITE = META_TEXTE_VERSION[4 + etat_present]
assert AUTORITE.tag == 'AUTORITE'
valeurs['AUTORITE'] = AUTORITE.text
MINISTERE = META_TEXTE_VERSION[5 + etat_present]
assert MINISTERE.tag == 'MINISTERE'
valeurs['MINISTERE'] = MINISTERE.text
mcs_txt_present = 0
valeurs['MCS_TXT'] = ''
if (len(META_TEXTE_VERSION) >= 7 + etat_present) and (META_TEXTE_VERSION[6 + etat_present].tag == 'MCS_TXT'):
mcs_txt_present = 1
MCS_TXT = META_TEXTE_VERSION[6 + etat_present]
valeurs['MCS_TXT'] = MCS_TXT.text
liens_present = 0
LIENS_data = []
if len(META_TEXTE_VERSION) == 7 + etat_present + mcs_txt_present:
liens_present = 1
LIENS = META_TEXTE_VERSION[6 + etat_present + mcs_txt_present]
assert LIENS.tag == 'LIENS'
#####
for LIEN in LIENS:
assert LIEN.tag == 'LIEN'
LIEN_data = {}
LIEN_data['cidtexte'] = LIEN.attrib['cidtexte']
LIEN_data['datesignatexte'] = LIEN.attrib['datesignatexte']
LIEN_data['id_'] = LIEN.attrib['id']
LIEN_data['naturetexte'] = LIEN.attrib['naturetexte']
LIEN_data['nortexte'] = LIEN.attrib['nortexte']
LIEN_data['num'] = LIEN.attrib['num']
LIEN_data['numtexte'] = LIEN.attrib['numtexte']
LIEN_data['sens'] = LIEN.attrib['sens']
LIEN_data['typelien'] = LIEN.attrib['typelien']
LIEN_data['texte'] = LIEN.text
LIENS_data.append(FrozenDict(LIEN_data))
valeurs['LIENS'] = tuple(LIENS_data)
assert len(META_TEXTE_VERSION) == 6 + etat_present + mcs_txt_present + liens_present
#
notice_present = 0
valeurs['NOTICE'] = ''
if TEXTE_VERSION[1].tag == 'NOTICE':
notice_present = 1
NOTICE = TEXTE_VERSION[1]
##
CONTENU = NOTICE[0]
valeurs['NOTICE'] = ElementTree.tostring(CONTENU, encoding='unicode', method='xml')
#
VISAS = TEXTE_VERSION[1 + notice_present]
assert VISAS.tag == 'VISAS'
##
CONTENU = VISAS[0]
valeurs['VISAS'] = ElementTree.tostring(CONTENU, encoding='unicode', method='xml')
#
SIGNATAIRES = TEXTE_VERSION[2 + notice_present]
assert SIGNATAIRES.tag == 'SIGNATAIRES'
##
CONTENU = SIGNATAIRES[0]
valeurs['SIGNATAIRES'] = ElementTree.tostring(CONTENU, encoding='unicode', method='xml')
#
TP = TEXTE_VERSION[3 + notice_present]
assert TP.tag == 'TP'
##
CONTENU = TP[0]
valeurs['TP'] = ElementTree.tostring(CONTENU, encoding='unicode', method='xml')
#
nota_present = 0
valeurs['NOTA'] = ''
if TEXTE_VERSION[4 + notice_present].tag == 'NOTA':
nota_present = 1
NOTA = TEXTE_VERSION[4 + notice_present]
##
CONTENU = NOTA[0]
valeurs['NOTA'] = ElementTree.tostring(CONTENU, encoding='unicode', method='xml')
#
ABRO = TEXTE_VERSION[4 + notice_present + nota_present]
assert ABRO.tag == 'ABRO'
##
CONTENU = ABRO[0]
valeurs['ABRO'] = ElementTree.tostring(CONTENU, encoding='unicode', method='xml')
#
RECT = TEXTE_VERSION[5 + notice_present + nota_present]
assert RECT.tag == 'RECT'
##
CONTENU = RECT[0]
valeurs['RECT'] = ElementTree.tostring(CONTENU, encoding='unicode', method='xml')
#
sm_present = 0
valeurs['SM'] = ''
assert len(TEXTE_VERSION) in [6 + notice_present + nota_present, 7 + notice_present + nota_present]
if len(TEXTE_VERSION) == 7 + notice_present + nota_present:
sm_present = 1
SM = TEXTE_VERSION[6 + notice_present + nota_present]
##
CONTENU = SM[0]
valeurs['SM'] = ElementTree.tostring(CONTENU, encoding='unicode', method='xml')
return valeurs
def parse_cid(cid, curseur):
curseur.execute("select base_origine, categorie, cid, id_ from struct where valide = True and cid = %s;",
(cid,))
liste_struct = curseur.fetchall()
curseur.execute("select base_origine, categorie, cid, id_ from version where valide = True and cid = %s;",
(cid,))
liste_version = curseur.fetchall()
assert len(liste_struct) == len(liste_version)
    assert sorted([l[3] for l in liste_struct]) == sorted([l[3] for l in liste_version])
liste_id_ = [l[3] for l in liste_struct]
assert len(liste_id_) == len(set(liste_id_))
assert cid in liste_id_
valeurs_struct_par_id_ = {}
for base_origine, categorie, cid, id_ in liste_struct:
if base_origine == 'JORF':
nom_fichier = os.path.join(racine_jorf, 'texte/struct', id_ + '.xml')
elif base_origine == 'LEGI':
nom_fichier = os.path.join(racine_legi, categorie, cid, 'struct', id_ + '.xml')
else:
raise ValueError(base_origine)
with open(nom_fichier) as f:
contenu = f.read()
valeurs_struct = parse_struct(contenu)
valeurs_struct_par_id_[id_] = valeurs_struct
valeurs_version_par_id_ = {}
for base_origine, categorie, cid, id_ in liste_version:
if base_origine == 'JORF':
nom_fichier = os.path.join(racine_jorf, 'texte/version', id_ + '.xml')
elif base_origine == 'LEGI':
nom_fichier = os.path.join(racine_legi, categorie, cid, 'version', id_ + '.xml')
else:
raise ValueError(base_origine)
with open(nom_fichier) as f:
contenu = f.read()
valeurs_version = parse_version(contenu)
valeurs_version_par_id_[id_] = valeurs_version
return liste_id_, valeurs_struct_par_id_, valeurs_version_par_id_
infos_communes = [
'ID_ELI',
'ID_ELI_ALIAS',
'NATURE',
'CID',
'NUM',
'NUM_SEQUENCE',
'NOR',
'DATE_PUBLI',
'DATE_TEXTE',
'DERNIERE_MODIFICATION',
'VERSIONS_A_VENIR',
'ORIGINE_PUBLI',
'PAGE_DEB_PUBLI',
'PAGE_FIN_PUBLI',
]
infos_communes_struct = [
'VERSIONS',
]
infos_communes_version = [
'AUTORITE',
'MINISTERE',
]
infos_particulieres = [
'ID',
'ANCIEN_ID',
'ORIGINE'
]
infos_particulieres_struct = [
'URL',
'LIENS_ART',
'LIENS_SECTION_TA',
]
infos_particulieres_version = [
'URL',
'TITRE',
'TITREFULL',
'ETAT',
'DATE_DEBUT',
'DATE_FIN',
'MCS_TXT',
'LIENS',
'NOTICE',
'VISAS',
'SIGNATAIRES',
'TP',
'NOTA',
'ABRO',
'RECT',
'SM',
]
authors: ["[email protected]"] | author: ""
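parse_struct and parse_version wrap each parsed row in FrozenDict precisely so rows are hashable and can be stored in frozensets and tuples (see valeurs['VERSIONS'] = frozenset(VERSIONS_data) above). A small usage sketch of the class, with illustrative keys:

d1 = FrozenDict(etat='VIGUEUR', num='1')
d2 = FrozenDict(num='1', etat='VIGUEUR')
assert d1 == d2 and hash(d1) == hash(d2)  # equality and hash ignore insertion order
assert len({d1, d2}) == 1                 # usable as set members, unlike a plain dict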
---
blob_id: 116f6963b88edfdb0db9fda927ba4e4947b376fa | content_id: fd5e10b9f48bced5ac4faae3e74d4fac7886ec50
directory_id: 5ec7d0bad8a77c79843a2813f5effcb3a2b7e288
path: /lean/models/brokerages/cloud/tradier.py | extension: py | length_bytes: 2,163
repo_name: xdpknx/lean-cli | branch_name: refs/heads/master
snapshot_id: aca9b9c9c4e156c9faefcfa8ccdfc20423b510a0 | revision_id: c1051bd3e8851ae96f6e84f608a7116b1689c9e9
detected_licenses: ["Apache-2.0"] | license_type: permissive
visit_date: 2023-08-08T02:30:09.827647 | revision_date: 2021-09-21T21:36:24 | committer_date: 2021-09-21T21:36:24
github_id: null | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import click
from lean.components.util.logger import Logger
from lean.models.brokerages.cloud.base import CloudBrokerage
class TradierBrokerage(CloudBrokerage):
"""A CloudBrokerage implementation for Tradier."""
def __init__(self, account_id: str, access_token: str, environment: str) -> None:
self._account_id = account_id
self._access_token = access_token
self._environment = environment
@classmethod
def get_id(cls) -> str:
return "TradierBrokerage"
@classmethod
def get_name(cls) -> str:
return "Tradier"
@classmethod
def build(cls, logger: Logger) -> CloudBrokerage:
logger.info("""
Your Tradier account id and API token can be found on your Settings/API Access page (https://dash.tradier.com/settings/api).
The account id is the alpha-numeric code in a dropdown box on that page.
Your account details are not saved on QuantConnect.
""".strip())
account_id = click.prompt("Account id")
access_token = logger.prompt_password("Access token")
environment = click.prompt("Environment", type=click.Choice(["demo", "real"], case_sensitive=False))
return TradierBrokerage(account_id, access_token, environment)
def _get_settings(self) -> Dict[str, str]:
return {
"account": self._account_id,
"token": self._access_token,
"environment": "live" if self._environment == "real" else "paper"
}
authors: ["[email protected]"] | author: ""
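A hypothetical usage sketch of the class above (assumes the lean package is importable; the account values are fake):

from lean.models.brokerages.cloud.tradier import TradierBrokerage

brokerage = TradierBrokerage(account_id="VA00000000", access_token="fake-token", environment="demo")
print(brokerage.get_name())       # "Tradier"
print(brokerage._get_settings())  # {'account': 'VA00000000', 'token': 'fake-token', 'environment': 'paper'}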
---
blob_id: 1e22197683ad05ab1e08e3e0579f3d39e930dc88 | content_id: bdde4ddd977f43575f12e73d62c83b3a4efdbf45
directory_id: 2c0ba020172ff3129576e9698be7d3c9c6688a3c
path: /blog/models.py | extension: py | length_bytes: 415
repo_name: Cyusa-G/blog-app | branch_name: refs/heads/main
snapshot_id: 793dcfe2426db3d98a2fed52e76760ae71ae7b5f | revision_id: 034a564376d56d9f80ebe064dc60a059c7425231
detected_licenses: [] | license_type: no_license
visit_date: 2023-04-23T06:57:45.904534 | revision_date: 2021-05-02T15:20:17 | committer_date: 2021-05-02T15:20:17
github_id: 363,417,736 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from django.db import models
from django.urls import reverse
class Post(models.Model):
title = models.CharField(max_length=200)
author = models.ForeignKey('auth.User',
on_delete=models.CASCADE,
)
body = models.TextField()
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post_detail', args=[str(self.id)])
# Create your models here.
authors: ["[email protected]"] | author: ""
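get_absolute_url above reverses a URL pattern named 'post_detail' that takes the post id as its only argument. A sketch of the urls.py entry this model assumes (the view name is hypothetical):

from django.urls import path
from .views import PostDetailView  # hypothetical detail view

urlpatterns = [
    path('post/<int:pk>/', PostDetailView.as_view(), name='post_detail'),
]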
---
blob_id: 6b6b9d93703d5e9c0504afc1991151ab87a572fe | content_id: deaf646cb366f69b7e29537316a358fa3d8465dd
directory_id: ef5e55482a431e94860960025e7aed0e08ce8bf5
path: /dp/longest_common_subsequence.py | extension: py | length_bytes: 1,682
repo_name: ajitluhach/algorithms | branch_name: refs/heads/master
snapshot_id: c2ef4392fc73ffb11bf24863251fd4ef8a25d31e | revision_id: 1e9ed8a5e95f6b1735516461a0aa15ceb78608f2
detected_licenses: [] | license_type: no_license
visit_date: 2021-03-27T19:37:08.934630 | revision_date: 2017-08-30T17:46:38 | committer_date: 2017-08-30T17:46:38
github_id: 89,777,342 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
def LCS(X, Y):
"""Return the LCS Table of the two substrings X and Y"""
n, m = len(X), len(Y) # length of both the strings
L = [[0]*(m+1) for _ in range(n+1)] # create a n+1*(m+1) table, each = 0
# one column and row more because 0th row is zero, and 0th column is
# also zero, the value for the current match accessed by previous diagonal
# if there is no match, then we choose the match from either the upper
# row and same column, or from same row and previous column
for j in range(n):
for k in range(m):
if X[j] == Y[k]: # match has occured, add both of them
L[j+1][k+1] = 1 + L[j][k]
else: # choose to align one of them, not both
L[j+1][k+1] = max(L[j][k+1], L[j+1][k])
return L
"""This algorithm reduced the time complexity from O((2^n)*m) to O(nm)
made the exponential time into polynomial.
To calculate the actual subsequence from this table, we go back from the last
column.
We have to choice in this case
if x[j] == x[k]:
then add this x to the solution, and move one up diagonally
elif previous row and same column is great than or equal to same row and\
previous column:
then choose the upper row.
else:
choose the same row
"""
def LCS_solution(X, Y, L):
"""Return the longest common substring of X and Y, given LCS table"""
j, k = len(X), len(Y)
solution = []
while L[j][k] > 0:
if X[j-1] == Y[k-1]:
solution.append(X[j-1])
j -= 1
k -= 1
elif L[j-1][k] >= L[j][k-1]:
j -= 1
else:
k -= 1
return ''.join(reversed(solution))
authors: ["[email protected]"] | author: ""
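A quick usage sketch of the two functions above (the strings are a classic textbook example; which length-4 subsequence comes back depends on the tie-breaking in LCS_solution):

X, Y = 'ABCBDAB', 'BDCABA'
table = LCS(X, Y)
solution = LCS_solution(X, Y, table)
print(len(solution), solution)  # 4, one valid LCS such as 'BCBA'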
---
blob_id: 190c0b7174e3ee074dcee7447dd6149444d96d20 | content_id: 0b6e6e1ae339c3c25f894b09b621c4777509d655
directory_id: 9030481ef925278a174cbbf58c74bc5058e8d302
path: /contrib/testgen/base58.py | extension: py | length_bytes: 2,999
repo_name: hideoussquid/aureus-13-gui | branch_name: refs/heads/master
snapshot_id: 1b8f85f262cbc1970c3d8072b064956073bc4182 | revision_id: 8865c958ba1680d4615128dabcc3cc4d47a24c51
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2021-01-19T08:22:45.795165 | revision_date: 2017-04-26T07:34:19 | committer_date: 2017-04-26T07:34:19
github_id: 87,622,430 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
# Copyright (c) 2012 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Aureus base58 encoding and decoding.
Based on https://aureustalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Aureus does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
h3 = checksum(result[:-4])
if result[-4:] == checksum(result[:-4]):
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/aureus/python-base58.git)
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
authors: ["[email protected]"] | author: ""
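A round-trip sketch using the helpers above (Python 3; the payload is a fake version byte plus a dummy 20-byte hash):

payload = b'\x05' + b'\x12' * 20
encoded = b58encode_chk(payload)         # base58 string with a 4-byte SHA256d checksum
assert b58decode_chk(encoded) == payload
print(encoded)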
---
blob_id: c74d484eaebbb4aa559ed27a741aa2128baf2d04 | content_id: 2621fa2b10f1c2139d5b3a4c3dc845859176fc13
directory_id: 604b7a20dbb48e4c52611e289949aebdbfce2690
path: /pages/views.py | extension: py | length_bytes: 173
repo_name: NargesHme/Newspaper_App | branch_name: refs/heads/main
snapshot_id: 33bbcaa8b3a09e30890ebe0621953f116d9202df | revision_id: 14d9f68c2a794ec8cdc6ff90f468d437e5db2ab7
detected_licenses: [] | license_type: no_license
visit_date: 2023-02-20T20:21:52.808861 | revision_date: 2021-01-22T00:16:41 | committer_date: 2021-01-22T00:16:41
github_id: 329,339,190 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2021-01-21T23:50:16 | gha_created_at: 2021-01-13T14:49:52 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from django.shortcuts import render
from django.views.generic import TemplateView
# Create your views here.
class HomePageView(TemplateView):
template_name = 'home.html'
authors: ["[email protected]"] | author: ""
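A sketch of the urls.py wiring such a TemplateView typically gets (names are assumptions, not taken from the repository):

from django.urls import path
from .views import HomePageView

urlpatterns = [
    path('', HomePageView.as_view(), name='home'),
]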
---
blob_id: 58bb40f95b996bb5aaf4c9706c5271c0c5978cc2 | content_id: 27a4738c22e98525faf3534d4f880e283ad582e0
directory_id: 25d8bac5635ac1cc3577a3593a4512e042ea7ecd
path: /scripts/asyncore-example-2.py | extension: py | length_bytes: 885
repo_name: mtslong/demo | branch_name: refs/heads/master
snapshot_id: 2333fa571d6d9def7bdffc90f7bcb623b15e6e4b | revision_id: a78b74e0eea7f84df489f5c70969b9b4797a4873
detected_licenses: [] | license_type: no_license
visit_date: 2020-05-18T18:28:48.237100 | revision_date: 2013-11-11T16:10:11 | committer_date: 2013-11-11T16:10:11
github_id: 4,136,487 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
import asyncore
import socket, time
# reference time
TIME1970 = 2208988800L
class TimeChannel(asyncore.dispatcher):
def handle_write(self):
t = int(time.time()) + TIME1970
t = chr(t>>24&255) + chr(t>>16&255) + chr(t>>8&255) + chr(t&255)
self.send(t)
self.close()
class TimeServer(asyncore.dispatcher):
def __init__(self, port=37):
self.port = port
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(("", port))
self.listen(5)
print "listening on port", self.port
def handle_accept(self):
channel, addr = self.accept()
TimeChannel(channel)
server = TimeServer(8037)
asyncore.loop()
## log: adding channel <TimeServer at 8cb940>
## listening on port 8037
## log: adding channel <TimeChannel at 8b2fd0>
## log: closing channel 52:<TimeChannel connected at 8b2fd0>
authors: ["[email protected]"] | author: ""
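The chained chr()-and-shift expressions in handle_write are a manual big-endian pack of the 32-bit RFC 868 timestamp (seconds since 1900-01-01). The same four bytes can be produced with struct, sketched here:

import struct
import time

TIME1970 = 2208988800                       # offset between the 1900 and 1970 epochs
t = int(time.time()) + TIME1970
packed = struct.pack('!I', t & 0xFFFFFFFF)  # 4 bytes, network (big-endian) order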
---
blob_id: e72a257dbd1f8b2b02e25b045333f59190c7dede | content_id: 85ebd6da5d9a413de4e6930042e5784973e97c38
directory_id: b756fac12094915d4b4c3b2f88eff0812b3cc0ae
path: /geekshop/adminapp/views.py | extension: py | length_bytes: 12,318
repo_name: GarbGitHub/django-geekshop | branch_name: refs/heads/master
snapshot_id: 6312c6ba42d702f00277325764d6750a9c81073c | revision_id: 43937c082d20e32b95b0c73d0a24fcac5aa33923
detected_licenses: [] | license_type: no_license
visit_date: 2023-08-24T07:58:52.497149 | revision_date: 2021-10-20T17:48:29 | committer_date: 2021-10-20T17:48:29
github_id: 370,262,919 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: 2021-10-20T17:48:30 | gha_created_at: 2021-05-24T07:19:02 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false
content:
from django.contrib.auth.decorators import user_passes_test
from django.db import connection
from django.db.models import F
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.http import HttpResponseRedirect
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.shortcuts import get_object_or_404
from adminapp.forms import ShopUserRegisterForm, ProductCategoryEditForm, ProductEditForm
from authapp.models import ShopUser
from mainapp.models import Product, ProductCategory
class UsersListView(ListView):
model = ShopUser
template_name = 'adminapp/users.html'
context_object_name = 'objects'
ordering = '-is_active', '-is_staff', 'username'
paginate_by = 10
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['icon'] = 'bx-user'
        context['title'] = 'Users'
return context
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class UserCreateView(CreateView):
model = ShopUser
template_name = 'adminapp/user_update.html'
success_url = reverse_lazy('admin_staff:users')
# fields = '__all__' # get_form()
def get_form(self, form_class=ShopUserRegisterForm):
"""ะะตัะฝะตั ัะบะทะตะผะฟะปัั ัะพัะผั, ะบะพัะพัะฐั ะฑัะดะตั ะธัะฟะพะปัะทะพะฒะฐัััั ะฒ ััะพะผ ะฟัะตะดััะฐะฒะปะตะฝะธะธ."""
return form_class(**self.get_form_kwargs())
def get_context_data(self, *args, **kwargs):
context = super(UserCreateView, self).get_context_data(**kwargs)
context['icon'] = 'bx-user-plus'
        context['title'] = 'create user'
return context
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class UserUpdateView(UpdateView):
model = ShopUser
template_name = 'adminapp/user_update.html'
context_object_name = 'edit_user'
def get_form(self, form_class=ShopUserRegisterForm):
"""ะะตัะฝะตั ัะบะทะตะผะฟะปัั ัะพัะผั, ะบะพัะพัะฐั ะฑัะดะตั ะธัะฟะพะปัะทะพะฒะฐัััั ะฒ ััะพะผ ะฟัะตะดััะฐะฒะปะตะฝะธะธ."""
return form_class(**self.get_form_kwargs())
def get_success_url(self):
return reverse_lazy('adminapp:user_update', args=(self.object.id,))
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        title = f'user: {context.get(self, self.object.username)}'
context['title'] = title
context['icon'] = 'bx-edit'
return context
# def post(self, request, *args, **kwargs):
# self.object = self.get_object()
# self.context = self.get_context_data(object=self.object)
# self.context.update({
    #     'alert': 'Data updated successfully'
# })
# return super(UserUpdateView, self).post(request, **kwargs)
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class UserDeleteView(DeleteView):
model = ShopUser
template_name = 'adminapp/user_delete.html'
success_url = reverse_lazy('admin_staff:users')
context_object_name = 'user_to_delete'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        context['title'] = f'delete user: {context.get(self, self.object.username)}'
context['icon'] = 'bx-user_to_delete'
return context
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.is_active = False
self.object.save()
return HttpResponseRedirect(self.get_success_url())
class CategoriesListView(ListView):
model = ProductCategory
template_name = 'adminapp/categories.html'
context_object_name = 'objects'
ordering = '-is_active'
paginate_by = 3
def get_context_data(self, *args, **kwargs):
context = super(CategoriesListView, self).get_context_data(**kwargs)
context['icon'] = 'bx-cart'
        context['title'] = 'product categories'
return context
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class ProductCategoryCreateView(CreateView):
model = ProductCategory
template_name = 'adminapp/category_update.html'
success_url = reverse_lazy('admin_staff:categories')
# fields = '__all__' # get_form()
def get_form(self, form_class=ProductCategoryEditForm):
"""ะะตัะฝะตั ัะบะทะตะผะฟะปัั ัะพัะผั, ะบะพัะพัะฐั ะฑัะดะตั ะธัะฟะพะปัะทะพะฒะฐัััั ะฒ ััะพะผ ะฟัะตะดััะฐะฒะปะตะฝะธะธ."""
return form_class(**self.get_form_kwargs())
def get_context_data(self, *args, **kwargs):
context = super(ProductCategoryCreateView, self).get_context_data(**kwargs)
context['icon'] = 'bx-category'
        context['title'] = 'Create category'
return context
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class ProductCategoryUpdateView(UpdateView):
model = ProductCategory
template_name = 'adminapp/category_update.html'
success_url = reverse_lazy('adminapp:categories')
form_class = ProductCategoryEditForm
def get_form(self, form_class=ProductCategoryEditForm):
"""ะะตัะฝะตั ัะบะทะตะผะฟะปัั ัะพัะผั, ะบะพัะพัะฐั ะฑัะดะตั ะธัะฟะพะปัะทะพะฒะฐัััั ะฒ ััะพะผ ะฟัะตะดััะฐะฒะปะตะฝะธะธ."""
return form_class(**self.get_form_kwargs())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        context['title'] = f'category: {context.get(self, self.object.name)}'
context['icon'] = 'bx-edit'
return context
def form_valid(self, form):
if 'discount' in form.cleaned_data:
discount = form.cleaned_data['discount']
if discount:
self.object.product_set.update(price=F('price') * (1 - discount / 100))
db_profile_by_type(self.__class__, 'UPDATE', connection.queries)
return super().form_valid(form)
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class ProductCategoryDeleteView(DeleteView):
model = ProductCategory
template_name = 'adminapp/category_delete.html'
success_url = reverse_lazy('adminapp:categories')
context_object_name = 'category_to_delete'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        context['title'] = f'delete category: {context.get(self, self.object.name)}'
context['icon'] = 'bx-edit'
return context
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.is_active = False
self.object.save()
return HttpResponseRedirect(self.get_success_url())
class ProductsListView(ListView):
model = Product
template_name = 'adminapp/products.html'
context_object_name = 'objects'
ordering = '-is_active'
paginate_by = 2
def get_queryset(self):
return Product.objects.filter(category__pk=self.kwargs.get('pk')).order_by('-is_active')
def get_context_data(self, *args, object_lists=None, **kwargs):
context = super(ProductsListView, self).get_context_data(**kwargs)
category = get_object_or_404(ProductCategory, pk=self.kwargs.get('pk'))
context['icon'] = 'bx-cart'
        context['title'] = f'products in category "{category.name}"'
context['category'] = category
return context
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class ProductCreateView(CreateView):
model = Product
template_name = 'adminapp/product_update.html'
success_url = reverse_lazy('admin_staff:users')
    # initial = {'category': category}
# fields = '__all__' # get_form()
def get_initial(self):
initial = super(ProductCreateView, self).get_initial()
initial['category'] = get_object_or_404(ProductCategory, pk=self.kwargs.get('pk'))
return initial
def get_context_data(self, *args, **kwargs):
context = super(ProductCreateView, self).get_context_data(**kwargs)
category = get_object_or_404(ProductCategory, pk=self.kwargs.get('pk'))
context['icon'] = 'bx-category'
        context['title'] = 'Add a new product'
context['category'] = category
return context
def get_form(self, form_class=ProductEditForm):
"""ะะตัะฝะตั ัะบะทะตะผะฟะปัั ัะพัะผั, ะบะพัะพัะฐั ะฑัะดะตั ะธัะฟะพะปัะทะพะฒะฐัััั ะฒ ััะพะผ ะฟัะตะดััะฐะฒะปะตะฝะธะธ."""
return form_class(**self.get_form_kwargs())
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class ProductDetailView(DetailView):
model = Product
template_name = 'adminapp/product_read.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        context['title'] = 'Detailed information'
context['icon'] = 'bx-chair'
return context
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class ProductUpdateView(UpdateView):
model = Product
template_name = 'adminapp/product_update.html'
def get_form(self, form_class=ProductEditForm):
"""ะะตัะฝะตั ัะบะทะตะผะฟะปัั ัะพัะผั, ะบะพัะพัะฐั ะฑัะดะตั ะธัะฟะพะปัะทะพะฒะฐัััั ะฒ ััะพะผ ะฟัะตะดััะฐะฒะปะตะฝะธะธ."""
return form_class(**self.get_form_kwargs())
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        context['title'] = f'Edit: "{self.object.name}"'
context['icon'] = 'bx-edit'
return context
def get_success_url(self):
return reverse_lazy('adminapp:product_read', args=(self.object.id,))
@method_decorator(user_passes_test(lambda u: u.is_superuser))
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class ProductDeleteView(DeleteView):
model = Product
template_name = 'adminapp/product_delete.html'
context_object_name = 'product_to_delete'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
        context['title'] = f'delete: {self.object.name}'
context['icon'] = 'bx-edit'
return context
def get_success_url(self):
return reverse_lazy('adminapp:products', args=(self.object.category_id,))
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.is_active = False
self.object.save()
return HttpResponseRedirect(self.get_success_url())
def db_profile_by_type(prefix, type, queries):
update_queries = list(filter(lambda x: type in x['sql'], queries))
print(f'db_profile {type} for {prefix}:')
    for query in update_queries:
        print(query['sql'])
@receiver(pre_save, sender=ProductCategory)
def product_is_active_update_productcategory_save(sender, instance, **kwargs):
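    # Cascade the category's is_active flag to all of its products; instance.pk is
    # None on the very first save, so only existing categories are cascaded.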
if instance.pk:
if instance.is_active:
instance.product_set.update(is_active=True)
else:
instance.product_set.update(is_active=False)
db_profile_by_type(sender, 'UPDATE', connection.queries)
|
[
"[email protected]"
] | |
5458918ced7a34110cce74cdfaac172334515069
|
009653f67d9b788281d2b9dd62a7e23628d4b15c
|
/wget.py
|
353a29d7ffc59ca9d1dfda3cebeb547167f7b37e
|
[] |
no_license
|
swapnilkauthale/Ubuntu-Repository-Updater-for-Cloud
|
ee92661308bde0cf2aa51f6b9114cb27c8c44281
|
7c2156f0f58ebc0e6207c5921d1f914a5c907892
|
refs/heads/master
| 2021-04-26T22:24:27.261069 | 2018-03-26T07:17:42 | 2018-03-26T07:17:42 | 124,086,620 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,891 |
py
|
#*/10 * * * * python /etc/cron.hourly/wget.py
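# Keeps a local Ubuntu ISO mirror fresh: compares the stored MD5SUMS file with a
# freshly downloaded one and re-fetches the ISO whenever the published hash changes.
# Note: md_cross_check is computed from the ISO already on disk, so it verifies the
# previous download, not the one triggered inside the loop below.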
import os,re
path = "/home/swapnil/Desktop/U_Mirror/versions/17/old_MD5/MD5SUMS"
l=[]
with open(path,'r') as f:
for line in f:
for word in line.split():
l.append(word)
old_hash = l[0]
#os.system("wget http://releases.ubuntu.com/17.10/ubuntu-17.10-desktop-amd64.iso -P /home/swapnil/Desktop/U_Mirror/versions/")
os.system("wget http://releases.ubuntu.com/17.10/MD5SUMS -P /home/swapnil/Desktop/U_Mirror/versions/17/new_MD5SUM/")
path1 = "/home/swapnil/Desktop/U_Mirror/versions/17/new_MD5SUM/MD5SUMS"
l1=[]
with open(path1,'r') as f:
for line in f:
for word in line.split():
l1.append(word)
new_hash = l1[0]
os.system("openssl dgst -md5 /home/swapnil/Desktop/U_Mirror/versions/17/ubuntu-17.10-desktop-amd64.iso > /home/swapnil/Desktop/U_Mirror/versions/17/MD5_cross_check.txt")
with open("/home/swapnil/Desktop/U_Mirror/versions/17/MD5_cross_check.txt",'r') as f:
md = f.readline()
l2=[]
for parts in md.split("= "):
l2.append(parts)
md_cross_check = l2[1]
#print old
#print newm
#print md
#if old != newm and old != md :
# os.system("wget http://releases.ubuntu.com/17.10/ubuntu-17.10-desktop-amd64.iso -P /home/swapnil/Desktop/U_Mirror/versions/17/")
# os.system("wget http://releases.ubuntu.com/17.10/MD5SUMS -P /home/swapnil/Desktop/U_Mirror/versions/17/")
flag = 0
while flag!=1:
if old_hash != new_hash:
os.system("wget http://releases.ubuntu.com/17.10/ubuntu-17.10-desktop-amd64.iso -P /home/swapnil/Desktop/U_Mirror/versions/17/")
os.system("cp /home/swapnil/Desktop/U_Mirror/versions/17/new_MD5SUM/MD5SUMS /home/swapnil/Desktop/U_Mirror/versions/17/old_MD5/")
os.system("rm -rf /home/swapnil/Desktop/U_Mirror/versions/17/new_MD5SUM && mkdir /home/swapnil/Desktop/U_Mirror/versions/17/new_MD5SUM")
if new_hash == md_cross_check:
flag = 1
|
[
"[email protected]"
] | |
bd89005298fc7cb04640f30108944021ea926373
|
862126fc5d0b920501a75913ddd563a0fb6c942a
|
/Trabajo_Final/gSLICrPy/gSLICrPy.py
|
16315f9e0d597b11c819adb08ab60e4aed3c42fa
|
[] |
no_license
|
jhuni45/TCG-Laboratorio
|
60cc5a735e98fafd6c171e92b7d0d9ae71ba5378
|
e2841e6ea30880142a47e7aa3dc4158c5c0fe4d3
|
refs/heads/master
| 2023-03-08T13:24:17.849817 | 2021-02-15T17:49:32 | 2021-02-15T17:49:32 | 257,342,390 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,585 |
py
|
import ctypes
from ctypes import POINTER
def __get_CUDA_gSLICr__(path_to_shared='./build/libDEMO.so'):
"""
:return: Callable
"""
dll = ctypes.CDLL(path_to_shared, mode=ctypes.RTLD_GLOBAL)
func = dll.CUDA_gSLICr
"""
int* CUDA_gSLICr(unsigned char* image,
int img_size_x,
int img_size_y,
int n_segs,
int spixel_size,
float coh_weight,
int n_iters,
int color_space,
int segment_color_space,
bool segment_by_size,
bool enforce_connectivity,
char* out_name)
"""
func.argtypes = [POINTER(ctypes.c_uint8),
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_float,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_bool,
ctypes.c_bool,
ctypes.c_char_p]
# POINTER(c_char) or ctypes.c_char_p ?
return func
def CUDA_gSLICr(__get_CUDA_gSLICr__,
image,
img_size_x,
img_size_y,
n_segs,
spixel_size,
coh_weight,
n_iters,
color_space,
segment_color_space,
segment_by_size,
enforce_connectivity,
out_name):
"""
    :param __get_CUDA_gSLICr__:
:param image:
:param img_size_x:
:param img_size_y:
:param n_segs:
:param spixel_size:
:param coh_weight:
:param n_iters:
:param color_space:
:param segment_color_space:
:param segment_by_size:
:param enforce_connectivity:
:param out_name:
:return:
"""
image = image.ctypes.data_as(POINTER(ctypes.c_uint8))
out_name = out_name.encode('utf-8')
return __get_CUDA_gSLICr__(image,
img_size_x,
img_size_y,
n_segs,
spixel_size,
coh_weight,
n_iters,
color_space,
segment_color_space,
segment_by_size,
enforce_connectivity,
out_name)
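# Hypothetical usage sketch (parameter values are illustrative, not from the library docs):
#   func = __get_CUDA_gSLICr__('./build/libDEMO.so')
#   CUDA_gSLICr(func, img, img.shape[1], img.shape[0], 2000, 16,
#               0.6, 5, 2, 2, True, True, 'segmentation_out')
# where `img` is a C-contiguous uint8 numpy array matching the given dimensions.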
|
[
"[email protected]"
] | |
5be34879011c0f4d0308e93c05824f2a437ec963
|
44b87d9faad99d542914c35410ba7d354d5ba9cd
|
/1/collection/list/divisible by 8 using compre.py
|
857a0b6ada0c2d9dc98bd9180ec1370a09173462
|
[] |
no_license
|
append-knowledge/pythondjango
|
586292d1c7d0ddace3630f0d77ca53f442667e54
|
0e5dab580e8cc48e9940fb93a71bcd36e8e6a84e
|
refs/heads/master
| 2023-06-24T07:24:53.374998 | 2021-07-13T05:55:25 | 2021-07-13T05:55:25 | 385,247,677 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 142 |
py
|
num=[i for i in range(1,1000) if i%8==0]
print(num)
print("length of num is",len(num))
# odd=[i for i in range(1000) if i%2!=0]
# print(odd)
|
[
"[email protected]"
] | |
14ef77722f048f2df87d9027535bf38755c8fe85
|
906f230699aa5017660140e7f2032c46db75086d
|
/feature_matching.py
|
4eef2b03912cd5044833e599456915d626fce331
|
[] |
no_license
|
gubo2012/opencv_tutorial
|
b3e10d41442052eb245f91e38f407dc67ef29a47
|
e90ecb4701731e4ed19c30bd32b58d9b9fda9763
|
refs/heads/master
| 2020-03-10T03:21:07.153053 | 2018-04-17T17:26:11 | 2018-04-17T17:26:11 | 129,162,430 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 672 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 11 22:05:40 2018
@author: gubo
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
img1 = cv2.imread('opencv-feature-matching-template.jpg',0)
img2 = cv2.imread('opencv-feature-matching-image.jpg',0)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
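# crossCheck=True keeps only mutual nearest-neighbour matches (i is best for j
# and j is best for i), trading match count for precision.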
# match descriptors, then sort them by distance (best matches first)
matches = bf.match(des1,des2)
matches = sorted(matches, key = lambda x:x.distance)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:5],None, flags=2)
plt.imshow(img3)
plt.show()
|
[
"[email protected]"
] | |
c88104d5615bd51adb47e4cdb4bcf60a416fae65
|
4cc2ad8ff00012095980bd98f4ec26437bf02feb
|
/form.py
|
ba4fbcf24556ee808a8bd7c05c332619b489b8a9
|
[] |
no_license
|
abcelso/Web-con-Python-y-flask
|
b57d562b592b2c5ff29936eccef7aaef7dadc5db
|
9de9b51abbb86880afdd5107973a1dd11156bafd
|
refs/heads/master
| 2022-06-09T23:10:41.734142 | 2020-04-21T04:29:28 | 2020-04-21T04:29:28 | 257,466,338 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 177 |
py
|
from wtforms import Form
from wtforms import StringField, PasswordField
class LoginForm(Form):
username = StringField('username')
password = PasswordField('password')
|
[
"[email protected]"
] | |
19f3c8b7d94aae6549e86646e36334cb826a906e
|
6e820756b82ffbe9837348937e53f1a0ce0e6cca
|
/Lib/site-packages/pandas_datareader/io/jsdmx.py
|
d602ca88beb058636aceaac714662ee2f457a6c4
|
[] |
no_license
|
AndreasPatsimas/pms_papei
|
c2afd941de6ae234dd37784d746e794183ebb8d3
|
da10220ea468304c1066bed55b8f92ba9e5ada8a
|
refs/heads/master
| 2023-02-01T23:33:39.221747 | 2020-12-19T12:17:59 | 2020-12-19T12:17:59 | 321,115,913 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,167 |
py
|
# pylint: disable-msg=E1101,W0613,W0603
from __future__ import unicode_literals
from collections import OrderedDict
import itertools
import re
import sys
import numpy as np
import pandas as pd
from pandas_datareader.io.util import _read_content
def read_jsdmx(path_or_buf):
"""
    Convert a SDMX-JSON string to a pandas object
Parameters
----------
path_or_buf : a valid SDMX-JSON string or file-like
https://github.com/sdmx-twg/sdmx-json
Returns
-------
results : Series, DataFrame, or dictionary of Series or DataFrame.
"""
jdata = _read_content(path_or_buf)
try:
import simplejson as json
except ImportError:
if sys.version_info[:2] < (2, 7):
raise ImportError("simplejson is required in python 2.6")
import json
if isinstance(jdata, dict):
data = jdata
else:
data = json.loads(jdata, object_pairs_hook=OrderedDict)
structure = data["structure"]
index = _parse_dimensions(structure["dimensions"]["observation"])
columns = _parse_dimensions(structure["dimensions"]["series"])
dataset = data["dataSets"]
if len(dataset) != 1:
raise ValueError("length of 'dataSets' must be 1")
dataset = dataset[0]
values = _parse_values(dataset, index=index, columns=columns)
df = pd.DataFrame(values, columns=columns, index=index)
return df
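# Example usage (the file name is illustrative):
#   df = read_jsdmx('oecd_stats.json')
#   # -> DataFrame with observation dimensions as index and series dimensions as columns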
def _get_indexer(index):
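    # SDMX-JSON keys its series/observations by colon-joined dimension positions
    # (e.g. "0:3:1"), so emit candidate keys in that same format.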
if index.nlevels == 1:
return [str(i) for i in range(len(index))]
else:
it = itertools.product(*[range(len(level)) for level in index.levels])
return [":".join(map(str, i)) for i in it]
def _fix_quarter_values(value):
"""Make raw quarter values Pandas-friendly (e.g. 'Q4-2018' -> '2018Q4')."""
m = re.match(r"Q([1-4])-(\d\d\d\d)", value)
if not m:
return value
quarter, year = m.groups()
value = "%sQ%s" % (quarter, year)
return value
def _parse_values(dataset, index, columns):
size = len(index)
series = dataset["series"]
values = []
# for s_key, s_value in iteritems(series):
for s_key in _get_indexer(columns):
try:
observations = series[s_key]["observations"]
observed = []
for o_key in _get_indexer(index):
try:
observed.append(observations[o_key][0])
except KeyError:
observed.append(np.nan)
except KeyError:
observed = [np.nan] * size
values.append(observed)
return np.transpose(np.array(values))
def _parse_dimensions(dimensions):
arrays = []
names = []
for key in dimensions:
values = [v["name"] for v in key["values"]]
role = key.get("role", None)
if role in ("time", "TIME_PERIOD"):
values = [_fix_quarter_values(v) for v in values]
values = pd.DatetimeIndex(values)
arrays.append(values)
names.append(key["name"])
midx = pd.MultiIndex.from_product(arrays, names=names)
if len(arrays) == 1 and isinstance(midx, pd.MultiIndex):
        # Fix for pandas >= 0.21
midx = midx.levels[0]
return midx
|
[
"[email protected]"
] | |
86a20d0a802a3b77e91c16b62fb4c5702450b991
|
dc69872f21492d34d7da6eee9f0d03f7c09a8a8d
|
/libraries/edge/opensearch/granuleisoresponse.py
|
fd3ed16eb03bd91778c8ff34354a963de13a58c8
|
[
"Apache-2.0"
] |
permissive
|
isabella232/incubator-sdap-edge
|
125e9ba8cb1738d8407222f9d21f5452fc5fa840
|
c725dad1098096048faed9a42a56f3cfc5c25bc5
|
refs/heads/master
| 2022-03-19T18:49:03.752184 | 2019-12-02T23:40:12 | 2019-12-02T23:40:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,127 |
py
|
import datetime
from edge.opensearch.isoresponsebysolr import IsoResponseBySolr
class GranuleIsoResponse(IsoResponseBySolr):
def __init__(self, linkToGranule):
super(GranuleIsoResponse, self).__init__()
self.linkToGranule = linkToGranule.split(',')
def _populateChannel(self, solrResponse):
pass
def _populateItem(self, solrResponse, doc, item):
link = self._getLinkToGranule(doc)
if link is not None:
doc['link'] = link
def _getLinkToGranule(self, doc):
link = None
if 'GranuleReference-Type' in doc and len(self.linkToGranule) > 0:
granuleRefDict = dict(list(zip(doc['GranuleReference-Type'], list(zip(doc['GranuleReference-Path'], doc['GranuleReference-Status'])))))
for type in self.linkToGranule:
# check if reference type exists
if type in granuleRefDict:
# check if reference is online
if granuleRefDict[type][1] == 'ONLINE':
link = granuleRefDict[type][0]
break
return link
|
[
"[email protected]"
] | |
ee35bcd0011f2a65f079aa4d10f48e44c32ac16b
|
44873fa0398bfb8f613f7b4e40a6c6e70aceaa9a
|
/ClassicUserAccounts/managers.py
|
479b39c1f0b475e195d44db23ae87aa0b27e88d8
|
[
"BSD-2-Clause"
] |
permissive
|
shyampathak/django-classic-user-account
|
8c6b9b9a32cfc556f9abb569f4bdc279cd302178
|
49e086de6feb2ee19fce4b8463dd8760694d03c6
|
refs/heads/master
| 2020-04-13T05:33:31.013708 | 2018-12-24T10:50:25 | 2018-12-24T10:50:25 | 162,995,683 | 1 | 0 |
BSD-2-Clause
| 2018-12-24T13:55:46 | 2018-12-24T13:55:45 | null |
UTF-8
|
Python
| false | false | 1,008 |
py
|
from django.contrib.auth.base_user import BaseUserManager
class UserManager(BaseUserManager):
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
        user = self.create_user(email, password=password, **extra_fields)
user.is_admin = True
user.is_superuser = True
# user.is_staff = True
user.save(using=self._db)
return user
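# Typical usage once this manager is attached to a custom user model as `objects`
# (the credentials below are illustrative):
#   User.objects.create_user('[email protected]', 'secret')
#   User.objects.create_superuser('[email protected]', 'secret')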
|
[
"[email protected]"
] | |
deece369baf689aed3e350790563652c99e1df4c
|
ca0d710ed0469beb7f87ae53f5efdef7bac19a27
|
/MainView/migrations/0001_initial.py
|
c421c7915ab1a3ced242749c9b05288a7231a3c2
|
[
"MIT"
] |
permissive
|
CiganOliviu/wedding_invitation
|
5d441d786f742d6a4baf5ff418370c0cfbb1b81e
|
8b243b287b6577b4f5f899e33ade1fec651152f0
|
refs/heads/main
| 2023-03-03T08:12:36.345173 | 2021-02-08T15:37:04 | 2021-02-08T15:37:04 | 333,568,503 | 0 | 0 |
MIT
| 2021-02-08T15:37:05 | 2021-01-27T21:43:34 | null |
UTF-8
|
Python
| false | false | 646 |
py
|
# Generated by Django 3.0.8 on 2020-08-10 08:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ConfirmAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('submitted', models.BooleanField(default=True)),
('answer_sent', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"[email protected]"
] | |
ef27d9265109ae830f5ca62402fffac9b1752587
|
f2f8b2f31859608d98ef644a6114991733adc964
|
/asposeslidescloud/models/workbook.py
|
3f1f395f402ebbf10a5ca5e5de7bef334ed4ce5d
|
[
"MIT",
"Python-2.0"
] |
permissive
|
aspose-slides-cloud/aspose-slides-cloud-python
|
ece60566bcf755d7350773b6ea46b44cde2d038a
|
0627d09c65a776d8ea138f97c7487d47fb98fbce
|
refs/heads/master
| 2023-08-05T00:19:21.417406 | 2023-07-30T13:32:16 | 2023-07-30T13:32:16 | 161,640,927 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,848 |
py
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose">
# Copyright (c) 2018 Aspose.Slides for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
from asposeslidescloud.models.data_source import DataSource
class Workbook(DataSource):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'str',
'worksheet_index': 'int',
'column_index': 'int',
'row_index': 'int'
}
attribute_map = {
'type': 'type',
'worksheet_index': 'worksheetIndex',
'column_index': 'columnIndex',
'row_index': 'rowIndex'
}
type_determiners = {
'type': 'Workbook',
}
def __init__(self, type='Workbook', worksheet_index=None, column_index=None, row_index=None): # noqa: E501
"""Workbook - a model defined in Swagger""" # noqa: E501
super(Workbook, self).__init__(type)
self._worksheet_index = None
self._column_index = None
self._row_index = None
self.type = 'Workbook'
self.worksheet_index = worksheet_index
self.column_index = column_index
self.row_index = row_index
@property
def worksheet_index(self):
"""Gets the worksheet_index of this Workbook. # noqa: E501
Worksheet index. # noqa: E501
:return: The worksheet_index of this Workbook. # noqa: E501
:rtype: int
"""
return self._worksheet_index
@worksheet_index.setter
def worksheet_index(self, worksheet_index):
"""Sets the worksheet_index of this Workbook.
Worksheet index. # noqa: E501
:param worksheet_index: The worksheet_index of this Workbook. # noqa: E501
:type: int
"""
self._worksheet_index = worksheet_index
@property
def column_index(self):
"""Gets the column_index of this Workbook. # noqa: E501
Column index of the first value. # noqa: E501
:return: The column_index of this Workbook. # noqa: E501
:rtype: int
"""
return self._column_index
@column_index.setter
def column_index(self, column_index):
"""Sets the column_index of this Workbook.
Column index of the first value. # noqa: E501
:param column_index: The column_index of this Workbook. # noqa: E501
:type: int
"""
self._column_index = column_index
@property
def row_index(self):
"""Gets the row_index of this Workbook. # noqa: E501
Row index of the first value. # noqa: E501
:return: The row_index of this Workbook. # noqa: E501
:rtype: int
"""
return self._row_index
@row_index.setter
def row_index(self, row_index):
"""Sets the row_index of this Workbook.
Row index of the first value. # noqa: E501
:param row_index: The row_index of this Workbook. # noqa: E501
:type: int
"""
self._row_index = row_index
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Workbook):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
42242438bea8875d7471ea2ddf09291f67a15799
|
30a34b3503decf1b4516039df3106cd152631819
|
/4AL17IS050_T_K_HARSHITH_PRASAD/19_05_2020/2.py
|
90236ef15cb59e0d27deb74598351d1745cafda7
|
[] |
no_license
|
alvas-education-foundation/ISE_3rd_Year_Coding_challenge
|
8ddb6c325bf6ab63e2f73d16573fa0b6e2484136
|
b4074cab4a47aad07ed0fa426eacccbfafdef7f8
|
refs/heads/master
| 2022-11-23T20:52:19.204693 | 2020-07-23T11:28:15 | 2020-07-23T11:28:15 | 265,195,514 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 196 |
py
|
# This program adds two numbers
num1 = 1.5
num2 = 6.3
# Add two numbers
sum = float(num1) + float(num2)
# Display the sum
print('The sum of {0} and {1} is {2}'.format(num1, num2, sum))
|
[
"[email protected]"
] | |
2070f5d6b0de0efb6739eb2fd4df1b8420de7296
|
aeaa059ff404bbf08d94fa9d5affbcdbc8cd5e51
|
/nbp_task/models/exchange_rate.py
|
59d4b5d58084b6e2ad77de1d972d5f44d39fafd8
|
[] |
no_license
|
durejkol/nbp_pyramid
|
8391f351b32b04f17161324e3e06ecb123713756
|
70361906697c2ed27a317681404b48967852cb05
|
refs/heads/master
| 2020-03-23T06:04:34.292549 | 2018-07-16T19:55:01 | 2018-07-16T19:55:01 | 141,187,137 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 669 |
py
|
from nbp_task.models import Base
from sqlalchemy import Column, Float, Integer, String
class ExchangeRate(Base):
__tablename__ = 'currencies'
id = Column(Integer, primary_key=True)
currency = Column(String)
currency_code = Column(String)
exchange_rate = Column(Float)
def __init__(self, currency, currency_code, exchange_rate):
self.currency = currency
self.currency_code = currency_code
self.exchange_rate = exchange_rate
def __repr__(self):
return "{0}, {1}, {2}".format(self.currency,
self.currency_code,
self.exchange_rate)
|
[
"[email protected]"
] | |
e06f6bad831d1fe03750409f2f19f010a7b6ddc3
|
4b23349fa42462acd842b713bbb9cb0868f2e8fe
|
/while.py
|
fa5438934e19b97c945b72b2951c6eb0069bc192
|
[] |
no_license
|
kkb1028-i-want-be-a-good-datascientist/TEST
|
663c80fefc510fb8d56f4c15ca6c781341fe5e10
|
cb918a2fd1ce7fdf935c82f29a354bf3b386cdf3
|
refs/heads/master
| 2020-07-08T02:20:16.283456 | 2019-08-21T08:24:43 | 2019-08-21T08:24:43 | 203,538,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 157 |
py
|
num, sum = 1, 0
while True:
sum+=num
if sum > 100:
break
else:
num += 1
print('Escaped the while loop when num is %d!!' % num)
|
[
"[email protected]"
] | |
9e18ca0c910a39afcedd81193e4b16ecdffb726e
|
01494c3ac2e3281d71066ee220628afc452beb70
|
/Chapter IV/dostep_swobodny.py
|
24b2e875deb371d3f45b0076b3882a8bab9ddd20
|
[] |
no_license
|
PatrykDagiel/Python_Dawson
|
b3a4aab8dbb875eda54c0cd46ceed3650edc3dc7
|
d3a04a5041df5ac728e2596331521191f941f536
|
refs/heads/master
| 2020-07-17T12:58:47.390099 | 2017-10-11T21:11:37 | 2017-10-11T21:11:37 | 94,321,332 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 225 |
py
|
import random
word = "indeks"
high=len(word)
low=-len(word)
for i in range(10):
position = random.randrange(low, high)
print("word[", position, "]\t", word[position])
input("\nAby zakonczyc program nacisnij enter")
|
[
"[email protected]"
] | |
35871975b31c4ba2ba0b34d40db9e1991e766f36
|
f662a5fb79627d22723ee91b49613b63221160ff
|
/cbv3/bin/flake8
|
c880345fa3c8189ed2ebb38bbe5f62e8f1a00130
|
[] |
no_license
|
brylie/django-concept
|
bd9bcbaf6dfe0bf2c25dcd2ff34a4d621275c619
|
4003222dc2a1fc441ac6bf21b03f1bad6a51ae23
|
refs/heads/master
| 2020-07-21T11:11:08.225802 | 2019-10-06T06:14:39 | 2019-10-06T06:14:39 | 206,844,463 | 0 | 0 | null | 2019-09-06T17:36:51 | 2019-09-06T17:36:50 | null |
UTF-8
|
Python
| false | false | 256 |
#!/Users/lpnotes/Desktop/django-concept/cbv3/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from flake8.main.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
42ac27e191cdf443e6d63d711278ad947615b5d6
|
cb5f2d4943b65d53a7c36080a48057f79dcb4c4a
|
/core/admin.py
|
56624ea61d601f822467e9b4962fdab83d5ee951
|
[] |
no_license
|
radaevalex/Portal
|
54be04808f52fdda0c7a12919f3d4d152d10d5db
|
7134336289e0bf425124c20bcd5c85e33f938591
|
refs/heads/master
| 2022-05-01T19:23:46.800454 | 2019-10-31T17:46:12 | 2019-10-31T17:46:12 | 218,718,710 | 0 | 0 | null | 2022-04-22T22:34:38 | 2019-10-31T08:28:50 |
JavaScript
|
UTF-8
|
Python
| false | false | 804 |
py
|
from django.contrib import admin
from .models import Office, Indicator, Dynamic
# Register your models here.
@admin.register(Office)
class OfficesAdmin(admin.ModelAdmin):
list_display = ('id', 'department', 'city')
list_filter = ('id', 'department', 'city')
search_fields = ('id', 'department', 'city')
exclude = ('slug',)
ordering = ('id',)
@admin.register(Indicator)
class IndicatorAdmin(admin.ModelAdmin):
list_display = ('group', 'name')
list_filter = ('group', 'name')
search_fields = ('group', 'name')
ordering = ('name',)
@admin.register(Dynamic)
class DynamicAdmin(admin.ModelAdmin):
list_display = ('month', 'office', 'indicator', 'value',)
list_filter = ('month', 'value',)
search_fields = ('month', 'value',)
ordering = ('month', )
|
[
"[email protected]"
] | |
03bcd0f092ca2843a4d023d01e64e3e166b7e627
|
c1bb8c962e565749576cad0207ada9b80676c49c
|
/test/bitflyer.py
|
a860b613293eb7fb71e800c6d83fb693c38f877b
|
[] |
no_license
|
mayabaha/vcts
|
ad57a5b567fd850fb0d0dbe7f37569adc49ca2f2
|
37d4fde6d16f36703c96eda5a19ad8c448710f37
|
refs/heads/master
| 2021-09-06T08:44:41.433365 | 2018-01-22T14:26:27 | 2018-01-22T14:26:27 | 107,733,425 | 0 | 0 | null | 2018-01-14T09:59:46 | 2017-10-20T22:47:08 |
Python
|
UTF-8
|
Python
| false | false | 9,825 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import requests
import pandas as pd
import datetime
import argparse
class bitflyer:
"""
bitFlyer API module
see the following for details:
    https://lightning.bitflyer.jp/docs/api?lang=ja&type=ex&_ga=2.136056049.1965297882.1509160469-180722574.1506822122#板情報
"""
PRODUCT_CODE_BTC = 0x00000001
PRODUCT_CODE_ETH = 0x00000002
PRODUCT_CODE_BCH = 0x00000004
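    # Product codes are bit flags, so several markets can be tracked at once by
    # OR-ing them together, e.g. PRODUCT_CODE_BTC | PRODUCT_CODE_ETH.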
def __init__(self, product_code_bit=0x00000000, outdir=""):
""" constructor
- product_code_bit : target product code
0x00000000 = None
0x00000001 = BTC_JPY
0x00000002 = ETH_BTC
0x00000004 = BCH_BTC
- outdir : output directory for .csv file(s)
"""
# endpoint
self.endpoint = "https://api.bitflyer.jp"
# market
self.markets = []
# set of ticker
self.tickers_btc = []
self.tickers_eth = []
self.tickers_bch = []
# set csv file for ticker
self.tickers_csv_btc = None
self.tickers_csv_eth = None
self.tickers_csv_bch = None
# open csv file
if len(outdir) > 0:
csv_btc_jpy = outdir + "/" + "ticker_btc_jpy.csv"
csv_eth_btc = outdir + "/" + "ticker_eth_btc.csv"
csv_bch_btc = outdir + "/" + "ticker_bch_btc.csv"
header = "# timestamp,product,tick_id,best_bid,best_ask,best_bid_size,best_ask_size,total_bid_depth,total_ask_depth,ltp,volume,volume_by_product\n"
try:
if product_code_bit & self.PRODUCT_CODE_BTC: # BTC_JPY
if os.path.exists(csv_btc_jpy):
self.tickers_csv_btc = open(csv_btc_jpy, "a")
else:
self.tickers_csv_btc = open(csv_btc_jpy, "w")
self.tickers_csv_btc.write(header)
if product_code_bit & self.PRODUCT_CODE_ETH: # ETH_BTC
if os.path.exists(csv_eth_btc):
self.tickers_csv_eth = open(csv_eth_btc, "a")
else:
self.tickers_csv_eth = open(csv_eth_btc, "w")
self.tickers_csv_eth.write(header)
if product_code_bit & self.PRODUCT_CODE_BCH: # BCH_BTC
if os.path.exists(csv_bch_btc):
self.tickers_csv_bch = open(csv_bch_btc, "a")
else:
self.tickers_csv_bch = open(csv_bch_btc, "w")
self.tickers_csv_bch.write(header)
except:
raise
def get(self, api):
""" invoke API to bitFlyer by GET method """
if len(api) == 0:
print("ERROR: API is not specified")
return
# invoke
url = self.endpoint + api
# print("%s: URL=%s" % (sys._getframe().f_code.co_name, url))
req = requests.get(url)
if req.status_code != 200:
print("ERROR: error occurred in invoking, errcd=%d\n" % req.status_code)
return
item = req.json()
return item
def fetchMarketStatus(self, product_code=""):
""" fetch market status
- NORMAL : active
- BUSY : busy (not at a high load)
        - VERY BUSY : at a high load
- SUPER BUSY : extremely high load
- NO ORDER : cannot accept order
- STOP : market is inactive
- FAIL : could not get market status
"""
api = "/v1/gethealth"
if product_code is not None:
api = api + "?product_code=%s" % (product_code)
# invoke
item = self.get(api)
if item is not None:
return item['status']
else:
return "FAIL"
def fetchBookStatus(self, product_code=""):
""" fetch book status """
api = "/v1/getboardstate"
if len(product_code) > 0:
api = api + "?product_code=%s" % (product_code)
item = self.get(api)
if item is not None:
return item
def fetchMarket(self):
""" fetch market list """
items = self.get("/v1/getmarkets")
if items is not None:
# clear old status
if len(self.markets) > 0:
self.markets.clear()
for item in items:
                status = self.fetchBookStatus(item['product_code'])
market = {"product_code" : item["product_code"],
"state" : status["state"],
"health" : status["health"]}
market["datetime"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.markets.append(market)
return self.markets
else:
return
def fetchTicker(self, product_code=""):
""" fetch the latest trade information
- 'product_code' : product name
- 'timestamp' : current time (UTC)
- 'tick_id' : tick ID
- 'best_bid' : the highest price of current buy order
- 'best_ask' : the lowest price of current sell order
- 'best_bid_size" : ???
- 'best_ask_size" : ???
- 'total_bid_depth" : ???
- 'total_ask_depth" : ???
- 'ltp' : last price
- 'volume' : the amount of transactions in 24hr
"""
api = "/v1/getticker"
if len(product_code) > 0:
api = api + "?product_code=%s" % (product_code)
item = self.get(api)
if item is not None:
ticker = {"timestamp" : item["timestamp"],
"product" : item["product_code"],
"tick_id" : item["tick_id"],
"best_bid" : item["best_bid"],
"best_ask" : item["best_ask"],
"best_bid_size" : item["best_bid_size"],
"best_ask_size" : item["best_ask_size"],
"total_bid_depth" : item["total_bid_depth"],
"total_ask_depth" : item["total_ask_depth"],
"ltp" : item["ltp"],
"volume" : item["volume"],
"volume_by_product" : item["volume_by_product"]}
try:
if item["product_code"] == "BTC_JPY":
self.tickers_btc.append(ticker)
self.tickers_csv_btc.write(bitflyer.ticker2str(ticker) + "\n")
elif item["product_code"] == "ETH_BTC":
self.tickers_eth.append(ticker)
self.tickers_csv_eth.write(bitflyer.ticker2str(ticker) + "\n")
elif item["product_code"] == "BCH_BTC":
self.tickers_bch.append(ticker)
self.tickers_csv_bch.write(bitflyer.ticker2str(ticker) + "\n")
else:
pass
except:
raise
return ticker
else:
return
def fetchTickerBTC(self):
""" get ticker of BTC-JPY """
try:
return self.getTicker("BTC_JPY")
except:
raise
def fetchTickerETH(self):
""" get ticker of ETH-BTC """
try:
return self.getTicker("ETH_BTC")
except:
raise
def fetchTickerBCH(self):
""" get ticker of BCH-BTC """
try:
return self.getTicker("BCH_BTC")
except:
raise
def market2str(markets):
""" convert market information to string """
header = "# date product market_status board_status\n"
line = header
for market in markets:
line = line + "%(datetime)s %(product_code)15s %(health)13s %(state)12s\n" % market
return line
def ticker2str(ticker):
""" convert ticker to string
output format:
timestamp,product,tick_id,best_bid,best_ask,best_bid_size,best_ask_size,total_bid_depth,total_ask_depth,ltp,volume,volume_by_product"
"""
line = "%(timestamp)s,%(product)s,%(tick_id)s,%(best_bid)s,%(best_ask)s,%(best_bid_size)s,%(best_ask_size)s,%(total_bid_depth)s,%(total_ask_depth)s,%(ltp)s,%(volume)s,%(volume_by_product)s" % ticker
return line
def tickers2str(tickers):
""" convert tickers to string """
line = "# timestamp,product,tick_id,best_bid,best_ask,best_bid_size,best_ask_size,total_bid_depth,total_ask_depth,ltp,volume,volume_by_product\n"
        for ticker in tickers:
            line = line + bitflyer.ticker2str(ticker) + "\n"
        return line
################################################################################
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='bitFlyer API fetch module')
parser.add_argument('-o', '--output-dir', metavar='dir', dest='outdir',
type=str, required=False, default='',
help='output directory for .csv file')
parser.add_argument('-i', '--interval', metavar='val', dest='interval',
type=int, required=False, default=1,
help='polling interval [sec]')
parser.add_argument('-c', '--count', metavar='count', dest='count',
type=int, required=False, default=-1,
help='fetch count')
parser.add_argument('-b', '--fetch-btc', dest='f_btc',
required=False, action="store_true", default=False,
help='fetch ticker of BTC_JPY')
parser.add_argument('-e', '--fetch-eth', dest='f_eth',
required=False, action="store_true", default=False,
help='fetch ticker of ETH_BTC')
parser.add_argument('-H', '--fetch-bch', dest='f_bch',
required=False, action="store_true", default=False,
help='fetch ticker of BCH_BTC')
args = parser.parse_args()
# interval check
if args.interval <= 0:
print("ERROR: interval is NOT natural number")
sys.exit(1)
# set product code bit (pcb)
pcb = 0
if args.f_btc == True:
pcb = pcb | bitflyer.PRODUCT_CODE_BTC
if args.f_eth == True:
pcb = pcb | bitflyer.PRODUCT_CODE_ETH
if args.f_bch == True:
pcb = pcb | bitflyer.PRODUCT_CODE_BCH
if pcb == 0:
print("INFO: select BTC by default")
pcb = bitflyer.PRODUCT_CODE_BTC
outdir = args.outdir
if len(outdir) == 0:
outdir = "."
# create bitflyer instance
bf = bitflyer(pcb, outdir)
print("INFO: interval=%d, count=%d, outdir=%s" % \
(args.interval, args.count, outdir))
lpcnt = args.count
while True:
try:
if args.count == -1: # infinite loop is specified
                lpcnt = 1
if lpcnt > 0:
if pcb & bitflyer.PRODUCT_CODE_BTC:
ticker = bf.fetchTickerBTC()
print(bitflyer.ticker2str(ticker))
if pcb & bitflyer.PRODUCT_CODE_ETH:
ticker = bf.fetchTickerETH()
print(bitflyer.ticker2str(ticker))
if pcb & bitflyer.PRODUCT_CODE_BCH:
ticker = bf.fetchTickerBCH()
print(bitflyer.ticker2str(ticker))
lpcnt -= 1
# print("INFO: wait for %d seconds" % args.interval)
time.sleep(args.interval)
else:
break
except KeyboardInterrupt:
break
sys.exit(0)
|
[
"takashi@hermit"
] |
takashi@hermit
|
8119ae09255af8a153504009ba5d56f6a35a0562
|
841e606be767cf7d6fdfa551daaa887c4400ec36
|
/branches/pgasync-branch/src/database/postgresql/pgasyncpool.py
|
e97ea8e8230c51048d5ad26ea5510701675dd5c9
|
[
"MIT"
] |
permissive
|
BackupTheBerlios/weever-svn
|
e4dfda7be1fc64c2d38b5d0420deee7daa5b462a
|
d7b9969f107cd9e38f633b1314416e7a50a95c50
|
refs/heads/master
| 2021-01-10T18:33:52.245594 | 2005-03-15T12:14:56 | 2005-03-15T12:14:56 | 40,748,979 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,757 |
py
|
from twisted.internet import defer
import pgasync
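# A thin asynchronous pool facade over pgasync: each run* method opens a fresh
# connection, executes through Twisted Deferreds, and _finish releases the cursor
# before firing the caller's Deferred with the result.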
class ConnectionPool(object):
def __init__(self, dbadapter, dsn, *args, **kwargs):
self.params = dsn
def runOperation(self, query, args={}):
d = defer.Deferred()
conn = pgasync.connect(**self.params)
dd = conn.cursor()
dd.addCallback(self._runOperation, conn, d, query, args)
return d
def _runOperation(self, cursor, conn, d, query, args):
cursor.execute(query, **args)
dd = conn.commit()
dd.addCallback(self._finish, d, cursor)
dd.addErrback(self._finish, d, cursor)
def runQuery(self, query, args={}):
d = defer.Deferred()
conn = pgasync.connect(**self.params)
dd = conn.cursor()
dd.addCallback(self._runQuery, conn, d, query, args)
return d
def _runQuery(self, cursor, conn, d, query, args):
dx = cursor.exFetch(query, **args)
dx.addCallback(self._finish, d, cursor)
dx.addErrback(self._finish, d, cursor)
def runInteraction(self, fun, query, args={}):
d = defer.Deferred()
conn = pgasync.connect(**self.params)
dd = conn.cursor()
dd.addCallback(self._runInteraction, fun, conn, d, query, args)
return d
    def _runInteraction(self, cursor, fun, conn, d, query, args):
        def commit(result, conn, d, cursor):
            # use a separate name for the commit Deferred so the caller's
            # Deferred `d` is the one fired by _finish
            dc = conn.commit()
            dc.addCallback(lambda _: self._finish(result, d, cursor))
            dc.addErrback(lambda _: self._finish(result, d, cursor))
        dd = fun(cursor, query, args)
        dd.addCallback(commit, conn, d, cursor)
def _finish(self, result, d, cursor):
cursor.release()
d.callback(result)
|
[
"dialtone@a440c657-b6e6-0310-a3b3-b76a39be4160"
] |
dialtone@a440c657-b6e6-0310-a3b3-b76a39be4160
|
cad5f850e0c474633290f1d954bf25d14c77d53a
|
d16813727de339ec61c02c60cf1ac8bcd9636802
|
/PreProcessamento/reducao.py
|
eef04034e60b70d536c42f45c11ec40ac1944875
|
[] |
no_license
|
joaocbrito/DataMining
|
5324d9309d67fd3ce8243007458eef7efe1466b0
|
0efe85500ce55675eee85ecae31ce058f92cba41
|
refs/heads/main
| 2023-06-08T22:38:39.502623 | 2021-07-02T12:55:02 | 2021-07-02T12:55:02 | 351,207,124 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,903 |
py
|
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
def main():
    # Read the input file
input_file = './Dataset/air-quality-clean.data'
df = pd.read_csv(input_file)
columns = list(df.columns)
target = 'CO(GT)'
# Separating out the columns
x = df.loc[:, columns].values
# Separating out the target
y = df.loc[:, [target]].values
# PCA projection
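    # Note: StandardScaler is imported but not applied, so PCA runs on the raw,
    # unscaled features; components will be dominated by high-variance columns.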
pca = PCA()
principalComponents = pca.fit_transform(x)
print("Explained variance per component:")
print(pca.explained_variance_ratio_.tolist())
print("\n\n")
principalDf = pd.DataFrame(data=principalComponents[:, 0:2],
columns=['principal component 1',
'principal component 2'])
finalDf = pd.concat([principalDf, df[[target]]], axis=1)
ShowInformationDataFrame(finalDf, "Dataframe PCA")
VisualizePcaProjection(finalDf, target)
def ShowInformationDataFrame(df, message=""):
print(message+"\n")
print(df.info())
print(df.describe())
print(df.head(10))
print("\n")
def VisualizePcaProjection(finalDf, targetColumn):
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel('Principal Component 1', fontsize=15)
ax.set_ylabel('Principal Component 2', fontsize=15)
ax.set_title('2 component PCA', fontsize=20)
targets = [1, 2, 3]
colors = ['r', 'g', 'b']
for target, color in zip(targets, colors):
indicesToKeep = finalDf[targetColumn] == target
ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1'],
finalDf.loc[indicesToKeep, 'principal component 2'],
c=color, s=50)
ax.legend(targets)
ax.grid()
plt.show()
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
64295ef2699c0f0ee65e91e48a6268e1df7ef44b
|
3ac02ea9f521d34cc385f67ad3fe19749311a551
|
/elvis/constants.py
|
71169d8779b2590bba7790acc597aab7b6a52ce2
|
[
"MIT"
] |
permissive
|
nghenzi/elvis
|
79886bd88c3fa01e7a7b8e80be4e0c8018c81ad3
|
57cc4e83b790d9970566cdd09c5aeb056534e2b5
|
refs/heads/master
| 2022-09-06T09:35:49.993300 | 2020-05-29T09:54:02 | 2020-05-29T09:54:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 86 |
py
|
from enum import Enum
class LayoutTheme(Enum):
light = 'light'
dark = 'dark'
|
[
"[email protected]"
] | |
ab550e443a8102657df2f405460ad1185bd0c03e
|
6d2f5ab1d568b1b44591e8cc85865b37539d6b22
|
/HumourDetection/src/util/katz_walk.py
|
053df3ddf0c9ccb9eaac458daee4718a3453db09
|
[] |
no_license
|
acattle/HumourTools
|
ed350bb36c3f4d886653f6625577fba0f020e3e1
|
a4522e55ca8003745eff9bc032a10c56b9cdd6fe
|
refs/heads/master
| 2021-03-27T19:50:25.225543 | 2019-06-30T22:34:32 | 2019-06-30T22:34:32 | 63,935,056 | 11 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,400 |
py
|
'''
Created on Jul 6, 2018
@author: Andrew Cattle <[email protected]>
Utility functions related to Katz Spreading Activation.
For more information see:
Simon De Deyne, Daniel J. Navarro, Amy Perfors, Gert Storms, 2016,
'Structure at every scale: A semantic network account of the
similarities between unrelated concepts.', Journal of Experimental
Psychology: General, vol. 145, no. 9, pp. 1228-1254
These Python scripts are based on the R and matlab scripts available at:
https://github.com/SimonDeDeyne/SWOWEN-2018/tree/master/R/functions
'''
from scipy.sparse import csc_matrix, diags, identity, save_npz, load_npz
from scipy.sparse.linalg import inv
from word_associations.association_readers.igraph_readers import iGraphFromTuples
import numpy as np
from time import time
# from numpy.linalg import inv
# from util.util_classes import IndexLookupWrapper
########## MATRIX OPERATIONS ##########
def _sum_as_array(mat, axis=0):
return np.squeeze(np.asarray(mat.sum(axis=axis)))
def _normalize(mat, norm_vec):
#remove inf (happens if the nromalization value of a row is 0)
norm_vec[np.isinf(norm_vec)] = 0
return diags(norm_vec, format=mat.format) * mat
# return np.diag(norm_vec) * mat
def l1_normalize(mat):
"""
L1 normalize a matrix
:param mat: the matrix to L1 normalize
:type mat: a scipy.sparse matrix
:returns: L1 normalized mat
:rtype: mat
"""
norm_vec = 1/_sum_as_array(mat,axis=1)
# norm_vec = 1/mat.sum(axis=1)
return _normalize(mat, norm_vec)
def l1_numpy(mat):
row_sums = mat.sum(axis=1)
# return mat / row_sums[:, np.newaxis] #np.newaxis implicitly reshapes row_sums from (n,) to (n,1)
#perform normalization row-by-row to avoid memory error
mat = np.copy(mat) #copy mat to avoid in-place normalization
for i, rs in enumerate(row_sums):
mat[i] = mat[i] / rs
mat[np.isinf(mat)] = 0 #get rid of infs if they happen
#TODO: is this needed?
return mat
def l2_normalize(mat):
"""
L2 normalize a matrix
:param mat: the matrix to L2 normalize
:type mat: a scipy.sparse matrix
:returns: L2 normalized mat
:rtype: mat
"""
norm_vec = 1/np.sqrt(_sum_as_array(mat**2,axis=1))
return _normalize(mat, norm_vec)
def ppmi(mat):
"""
Positive Pointwise Mutual Information
:param mat: the matrix to perform PPMI on
:type mat: scipy.sparse.csc_matrix
:returns: the PPMI matrix
:rtype: scipy.sparse.csc_matrix
"""
n=mat.shape[0]
d=diags(1/(_sum_as_array(mat,axis=0)/n), format=mat.format)
# d=np.diag(1/(mat.sum(axis=0)/n))
mat = mat*d #TODO: check that mat is a sparse matrix and not a numpy array
#TODO: currently we assume mat is sparse. Add check for numpy
mat.data = np.log2(mat.data) #only take the logs of the non-zero elements
    mat.data[mat.data < 0] = 0 #replace negative values with 0s. This is the POSITIVE part of PPMI
mat.eliminate_zeros() #get rid of any 0 values we may have added
# mat = np.log2(mat) #TODO: is this what "P@x <- log2(P@x)" does?
    # mat[mat < 0] = 0 #replace negative values with 0s. This is the POSITIVE part of PPMI
return mat
def ppmi_numpy(mat):
"""
Positive Pointwise Mutual Information
:param mat: the matrix to perform PPMI on
:type mat: scipy.sparse.csc_matrix
:returns: the PPMI matrix
:rtype: scipy.sparse.csc_matrix
"""
#pmi is log(p(x|y)/p(x))
#the values in mat are p(x|y), how is d related to p(x)?
n=mat.shape[0]
d=np.diag(n/(mat.sum(axis=0)))
mat = np.dot(mat, d)
mat[np.nonzero(mat)] = np.log2(mat[np.nonzero(mat)]) #only take the log of the non-zero elements
    mat[mat < 0] = 0 #replace negative values with 0s. This is the POSITIVE part of PPMI
return mat
def katz_walk(P, alpha=0.75):
"""
Performs the Katz Spreading Activation transformation
described in De Deyne et al. (2016)
:param P: adjacency matrix to calculate spreading activation for
:type P: scipy.sparse.csc_matrix
:param alpha: the decay weight for each path step
:type alpha: float
:returns: An adjacency matrix representing the results of the Katz walk
:rtype: scipy.sparse.csc_matrix
"""
return inv(identity(P.shape[0], format=P.format) - alpha*P)
def katz_numpy(P, alpha=0.75):
return np.linalg.inv(np.identity(P.shape[0]) - alpha*P)
########## GRAPH OPERATIONS ##########
def extract_component(G,mode="strong"):
"""
Extracts the largest strongly connected component from graph G
and converts it to a sparse adjacency matrix
:param G: the graph to extract the component from
:type G: igraph.Graph
:param mode: the clustering mode. Must be either "strong" (i.e. each node has in-degree and out-degree >= 1) or "weak" (i.e. in-degree or out-degree >= 1)
:type mode: str
:returns: the largest strongly connected component as a sparse adjacency matrix and its corresponding word->index mapping
    :rtype: Tuple[scipy.sparse.csc_matrix, Dict[str, int]]
"""
#get largest connected component only
#this reduces computational complexity
G = G.components(mode).giant()
# s=time()
# adj_mat_from_adj = np.array(G.get_adjacency(attribute="weight").data)
# print(time()-s)
# #for use converting from words to matrix indexes
# word_index = dict((n,i) for i, n in enumerate(G.vs["name"]))
# vocab_size = len(word_index)
#reorder the vocabulary to be in alphabetical order
#optional step but makes indexes easier to interpret
old_index_map = {name : i for i, name in enumerate(G.vs["name"])}
sorted_names = sorted(G.vs["name"])
new_index_map = {name : i for i, name in enumerate(sorted_names)}
old_to_new = {old_index_map[name] : new_index_map[name] for name in sorted_names}
vocab_size = len(sorted_names)
#for each edge, make an (x,y,weight) tuple.
#Then split it into separate x, y, and weight lists for constructing sparse matrix
# s=time()
xs,ys,ws = zip(*((*edge.tuple,edge["weight"]) for edge in G.es))
#update indexes
xs = [old_to_new[x] for x in xs]
ys = [old_to_new[y] for y in ys]
adj_mat = csc_matrix((ws, (xs,ys)), shape=(vocab_size, vocab_size)) #solve is more efficient for csc matrixes
# print(time()-s)
# adj_mat = adj_mat.todense()
# print(time()-s)
# s=time()
# adj_mat_from_zeros = np.zeros((vocab_size,vocab_size))
# for x,y,w in zip(xs,ys,ws):
# adj_mat_from_zeros[x,y]=w
# print(time()-s)
#
# print(adj_mat_from_adj.nbytes)
# print(adj_mat_dense.nbytes)
# adj_mat_wrapped = IndexLookupWrapper(adj_mat, new_index_map, ignore_case=True)
# return adj_mat, word_index
return adj_mat, new_index_map
def generate_katz_walk(cue_response_strengths):
#convert to iGraph
G = iGraphFromTuples(cue_response_strengths).graph
to_del = [v.index for v in G.vs if G.degree(v, mode="OUT") == 0]
G.delete_vertices(to_del)
#for compatibility with katz
G=remove_UK_words(G)
#remove self loops, multiple edges
    #TODO: should I sum multiple edges? Do they ever happen?
G.simplify(combine_edges="sum") #need to specify combine_edges or it erases the weights
#get largest connected compornent and convert to adjacency matrix
P, word_index = extract_component(G)
print("starting dense")
s=time()
P_dense = P.todense()
P_dense = l1_numpy(P_dense)
P_dense = ppmi_numpy(P_dense)
P_dense = l1_numpy(P_dense)
P_dense = katz_numpy(P_dense)
P_dense = ppmi_numpy(P_dense)
P_dense = l1_numpy(P_dense)
print(f"dense took {time()-s} seconds")
# print(f"pre-katz density: {P.nnz/(P.shape[0]*P.shape[1])}")
#
# print("starting sparse")
# s=time()
# #ensure matrix values are probabilities
# P = l1_normalize(P)
# P = ppmi(P)
# P = l1_normalize(P)
# P = katz_walk(P)
# P = ppmi(P)
# P = l1_normalize(P)
#
# print(f"sparse took {time()-s} seconds")
#
# print(f"post-katz density: {P.nnz/(P.shape[0]*P.shape[1])}")
P=None
return P, word_index, P_dense
def remove_UK_words(G):
"""
    For compatibility with De Deyne's implementation
"""
# brexit_words = set( w.upper() for w in ['aeroplane', 'arse', 'ax', 'bandana', 'bannister', 'behaviour', 'bellybutton', 'centre',
# 'cheque', 'chequered', 'chilli', 'colour', 'colours', 'corn-beef', 'cosy', 'doughnut',
# 'extravert', 'favour', 'fibre', 'hanky', 'harbour', 'highschool', 'hippy', 'honour',
# 'hotdog', 'humour', 'judgment', 'labour', 'light bulb', 'lollypop', 'neighbour',
# 'neighbourhood', 'odour', 'oldfashioned', 'organisation', 'organise', 'paperclip',
# 'parfum', 'phoney', 'plough', 'practise', 'programme', 'pyjamas',
# 'racquet', 'realise', 'recieve', 'saviour', 'seperate', 'theatre', 'tresspass',
# 'tyre', 'verandah', 'whisky', 'WIFI', 'yoghurt','tinfoil','smokey','seat belt','lawn mower',
# 'coca-cola','cell phone','breast feeding','break up','bubble gum','black out'])
brexit_words = set(['aeroplane', 'arse', 'ax', 'bandana', 'bannister', 'behaviour', 'bellybutton', 'centre',
'cheque', 'chequered', 'chilli', 'colour', 'colours', 'corn-beef', 'cosy', 'doughnut',
'extravert', 'favour', 'fibre', 'hanky', 'harbour', 'highschool', 'hippy', 'honour',
'hotdog', 'humour', 'judgment', 'labour', 'light bulb', 'lollypop', 'neighbour',
'neighbourhood', 'odour', 'oldfashioned', 'organisation', 'organise', 'paperclip',
'parfum', 'phoney', 'plough', 'practise', 'programme', 'pyjamas',
'racquet', 'realise', 'recieve', 'saviour', 'seperate', 'theatre', 'tresspass',
'tyre', 'verandah', 'whisky', 'WIFI', 'yoghurt','tinfoil','smokey','seat belt','lawn mower',
'coca-cola','cell phone','breast feeding','break up','bubble gum','black out'])
to_delete = [v.index for v in G.vs if v["name"] in brexit_words]
G.delete_vertices(to_delete)
return G
if __name__ == "__main__":
from word_associations.association_readers.xml_readers import SWoW_Dataset
# sm = csc_matrix(([0,1,2,3,4], ([0,1,2,3,3], [0,1,2,2,3])))
#
# print(sm.todense())
#
# nm = l2_normalize(sm)
# print(type(nm))
# print(nm.todense())
#make SWoW graph (as igraph.Graph)
swow_100 = SWoW_Dataset("D:/datasets/SWoW/SWOW-EN.R100.csv",complete=False, probs=False,response_types="R1").get_all_associations()
# s=time()
katz_sparse, word_index, katz_dense = generate_katz_walk(swow_100)
# print(f"took {time()-s}s")
# save_npz("katz_r1_sparse.npz",katz_sparse)
np.save("katz_r1_dedeyne.npy", katz_dense)
import pickle
with open('word_index_r1_dedeyne.pkl', "wb") as f:
pickle.dump(word_index, f)
# xs, ys = map(array, zip(*graph.get_edgelist()))
# if not graph.is_directed():
# xs, ys = hstack((xs, ys)).T, hstack((ys, xs)).T
# else:
# xs, ys = xs.T, ys.T
# return coo_matrix((ones(xs.shape), (xs, ys)))
|
[
"[email protected]"
] | |
3173c7fc8e6eed2af73d82af36bdbc28a65b6521
|
070a6843e24c0eee6397d47495effcce5e8130df
|
/rcs/account/apps.py
|
bbbb347859ab7524a9dc981f8f9692877a61dfcf
|
[] |
no_license
|
Forrest-Z/rcs
|
5e86edd08d292adafcf9ef694ed7894ff12bf2ef
|
9dd5cd9d3693b6bae9014dff365b2968b45313b7
|
refs/heads/main
| 2023-07-06T13:39:38.938705 | 2021-08-10T08:30:35 | 2021-08-10T08:30:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 150 |
py
|
from django.apps import AppConfig
class AccountConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'rcs.account'
|
[
"[email protected]"
] | |
17a54c49e8b54a1afc88c363e32b08adf3f15a77
|
239f70d1d68feec739ae309fdb1ae9432b528277
|
/flaskblog/models.py
|
0ae84f782ea263195f3a2a656e05501717eb7d2a
|
[] |
no_license
|
Acejoy/Blog-WebApp
|
5321287b396a765c832649caaa06ffb25773cfe0
|
bea38213e67f220939b40da2d2e18a11394ea9d9
|
refs/heads/main
| 2023-04-18T20:38:18.100653 | 2021-05-06T06:55:23 | 2021-05-06T06:55:23 | 360,434,931 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,093 |
py
|
from datetime import datetime
from flask_login import UserMixin
from flaskblog import db, login_manager
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
image_file = db.Column(db.String(20), nullable=False, default='default.jpeg')
password = db.Column(db.String(60), nullable=False)
posts = db.relationship('Post', backref='author', lazy=True)
def __repr__(self):
return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
content = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __repr__(self):
return f"User('{self.title}', '{self.date_posted}')"
|
[
"[email protected]"
] | |
e286247caef6608e64d3f83668b0e57d5c35c469
|
07e6fc323f657d1fbfc24f861a278ab57338b80a
|
/python/test_chem_reaction.py
|
a45fb01f6793461a249921c48059b569c7d781b2
|
[
"MIT"
] |
permissive
|
ProkopHapala/SimpleSimulationEngine
|
99cf2532501698ee8a03b2e40d1e4bedd9a12609
|
47543f24f106419697e82771289172d7773c7810
|
refs/heads/master
| 2022-09-05T01:02:42.820199 | 2022-08-28T10:22:41 | 2022-08-28T10:22:41 | 40,007,027 | 35 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 462 |
py
|
#!/usr/bin/python
import re
import numpy as np
import sys
from pySimE import chemistry as ch
#print ch.str2composition( sys.argv[1] )
#sides = ch.parseReaction( 'Fe+O2=Fe2O3' )
#sides = ch.parseReaction( 'C12H22O11+KNO3=H2O+CO2+K2CO3+N2' )
#print sides
#print ch.reaction2string( sides )
#print ch.balanceReactionString( 'Fe+O2=Fe2O3' )
print(ch.balanceReactionString( 'C12H22O11+KNO3=H2O+CO2+K2CO3+N2' ))
#print atomicBalance( reaction[0], reaction[1] )
|
[
"[email protected]"
] | |
3b0858bb9df04478fdbea75686d088f8c57597c3
|
303f984c9668fd2a099939abf1982e79b1a70f3e
|
/roll_graph.py
|
80b0fc5bbab7cf8c6ebcc3b836f400b2cc000cc8
|
[
"MIT"
] |
permissive
|
nate-r-a/catan-receipts
|
2bbc620d887fd18c849d1d2110b59f8e97275d1d
|
37254983a9847dc13409f9f312d542293ff34f3a
|
refs/heads/master
| 2021-01-10T22:42:49.150403 | 2016-11-16T22:35:46 | 2016-11-16T22:35:46 | 69,703,329 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,245 |
py
|
import plotly.plotly as py
from plotly.graph_objs import Bar, Scatter, Figure, Layout
from plotly import __version__
def create_graph(actual_rolls):
#x-axis
NUMBERS = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
#relative odds of a number being rolled
ODDS = [1, 2, 3, 4, 5, 6, 5, 4, 3, 2, 1]
#calculate expected rolls
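    # Each total k (2..12) occurs with probability ODDS[k-2]/36, so its expected
    # count over N rolls is N * ODDS[k-2] / 36, where N = sum(actual_rolls).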
expected_rolls = []
total = 0
for i in actual_rolls:
total += i
for i in ODDS:
expected_rolls.append((total/36) * i)
#Sample values for testing
#y-axis - bar
#actual_rolls = [0, 5, 2, 5, 4, 3, 8, 9, 1, 1, 2]
#y-axis - scatter
#expected_rolls = [1.1111111111111112, 2.2222222222222223, 3.3333333333333335, 4.444444444444445, 5.555555555555555, 6.666666666666667, 5.555555555555555, 4.444444444444445, 3.3333333333333335, 2.2222222222222223, 1.1111111111111112]
trace1 = Bar(x=NUMBERS,y=actual_rolls,
name = "Actual",
marker = dict(
line = dict(
color = "rgb(0,0,0)",
width = 5),
color = "rgb(255,255,255)")
)
trace2 = Scatter(x=NUMBERS, y=expected_rolls,
name = "Expected",
marker = dict(
size = 10,
color = "rgb(0,0,0)",
symbol = "hexagon"
)
)
data = [trace1, trace2]
layout = Layout(width = 365,
height = 310,
xaxis = dict(autotick = False,
tick0 = 2,
dtick = 1,
tickfont = dict(size = 18)),
yaxis = dict(tickfont = dict(size = 18)),
margin = dict(b = 25,
l = 25,
r = 0,
t = 0),
showlegend = False)
fig = Figure(data=data,layout=layout)
# Save the figure as a png image:
py.image.save_as(fig, 'dice_rolls.png')
#Sample rolls for testing
# actual_rolls = [0, 5, 2, 5, 4, 3, 8, 9, 1, 1, 2]
# expected_rolls = [1.1111111111111112, 2.2222222222222223, 3.3333333333333335, 4.444444444444445, 5.555555555555555, 6.666666666666667, 5.555555555555555, 4.444444444444445, 3.3333333333333335, 2.2222222222222223, 1.1111111111111112]
#create_graph(actual_rolls)
# trace1 = go.Scatter(
# x=NUMBERS,
# y=expected_rolls
# )
# trace2 = go.Bar(
# x=NUMBERS,
# y=actual_rolls
# )
# data = [trace1, trace2]
# py.plot(data, filename='bar-line')
|
[
"[email protected]"
] | |
d5962b41aa960e1b40b3de9eb56fc4a6813c6491
|
a7ad18e70d9f46429281490af40ce7595825640f
|
/clients/services.py
|
e2a193dff132933a0de5f0388ebfd50b424cc7a0
|
[] |
no_license
|
JEBT28/Curso-de-python-CRUD
|
2f02d4450440ead037cd4ea5f65a8a45b0baf8dd
|
ab7d3aa9bcf063eeae3c858c030784053ec6a88a
|
refs/heads/master
| 2022-12-15T07:44:53.843546 | 2020-08-24T18:10:53 | 2020-08-24T18:10:53 | 287,862,953 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,116 |
py
|
import json
import uuid
from clients.models import Client
class ClientService:
def __init__(self,file_clients):
self.file_clients=file_clients
self.load_clients()
        # print('Methods loaded')
def load_clients(self) :
file=open(self.file_clients,'r')
var = json.load(file)
clients = []
for d in var:
name = str(d.get("name"))
company = str(d.get("company"))
email = str(d.get("email"))
position = str(d.get("position"))
uid = str(d.get("uid"))
client = Client(name, company, email,position,uid)
clients.append(client)
self.clients = list(clients)
file.close()
def save_clients(self):
file=open(self.file_clients,'w')
file.write(json.dumps([c.to_dict() for c in self.clients]))
file.close()
def create_client(self,client):
if client not in self.clients:
self.clients.append(client)
else:
print('Client already exists')
self.save_clients()
def list_clients(self):
clients=[]
for client in self.clients:
if client is None:
pass
else:
clients.append(client.to_dict())
return clients
def delete_client(self, deleted_client):
clients_aux = []
for client in self.clients:
if client.uid==deleted_client.uid:
pass
else:
clients_aux.append(client)
self.clients = clients_aux
self.save_clients()
def update_client(self,updated_client):
clients_aux = []
for client in self.clients:
if client.uid==updated_client.uid:
clients_aux.append(updated_client)
else:
clients_aux.append(client)
self.clients = clients_aux
self.save_clients()
def from_str(x) -> str:
assert isinstance(x, str)
return x
|
[
"[email protected]"
] | |
4d838de8a7073096520f06dfb7f090cada807e65
|
f0364f2511721d22599eb46eda40ed3d0f3b4b00
|
/autotest_ecutest4.5/ui_Complete.py
|
ae196cd9419880f8b4a5f6114e797c7ad06eb703
|
[] |
no_license
|
KnightCpp/HIL-test-Base-On-ECU_TEST
|
19c2f4d3f0ac0e625194c8780fe5c4c15dd68372
|
4860d888e7599b3fbd7a2372bb9a3f6038c97ace
|
refs/heads/main
| 2023-03-30T01:33:45.922283 | 2021-03-25T13:24:29 | 2021-03-25T13:24:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,729 |
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'E:\temp\py\GUI\autotest_ecutest4.5\Complete.ui'
#
# Created: Fri Jan 04 10:23:27 2019
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CompleteDgl(object):
def setupUi(self, CompleteDgl):
CompleteDgl.setObjectName(_fromUtf8("CompleteDgl"))
CompleteDgl.resize(328, 110)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/haha/HaHaBundle.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
CompleteDgl.setWindowIcon(icon)
CompleteDgl.setStyleSheet(_fromUtf8(""))
self.label = QtGui.QLabel(CompleteDgl)
self.label.setGeometry(QtCore.QRect(70, 30, 171, 41))
self.label.setStyleSheet(_fromUtf8("color: rgb(0, 255, 0);\n"
"font: 20pt \"Arial\";"))
self.label.setObjectName(_fromUtf8("label"))
self.retranslateUi(CompleteDgl)
QtCore.QMetaObject.connectSlotsByName(CompleteDgl)
def retranslateUi(self, CompleteDgl):
CompleteDgl.setWindowTitle(_translate("CompleteDgl", "Complete", None))
self.label.setText(_translate("CompleteDgl", "Complete !", None))
import haha_rc
|
[
"[email protected]"
] | |
08f4aced36fe56bcec48deaa99f0e5ad628d5792
|
b978cf7f47c5cd6295f3c0c104752d3e1e9d89d6
|
/test.py
|
f88b6b9a5b2b21a543c221161f595e2588fd53b5
|
[] |
no_license
|
sepidmnorozy/backup-crawler
|
1e4cd62d5a48b6e3bf974f89d1d513765e5d9c5b
|
73beddd2febd0dec3a0d1f5706557de073035a06
|
refs/heads/master
| 2022-11-18T19:56:43.507394 | 2020-07-22T13:11:53 | 2020-07-22T13:11:53 | 281,674,079 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 455 |
py
|
from pymongo import MongoClient
from rss import rss_reader
import json
if rss_reader('https://www.khabaronline.ir/rss') == 'Success':
with open("links.json", 'r') as f:
urls = json.load(f)
else:
urls = []
client = MongoClient()
db = client['newsdb_week']
articles = db.weekarticles
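# keep only the feed URLs that are not already stored in MongoDB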
start_urls = []
for url in urls:
if articles.find_one({"url": url}) is None:
start_urls.append(url)
print(start_urls)
print(len(start_urls))
|
[
"[email protected]"
] | |
22cce56ad1cf624ac9db09d203ea57c2bd8a72fe
|
e34d4bf879910b8f41068c1efb90915897e53d53
|
/sprint/SquaresOfSortedArray.py
|
a58ff6bd16baa33b009ff18fbabf44af40766e9e
|
[] |
no_license
|
ZhouningMan/LeetCodePython
|
6cfc30f0b76f6162502410fef5639fde4801bd74
|
cad9585c440efb329c9321648f94c58ded198438
|
refs/heads/master
| 2020-12-10T03:53:48.824344 | 2020-01-13T02:29:02 | 2020-01-13T02:29:02 | 233,494,907 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 618 |
py
|
class Solution:
def sortedSquares(self, A):
size = len(A)
squares = [0] * size
for i in range(size):
squares[i] = A[i] * A[i]
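        # two-pointer pass: the largest remaining square sits at one of the
        # two ends of `squares`, so fill `copy` from the back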
copy = [0] * size
begin = 0
end = size - 1
i = size - 1
while begin <= end:
if squares[begin] > squares[end]:
copy[i] = squares[begin]
begin += 1
else:
copy[i] = squares[end]
end -= 1
i -= 1
return copy
if __name__ == '__main__':
s = Solution()
ans = s.sortedSquares([-3,-3,-2,1])
print(ans)
|
[
"[email protected]"
] | |
52dfc7c479bffded54241d1449539d22e9a4a7ca
|
fd8dbd377277a8cd41883ee19fa01ed8285f17af
|
/casey_prototype/urls.py
|
91a6aece9fe891b009545c38c982f99f50801652
|
[] |
no_license
|
tomlouismurphy/casey_prototype
|
691c3ee06c5db23e1db9baaa1b159b3f40af692e
|
0cc03cf06e3fc7bc1eb60330d689b02393e68461
|
refs/heads/master
| 2021-08-23T17:19:19.337301 | 2017-12-05T21:27:16 | 2017-12-05T21:27:16 | 112,775,245 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 836 |
py
|
"""casey_prototype URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^bottom_ninth/', include('bottom_ninth.urls')),
url(r'^admin/', admin.site.urls),
]
|
[
"[email protected]"
] | |
730fe30426ac70c128ee92b95ce2c55d3c99b67e
|
85f3dcf42563767d55994160e50fab175d51304b
|
/resumeproject/edu/urls.py
|
67630c62865efc1cd742aebfb016ec9be7ada2c1
|
[] |
no_license
|
surya-pratap-2181/All-django-Projects
|
ac6bb3f9fa4122b618d42edc5aedf726ecff2c83
|
fd7c7be19810794bade1f61ecfd5423489801c43
|
refs/heads/main
| 2023-06-24T00:26:47.861780 | 2021-07-28T02:59:59 | 2021-07-28T02:59:59 | 390,193,394 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 114 |
py
|
from django.urls import path
from . import views
urlpatterns = [
    path('skill/', views.skill, name="skill"),
]
|
[
"[email protected]"
] | |
56ab51ce296ac609989c894b2c80f0e70076ecf9
|
be2cd3e1696fa3b506c2fdcac4d388f886950811
|
/PY_files/file1.py
|
3c2f00669db616f8c7c29624c9f2f77d26e942a2
|
[] |
no_license
|
mhkr007/PY_Prgms
|
bc171f4e09cbc088d48336f597cdb476251794ca
|
ebda4d0728bc6751fd4abbdb049ed278277772d1
|
refs/heads/master
| 2020-06-20T12:56:46.218362 | 2019-07-23T07:19:41 | 2019-07-23T07:19:41 | 197,130,098 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,096 |
py
|
################################## FILE HANDLINGS ##################################################
"""These are 5 properties of files @ name,mode,readable,writable,closed"""
f=open("abc.txt",'r+')
print("File Name: ",f.name)
print("File Mode: ",f.mode)
print("Is File Readable: ",f.readable())
print("Is File Writable: ",f.writable())
print("Is File Closed : ",f.closed)
f.close()
print("Is File Closed : ",f.closed)
########################### write & writelines ############################
print("-------write----->\n")
f=open("abcde.txt",'w+') #to write
f.write("krishna\n")
f.write("Software\n")
f.write("Solutions\n")
print("Data written to the file successfully")
#f.close()
#f=open("abcd.txt",'r') #to read
pos1=f.tell()
print("current position of fptr after writing",pos1)
pos2=f.seek(0,0)
print(" position of fptr to read",pos2)
rd=f.read()
print(rd)
f.close()
############################################
print()
f=open("abcde.txt",'w')
lines=["hari\n","teja\n","hema\n","mounika"]
rd=f.writelines(lines) #writes several lines at once (returns None)
print(rd)
f.close()
print("List of lines written to the file successfully")
################### read, read(n), readline() readlines() ###################
print("read----->\n")
f=open("abcd.txt","r")
a=f.read() #prints whole data
print(a,"\n**********************************************\n")
p=f.seek(0,0)
a=f.read(10) #prints 'n' chars
print(a,"\n**********************************************\n")
p=f.seek(0,0)
a=f.readline() #with no argument, reads the next (first) line
b=f.readline() #reads the second line
c=f.readline(3) #an int argument reads up to that many chars; the rest of the line is returned by the next call
print(a,"\n***************************************************\n")
print(b)
print(c)
p=f.seek(0,0)
a=f.readlines() # """all lines comes in list format only
#if arguments are zero or empty
# else prints first line in list format""
print(a,"\n***************************************************\n")
print("end of program")
|
[
"[email protected]"
] | |
81286eab7404c79ae264329c873fd324031b3ce5
|
b7054c7dc39eeb79aa4aecb77a8de222400b19a7
|
/object.py
|
deee2a4715df5ac355f73bac61921bfff028351c
|
[] |
no_license
|
csuxh/python_fullstack
|
89027133c7f9585931455a6a85a24faf41792379
|
f78571976b3bef104309e95304892fdb89739d9e
|
refs/heads/master
| 2023-05-11T09:36:40.482788 | 2019-06-12T14:21:26 | 2019-06-12T14:21:26 | 145,090,531 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 988 |
py
|
#!/usr/bin/env python
#!-*-coding:utf-8 -*-
#!@Auther : jack.xia
#!@Time : 2018/5/29 21:56
#!@File : object.py
class Stuf(object):
count = 0
__slots__ = ('name', 'id', 'position')
    def __init__(self, name, id, position):
        # attribute names must match __slots__; the double-underscore form
        # mangles to _Stuf__name, which is not in __slots__ and raises AttributeError
        self.name = name
        self.id = id
        self.position = position
        Stuf.count += 1
    def print_obj(self):
        print('name: %s ;id: %d ;position: %s' % (self.name, self.id, self.position))
class Account(Stuf):
pass
class IT(Stuf):
pass
if Stuf.count != 0:
    print('Test failed!')
else:
    bart = Stuf('Bart', 12, '2-4')
    if Stuf.count != 1:
        print('Test failed!')
        Stuf.count += 1
        print('%d' % (Stuf.count + 1))
    else:
        lisa = Stuf('lisa', 11, '2-5')
        if Stuf.count != 2:
            print('Test failed!')
        else:
            print('Stuf:', Stuf.count)
            print('Test passed!')
#stu1 = Stuf('jack', 13, '1-2')
#stu1.print_obj()
#print(stu1.id)
#print(stu1.name)
|
[
"[email protected]"
] | |
fb4d6144389ec8eb93a016186bb5908c2683cdc8
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_clattering.py
|
3893e7f6289447dca25d947171005c4f61ce3729
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 230 |
py
|
# class header
class _CLATTERING():
    def __init__(self,):
        self.name = "CLATTERING"
        self.definitions = ['clatter']
        self.parents = []
        self.children = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['clatter']
|
[
"[email protected]"
] | |
2d0cf1dd1d5942321fd949a7ccdd5f5a5be62e1b
|
1fda2038157bfc9cce18edba04fd345c882851c9
|
/test scripts/tesst.py
|
4800010862a7114a180021885e9abb9d4b6fbd3e
|
[] |
no_license
|
yotovtsvetomir/Cloud-Providers-Notifier
|
6df8241f6980c044d3c8850dab02ff99206bbe0c
|
7d84f32bc51b2739f4c0e748a89908d8c62a958f
|
refs/heads/master
| 2021-06-27T01:21:33.815140 | 2017-09-13T11:57:17 | 2017-09-13T11:57:17 | 103,392,324 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 479 |
py
|
from pymongo import MongoClient
import datetime
client = MongoClient()
db = client.aws_us_east
posts = db.posts
#Find by region
#f = posts.find({"_id":{'$regex' : ".*-us-east"}})
#for post in f:
# print post
#Find by timeframe
#start = datetime.datetime(2016, 9, 14, 1, 31, 0, 0)
#end = datetime.datetime(2016, 9, 14, 1, 32, 0, 0)
start = 'Thu, 9 Feb 2017 19:29:00 PST'
#'Thu, 9 Feb 2017 19:29:00 PST'
d = posts.find({"published": start})
for doc in d:
    print(doc)
|
[
"[email protected]"
] | |
be150c5153affabe985e44548c355a01e4c22fda
|
1f908c05155bd905458ef0a740f67026ec4d83ea
|
/karatsubaalgorithm.py
|
5c3820c48291831348acbec9a2108d6316ed89b2
|
[] |
no_license
|
amirali1690/algorithm-coursera
|
c3214f49a14dc1daaa93f80be4109d767e8d0411
|
19d7e2246a489e939c9e3a47a70c08e0e3df0d4a
|
refs/heads/master
| 2020-05-04T13:23:27.640717 | 2020-01-23T04:49:12 | 2020-01-23T04:49:12 | 179,158,311 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 511 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 7 18:18:49 2019
@author: amir
"""
import math
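# Split each operand around 10**nf, so x = a*10**nf + b and y = c*10**nf + d,
# then x*y = ac*10**(2*nf) + (ad + bc)*10**nf + bd. Note this variant makes
# four recursive calls; classic Karatsuba gets ad+bc from (a+b)(c+d) - ac - bd.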
def karatsuba(x,y):
    if x<10 and y<10:
        return x*y
    else:
        n = int(math.log10(x))    # order of magnitude of x
        nf = n//2 + 1             # split position, in decimal digits
        a, b = divmod(x, 10**nf)  # exact integer high/low halves of x
        c, d = divmod(y, 10**nf)  # exact integer high/low halves of y
        ac = karatsuba(a,c)
        bd = karatsuba(b,d)
        adbc = karatsuba(a,d) + karatsuba(b,c)
        return ac * 10**(2*nf) + adbc * 10**nf + bd
|
[
"[email protected]"
] | |
f41fa5c42bc1aebcc2d08cd24120b101b406944b
|
a370f6b81cbfe2a956b59db40fffc32526088b00
|
/analytical.py
|
012d1f5e47ba62858606750415e1bfbb70c99284
|
[] |
no_license
|
Mountiko/armageddon
|
10d607335e72a47078efa5f30811e607fb1fb4bc
|
0244ee604fbea2ecb1e9d54cd2e186b176b9d9a9
|
refs/heads/master
| 2020-11-24T04:13:57.740850 | 2019-12-14T18:05:52 | 2019-12-14T18:05:52 | 227,960,164 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,694 |
py
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def anal_sol(H_plot, radius=10, velocity=20e3, density=3000, strength=10e5, angle=45,
init_altitude=100e3, radians=False):
'''
    Analytical solution for a meteoroid impact
Parameters
----------
radius : float
The radius of the asteroid in meters
velocity : float
The entery speed of the asteroid in meters/second
density : float
The density of the asteroid in kg/m^3
strength : float
The strength of the asteroid (i.e., the ram pressure above which
fragmentation and spreading occurs) in N/m^2 (Pa)
angle : float
The initial trajectory angle of the asteroid to the horizontal
By default, input is in degrees. If 'radians' is set to True, the
input should be in radians
init_altitude : float, optional
Initial altitude in m
radians : logical, optional
Whether angles should be given in degrees or radians. Default=False
Angles returned in the DataFrame will have the same units as the
input
Returns
-------
Result : DataFrame
        pandas DataFrame with columns:
altitude, velocity, dedz
'''
# define constants
Cd = 1 # drag coefficient
    H = 8000 # atmospheric scale height (m)
rho = 1.2 # air density at the ground
# define initial conditions
    m = 4/3 * np.pi * radius**3 * density # mass, assuming a spherical asteroid
A = np.pi * radius**2 # cross-sectional area
if radians is False: # converts degrees to radians
angle = angle * (np.pi)/180
# constant in analytical solution
c = velocity/(np.exp((-Cd * A * rho * H / (2 * m * np.sin(angle))) * np.exp(-init_altitude/H)))
def v_h(h):
return c * np.exp((-Cd * A * rho * H / (2 * m * np.sin(angle))) * np.exp(-h/H))
C2 = -Cd * A * rho * H / (2 * m * np.sin(angle))
def dEdz(z):
return c * np.exp(C2 * np.exp(-z/H)) * C2 * np.exp(-z/H) * (-1/H) * m * v_h(z)
#H_plot = np.linspace(100000, 0, 200)
v_plot = v_h(H_plot)
dedz = np.zeros((len(v_plot),)) # create array to store dedz results
dedz[0] = 0 # initial dedz
for i in range(1,len(v_plot)): # loop through all rows of result
energy = ((1/2 * m * v_plot[i]**2) - (1/2 * m * v_plot[i-1]**2))/4.184e12
alt = (H_plot[i] - H_plot[i-1])/1e3
dedz[i] = energy / alt
#dEdz_plot = dedz(H_plot)
result = pd.DataFrame({'altitude':H_plot, 'velocity':v_plot, 'dedz':dedz})
#result = result.sort_values(by='altitude', ascending=False)
return result
|
[
"[email protected]"
] | |
004f6ee2f5299f8fa21061fd22b3afc3270998c3
|
c5087f002963ca81c32fb1f7b801125955dd484f
|
/main.py
|
eb653007dc8acbc884353abac64969a681c84230
|
[] |
no_license
|
davidhjp/pysosj
|
a6ee12c99f14b6cf56ef975fed65f5cf740c9084
|
cb955fbae0f745ef7c1787be62c54c989d28483d
|
refs/heads/master
| 2021-06-10T21:47:42.735106 | 2017-01-24T03:39:53 | 2017-01-24T03:39:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 299 |
py
|
import pysosj
if __name__ == "__main__":
    sss = pysosj.SJChannel("127.0.0.1", 1200, "127.0.0.1", 1100)
    while 1:
        val = sss.receive("CD2.I", "CD.I")
        print("received " + val)
        sss.send("CD2.I2", "CD.I2", "hallo")
        print("sent")
sss.close()
while(1):
pass
|
[
"[email protected]"
] | |
ad614d1c10782d42d169c7a5b58b58df2d9e4ac2
|
caceb60f71165772b6d6155f619e79189e7c80a9
|
/็ฌฌไธๆ/ๅไบฌ-ๆฒงๆพ/iniๆไปถ่ฏปๅ.py
|
a7089ca87611d7a683cc4043345514b042aed299
|
[
"Apache-2.0"
] |
permissive
|
beidou9313/deeptest
|
ff41999bb3eb5081cdc8d7523587d7bc11be5fea
|
e046cdd35bd63e9430416ea6954b1aaef4bc50d5
|
refs/heads/master
| 2021-04-26T23:06:08.890071 | 2019-04-03T02:18:44 | 2019-04-03T02:18:44 | 123,931,080 | 0 | 0 |
Apache-2.0
| 2018-03-05T14:25:54 | 2018-03-05T14:25:53 | null |
UTF-8
|
Python
| false | false | 1,048 |
py
|
# -*- coding: utf-8 -*-
import configparser
if __name__ == "__main__":
config = configparser.ConfigParser()
    # First, add a new section
    config.add_section('ไธญๅฝ')
    # Add key-value pairs under the new section
config.set('ไธญๅฝ', 'ๆฒณๅ', '็ณๅฎถๅบ')
config.set('ไธญๅฝ', 'ๆฒณๅ', '้ๅท')
config.set('ไธญๅฝ', 'ๅฑฑไธ', 'ๆตๅ')
    # Add another section, but without any key-value pairs
config.add_section('ๆฐๅฝ')
with open('iniConfig.ini', 'w') as configfile:
config.write(configfile)
##########################################
    # Read the ini file back
config.read('iniConfig.ini')
    # Get all sections
sections = config.sections()
print(sections)
    # Get all options under each section
for sec in sections:
options = config.options(sec)
print(options)
    # Get the value for each (section, option) pair
for sec in sections:
for options in config.options(sec):
print("[%s] %s=%s" % (sec, options, config.get(sec, options)))
|
[
"[email protected]"
] | |
8c8ddb6eb22d55c5cb1ea8f0c1998adaff30342f
|
28811880a917a1e1ec24a844999d950dc1a5d057
|
/0x07-python-test_driven_development/4-print_square.py
|
4d81d08b600778252b9240006e576c46b7c1393d
|
[] |
no_license
|
OrangeB0lt/holbertonschool-higher_level_programming
|
b591ceb8e0710cb26c78407a266421488f325678
|
f50668f78ffb861b305e0d691c29cd1b817d9ec0
|
refs/heads/master
| 2020-05-18T03:55:07.262414 | 2019-09-26T23:22:01 | 2019-09-26T23:22:01 | 184,158,871 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 445 |
py
|
#!/usr/bin/python3
"""
Prints squares
based on size input
woohoo
"""
def print_square(size):
"""
print_square: prints a square based on input number for size
"""
if not isinstance(size, int):
raise TypeError("size must be an integer")
if size < 0:
raise ValueError("size must be >= 0")
for idx in range(size):
for cnt in range(size):
print("#", end="")
print()
|
[
"[email protected]"
] | |
93a3267e51df3e2d5d2bf140872b6e2e1a563f40
|
53b2358c6089be2c51ac2768a77fc303d563550d
|
/assembly-scripts/combine_cds.py
|
3703c7cac212c41cfac0a5244748a064d01f18fd
|
[] |
no_license
|
bethsheets/Population-Genomics-via-RNAseq
|
684e7cd5a667a335f7b3e1111e1ccd6eb85533c6
|
3cb3ee912f855e8a9981874f4ff160551f8b8db3
|
refs/heads/docs
| 2020-04-12T05:42:48.038672 | 2019-07-12T17:24:37 | 2019-07-12T17:24:37 | 60,875,099 | 2 | 10 | null | 2017-04-14T15:16:23 | 2016-06-10T20:29:39 |
Python
|
UTF-8
|
Python
| false | false | 773 |
py
|
#usage: python combine_coding.py input_cds_grepped_from_gff_then_gff2bed_awk_prepend_strand.fa out.fa
import sys
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
coding=SeqIO.parse(sys.argv[1],'fasta',generic_dna)
combined=dict()
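# group CDS pieces by transcript id, concatenating sequences in file order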
for cds in coding:
name=cds.id
transcript=name.split('.cds')[0]
if transcript in combined:
combined[transcript]=combined[transcript]+cds.seq
else:
cds.id=transcript
combined[transcript]=cds
final=dict()
for transcript, cds in combined.iteritems():
if transcript[0]=='-':
cds.seq=cds.seq.reverse_complement()
cds.id=str(transcript)[1:]
combined[transcript]=cds
OUT = open(sys.argv[2], "w")
SeqIO.write(combined.values(), OUT, "fasta")
OUT.close()
|
[
"[email protected]"
] | |
0bb86cc88e336ab207cb59291392165129948a10
|
c2e43c48aefa729962097d5fb470777c044c5a85
|
/venv/bin/f2py3.9
|
66a4eb1360ee8269a0e9a5ce727cd677237fc0f7
|
[] |
no_license
|
HiroshigeAoki/Optiver_Realized_Volatility_Prediction
|
57a14d374a65fe2419eb3c84ed9d9f6fc3faeef6
|
21bdbe2ec2ed193db6c28ebfa3f5313539c0a188
|
refs/heads/main
| 2023-07-11T01:47:49.631676 | 2021-08-17T06:47:02 | 2021-08-17T06:47:02 | 397,778,792 | 0 | 0 | null | 2021-08-19T01:19:57 | 2021-08-19T01:19:57 | null |
UTF-8
|
Python
| false | false | 289 |
9
|
#!/home/bi18056/VScode_workplaces/Optiver_Realized_Volatility_Prediction/venv/bin/python3.9
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | |
c819640f419b8ec2a2e4db92176f8a8578bd998a
|
f9483d708e9df7b38b7ae1d58726a4e186780473
|
/app/core/models.py
|
e976e6abe197bd1e740b9b98e95f5c3609ea0be1
|
[
"MIT"
] |
permissive
|
dev-tanvir/rest_api_docker
|
6db6bf616484ab29e9446795158c8facc2489fae
|
09804cfbc6332d6cfbb25b09813b36c338051ba3
|
refs/heads/main
| 2023-06-20T08:41:53.535214 | 2021-07-18T11:16:19 | 2021-07-18T11:16:19 | 367,130,087 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,099 |
py
|
import uuid
import os
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings # recommended way to import settings in django
def synthesize_image_file_path(instance, main_filename):
"""return a valid path for uploaded file with unique name"""
main_file_extension = main_filename.split('.')[-1]
new_filename = f'{uuid.uuid4()}.{main_file_extension}'
return os.path.join('uploads/synthesize/', new_filename)
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
""" Creates and saves a new user"""
if not email:
raise ValueError('Users need to pass an valid email address!')
        # the raw password is not passed to the model constructor because it
        # must be hashed, never saved in clear text
        user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
""" Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom User model that supports Creating a user using email in stead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = "email"
class Tag(models.Model):
"""Model for tag management for Synthesize"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self) -> str:
"""String reprensation of tag object"""
return self.name
class Chemcomp(models.Model):
"""Model for chemical components of synthesizer"""
name = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
def __str__(self) -> str:
return self.name
class Synthesize(models.Model):
"""Model for declaring a synthesize for life"""
title = models.CharField(max_length=255)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
)
time_years = models.IntegerField()
link = models.CharField(blank=True, max_length=255)
chemcomps = models.ManyToManyField('Chemcomp')
tags = models.ManyToManyField('Tag')
chance = models.DecimalField(max_digits=5, decimal_places=2)
image = models.ImageField(null=True, upload_to=synthesize_image_file_path)
def __str__(self) -> str:
return self.title
|
[
"[email protected]"
] | |
a0c529fe9ac1114d4ea620a3a09ab644868c12c2
|
7c59bbd4ff413a95dc9d25fbfccd11c6db60202a
|
/python_stack/full_stack_django/test_orm/apps/test_orm_app/migrations/0001_initial.py
|
ff84e3ca46db76c12c5baaeb018a42283bcbe193
|
[] |
no_license
|
soikatesc/DojoAssignments
|
9a185a1164e42a985aea5e49d0ee270fd476d42a
|
c5c84bc9bd4aedd0fe6aa26bf75793e284edb248
|
refs/heads/master
| 2021-01-23T04:34:19.617679 | 2017-05-16T03:52:58 | 2017-05-16T03:52:58 | 86,211,544 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,310 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-19 00:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('blog', models.TextField(max_length=1000)),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now=True)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(max_length=1000)),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now=True)),
('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='test_orm_app.Blog')),
],
),
]
|
[
"[email protected]"
] | |
e1ca4ac4ad045dfef7ec413194552a242acdc4be
|
1636f64b079a7cbb9e27bb27947eddc6c88aef61
|
/Chapter7/second/models/model.py
|
74f2a137eb79352e4745898fdc33774c888c0e1c
|
[] |
no_license
|
TJJTJJTJJ/pytorch__test
|
436a74a57f83c800b90dc063ef1976c20a3abc2b
|
e8bf4e9f6a3d23a7d577c0c78b93d9f5a7561ca5
|
refs/heads/master
| 2020-03-27T18:11:24.889333 | 2018-10-04T09:45:20 | 2018-10-04T09:45:20 | 146,904,232 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,287 |
py
|
# coding: utf-8
# In[1]:
from torch import nn
from .BasicModule import BasicModule
from torch import autograd
import torch
# ### Generator definition: init(), forward()
# In[2]:
class NetG(BasicModule):
"""
    Generator definition
__init__()
forward()
"""
def __init__(self,opt):
super(NetG,self).__init__()
ngf = opt.ngf
self.main = nn.Sequential(
            # input: 1 x nz x 1 x 1 noise tensor
nn.ConvTranspose2d(opt.nz,ngf*8, 4,1,0,bias=False),
nn.BatchNorm2d(ngf*8),
nn.ReLU(True),
# (ngf*8)*4*4
nn.ConvTranspose2d(ngf*8, ngf*4, 4,2,1,bias=False),
nn.BatchNorm2d(ngf*4),
nn.ReLU(True),
# (ngf*4)*8*8
nn.ConvTranspose2d(ngf*4, ngf*2, 4,2,1,bias=False),
nn.BatchNorm2d(ngf*2),
nn.ReLU(True),
# (ngf*2)*16*16
nn.ConvTranspose2d(ngf*2, ngf*1, 4,2,1,bias=False),
nn.BatchNorm2d(ngf*1),
nn.ReLU(True),
# (ngf*1)*32*32
nn.ConvTranspose2d(ngf, 3, 5,3,1,bias=False),
nn.Tanh()
# 3*96*96 range(-1,1)
)
def forward(self,x):
return self.main(x)
# ### Discriminator definition
# In[3]:
class NetD(BasicModule):
"""
    Discriminator
__init__()
forward()
"""
def __init__(self,opt):
super(NetD, self).__init__()
ndf = opt.ndf
self.main = nn.Sequential(
            # the exact mirror of the generator
# 3*96*96
nn.Conv2d(3, ndf, 5, 3, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# ndf*32*32
nn.Conv2d(ndf,ndf*2,4,2,1,bias=False),
nn.BatchNorm2d(ndf*2),
nn.LeakyReLU(0.2,inplace=True),
nn.Conv2d(ndf*2,ndf*4,4,2,1,bias=False),
nn.BatchNorm2d(ndf*4),
nn.LeakyReLU(0.2,inplace=True),
nn.Conv2d(ndf*4,ndf*8,4,2,1,bias=False),
nn.BatchNorm2d(ndf*8),
nn.LeakyReLU(0.2,inplace=True),
nn.Conv2d(ndf*8,1,4,1,0,bias=False),
# batch*1*1*1
nn.Sigmoid()
)
def forward(self,x):
return self.main(x).view(-1) # batch
|
[
"[email protected]"
] | |
b0199dbaf2cbde5fff7bebea9f30687f1e16eeeb
|
6dc6287827a8b2e9bfb948624f62cc465c54fe12
|
/ch09/qos.py
|
ff5584e1ff5cb30938d1ed42cd0db0c752f3f34c
|
[
"MIT"
] |
permissive
|
AzureCloudMonk/Python-Networking-Cookbook
|
c1f8db96037e6c6a0d24cf4d9339e32d3ba513a0
|
26945c781a51fe72cc01409df6b5c5fa7df53f4c
|
refs/heads/main
| 2023-07-13T09:20:51.416840 | 2021-08-22T18:10:01 | 2021-08-22T18:10:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 879 |
py
|
import os
import sys
import requests
base_url = "https://api.meraki.com/api/v1"
key = os.environ.get("MERAKI_DASHBOARD_API_KEY", None)
if key is None:
print("Please provide an API key. Aborting.")
sys.exit(-1)
sess = requests.Session()
sess.headers.update({
"X-Cisco-Meraki-API-Key": key
})
network_id = "L_783626335162466515"
url = f"{base_url}/networks/{network_id}/switch/qosRules"
resp = sess.get(url)
if resp.status_code == 200:
rules = resp.json()
for rule in rules:
url_del = f"{base_url}/networks/{network_id}/switch/qosRules/{rule['id']}"
resp_del = sess.delete(url_del)
if resp_del.status_code == 204:
print(f"Deleted QoS rule {rule['id']}")
else:
print(f"Failed on delete request. Status: {resp_del.status_code}")
else:
print(f"Failed to retrieve rules. Status: {resp.status_code}")
|
[
"[email protected]"
] | |
cdfc02d15189f0e0ad498bfae17a3ee08c544cc0
|
d9cf5ce593b91c63139e4bd831f3ba99a3407d05
|
/analyze_timings.py
|
486158770c30fe74fa794ad1a058012d75a1ccb6
|
[] |
no_license
|
MaxwellDeJong/parallel_mcmc
|
51c6d7794424c4adf2d3e7ed85d8d3ade07dce57
|
4d9f41e402c35a295141057d8caab0bf85d1b033
|
refs/heads/main
| 2023-02-20T05:04:55.999668 | 2021-01-10T03:13:39 | 2021-01-10T03:13:39 | 328,289,809 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,105 |
py
|
import numpy as np
import matplotlib.pyplot as plt
def plot_timings(n):
filename = 'timings_' + str(n) + '.txt'
timings = np.loadtxt(filename)
plt.scatter(timings[:, 0], timings[:, 1])
plt.xlabel('Number of Steps')
plt.ylabel('Execution Time (ms)')
plt.title('Scaling with ' + str(n) + ' Dimensions')
plt.xlim((0, max(timings[:, 0]) + 5000))
plt.show()
def plot_dim_timings():
filename = 'timings_dim.txt'
timings = np.loadtxt(filename)
plt.scatter(timings[:, 0], timings[:, 1])
plt.xlabel('Number of Dimensions')
plt.ylabel('Execution Time (ms)')
plt.title('Dimensional Scaling with 3000 Steps')
plt.xlim((0, max(timings[:, 0]) + 5))
plt.show()
def plot_data_timings():
filename = 'timings_data.txt'
timings = np.loadtxt(filename)
plt.scatter(timings[:, 0], timings[:, 1])
plt.xlabel('Number of Data Points')
plt.ylabel('Execution Time (ms)')
plt.title('Data Scaling with 10000 Steps')
plt.xlim((-100, max(timings[:, 0]) + 600))
plt.show()
plot_timings(20)
plot_dim_timings()
plot_data_timings()
|
[
"[email protected]"
] | |
a981d72e03c71c1df680fd7aebeddc9f0d707d99
|
58c0c6cd1da0a0b70c14787fbbd5a5af5161ac15
|
/venv/Scripts/rst2odt.py
|
6925f5b6c183bc706e358ba95f88170286c59ba7
|
[
"MIT"
] |
permissive
|
RafaelHMachado/Cioffis_Automation
|
6454d33558a4f4b63412d1d068726ca73feddeea
|
07965ca71c3d4e78f5cee1fce4ba0bbfe2db9811
|
refs/heads/main
| 2023-06-06T07:04:11.182106 | 2021-07-03T07:39:28 | 2021-07-03T07:39:28 | 382,553,454 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 802 |
py
|
#!C:\Users\Eng\Documents\Project\venv\Scripts\python.exe
# $Id: rst2odt.py 5839 2009-01-07 19:09:28Z dkuhlman $
# Author: Dave Kuhlman <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A front end to the Docutils Publisher, producing OpenOffice documents.
"""
import sys
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline_to_binary, default_description
from docutils.writers.odf_odt import Writer, Reader
description = ('Generates OpenDocument/OpenOffice/ODF documents from '
'standalone reStructuredText sources. ' + default_description)
writer = Writer()
reader = Reader()
output = publish_cmdline_to_binary(reader=reader, writer=writer,
description=description)
|
[
"[email protected]"
] | |
7730264c505a3c732597e1d232c28b694ce63cd6
|
75d41c04791c047309607ce554f0b3e72f94b4cb
|
/app.py
|
3de14fdca36e861dfd5823560a0333681018862f
|
[
"MIT"
] |
permissive
|
lyfyork/LAB2
|
6e84d18dc22c98e9b0f2a1e550d39eb7cf022fe8
|
a76adfbe0e8077e06e2c25457df5ace67e64bc10
|
refs/heads/master
| 2020-05-14T15:17:38.807117 | 2019-04-17T09:05:28 | 2019-04-17T09:05:28 | 181,841,829 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 17 |
py
|
# This is app.py
|
[
"[email protected]"
] | |
a135986681b422aa38dc0f73eb018ec8b5e5de5b
|
ae8a2e748976c702da93be5fceb415683e26dae4
|
/sumit1136/cartoonifyRealTime.py
|
488f4c9475a76e657648fbf4469ba2a39a9a6b03
|
[] |
no_license
|
hackslash-nitp/cartoonify
|
1a19e1c57959103f61d968e773f11babdf2f699c
|
7f4ac7329de79c63855dd80727a18c0eab6be577
|
refs/heads/main
| 2023-02-17T11:11:23.705255 | 2021-01-19T23:35:03 | 2021-01-19T23:35:03 | 322,804,672 | 1 | 6 | null | 2021-01-19T23:35:04 | 2020-12-19T08:48:18 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,080 |
py
|
# import the opencv library
import cv2
import numpy as np
# define a video capture object
vid = cv2.VideoCapture(0)
while True:
# Capture the video frame
# by frame
ret, frame = vid.read()
# Display the resulting frame
cv2.imshow('frame 1', frame)
# frame=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# cv2.imshow('frame 2', frame)
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
# cv2.imshow('frame 2', gray)
imgBlur = cv2.medianBlur(gray, 5)
# cv2.imshow('frame 2', imgBlur)
imgEdge = cv2.adaptiveThreshold(imgBlur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
# cv2.imshow('frame 2', imgEdge)
colored = cv2.bilateralFilter(frame, 9, 250, 250)
cartoon = cv2.bitwise_and(colored, colored, mask=imgEdge)
cv2.imshow('frame 2', cartoon)
# the 'q' button is set as the
# quitting button you may use any
# desired button of your choice
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# After the loop release the cap object
vid.release()
# Destroy all the windows
cv2.destroyAllWindows()
|
[
"[email protected]"
] | |
f0921f29f3f682945a8f671213dc391d565db088
|
9d41570295cc05b66fd52584a90fe87f29155943
|
/src/crawler/delay.py
|
649fb6282c26a77936487a5bcd18eeda56ff6aa7
|
[
"MIT"
] |
permissive
|
diegojromerolopez/relwrac
|
ed56feeb2a5e455e0fa58f6bc130445e5a0831bd
|
23ee278ab4019b98269419c53feed2194f079c25
|
refs/heads/master
| 2022-12-11T08:06:19.888698 | 2019-11-16T12:35:34 | 2019-11-16T12:35:34 | 219,372,323 | 0 | 0 |
MIT
| 2022-12-08T06:49:05 | 2019-11-03T22:09:35 |
Python
|
UTF-8
|
Python
| false | false | 294 |
py
|
import random
class Delay(object):
@classmethod
def none(cls):
return None
@classmethod
def uniform(cls, lower_bound: float, upper_bound: float):
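        # return a zero-argument callable that draws a fresh uniform delay each call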
def uniform_delay_():
return random.uniform(lower_bound, upper_bound)
return uniform_delay_
|
[
"[email protected]"
] | |
b6cd32dd7c58e44b484925d0981c527b8eb6d61f
|
ddd09683d9cbd681db5dae4e2d036d28bd4d24c1
|
/PA3/BAL3.py
|
f82978400cd729be26ca286631abcea6caa2356a
|
[] |
no_license
|
nivedn3/DL4CV-EE6132-
|
41f9cd877a4c43db0a2f511a57df8b624fbc0a07
|
2cd97c7d2170a8e4fe36b6ccc8443c009e3d003a
|
refs/heads/master
| 2021-01-20T05:41:37.019460 | 2017-11-22T10:17:16 | 2017-11-22T10:17:16 | 101,465,640 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,240 |
py
|
import tensorflow as tf
import numpy as np
sess = tf.InteractiveSession()
def data(number,size):
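    # build `number` random binary-addition examples: each input interleaves
    # the bits of two length-`size` operands; the target is their (size+1)-bit sum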
a = []
b = []
out = []
for i in range(number):
a_in = np.random.choice([0,1],size)
a_in = a_in.tolist()
#a_in = [1,0,0,0,0]
b_in = np.random.choice([0,1],size)
b_in = b_in.tolist()
#b_in = [1,0,0,0,0]
a_str = ','.join(str(x) for x in a_in).replace(',','')
b_str = ','.join(str(x) for x in b_in).replace(',','')
c = bin(int(a_str,2) + int(b_str,2)).split('b')[1]
c = [int(i) for i in list(c)]
c_out = np.array(c)
if len(c_out) == size:
c_out = np.insert(c_out,0,0)
if len(c_out) < size:
while(len(c_out) != size+1):
c_out = np.insert(c_out,0,0)
test = []
for j in range(len(a_in)):
test.append(a_in[j])
test.append(b_in[j])
a.append(test)
#b.append(b_in)
out.append(c_out)
return a,out
size = 3
hs = 5
x = tf.placeholder(tf.float32,shape = [None,size,2])
y = tf.placeholder(tf.float32,shape = [None,size+1])
w = tf.Variable(tf.random_normal([hs,size+1]))
b = tf.Variable(tf.random_normal([size+1]))
rnn_inp = tf.unstack(x,size,1)
lstm = tf.contrib.rnn.BasicRNNCell(hs)
outputs, states = tf.contrib.rnn.static_rnn(lstm, rnn_inp, dtype=tf.float32)
logits = tf.sigmoid(tf.matmul(outputs[-1], w) + b)
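# threshold the sigmoid outputs at 0.5 into hard 0/1 predictions:
# shift by -0.5, relu away negatives, scale up, then clip to [0, 1]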
logitst = tf.add(logits, tf.scalar_mul(-0.5, tf.ones_like(logits)))
logitst = tf.nn.relu(logitst)
logitst = tf.scalar_mul(1000000, logitst)
logitst = tf.clip_by_value(logitst, 0, 1)
logitsc = tf.cast(logitst,tf.int32)
yc = tf.cast(y,tf.int32)
with tf.name_scope("cross_entropy"):
#cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits,labels = y))
cross_entropy = tf.losses.mean_squared_error(labels = y, predictions = logits)
tf.summary.scalar('cross entropy',cross_entropy)
with tf.name_scope("train"):
train_step = tf.train.AdamOptimizer(0.1).minimize(cross_entropy)
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(logitsc,yc)
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
tf.summary.scalar('accuracy',accuracy)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("/home/psycholearner/projects//DL4CV-EE6132-/PA3/2035")
writer.add_graph(sess.graph)
writer2 = tf.summary.FileWriter("/home/psycholearner/projects/DL4CV-EE6132-/PA3/20351")
writer2.add_graph(sess.graph)
sess.run(tf.global_variables_initializer())
for i in range(20000):
a,batch_y = data(500,size)
batch_x = np.array(a)
batch_x = batch_x.reshape(500,size,2)
batch_x = [j[::-1] for j in batch_x]
batch_x = np.array(batch_x)
batch_x.astype(float)
batch_y = np.array(batch_y)
#batch_y.astype(float)
if i % 25 == 0:
s = sess.run(merged_summary,feed_dict = {x: batch_x,y: batch_y})
writer.add_summary(s,i)
at,batch_yt = data(500,size)
batch_xt = np.array(at)
batch_xt = batch_xt.reshape(500,size,2)
batch_xt = [j[::-1] for j in batch_xt]
batch_xt = np.array(batch_xt)
batch_xt.astype(float)
batch_yt = np.array(batch_yt)
k = sess.run(merged_summary,feed_dict = {x: batch_xt,y: batch_yt})
writer2.add_summary(k,i)
#train_accuracy = sess.run(accuracy.eval(feed_dict={x: batch[0], y: batch[1]}))
#[train_accuracy] = sess.run([cross_entropy],feed_dict = {x: batch_x, y:batch_y})
#[test] = sess.run([accuracy],feed_dict = {x: batch_x, y:batch_y})
#logits = sess.run([accuracy],feed_dict = {x: batch_x, y:batch_y})
#print('step %d, training accuracy %g %g' % (i, train_accuracy,test))
#[test_acc] = sess.run([test_accuracy],feed_dict = {x: mnist.test.images, y:mnist.test.labels})
#print('step %d, test accuracy %g' % (i, test_acc))
#saver.restore(sess, "/home/psycholearner/projects//DL4CV-EE6132-/PA2/model.ckpt")
sess.run(train_step,feed_dict = {x:batch_x,y:batch_y})
'''
test_data = mnist.test.images[:128].reshape((-1, 28, 28))
test_label = mnist.test.labels[:128]
print("Testing Accuracy:",sess.run([accuracy], feed_dict={x: test_data, y: test_label}))
'''
a,batch_y = data(500,size)
batch_x = np.array(a)
batch_x = batch_x.reshape(500,size,2)
batch_x = [j[::-1] for j in batch_x]
batch_x = np.array(batch_x)
batch_x.astype(float)
batch_y = np.array(batch_y)
print("Testing Accuracy:",sess.run([accuracy], feed_dict={x: batch_x, y: batch_y}))
|
[
"[email protected]"
] | |
2892259f2d817721cf02d7066f632c0970f63743
|
c9317c7703f05c3dd17c29aaadf9062b60bedd37
|
/website/view.py
|
1b984a7eb87633e8ebc56c91044d081162c065c0
|
[] |
no_license
|
gliv001/appointment_webapp
|
778bd5983fad27cfcf2a159f1f72ff9efa37e989
|
bb7a003c5d25c1364e25b0c6e160e9b24b66f490
|
refs/heads/main
| 2023-05-26T12:57:59.134742 | 2021-06-14T01:13:02 | 2021-06-14T01:13:02 | 371,082,262 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,255 |
py
|
from website.forms import AppointmentForm, EmployeeForm, ServiceForm
from flask import Blueprint, render_template, request, redirect, flash
from flask.helpers import url_for
from .models import (
Appointments,
Employees,
LoginHistory,
Services,
ApptUsers,
UserLevel,
db,
)
from datetime import datetime
from flask_login import login_required, current_user
from werkzeug.security import generate_password_hash
import simplejson as json
view = Blueprint("view", __name__)
@view.route("/", methods=["POST", "GET"])
@view.route("/appointments", methods=["POST", "GET"])
@login_required
def appointment_home():
form = AppointmentForm()
if request.method == "POST":
if form.validate_on_submit():
client = form.client.data
service = form.service.data
employee = form.employee.data
appt_date = form.appt_date.data
appt_time = form.appt_time.data
tip = form.tip.data
total = form.total.data
if tip == "":
tip = 0
new_appt = Appointments(
client=client,
serviceid=service,
employeeid=employee,
apptdatetime=datetime.strptime(
f"{appt_date} {appt_time}", "%Y-%m-%d %H:%M:%S"
),
tips=tip,
total=total,
)
try:
db.session.add(new_appt)
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue adding a new appointment", category="error")
return redirect("/appointments")
employeeList = Employees.query.all()
serviceList = Services.query.all()
form.employee.choices = [(e.id, e.uname) for e in employeeList]
form.service.choices = [(s.id, f"{s.sname} ${s.price}") for s in serviceList]
if len(serviceList) < 1:
flash("There are no services, Please add services first.", category="error")
appointments = (
db.session.query(
Appointments,
Services.sname.label("service"),
Employees.uname.label("employee"),
)
.select_from(Appointments)
.join(Services)
.join(Employees)
.filter(Appointments.employeeid == current_user.id)
.all()
)
return render_template(
"view/appointments.jinja2",
form=form,
appointments=appointments,
user=current_user,
)
@view.route("/appointments/table", methods=["GET"])
@login_required
def appointment_table():
if request.args.get("viewall", default=0, type=int) == 0:
results = (
db.session.query(
Appointments.id,
Appointments.client,
Appointments.serviceid,
Appointments.employeeid,
Appointments.apptdatetime,
Appointments.tips,
Appointments.total,
Services.sname.label("service"),
Employees.uname.label("employee"),
)
.select_from(Appointments)
.join(Services)
.join(Employees)
.filter(Appointments.employeeid == current_user.id)
.all()
)
else:
results = (
db.session.query(
Appointments.id,
Appointments.client,
Appointments.serviceid,
Appointments.employeeid,
Appointments.apptdatetime,
Appointments.tips,
Appointments.total,
Services.sname.label("service"),
Employees.uname.label("employee"),
)
.select_from(Appointments)
.join(Services)
.join(Employees)
.all()
)
appointments_dict_list = [r._asdict() for r in results]
return json.dumps(appointments_dict_list, default=str)
@view.route("/appointments/update/<int:id>", methods=["POST", "GET"])
@login_required
def appointment_update(id):
form = AppointmentForm()
select_appointment = Appointments.query.get_or_404(id)
if request.method == "POST":
if form.validate_on_submit():
select_appointment.client = form.client.data
            select_appointment.serviceid = form.service.data
            select_appointment.employeeid = form.employee.data
select_appointment.apptdatetime = datetime.strptime(
f"{form.appt_date.data} {form.appt_time.data}", "%Y-%m-%d %H:%M:%S"
)
select_appointment.tips = form.tip.data
try:
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue updating an appointment", category="error")
return redirect("/appointments")
employeeList = Employees.query.all()
serviceList = Services.query.all()
form.employee.choices = [(e.id, e.uname) for e in employeeList]
form.service.choices = [(s.id, f"{s.sname} ${s.price}") for s in serviceList]
form.client.default = select_appointment.client
form.service.default = select_appointment.serviceid
form.employee.default = select_appointment.employeeid
date = select_appointment.apptdatetime.date()
time = select_appointment.apptdatetime.time()
form.appt_date.default = date
form.appt_time.default = time
form.tip.default = select_appointment.tips
form.total.default = select_appointment.total
form.process() # this is to set the default choices for services/employees
return render_template(
"view/appointment_update.jinja2",
form=form,
user=current_user,
appointment=select_appointment,
)
@view.route("/appointments/delete/<int:id>")
@login_required
def appointment_delete(id):
select_appointment = Appointments.query.get_or_404(id)
try:
db.session.delete(select_appointment)
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue deleting an appointment", category="error")
return redirect("/appointments")
@view.route("/employees", methods=["POST", "GET"])
@login_required
def employee_home():
if current_user.userlevelid >= 3:
flash("Access denied: user privileges too low", category="error")
return redirect("/")
form = EmployeeForm()
if request.method == "POST":
if form.validate_on_submit():
name = form.name.data
email = form.email.data
password = form.password.data
userlevelid = form.employee_type.data
new_employee = ApptUsers(
userlevelid=userlevelid,
uname=name,
email=email,
upassword=generate_password_hash(password, "sha256"),
verified=True,
)
try:
db.session.add(new_employee)
db.session.commit()
flash("New employee created!", category="success")
return redirect("employees")
except Exception as error:
print(error)
flash("There was an issue adding a new employee", category="error")
userLevels = UserLevel.query.filter(UserLevel.ulevel >= 2)
form.employee_type.choices = [(l.ulevel, l.uname) for l in userLevels]
employee = Employees.query.order_by(Employees.id).all()
return render_template(
"view/employees.jinja2",
form=form,
employees=employee,
user=current_user,
)
@view.route("/employees/update/<int:id>", methods=["POST", "GET"])
@login_required
def employee_update(id):
form = EmployeeForm()
employee = ApptUsers.query.get_or_404(id)
if request.method == "POST":
if form.validate_on_submit():
            employee.uname = form.name.data
employee.email = form.email.data
employee.upassword = generate_password_hash(form.password.data, "sha256")
try:
db.session.commit()
return redirect("/employees")
except Exception as error:
print(error)
flash("There was an issue updating an employee", category="error")
return render_template(
"view/employee_update.jinja2",
form=form,
employee=employee,
user=current_user,
)
@view.route("/employees/delete/<int:id>")
@login_required
def employee_delete(id):
selected_employee = ApptUsers.query.get_or_404(id)
try:
db.session.delete(selected_employee)
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue deleting an employee", category="error")
return redirect("/employees")
@view.route("/services", methods=["POST", "GET"])
@login_required
def service_home():
form = ServiceForm()
if request.method == "POST":
if form.validate_on_submit():
service_name = form.name.data
service_price = form.price.data
new_service = Services(sname=service_name, price=service_price)
try:
db.session.add(new_service)
db.session.commit()
flash("New service created!", category="success")
return redirect("services")
except Exception as error:
print(error)
flash("There was an issue adding a new service", category="error")
s = Services.query.order_by(Services.id).all()
return render_template(
"view/services.jinja2",
form=form,
services=s,
user=current_user,
)
@view.route("/services/update/<int:id>", methods=["POST", "GET"])
@login_required
def service_update(id):
form = ServiceForm()
selected_service = Services.query.get_or_404(id)
if request.method == "POST":
if form.validate_on_submit():
selected_service.sname = form.name.data
selected_service.price = form.price.data
try:
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue updating an service", category="error")
return redirect("/services")
return render_template(
"view/service_update.jinja2",
form=form,
service=selected_service,
user=current_user,
)
@view.route("/services/delete/<int:id>")
@login_required
def service_delete(id):
selected_service = Services.query.get_or_404(id)
try:
db.session.delete(selected_service)
db.session.commit()
except Exception as error:
print(error)
flash("There was an issue deleting an service", category="error")
return redirect("/services")
@view.route("/loginhistory")
@login_required
def login_history():
if current_user.userlevelid > 1:
flash("Access denied: user privileges too low", category="error")
return redirect("/")
logins = LoginHistory.query.order_by(LoginHistory.id.desc()).all()
return render_template("view/loginhistory.jinja2", logins=logins, user=current_user)
|
[
""
] | |
4e716e88211d1235e4eb2d17c6c4c7d49c93d68d
|
9a9b4b88485101f23dd84f12187dcc3dd0638fb4
|
/code/chp14/mailbox.py
|
6458bd18a929b09f9ea1fc106a9b5ccc949b3a5f
|
[] |
no_license
|
hongdago/fopnpcode
|
0d8ce279a51068e4ae92fc305c11f299991f50b7
|
ef55cdff12a1f07c3f91cf80ca6745c2448de765
|
refs/heads/master
| 2021-09-03T12:46:55.009501 | 2018-01-09T07:14:24 | 2018-01-09T07:14:24 | 115,104,899 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 823 |
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
FileName:mailbox.py
DESC: demonstrates the POP3 LIST command
"""
import getpass, poplib, sys
def main():
if len(sys.argv) !=3:
        print('Usage: %s hostname username' % sys.argv[0])
        sys.exit(2)
hostname, username = sys.argv[1:]
passwd = getpass.getpass()
p = poplib.POP3_SSL(hostname)
try:
p.user(username)
p.pass_(passwd)
except poplib.error_proto as e:
print("Login failed:", e)
else:
response, listings, octet_count = p.list()
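        # each listing line is b'<message-number> <size-in-octets>'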
if not listings:
print('No messages')
for listing in listings:
number, size = listing.decode('ascii').split()
            print('Message %s has %s bytes' % (number, size))
finally:
p.quit()
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
2b59d2bc871b13882aa71629e364e5ee5cde3a00
|
186736f265fa7954e95198955546305ab1b9b981
|
/notesApi/settings.py
|
d3fd465d97e808c8f69bde9fd61320c402413ffb
|
[] |
no_license
|
nova-sangeeth/notes-api
|
6449669870dfb69a72e1aad71c8859ca9de8bfbb
|
d5d15a4df615b0b276ccf8f49efc9e21eb177b65
|
refs/heads/master
| 2022-12-22T11:38:03.065884 | 2020-09-23T19:58:14 | 2020-09-23T19:58:14 | 298,022,798 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,607 |
py
|
"""
Django settings for notesApi project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "v1jk=4%^w9@)42-xumnuc3ho+7!&ug#q3*^y)x^@rlu#-96o*d"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# crispy forms
"crispy_forms",
# all auth apps
"django.contrib.sites",
"allauth",
"allauth.account",
"allauth.socialaccount",
# apps
"rest_framework",
"api_notes",
]
SITE_ID = 1
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "notesApi.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "notesApi.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
ACCOUNT_EMAIL_VERIFICATION = "required"
ACCOUNT_AUTHENTICATED_LOGIN_REDIRECTS = True
ACCOUNT_EMAIL_REQUIRED = False
|
[
"[email protected]"
] | |
7c5501a793a8b3dcf3fe33d63e771c53c854e673
|
fd8eb0edf514fca4f25b885de86e1503463a8264
|
/polls/admin.py
|
3cd232b8b20f28c45a93edda83a4e2180351d1d8
|
[] |
no_license
|
ubiopen/project1
|
7f6dd1bf56721449c5e61d9c40c6c132721384e1
|
89ee8f5914bfe28627018d2b07749d17f0f63e92
|
refs/heads/master
| 2021-01-25T09:59:31.794667 | 2014-06-15T10:54:21 | 2014-06-15T10:54:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 114 |
py
|
from polls.models import Poll, Choice
from django.contrib import admin
admin.site.register(Poll)
admin.site.register(Choice)
|
[
"[email protected]"
] | |
11716fe8e719fd08d9305e4d39864d7c53af3e8a
|
268e00212b4e863e35b2113496f1a71c4b7b4a04
|
/11_ๅฏนdict็listๅป้.py
|
7af9b4a7b79ae15743f7087432b1449da813482a
|
[] |
no_license
|
MrCat9/Python_Note
|
ee5c2fa86b0f77538e1feacdaaadfa9afec884ef
|
6b81cdf4d46a6d1f1a78170c47151ae519e087d4
|
refs/heads/master
| 2022-08-20T22:28:17.730325 | 2022-08-08T02:44:47 | 2022-08-08T02:44:47 | 146,618,466 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,677 |
py
|
# -*- coding: utf-8 -*-
# Dedupe a list of dicts on the value of a given key; dicts sharing that value count as duplicates
# def dict_list_duplicate(dict_list, duplicate_key):
#     temp_list = []
#     new_dict_list = []
#     for a_dict in dict_list:
#         value = a_dict[duplicate_key]
#         if value not in temp_list:
#             temp_list.append(value)
#             new_dict_list.append(a_dict)
#     return new_dict_list
# def dict_list_duplicate(dict_list, duplicate_key):
#     temp_set = set()
#     new_dict_list = []
#     for a_dict in dict_list:
#         value = a_dict[duplicate_key]
#         if value not in temp_set:
#             temp_set.add(value)
#             new_dict_list.append(a_dict)
#     return new_dict_list
def dict_list_duplicate(dict_list, duplicate_key):
    temp_set = set()
    new_dict_list = []
    for a_dict in dict_list:
        value = a_dict[duplicate_key]
        old_set_len = len(temp_set)
        temp_set.add(value)
        new_set_len = len(temp_set)
        if new_set_len > old_set_len:
            new_dict_list.append(a_dict)
    return new_dict_list
if __name__ == '__main__':
old_dict_list = [
{"title": "title1", "name": "name1"},
{"title": "title2", "name": "name2"},
{"title": "title1", "name": "name3"},
]
print(old_dict_list)
# [{'title': 'title1', 'name': 'name1'}, {'title': 'title2', 'name': 'name2'}, {'title': 'title1', 'name': 'name3'}]
    new_dict_list = dict_list_duplicate(old_dict_list, "title")  # dedupe on "title"; equal titles count as duplicates
print(new_dict_list)
# [{'title': 'title1', 'name': 'name1'}, {'title': 'title2', 'name': 'name2'}]
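    # Alternative sketch of the same "keep first occurrence per key" result,
    # using dict.setdefault (relies on insertion-ordered dicts, Python 3.7+):
    first_seen = {}
    for d in old_dict_list:
        first_seen.setdefault(d["title"], d)
    print(list(first_seen.values()))
    # [{'title': 'title1', 'name': 'name1'}, {'title': 'title2', 'name': 'name2'}]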
|
[
"[email protected]"
] | |
7a8c6b2fe81d9938e242e7bf859ec30576c2dab4
|
c550b7993524ef3598d7b93900d36e022ad2b16f
|
/venv/Scripts/pip3.8-script.py
|
d9a1cebc9fc049c6a56f94b005a001e14004390f
|
[] |
no_license
|
19982084685/network-simulation
|
523e3205eb94a312a6105946f31e031d660dcd52
|
ce825730742e6adf2d968bfe3e210c38b4419798
|
refs/heads/master
| 2022-12-25T07:05:05.536120 | 2020-09-11T15:21:24 | 2020-09-11T15:21:24 | 294,640,613 | 0 | 0 | null | null | null | null |
GB18030
|
Python
| false | false | 446 |
py
|
#!D:\Users\็ๆ\Documents\ๆๅญฆๆๆกฃ\็ฝ็ปๆๆ่ฏพ\pyProject\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
)
|
[
"[email protected]"
] | |
5f1c2a99593a7553184a6e88dacd5cfddfa94dc2
|
11286e7989264134a8a8d610e0f609e6fbff9140
|
/ch06/ch06_6.py
|
611bb36abeda2b0457a21b95c8675ec3d9cc42ed
|
[] |
no_license
|
p2c2e/machine_learning_with_python_cookbook
|
04eeed2e00e0a3e9c0681d4b2f4125aa85485a1d
|
b176323a02f5b5722e312a579ad764a0276ec9c6
|
refs/heads/main
| 2023-01-30T06:54:34.138786 | 2020-12-13T05:02:07 | 2020-12-13T05:02:07 | 320,987,171 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 472 |
py
|
# Load libraries
import unicodedata
import sys
# Create text
text_data = ['Hi!!!! I. Love. This. Song....',
'10000% Agree!!!! #LoveIT',
'Right?!?!']
# Create a dictionary of punctuation characters
punctuation = dict.fromkeys(i for i in range(sys.maxunicode)
if unicodedata.category(chr(i)).startswith('P'))
# For each string, remove any punctuation characters
[string.translate(punctuation) for string in text_data]
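# For reference, the comprehension above evaluates to (punctuation stripped,
# whitespace kept):
# ['Hi I Love This Song', '10000 Agree LoveIT', 'Right']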
|
[
"[email protected]"
] | |
ebf03cfdd8a51f8ebfe2b59fa37239f887dc4074
|
a6518cd4bdb5d8d3fde49a805208d34d6381191a
|
/server/application/models/window.py
|
33c1b4ea4f3fc57f312a3debcdabc0df9eba1e29
|
[
"MIT"
] |
permissive
|
coloration-production/basic-saas
|
c43b09515be3ec08e044d25b33d69a4482e39855
|
91656e4cb70ace6b94cd0f5f6fa54f7d996106c0
|
refs/heads/main
| 2023-06-03T06:00:32.003920 | 2021-06-16T13:03:05 | 2021-06-16T13:03:05 | 375,542,039 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,665 |
py
|
# encoding: utf-8
from application.models.base_entity import BaseEntity
from application import db
from sqlalchemy_serializer import SerializerMixin
from application.models.window_widget import registrations
from application.models.widget import Widget
from datetime import datetime
class Window(BaseEntity, SerializerMixin):
__tablename__ = 'windows'
name = db.Column(db.String(32))
layout = db.Column(db.String)
widgets = db.relationship(
'Widget',
secondary = registrations,
backref = db.backref('windows', lazy = 'dynamic'),
lazy = 'dynamic'
)
def __repr__(self) -> str:
return '<Window {}:{}>'.format(self.id, self.name)
@classmethod
def create_record (Entity, **kwargs):
widgets = []
if 'widgets' in kwargs:
widgets = kwargs['widgets']
del kwargs['widgets']
record = Entity(**kwargs)
if len(widgets) > 0 and isinstance(widgets, list):
record.widgets = Widget.query.filter(Widget.id.in_(widgets)).all()
db.session.add(record)
db.session.commit()
return record
@classmethod
def modify_record (Entity, id, entity):
record = Entity.query_record(id = id)
widgets = []
if 'id' in entity:
del entity['id']
if 'widgets' in entity:
widgets = entity['widgets']
del entity['widgets']
for key in entity:
setattr(record, key, entity[key])
if len(widgets) > 0 and isinstance(widgets, list):
record.widgets = Widget.query.filter(Widget.id.in_(widgets)).all()
setattr(record, 'lasted', datetime.utcnow())
db.session.add(record)
db.session.commit()
return record
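# Usage sketch (hypothetical values; assumes an application context and
# Widget rows with ids 1-3 already seeded):
# window = Window.create_record(name='dashboard', layout='grid', widgets=[1, 2])
# window = Window.modify_record(window.id, {'name': 'main', 'widgets': [2, 3]})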
|
[
"[email protected]"
] | |
e5ab664edb4752d26689ea1f5d8c70d0327cb7b4
|
c876cc3d3bdb9020bf906c53f0f060379cd52b09
|
/rename.py
|
5671cda9ebf8909a59997ba752c73d1a314200f8
|
[
"Apache-2.0"
] |
permissive
|
shinysuraj/python-project
|
328660cb3203cceb76ce78d974ac543891970eed
|
7d7e31898e021d8c4c05d7d6445c5645f6452b11
|
refs/heads/master
| 2020-08-27T12:50:38.855019 | 2017-09-17T22:14:20 | 2017-09-17T22:14:20 | 217,374,680 | 0 | 0 |
Apache-2.0
| 2019-10-24T19:09:20 | 2019-10-24T19:09:20 | null |
UTF-8
|
Python
| false | false | 499 |
py
|
#This program renames all the files in the current directory, removing any
#digits from their names
import os
def rename_files():
    #replace with your own directory path
    file_list = os.listdir(r"home/ved/hello")
    saved_path = os.getcwd()
    print("Current Dir is %s" %saved_path)
    os.chdir(r"home/ved/hello")
    for file_name in file_list:
        os.rename(file_name, file_name.translate(str.maketrans('', '', "0123456789")))
    os.chdir(saved_path)
rename_files()
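# Note: str.maketrans('', '', "0123456789") maps every digit to None, so
# translate() deletes them, e.g. 'img42.png' -> 'img.png'.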
|
[
"[email protected]"
] | |
0c479cef93df6818cbeead1bcb7fcd72addeaa01
|
6ce41ca757269d8dff0ebac9aa88b098b4328a4e
|
/Data_Connect/MySQL/mysql_to_redis.py
|
84e4ee2e55dd30db6ea0bf0e278b800fe058ccde
|
[] |
no_license
|
HankTsai/Smart__Fridge
|
e7abcc0017880b59e4d61c6095064c0a3ab38a9a
|
ab850517c38d06db45e8d71dcab2f849578f5ba7
|
refs/heads/master
| 2023-01-13T20:01:58.820911 | 2020-11-30T15:40:21 | 2020-11-30T15:40:21 | 312,780,540 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,199 |
py
|
import redis
import pymysql
from user_db_api import DataBaseConnector
'''
# Earlier sample version:
# convert each user's fridge data into a dict and write it into Redis
user_id_list = ["user1","user2","user3","user4"]
user_refri_dict = [{"่ๆ":"1,้ก,2020/10/23",
                    "้ฆ่":"2,ๆ น,2020/10/23",
                    "่ญๆจ":"5,ๅ,2020/10/24",
                    "่ฎ้ง":"7,ๅ,2020/10/21",
                    "้่":"3,ๅ,2020/10/24",
                    "็ช่":"10,้ก,2020/10/24",
                    "่ฑฌ่":"200,gram,2020/10/19"},
                   {"ๆด่ฅ":"12,้ก,2020/10/23",
                    "่ๅญ":"2,ๆ น,2020/10/23",
                    "ไนๅฑคๅก":"5,ๅ,2020/10/24",
                    '้ซ้บ่':"7,ๅ,2020/10/21",
                    "้่":"3,ๅ,2020/10/24",
                    'ๅคง่':"10,้ก,2020/10/24",
                    "็่":"200,gram,2020/10/19"},
                   {'ๅๅก': '391,gram,2020/10/20',
                    'ๅฐ็่': '221,gram,2020/10/20',
                    'ๅฐ้บฅ่่ฝ็ฒ': '142,gram,2020/10/20',
                    'ๆตท่': '360,gram,2020/10/20',
                    '็่ฉ': '274,gram,2020/10/20',
                    '่็': '133,gram,2020/10/20',
                    '่ต็ๅญ': '473,gram,2020/10/20',
                    '่้ฅ': '260,gram,2020/10/20'},
                   {'ไธธๅญ': '179,gram,2020/10/20',
                    'ไนณ้ธ่้ฃฒๅ': '389,gram,2020/10/20',
                    'ๆฆดๆงค': '488,gram,2020/10/20',
                    '็ฝ่': '455,gram,2020/10/20',
                    '็ด่ถ': '366,gram,2020/10/20',
                    '่ฑ็': '399,gram,2020/10/20',
                    '่่': '206,gram,2020/10/20',
                    '่ฑ่ฑ': '361,gram,2020/10/20',
                    '่ฑฌ่ฑ่': '353,gram,2020/10/20',
                    '้ฌ็': '209,gram,2020/10/20'}
                   ]
for user_id, user_refri in zip(user_id_list, user_refri_dict):
    # establish user_id set
    r.sadd("userid", user_id)
    # establish user refrigerator hash
    r.hmset(user_id, user_refri)
'''
'''
from mysql to redis
select
userid -> set
user_refrigerator -> hash
synonym word table -> hash
synonym -> ้่ธ:"้่"
็้ชจ้:"้่"
insert
1.ingredient storage record (when kafka consumer get data)
 2.user profile + line_id (when questionnaire completed)
3.user like recipe record (when user has ordered a recipe)
update
ingredient take out record (when user has ordered a recipe)
'''
'''
MySQL loading ultimately builds three basic key-value structures in Redis,
plus one hash per user, keyed by that user's Line ID:
"synonym" , data type: hash
"general_ingredient" , data type: hash
"total_user_id" , data type: hash
"U429ec102b46a5332b32c4f1a8b3b04db" , data type: hash
...
'''
def ingredient_load(db):
    # Load the synonym table from MySQL into Redis (hash "synonym": {alias term (sub key): canonical ingredient name (value)})
sql = """
SELECT s.็ธฝ้ฃๆๅ็จฑ,i.้ฃๆๅ็จฑ FROM synonym s JOIN ingredient i ON s.้ฃๆID = i.้ฃๆID;
"""
    # Load the ingredient table from MySQL into Redis (hash "general_ingredient": {ingredient name: ingredient ID})
sql2 = """
SELECT ้ฃๆID, ้ฃๆๅ็จฑ FROM ingredient;
"""
db.cursor.execute(sql)
synonym_redis = db.cursor.fetchall()
db.cursor.execute(sql2)
ingredient_redis = db.cursor.fetchall()
meaning_dict = dict()
for each in synonym_redis:
meaning_dict[each[0]] = each[1]
db.redis.hmset('synonym', meaning_dict)
food_dict = dict()
for each in ingredient_redis:
food_dict[each[1]] = each[0]
db.redis.hmset('general_ingredient', food_dict)
def user_id_table(db):
sql = '''
    SELECT `ไฝฟ็จ่ID`, `Line_ID` from recipe.user_profile;'''
db.cursor.execute(sql)
user_info = db.cursor.fetchall()
# (('Ryan', 'U429ec102b46a5332b32c4f1a8b3b04db'),)
user_table = {}
for user in user_info:
user_table[user[1]] = user[0]
db.redis.hmset("total_user_id", user_table)
def user_data_load(db):
    # Fetch all fridge records that have not been taken out yet
    sql = '''
    SELECT us.Line_ID, ing.้ฃๆๅ็จฑ, re.้ฃๆ้้, re.้ฃๆๅฎไฝ, re.้ฃๆๅญๆพๆฅ, re.้ฃๆๅฐๆๆฅ
    FROM refrigerator_record re JOIN ingredient ing JOIN user_profile us
    ON re.้ฃๆID = ing.้ฃๆID AND re.ไฝฟ็จ่ID = us.ไฝฟ็จ่ID
    WHERE (re.้ฃๆๅ็จๆฅ is null);
    '''
db.cursor.execute(sql)
refrigerator_record = db.cursor.fetchall()
if refrigerator_record:
for row in refrigerator_record:
ingredient_name = row[1]
ingredient_info = str(row[2]) + "," + row[3] + "," + str(row[4])
db.redis.hset(row[0], ingredient_name, ingredient_info)
def main():
db = DataBaseConnector()
try:
ingredient_load(db)
except Exception as e:
print(f"ingredient_load failed, Error: {e}")
try:
user_id_table(db)
except Exception as e:
print(f"user_id_table failed, Error: {e}")
try:
user_data_load(db)
except Exception as e:
print(f"user_data_load failed, Error: {e}")
db.cursor.close()
db.mysql.close()
print("MySQL loaded data to Redis successfully.")
if __name__ == '__main__':
main()
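# user_db_api is not included in this snapshot; a minimal sketch of the
# interface this script assumes (connection parameters are hypothetical):
# import pymysql
# import redis as redis_lib
# class DataBaseConnector:
#     def __init__(self):
#         self.mysql = pymysql.connect(host='localhost', user='root',
#                                      password='...', database='recipe',
#                                      charset='utf8mb4')
#         self.cursor = self.mysql.cursor()
#         self.redis = redis_lib.Redis(host='localhost', port=6379,
#                                      decode_responses=True)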
|
[
"[email protected]"
] | |
b43a8c8f46ebf074abafe2fe804dd6281bc08def
|
9506d1d978882f1310e31f05624f9123f7d1e4c4
|
/model.py
|
2572da3a20621fc9fcdb81728adc3ec4b507dbb5
|
[] |
no_license
|
mindninjaX/AI-Chatbot
|
6e9b3420cb5cde85138ace552b5cd6f22fe8c26a
|
eb839a538c0067485264f9f03a8a46176e66ebc0
|
refs/heads/master
| 2023-03-30T10:38:29.420301 | 2021-04-01T05:27:54 | 2021-04-01T05:27:54 | 329,689,986 | 14 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 542 |
py
|
import torch
import torch.nn as nn
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.l2 = nn.Linear(hidden_size, hidden_size)
self.l3 = nn.Linear(hidden_size, num_classes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.l1(x)
out = self.relu(out)
out = self.l2(out)
out = self.relu(out)
out = self.l3(out)
return out
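# Smoke-test sketch (sizes are illustrative, not from the original repo):
# model = NeuralNet(input_size=54, hidden_size=8, num_classes=7)
# logits = model(torch.rand(4, 54))  # -> tensor of shape [4, 7]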
|
[
"[email protected]"
] | |
a2c679a999c8aa27009e451be46263c0baafba57
|
b60c2ce1b3f5ae8e4381cad4564d2fb189cd325b
|
/source_manipulation/nt_parser_zefania_english.py
|
00ec14cfdd8d73f344443e416c23121bfdb9ecd3
|
[] |
no_license
|
jasonamyers/SacredPy
|
840659d4e192f88dac7fe08eac3159bbb0433491
|
2ed4951619d124aaa8f1a8d183ac0b3f816302ae
|
refs/heads/master
| 2021-01-16T21:18:08.046217 | 2013-12-08T14:07:26 | 2013-12-08T14:07:26 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,241 |
py
|
from lxml import etree
from pprint import pprint
ons = {'o': 'http://www.bibletechnologies.net/2003/OSIS/namespace'}
class Processor(object):
def __init__(self, fname):
self.tree = etree.parse(fname)
        print(self.tree)
self.books = list()
self.process_book()
def process_verses(self, chapter):
verses = list()
for verse in chapter.findall('o:verse', namespaces=ons):
verses.append({'id': verse.get('osisID'), 'content': verse.text,})
return verses
def process_chapters(self, book):
chapters = list()
for chapter in book.findall('o:chapter', namespaces=ons):
chapters.append({
'number': chapter.get('osisID'),
'verses': self.process_verses(chapter)
})
return chapters
def process_book(self):
for book in self.tree.findall('//o:div[@type="book"]', namespaces=ons):
self.books.append({
'name': book.get('osisID'),
'chapters': self.process_chapters(book)
})
if __name__ == '__main__':
p = Processor('nt_zefania_english.xml')
    print(len(p.books))
for book in p.books:
pprint(book)
|
[
"[email protected]"
] | |
4c92871a9b092599b369eba37b5e69ca438f451d
|
3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2
|
/src/Week4/Practice/Trace1.py
|
6db80027484d73a47f843382e033603034f1470c
|
[] |
no_license
|
theguyoverthere/CMU15-112-Spring17
|
b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8
|
b8287092b14e82d2a3aeac6c27bffbc95382eb34
|
refs/heads/master
| 2021-04-27T08:52:45.237631 | 2018-10-02T15:38:18 | 2018-10-02T15:38:18 | 107,882,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 267 |
py
|
def onesDigit(n):
return n%10
def ct1(L):
for i in range(len(L)):
L[i] += sum(L) + max(L)
# The function onesDigit is called on each element before
# making comparison.
return sorted(L, key=onesDigit)
a = [2,1,0]
print(ct1(a))
print(a)
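# Worked trace: a starts as [2, 1, 0]
#   i=0: L[0] += sum([2,1,0]) + max([2,1,0]) = 3 + 2   -> [7, 1, 0]
#   i=1: L[1] += sum([7,1,0]) + max([7,1,0]) = 8 + 7   -> [7, 16, 0]
#   i=2: L[2] += sum([7,16,0]) + max([7,16,0]) = 23 + 16 -> [7, 16, 39]
# ones digits are 7, 6, 9, so ct1(a) prints [16, 7, 39] and a prints [7, 16, 39]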
|
[
"[email protected]"
] | |
7b7065c44f46e1fc431ba62723c83f4e085bc20d
|
9801dd62f1c2a4454f104d26c2d7d9d75167a31c
|
/build-osmosis-script.py
|
305bfb15e1e3ea41596da49c3ed30e372b752c36
|
[] |
no_license
|
hholzgra/Extractotron
|
cc232f25bcc9609013ff2ecdf36dc8cd7fc2cf20
|
5c67734aa1107a93832d37317b6059fd5225ec3e
|
refs/heads/master
| 2021-01-15T23:35:22.356427 | 2011-09-24T03:57:42 | 2011-09-24T03:57:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,005 |
py
|
from sys import argv, stderr
from csv import DictReader
cities = list(DictReader(open('cities.txt'), dialect='excel-tab'))
try:
(osmosis, ) = argv[1:]
except ValueError:
    print('Usage: build-osmosis-script.py <osmosis command file>', file=stderr)
exit(1)
osmosis = open(osmosis, 'w')
print('bunzip2 -c planet-latest.osm.bz2 | osmosis-*/bin/osmosis --rx file=- \\', file=osmosis)
print(' --log-progress interval=60 \\', file=osmosis)
print(' --tee outputCount=2 \\', file=osmosis)
print(' --tag-filter accept-ways natural=coastline --used-node \\', file=osmosis)
print(' --wx coastline.osm.bz2 \\', file=osmosis)
print(' --tee outputCount=%d \\' % len(cities), file=osmosis)
print(' \\', file=osmosis)
for city in cities:
    print(' --bb top=%(top)s left=%(left)s bottom=%(bottom)s right=%(right)s \\' % city, file=osmosis)
    print(' --tee outputCount=2 --wx file=ex/%(slug)s.osm.bz2 --wb file=ex/%(slug)s.osm.pbf \\' % city, file=osmosis)
print('> osmosis.txt 2>&1;', file=osmosis)
osmosis.close()
|
[
"[email protected]"
] | |
4ccf7450ab45e16ee470b9508c1564a341691058
|
5eb84c7ca6572b6503f94e53813bdab018cbbe2d
|
/rocket.py
|
477d44fb1fa2af8ee9499fc0b79d9cca28082cfc
|
[] |
no_license
|
AlexanderHHS/new
|
fa161a04d5a56a7aba8da322825f4143100a7ccf
|
b3ecdf6acbb6fc19892bcb1029e4a31b2e91435c
|
refs/heads/master
| 2020-05-29T17:26:47.167837 | 2019-05-30T20:52:02 | 2019-05-30T20:52:02 | 189,277,058 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,715 |
py
|
from math import sqrt
class Rocket():
# Rocket simulates a rocket ship for a game,
# or a physics simulation.
def __init__(self, name = "TBD", x=0, y=0):
# Each rocket has an (x,y) position.
self.x = x
self.y = y
self.name = name
def move_rocket(self, x_increment=0, y_increment=1):
# Move the rocket according to the paremeters given.
# Default behavior is to move the rocket up one unit.
self.x += x_increment
self.y += y_increment
def get_distance(self, other):
# Calculates the distance from this rocket to another rocket,
# and returns that value.
distance = sqrt((self.x-other.x)**2+(self.y-other.y)**2)
return distance
def get_name(self):
#simply return the name
return self.name
def introduction(self):
print("Hi, I am a rocket named", self.name, ". Nice to meet you.")
# Make two rockets, at different places.
rocket1 = Rocket() #This one will be at default x and y position
rocket2 = Rocket("Tom",10,5) # This one is at 10, 5
#Print the names
print("The names of the rockets are...")
print(rocket1.get_name())
print(rocket2.name)
# Show the distance between them.
distance = rocket1.get_distance(rocket2)
print("The rockets are %f units apart." % distance)
#Move rocket1 up a bit
rocket1.move_rocket(0,2)
#Move the rockets some more to test out the methods
distance = rocket1.get_distance(rocket2)
print("The rockets are,", distance, " units apart.")
rocket1.move_rocket(0,2)
distance = rocket1.get_distance(rocket2)
print("The rockets are %f units apart." % distance)
rocket1.introduction()
rocket2.introduction()
|
[
"[email protected]"
] | |
dac0cd123134ee900137c144d2a10237f3321f84
|
75b85d55fd3e88813e04d5798fc69d4f42b4b7b0
|
/deprecated_nasa_r2_common/r2_control/nodes/r2_ready_pose_high.py
|
13f64a6a301d90869675813d122e3875b9a33952
|
[] |
no_license
|
hspy/rosit
|
9d4560f0cbf4f286c6e0cbc0a1555912dd74e9e1
|
c5ba04b7e870ce807da61d4e23ba80a9404e5f2c
|
refs/heads/main
| 2023-01-19T17:23:02.197874 | 2020-12-02T07:42:21 | 2020-12-02T07:42:21 | 309,916,632 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,308 |
py
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('r2_control')
import rospy
import actionlib
import math
import random
from control_msgs.msg import *
from trajectory_msgs.msg import *
from sensor_msgs.msg import JointState
from copy import copy, deepcopy
TORAD = math.pi/180.0
TODEG = 1.0/TORAD
class r2ReadyPose :
def __init__(self, N, wp, arm):
self.arm = arm
self.currentData = None
self.desiredData = None
self.deadlineData = None
self.currentState = JointState()
self.currentState.position = [0]*N
self.currentState.velocity = [0]*N
self.currentState.effort = [0]*N
self.numJoints = N
self.waypoints = wp
self.fingers = [("index",4),("middle",4),("ring",3),("little",3),("thumb",4)]
rospy.Subscriber("r2/joint_states", JointState, self.jointStateCallback)
if self.arm=="left" :
self.trajPublisher = rospy.Publisher('/r2/l_arm_controller/command', JointTrajectory)
self.trajClient = actionlib.SimpleActionClient('r2/l_arm_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
elif self.arm=="right" :
self.trajPublisher = rospy.Publisher('/r2/r_arm_controller/command', JointTrajectory)
self.trajClient = actionlib.SimpleActionClient('r2/r_arm_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
elif self.arm=="left_hand" :
self.trajPublisher = rospy.Publisher('/r2/l_hand_controller/command', JointTrajectory)
self.trajClient = actionlib.SimpleActionClient('r2/l_hand_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
elif self.arm=="right_hand" :
self.trajPublisher = rospy.Publisher('/r2/r_hand_controller/command', JointTrajectory)
self.trajClient = actionlib.SimpleActionClient('r2/r_hand_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
elif self.arm=="neck" :
self.trajPublisher = rospy.Publisher('/r2/neck_controller/command', JointTrajectory)
self.trajClient = actionlib.SimpleActionClient('r2/neck_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
else :
rospy.logerr("r2ReadyPose::r2ReadyPose() -- unknown arm")
self.trajClient.wait_for_server()
self.actionGoal = FollowJointTrajectoryGoal()
def getNumJoints(self) :
return self.numJoints
def jointStateCallback(self, data):
self.currentState = data
def computeTrajectory(self, desiredData, deadline):
jointTraj = JointTrajectory()
currentState = copy(self.currentState)
desiredState = copy(desiredData)
# create simple lists of both current and desired positions, based on provided desired names
rospy.loginfo("r2ReadyPose::computeTrajectory() -- finding necessary joints")
desiredPositions = []
currentPositions = []
for desIndex in range(len(desiredState.name)) :
for curIndex in range(len(currentState.name)) :
if ( desiredState.name[desIndex] == currentState.name[curIndex] ) :
desiredPositions.append(desiredState.position[desIndex])
currentPositions.append(currentState.position[curIndex])
rospy.loginfo("r2ReadyPose::computeTrajectory() -- creating trajectory")
jointTraj.joint_names = desiredState.name
jointTraj.points = list()
for j in range(self.waypoints) :
trajPoint = JointTrajectoryPoint()
t = (deadline / self.waypoints) * (j + 1)
trajPoint.time_from_start = rospy.Duration(t)
trajPoint.positions = list()
for i in range(len(desiredPositions)) :
trajPoint.positions.append( self.minJerk(currentPositions[i], desiredPositions[i], deadline, t) )
jointTraj.points.append(trajPoint)
rospy.loginfo("r2ReadyPose::moveToGoal() -- using tolerances")
return jointTraj
def minJerk(self, start, end, duration, t):
tOverD = float(t) / float(duration)
return start + (end - start)*( 10*math.pow(tOverD,3) - 15*math.pow(tOverD,4) + 6*math.pow(tOverD,5) )
def moveToGoal(self, jointGoal, deadline, useTolerances) :
self.actionGoal.trajectory = self.computeTrajectory(jointGoal, deadline)
offset = 0
if useTolerances :
rospy.loginfo("r2ReadyPose::moveToGoal() -- using tolerances")
self.actionGoal.path_tolerance = []
self.actionGoal.goal_tolerance = []
if self.arm == "left_hand" :
for k in range(len(self.fingers)):
for j in range(self.fingers[k][1]):
tol.name = "r2/left_arm/hand/" + self.fingers[k][0] + "/joint" + str(j+offset)
tol.position = 0.2
tol.velocity = 1
tol.acceleration = 10
self.actionGoal.path_tolerance.append(tol)
self.actionGoal.goal_tolerance.append(tol)
elif self.arm == "right_hand" :
for k in range(len(self.fingers)):
for i in range(self.fingers[k][1]):
tol.name = "r2/right_arm/hand/" + self.fingers[k][0] + "/joint" + str(j+offset)
print tol.name
tol.position = 0.2
tol.velocity = 1
tol.acceleration = 10
self.actionGoal.path_tolerance.append(tol)
self.actionGoal.goal_tolerance.append(tol)
else :
for i in range(self.numJoints):
tol = JointTolerance()
if self.arm == "left" or self.arm == "right" :
tol.name = "r2/" + self.arm + "_arm/joint" + str(i+offset)
elif self.arm == "neck" :
tol.name = "r2/" + self.arm + "/joint" + str(i+offset)
tol.position = 0.2
tol.velocity = 1
tol.acceleration = 10
self.actionGoal.path_tolerance.append(tol)
self.actionGoal.goal_tolerance.append(tol)
else :
rospy.loginfo("r2ReadyPose::moveToGoal() -- not using tolerances")
self.actionGoal.goal_time_tolerance = rospy.Duration(10.0)
        # send goal and monitor response
self.trajClient.send_goal(self.actionGoal)
rospy.loginfo("r2ReadyPose::moveToGoal() -- returned state: %s", str(self.trajClient.get_state()))
rospy.loginfo("r2ReadyPose::moveToGoal() -- returned result: %s", str(self.trajClient.get_result()))
return
def formatJointStateMsg(self, j, offset) :
if not (len(j) == self.numJoints) :
rospy.logerr("r2ReadyPose::formatJointStateMsg() -- incorrectly sized joint message")
return None
js = JointState()
js.header.seq = 0
js.header.stamp = rospy.Time.now()
js.header.frame_id = ""
js.name = []
js.position = []
if self.arm == "left" or self.arm == "right" :
for i in range(self.numJoints):
js.name.append("r2/" + self.arm + "_arm/joint" + str(i+offset))
js.position.append(j[i])
if self.arm == "left_hand" :
for k in range(len(self.fingers)):
for i in range(self.fingers[k][1]):
js.name.append("r2/left_arm/hand/" + self.fingers[k][0] + "/joint" + str(i+offset))
js.position.append(j[i])
if self.arm == "right_hand" :
for k in range(len(self.fingers)):
for i in range(self.fingers[k][1]):
js.name.append("r2/right_arm/hand/" + self.fingers[k][0] + "/joint" + str(i+offset))
js.position.append(j[i])
elif self.arm == "neck" :
for i in range(self.numJoints):
js.name.append("r2/" + self.arm + "/joint" + str(i+offset))
js.position.append(j[i])
return js
if __name__ == '__main__':
rospy.init_node('r2_ready_pose_high')
try:
r2TrajectoryGeneratorLeft = r2ReadyPose(7, 500, "left")
r2TrajectoryGeneratorRight = r2ReadyPose(7, 500, "right")
r2TrajectoryGeneratorNeck = r2ReadyPose(3, 500, "neck")
r2TrajectoryGeneratorLeftHand = r2ReadyPose(15, 10, "left_hand")
r2TrajectoryGeneratorRightHand = r2ReadyPose(15, 10, "right_hand")
rospy.sleep(2)
lhrp = [0]*15
rhrp = [0]*15
lrp1 = [50.0*TORAD, -80.0*TORAD, -105.0*TORAD, -140.0*TORAD, 80.0*TORAD, 0.0*TORAD, 0.0*TORAD]
rrp1 = [-50.0*TORAD, -80.0*TORAD, 105.0*TORAD, -140.0*TORAD, -80.0*TORAD, 0.0*TORAD, 0.0*TORAD]
rrp2 = [ 0.4, -0.5, 1.57, -2.0, -0.7, 0.3, 0.6]
lrp2 = [-0.4, -0.5, -1.57, -2.0, 0.7, 0.3, -0.6]
nrp = [-20.0*TORAD, 0.0*TORAD, -15.0*TORAD]
print "r2ReadyPose() -- moving to ready pose"
jointGoalNeck = r2TrajectoryGeneratorNeck.formatJointStateMsg(nrp, 0)
jointGoalLeftHand = r2TrajectoryGeneratorLeftHand.formatJointStateMsg(lhrp, 0)
jointGoalRightHand = r2TrajectoryGeneratorRightHand.formatJointStateMsg(rhrp, 0)
r2TrajectoryGeneratorLeftHand.moveToGoal(jointGoalLeftHand, 0.1, False)
r2TrajectoryGeneratorRightHand.moveToGoal(jointGoalRightHand, 0.1, False)
r2TrajectoryGeneratorNeck.moveToGoal(jointGoalNeck, 0.5, False)
jointGoalLeft = r2TrajectoryGeneratorLeft.formatJointStateMsg(lrp1, 0)
jointGoalRight = r2TrajectoryGeneratorRight.formatJointStateMsg(rrp1, 0)
r2TrajectoryGeneratorLeft.moveToGoal(jointGoalLeft, 0.5, False)
r2TrajectoryGeneratorRight.moveToGoal(jointGoalRight, 0.5, False)
rospy.sleep(3)
jointGoalLeft = r2TrajectoryGeneratorLeft.formatJointStateMsg(lrp2, 0)
jointGoalRight = r2TrajectoryGeneratorRight.formatJointStateMsg(rrp2, 0)
r2TrajectoryGeneratorLeft.moveToGoal(jointGoalLeft, 0.5, False)
r2TrajectoryGeneratorRight.moveToGoal(jointGoalRight, 0.5, False)
except rospy.ROSInterruptException:
pass
|
[
"[email protected]"
] | |
67c8f6e68f42cf14fa5dda19c602fbd7976c47fc
|
b61efe2686feb44c5b0d2fb3094dd2ea94e6ca93
|
/src/control_decision_4.py
|
be6dc49f088a3f399c8bf5df9b0a6c7de0b509ca
|
[] |
no_license
|
idrissahil/bat_wifi_exploration
|
888f0f7243cc4bedeba6fe8d702762e6e2ad5da9
|
5a1bc74c1b35360d21d01e5e2a721b38fb380ac8
|
refs/heads/master
| 2020-05-31T16:38:49.118742 | 2019-06-29T14:03:28 | 2019-06-29T14:03:28 | 190,386,321 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,239 |
py
|
#! /usr/bin/env python
import rospy
import math
from sensor_msgs.msg import BatteryState
from geometry_msgs.msg import Twist, PoseArray, Pose, PoseStamped
rospy.init_node('control_decision_drone')
control_decision_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=1)
state=1
curr_pos = [0,0,0]
rrt_list=[]
index=0
def callback_gps(gps):
global curr_pos
global rrt_list
global state
global index
curr_pos[0] = gps.pose.position.x
curr_pos[1] = gps.pose.position.y
curr_pos[2] = gps.pose.position.z
if state==1:
print(state)
#curr_pos[0]=gps.pose.position.x
#curr_pos[1]=gps.pose.position.y
#curr_pos[2]=gps.pose.position.z
if len(rrt_list)>1:
state=2
print(state)
dist_point = math.sqrt(math.pow(rrt_list[index].position.x - curr_pos[0], 2)+math.pow(rrt_list[index].position.y - curr_pos[1], 2)+math.pow(rrt_list[index].position.z - curr_pos[2], 2))
if dist_point<0.3:
index=index+1
if index==len(rrt_list):
index=index-1
curr_position=PoseStamped()
#hold_position.pose.position.x= 0
#hold_position.pose.position.y = 14
#hold_position.pose.position.z= 1
curr_position.pose.position.x= rrt_list[index].position.x
curr_position.pose.position.y= rrt_list[index].position.y
curr_position.pose.position.z= rrt_list[index].position.z
curr_position.header.frame_id = "map"
control_decision_pub.publish(curr_position)
def callback_battery(rrt):
global state
global curr_pos
global rrt_list
rrt_list=rrt.poses
def callback_exploration(explore):
global state
global exploration_point_x
exploration_point_x = explore.pose.position.x
print(state)
if state ==1:
control_decision_pub.publish(explore)
def main():
exploration_sub = rospy.Subscriber('/mavros/setpoint_position/local1', PoseStamped, callback_exploration)
battery_sub = rospy.Subscriber('visual_marker_rrt', PoseArray, callback_battery)
gps_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, callback_gps)
rospy.spin()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
8b151a74b1e4f2022e1042b3a62cfa99f382ba1b
|
f7f45300cb7ae8a2bb1c9db79f835a402d11da33
|
/orig_py_files/ex_generators.py
|
34b437f25bad8daa10c2ee0eadde5683b957cfac
|
[] |
no_license
|
rob-kistner/modern-python
|
d025eb6c26a0c5c16846086f59625867c92909f6
|
d23f7d2fb00f27255a11290deda8759346117a04
|
refs/heads/master
| 2021-05-25T22:50:40.063110 | 2020-09-02T20:06:08 | 2020-09-02T20:06:08 | 253,953,967 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 770 |
py
|
from printutils import *
big_banner("""
Exercise: Week Generator
------------------------
""")
# creates generator when run
def week():
weekdays = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')
for day in weekdays:
yield day
weekgen = week()
# The course presents these next(week) calls as working, but they raise
# TypeError: 'function' object is not an iterator -- next() must be called
# on the generator object returned by week(), i.e. weekgen:
# print(weekgen)
# print(next(weekgen))
# print(next(weekgen))
# print(next(weekgen))
# print(next(weekgen))
# print(next(weekgen))
# this one does work though
def yes_or_no():
answer = "yes"
while True:
yield answer
answer = "no" if answer == "yes" else "yes"
yn = yes_or_no()
print(yn)
print(next(yn))
print(next(yn))
print(next(yn))
print(next(yn))
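# The prints above show the generator's repr once, then alternate:
# yes, no, yes, no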
|
[
"[email protected]"
] | |
72883fcb5f9f5eef71f870b19b65d9612ea0ebf7
|
35fa64dbeb1dae686c703a5a96c33cc9df0dcf57
|
/djangogirls/djangogirls/settings.py
|
67c544a46171e2547d577016ad4049f0c17bc547
|
[] |
no_license
|
hanadevnyc/my-first-blog
|
acb545e0f01bc80880d95dcd794db8119c51a51b
|
680b68c4e692fe2508f0e1a31b892c3f8a3ab7cf
|
refs/heads/master
| 2021-01-10T11:35:18.417905 | 2016-02-27T20:37:06 | 2016-02-27T20:37:06 | 52,678,152 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,247 |
py
|
"""
Django settings for djangogirls project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h^61k8+ppx*-l#jhvgpe%8jdp#*m)8a4v#o5-43tnrc$kerbx8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangogirls.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangogirls.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"[email protected]"
] | |
ddb617b3840deff9580b1979fa5f9a1accfb1906
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/you-get/2016/8/common.py
|
a5a0fbab63c9d5e6a52916b9ad5356b87ef836b7
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null |
UTF-8
|
Python
| false | false | 46,179 |
py
|
#!/usr/bin/env python
SITES = {
'163' : 'netease',
'56' : 'w56',
'acfun' : 'acfun',
'archive' : 'archive',
'baidu' : 'baidu',
'bandcamp' : 'bandcamp',
'baomihua' : 'baomihua',
'bigthink' : 'bigthink',
'bilibili' : 'bilibili',
'cctv' : 'cntv',
'cntv' : 'cntv',
'cbs' : 'cbs',
'dailymotion' : 'dailymotion',
'dilidili' : 'dilidili',
'dongting' : 'dongting',
'douban' : 'douban',
'douyu' : 'douyutv',
'ehow' : 'ehow',
'facebook' : 'facebook',
'fc2' : 'fc2video',
'flickr' : 'flickr',
'freesound' : 'freesound',
'fun' : 'funshion',
'google' : 'google',
'heavy-music' : 'heavymusic',
'huaban' : 'huaban',
'iask' : 'sina',
'ifeng' : 'ifeng',
'imgur' : 'imgur',
'in' : 'alive',
'infoq' : 'infoq',
'instagram' : 'instagram',
'interest' : 'interest',
'iqilu' : 'iqilu',
'iqiyi' : 'iqiyi',
'isuntv' : 'suntv',
'joy' : 'joy',
'jpopsuki' : 'jpopsuki',
'kankanews' : 'bilibili',
'khanacademy' : 'khan',
'ku6' : 'ku6',
'kugou' : 'kugou',
'kuwo' : 'kuwo',
'le' : 'le',
'letv' : 'le',
'lizhi' : 'lizhi',
'magisto' : 'magisto',
'metacafe' : 'metacafe',
'mgtv' : 'mgtv',
'miomio' : 'miomio',
'mixcloud' : 'mixcloud',
'mtv81' : 'mtv81',
'musicplayon' : 'musicplayon',
'naver' : 'naver',
'7gogo' : 'nanagogo',
'nicovideo' : 'nicovideo',
'panda' : 'panda',
'pinterest' : 'pinterest',
'pixnet' : 'pixnet',
'pptv' : 'pptv',
'qianmo' : 'qianmo',
'qq' : 'qq',
'showroom-live' : 'showroom',
'sina' : 'sina',
'smgbb' : 'bilibili',
'sohu' : 'sohu',
'soundcloud' : 'soundcloud',
'ted' : 'ted',
'theplatform' : 'theplatform',
'thvideo' : 'thvideo',
'tucao' : 'tucao',
'tudou' : 'tudou',
'tumblr' : 'tumblr',
'twimg' : 'twitter',
'twitter' : 'twitter',
'videomega' : 'videomega',
'vidto' : 'vidto',
'vimeo' : 'vimeo',
'wanmen' : 'wanmen',
'weibo' : 'miaopai',
'veoh' : 'veoh',
'vine' : 'vine',
'vk' : 'vk',
'xiami' : 'xiami',
'xiaokaxiu' : 'yixia',
'xiaojiadianvideo' : 'fc2video',
'yinyuetai' : 'yinyuetai',
'miaopai' : 'yixia',
'youku' : 'youku',
'youtu' : 'youtube',
'youtube' : 'youtube',
'zhanqi' : 'zhanqi',
}
import getopt
import json
import locale
import logging
import os
import platform
import re
import socket
import sys
import time
from urllib import request, parse, error
from http import cookiejar
from importlib import import_module
from .version import __version__
from .util import log, term
from .util.git import get_version
from .util.strings import get_filename, unescape_html
from . import json_output as json_output_
dry_run = False
json_output = False
force = False
player = None
extractor_proxy = None
cookies = None
output_filename = None
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'
}
if sys.stdout.isatty():
default_encoding = sys.stdout.encoding.lower()
else:
default_encoding = locale.getpreferredencoding().lower()
def maybe_print(*s):
try: print(*s)
except: pass
def tr(s):
if default_encoding == 'utf-8':
return s
else:
return s
#return str(s.encode('utf-8'))[2:-1]
# DEPRECATED in favor of match1()
def r1(pattern, text):
m = re.search(pattern, text)
if m:
return m.group(1)
# DEPRECATED in favor of match1()
def r1_of(patterns, text):
for p in patterns:
x = r1(p, text)
if x:
return x
def match1(text, *patterns):
"""Scans through a string for substrings matched some patterns (first-subgroups only).
Args:
text: A string to be scanned.
patterns: Arbitrary number of regex patterns.
Returns:
When only one pattern is given, returns a string (None if no match found).
When more than one pattern are given, returns a list of strings ([] if no match found).
"""
if len(patterns) == 1:
pattern = patterns[0]
match = re.search(pattern, text)
if match:
return match.group(1)
else:
return None
else:
ret = []
for pattern in patterns:
match = re.search(pattern, text)
if match:
ret.append(match.group(1))
return ret
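# For reference (hypothetical inputs):
#   match1('v=123&t=9', r'v=(\d+)')             -> '123'
#   match1('v=123&t=9', r'v=(\d+)', r't=(\d+)') -> ['123', '9']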
def matchall(text, patterns):
"""Scans through a string for substrings matched some patterns.
Args:
text: A string to be scanned.
patterns: a list of regex pattern.
Returns:
        A list of matched substrings; empty if none found.
"""
ret = []
for pattern in patterns:
match = re.findall(pattern, text)
ret += match
return ret
def launch_player(player, urls):
import subprocess
import shlex
subprocess.call(shlex.split(player) + list(urls))
def parse_query_param(url, param):
"""Parses the query string of a URL and returns the value of a parameter.
Args:
url: A URL.
param: A string representing the name of the parameter.
Returns:
The value of the parameter.
"""
try:
return parse.parse_qs(parse.urlparse(url).query)[param][0]
except:
return None
def unicodize(text):
return re.sub(r'\\u([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])', lambda x: chr(int(x.group(0)[2:], 16)), text)
# DEPRECATED in favor of util.legitimize()
def escape_file_path(path):
path = path.replace('/', '-')
path = path.replace('\\', '-')
path = path.replace('*', '-')
path = path.replace('?', '-')
return path
def ungzip(data):
"""Decompresses data for Content-Encoding: gzip.
"""
from io import BytesIO
import gzip
buffer = BytesIO(data)
f = gzip.GzipFile(fileobj=buffer)
return f.read()
def undeflate(data):
"""Decompresses data for Content-Encoding: deflate.
(the zlib compression is used.)
"""
import zlib
decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
return decompressobj.decompress(data)+decompressobj.flush()
# DEPRECATED in favor of get_content()
def get_response(url, faker = False):
# install cookies
if cookies:
opener = request.build_opener(request.HTTPCookieProcessor(cookies))
request.install_opener(opener)
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
else:
response = request.urlopen(url)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
return response
# DEPRECATED in favor of get_content()
def get_html(url, encoding = None, faker = False):
content = get_response(url, faker).data
return str(content, 'utf-8', 'ignore')
# DEPRECATED in favor of get_content()
def get_decoded_html(url, faker = False):
response = get_response(url, faker)
data = response.data
charset = r1(r'charset=([\w-]+)', response.headers['content-type'])
if charset:
return data.decode(charset, 'ignore')
else:
return data
def get_location(url):
response = request.urlopen(url)
# urllib will follow redirections and it's too much code to tell urllib
# not to do that
return response.geturl()
def get_content(url, headers={}, decoded=True):
"""Gets the content of a URL via sending a HTTP GET request.
Args:
url: A URL.
headers: Request headers used by the client.
decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
"""
logging.debug('get_content: %s' % url)
req = request.Request(url, headers=headers)
if cookies:
cookies.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
    response = None
    for i in range(10):
        try:
            response = request.urlopen(req)
            break
        except socket.timeout:
            logging.debug('request attempt %s timeout' % str(i + 1))
    if response is None:
        raise socket.timeout('request to %s timed out after 10 attempts' % url)
    data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(response.getheader('Content-Type'), r'charset=([\w-]+)')
if charset is not None:
data = data.decode(charset)
else:
data = data.decode('utf-8')
return data
def url_size(url, faker = False, headers = {}):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
elif headers:
response = request.urlopen(request.Request(url, headers = headers), None)
else:
response = request.urlopen(url)
size = response.headers['content-length']
return int(size) if size!=None else float('inf')
def urls_size(urls, faker = False, headers = {}):
return sum([url_size(url, faker=faker, headers=headers) for url in urls])
def get_head(url, headers = {}):
if headers:
req = request.Request(url, headers = headers)
else:
req = request.Request(url)
req.get_method = lambda : 'HEAD'
res = request.urlopen(req)
return dict(res.headers)
def url_info(url, faker = False, headers = {}):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
elif headers:
response = request.urlopen(request.Request(url, headers = headers), None)
else:
response = request.urlopen(request.Request(url))
headers = response.headers
type = headers['content-type']
if type == 'image/jpg; charset=UTF-8' or type == 'image/jpg' : type = 'audio/mpeg' #fix for netease
mapping = {
'video/3gpp': '3gp',
'video/f4v': 'flv',
'video/mp4': 'mp4',
'video/MP2T': 'ts',
'video/quicktime': 'mov',
'video/webm': 'webm',
'video/x-flv': 'flv',
'video/x-ms-asf': 'asf',
'audio/mp4': 'mp4',
'audio/mpeg': 'mp3',
'image/jpeg': 'jpg',
'image/png': 'png',
'image/gif': 'gif',
'application/pdf': 'pdf',
}
if type in mapping:
ext = mapping[type]
else:
type = None
if headers['content-disposition']:
try:
filename = parse.unquote(r1(r'filename="?([^"]+)"?', headers['content-disposition']))
if len(filename.split('.')) > 1:
ext = filename.split('.')[-1]
else:
ext = None
except:
ext = None
else:
ext = None
if headers['transfer-encoding'] != 'chunked':
size = headers['content-length'] and int(headers['content-length'])
else:
size = None
return type, ext, size
def url_locations(urls, faker = False, headers = {}):
locations = []
for url in urls:
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
elif headers:
response = request.urlopen(request.Request(url, headers = headers), None)
else:
response = request.urlopen(request.Request(url))
locations.append(response.url)
return locations
def url_save(url, filepath, bar, refer = None, is_part = False, faker = False, headers = {}):
file_size = url_size(url, faker = faker, headers = headers)
if os.path.exists(filepath):
if not force and file_size == os.path.getsize(filepath):
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
else:
if bar:
bar.update_received(file_size)
return
else:
if not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download' if file_size!=float('inf') else filepath
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if received < file_size:
if faker:
headers = fake_headers
elif headers:
headers = headers
else:
headers = {}
if received:
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
response = request.urlopen(request.Request(url, headers = headers), None)
try:
range_start = int(response.headers['content-range'][6:].split('/')[0].split('-')[0])
end_length = end = int(response.headers['content-range'][6:].split('/')[1])
range_length = end_length - range_start
except:
content_length = response.headers['content-length']
range_length = int(content_length) if content_length!=None else float('inf')
if file_size != received + range_length:
received = 0
if bar:
bar.received = 0
open_mode = 'wb'
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
if received == file_size: # Download finished
break
else: # Unexpected termination. Retry request
headers['Range'] = 'bytes=' + str(received) + '-'
response = request.urlopen(request.Request(url, headers = headers), None)
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath) # on Windows rename could fail if destination filepath exists
os.rename(temp_filepath, filepath)
def url_save_chunked(url, filepath, bar, refer = None, is_part = False, faker = False, headers = {}):
if os.path.exists(filepath):
if not force:
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
else:
if bar:
bar.update_received(os.path.getsize(filepath))
return
else:
if not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download'
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if faker:
headers = fake_headers
elif headers:
headers = headers
else:
headers = {}
if received:
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
response = request.urlopen(request.Request(url, headers = headers), None)
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
break
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
    assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath) # on Windows rename could fail if destination filepath exists
os.rename(temp_filepath, filepath)
class SimpleProgressBar:
term_size = term.get_terminal_size()[1]
def __init__(self, total_size, total_pieces = 1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
self.speed = ''
self.last_updated = time.time()
total_pieces_len = len(str(total_pieces))
# 38 is the size of all statically known size in self.bar
total_str = '%5s' % round(self.total_size / 1048576, 1)
total_str_width = max(len(total_str), 5)
self.bar_size = self.term_size - 27 - 2*total_pieces_len - 2*total_str_width
        self.bar = '{:>4}%% ({:>%s}/%sMB) ├{:─<%s}┤[{:>%s}/{:>%s}] {}' % (
            total_str_width, total_str, self.bar_size, total_pieces_len, total_pieces_len)
def update(self):
self.displayed = True
bar_size = self.bar_size
percent = round(self.received * 100 / self.total_size, 1)
if percent >= 100:
percent = 100
dots = bar_size * int(percent) // 100
plus = int(percent) - dots // bar_size * 100
if plus > 0.8:
            plus = '█'
elif plus > 0.4:
plus = '>'
else:
plus = ''
        bar = '█' * dots + plus
bar = self.bar.format(percent, round(self.received / 1048576, 1), bar, self.current_piece, self.total_pieces, self.speed)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
time_diff = time.time() - self.last_updated
bytes_ps = n / time_diff if time_diff else 0
if bytes_ps >= 1024 ** 3:
self.speed = '{:4.0f} GB/s'.format(bytes_ps / 1024 ** 3)
elif bytes_ps >= 1024 ** 2:
self.speed = '{:4.0f} MB/s'.format(bytes_ps / 1024 ** 2)
elif bytes_ps >= 1024:
self.speed = '{:4.0f} kB/s'.format(bytes_ps / 1024)
else:
self.speed = '{:4.0f} B/s'.format(bytes_ps)
self.last_updated = time.time()
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class PiecesProgressBar:
def __init__(self, total_size, total_pieces = 1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar = '{0:>5}%[{1:<40}] {2}/{3}'.format('', '=' * 40, self.current_piece, self.total_pieces)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class DummyProgressBar:
def __init__(self, *args):
pass
def update_received(self, n):
pass
def update_piece(self, n):
pass
def done(self):
pass
def get_output_filename(urls, title, ext, output_dir, merge):
# lame hack for the --output-filename option
global output_filename
if output_filename: return output_filename
merged_ext = ext
if (len(urls) > 1) and merge:
from .processor.ffmpeg import has_ffmpeg_installed
if ext in ['flv', 'f4v']:
if has_ffmpeg_installed():
merged_ext = 'mp4'
else:
merged_ext = 'flv'
elif ext == 'mp4':
merged_ext = 'mp4'
elif ext == 'ts':
if has_ffmpeg_installed():
merged_ext = 'mkv'
else:
merged_ext = 'ts'
return '%s.%s' % (title, merged_ext)
def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False, headers = {}, **kwargs):
assert urls
if json_output:
json_output_.download_urls(urls=urls, title=title, ext=ext, total_size=total_size, refer=refer)
return
if dry_run:
print('Real URLs:\n%s' % '\n'.join(urls))
return
if player:
launch_player(player, urls)
return
if not total_size:
try:
total_size = urls_size(urls, faker=faker, headers=headers)
except:
import traceback
traceback.print_exc(file=sys.stdout)
pass
title = tr(get_filename(title))
output_filename = get_output_filename(urls, title, ext, output_dir, merge)
output_filepath = os.path.join(output_dir, output_filename)
if total_size:
if not force and os.path.exists(output_filepath) and os.path.getsize(output_filepath) >= total_size * 0.9:
print('Skipping %s: file already exists' % output_filepath)
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print('Downloading %s ...' % tr(output_filename))
bar.update()
url_save(url, output_filepath, bar, refer = refer, faker = faker, headers = headers)
bar.done()
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
bar.update()
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save(url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers)
bar.done()
if not merge:
print()
return
if 'av' in kwargs and kwargs['av']:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_av
ret = ffmpeg_concat_av(parts, output_filepath, ext)
print('Merged into %s' % output_filename)
if ret == 0:
for part in parts: os.remove(part)
elif ext in ['flv', 'f4v']:
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4
ffmpeg_concat_flv_to_mp4(parts, output_filepath)
else:
from .processor.join_flv import concat_flv
concat_flv(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'mp4':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4
ffmpeg_concat_mp4_to_mp4(parts, output_filepath)
else:
from .processor.join_mp4 import concat_mp4
concat_mp4(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == "ts":
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
ffmpeg_concat_ts_to_mkv(parts, output_filepath)
else:
from .processor.join_ts import concat_ts
concat_ts(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
else:
print("Can't merge %s files" % ext)
print()
def download_urls_chunked(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False, headers = {}):
assert urls
if dry_run:
print('Real URLs:\n%s\n' % urls)
return
if player:
launch_player(player, urls)
return
title = tr(get_filename(title))
filename = '%s.%s' % (title, ext)
filepath = os.path.join(output_dir, filename)
    if total_size and ext in ('ts',):
        if not force and os.path.exists(filepath[:-3] + '.mkv'):
            print('Skipping %s: file already exists' % (filepath[:-3] + '.mkv'))
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
parts = []
url = urls[0]
print('Downloading %s ...' % tr(filename))
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
url_save_chunked(url, filepath, bar, refer = refer, faker = faker, headers = headers)
bar.done()
if not merge:
print()
return
if ext == 'ts':
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_convert_ts_to_mkv
if ffmpeg_convert_ts_to_mkv(parts, os.path.join(output_dir, title + '.mkv')):
for part in parts:
os.remove(part)
else:
os.remove(os.path.join(output_dir, title + '.mkv'))
else:
print('No ffmpeg is found. Conversion aborted.')
else:
print("Can't convert %s files" % ext)
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save_chunked(url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers)
bar.done()
if not merge:
print()
return
if ext == 'ts':
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
if ffmpeg_concat_ts_to_mkv(parts, os.path.join(output_dir, title + '.mkv')):
for part in parts:
os.remove(part)
else:
os.remove(os.path.join(output_dir, title + '.mkv'))
else:
print('No ffmpeg is found. Merging aborted.')
else:
print("Can't merge %s files" % ext)
print()
def download_rtmp_url(url,title, ext,params={}, total_size=0, output_dir='.', refer=None, merge=True, faker=False):
assert url
if dry_run:
print('Real URL:\n%s\n' % [url])
if params.get("-y",False): #None or unset ->False
print('Real Playpath:\n%s\n' % [params.get("-y")])
return
if player:
from .processor.rtmpdump import play_rtmpdump_stream
play_rtmpdump_stream(player, url, params)
return
from .processor.rtmpdump import has_rtmpdump_installed, download_rtmpdump_stream
assert has_rtmpdump_installed(), "RTMPDump not installed."
download_rtmpdump_stream(url, title, ext,params, output_dir)
def download_url_ffmpeg(url,title, ext,params={}, total_size=0, output_dir='.', refer=None, merge=True, faker=False):
assert url
if dry_run:
print('Real URL:\n%s\n' % [url])
if params.get("-y",False): #None or unset ->False
print('Real Playpath:\n%s\n' % [params.get("-y")])
return
if player:
launch_player(player, [url])
return
from .processor.ffmpeg import has_ffmpeg_installed, ffmpeg_download_stream
assert has_ffmpeg_installed(), "FFmpeg not installed."
ffmpeg_download_stream(url, title, ext, params, output_dir)
def playlist_not_supported(name):
def f(*args, **kwargs):
raise NotImplementedError('Playlist is not supported for ' + name)
return f
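# Hedged usage sketch (the site name is illustrative): extractor modules that
# lack playlist support typically expose
#   download_playlist = playlist_not_supported('somesite')
# so any playlist request raises NotImplementedError naming the site.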
def print_info(site_info, title, type, size):
if json_output:
json_output_.print_info(site_info=site_info, title=title, type=type, size=size)
return
if type:
type = type.lower()
if type in ['3gp']:
type = 'video/3gpp'
elif type in ['asf', 'wmv']:
type = 'video/x-ms-asf'
elif type in ['flv', 'f4v']:
type = 'video/x-flv'
elif type in ['mkv']:
type = 'video/x-matroska'
elif type in ['mp3']:
type = 'audio/mpeg'
elif type in ['mp4']:
type = 'video/mp4'
elif type in ['mov']:
type = 'video/quicktime'
elif type in ['ts']:
type = 'video/MP2T'
elif type in ['webm']:
type = 'video/webm'
elif type in ['jpg']:
type = 'image/jpeg'
elif type in ['png']:
type = 'image/png'
elif type in ['gif']:
type = 'image/gif'
if type in ['video/3gpp']:
type_info = "3GPP multimedia file (%s)" % type
elif type in ['video/x-flv', 'video/f4v']:
type_info = "Flash video (%s)" % type
elif type in ['video/mp4', 'video/x-m4v']:
type_info = "MPEG-4 video (%s)" % type
elif type in ['video/MP2T']:
type_info = "MPEG-2 transport stream (%s)" % type
elif type in ['video/webm']:
type_info = "WebM video (%s)" % type
#elif type in ['video/ogg']:
# type_info = "Ogg video (%s)" % type
elif type in ['video/quicktime']:
type_info = "QuickTime video (%s)" % type
elif type in ['video/x-matroska']:
type_info = "Matroska video (%s)" % type
#elif type in ['video/x-ms-wmv']:
# type_info = "Windows Media video (%s)" % type
elif type in ['video/x-ms-asf']:
type_info = "Advanced Systems Format (%s)" % type
#elif type in ['video/mpeg']:
# type_info = "MPEG video (%s)" % type
elif type in ['audio/mp4']:
type_info = "MPEG-4 audio (%s)" % type
elif type in ['audio/mpeg']:
type_info = "MP3 (%s)" % type
elif type in ['image/jpeg']:
type_info = "JPEG Image (%s)" % type
elif type in ['image/png']:
type_info = "Portable Network Graphics (%s)" % type
elif type in ['image/gif']:
type_info = "Graphics Interchange Format (%s)" % type
else:
type_info = "Unknown type (%s)" % type
maybe_print("Site: ", site_info)
maybe_print("Title: ", unescape_html(tr(title)))
print("Type: ", type_info)
print("Size: ", round(size / 1048576, 2), "MiB (" + str(size) + " Bytes)")
print()
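# Illustrative output of print_info for an mp4 stream (the values are made up):
#   Site:  YouTube
#   Title: some video
#   Type:  MPEG-4 video (video/mp4)
#   Size:  12.34 MiB (12939264 Bytes)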
def mime_to_container(mime):
mapping = {
'video/3gpp': '3gp',
'video/mp4': 'mp4',
'video/webm': 'webm',
'video/x-flv': 'flv',
}
if mime in mapping:
return mapping[mime]
else:
return mime.split('/')[1]
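# Illustrative behavior: known MIME types map to their usual container names,
# anything else falls back to the MIME subtype, e.g.
#   mime_to_container('video/mp4')   -> 'mp4'
#   mime_to_container('audio/mpeg')  -> 'mpeg'   (fallback: subtype of the MIME string)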
def parse_host(host):
"""Parses host name and port number from a string.
"""
if re.match(r'^(\d+)$', host) is not None:
return ("0.0.0.0", int(host))
if re.match(r'^(\w+)://', host) is None:
host = "//" + host
o = parse.urlparse(host)
hostname = o.hostname or "0.0.0.0"
port = o.port or 0
return (hostname, port)
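# Illustrative expectations (not tests from the original source):
#   parse_host('8080')           -> ('0.0.0.0', 8080)    # bare port number
#   parse_host('127.0.0.1:3128') -> ('127.0.0.1', 3128)  # scheme-less host:port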
def set_proxy(proxy):
proxy_handler = request.ProxyHandler({
'http': '%s:%s' % proxy,
'https': '%s:%s' % proxy,
})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
def unset_proxy():
proxy_handler = request.ProxyHandler({})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
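# Hedged usage sketch: set_proxy takes a (host, port) tuple, which is formatted
# into 'host:port' for both HTTP and HTTPS traffic.
#   set_proxy(('127.0.0.1', 8118))
#   # ... subsequent urllib requests go through the proxy ...
#   unset_proxy()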
# DEPRECATED in favor of set_proxy() and unset_proxy()
def set_http_proxy(proxy):
    if proxy is None: # Use system default setting
proxy_support = request.ProxyHandler()
elif proxy == '': # Don't use any proxy
proxy_support = request.ProxyHandler({})
else: # Use proxy
proxy_support = request.ProxyHandler({'http': '%s' % proxy, 'https': '%s' % proxy})
opener = request.build_opener(proxy_support)
request.install_opener(opener)
def download_main(download, download_playlist, urls, playlist, **kwargs):
for url in urls:
if url.startswith('https://'):
url = url[8:]
if not url.startswith('http://'):
url = 'http://' + url
if playlist:
download_playlist(url, **kwargs)
else:
download(url, **kwargs)
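# Illustrative normalization performed by download_main above:
#   'https://example.com/v' -> 'http://example.com/v'
#   'example.com/v'         -> 'http://example.com/v'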
def script_main(script_name, download, download_playlist, **kwargs):
def version():
log.i('version %s, a tiny downloader that scrapes the web.'
% get_version(kwargs['repo_path']
if 'repo_path' in kwargs else __version__))
logging.basicConfig(format='[%(levelname)s] %(message)s')
help = 'Usage: %s [OPTION]... [URL]...\n\n' % script_name
help += '''Startup options:
-V | --version Print version and exit.
-h | --help Print help and exit.
\n'''
help += '''Dry-run options: (no actual downloading)
-i | --info Print extracted information.
-u | --url Print extracted information with URLs.
--json Print extracted URLs in JSON format.
\n'''
help += '''Download options:
-n | --no-merge Do not merge video parts.
--no-caption Do not download captions.
(subtitles, lyrics, danmaku, ...)
    -f | --force                       Force overwriting existing files.
-F | --format <STREAM_ID> Set video format to STREAM_ID.
-O | --output-filename <FILE> Set output filename.
-o | --output-dir <PATH> Set output directory.
-p | --player <PLAYER [OPTIONS]> Stream extracted URL to a PLAYER.
-c | --cookies <COOKIES_FILE> Load cookies.txt or cookies.sqlite.
-x | --http-proxy <HOST:PORT> Use an HTTP proxy for downloading.
-y | --extractor-proxy <HOST:PORT> Use an HTTP proxy for extracting only.
--no-proxy Never use a proxy.
    -s | --socks-proxy <HOST:PORT>     Use a SOCKS5 proxy for downloading.
-t | --timeout <SECONDS> Set socket timeout.
-d | --debug Show traceback and other debug info.
'''
short_opts = 'Vhfiuc:ndF:O:o:p:x:y:s:t:'
opts = ['version', 'help', 'force', 'info', 'url', 'cookies', 'no-caption', 'no-merge', 'no-proxy', 'debug', 'json', 'format=', 'stream=', 'itag=', 'output-filename=', 'output-dir=', 'player=', 'http-proxy=', 'socks-proxy=', 'extractor-proxy=', 'lang=', 'timeout=']
if download_playlist:
short_opts = 'l' + short_opts
opts = ['playlist'] + opts
try:
opts, args = getopt.getopt(sys.argv[1:], short_opts, opts)
except getopt.GetoptError as err:
log.e(err)
log.e("try 'you-get --help' for more options")
sys.exit(2)
global force
global dry_run
global json_output
global player
global extractor_proxy
global cookies
global output_filename
info_only = False
playlist = False
caption = True
merge = True
stream_id = None
lang = None
output_dir = '.'
proxy = None
socks_proxy = None
extractor_proxy = None
traceback = False
timeout = 600
for o, a in opts:
if o in ('-V', '--version'):
version()
sys.exit()
elif o in ('-h', '--help'):
version()
print(help)
sys.exit()
elif o in ('-f', '--force'):
force = True
elif o in ('-i', '--info'):
info_only = True
elif o in ('-u', '--url'):
dry_run = True
elif o in ('--json', ):
json_output = True
            # to fix extractors that do not use VideoExtractor
dry_run = True
info_only = False
elif o in ('-c', '--cookies'):
try:
cookies = cookiejar.MozillaCookieJar(a)
cookies.load()
except:
import sqlite3
cookies = cookiejar.MozillaCookieJar()
con = sqlite3.connect(a)
cur = con.cursor()
try:
cur.execute("SELECT host, path, isSecure, expiry, name, value FROM moz_cookies")
for item in cur.fetchall():
c = cookiejar.Cookie(0, item[4], item[5],
None, False,
item[0],
item[0].startswith('.'),
item[0].startswith('.'),
item[1], False,
item[2],
item[3], item[3]=="",
None, None, {})
cookies.set_cookie(c)
except: pass
# TODO: Chromium Cookies
# SELECT host_key, path, secure, expires_utc, name, encrypted_value FROM cookies
# http://n8henrie.com/2013/11/use-chromes-cookies-for-easier-downloading-with-python-requests/
elif o in ('-l', '--playlist'):
playlist = True
elif o in ('--no-caption',):
caption = False
elif o in ('-n', '--no-merge'):
merge = False
elif o in ('--no-proxy',):
proxy = ''
elif o in ('-d', '--debug'):
traceback = True
# Set level of root logger to DEBUG
logging.getLogger().setLevel(logging.DEBUG)
elif o in ('-F', '--format', '--stream', '--itag'):
stream_id = a
elif o in ('-O', '--output-filename'):
output_filename = a
elif o in ('-o', '--output-dir'):
output_dir = a
elif o in ('-p', '--player'):
player = a
caption = False
elif o in ('-x', '--http-proxy'):
proxy = a
elif o in ('-s', '--socks-proxy'):
socks_proxy = a
elif o in ('-y', '--extractor-proxy'):
extractor_proxy = a
elif o in ('--lang',):
lang = a
elif o in ('-t', '--timeout'):
timeout = int(a)
else:
log.e("try 'you-get --help' for more options")
sys.exit(2)
if not args:
print(help)
sys.exit()
if (socks_proxy):
try:
import socket
import socks
socks_proxy_addrs = socks_proxy.split(':')
socks.set_default_proxy(socks.SOCKS5,
socks_proxy_addrs[0],
int(socks_proxy_addrs[1]))
socket.socket = socks.socksocket
def getaddrinfo(*args):
return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
socket.getaddrinfo = getaddrinfo
except ImportError:
            log.w('Error importing PySocks library, socks proxy ignored. '
                  'In order to use a socks proxy, please install PySocks.')
else:
import socket
set_http_proxy(proxy)
socket.setdefaulttimeout(timeout)
try:
if stream_id:
if not extractor_proxy:
download_main(download, download_playlist, args, playlist, stream_id=stream_id, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
else:
download_main(download, download_playlist, args, playlist, stream_id=stream_id, extractor_proxy=extractor_proxy, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
else:
if not extractor_proxy:
download_main(download, download_playlist, args, playlist, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
else:
download_main(download, download_playlist, args, playlist, extractor_proxy=extractor_proxy, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
except KeyboardInterrupt:
if traceback:
raise
else:
sys.exit(1)
except UnicodeEncodeError:
log.e('[error] oops, the current environment does not seem to support Unicode.')
log.e('please set it to a UTF-8-aware locale first,')
log.e('so as to save the video (with some Unicode characters) correctly.')
log.e('you can do it like this:')
log.e(' (Windows) % chcp 65001 ')
log.e(' (Linux) $ LC_CTYPE=en_US.UTF-8')
sys.exit(1)
except Exception:
if not traceback:
log.e('[error] oops, something went wrong.')
log.e('don\'t panic, c\'est la vie. please try the following steps:')
log.e(' (1) Rule out any network problem.')
log.e(' (2) Make sure you-get is up-to-date.')
log.e(' (3) Check if the issue is already known, on')
log.e(' https://github.com/soimort/you-get/wiki/Known-Bugs')
log.e(' https://github.com/soimort/you-get/issues')
log.e(' (4) Run the command with \'--debug\' option,')
log.e(' and report this issue with the full output.')
else:
version()
log.i(args)
raise
sys.exit(1)
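# Hedged wiring sketch: the package entry point typically calls
#   script_main('you-get', any_download, any_download_playlist)
# (see main() at the bottom of this module), so all option parsing above applies
# to every extractor uniformly.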
def google_search(url):
keywords = r1(r'https?://(.*)', url)
url = 'https://www.google.com/search?tbm=vid&q=%s' % parse.quote(keywords)
page = get_content(url, headers=fake_headers)
videos = re.findall(r'<a href="(https?://[^"]+)" onmousedown="[^"]+">([^<]+)<', page)
vdurs = re.findall(r'<span class="vdur _dwc">([^<]+)<', page)
durs = [r1(r'(\d+:\d+)', unescape_html(dur)) for dur in vdurs]
print("Google Videos search:")
for v in zip(videos, durs):
print("- video: %s [%s]" % (unescape_html(v[0][1]),
v[1] if v[1] else '?'))
print("# you-get %s" % log.sprint(v[0][0], log.UNDERLINE))
print()
print("Best matched result:")
return(videos[0][0])
def url_to_module(url):
try:
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
assert video_host and video_url
except:
url = google_search(url)
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
if video_host.endswith('.com.cn'):
video_host = video_host[:-3]
domain = r1(r'(\.[^.]+\.[^.]+)$', video_host) or video_host
assert domain, 'unsupported url: ' + url
k = r1(r'([^.]+)', domain)
if k in SITES:
return import_module('.'.join(['you_get', 'extractors', SITES[k]])), url
else:
import http.client
conn = http.client.HTTPConnection(video_host)
conn.request("HEAD", video_url, headers=fake_headers)
res = conn.getresponse()
location = res.getheader('location')
if location and location != url and not location.startswith('/'):
return url_to_module(location)
else:
return import_module('you_get.extractors.universal'), url
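# Illustrative dispatch (module names are assumptions): url_to_module maps a URL's
# second-level domain through SITES to an extractor module, so a 'youtube.com'
# link would resolve to a you_get.extractors module for that site; unknown hosts
# fall back to the universal extractor after following one HTTP redirect.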
def any_download(url, **kwargs):
m, url = url_to_module(url)
m.download(url, **kwargs)
def any_download_playlist(url, **kwargs):
m, url = url_to_module(url)
m.download_playlist(url, **kwargs)
def main(**kwargs):
script_main('you-get', any_download, any_download_playlist, **kwargs)
|
[
"[email protected]"
] | |
ab8675c96b935a51728df70ac5b5869ed48e9804
|
37fd5a148523aed620426cc3f39c653e02ba2e17
|
/opencensus/trace/exceptions_status.py
|
a57bdec60434f73c96ac2a6102656e4fd033636a
|
[
"Apache-2.0"
] |
permissive
|
dineshkrishnareddy/opencensus-python
|
8ebfa74e5b487c91ec1fe5734487c9d673a77fad
|
e5e752ceab3371ec4b78cec23a717168e2ed9372
|
refs/heads/master
| 2022-02-18T08:40:07.319320 | 2019-10-01T22:41:07 | 2019-10-01T22:41:07 | 212,539,887 | 1 | 0 |
Apache-2.0
| 2019-10-03T09:16:46 | 2019-10-03T09:16:46 | null |
UTF-8
|
Python
| false | false | 914 |
py
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.rpc import code_pb2
from opencensus.trace.status import Status
CANCELLED = Status(code_pb2.CANCELLED)
INVALID_URL = Status(code_pb2.INVALID_ARGUMENT, message='invalid URL')
TIMEOUT = Status(code_pb2.DEADLINE_EXCEEDED, message='request timed out')
def unknown(exception):
return Status.from_exception(exception)
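# Hedged usage sketch (the span object and request call are assumptions, not part
# of this module):
#   try:
#       make_request()
#   except TimeoutError:
#       span.set_status(TIMEOUT)
#   except Exception as e:
#       span.set_status(unknown(e))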
|
[
"[email protected]"
] | |
56c1035dc9a2ff3dd0e77b2fe3db2a127c3c1dbb
|
59c5820be32dd498b6cda019b268c05db90a9ab3
|
/soundscapes/soundscape_splitter.py
|
e49c6f52a7a5f781bf3e63a009ad153df941d0c9
|
[
"Apache-2.0"
] |
permissive
|
thesteve0/birdclef21
|
1d881035e9e90f95536e1382b796f25c11326438
|
9c8748edbd6febe88191736406d838787e3c7a71
|
refs/heads/main
| 2023-05-02T21:52:35.733043 | 2021-05-21T16:28:24 | 2021-05-21T16:28:24 | 357,412,620 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,199 |
py
|
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
import math
import os
def audioToSlicedSpecto(input_file, output_stub):
chunk_length_sec = 5
# Set some of the values we use for the Spectrogram
n_fft = 2048
n_mels = 256
hop_length = 256 # This is basically the size of the window for averaging samples together
y, sample_rate = librosa.load(input_file, sr=None)
# Trim the silent edges from the file
sound_array, _ = librosa.effects.trim(y)
sound_array_median = np.median(sound_array)
print('loaded file: ' + input_file)
# sample rate is samples per second so the length of the array divided by the sample rate tells us the seconds in the total track
track_length = math.floor(librosa.get_duration(sound_array, sr=sample_rate))
# determine how many chunks can fit into track and then make an array incrementing from 0 by 5 up to the total number of chunks
time_steps = np.arange(0, track_length + 1, chunk_length_sec).tolist()
    # TODO: zero-pad the signal so its length is divisible by 5 seconds, then append
    # the new final segment to the time steps array:
    # if time_steps[-1] < track_length:
    #     time_steps.append(track_length)
# make two lists out of all the time steps we care about
# time steps = [0,5,7]
# starts = [0,5]
# stops = [5,7]
start_times = time_steps[:-1]
stop_times = time_steps[1:]
start_samples = list(map(lambda x: x * sample_rate, start_times))
stop_samples = list(map(lambda x: x * sample_rate, stop_times))
plt.figure(figsize=(60.48, 15.60), edgecolor='black', facecolor='black')
for i, (start, stop) in enumerate(zip(start_times, stop_times)):
        # slice the original signal list
        audio = sound_array[start_samples[i]:stop_samples[i]]
        # build the output name from the stub passed in by the caller
        # (the caller has already stripped the .ogg extension)
        out_filename = ''.join((output_stub, '_', str(start), '_', str(stop), '.png'))
mel = librosa.feature.melspectrogram
S = mel(audio, sr=sample_rate, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels,
fmin=1600.0, fmax=11000)
        # amin is the amplitude minimum (related to dB) that is treated as above zero. Raising it removes
        # more noise, but raise it too far and you start removing the signal you want.
        # ref is the reference value against which all values are standardized. Possible choices are mean, median, max.
        # We ended up using the median of the entire audio clip to rescale the values in each individual chunk.
p_to_d = librosa.power_to_db
S_DB = p_to_d(S, ref=sound_array_median, amin=0.0015)
spshow = librosa.display.specshow
spshow(S_DB, sr=sample_rate, hop_length=hop_length)
# Remove the black color using the method here in the article and save to disk
# https://www.delftstack.com/howto/matplotlib/hide-axis-borders-and-white-spaces-in-matplotlib/
plt.savefig(out_filename, bbox_inches='tight', pad_inches=0)
plt.close()
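# Illustrative result (filenames are assumptions): a 12 s clip with stub 'XC1234'
# yields 'XC1234_0_5.png' and 'XC1234_5_10.png'; the trailing 2 s are currently
# dropped, per the zero-padding TODO above.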
if __name__ == '__main__':
# Input directory should be a single directory with every species in its own sub-directory and no directories below that
input_directory = r'C:\Users\steve\data\six_species'
# This script will make this directory
output_directory = r'C:\Users\steve\data\six_species\_output'
# get all the folders and files using os.walk
# https://stackoverflow.com/questions/9727673/list-directory-tree-structure-in-python
for root, dirs, files in os.walk(input_directory):
# make all the output directories
for name in dirs:
output_path = os.path.join(output_directory, name)
if not os.path.exists(output_path):
os.makedirs(output_path)
for name in files:
out_file_prepped = os.path.join(output_directory, os.path.basename(root), os.path.splitext(name)[0])
print()
audioToSlicedSpecto(input_file=os.path.join(root, name), output_stub = out_file_prepped)
print("Done")
|
[
"[email protected]"
] | |
c2137568a2e94f717e43fd034e129651b46804a3
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/streamanalytics/azure-mgmt-streamanalytics/azure/mgmt/streamanalytics/operations/_inputs_operations.py
|
890d33f1b8b1901067d5182d5396b9ae6a0bfef4
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 |
MIT
| 2019-08-11T21:16:01 | 2018-11-28T21:34:49 |
Python
|
UTF-8
|
Python
| false | false | 28,587 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class InputsOperations(object):
"""InputsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~stream_analytics_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_replace(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input, # type: "models.Input"
if_match=None, # type: Optional[str]
if_none_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.Input"
"""Creates an input or replaces an already existing input under an existing streaming job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:param input: The definition of the input that will be used to create a new input or replace
the existing one under the streaming job.
:type input: ~stream_analytics_management_client.models.Input
:param if_match: The ETag of the input. Omit this value to always overwrite the current input.
Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes.
:type if_match: str
:param if_none_match: Set to '*' to allow a new input to be created, but to prevent updating an
existing input. Other values will result in a 412 Pre-condition Failed response.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Input, or the result of cls(response)
:rtype: ~stream_analytics_management_client.models.Input
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Input"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_replace.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(input, 'Input')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_replace.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
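    # Hedged note on the conditional headers above: If-Match/If-None-Match give
    # optimistic concurrency. For example, passing if_none_match='*' makes the PUT
    # fail with 412 Precondition Failed when the input already exists, so the call
    # becomes create-only.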
def update(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input, # type: "models.Input"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.Input"
"""Updates an existing input under an existing streaming job. This can be used to partially update
(ie. update one or two properties) an input without affecting the rest the job or input
definition.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:param input: An Input object. The properties specified here will overwrite the corresponding
properties in the existing input (ie. Those properties will be updated). Any properties that
are set to null here will mean that the corresponding property in the existing input will
remain the same and not change as a result of this PATCH operation.
:type input: ~stream_analytics_management_client.models.Input
:param if_match: The ETag of the input. Omit this value to always overwrite the current input.
Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Input, or the result of cls(response)
:rtype: ~stream_analytics_management_client.models.Input
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Input"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(input, 'Input')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes an input from the streaming job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.Input"
"""Gets details about the specified input.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Input, or the result of cls(response)
:rtype: ~stream_analytics_management_client.models.Input
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Input"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
def list_by_streaming_job(
self,
resource_group_name, # type: str
job_name, # type: str
select=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.InputListResult"]
"""Lists all of the inputs under the specified streaming job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param select: The $select OData query parameter. This is a comma-separated list of structural
properties to include in the response, or "\ *" to include all properties. By default, all
properties are returned except diagnostics. Currently only accepts '*\ ' as a valid value.
:type select: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InputListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~stream_analytics_management_client.models.InputListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.InputListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_streaming_job.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('InputListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_streaming_job.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs'} # type: ignore
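    # Hedged paging sketch (client construction is an assumption): ItemPaged
    # lazily follows next_link, so callers simply iterate:
    #   for inp in client.inputs.list_by_streaming_job('my-rg', 'my-job'):
    #       print(inp.name)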
def _test_initial(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input=None, # type: Optional["models.Input"]
**kwargs # type: Any
):
# type: (...) -> Optional["models.ResourceTestStatus"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ResourceTestStatus"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._test_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if input is not None:
body_content = self._serialize.body(input, 'Input')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceTestStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_test_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}/test'} # type: ignore
def begin_test(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input=None, # type: Optional["models.Input"]
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ResourceTestStatus"]
"""Tests whether an inputโs datasource is reachable and usable by the Azure Stream Analytics
service.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:param input: If the input specified does not already exist, this parameter must contain the
full input definition intended to be tested. If the input specified already exists, this
parameter can be left null to test the existing input as is or if specified, the properties
specified will overwrite the corresponding properties in the existing input (exactly like a
PATCH operation) and the resulting input will be tested.
:type input: ~stream_analytics_management_client.models.Input
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ResourceTestStatus or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~stream_analytics_management_client.models.ResourceTestStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ResourceTestStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._test_initial(
resource_group_name=resource_group_name,
job_name=job_name,
input_name=input_name,
input=input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ResourceTestStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_test.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}/test'} # type: ignore
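    # Hedged client-side usage sketch (client construction is an assumption):
    #   client = StreamAnalyticsManagementClient(credential, subscription_id)
    #   poller = client.inputs.begin_test('my-rg', 'my-job', 'input1')
    #   status = poller.result()   # ResourceTestStatus once the LRO completes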
|
[
"[email protected]"
] | |
e813294ef6a1fd27fd5b6a35d25c3055e06eb8fd
|
309963b86e666efceb3a816ca19ced70447d3d82
|
/crawl/test_url.py
|
6d6b805d036044512b3bc0976675029cc62ff533
|
[] |
no_license
|
player7450/ml-py
|
fd42cfa0248437ca8883702b3bd48df1771f36cc
|
0e10736498f0fe42431399ffd6980b9b5e1609c9
|
refs/heads/master
| 2021-09-03T08:16:42.011010 | 2018-01-07T12:26:52 | 2018-01-07T12:26:52 | 109,000,753 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 336 |
py
|
import urllib2
__author__ = 'liuzheng'
def test_url():
r = urllib2.urlopen("http://www.baidu.com");
url_text = r.read()
print url_text
def foo():
a = [1,2,3,4]
print a
a.append(5)
print a
b = tuple(a)
print b
c = (1,2,3,4)
if __name__=='__main__':
print 'hehe'
# test_url()
foo()
|
[
"[email protected]"
] | |
20b930f94ee43cd25a74f1887b8c7cef39c6b5ef
|
bc51fcd3fbea140dd7c47da83881aee5dbeb607a
|
/awscli/style.py
|
4ad34b34667ea227f795aa70ae7f0ee249838959
|
[
"Apache-2.0"
] |
permissive
|
hiroakis/aws-cli
|
46a3fce37e2e5dd86d807856a457f6a8643e0c4d
|
2f44552beb48ba02f2e7e1f410ee264bfe94a04c
|
refs/heads/master
| 2021-01-23T22:30:18.951608 | 2013-02-22T00:33:52 | 2013-02-22T00:33:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,732 |
py
|
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import textwrap
from six.moves import cStringIO
class BaseStyle(object):
def __init__(self, doc, indent_width=4, **kwargs):
self.doc = doc
self.indent_width = indent_width
self.kwargs = kwargs
self.keep_data = True
def spaces(self, indent):
return ' ' * (indent * self.indent_width)
def start_bold(self, attrs=None):
return ''
def end_bold(self):
return ''
def bold(self, s):
return self.start_bold() + s + self.end_bold()
def h2(self, s):
return self.bold(s)
def h3(self, s):
return self.bold(s)
def start_underline(self, attrs=None):
return ''
def end_underline(self):
return ''
def underline(self, s):
return self.start_underline() + s + self.end_underline()
def start_italics(self, attrs=None):
return ''
def end_italics(self):
return ''
def italics(self, s):
return self.start_italics() + s + self.end_italics()
def start_p(self, attrs=None):
self.doc.add_paragraph()
def end_p(self):
return ''
def start_code(self, attrs=None):
self.doc.do_translation = True
return self.start_bold(attrs)
def end_code(self):
self.doc.do_translation = False
return self.end_bold()
def start_a(self, attrs=None):
self.doc.do_translation = True
return self.start_underline()
def end_a(self):
self.doc.do_translation = False
return self.end_underline()
def start_i(self, attrs=None):
self.doc.do_translation = True
return self.start_italics()
def end_i(self):
self.doc.do_translation = False
return self.end_italics()
def start_li(self, attrs):
return ''
def end_li(self):
return ''
def start_examples(self, attrs):
self.doc.keep_data = False
def end_examples(self):
self.doc.keep_data = True
class CLIStyle(BaseStyle):
def start_bold(self, attrs=None):
if self.kwargs.get('do_ansi', False):
return '\033[1m'
return ''
def end_bold(self):
if self.kwargs.get('do_ansi', False):
return '\033[0m'
return ''
def start_underline(self, attrs=None):
if self.kwargs.get('do_ansi', False):
return '\033[4m'
return ''
def end_underline(self):
if self.kwargs.get('do_ansi', False):
return '\033[0m'
return ''
def start_italics(self, attrs=None):
if self.kwargs.get('do_ansi', False):
return '\033[3m'
return ''
def end_italics(self):
if self.kwargs.get('do_ansi', False):
return '\033[0m'
return ''
def start_li(self, attrs=None):
para = self.doc.add_paragraph()
para.subsequent_indent = para.initial_indent + 1
para.write(' * ')
def h2(self, s):
para = self.doc.get_current_paragraph()
para.lines_before = 1
return self.bold(s)
def end_p(self):
para = self.doc.get_current_paragraph()
para.lines_after = 2
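    # Hedged usage sketch: the style is bound to a document object that supplies
    # add_paragraph()/get_current_paragraph(); with do_ansi enabled the markup
    # helpers wrap text in ANSI escapes, e.g.
    #   style = CLIStyle(doc, do_ansi=True)
    #   style.bold('Usage')   # -> '\x1b[1mUsage\x1b[0m'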
|
[
"[email protected]"
] | |
73be6a781970e67e35078a012c9ece685a4f9bb3
|
36671c0625da3599bd2a6b3750bc837f159ac045
|
/Tools/NEOREC/EMGdecode.py
|
797624344376fd5453b25b56bb7ae64f1cdbbe91
|
[] |
no_license
|
KKaplun/MyoSynse
|
dcda7a9ca57359f332a5db6d007db533016b5041
|
b4225aeb6646631ce09954c966165d911959fcc5
|
refs/heads/master
| 2020-12-15T08:28:11.131237 | 2020-01-21T10:01:28 | 2020-01-21T10:01:28 | 235,046,231 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,369 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 21:15:26 2018
@author: Александр
"""
import scipy.io
import numpy as np
import h5py
import PNinterpolate
from EMGfilter import envelopeFilter
import tqdm
from matplotlib import pyplot as plt
class EMGDecoder:
def __init__(self):
self.emgFilter=envelopeFilter()
self.fitted=False
def loadParams(self, path=''):
self.Tail = 0
self.emg_buffer = 0
self.WienerCoordsBuffer = 0
self.KalmanCoordsBuffer = 0
self.emg_buffer_size = 500
if path:
filterParams = scipy.io.loadmat(path)
else:
filterParams = scipy.io.loadmat('filterParams.mat')
self.lag = int(filterParams['lag'])
self.forward = int(filterParams['forward'])
self.downsample = int(filterParams['downsample'])
self.A = filterParams['A']
self.W = filterParams['W']
self.Ex = filterParams['Ex']
self.Ez = filterParams['Ez']
self.P_after = np.copy(self.Ex)
self.P_before = np.empty((self.Ez.shape[0], self.Ez.shape[0]))
self.Kalman_estimate = np.empty((self.W.shape[0],1))
self.Wiener_Estimate = np.empty((self.W.shape[0],1))
self.fitted=True
def fit(self, X=None,Y=None,file='experiment_data.h5',numCh=64,offCh=64,Pn=[59,77,101,125],lag=2,forward=0,downsample=0):
self.numCh=numCh
self.offCh=offCh
self.Pn=Pn
self.lag=lag
self.forward=forward
self.downsample=downsample
if type(X)==type(None) or type(Y)==type(None):
#try and read file then
with h5py.File(file,'r+') as f1:
raw_data = np.array(f1['protocol1']['raw_data'])
Y=raw_data[:,[p+self.offCh for p in Pn]]
X=raw_data[:,:self.numCh]
del raw_data
#get the envelope of EMG data and interpolate PN to EMG samplerate
X=self.emgFilter.filterEMG(X)
Y=PNinterpolate.interpolatePN(Y)
def offset(data,lag,leftOffset,rightOffset=0):
return data[leftOffset+lag:data.shape[0]-rightOffset]
emg_lag=np.empty((X.shape[0]-2-self.lag-self.forward,numCh*(self.lag+1)));
for l in range(self.lag+1):
emg_lag[:,l*numCh:(l+1)*numCh]=X[(2+l):(X.shape[0]-self.lag-self.forward+l),:]
Coord=np.copy(Y)
Vel=np.apply_along_axis(np.diff,0,Coord)
Acc=np.apply_along_axis(np.diff,0,Vel)
Coords=np.hstack((np.apply_along_axis(offset,0,Coord,self.lag,2),np.apply_along_axis(offset,0,Vel,self.lag,1),np.apply_along_axis(offset,0,Acc,self.lag,0)))
EMG_signals=np.hstack((np.ones((emg_lag.shape[0],1)),emg_lag));
self.W = np.linalg.pinv(EMG_signals)
self.W = self.W @ Coords
Measurement_Error=Coords-EMG_signals @ self.W;
Measurement_Error_Covar=np.cov(Measurement_Error.T);
self.W = self.W.T;
Now=np.hstack((np.apply_along_axis(offset,0,Coord,self.lag,3),np.apply_along_axis(offset,0,Vel,self.lag,2),np.apply_along_axis(offset,0,Acc,self.lag,1)))
Lag=np.hstack((np.apply_along_axis(offset,0,Coord,self.lag,2,1),np.apply_along_axis(offset,0,Vel,self.lag,1,1),np.apply_along_axis(offset,0,Acc,self.lag,0,1)))
self.A=np.linalg.pinv(Lag) @ Now
State_Trans_Error=Now-Lag @ self.A
State_Trans_Covar=np.cov(State_Trans_Error.T)
self.A=self.A.T
self.Ex = State_Trans_Covar; # process noise
self.Ez = Measurement_Error_Covar; # measurement noise
self.P_after = np.copy(self.Ex)
self.P_before = np.empty((self.Ez.shape[0], self.Ez.shape[0]))
self.Kalman_estimate = np.empty((self.W.shape[0],1))
self.Wiener_Estimate = np.empty((self.W.shape[0],1))
self.fitted=True
scipy.io.savemat('filterParams.mat', mdict={'W': self.W, 'A':self.A, 'Ex': self.Ex, 'Ez':self.Ez,'lag':self.lag,'forward':self.forward,'downsample':self.downsample})
self.loadParams()
def evaluate(self,X=None,Y=None,file='experiment_data.h5', numCh=None,offCh=None,Pn=None,lag=None,forward=None,downsample=None):
numCh = self.numCh if type(numCh)==type(None) else numCh
offCh = self.offCh if type(offCh)==type(None) else offCh
Pn = self.Pn if type(Pn)==type(None) else Pn
lag = self.lag if type(lag)==type(None) else lag
forward = self.forward if type(forward)==type(None) else forward
downsample = self.downsample if type(downsample)==type(None) else downsample
if type(X)==type(None) or type(Y)==type(None):
#try and read file then
with h5py.File(file,'r+') as f1:
raw_data = np.array(f1['protocol1']['raw_data'])
Y=raw_data[:,[p+offCh for p in Pn]]
X=raw_data[:,:numCh]
del raw_data
#get the envelope of EMG data and interpolate PN to EMG samplerate
X=self.emgFilter.filterEMG(X)
Y=PNinterpolate.interpolatePN(Y)
emg_lag=np.empty((X.shape[0]-2-lag-forward,numCh*(lag+1)));
for l in range(lag+1):
emg_lag[:,l*numCh:(l+1)*numCh]=X[(2+l):(X.shape[0]-lag-forward+l),:]
Coord=np.copy(Y)
EMG_signals=np.hstack((np.ones((emg_lag.shape[0],1)),emg_lag))
WienerCoords=np.empty((EMG_signals.shape[0],self.Ex.shape[1]))
KalmanCoords=np.empty((EMG_signals.shape[0],self.Ex.shape[1]))
for t in tqdm.tqdm(range(EMG_signals.shape[0])):
#Predict coordinate by state measurement equation
X_measurement_estimate=self.W @ EMG_signals[t,:][:,None];
#Store Wiener Estimate
WienerCoords[t,:]=X_measurement_estimate.T;
#Kalman
X_state_estimate = self.A @ self.Kalman_estimate
self.P_before = self.A @ self.P_after @ self.A.T + self.Ex
self.P_after=np.linalg.pinv(np.linalg.pinv(self.P_before)+np.linalg.pinv(self.Ez))
self.Kalman_estimate=self.P_after @ (np.linalg.pinv(self.P_before) @ X_state_estimate+np.linalg.pinv(self.Ez) @ X_measurement_estimate)
KalmanCoords[t,:] = self.Kalman_estimate.T
Kc=KalmanCoords[:,:Coord.shape[1]]
Tc=Coord[lag+2:,:]
kalmanStabilizationOffset=round(Tc.shape[0]*0.05)
plt.figure()
for i in range(Kc.shape[1]):
plt.subplot(Kc.shape[1]*100+11+i)
plt.plot(Kc[kalmanStabilizationOffset:,i])
plt.plot(Tc[kalmanStabilizationOffset:,i])
for i in range(Kc.shape[1]):
print(np.corrcoef(Kc[2000:,i].T,Tc[2000:,i].T)[0,1])
def fitEvaluate(self,X=None,Y=None,file='experiment_data.h5',testRatio=1/2,numCh=64,offCh=64,Pn=[59,77,101,125],lag=2,forward=0,downsample=0):
self.numCh=numCh
self.offCh=offCh
self.Pn=Pn
self.lag=lag
self.forward=forward
self.downsample=downsample
if type(X)==type(None) or type(Y)==type(None):
with h5py.File(file,'r+') as f1:
raw_data = np.array(f1['protocol1']['raw_data'])
Y=raw_data[:,[p+offCh for p in Pn]]
X=raw_data[:,:numCh]
del raw_data
split=round(X.shape[0]*(1-testRatio))
self.fit(X[:split,:],Y[:split,:])
self.evaluate(X[split:,:],Y[split:,:])
def transform(self,EMGchunk):
if not self.fitted:
self.loadParams()
chunkSize=EMGchunk.shape[0]
numCh=EMGchunk.shape[1]
        if np.isscalar(self.Tail):
            # First chunk: allocate persistent buffers on the instance so that
            # later calls (the else branch) can reuse them across chunks.
            self.emg_buffer=np.empty((self.emg_buffer_size,1+numCh*(self.lag+1)))
            self.emg_buffer[:,0]=1
            self.WienerCoordsBuffer=np.empty((self.emg_buffer_size,self.W.shape[0]))
            self.KalmanCoordsBuffer=np.empty((self.emg_buffer_size,self.W.shape[0]))
            self.Tail=np.zeros((self.lag+1,numCh*3))
            emg_lag=self.emg_buffer[:chunkSize-self.lag-self.forward,1:]
            for l in range(self.lag+1):
                emg_lag[:,l*numCh:(l+1)*numCh]=EMGchunk[(l):(chunkSize-self.lag-self.forward+l),:]
                self.Tail[:self.lag-l,l*numCh:(l+1)*numCh]=EMGchunk[chunkSize-self.lag+l:chunkSize,:]
            emg_lag=self.emg_buffer[:chunkSize-self.lag-self.forward,:]
        else:
            # Subsequent chunks: splice in the tail saved from the previous chunk.
            emg_lag=self.emg_buffer[:chunkSize-self.forward,1:]
            for l in range(self.lag+1):
                emg_lag[self.lag-l:chunkSize,l*numCh:(l+1)*numCh]=EMGchunk[0:(chunkSize-self.lag-self.forward+l),:]
                emg_lag[0:self.lag-l,l*numCh:(l+1)*numCh]=self.Tail[:self.lag-l,l*numCh:(l+1)*numCh]
                self.Tail[:self.lag-l,l*numCh:(l+1)*numCh]=EMGchunk[chunkSize-self.lag+l:chunkSize,:]
            emg_lag=self.emg_buffer[:chunkSize-self.forward,:]
WienerCoords=self.WienerCoordsBuffer[:emg_lag.shape[0],:]
KalmanCoords=self.KalmanCoordsBuffer[:emg_lag.shape[0],:]
for t in range(emg_lag.shape[0]):
#Predict coordinate by state measurement equation
X_measurement_estimate=self.W @ emg_lag[t,:][:,None];
#Store Wiener Estimate
WienerCoords[t,:]=X_measurement_estimate.T;
#Kalman
X_state_estimate = self.A @ self.Kalman_estimate
self.P_before = self.A @ self.P_after @ self.A.T + self.Ex
self.P_after=np.linalg.pinv(np.linalg.pinv(self.P_before)+np.linalg.pinv(self.Ez))
self.Kalman_estimate=self.P_after @ (np.linalg.pinv(self.P_before) @ X_state_estimate+np.linalg.pinv(self.Ez) @ X_measurement_estimate)
KalmanCoords[t,:] = self.Kalman_estimate.T
return WienerCoords, KalmanCoords
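    # Hedged streaming usage sketch (channel count and chunk shape are assumptions):
    #   decoder = EMGDecoder()
    #   decoder.loadParams('filterParams.mat')
    #   wiener, kalman = decoder.transform(chunk)   # chunk: (n_samples, n_channels)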
|
[
"[email protected]"
] | |
88fa0f7981f3210b9998f330762fc45808b2a807
|
8a5444d37a9d926bd38261689f6c0e3f477961bb
|
/ExportToDWGsExportOptions.py
|
c43572875815b46df7395c5e0ab63fba423fb955
|
[
"MIT"
] |
permissive
|
tuskin40/pyDynamo
|
6d27dead162685f96a894e8fcb963c880e73f80d
|
550e105ec27c29e9055c16b46e0b8ecc0960421b
|
refs/heads/master
| 2023-08-02T11:06:28.691378 | 2021-09-29T02:34:42 | 2021-09-29T02:34:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 940 |
py
|
__author__ = 'Danny Bentley - [email protected]'
__twitter__ = '@danbentley'
__Website__ = 'http://dannybentley.tech/'
__version__ = '1.0.0'
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
clr.AddReference('RevitServices')
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
doc = DocumentManager.Instance.CurrentDBDocument
options = None
#check if the dwg export setting matches this name
dwg_opt = "- SOM Struc Export"
#collect all the settings in your project.
settings = FilteredElementCollector(doc).WherePasses(ElementClassFilter(ExportDWGSettings))
for element in settings:
if element.Name == dwg_opt:
options = element.GetDWGExportOptions()
break
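#fall back to the default export options when no setting matches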
if options is None:
options = DWGExportOptions()
OUT = options
|
[
"[email protected]"
] | |
4cb683ce0b6fa1bdbf35d8661b4e1bb6cf8b8627
|
18c6073d1d9e1a1e22a1c5f734377ebc5ceb4b40
|
/stock_predictor/stockprediction/models.py
|
3ed79fd2f70c2a0aaeeb453053f51b5b82c21dcf
|
[] |
no_license
|
shashankgd/mokshtech
|
734846d7a1466385c42bda36f705c32918e61e60
|
f533de0fe99a9646413a5ed48bdeeb55a11578a9
|
refs/heads/master
| 2020-04-05T21:48:23.594195 | 2018-11-05T01:21:39 | 2018-11-05T01:21:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,415 |
py
|
import numpy as np
from keras.layers.core import Dense, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from sklearn import neighbors
from sklearn import svm
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import property as p
def buildModel(X_train, X_test, y_train, y_test, forcast_scaled, method):
"""
Build final model for predicting real testing data
"""
if method == 'RNN':
regressor, MSE, X_train, X_test, y_train, y_test, forcast_scaled= performRNNlass(X_train, X_test, y_train, y_test, forcast_scaled)
print(method,MSE)
return regressor,MSE,X_train, X_test, y_train, y_test,forcast_scaled
elif method == 'RF':
regressor, MSE =performRFR(X_train, X_test, y_train, y_test)
print(method,MSE)
return regressor,MSE,X_train, X_test, y_train, y_test,forcast_scaled
elif method == 'SVR':
regressor, MSE = performSVR(X_train, X_test, y_train, y_test)
print(method,MSE)
return regressor,MSE,X_train,X_test,y_train , y_test,forcast_scaled
elif method == 'KNN':
clf = neighbors.KNeighborsClassifier()
return
elif method == 'ADA':
clf = AdaBoostClassifier()
return
def performRFR(X_train, X_test, y_train, y_test):
print('rfr1',X_train.shape,X_test.shape, y_train.shape,y_test.shape)
seed = p.seed
num_trees = p.n_estimators
n_splits=p.n_splits
njobs=p.n_jobs
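# fit a random forest on the training split and score it by test-set MSE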
model = RandomForestRegressor(n_estimators=num_trees, n_jobs=njobs)
model.fit(X_train, y_train)
MSE = mse_error(y_test,X_test,model)
return model, MSE
def performRNNlass(X_train, X_test, y_train, y_test, forcast_scaled):
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
forcast_scaled = np.reshape(forcast_scaled, (forcast_scaled.shape[0], forcast_scaled.shape[1], 1))
regressor= Sequential()
dropoutunit=p.dropoutunit
LSTM_unit_increment = p.LSTM_unit_increment
# Adding the first LSTM layer and some Dropout regularisation
regressor.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))
regressor.add(Dropout(dropoutunit))
LSTM_units = 50
LSTM_units = LSTM_units + LSTM_unit_increment
# Adding a second LSTM layer and some Dropout regularisation
regressor.add(LSTM(units=LSTM_units, return_sequences=True))
regressor.add(Dropout(dropoutunit))
# Adding a third LSTM layer and some Dropout regularisation
LSTM_units = LSTM_units + LSTM_unit_increment
regressor.add(LSTM(units=LSTM_units, return_sequences=True))
regressor.add(Dropout(dropoutunit))
# Adding a fourth LSTM layer and some Dropout regularisation
LSTM_units = LSTM_units + LSTM_unit_increment
regressor.add(LSTM(units=LSTM_units))
regressor.add(Dropout(dropoutunit))
# print(X_train.shape,y_train.shape)
# Adding the output layer
regressor.add(Dense(units=1))
# Compiling the RNN
regressor.compile(optimizer='adam', loss='mean_squared_error')
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, epochs=p.epochs, batch_size=p.batch_size)
print('rnn model build',X_test.shape)
score = regressor.evaluate(X_test, y_test, batch_size=100, verbose=0)
return regressor,score,X_train, X_test, y_train, y_test,forcast_scaled
def performSVR(X_train, X_test, y_train, y_test):
model = svm.SVR(kernel='rbf', C=1e3, gamma=0.5,epsilon=p.epsilon)
model.fit(X_train, y_train)
MSE = mse_error(y_test,X_test,model)
return model, MSE
def performKNNClass(X_train, y_train, X_test, y_test, parameters):
"""
KNN binary Classification
"""
clf = neighbors.KNeighborsClassifier(parameters[0])
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
return accuracy
def performAdaBoostClass(X_train, y_train, X_test, y_test, forcast):
"""
Ada Boosting binary Classification
"""
# n = parameters[0]
# l = parameters[1]
clf = AdaBoostClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
return accuracy
def mse_error(ytest,xtest,model):
return mean_squared_error(ytest, model.predict(xtest))
|
[
"[email protected]"
] | |
7459710f51cc4fc67c81c661b1cdbb49a98825ab
|
4e4171d9e94dd44b98b7010d86fd31b8a3c8c33e
|
/bb8/template.py
|
43fd985415ae80cd75cb7d1068de791347c6026e
|
[] |
no_license
|
thongdong7/bb8
|
1582d4f4bde06f17b95410c7ae67647189036744
|
2b771bd12584250456b1fc5b27ceeb37c55bbb14
|
refs/heads/master
| 2021-06-17T10:25:30.520708 | 2016-11-08T08:08:54 | 2016-11-08T08:08:54 | 59,622,849 | 0 | 0 | null | 2021-03-25T21:27:51 | 2016-05-25T02:01:40 |
Python
|
UTF-8
|
Python
| false | false | 261 |
py
|
from jinja2 import Template
class TemplateEngine(object):
def __init__(self):
self.params = {}
def load_params(self, params):
self.params.update(params)
def render(self, text):
return Template(text).render(**self.params)
|
[
"[email protected]"
] | |
91b306ecb2af69f0d6d781d57251266678f159f2
|
f8d3f814067415485bb439d7fe92dc2bbe22a048
|
/models/research/syntaxnet/dragnn/python/file_diff_test.py
|
9e9f1daa40a64ff9595724e30dbc95591ae299c2
|
[
"Apache-2.0"
] |
permissive
|
gmonkman/python
|
2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3
|
9123aa6baf538b662143b9098d963d55165e8409
|
refs/heads/master
| 2023-04-09T15:53:29.746676 | 2022-11-26T20:35:21 | 2022-11-26T20:35:21 | 60,254,898 | 0 | 2 | null | 2023-03-24T22:58:39 | 2016-06-02T10:25:27 |
Python
|
UTF-8
|
Python
| false | false | 1,631 |
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diff test that compares two files are identical."""
from absl import flags
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string('actual_file', None, 'File to test.')
flags.DEFINE_string('expected_file', None, 'File with expected contents.')
class DiffTest(tf.test.TestCase):
def testEqualFiles(self):
content_actual = None
content_expected = None
try:
with open(FLAGS.actual_file) as actual:
content_actual = actual.read()
except IOError as e:
self.fail("Error opening '%s': %s" % (FLAGS.actual_file, e.strerror))
try:
with open(FLAGS.expected_file) as expected:
content_expected = expected.read()
except IOError as e:
self.fail("Error opening '%s': %s" % (FLAGS.expected_file, e.strerror))
self.assertEqual(content_actual, content_expected)
if __name__ == '__main__':
tf.test.main()
|
[
"[email protected]"
] | |
ca9362d170a5e072bbd92f1841f0cc91721cf3e2
|
69954cf777a73db48a7efabe8ef8cf655e5c2864
|
/NetworkLib.py
|
83838037506da9a84f389a1140b094679cbc19e5
|
[] |
no_license
|
batuhannzorlu/Network-Project
|
e215dc6bf65c668b79af69d591427bd3a7a5191b
|
ec23b0f101528b00c2d953fa7ae1a33a3ee5a75f
|
refs/heads/main
| 2023-07-09T11:54:21.394920 | 2021-08-08T17:32:56 | 2021-08-08T17:32:56 | 394,027,607 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,578 |
py
|
import paramiko
import time
import threading
class Switch:
def __init__( self,IP, EnableSecret='-1',SshHname='admin',SshPsswd='admin'):
self.IP = IP
self.EnableSecret = EnableSecret
self.SshHname = SshHname
self.SshPsswd = SshPsswd
class Router:
def __init__(self, IP, EnableSecret='-1',SshHname='admin',SshPsswd='admin'):
self.IP = IP
self.EnableSecret = EnableSecret
self.SshHname = SshHname
self.SshPsswd = SshPsswd
def ConnectViaSSH(device:Router):
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh_client.connect(device.IP,'22',device.SshHname,device.SshPsswd,
look_for_keys=False, allow_agent=False)
print('Connected Successfully!')
return ssh_client
def ConnectViaTELNET(device:Switch):
pass
def SendCommand(shell,Command):
print('sent')
shell.send(Command+'\n')
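# wait briefly so the device can process the command and produce output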
time.sleep(1)
return shell
def PrintOutput(shell):
output = shell.recv(10000)
output = output.decode('utf-8')
print(output)
def RIPV2SUB24Conf(shell,device:Router,Subnet=24):
# if (device.EnableSecret != '-1'):
SendCommand(shell,device.EnableSecret)
#SendCommand(shell,'show run | include (interface | ip address)')
SendCommand(shell,'show ip int bri')
output = shell.recv(10000)
output = output.decode('utf-8')
output_list = output.splitlines()
SendCommand(shell, ' en')
SendCommand(shell, 'admin')
SendCommand(shell, 'conf terminal')
SendCommand(shell, 'router rip')
SendCommand(shell, 'version 2')
for line in output_list:
if( line.__contains__('up')):
s = str(line)
IntIp = s.split()
if IntIp[1] != 'unassigned' :
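# zero the last octet to advertise the /24 network address in RIP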
SIntIp = IntIp[1].split('.')
IntIp[1] = SIntIp[0]+'.'+SIntIp[1]+'.'+SIntIp[2]+'.'+'0'
SendCommand(shell,'network'+' '+IntIp[1])
PrintOutput(shell)
client.close()
#User must create the vlan and give it ip address.
def EtherChannel(interface_list,vlan_num,port_channel_num, mode):
SendCommand(shell, ' en')
SendCommand(shell, 'admin')
SendCommand(shell, 'conf terminal')
for interface in interface_list:
SendCommand(shell,f'int {interface}')
SendCommand(shell,'switchport mode access')
SendCommand(shell,f'switchport access vlan {vlan_num}')
SendCommand(shell,f'channel-group {port_channel_num} mode {mode}')
SendCommand(shell,f'interface port-channel {port_channel_num}')
SendCommand(shell,'switchport trunk encapsulation dot1q')
SendCommand(shell,'switchport mode trunk')
SendCommand(shell,f'switchport trunk allowed vlan {vlan_num}')
PrintOutput(shell)
def DhcpConf(shell,device,network,SubnetMask,DefaultRouter,poolname):
SendCommand(shell,'')
def BackUp():
pass
def MultiThreading(DeviceList,TargetFunction):
threads =list()
for device in DeviceList:
th=threading.Thread(target=TargetFunction,args=(device,))
threads.append(th)
for th in threads:
th.start()
for th in threads:
th.join()
router2 = Router('10.1.1.3','admin')
client = ConnectViaSSH(router2)
shell = client.invoke_shell()
#interfaces=['e 1/0','e 1/1']
#EtherChannel(interfaces,12,1,'on')
#ripV2Conf(shell,router2)
#SendCommand(shell,'en')
#SendCommand(shell,'admin')
#SendCommand(shell,'sh run')
#PrintOutput(shell)
|
[
"[email protected]"
] | |
6bb7357e4c3c78a71da4398592fc78ff38a7ab5c
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/gaussiana/ch3_2020_09_14_14_36_41_642784.py
|
986bff292e3d397ff9a597fd31a1ee3912e49175
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 160 |
py
|
import math
def calcula_gaussiana (x,mu,sigma) :
f1 = 1/(sigma*math.sqrt(2*math.pi))
f2 = math.exp(-0.5*((x-mu)/sigma)**2)
y = f1*f2
return y
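# example: calcula_gaussiana(0, 0, 1) ~ 0.3989, i.e. 1/sqrt(2*pi), the peak of the standard normal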
|
[
"[email protected]"
] | |
6ba85c1726abfdec9f1ff343711d9becab558a89
|
c45ddd5464d8e9415b41543cbe6fb2d1593c3b23
|
/exercicios_em_sala/exe05/exe05_client.py
|
9a943859703d364324ff13589bea641d51d84610
|
[] |
no_license
|
msouto/20172-redes-2v-programacao-redes
|
38eb59f269840ea7062af48816720e262e91d880
|
5da51989270ff1de35019c014f99ba0b007ef939
|
refs/heads/master
| 2021-01-19T16:02:39.679034 | 2017-12-18T20:27:58 | 2017-12-18T20:27:58 | 100,985,051 | 21 | 25 | null | 2017-11-20T20:14:49 | 2017-08-21T19:23:51 |
Python
|
UTF-8
|
Python
| false | false | 280 |
py
|
from jsonsocket import Client, Server
host = '127.0.0.1'
port = '8001'
# Client code:
client = Client()
client.connect(host, int(port)).send({'some_list': [123, 456]})
response = client.recv()
print(response)
# response now is {'data': {'some_list': [123, 456]}}
client.close()
|
[
"[email protected]"
] | |
59944bb8fa971396a0f7e49931ba6f9bf8ed1091
|
4b29c3e3c8a2cad5071a3fb2ea674253c6f0ef21
|
/pycharm/digiin/case/TestLogin.py
|
70e3880684b38a0a5d5a1bb7b50cd59768931663
|
[] |
no_license
|
yz9527-1/1YZ
|
a0303b00fd1c7f782b7e4219c52f9589dd3b27b7
|
5f843531d413202f4f4e48ed0c3d510db21f4396
|
refs/heads/master
| 2022-11-30T23:50:56.682852 | 2020-08-10T02:11:13 | 2020-08-10T02:11:13 | 286,354,211 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,586 |
py
|
#coding=utf-8
import time
import unittest
from ddt import ddt, data, unpack
from selenium import webdriver
from common.ExcelUtil import ExcelUtil
class Case(object):
def __init__(self):
pass
def get_case(self):
"""
Load the test data.
Read the cases from Excel and return them ordered as:
email address, password, expected-result locator, expected result.
"""
# read the raw rows from the Excel sheet
sheet='Login'
file=ExcelUtil(sheet_name=sheet)
rows=file.get_data()
# find the column index of each field, then read the values in that order
email_index=rows[0].index("email address")
password_index=rows[1].index("password")
expected_element_index=rows[2].index("expected result locator")
expected_index=rows[3].index("expected result")
data_length=rows.__len__()
all_cases=[]
# skip the header row and keep only the columns the test needs
for i in range(1,data_length):
case=[]
case.append(rows[i][email_index])
case.append(rows[i][password_index])
case.append(rows[i][expected_element_index])
case.append(rows[i][expected_index])
all_cases.append(case)
return all_cases
class Login(object):
def __init__(self,driver):
self.driver=driver
def login(self,email,password):
"""Fill in the email address and password, then click the login button."""
time.sleep(1)
if email!=None:
email_element=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[1]/input')
email_element.send_keys(email)
time.sleep(1)
if password!=None:
password_element=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[2]/input')
password_element.send_keys(password)
time.sleep(1)
login_btn=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[3]/input')
login_btn.click()
def login_assert(self,assert_type,assert_message):
"""Assert on the login result."""
time.sleep(1)
if assert_type=='email error':
email_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[1]/input').text
assert email_message==assert_message
elif assert_type=='password error':
password_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[2]/input').text
assert password_message==assert_message
elif assert_type=='login sucess' or assert_type=='login fail':
login_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[3]/input').text
assert login_message==assert_message
else:
print("The assertion type passed in is not valid")
@ddt
class TestLogin(unittest.TestCase):
"""Test the login flow."""
def setUp(self):
self.driver=webdriver.Chrome()
url="http://192.168.0.30:18069"
self.driver.implicitly_wait(20)
self.driver.maximize_window()
self.driver.get(url=url)
def tearDown(self):
self.driver.quit()
case=Case().get_case()
@data(*case)
@unpack
def test_login(self,email,password,assert_type,assert_message):
login=Login(driver=self.driver)
login.login(email=email,password=password)
login.login_assert(assert_type=assert_type,assert_message=assert_message)
if __name__=='__main__':
unittest.main()
|
[
"[email protected]"
] | |
7a54c06f19a3583c531d27b86a0c4953aa5d59fd
|
147ad0231450e0b2ad14a8da8cc515a52f6a425a
|
/venv/bin/pyrsa-encrypt
|
4beb1da65ee6918a981d2ce17c2ef9186a773101
|
[] |
no_license
|
ayush-patel/PriceDrop-backend
|
4950956a801c8172ba6b05effd4415e50c4d9cf1
|
8721fafd3061f15f606db3f642693dd195b309dc
|
refs/heads/master
| 2021-01-19T08:13:19.704727 | 2017-04-08T06:15:09 | 2017-04-08T06:15:09 | 87,612,661 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 265 |
#!/Users/ayush/Documents/Development/PriceChange/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import encrypt
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(encrypt())
|
[
"[email protected]"
] | ||
17989c3088b3000ff5653aa61f6730d7a718bb06
|
75258c8efa8e756234f7d32f729f1089e1667594
|
/DawgCTF 2020/Coding/Miracle Mile/client0.py
|
aff5a4e950aab4fac76367a1705d9fa58bcc478f
|
[] |
no_license
|
darkvoid32/CTF-writeups
|
7c9452a74930d63c26246311fc9de89a568c65f1
|
ea19afefa93b4cfb08f3d655bbf1065bb94cd6ac
|
refs/heads/master
| 2021-04-05T05:53:43.055056 | 2021-03-07T15:18:52 | 2021-03-07T15:18:52 | 248,516,925 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,314 |
py
|
# -*- coding: utf-8 -*-
"""
Created for Spring 2020 CTF
Cryptography 0
10 Points
Welcome to my sanity check. You'll find this to be fairly easy.
The oracle is found at umbccd.io:13370, and your methods are:
flg - returns the flag
tst - returns the message after the : in "tst:..."
@author: pleoxconfusa
"""
import socket
import math
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = ('ctf.umbccd.io', 5300)
sock.connect(server_address)
#sock.sendall(msg.encode())
data = sock.recv(1024)
while 1:
data = sock.recv(1024)
print(data.decode())
no = data.decode().split('I ran ')[1].split(' ')[0]
time = data.decode().split(' in ')[1].split(' ')[0]
print(no)
print(time)
curr_sec = time.split(':')[2]
curr_min = time.split(':')[1]
curr_hour = time.split(':')[0]
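# convert h:m:s to total seconds, then divide by miles run to get the pace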
new_min = int(curr_min) + int(curr_hour) * 60
new_sec = int(curr_sec) + int(new_min) * 60
new_sec_div = new_sec / int(no)
print(new_sec_div)
print(math.floor(new_sec_div))
sec = math.floor(new_sec_div) % 60
print(sec)
minute = (math.floor(new_sec_div) - sec) / 60
print(str(int(minute)) + ':' + str(sec) + ' minutes/mile')
sock.sendall(str.encode(str(int(minute)) + ':' + str(sec) + ' minutes/mile'))
#sock.sendall()
sock.close()
|
[
"[email protected]"
] | |
1e47b7d55a3b5b0dd7d1bb4083416b058727c5c6
|
2b40312991aee831532551de4524e9a6182ad2dd
|
/auto_otc_confirm_chk.py
|
90344acd9554491c54c5542538618c0b02c4faf7
|
[] |
no_license
|
PyBack/AutoExcelData
|
58d86b49eb8a9708bf455336741c479e6b31b437
|
c1e33ffd6895ca610f3591672ca9c548a16f4a30
|
refs/heads/master
| 2021-06-06T10:57:40.514629 | 2020-07-01T06:37:06 | 2020-07-01T06:37:06 | 146,445,024 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,609 |
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import datetime as dt
import logging
import logging.handlers
import getpass
import pandas as pd
import clipboard
import auto_helper as helper
import xlwings as xw
from handler_hnet import handle_hnet
from read_data_file import read_otc_termination_file
logger = logging.getLogger('AutoOTC.Termination')
logger.setLevel(logging.DEBUG)
# create file handler whhich logs even debug messages
# fh = logging.FileHandler('AutoReport.log')
fh = logging.handlers.RotatingFileHandler('AutoOTC.log', maxBytes=104857, backupCount=3)
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s [%(levelname)s %(name)s %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handler to logger
logger.addHandler(fh)
# logger.addHandler(ch)
def get_confirm_isin_list_from_hnet(window_hnet=None):
if window_hnet is None:
logger.info('no handle of hnet')
return
if not window_hnet.Exists():
logger.info('no handle of hnet')
return
window_hnet.SetFocus()
# H.Net screen 30192: derivative-linked securities redemption notice
sub_window_title = u'32802 ํ์๊ฒฐํฉ์ฆ๊ถ์ํ์ ๋ณด'
sub_window = window_hnet[sub_window_title]
if not sub_window.Exists():
window_hnet.ClickInput(coords=(70, 70)) # Editor (# of sub_window)
clipboard.copy('30192')
helper.paste()
helper.press('enter')
time.sleep(0.5)
sub_window.Maximize()
sub_window.Restore()
sub_window.SetFocus()
msg = '=== START of get_confirm_isin_list_from_hnet ==='
logger.info(msg)
sub_window.ClickInput(coords=(90, 15)) # open the category dropdown
for i in xrange(6):
helper.press('up_arrow')
for i in xrange(3):
helper.press('down_arrow')
helper.press('enter')
time.sleep(0.5)
helper.press('enter')
sub_window.RightClickInput(coords=(90, 140))
helper.press('up_arrow')
time.sleep(0.5)
helper.press('up_arrow')
time.sleep(0.5)
helper.press('enter')
time.sleep(0.5)
data_table = clipboard.paste()
data_table_rows = data_table.split("\n")
isin_code_list = list()
for row in data_table_rows:
column_list = row.split("\t")
if column_list[0] != u"์ํ์ฝ๋" and len(column_list[0]) >= 10:
isin_code_list.append(column_list[0])
# print(column_list[0])
logger.info("data load->isin_code cnt: %d" % len(isin_code_list))
sub_window.Close()
msg = "== END of get_confirm_isin_code_list_from_hnet ==="
logger.info(msg)
return isin_code_list
def get_total_settle_list_from_hnet(window_hent=None, strdate=None):
if window_hent is None:
logger.info('no handle of hent...')
return
if not window_hent.Exists():
logger.info('no handle of hent...')
return
window_hent.SetFocus()
# H.Net screen 66305: consolidated schedule list 1
sub_window_title = u'66305 ํตํฉ์ค์ผ์ฅด๋ด์ญ1'
sub_window = window_hent[sub_window_title]
if sub_window.Exists():
sub_window.Close()
window_hent.ClickInput(coords=(70, 70)) # Editor (# of sub_window)
clipboard.copy('66305')
helper.paste()
helper.press('enter')
time.sleep(0.5)
sub_window.Maximize()
sub_window.Restore()
sub_window.SetFocus()
msg = '== START of get_total_settle_list_from_hnet ==='
logger.info(msg)
sub_window.DoubleClickInput(coords=(90, 15)) # query period fields
for i in xrange(2):
for date_digit in strdate:
helper.press(date_digit)
sub_window.DoubleClickInput(coords=(90, 55)) # instrument type dropdown
for i in xrange(5):
helper.press('down_arrow')
for i in xrange(3):
helper.press('up_arrow')
helper.press('enter')
sub_window.DoubleClickInput(coords=(700, 55)) # run the batch query
helper.press('enter')
time.sleep(15)
sub_window.ClickInput(coords=(90, 120)) # copy the results
helper.press('up_arrow')
time.sleep(1)
helper.press('up_arrow')
time.sleep(1)
helper.press('enter')
time.sleep(1)
data = clipboard.paste()
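# parse the tab-separated clipboard dump into a DataFrame indexed by the deal code column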
data = data.split("\r\n")
new_data_lst = []
for row in data:
row_lst = row.split('\t')
new_data_lst.append(row_lst)
df_data = pd.DataFrame(new_data_lst)
headers = df_data.iloc[0]
df_data = pd.DataFrame(df_data.values[1:], columns=headers)
# df_data.index = df_data[u'๋์ฝ๋']
df_data.index = df_data[df_data.columns[5]]
sub_window.Close()
msg = '=== END of get_total_settle_list_from_hnet ==='
logger.info(msg)
return df_data
def get_target_product_data(excel_file_name='', strdate='', term='์ํ'):
if excel_file_name == '':
excel_file_name = u'OTC์ํ๋ฆฌ์คํธ.xlsx'
df = read_otc_termination_file(excel_file_name, strdate[:4] + "." + strdate[4:6])
if not strdate in df.index:
target_df = pd.DataFrame()
# target_df = df.iloc[-2:].copy()
return target_df
df = df.loc[strdate]
if len(df) == 0:
return df
elif isinstance(df, pd.Series):
df_new = pd.DataFrame(df)
df_new = df_new.transpose()
df = df_new.copy()
if term == '์ํ':
target_df = df[(df[u'๊ตฌ๋ถ'] != 'ELT') & (df[u'๊ตฌ๋ถ'] != 'DLT')]
target_df = target_df[(target_df[u'์ํ์ฌ๋ถ'] == u'๋ง๊ธฐ์ํ') | (target_df[u'์ํ์ฌ๋ถ'] == u'์กฐ๊ธฐ์ํ')]
return target_df
elif term == '์ํ_ALL':
target_df = df.copy()
target_df = target_df[(target_df[u'์ํ์ฌ๋ถ'] == u'๋ง๊ธฐ์ํ') | (target_df[u'์ํ์ฌ๋ถ'] == u'์กฐ๊ธฐ์ํ')]
return target_df
elif term == '๋ฏธ์ํ':
target_df = df.copy()
target_df = target_df[(target_df[u'์ํ์ฌ๋ถ'] == u'๋ฏธ์ํ')]
return target_df
else:
df = pd.DataFrame()
return df
pass
def chk_in_isin_list(target_df, isin_code_list):
# ํ์๊ฒฐํฉ์ฆ๊ถ์ํ๊ณ ์ง 30192 4.์์ตํ์
msg = '=== START chk_in_isin_list %d ===' % (len(target_df))
logger.info(msg)
chk_in_list = []
chk_in_count = 0
for i in range(len(target_df)):
exp_date = target_df.iloc[i][u'์ํ์์ ์ผ']
str_exp_date = u"%d-%02d-%02d" % (exp_date.year, exp_date.month, exp_date.day)
msg = u"%s %s %s %s %s " % (target_df.iloc[i][u'์ข
๋ชฉ์ฝ๋'],
target_df.iloc[i][u'๊ตฌ๋ถ'],
target_df.iloc[i][u'์ํ์ฌ๋ถ'],
str_exp_date,
target_df.iloc[i][u'์์ต๊ตฌ์กฐ'],
)
if target_df.iloc[i][u'์ข
๋ชฉ์ฝ๋'] in isin_code_list:
chk_in_count += 1
msg = msg + u" CHK_IN"
chk_in_list.append(target_df.iloc[i][u'์ข
๋ชฉ์ฝ๋'])
else:
msg = msg + u"CHK_OUT"
logger.info(msg)
for isin in chk_in_list:
isin_code_list.remove(isin)
msg = 'CHK_IN: %s CHK_OUT: %s' % (chk_in_count, len(target_df) - chk_in_count)
logger.info(msg)
if chk_in_count > 0:
msg = 'Must generate %d sms for termination H.Net #30192' % chk_in_count
logger.warning(msg)
msg = '=== END chk_in_isin_list ===='
logger.info(msg)
return isin_code_list
pass
def chk_isin_in_salesteam(window_hnet, isin_code_list, df_data):
# H.Net screen 32802: derivative-linked securities redemption info
sub_windwow_title = u'32802 ํ์๊ฒฐํฉ์ฆ๊ถ์ํ์ ๋ณด'
sub_windwow = window_hnet[sub_windwow_title]
if not sub_windwow.Exists():
window_hnet.ClickInput(coords=(70, 70)) # Editor (# of sub_window)
clipboard.copy('32802')
helper.paste()
helper.press('enter')
time.sleep(0.5)
sub_windwow.Maximize()
sub_windwow.Restore()
sub_windwow.SetFocus()
msg = '== START of chk_isin_in_salesteam ==='
logger.info(msg)
df_data_sub = df_data[(df_data[u'Sales๋ถ์'] == u'PB') & (df_data[u'๊ฒฐ์ ์ํ'] == u'์ต์ขํ์ ')]
df_data_early = df_data_sub[(df_data_sub[u'์ผ์๊ตฌ๋ถ'] == 'OBS') & (df_data_sub[u'Sched.Type'] == u'์๋ฌด์ค๋')]
df_data_mat = df_data_sub[(df_data_sub[u'์ผ์๊ตฌ๋ถ'] == 'MAT')]
df_data_sub = df_data[(df_data[u'Sales๋ถ์'] == u'PB') & (df_data[u'๊ฒฐ์ ์ํ'] == u'๋ฏธ์๋ ฅ')]
df_data_delay = df_data_sub[(df_data_sub[u'์ผ์๊ตฌ๋ถ'] == 'OBS') & (df_data_sub[u'Sched.Type'] == u'์๋ฌด์ค๋')]
logger.info('Sched Early Term-> %d' % len(df_data_early))
logger.info('Sched Delay Term-> %d' % len(df_data_delay))
logger.info('Sched MAT Term-> %d' % len(df_data_mat))
for isin_code in isin_code_list:
sub_windwow.ClickInput(coords=(90, 35)) # instrument code field
clipboard.copy(isin_code[2:])
helper.paste()
# helper.press('enter')
sub_windwow.ClickInput(coords=(775, 35)) # query button
time.sleep(0.5)
sub_windwow.RightClickInput(coords=(110, 80)) # deal code cell
helper.press('up_arrow')
helper.press('up_arrow')
helper.press('enter')
deal_code = clipboard.paste()
if deal_code in list(df_data.index):
if len(df_data.loc[deal_code][u'Sales๋ถ์']) > 0:
sales_team = df_data.loc[deal_code][u'Sales๋ถ์'][0]
settle_state = df_data.loc[deal_code][u'๊ฒฐ์ ์ํ'][0]
sched_type = df_data.loc[deal_code][u'Sched.Type'][0]
else:
sales_team = df_data.loc[deal_code][u'Sales๋ถ์']
settle_state = df_data.loc[deal_code][u'๊ฒฐ์ ์ํ']
sched_type = df_data.loc[deal_code][u'Sched.Type']
msg = u"%s %s %s %s" % (isin_code, sales_team, settle_state, sched_type)
logger.info(msg)
else:
logger.info("%s not in list" % deal_code)
msg = '=== END of chk_isin_in_salesteam ==='
logger.info(msg)
def chk_isin_in_schedul_list(window_hnet, target_df, df_data):
# H.Net screen 32802: derivative-linked securities redemption info
sub_window_title = u'32802 ํ์๊ฒฐํฉ์ฆ๊ถ์ํ์ ๋ณด'
sub_window = window_hnet[sub_window_title]
if not sub_window.Exists():
window_hnet.ClickInput(coords=(70, 70)) # Editor (# of sub_window)
clipboard.copy('32802')
helper.paste()
helper.press('enter')
time.sleep(0.5)
sub_window.Maximize()
sub_window.Restore()
sub_window.SetFocus()
msg = '== START of chk_isin_in_schedule_list ==='
logger.info(msg)
def main():
import argparse
now_dt = dt.datetime.now()
strdate = now_dt.strftime("%Y%m%d")
parser = argparse.ArgumentParser()
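# accept an optional YYYYMMDD target date; defaults to today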
parser.add_argument('date',
type=lambda s: dt.datetime.strptime(s, "%Y%m%d").strftime("%Y%m%d"),
default=strdate,
help="Target Date",
nargs='?'
)
args = parser.parse_args()
logger.info("Target Date: %s" % args.date)
# pw = getpass.getpass("PWD: ")
# date_lst = ['20180212',
# '20180213',
# ]
# date_rng = pd.bdate_range('2018-05-17', '2018-07-01')
# date_lst = [d.strftime('%Y%m%d') for d in date_rng]
excel_file_name = u'OTC์ํ๋ฆฌ์คํธ.xlsx'
window_hnet = handle_hnet()
isin_code_list = get_confirm_isin_list_from_hnet(window_hnet)
target_df = get_target_product_data(excel_file_name, args.date)
if len(target_df) > 0:
target_df = target_df[[u'์ข๋ชฉ์ฝ๋', u'์ํ๋ช', u'๊ตฌ๋ถ', u'์์ต๊ตฌ์กฐ', u'์ํ์ฌ๋ถ', u'์ํ์์ ์ผ']]
isin_code_list = chk_in_isin_list(target_df, isin_code_list)
if len(isin_code_list) == 0:
isin_code_list = list(target_df[u'์ข๋ชฉ์ฝ๋'])
print(isin_code_list)
if len(isin_code_list) > 0:
df_data = get_total_settle_list_from_hnet(window_hnet, args.date)
chk_isin_in_salesteam(window_hnet, isin_code_list, df_data)
pass
if __name__ == "__main__":
main()
|
[
"[email protected]"
] |