Dataset schema — one record per source file:

    blob_id               string (40 chars)
    directory_id          string (40 chars)
    path                  string (5-283 chars)
    content_id            string (40 chars)
    detected_licenses     sequence (0-41 entries)
    license_type          string (2 classes)
    repo_name             string (7-96 chars)
    snapshot_id           string (40 chars)
    revision_id           string (40 chars)
    branch_name           string (58 classes)
    visit_date            timestamp[us]
    revision_date         timestamp[us]
    committer_date        timestamp[us]
    github_id             int64 (12.7k-662M, nullable)
    star_events_count     int64 (0-35.5k)
    fork_events_count     int64 (0-20.6k)
    gha_license_id        string (11 classes)
    gha_event_created_at  timestamp[us]
    gha_created_at        timestamp[us]
    gha_language          string (43 classes)
    src_encoding          string (9 classes)
    language              string (1 value)
    is_vendor             bool (2 classes)
    is_generated          bool (2 classes)
    length_bytes          int64 (7-5.88M)
    extension             string (30 classes)
    content               string (7-5.88M chars)
    authors               sequence (length 1)
    author                string (0-73 chars)

Each record below shows a one-line file header (path | repo | license) followed by the file content.
# ===== file: /itp1/6_b.py | repo: mollinaca/AOJ | license: no_license =====
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
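# AOJ ITP1_6_B: read n cards, then print every card of a full 52-card deck
# (suits S, H, C, D; ranks 1-13) that did not appear in the input.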
n = int(input())
d = {}
for i in range(n):
d[i]=list(map(str,input().split()))
for mark in ['S','H','C','D']:
for i in range(1,14):
        if [mark, str(i)] not in d.values():
            print(mark, i)
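# Alternative sketch (not in the original submission): the same output via a
# set difference, avoiding the repeated scan of d.values().
# deck = {(m, r) for m in 'SHCD' for r in range(1, 14)}
# owned = {(c[0], int(c[1])) for c in d.values()}
# for m, r in sorted(deck - owned, key=lambda c: ('SHCD'.index(c[0]), c[1])):
#     print(m, r)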
# ===== file: /merakiq2.py | repo: subreena10/files | license: no_license =====
my_files = open("people1.exercise.txt", "r")
count = 0  # number of lines in people1.exercise.txt
for name in my_files:
    count = count + 1
print(count)
my_files.close()
# print(my_files.read())  # would read the whole file (must be called before close())
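# Shorter alternative (sketch): a context manager closes the file automatically,
# and sum() counts the lines in a single pass.
# with open("people1.exercise.txt") as f:
#     print(sum(1 for _ in f))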
# ===== file: /Model_Forms/Emp_registration/Employee_registration/manage.py | repo: Ruchika-Munde/Django_Task | license: no_license =====
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Employee_registration.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
# ===== file: /modcrop.py | repo: lygztq/L-Super-Resolution | license: no_license =====
def modcrop(imgs, modulo):
    """Crop an image (H x W or H x W x C array) so each spatial dimension is a multiple of modulo."""
    dim1 = imgs.shape[0] - (imgs.shape[0] % modulo)
    dim2 = imgs.shape[1] - (imgs.shape[1] % modulo)
    if len(imgs.shape) == 2:
        imgs = imgs[:dim1, :dim2]
    else:
        imgs = imgs[:dim1, :dim2, :]
    return imgs
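# Usage sketch (added): crop a dummy RGB image so both spatial dimensions are
# divisible by a super-resolution scale factor of 4.
if __name__ == "__main__":
    import numpy as np
    demo = np.zeros((101, 77, 3))
    print(modcrop(demo, 4).shape)  # -> (100, 76, 3)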
# ===== file: /delivery/api/views.py | repo: Pushkar-Bhuse/RestaurantProject | license: no_license =====
from rest_framework.views import APIView
import json
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from urllib import request
from datetime import datetime
from rest_framework import generics
from delivery.models import Order,Product,OrderItem
from .serializers import OrderSerializer, ProductSerializer,OrderItemSerializer
from rest_framework.renderers import TemplateHTMLRenderer
from django.http import JsonResponse
from django.contrib.auth import get_user_model
from delivery.api.location import *
from datetime import datetime
from django.db.models import Q
from reservation.models import Reservation,Branch
user = get_user_model()
class CartItems(generics.ListAPIView):
queryset = Order.objects.all()
serializer_class = OrderSerializer
def get_queryset(self, *args, **kwargs):
        user_id = self.kwargs.get("pk")
        qs = Order.objects.filter(owner__id=user_id, is_ordered=False).order_by("date_ordered")
return qs
class UpdateOrder(APIView):
def post(self, request, *args, **kwargs):
# import pdb; pdb.set_trace()
order_item = json.loads(request.POST['item'])
item = order_item["item"]
value = order_item["quantity"]
if value == 0:
temp = Product.objects.filter(id = item)[0]
obj = Order.objects.filter(is_ordered = False, owner = request.user)[0]
for item in obj.items.all():
if item.product == temp:
                    item.delete()  # no save() after delete(); saving a deleted instance would re-insert the row
else:
temp = Product.objects.filter(id = item)[0]
obj = Order.objects.filter(is_ordered = False, owner = request.user)[0]
for item in obj.items.all():
if item.product == temp:
item.quantity = value
item.set_individual_price()
item.save()
obj = Order.objects.filter(is_ordered = False, owner = request.user)[0]
obj.set_cart_total()
obj.save()
return JsonResponse({'success':True})
class AddToCart(APIView):
def post(self,request, *args, **kwargs):
product = request.POST["product"]
obj, created = Order.objects.get_or_create(is_ordered = False, owner = request.user)
now = datetime.now()
if created:
obj.ref_code = now.strftime("%d") + str(request.user.id)
obj.save()
# for product in product_list:
temp = Product.objects.get(id = product)
item = OrderItem.objects.create(quantity = 1, product = temp, price = temp.cost)
obj.items.add(item)
obj.set_cart_total()
obj.save()
return JsonResponse({'success':True})
class RemoveFromCart(APIView):
def post(self,request, *args, **kwargs):
product = request.POST["product"]
# import pdb; pdb.set_trace()
obj= Order.objects.get(is_ordered = False, owner = request.user)
# for product in product_list:
temp = Product.objects.get(id = product)
# item = OrderItem.objects.get(product = temp)
for item in obj.items.all():
if item.product == temp:
item.delete()
obj.set_cart_total()
obj.save()
return JsonResponse({'success':True})
# class DeliveryConfirmation(APIView):
#     def post(self, request, *args, **kwargs):
# refcode = tweet_id = self.kwargs.get("ref_code")
class ChartData(APIView):
def get(self,request,*args,**kwargs):
place = self.kwargs.get("place")
# label = ["12:00 PM","1:00 PM","2:00 PM","3:00 PM","4:00 PM","5:00 PM","6:00 PM","7:00 PM","8:00 PM","9:00 PM","10:00 PM","11:00 PM","00:00 AM",]
label = []
data = []
population = {}
for i in range (12,24):
if i == 23:
temp = "{}PM-{}AM".format(i-12,0)
else:
temp = "{}PM-{}PM".format(i-12,i+1-12)
label.append(temp)
count = Reservation.objects.filter(Q(time__hour__gte = i) & Q(time__hour__lt = i+1), place__id = place).count()
data.append(count)
# import pdb; pdb.set_trace()
return JsonResponse({"label":label,"data":data})
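# Added sketch (hypothetical wiring, not from the repo): how these views might be
# routed; the URL names and patterns below are illustrative assumptions only.
# from django.urls import path
# urlpatterns = [
#     path('cart/<int:pk>/', CartItems.as_view()),
#     path('cart/add/', AddToCart.as_view()),
#     path('cart/remove/', RemoveFromCart.as_view()),
#     path('cart/update/', UpdateOrder.as_view()),
#     path('chart/<int:place>/', ChartData.as_view()),
# ]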
# ===== file: /BlueGecko/Generators/generate_bglib_msvcsharp.py | repo: veerarajendran/bglib | license: MIT (permissive) =====
# ================================================================
# Blue Gecko BLE API BGLib code generator: MSVCSharp platform
# Jeff Rowberg <[email protected]>
# ----------------------------------------------------------------
#
# CHANGELOG:
# 2013-05-?? - Initial release
# 2020-08-01 - Ported to Blue Gecko API v2.x, Python 3 <[email protected]>
#
# ================================================================
from xml.dom.minidom import parseString
import string
from datetime import datetime
# open, read, and close the gecko API XML data
print("Reading gecko.xml...")
file = open('gecko.xml', 'r')
data = file.read()
file.close()
# parse XML into a DOM structure
print("Parsing BLE API definition...")
dom = parseString(data)
# read relevant dom nodes for highlighter generation
ble_datatypes = dom.getElementsByTagName('datatype')
ble_classes = dom.getElementsByTagName('class')
# Do some string substitutions to use all caps for abbreviations used in the API names
name_string_subs = ['Gap', 'Sm', 'Smp', 'Adc', 'Rx', 'Tx', 'Ps', 'Phy', 'Io',
'Spi', 'I2c', 'Dfu', 'Gatt','Le','L2cap','Cte','Uuid','Dtm']
command_method_definitions = []
response_callback_declarations = []
response_callback_structure = []
response_callback_initializations = []
response_callback_parser_conditions = []
event_callback_declarations = []
event_callback_structure = []
event_callback_initializations = []
event_callback_parser_conditions = []
constant_macros = []
struct_definitions = []
for ble_class in ble_classes:
class_name_raw = ble_class.attributes['name'].value
# Fix up class names to make them prettier and CSharp-ish
class_name_parts = string.capwords(class_name_raw.replace('_', ' ')).split(' ')
class_name = ''
for word in class_name_parts:
if word in name_string_subs:
class_name += word.upper()
else:
class_name += word
# print("class_name={}".format(class_name)) #debug
print("Gathering command, event, and enum data from main class '" + class_name + "'...")
if len(response_callback_parser_conditions) > 0:
response_callback_parser_conditions.append('else if (bgapiRXBuffer[2] == ' + ble_class.attributes['index'].value + ') {')
else:
response_callback_parser_conditions.append('if (bgapiRXBuffer[2] == ' + ble_class.attributes['index'].value + ') {')
num_responses = 0
for ble_command in ble_class.getElementsByTagName('command'):
# print(class_name + '_' + ble_command.attributes['name'].value) #debug
command_name_parts = (string.capwords(ble_command.attributes['name'].value.replace('_', ' '))).split(' ')
if num_responses == 0:
response_callback_structure.append('namespace ' + class_name + ' {') # begin with class name
command_prefix = class_name
command_name = ''
for word in command_name_parts:
if word in name_string_subs:
command_name += word.upper()
else:
command_name += word
# gather parameter info, if present
ble_params = ble_command.getElementsByTagName('params');
parameters = []
payload_length = 0
payload_additional = ''
payload_parameters = []
payload_extra_lines = []
ble_param_name = ''
if len(ble_params) > 0:
for ble_param in ble_params[0].getElementsByTagName('param'):
if ble_param.attributes['name'].value == 'params':
# 'params' is a keyword in C#. Let's change to 'parameters'
ble_param_name = 'parameters'
else:
ble_param_name = ble_param.attributes['name'].value
if ble_param.attributes['type'].value == 'uint8':
parameters.append('Byte ' + ble_param_name)
payload_parameters.append(ble_param_name);
payload_length += 1
elif ble_param.attributes['type'].value == 'int8':
parameters.append('SByte ' + ble_param_name)
payload_parameters.append('(Byte)' + ble_param_name);
payload_length += 1
elif ble_param.attributes['type'].value == 'uint16':
parameters.append('UInt16 ' + ble_param_name)
payload_parameters.append('(Byte)(' + ble_param_name + '), (Byte)(' + ble_param_name + ' >> 8)');
payload_length += 2
elif ble_param.attributes['type'].value == 'int16':
parameters.append('Int16 ' + ble_param_name)
payload_parameters.append('(Byte)(' + ble_param_name + '), (Byte)(' + ble_param_name + ' >> 8)');
payload_length += 2
elif ble_param.attributes['type'].value == 'uint32':
parameters.append('UInt32 ' + ble_param_name)
payload_parameters.append('(Byte)(' + ble_param_name + '), (Byte)(' + ble_param_name + ' >> 8), (Byte)(' + ble_param_name + ' >> 16), (Byte)(' + ble_param_name + ' >> 24)');
payload_length += 4
elif ble_param.attributes['type'].value == 'int32':
parameters.append('Int32 ' + ble_param_name)
payload_parameters.append('(Byte)(' + ble_param_name + '), (Byte)(' + ble_param_name + ' >> 8), (Byte)(' + ble_param_name + ' >> 16), (Byte)(' + ble_param_name + ' >> 24)');
payload_length += 4
elif ble_param.attributes['type'].value == 'bd_addr':
parameters.append('Byte[] ' + ble_param_name)
payload_parameters.append('0, 0, 0, 0, 0, 0')
payload_extra_lines.append('Array.Copy(' + ble_param_name + ', 0, cmd, ' + str(payload_length + 4) + ', 6);');
payload_length += 6
elif ble_param.attributes['type'].value == 'uint8array':
parameters.append('Byte[] ' + ble_param_name)
payload_parameters.append('(Byte)' + ble_param_name + '.Length')
payload_length += 1
payload_additional += ' + ' + ble_param_name + '.Length'
payload_extra_lines.append('Array.Copy(' + ble_param_name + ', 0, cmd, ' + str(payload_length + 4) + ', ' + ble_param_name + '.Length);');
# gather return value info, if present
ble_returns = ble_command.getElementsByTagName('returns');
returns = []
if len(ble_returns) > 0:
for ble_return in ble_returns[0].getElementsByTagName('param'):
returns.append(ble_return.attributes['type'].value + ' ' + ble_return.attributes['name'].value)
payload_str = ''
if len(payload_parameters) > 0:
payload_str = ', ' + ', '.join(payload_parameters)
command_method_definitions.append('public Byte[] BLECommand' + command_prefix + command_name + '(' + ', '.join(parameters) + ') {')
if len(payload_extra_lines) == 0:
command_method_definitions.append(' return new Byte[] { 0x20, ' + str(payload_length) + ', ' + ble_class.attributes['index'].value + ', ' + ble_command.attributes['index'].value + payload_str + ' };')
else:
command_method_definitions.append(' Byte[] cmd = new Byte[' + str(payload_length + 4) + payload_additional + '];')
command_method_definitions.append(' Array.Copy(new Byte[] { 0x20, (Byte)(' + str(payload_length) + payload_additional + '), ' + ble_class.attributes['index'].value + ', ' + ble_command.attributes['index'].value + payload_str + ' }, 0, cmd, 0, ' + str(payload_length + 4) + ');')
for l in payload_extra_lines:
command_method_definitions.append(' ' + l)
command_method_definitions.append(' return cmd;')
command_method_definitions.append('}')
parameters = []
param_init = []
response_args = []
buf_pos = 4
if len(ble_returns) > 0:
for ble_return in ble_returns[0].getElementsByTagName('param'):
if (ble_return.attributes['type'].value == 'uint8'):
parameters.append('Byte ' + ble_return.attributes['name'].value)
param_init.append('this.' + ble_return.attributes['name'].value + ' = ' + ble_return.attributes['name'].value + ';')
response_args.append('bgapiRXBuffer[' + str(buf_pos) + ']')
buf_pos += 1
elif (ble_return.attributes['type'].value == 'uint16'):
parameters.append('UInt16 ' + ble_return.attributes['name'].value)
param_init.append('this.' + ble_return.attributes['name'].value + ' = ' + ble_return.attributes['name'].value + ';')
response_args.append('(UInt16)(bgapiRXBuffer[' + str(buf_pos) + '] + (bgapiRXBuffer[' + str(buf_pos + 1) + '] << 8))')
buf_pos += 2
elif (ble_return.attributes['type'].value == 'uint32'):
parameters.append('UInt32 ' + ble_return.attributes['name'].value)
param_init.append('this.' + ble_return.attributes['name'].value + ' = ' + ble_return.attributes['name'].value + ';')
response_args.append('(UInt32)(bgapiRXBuffer[' + str(buf_pos) + '] + (bgapiRXBuffer[' + str(buf_pos + 1) + '] << 8) + (bgapiRXBuffer[' + str(buf_pos + 2) + '] << 16) + (bgapiRXBuffer[' + str(buf_pos + 3) + '] << 24))')
buf_pos += 4
elif (ble_return.attributes['type'].value == 'int8'):
parameters.append('SByte ' + ble_return.attributes['name'].value)
param_init.append('this.' + ble_return.attributes['name'].value + ' = ' + ble_return.attributes['name'].value + ';')
response_args.append('(SByte)(bgapiRXBuffer[' + str(buf_pos) + '])')
buf_pos += 1
elif (ble_return.attributes['type'].value == 'int16'):
parameters.append('Int16 ' + ble_return.attributes['name'].value)
param_init.append('this.' + ble_return.attributes['name'].value + ' = ' + ble_return.attributes['name'].value + ';')
response_args.append('(Int16)(bgapiRXBuffer[' + str(buf_pos) + '] + (bgapiRXBuffer[' + str(buf_pos + 1) + '] << 8))')
buf_pos += 2
elif (ble_return.attributes['type'].value == 'int32'):
parameters.append('Int32 ' + ble_return.attributes['name'].value)
param_init.append('this.' + ble_return.attributes['name'].value + ' = ' + ble_return.attributes['name'].value + ';')
response_args.append('(Int32)(bgapiRXBuffer[' + str(buf_pos) + '] + (bgapiRXBuffer[' + str(buf_pos + 1) + '] << 8) + (bgapiRXBuffer[' + str(buf_pos + 2) + '] << 16) + (bgapiRXBuffer[' + str(buf_pos + 3) + '] << 24))')
buf_pos += 4
elif (ble_return.attributes['type'].value == 'bd_addr'):
parameters.append('Byte[] ' + ble_return.attributes['name'].value)
param_init.append('this.' + ble_return.attributes['name'].value + ' = ' + ble_return.attributes['name'].value + ';')
response_args.append('(Byte[])(bgapiRXBuffer.Skip(' + str(buf_pos) + ').Take(6).ToArray())')
buf_pos += 6
elif (ble_return.attributes['type'].value == 'uint8array'):
parameters.append('Byte[] ' + ble_return.attributes['name'].value)
param_init.append('this.' + ble_return.attributes['name'].value + ' = ' + ble_return.attributes['name'].value + ';')
response_args.append('(Byte[])(bgapiRXBuffer.Skip(' + str(buf_pos + 1) + ').Take(bgapiRXBuffer[' + str(buf_pos) + ']).ToArray())')
# buf_pos doesn't matter here since uint8arrays are ALWAYS at the end
cs_code = []
cs_code.append('if (BLEResponse' + command_prefix + command_name + ' != null) {')
cs_code.append(' BLEResponse' + command_prefix + command_name + '(this, new BlueGecko.BLE.Responses.' + command_prefix + '.' + command_name + 'EventArgs(')
if len(response_args) > 0:
cs_code.append(' ' + ',\n '.join(response_args))
cs_code.append(' ));')
cs_code.append('}')
response_callback_declarations.append('public event BlueGecko.BLE.Responses.' + command_prefix + '.' + command_name + 'EventHandler BLEResponse' + command_prefix + command_name + ';')
response_callback_structure.append(' public delegate void ' + command_name + 'EventHandler(object sender, BlueGecko.BLE.Responses.' + command_prefix + '.' + command_name + 'EventArgs e);')
response_callback_structure.append(' public class ' + command_name + 'EventArgs : EventArgs {')
for parameter in parameters:
response_callback_structure.append(' public readonly ' + parameter + ';')
if len(param_init) > 0:
response_callback_structure.append(' public ' + command_name + 'EventArgs(' + ', '.join(parameters) + ') {')
response_callback_structure.append(' ' + '\n '.join(param_init))
response_callback_structure.append(' }')
else:
response_callback_structure.append(' public ' + command_name + 'EventArgs(' + ', '.join(parameters) + ') { }')
response_callback_structure.append(' }')
response_callback_structure.append('')
if num_responses > 0:
response_callback_parser_conditions.append(' else if (bgapiRXBuffer[3] == ' + ble_command.attributes['index'].value + ')')
response_callback_parser_conditions.append(' {')
else:
response_callback_parser_conditions.append(' if (bgapiRXBuffer[3] == ' + ble_command.attributes['index'].value + ')')
response_callback_parser_conditions.append(' {')
response_callback_parser_conditions.append(' ' + '\n '.join(cs_code))
if ble_class.attributes['index'].value == '0' and ble_command.attributes['index'].value == '0':
response_callback_parser_conditions.append(' SetBusy(false);')
response_callback_parser_conditions.append(' }')
num_responses += 1
if num_responses > 0:
response_callback_structure.append('}')
response_callback_parser_conditions.append('}')
if len(event_callback_parser_conditions) > 0:
event_callback_parser_conditions.append('else if (bgapiRXBuffer[2] == ' + ble_class.attributes['index'].value + ') {')
else:
event_callback_parser_conditions.append('if (bgapiRXBuffer[2] == ' + ble_class.attributes['index'].value + ') {')
num_events = 0
for ble_event in ble_class.getElementsByTagName('event'):
# print(class_name + '_' + ble_event.attributes['name'].value) #debug
event_name_parts = (string.capwords(ble_event.attributes['name'].value.replace('_', ' '))).split(' ')
if num_events ==0:
event_callback_structure.append('namespace ' + class_name + ' {') # begin with class name
event_prefix = class_name
event_name = ''
for word in event_name_parts:
if word in name_string_subs:
event_name += word.upper()
else:
event_name += word
# gather parameter info, if present
ble_params = ble_event.getElementsByTagName('params');
parameters = []
param_init = []
event_args = []
buf_pos = 4
if len(ble_params) > 0:
for ble_param in ble_params[0].getElementsByTagName('param'):
if (ble_param.attributes['type'].value == 'uint8'):
parameters.append('Byte ' + ble_param.attributes['name'].value)
param_init.append('this.' + ble_param.attributes['name'].value + ' = ' + ble_param.attributes['name'].value + ';')
event_args.append('bgapiRXBuffer[' + str(buf_pos) + ']')
buf_pos += 1
elif (ble_param.attributes['type'].value == 'uint16'):
parameters.append('UInt16 ' + ble_param.attributes['name'].value)
param_init.append('this.' + ble_param.attributes['name'].value + ' = ' + ble_param.attributes['name'].value + ';')
event_args.append('(UInt16)(bgapiRXBuffer[' + str(buf_pos) + '] + (bgapiRXBuffer[' + str(buf_pos + 1) + '] << 8))')
buf_pos += 2
elif (ble_param.attributes['type'].value == 'uint32'):
parameters.append('UInt32 ' + ble_param.attributes['name'].value)
param_init.append('this.' + ble_param.attributes['name'].value + ' = ' + ble_param.attributes['name'].value + ';')
event_args.append('(UInt32)(bgapiRXBuffer[' + str(buf_pos) + '] + (bgapiRXBuffer[' + str(buf_pos + 1) + '] << 8) + (bgapiRXBuffer[' + str(buf_pos + 2) + '] << 16) + (bgapiRXBuffer[' + str(buf_pos + 3) + '] << 24))')
buf_pos += 4
elif (ble_param.attributes['type'].value == 'int8'):
parameters.append('SByte ' + ble_param.attributes['name'].value)
param_init.append('this.' + ble_param.attributes['name'].value + ' = ' + ble_param.attributes['name'].value + ';')
event_args.append('(SByte)(bgapiRXBuffer[' + str(buf_pos) + '])')
buf_pos += 1
elif (ble_param.attributes['type'].value == 'int16'):
parameters.append('Int16 ' + ble_param.attributes['name'].value)
param_init.append('this.' + ble_param.attributes['name'].value + ' = ' + ble_param.attributes['name'].value + ';')
event_args.append('(Int16)(bgapiRXBuffer[' + str(buf_pos) + '] + (bgapiRXBuffer[' + str(buf_pos + 1) + '] << 8))')
buf_pos += 2
elif (ble_param.attributes['type'].value == 'int32'):
parameters.append('Int32 ' + ble_param.attributes['name'].value)
param_init.append('this.' + ble_param.attributes['name'].value + ' = ' + ble_param.attributes['name'].value + ';')
event_args.append('(Int32)(bgapiRXBuffer[' + str(buf_pos) + '] + (bgapiRXBuffer[' + str(buf_pos + 1) + '] << 8) + (bgapiRXBuffer[' + str(buf_pos + 1) + '] << 16) + (bgapiRXBuffer[' + str(buf_pos + 1) + '] << 24))')
buf_pos += 4
elif (ble_param.attributes['type'].value == 'bd_addr'):
parameters.append('Byte[] ' + ble_param.attributes['name'].value)
param_init.append('this.' + ble_param.attributes['name'].value + ' = ' + ble_param.attributes['name'].value + ';')
event_args.append('(Byte[])(bgapiRXBuffer.Skip(' + str(buf_pos) + ').Take(6).ToArray())')
buf_pos += 6
elif (ble_param.attributes['type'].value == 'uint8array'):
parameters.append('Byte[] ' + ble_param.attributes['name'].value)
param_init.append('this.' + ble_param.attributes['name'].value + ' = ' + ble_param.attributes['name'].value + ';')
event_args.append('(Byte[])(bgapiRXBuffer.Skip(' + str(buf_pos + 1) + ').Take(bgapiRXBuffer[' + str(buf_pos) + ']).ToArray())')
# buf_pos doesn't matter here since uint8arrays are ALWAYS at the end
cs_code = []
cs_code.append('if (BLEEvent' + event_prefix + event_name + ' != null) {')
cs_code.append(' BLEEvent' + event_prefix + event_name + '(this, new BlueGecko.BLE.Events.' + event_prefix + '.' + event_name + 'EventArgs(')
if len(event_args) > 0:
cs_code.append(' ' + ',\n '.join(event_args))
cs_code.append(' ));')
cs_code.append('}')
event_callback_declarations.append('public event BlueGecko.BLE.Events.' + event_prefix + '.' + event_name + 'EventHandler BLEEvent' + event_prefix + event_name + ';')
event_callback_structure.append(' public delegate void ' + event_name + 'EventHandler(object sender, BlueGecko.BLE.Events.' + event_prefix + '.' + event_name + 'EventArgs e);')
event_callback_structure.append(' public class ' + event_name + 'EventArgs : EventArgs {')
for parameter in parameters:
event_callback_structure.append(' public readonly ' + parameter + ';')
if len(param_init) > 0:
event_callback_structure.append(' public ' + event_name + 'EventArgs(' + ', '.join(parameters) + ') {')
event_callback_structure.append(' ' + '\n '.join(param_init))
event_callback_structure.append(' }')
else:
event_callback_structure.append(' public ' + event_name + 'EventArgs(' + ', '.join(parameters) + ') { }')
event_callback_structure.append(' }')
event_callback_structure.append('')
if num_events > 0:
event_callback_parser_conditions.append(' else if (bgapiRXBuffer[3] == ' + ble_event.attributes['index'].value + ')')
event_callback_parser_conditions.append(' {')
else:
event_callback_parser_conditions.append(' if (bgapiRXBuffer[3] == ' + ble_event.attributes['index'].value + ')')
event_callback_parser_conditions.append(' {')
event_callback_parser_conditions.append(' ' + '\n '.join(cs_code))
if ble_class.attributes['index'].value == '0' and ble_event.attributes['index'].value == '0':
event_callback_parser_conditions.append(' SetBusy(false);')
event_callback_parser_conditions.append(' }')
num_events += 1
if num_events > 0:
event_callback_structure.append('}')
event_callback_parser_conditions.append('}')
for ble_enum in ble_class.getElementsByTagName('enum'):
# print(class_name + '_' + ble_enum.attributes['name'].value) #debug
enum_name = class_name + '_' + ble_enum.attributes['name'].value
constant_macros.append('#define BGLIB_' + (enum_name.upper() + ' ').ljust(54) + ble_enum.attributes['value'].value)
if len(constant_macros) != 0:
if constant_macros[len(constant_macros) - 1] != '':
constant_macros.append('')
# create C# library files
print("Writing C# source library files to BGLib.cs")
source = open('BGLib.cs', 'w')
source.write('// Blue Gecko v2.x BGLib C# interface library\n\
// 2013-01-15 by Jeff Rowberg <[email protected]\n\
// 2020-08-03 Ported to Blue Gecko API v2.x by Kris Young <[email protected]>\n\
// Updates should (hopefully) always be available at https://github.com/jrowberg/bglib\n\
\n\
/* ============================================\n\
BGLib C# interface library code is placed under the MIT license\n\
Original work Copyright (c) 2013 Jeff Rowberg\n\
Modifications Copyright (c) 2020 Silicon Laboratories\n\
\n\
Permission is hereby granted, free of charge, to any person obtaining a copy\n\
of this software and associated documentation files (the "Software"), to deal\n\
in the Software without restriction, including without limitation the rights\n\
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n\
copies of the Software, and to permit persons to whom the Software is\n\
furnished to do so, subject to the following conditions:\n\
\n\
The above copyright notice and this permission notice shall be included in\n\
all copies or substantial portions of the Software.\n\
\n\
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\
THE SOFTWARE.\n\
===============================================\n'
+ 'Generated on {}'.format(datetime.now().strftime("%Y-%b-%d %H:%M:%S")) +
'\n=============================================== */\n\
\n\
using System;\n\
using System.Collections.Generic;\n\
using System.Linq;\n\
using System.Text;\n\
\n\
namespace BlueGecko {\n\
\n\
namespace BLE {\n\
\n\
namespace Responses {\n\
' + ('\n '.join(response_callback_structure)) + '\n\
}\n\
\n\
namespace Events {\n\
' + ('\n '.join(event_callback_structure)) + '\n\
}\n\
\n\
}\n\
\n\
public class BGLib\n\
{\n\
\n\
' + ('\n '.join(command_method_definitions)) + '\n\n\
' + ('\n '.join(response_callback_declarations)) + '\n\n\
' + ('\n '.join(event_callback_declarations)) + '\n\
\n\
private Byte[] bgapiRXBuffer = new Byte[65];\n\
private int bgapiRXBufferPos = 0;\n\
private int bgapiRXDataLen = 0;\n\
\n\
private Boolean parserBusy = false;\n\
\n\
public void SetBusy(Boolean isBusy) {\n\
this.parserBusy = isBusy;\n\
}\n\
\n\
public Boolean IsBusy() {\n\
return parserBusy;\n\
}\n\
\n\
public UInt16 Parse(Byte ch) {\n\
/*#ifdef DEBUG\n\
// DEBUG: output hex value of incoming character\n\
if (ch < 16) Serial.write(0x30); // leading \'0\'\n\
Serial.print(ch, HEX); // actual hex value\n\
Serial.write(0x20); // trailing \' \'\n\
#endif*/\n\
\n\
/*\n\
BGAPI packet structure (as of 2020-06-12):\n\
Byte 0:\n\
[7] - 1 bit, Message Type (MT) 0 = Command/Response, 1 = Event\n\
[6:3] - 4 bits, Technology Type (TT) 0100b/0x04 - Blue Gecko\n\
[2:0] - 3 bits, Length High (LH) Payload length (high bits)\n\
Byte 1: 8 bits, Length Low (LL) Payload length (low bits)\n\
Byte 2: 8 bits, Class ID (CID) Command class ID\n\
Byte 3: 8 bits, Command ID (CMD) Command ID\n\
Bytes 4-n: 0 - 2048 Bytes, Payload (PL) Up to 2048 bytes of payload\n\
*/\n\
\n\
// check packet position\n\
if (bgapiRXBufferPos == 0) {\n\
// beginning of packet, check for correct framing/expected byte(s)\n\
// BGAPI packet for Blue Gecko must be either Command/Response (0x20) or Event (0xa0)\n\
// Verify four bit technology type == 0x02 shifted into TT field\n\
if ((ch & 0x78) == (0x04)<<3) {\n\
// store new character in RX buffer\n\
bgapiRXBuffer[bgapiRXBufferPos++] = ch;\n\
} else {\n\
/*#ifdef DEBUG\n\
Serial.print("*** Packet frame sync error! Expected .0000... binary, got 0x");\n\
Serial.println(ch, HEX);\n\
#endif*/\n\
return 1; // packet format error\n\
}\n\
} else {\n\
// middle of packet, assume we\'re okay\n\
bgapiRXBuffer[bgapiRXBufferPos++] = ch;\n\
if (bgapiRXBufferPos == 2) {\n\
// just received "Length Low" byte, so store expected packet length\n\
bgapiRXDataLen = ch + ((bgapiRXBuffer[0] & 0x07) << 8);\n\
} else if (bgapiRXBufferPos == bgapiRXDataLen + 4) {\n\
// just received last expected byte\n\
/*#ifdef DEBUG\n\
Serial.print("\\n<- RX [ ");\n\
for (uint8_t i = 0; i < bgapiRXBufferPos; i++) {\n\
if (bgapiRXBuffer[i] < 16) Serial.write(0x30);\n\
Serial.print(bgapiRXBuffer[i], HEX);\n\
Serial.write(0x20);\n\
}\n\
Serial.println("]");\n\
#endif*/\n\
\n\
// check packet type\n\
if ((bgapiRXBuffer[0] & 0x80) == 0) {\n\
// 0x00 = Response packet\n\
' + ('\n '.join(response_callback_parser_conditions)) + '\n\
SetBusy(false);\n\
} else {\n\
// 0x80 = Event packet\n\
' + ('\n '.join(event_callback_parser_conditions)) + '\n\
}\n\
\n\
// reset RX packet buffer position to be ready for new packet\n\
bgapiRXBufferPos = 0;\n\
}\n\
}\n\
\n\
return 0; // parsed successfully\n\
}\n\
\n\
public UInt16 SendCommand(System.IO.Ports.SerialPort port, Byte[] cmd) {\n\
SetBusy(true);\n\
port.Write(cmd, 0, cmd.Length);\n\
return 0; // no error handling yet\n\
}\n\
\n\
}\n\
\n\
}\n\
')
source.close()
print("Finished!\n")
print("C# Installation Instructions:")
print("====================================")
print("1. Add BGLib.cs to your project")
print("2. Import BlueGecko.* in your source file(s)")
print("3. Add event handlers for desired BGLib response and event packets\n")
# ===== file: /coding and encoding/Код Хаффмана/TaskA.py | repo: LorentsAn/Itmo_Discrete_Math | license: no_license =====
import sys
def huffman_cod(a, n):
    """Return the total cost of an optimal Huffman merge of the n sorted weights in a.

    Two-queue technique: a holds the sorted leaf weights, b collects the merged
    (internal) weights, which are produced in non-decreasing order, so each of
    the n - 1 merges can take the two cheapest available items in O(1).
    """
    b = [1000000001] * n  # queue of internal-node weights, pre-filled with sentinels
ans = 0
i, j = 0, 0
for k in range(n - 1):
if i + 1 <= n - 1 and j + 1 <= n - 1:
if a[i] + a[i + 1] <= a[i] + b[j] and a[i] + a[i + 1] <= b[j] + b[j + 1]:
b[k] = a[i] + a[i + 1]
ans += b[k]
i += 2
continue
if a[i] + b[j] <= a[i] + a[i + 1] and a[i] + b[j] <= b[j] + b[j + 1]:
b[k] = a[i] + b[j]
ans += b[k]
i += 1
j += 1
continue
if b[j] + b[j + 1] <= a[i] + a[i + 1] and b[j] + b[j + 1] <= a[i] + b[j]:
b[k] = b[j] + b[j + 1]
ans += b[k]
j += 2
elif i + 1 <= n - 1 and j >= n:
if a[i] + a[i + 1] <= a[i] + b[j]:
b[k] = a[i] + a[i + 1]
ans += b[k]
i += 2
continue
if a[i] + b[j] <= a[i] + a[i + 1]:
b[k] = a[i] + b[j]
ans += b[k]
i += 1
j += 1
continue
elif j + 1 <= n - 1 and i <= n - 1:
if a[i] + b[j] <= b[j] + b[j + 1]:
b[k] = a[i] + b[j]
ans += b[k]
i += 1
j += 1
continue
if b[j] + b[j + 1] <= a[i] + b[j]:
b[k] = b[j] + b[j + 1]
ans += b[k]
j += 2
elif j + 1 <= n - 1 < i:
b[k] = b[j] + b[j + 1]
ans += b[k]
j += 2
return ans
n = int(input())
a = [0] * n
c = sys.stdin.readline().split()
for i in range(n):
a[i] = int(c[i])
a.sort()
print(huffman_cod(a, n))
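# Cross-check sketch (not part of the submitted solution): the same total cost
# computed with a binary heap, useful for validating huffman_cod on random input.
import heapq
def huffman_heap(weights):
    heap = list(weights)
    heapq.heapify(heap)
    total = 0
    while len(heap) > 1:
        merged = heapq.heappop(heap) + heapq.heappop(heap)
        total += merged
        heapq.heappush(heap, merged)
    return total
# assert huffman_heap(a) == huffman_cod(a, n)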
# ===== file: /tests/common/test_run/conv_run.py | repo: czlsccz/akg | license: Apache-2.0 / MIT / BSD-2-Clause / BSD-3-Clause / Zlib / Unlicense (permissive) =====
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import math
import numpy as np
from akg import tvm
from akg.utils import kernel_exec as utils
from akg.ops.nn import conv
from tests.common.tensorio import compare_tensor
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
from tests.common.test_run.conv_utils import conv_param_prepare, conv_shape_4d, conv_forward_naive, conv_tensor_4d_to_5d
from akg.utils import validation_check as vc_util
from akg.utils.kernel_exec import gen_kernel_name
from tests.common.test_utils import compute_blockdim
def conv_run(fmap_shape, filter_shape, pad, stride, dilation, use_bias=False, attrs=None, dump_data=False):
conv_dtype = 'float16'
vc_util.convolution_format_check(fmap_shape, filter_shape, pad, stride, dilation)
conv_param = {'stride': stride, 'pad': pad, 'dilation': dilation}
stride, pad, dilation = conv_param_prepare(conv_param)
fm_shape, w_shape, out_shape = conv_shape_4d(fmap_shape, filter_shape, pad, stride, dilation)
IN, IC, IH, IW = fm_shape
WN, WC, WH, WW = w_shape
C0 = 16
if use_bias:
input_shape = [(IN, IC // C0, IH, IW, C0), (WC // C0 * WH * WW, WN // 16, 16, C0), (1, WN // 16, 1, 1, 16)]
else:
input_shape = [(IN, IC // C0, IH, IW, C0), (WC // C0 * WH * WW, WN // 16, 16, C0)]
input_file = os.environ.get("RANDOM_DATA_DISK_PATH", "")
expect_file = input_file + "/" + gen_kernel_name([input_shape], [conv_dtype], op_attrs=[fmap_shape, filter_shape, pad, stride, dilation, use_bias, attrs], kernel_name='conv') + ".bin"
all_dynamic = 0 # kh kw pad stride
partial_dynamic = 0 # fn fc1 fh fw wN wC
if attrs.get("dynamic"):
all_dynamic = 1
print("=================all dynamic==================")
if attrs.get("partial_dynamic"):
partial_dynamic = 1
print("=================partial dynamic==================")
dynamic = partial_dynamic or all_dynamic
if not dynamic:
print("=================static shape==================")
if dynamic:
fmap_shape_real = fmap_shape
filter_shape_real = filter_shape
pad_real = pad
stride_real = stride
dilation_real = dilation
if partial_dynamic or all_dynamic:
N = tvm.var("N")
C = tvm.var("CI")
CI1 = tvm.var("CI1")
H = tvm.var("H")
W = tvm.var("W")
COUT = tvm.var("CO")
CO1 = tvm.var("CO1")
_, _, KH, KW = filter_shape
SH, SW = stride
PT, PB, PL, PR = pad
params = ()
if all_dynamic:
PARAM_KH = tvm.var("KH")
PARAM_KW = tvm.var("KW")
PARAM_PT = tvm.var("PT")
PARAM_PB = tvm.var("PB")
PARAM_PL = tvm.var("PL")
PARAM_PR = tvm.var("PR")
PARAM_SH = tvm.var("SH")
PARAM_SW = tvm.var("SW")
PARAM_T1_0_H = tvm.var("T1_0_H")
PARAM_T1_0_W = tvm.var("T1_0_W")
PARAM_T1_0_C1 = tvm.var("T1_0_C1")
PARAM_T0_0_MO = tvm.var("T0_0_MO")
PARAM_T0_0_NO = tvm.var("T0_0_NO")
PARAM_T0_0_KO = tvm.var("T0_0_KO")
params = (PARAM_KH, PARAM_KW, PARAM_PT, PARAM_PB, PARAM_PL, PARAM_PR, PARAM_SH, PARAM_SW,
PARAM_T1_0_H, PARAM_T1_0_W, PARAM_T1_0_C1, PARAM_T0_0_MO, PARAM_T0_0_NO, PARAM_T0_0_KO)
DEBUG = 1
if dynamic:
KH_FAKE = 11
KW_FAKE = 31
fmap_shape = (N, C, H, W)
filter_shape = (COUT, C, KH, KW)
if not DEBUG:
CO1 = (COUT + 15) // 16
CI1 = (C + 15) // 16
if use_bias:
# input_shape = [(IN, IC // C0, IH, IW, C0), (WC // C0 * WH * WW, WN // 16, 16, C0), (1, WN // 16, 1, 1, 16)]
if all_dynamic:
input_shape = [(N, CI1, H, W, 16), (CI1 * KH_FAKE * KW_FAKE, CO1, 16, 16), (1, CO1, 1, 1, 16)]
else:
input_shape = [(N, CI1, H, W, 16), (CI1 * KH * KW, CO1, 16, 16), (1, CO1, 1, 1, 16)]
else:
# input_shape = [(IN, IC // C0, IH, IW, C0), (WC // C0 * WH * WW, WN // 16, 16, C0)]
if all_dynamic:
input_shape = [(N, CI1, H, W, 16), (CI1 * KH_FAKE * KW_FAKE, CO1, 16, 16)]
else:
input_shape = [(N, CI1, H, W, 16), (CI1 * KH * KW, CO1, 16, 16)]
mod = utils.op_build_test(conv.conv, [input_shape], [conv_dtype],
op_attrs=[fmap_shape, filter_shape, pad, stride, dilation, use_bias, attrs, params],
kernel_name='conv', attrs=attrs)
fmap_data, filter_data, bias_data, expect = gen_data(fmap_shape_real, filter_shape_real, pad_real, stride_real, dilation_real, use_bias, expect_file)
else:
mod = utils.op_build_test(conv.conv, [input_shape], [conv_dtype],
op_attrs=[fmap_shape, filter_shape, pad, stride, dilation, use_bias, attrs],
kernel_name='conv', attrs=attrs)
fmap_data, filter_data, bias_data, expect = gen_data(fmap_shape, filter_shape, pad, stride, dilation, use_bias, expect_file)
if dump_data:
with open('input.bin', 'wb') as fo:
fo.write(fmap_data.astype(np.float16, copy=False))
with open('filter.bin', 'wb') as fo:
fo.write(filter_data.astype(np.float16, copy=False))
with open('bias.bin', 'wb') as fo:
fo.write(bias_data.astype(np.float16, copy=False))
with open('output.bin', 'wb') as fo:
fo.write(expect.astype(np.float16, copy=False))
out_data = np.full(expect.shape, np.nan, 'float16')
if use_bias:
input = [fmap_data, filter_data, bias_data]
else:
input = [fmap_data, filter_data]
flag_w = os.environ.get("WRITE_TO_DISK", "No")
if flag_w == "Yes":
return input, out_data, expect, True
if not dynamic:
args = input
args.append(out_data)
args = tuple(args)
out_data = utils.mod_launch(mod, args, expect=expect)
else:
args = []
args.append(fmap_data)
args.append(filter_data)
args.append(out_data)
if partial_dynamic or all_dynamic:
args.append(IN)
args.append(IC)
args.append(IH)
args.append(IW)
args.append(WN)
if all_dynamic:
args.append(KH)
args.append(KW)
args.append(PT)
args.append(PB)
args.append(PL)
args.append(PR)
args.append(SH)
args.append(SW)
if attrs.get("conv_tile") and len(attrs["conv_tile"]) == 7:
T1_0_H = attrs["conv_tile"][0]
T1_0_C1 = attrs["conv_tile"][1]
T0_0_MO = attrs["conv_tile"][2]
T0_0_KO = attrs["conv_tile"][3]
T0_0_NO = attrs["conv_tile"][4]
T1_0_W = attrs["conv_tile"][5]
if T1_0_H == IH:
T1_0_H += PT + PB
T1_0_H_cut = (T1_0_H - KH) // SH + 1
if T1_0_W == IW:
T1_0_W += PL + PR
T1_0_W_cut = (T1_0_W - KW) // SW + 1
args.append(T1_0_H_cut)
args.append(T1_0_W_cut)
args.append((T1_0_C1+15)//16)
args.append((T0_0_MO+15)//16)
args.append((T0_0_NO+15)//16)
args.append((T0_0_KO+15)//16)
if DEBUG:
args.append(IC//16)
args.append(WN//16)
block_dim = min(32, IN)
args.append(block_dim)
out_data = utils.mod_launch(mod, args, outputs=(2,), expect=expect)
rtol, atol = get_rtol_atol("conv", conv_dtype)
return input, out_data, expect, compare_tensor(out_data, expect, rtol=rtol, atol=atol, equal_nan=True)
def gen_data(fm_shape, w_shape, pad, stride, dilation, bias, expect_file):
conv_param = {'stride': stride, 'pad': pad, 'dilation': dilation}
stride, pad, dilation = conv_param_prepare(conv_param)
fm_shape, w_shape, out_shape = conv_shape_4d(fm_shape, w_shape, pad, stride, dilation)
IN, IC, IH, IW = fm_shape
WN, WC, WH, WW = w_shape
x = random_gaussian((IN, IC, IH, IW), miu=1, sigma=0.1).astype(np.float16)
w = random_gaussian((WN, WC, WH, WW), miu=0.5, sigma=0.01).astype(np.float16)
if bias:
b = random_gaussian((WN,), miu=1, sigma=0.1).astype(np.float16)
else:
b = (np.array(np.zeros(WN))).astype(np.float16, copy=False)
flag_w = os.environ.get("WRITE_TO_DISK", "No")
if (flag_w == "No") and (os.path.exists(expect_file)==True):
#read expect from file
out = np.fromfile(expect_file, np.float16).reshape(out_shape)
else:
#compute expect data:
out = conv_forward_naive(x.astype(np.float32), w.astype(np.float32), b.astype(np.float32), conv_param)
out = out.astype(np.float16)
if flag_w == "Yes":
# write expect to file
with open(expect_file, "w+") as file:
out.tofile(file)
file.close()
return conv_tensor_4d_to_5d(x, w, b, out)
# ===== file: /StaticValues.py | repo: huangsunyang/WechatRobot | license: no_license =====
# coding=UTF-8
caring_friends_list = [u"何可", u"PP", u"黄孙扬", u"", u"小何老师"]
self_user_id = None
temp_msgs_cache = {}
# ===== file: /1 - Júnior/Collections/sets/set_comprehensions.py | repo: ayrtoncarlos/Formacao-Python-TreinaWeb | license: no_license =====
# Creating a set comprehension
set_comprehensions = {i*i for i in range(10)}
print(type(set_comprehensions))
print(set_comprehensions)
set_1 = {1, 2, 3}
set_2 = {4, 5, 6}
outro_set_comprehensions = {i for i in set_1.union(set_2)}
print(type(outro_set_comprehensions))
print(outro_set_comprehensions)
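# Added example (same pattern as above): a comprehension can carry a filter clause
multiples_of_3 = {i for i in range(20) if i % 3 == 0}
print(type(multiples_of_3))
print(multiples_of_3)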
# ===== file: /app/recipe/tests/test_tags_api.py | repo: mtbriones/recipe-app-api | license: MIT (permissive) =====
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAG_URL = reverse('recipe:tag-list')
class PublicTagApiTests(TestCase):
"""Test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required for retrieving tags"""
res = self.client.get(TAG_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagApiTest(TestCase):
"""Test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
'[email protected]',
'password123'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Desert')
res = self.client.get(TAG_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for the authenticated user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort Food')
res = self.client.get(TAG_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag_successful(self):
"""Test creating a new tag"""
payload = {'name': 'Test tag'}
self.client.post(TAG_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
    def test_create_tag_invalid(self):
"""Test creating a new tag with invalid payload"""
payload = {'name': ''}
res = self.client.post(TAG_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipe(self):
"""Test filtering tags by those assigned to recipes"""
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(
title='Coriander egg on a leash',
time_munites=10,
price=10.00,
user=self.user
)
recipe.tags.add(tag1)
res = self.client.get(
TAG_URL,
{'assigned_only': 1}
)
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_assigned_unique(self):
"""Test filtering tags by assigned returns unique items"""
tag = Tag.objects.create(user=self.user, name='Breakfast')
Tag.objects.create(user=self.user, name='Lunch')
recipe1 = Recipe.objects.create(
title='Pancakes',
time_munites=5,
price=2.00,
user=self.user
)
recipe1.tags.add(tag)
recipe2 = Recipe.objects.create(
title='Porridge',
time_munites=3,
price=10.00,
user=self.user
)
recipe2.tags.add(tag)
res = self.client.get(TAG_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
# ===== file: /display/Display.py | repo: vmuthuk2/DistributedOPFAlg | license: no_license =====
import matplotlib.pyplot as plt
import os
import math
import numpy as np
class Display:
"""
Display the useful results regarding all the raw data:
1.Primal residual v.s. Dual residual
2.Mean executing time.
3.Objective value (loss)
"""
def __init__(self,pRes,dRes,exeTime,var,obj,feederName):
self.pRes=np.array(pRes)
self.dRes=np.array(dRes)
self.exeTime=np.array(exeTime)
self.node_set=var
self.obj=obj
self.dir=os.getcwd()+'/result/'+feederName
self.feederName=feederName
if(not os.path.exists(self.dir)):
os.makedirs(self.dir)
def plot_primalDualRes(self):
"""
Plot the primal and dual residual
"""
plt.semilogy(self.pRes,linewidth=2.0)
plt.semilogy(self.dRes,linewidth=2.0,ls='--')
plt.legend(['Primal Residual','Dual Residual'])
plt.xlabel('Number of Iterations')
plt.ylabel('Residual error')
plt.title('Primal and Dual Residual')
plt.savefig(self.dir+'/residualerror.png')
plt.clf()
def plot_normal_primalDualRes(self):
"""
Plot the primal and dual residual (divide by number of agents )
"""
numBus=math.sqrt(len(self.node_set))
plt.semilogy(self.pRes/numBus,linewidth=2.0)
plt.semilogy(self.dRes/numBus,linewidth=2.0,ls='--')
plt.legend(['Primal Residual','Dual Residual'])
plt.xlabel('Number of Iterations')
plt.ylabel('Residual error')
plt.title('Primal and Dual Residual')
        plt.savefig(self.dir+'/residualerror_normalized.png')  # separate file so the unnormalized plot is not overwritten
plt.clf()
    def show_simulationRst(self):
        for n in self.node_set:
            print(n)
def write_log(self):
with open(self.dir+'/log.txt','w') as f:
f.write('The network thermal loss is '+str(self.thermalLoss())+'MW\n\n')
x=self.statTime()
f.write('The total computation time is '+str(x[0])[0:5]+'s\n')
f.write('The mean computation time is '+str(x[1])[0:5]+'s\n')
f.write('The std of computation time is '+str(x[2])[0:5]+'s\n\n')
def statTime(self):
totalTime=np.sum(self.exeTime)
avgTime=np.mean(self.exeTime)
stdTime=np.std(self.exeTime)
return (totalTime,avgTime,stdTime)
def thermalLoss(self):
loss=0
for n in self.node_set:
loss+=n.z_npower.sum().real
return loss
# ===== file: /IMDB Dataset.py | repo: Admiralhunter/TensorFlowProjects | license: no_license =====
import matplotlib.pyplot as plt
import numpy as np
from keras import layers
from keras import models
from keras.datasets import imdb
# Import data and vectorize values as either 0's or 1's
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
def vectorize_sequences(sequences, dimension=10000):
results = np.zeros((len(sequences), dimension))
for i, sequence in enumerate(sequences):
results[i, sequence] = 1.
return results
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
# setting aside validation set
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
# Create model
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(partial_x_train,
partial_y_train,
epochs=4,
batch_size=512,
validation_data=(x_val, y_val))
# Gets values and plots loss values for train and test data
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
epochs = range(1, len(history_dict['acc']) + 1)
plt.plot(epochs, loss_values, 'bo', label='Training Loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf()
acc_values = history_dict['acc']
val_acc_values = history_dict['val_acc']
plt.plot(epochs, acc_values, 'bo', label='Training Accuracy')
plt.plot(epochs, val_acc_values, 'b', label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
results = model.evaluate(x_test,y_test)
print(results)
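# Added sketch: score a couple of held-out reviews with the trained model.
# predict() returns sigmoid outputs in [0, 1]; values above 0.5 read as positive.
predictions = model.predict(x_test[:2])
print(predictions)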
# ===== file: /Python/pi/gpio_basic.py | repo: dnivanthaka/demo-programs | license: no_license =====
import RPi.GPIO as GPIO
# a pin-numbering mode must be set before any GPIO.setup() call;
# this first block uses the physical (BOARD) numbering
GPIO.setmode(GPIO.BOARD)
# set up the GPIO channels - one input and one output
GPIO.setup(11, GPIO.IN)
GPIO.setup(12, GPIO.OUT)
# input from pin 11
input_value = GPIO.input(11)
# output to pin 12
GPIO.output(12, True)
# the same script as above but using BCM GPIO 00..nn numbers;
# cleanup() releases the pins and clears the numbering mode so it can be switched
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.IN)
GPIO.setup(18, GPIO.OUT)
input_value = GPIO.input(17)
GPIO.output(18, True)
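# Added: release the channels configured above so the pins are left in a safe
# state for the next program.
GPIO.cleanup()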
# ===== file: /test_case/test_baidu.py | repo: ajuana/api_auto | license: no_license =====
# coding=gbk
import requests
from common.get_token import get_token
url='https://www.baidu.com/'
r=requests.get(url)
print('好的')  # "OK"
# ===== file: /tv_shows/tv_shows/settings.py | repo: abbykahler/Python | license: no_license =====
"""
Django settings for tv_shows project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f(h5tq#a5_b$gpi9k*hx2_26im&+-!96a)e9mudft@217xqq=d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tv_shows.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tv_shows.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# ==== machine_learning/jointDir/bandSim.py (repo: rastirith/Project_FRB) ====
import numpy as np
from matplotlib import pyplot as plt
import math
from scipy import special
import scipy.stats as stats
def cordes(Wms):
""" Calculate the dm range spanned by band at random value between
0.87 and 0.93 in the cordes function.
Keyword arguments:
Wms -- the width (ms) property of signal under consideration
"""
freq = 1.732 # Centre frequency of survey
bandWidth = 336 # Bandwidth of survey
SNratio = np.random.uniform(0.87, 0.93) # Ratio of the peak to the cutoff point in the data
x = np.linspace(-500,500,10000) # Generates x-values for the cordes function
zeta = (6.91*10**-3)*bandWidth*(freq**-3)*(Wms**-1)*x # Zeta function in the cordes function
y = (math.pi**(1/2))*0.5*(zeta**-1)*special.erf(zeta) # Values of cordes function, ranges between 0 and 1
dm = x[np.nonzero(y > SNratio)] # Array containing all dm values corresponding to y-values > SNratio
dm_range = dm[-1] - dm[0] # Theoretical allowed DM range for the current candidate
return dm_range/2
def bandCand():
"""Simulates DM, SN, and width data for a candidate signal"""
finalSNarr = [] # Simulated SN data
finalDMarr = [] # Simulated DM datq
finalWarr = [] # Simulated width (ms) data
peakSN = 0 # SN value of the peak
while peakSN < 20: # Peak SN is always higher than 20
peakSN = (np.absolute(np.random.gumbel(1, 1.2))**0.65)*65
tempArr = np.full((1), 1.0) # Cordes function values corresponding to the SN values of "bands", peak SN has value 1.0.
tempWarr = np.full((1), np.random.uniform(0.05,0.3)) # Widths of the "bands", peak has small width (0.05ms to 0.3ms)
n = int((1 + peakSN/50) + (np.random.normal(0, 1)**2)**0.5) # Number of bands, derived empirically from Crab data
band = np.random.normal(5/12, 1/18) # Cordes value of band
tempArr = np.append(tempArr, band)
for l in range(n - 1): # Creates remaining bands underneath the second
band *= np.random.normal(2/3, 1/6)
tempArr = np.append(tempArr, band)
tempArr[::-1].sort() # Sorts the array in descending order
tempArr *= peakSN # Assigns actual SN values
for k in range(len(tempArr) - 1): # Assigns width data for all bands
factor = np.random.normal(0.460, 0.066) # Exponential decay factor, dervied empirically from Crab data
w = ((tempArr[k + 1]/tempArr[k])**(-1/factor))*tempWarr[k] # Uses ratios of SN to calculate corresponding width data
tempWarr = np.append(tempWarr, w)
tempArr = tempArr[np.nonzero(tempWarr < 40)] # Bands considered should have width less than 40
tempWarr = tempWarr[np.nonzero(tempWarr < 40)]
if len(tempArr) == 1: # Exception case where all bands but the peak has been sorted away
tempArr = np.append(tempArr, np.random.uniform(10,12)) # Creates bottom band of low DM (10 to 12)
factor = np.random.normal(0.460, 0.066)
w = ((tempArr[-1]/tempArr[0])**(-1/factor))*tempWarr[0]
tempWarr = np.append(tempWarr, w)
cordAlt = cordes(tempWarr[-1]) # Calculates DM range spanned by bottom band, used to guide tail points data range
"""Loops through each band (index k) and generates all data for bands and tails"""
    for k in range(1, len(tempArr)):
cord = cordes(tempWarr[k]) # DM range spanned by current band
numDMpoints = int(np.round(np.random.normal(8*cord, cord)*2)) # Number of points in band
bandDMs = np.linspace(-cord, +cord, numDMpoints) # Generates a range of DM data twice as large as needed
bandDMs = np.random.choice(bandDMs, size = int(numDMpoints/2), replace = False) # Randomly reduces the range to half (simulates randomness of points)
bandSNs = np.random.normal(tempArr[k], 0.3, len(bandDMs)) # All SN data in the band
bandWs = np.random.normal(tempWarr[k], 0.01, len(bandDMs)) # All width data in the band
if k == 1: # Adds the peak points data to the first array (just to include it)
bandDMs = np.append(bandDMs, 0)
bandSNs = np.append(bandSNs, tempArr[0])
bandWs = np.append(bandWs, tempWarr[0])
tWidth = 0.4 + (np.log2(tempWarr[k])/10) # Width (ms) of tail parts, as derived from Crab data:
# Tails seem to follow the cordes function peaking at peakSN
# increasing with 0.1 ms for every factor of 2 increase in the
# actual width data (2ms -> 0.5, 4ms -> 0.6, 8ms -> 0.7)
xTail = np.linspace(-cordAlt-5, cordAlt+5, 1*numDMpoints) # Tail dm data
zeta = (6.91*10**-3)*336*(1.732**-3)*(tWidth**-1)*xTail # Zeta function in the cordes function
zeta[zeta == 0] = 0.000001 # Fixes zeta = 0 issue
yDeviations = np.random.normal(0,0.02, len(zeta)) # Deviation in SN data (to add randomness)
yTail = (math.pi**(1/2))*0.5*(zeta**-1)*special.erf(zeta) + yDeviations # Final cordes value for tail's SN
wTail = np.random.normal(tempWarr[k], 0.01, len(zeta)) # Final width data for tails
yTail = np.array(yTail)
wTail = np.array(wTail)
capRatio = np.random.uniform(tempArr[-1]/tempArr[0] + 0.08, 0.24) # Upper cap of SN ratios
xTail = xTail[np.nonzero(yTail < capRatio)] # Removes all data below the above defined upper cap
wTail = wTail[np.nonzero(yTail < capRatio)]
yTail = yTail[yTail < capRatio]
yTail *= tempArr[0] # Multiplies with peakSN, i.e. turns cordes ratios into absolute SN values
xTail = xTail[np.nonzero(yTail > tempArr[-1])] # Only keeps tail data above the bottom band
wTail = wTail[np.nonzero(yTail > tempArr[-1])]
yTail = yTail[yTail > tempArr[-1]]
# Under the tails there are further points vertically under the tail points
xTailVert = np.array([])
yTailVert = np.array([])
wTailVert = np.array([])
for i in range(len(yTail)): # Goes through all tail points to create points vertically underneath them
randVar = np.random.uniform(0,1) # Random variable between 0 and 1
if randVar > (i/(2.5*len(yTail))) + 0.2 - k*0.1: # Defines a probability to create more points under the tail point
num = np.random.randint(0, 5) # Number of vertical tail points
temp = np.random.uniform(tempArr[-1], yTail[i], num) # SN values of vertical tail points
yTailVert = np.concatenate((yTailVert, temp))
xTailVert = np.concatenate((xTailVert, [xTail[i]]*num))
wTailVert = np.concatenate((wTailVert, np.random.normal(tempWarr[k], 0.01, num)))
if len(yTailVert) > 0: # Only keeps vertical tail points that are above 2 SN less than the bottom band
xTailVert = xTailVert[np.nonzero(yTailVert > (tempArr[-1] - 2))]
wTailVert = wTailVert[np.nonzero(yTailVert > (tempArr[-1] - 2))]
yTailVert = yTailVert[yTailVert > (tempArr[-1] - 2)]
# Final data arrays
finalSNarr = np.concatenate((finalSNarr, yTailVert, yTail, bandSNs))
finalDMarr = np.concatenate((finalDMarr, xTailVert, xTail, bandDMs))
finalWarr = np.concatenate((finalWarr, wTailVert, wTail, bandWs))
addRight = np.random.randint(1,4) # Number of additional tails
for q in range(addRight):
""" Adds additional tails that does not necessarily correspond to the bands seen """
step = np.random.uniform(0.05, 0.15) # Width step
tWidth += step # Increases width accordingly
""" Rest of this for loop is same as the commented for loop above"""
xTail = np.linspace(-cordAlt-5, cordAlt+5, 1*numDMpoints)
zeta = (6.91*10**-3)*336*(1.732**-3)*(tWidth**-1)*xTail
zeta[zeta == 0] = 0.000001
yDeviations = np.random.normal(0,0.02, len(zeta))
yTail = (math.pi**(1/2))*0.5*(zeta**-1)*special.erf(zeta) + yDeviations
wTail = np.random.normal(tempWarr[-1], 0.01, len(zeta))
capRatio = np.random.uniform(tempArr[-1]/tempArr[0] + 0.08, 0.24)
xTail = xTail[np.nonzero(yTail < capRatio)]
wTail = wTail[np.nonzero(yTail < capRatio)]
yTail = yTail[yTail < capRatio]
yTail *= tempArr[0]
xTail = xTail[np.nonzero(yTail > tempArr[-1])]
wTail = wTail[np.nonzero(yTail > tempArr[-1])]
yTail = yTail[yTail > tempArr[-1]]
xTailVert = np.array([])
yTailVert = np.array([])
wTailVert = np.array([])
for i in range(len(yTail)):
randVar = np.random.uniform(0,1)
if randVar > (i/(2.5*len(yTail))) + 0.2 - k*0.1:
num = int(np.random.uniform(0, 5))
temp = np.random.uniform(tempArr[-1], yTail[i], num)
yTailVert = np.concatenate((yTailVert, temp))
xTailVert = np.concatenate((xTailVert, [xTail[i]]*num))
wTailVert = np.concatenate((wTailVert, np.random.normal(tempWarr[-1], 0.01, num)))
if len(yTailVert) > 0:
xTailVert = xTailVert[np.nonzero(yTailVert > (tempArr[-1] - 2))]
wTailVert = wTailVert[np.nonzero(yTailVert > (tempArr[-1] - 2))]
yTailVert = yTailVert[yTailVert > (tempArr[-1] - 2)]
finalSNarr = np.concatenate((finalSNarr, yTailVert, yTail))
finalDMarr = np.concatenate((finalDMarr, xTailVert, xTail))
finalWarr = np.concatenate((finalWarr, wTailVert, wTail))
""" Adds random noise to the final plots """
noiseFraction = 1/4 # Fraction of points that is noise
noiseNum = int((noiseFraction*len(finalDMarr))/(1 - noiseFraction)) # Number of noise points
noiseDM = np.random.uniform(np.amin(finalDMarr), np.amax(finalDMarr), noiseNum) # DM noise data
noiseSN = np.random.normal(np.amin(finalSNarr), np.amax([2, np.amax(finalSNarr)/20]), len(noiseDM)) # SN noise data
noiseDM = noiseDM[np.nonzero(noiseSN > 8)] # Requires noise to be above 8 SN
noiseSN = noiseSN[noiseSN > 8]
noiseW = np.full((len(noiseDM)), 32) # Typical noise width value
finalSNarr = np.concatenate((finalSNarr, noiseSN))
finalDMarr = np.concatenate((finalDMarr, noiseDM))
finalWarr = np.concatenate((finalWarr, noiseW))
return finalDMarr, finalSNarr, finalWarr
if __name__ == '__main__':
    dmArr, snArr, wArr = bandCand()
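    # Quick visual sanity check of one simulated candidate (an illustrative
    # sketch; marker size and alpha are arbitrary choices)
    plt.scatter(dmArr, snArr, s=6, alpha=0.6)
    plt.xlabel('DM offset from peak')
    plt.ylabel('S/N')
    plt.show()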
"[email protected]"
] | |
# ==== Desafio063.py (repo: SheilaFernandes/Desafios-py, MIT license) ====
print('---'*35)
print('Fibonacci Sequence')
print('---'*35)
n = int(input('Enter a number >>>'))
t1 = 0
t2 = 1
cont = 3
print('{} - {}'.format(t1, t2), end='')
while cont <= n:
t3 = t1 + t2
print(' - {}'.format(t3), end='')
cont += 1
t1 = t2
t2 = t3
print('-----END-----')
# ==== Machine Learning/cross_validation.py (repo: hudaoling/hudaoling_20200907) ====
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_validate
X, y = make_regression(n_samples=1000, random_state=0)
lr = LinearRegression()
print(X.shape)
print('**************')
print(y.shape)
print(len(X), len(y))
result = cross_validate(lr, X, y) # defaults to 5-fold CV
print(result['test_score']) # r_squared score is high because dataset is easy
print(result)
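
# A follow-on sketch: the same helper with an explicit fold count and scorer
# (both choices are illustrative, not from the original script)
result_10 = cross_validate(lr, X, y, cv=10, scoring='r2')
print(result_10['test_score'].mean())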
"[email protected]"
] | |
# ==== src/GACNN-Esfahanian-Akhavan/testGenerationalGA.py (repo: fireHedgehog/cnn_test) ====
# -*- coding: utf-8 -*-
"""
Created on 2019/11/21 15:54
@file: testGenerationalGA.py
@author: Matt
"""
from DataMgr import load_cifar10, write_performance
from GenerationalGA import GenerationalGA
from keras.utils import to_categorical
root = '../../data/cifar-10-batches-py'
# root = '/home/u800199/workdir/datasets/cifar10'
X_train, y_train, X_test, y_test = load_cifar10(root)
y_train, y_test = to_categorical(y_train), to_categorical(y_test)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
train_size = len(X_train)
test_size = len(X_test)
g = GenerationalGA(
_X_train=X_train[:train_size],
_y_train=y_train[:train_size],
_X_test=X_test[:test_size],
_y_test=y_test[:test_size],
_pop_size=100,
_r_mutation=0.2,
_p_crossover=0, # no use
_p_mutation=0.3,
_max_iter=30,
_min_fitness=0.95,
_batch_size=5000,
_elite_num=0, # no use
_mating_pool_size=0, # no use
)
g.run()
write_performance(g.evaluation_history, 'GenerationalGA_CIFAR10.txt')
# ==== ospcpts/ptsrun.py (repo: krisgospc/ospc_pts_burnin) ====
#!/usr/bin/env python3
""" logic to interact with phoronix-test-suite """
import json
from subprocess import call
# basic JSON write
def write_prefs():
with open('filename.json', 'w') as f:
json.dumps(data, f)
# basic JSON read
def load_prefs():
with open('filename.json', 'r') as r:
data_struct = json.loads(f)
# find loops
def get_loops():
for x in data_struct["loops"]:
loops = x
# write the command for PTS to start running
def run_bash():
call(["TOTAL_LOOP_TIME = ", loops, "PTS_CONCURRENT_TEST_RUNS = ", runs,
"phoronix-test-suite stress-run ", test_suite])
# ==== ex7.py (repo: mauabe/python-thw) ====
print ("Mary had a little lamb.")
print ("Its fleece was white as %s." % 'snow')
print ("and everywhere that Mary went.")
print ("." * 10) # what'd that do?
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
end10 = "g"
end11 = "e"
end12 = "r"
# string concatenation joins the twelve one-character strings
print (end1 + end2 + end3 + end4 + end5 + end6)
print (end7 + end8 + end9 + end10 + end11 + end12)
"[email protected]"
] | |
# ==== pipeline/run_copy_stripped.py (repo: shuo-zhou/openfmri, BSD-2-Clause license) ====
#!/usr/bin/env python
""" run_copy_stripped.py - copy skull-stripped images from freesurfer dirs
"""
## Copyright 2011, Russell Poldrack. All rights reserved.
## Redistribution and use in source and binary forms, with or without modification, are
## permitted provided that the following conditions are met:
## 1. Redistributions of source code must retain the above copyright notice, this list of
## conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above copyright notice, this list
## of conditions and the following disclaimer in the documentation and/or other materials
## provided with the distribution.
## THIS SOFTWARE IS PROVIDED BY RUSSELL POLDRACK ``AS IS'' AND ANY EXPRESS OR IMPLIED
## WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
## FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RUSSELL POLDRACK OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
## CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
## ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os,sys
def usage():
"""Print the docstring and exit with error."""
sys.stdout.write(__doc__)
sys.exit(2)
if len(sys.argv)>1:
dataset=sys.argv[1]
else:
usage()
if len(sys.argv)>2:
basedir=sys.argv[2]
if not os.path.exists(basedir):
print 'basedir %s does not exist!'%basedir
sys.exit(1)
else:
basedir='/corral-repl/utexas/poldracklab/openfmri/staged/'
if len(sys.argv)>3:
subdir=sys.argv[3]
if not os.path.exists(subdir):
print 'subdir %s does not exist!'%subdir
sys.exit(1)
else:
subdir='/corral-repl/utexas/poldracklab/openfmri/subdir/'
outfile=open('run_copy_stripped_%s.sh'%dataset,'w')
dsdir=os.path.join(basedir,dataset)
for root,dirs,files in os.walk(dsdir):
for f in files:
if f.rfind('highres001.nii.gz')>-1 and root.find(dataset)>-1:
f_split=root.split('/')
outfile.write('mri_convert --out_orientation LAS %s/%s_%s/mri/brainmask.mgz --reslice_like %s/highres001.nii.gz %s/highres001_brain.nii\n'%(subdir,f_split[-3],f_split[-2],root,root))
outfile.write('gzip %s/highres001_brain.nii\n'%root)
outfile.write('fslmaths %s/highres001_brain.nii.gz -thr 1 -bin %s/highres001_brain_mask.nii.gz\n'%(root,root))
outfile.close()
print 'now launch using:'
print 'sh run_copy_stripped_%s.sh'%dataset
# ==== django_bootstrap3_multidatepicker/django_bootstrap3_multidatepicker/urls.py (repo: dhgrz/django-bootstrap3-multidatepicker, Apache-2.0 license) ====
# Copyright 2016 Fabian Wenzelmann
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""django_bootstrap3_multidatepicker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
# ==== challenge2/test_challenge2.py (repo: giovannamascarenhas/spoonGuru-project) ====
# Internal packages
from challenge_2 import calculate_largest_loss
def test_calculate_largest_loss_function():
"""This function tests the calculate_largest_loss function"""
pricesLst = [2, 5, 10, 12, 13]
assert calculate_largest_loss(pricesLst) == 5
def test_calculate_largest_loss_function_with_0_length():
"""This function tests the calculate_largest_loss function
with 0 elements"""
pricesLst = []
assert calculate_largest_loss(pricesLst) == 0
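
# An equivalent table-driven variant (a sketch; it assumes pytest runs this
# suite, which the bare test functions suggest)
import pytest

@pytest.mark.parametrize('prices, expected', [
    ([2, 5, 10, 12, 13], 5),
    ([], 0),
])
def test_calculate_largest_loss_parametrized(prices, expected):
    """Table-driven version of the two cases above."""
    assert calculate_largest_loss(prices) == expected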
# ==== Layers/sigmoid.py (repo: duduhali/HasAI) ====
import numpy as np
class Sigmoid:
    def __init__(self): # no parameters, so nothing to initialize
pass
def sigmoid(self, x):
return 1 / (1 + np.exp(-x))
def forward(self, x):
self.x = x
# print(x.shape) (1024, 26, 1)
        self.y = self.sigmoid(x) # handles scalars as well as NumPy arrays
return self.y
    def backward(self, d):
        sig = self.y # reuse the output cached by forward()
        self.dx = d * sig * (1 - sig)
        return self.dx # pass the gradient backward
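
# Minimal usage sketch (the (batch, features, 1) shape mirrors the commented
# print above; the values are arbitrary)
if __name__ == '__main__':
    layer = Sigmoid()
    x = np.random.randn(4, 26, 1)
    y = layer.forward(x)
    grad = layer.backward(np.ones_like(y))
    print(y.shape, grad.shape)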
"a88b88c88"
] | a88b88c88 |
# ==== modules/peewee.py (repo: d1ffuz0r/freeshardwatcher) ====
# (\
# ( \ /(o)\ caw!
# ( \/ ()/ /)
# ( `;.))'".)
# `(/////.-'
# =====))=))===()
# ///'
# //
# '
from __future__ import with_statement
from datetime import datetime
import decimal
import logging
import os
import re
import threading
import time
try:
import sqlite3
except ImportError:
sqlite3 = None
try:
import psycopg2
except ImportError:
psycopg2 = None
try:
import MySQLdb as mysql
except ImportError:
mysql = None
__all__ = [
'ImproperlyConfigured', 'SqliteDatabase', 'MySQLDatabase', 'PostgresqlDatabase',
'asc', 'desc', 'Count', 'Max', 'Min', 'Sum', 'Q', 'Field', 'CharField', 'TextField',
'DateTimeField', 'BooleanField', 'DecimalField', 'FloatField', 'IntegerField',
'PrimaryKeyField', 'ForeignKeyField', 'Model', 'filter_query', 'annotate_query',
]
class ImproperlyConfigured(Exception):
pass
if sqlite3 is None and psycopg2 is None and mysql is None:
raise ImproperlyConfigured('Either sqlite3, psycopg2 or MySQLdb must be installed')
if sqlite3:
sqlite3.register_adapter(decimal.Decimal, lambda v: str(v))
sqlite3.register_converter('decimal', lambda v: decimal.Decimal(v))
DATABASE_NAME = os.environ.get('PEEWEE_DATABASE', 'peewee.db')
logger = logging.getLogger('peewee.logger')
class BaseAdapter(object):
"""
The various subclasses of `BaseAdapter` provide a bridge between the high-
level `Database` abstraction and the underlying python libraries like
    psycopg2. They also provide a way to unify the pythonic field types with
the underlying column types used by the database engine.
The `BaseAdapter` provides two types of mappings:
- mapping between filter operations and their database equivalents
- mapping between basic field types and their database column types
    The `BaseAdapter` is also the mechanism used by the `Database` class to:
- handle connections with the database
- extract information from the database cursor
"""
operations = {'eq': '= %s'}
interpolation = '%s'
def get_field_types(self):
field_types = {
'integer': 'INTEGER',
'float': 'REAL',
'decimal': 'DECIMAL',
'string': 'VARCHAR',
'text': 'TEXT',
'datetime': 'DATETIME',
'primary_key': 'INTEGER',
'foreign_key': 'INTEGER',
'boolean': 'SMALLINT',
}
field_types.update(self.get_field_overrides())
return field_types
def get_field_overrides(self):
return {}
def connect(self, database, **kwargs):
raise NotImplementedError
def close(self, conn):
conn.close()
def lookup_cast(self, lookup, value):
if lookup in ('contains', 'icontains'):
return '%%%s%%' % value
elif lookup in ('startswith', 'istartswith'):
return '%s%%' % value
return value
def last_insert_id(self, cursor, model):
return cursor.lastrowid
def rows_affected(self, cursor):
return cursor.rowcount
class SqliteAdapter(BaseAdapter):
# note the sqlite library uses a non-standard interpolation string
operations = {
'lt': '< ?',
'lte': '<= ?',
'gt': '> ?',
'gte': '>= ?',
'eq': '= ?',
'ne': '!= ?', # watch yourself with this one
'in': 'IN (%s)', # special-case to list q-marks
'is': 'IS ?',
'icontains': "LIKE ? ESCAPE '\\'", # surround param with %'s
'contains': "GLOB ?", # surround param with *'s
'istartswith': "LIKE ? ESCAPE '\\'",
'startswith': "GLOB ?",
}
interpolation = '?'
def connect(self, database, **kwargs):
if not sqlite3:
raise ImproperlyConfigured('sqlite3 must be installed on the system')
return sqlite3.connect(database, **kwargs)
def lookup_cast(self, lookup, value):
if lookup == 'contains':
return '*%s*' % value
elif lookup == 'icontains':
return '%%%s%%' % value
elif lookup == 'startswith':
return '%s*' % value
elif lookup == 'istartswith':
return '%s%%' % value
return value
class PostgresqlAdapter(BaseAdapter):
operations = {
'lt': '< %s',
'lte': '<= %s',
'gt': '> %s',
'gte': '>= %s',
'eq': '= %s',
'ne': '!= %s', # watch yourself with this one
'in': 'IN (%s)', # special-case to list q-marks
'is': 'IS %s',
'icontains': 'ILIKE %s', # surround param with %'s
'contains': 'LIKE %s', # surround param with *'s
'istartswith': 'ILIKE %s',
'startswith': 'LIKE %s',
}
def connect(self, database, **kwargs):
if not psycopg2:
raise ImproperlyConfigured('psycopg2 must be installed on the system')
return psycopg2.connect(database=database, **kwargs)
def get_field_overrides(self):
return {
'primary_key': 'SERIAL',
'datetime': 'TIMESTAMP',
'decimal': 'NUMERIC',
}
def last_insert_id(self, cursor, model):
cursor.execute("SELECT CURRVAL('\"%s_%s_seq\"')" % (
model._meta.db_table, model._meta.pk_name))
return cursor.fetchone()[0]
class MySQLAdapter(BaseAdapter):
operations = {
'lt': '< %s',
'lte': '<= %s',
'gt': '> %s',
'gte': '>= %s',
'eq': '= %s',
'ne': '!= %s', # watch yourself with this one
'in': 'IN (%s)', # special-case to list q-marks
'is': 'IS %s',
'icontains': 'LIKE %s', # surround param with %'s
'contains': 'LIKE BINARY %s', # surround param with *'s
'istartswith': 'LIKE %s',
'startswith': 'LIKE BINARY %s',
}
def connect(self, database, **kwargs):
if not mysql:
raise ImproperlyConfigured('MySQLdb must be installed on the system')
return mysql.connect(db=database, **kwargs)
def get_field_overrides(self):
return {
'primary_key': 'integer AUTO_INCREMENT',
'boolean': 'bool',
'float': 'double precision',
'text': 'longtext',
'decimal': 'numeric',
}
class Database(object):
"""
A high-level api for working with the supported database engines. `Database`
provides a wrapper around some of the functions performed by the `Adapter`,
in addition providing support for:
- execution of SQL queries
- creating and dropping tables and indexes
"""
def __init__(self, adapter, database, threadlocals=False, **connect_kwargs):
self.adapter = adapter
self.database = database
self.connect_kwargs = connect_kwargs
if threadlocals:
self.__local = threading.local()
else:
self.__local = type('DummyLocal', (object,), {})
self._conn_lock = threading.Lock()
def connect(self):
with self._conn_lock:
self.__local.conn = self.adapter.connect(self.database, **self.connect_kwargs)
self.__local.closed = False
def close(self):
with self._conn_lock:
self.adapter.close(self.__local.conn)
self.__local.closed = True
def get_conn(self):
if not hasattr(self.__local, 'closed') or self.__local.closed:
self.connect()
return self.__local.conn
def get_cursor(self):
return self.get_conn().cursor()
def execute(self, sql, params=None, commit=False):
cursor = self.get_cursor()
res = cursor.execute(sql, params or ())
if commit:
self.commit()
logger.debug((sql, params))
return cursor
def commit(self):
self.get_conn().commit()
def rollback(self):
self.get_conn().rollback()
def last_insert_id(self, cursor, model):
return self.adapter.last_insert_id(cursor, model)
def rows_affected(self, cursor):
return self.adapter.rows_affected(cursor)
def column_for_field(self, db_field):
try:
return self.adapter.get_field_types()[db_field]
except KeyError:
            raise AttributeError('Unknown field type: "%s", valid types are: %s' % (
                db_field, ', '.join(self.adapter.get_field_types().keys())
            ))
def create_table(self, model_class, safe=False):
framing = safe and "CREATE TABLE IF NOT EXISTS %s (%s);" or "CREATE TABLE %s (%s);"
columns = []
for field in model_class._meta.fields.values():
columns.append(field.to_sql())
query = framing % (model_class._meta.db_table, ', '.join(columns))
self.execute(query, commit=True)
def create_index(self, model_class, field, unique=False):
framing = 'CREATE %(unique)s INDEX %(model)s_%(field)s ON %(model)s(%(field)s);'
if field not in model_class._meta.fields:
raise AttributeError(
'Field %s not on model %s' % (field, model_class)
)
unique_expr = ternary(unique, 'UNIQUE', '')
query = framing % {
'unique': unique_expr,
'model': model_class._meta.db_table,
'field': field
}
self.execute(query, commit=True)
def drop_table(self, model_class, fail_silently=False):
framing = fail_silently and 'DROP TABLE IF EXISTS %s;' or 'DROP TABLE %s;'
self.execute(framing % model_class._meta.db_table, commit=True)
def get_indexes_for_table(self, table):
raise NotImplementedError
def get_tables(self):
raise NotImplementedError
class SqliteDatabase(Database):
def __init__(self, database, **connect_kwargs):
super(SqliteDatabase, self).__init__(SqliteAdapter(), database, **connect_kwargs)
def get_indexes_for_table(self, table):
res = self.execute('PRAGMA index_list(%s);' % table)
rows = sorted([(r[1], r[2] == 1) for r in res.fetchall()])
return rows
def get_tables(self):
res = self.execute('select name from sqlite_master where type="table" order by name')
return [r[0] for r in res.fetchall()]
class PostgresqlDatabase(Database):
def __init__(self, database, **connect_kwargs):
super(PostgresqlDatabase, self).__init__(PostgresqlAdapter(), database, **connect_kwargs)
def get_indexes_for_table(self, table):
res = self.execute("""
SELECT c2.relname, i.indisprimary, i.indisunique
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index i
WHERE c.relname = %s AND c.oid = i.indrelid AND i.indexrelid = c2.oid
ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname""", (table,))
return sorted([(r[0], r[1]) for r in res.fetchall()])
def get_tables(self):
res = self.execute("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)
ORDER BY c.relname""")
return [row[0] for row in res.fetchall()]
class MySQLDatabase(Database):
def __init__(self, database, **connect_kwargs):
super(MySQLDatabase, self).__init__(MySQLAdapter(), database, **connect_kwargs)
def get_indexes_for_table(self, table):
res = self.execute('SHOW INDEXES IN %s;' % table)
rows = sorted([(r[2], r[1] == 0) for r in res.fetchall()])
return rows
def get_tables(self):
res = self.execute('SHOW TABLES;')
return [r[0] for r in res.fetchall()]
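# Construction sketch (kept in comments so importing this module stays
# side-effect free; 'my_app.db' and SomeModel are placeholders):
#
#     database = SqliteDatabase('my_app.db')
#     database.connect()
#     database.create_table(SomeModel, safe=True)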
class QueryResultWrapper(object):
"""
Provides an iterator over the results of a raw Query, additionally doing
two things:
- converts rows from the database into model instances
- ensures that multiple iterations do not result in multiple queries
"""
def __init__(self, model, cursor):
self.model = model
self.cursor = cursor
self._result_cache = []
self._populated = False
def model_from_rowset(self, model_class, row_dict):
instance = model_class()
for attr, value in row_dict.iteritems():
if attr in instance._meta.fields:
field = instance._meta.fields[attr]
setattr(instance, attr, field.python_value(value))
else:
setattr(instance, attr, value)
return instance
def _row_to_dict(self, row, result_cursor):
return dict((result_cursor.description[i][0], value)
for i, value in enumerate(row))
def __iter__(self):
if not self._populated:
return self
else:
return iter(self._result_cache)
def next(self):
row = self.cursor.fetchone()
if row:
row_dict = self._row_to_dict(row, self.cursor)
instance = self.model_from_rowset(self.model, row_dict)
self._result_cache.append(instance)
return instance
else:
self._populated = True
raise StopIteration
# exception raised when a query's .get() finds no matching row
class DoesNotExist(Exception):
pass
# semantic wrappers for ordering the results of a `SelectQuery`
def asc(f):
return (f, 'ASC')
def desc(f):
return (f, 'DESC')
# wrappers for performing aggregation in a `SelectQuery`
def Count(f, alias='count'):
return ('COUNT', f, alias)
def Max(f, alias='max'):
return ('MAX', f, alias)
def Min(f, alias='min'):
return ('MIN', f, alias)
def Sum(f, alias='sum'):
return ('SUM', f, alias)
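# Usage sketch (comments only; `Entry` is a hypothetical model defined with
# this module's Model class):
#
#     Entry.select().order_by(desc('pub_date'))
#     Entry.select({Entry: ['blog_id', Count('id', 'num_entries')]}).group_by('blog_id')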
# decorator for query methods to indicate that they change the state of the
# underlying data structures
def returns_clone(func):
def inner(self, *args, **kwargs):
clone = self.clone()
res = func(clone, *args, **kwargs)
return clone
return inner
# helpers
ternary = lambda cond, t, f: (cond and [t] or [f])[0]
class Node(object):
def __init__(self, connector='AND'):
self.connector = connector
self.children = []
self.negated = False
def connect(self, rhs, connector):
if isinstance(rhs, Q):
if connector == self.connector:
self.children.append(rhs)
return self
else:
p = Node(connector)
p.children = [self, rhs]
return p
elif isinstance(rhs, Node):
p = Node(connector)
p.children = [self, rhs]
return p
def __or__(self, rhs):
return self.connect(rhs, 'OR')
def __and__(self, rhs):
return self.connect(rhs, 'AND')
def __invert__(self):
self.negated = not self.negated
return self
def __nonzero__(self):
return bool(self.children)
def __unicode__(self):
query = []
nodes = []
for child in self.children:
if isinstance(child, Q):
query.append(unicode(child))
elif isinstance(child, Node):
nodes.append('(%s)' % unicode(child))
query.extend(nodes)
connector = ' %s ' % self.connector
query = connector.join(query)
if self.negated:
query = 'NOT %s' % query
return query
class Q(object):
def __init__(self, **kwargs):
self.query = kwargs
self.parent = None
self.negated = False
def connect(self, connector):
if self.parent is None:
self.parent = Node(connector)
self.parent.children.append(self)
def __or__(self, rhs):
self.connect('OR')
return self.parent | rhs
def __and__(self, rhs):
self.connect('AND')
return self.parent & rhs
def __invert__(self):
self.negated = not self.negated
return self
def __unicode__(self):
bits = ['%s = %s' % (k, v) for k, v in self.query.items()]
if len(self.query.items()) > 1:
connector = ' AND '
expr = '(%s)' % connector.join(bits)
else:
expr = bits[0]
if self.negated:
expr = 'NOT %s' % expr
return expr
def parseq(*args, **kwargs):
node = Node()
for piece in args:
if isinstance(piece, (Q, Node)):
node.children.append(piece)
else:
raise TypeError('Unknown object: %s', piece)
if kwargs:
node.children.append(Q(**kwargs))
return node
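# Composition sketch (hypothetical `User` model): `|`, `&` and `~` build the
# Q/Node tree that parse_node() later renders into SQL:
#
#     User.select().where(Q(username='foo') | Q(username='bar'))
#     User.select().where(~Q(active=True) & Q(username__icontains='adm'))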
class EmptyResultException(Exception):
pass
class BaseQuery(object):
query_separator = '__'
requires_commit = True
force_alias = False
def __init__(self, model):
self.model = model
self.query_context = model
self.database = self.model._meta.database
self.operations = self.database.adapter.operations
self.interpolation = self.database.adapter.interpolation
self._dirty = True
self._where = {}
self._joins = {}
self._joined_models = set()
def _clone_dict_graph(self, dg):
cloned = {}
for node, edges in dg.items():
cloned[node] = list(edges)
return cloned
def clone_where(self):
return self._clone_dict_graph(self._where)
def clone_joins(self):
return self._clone_dict_graph(self._joins)
def clone(self):
raise NotImplementedError
def lookup_cast(self, lookup, value):
return self.database.adapter.lookup_cast(lookup, value)
def parse_query_args(self, model, **query):
parsed = {}
for lhs, rhs in query.iteritems():
if self.query_separator in lhs:
lhs, op = lhs.rsplit(self.query_separator, 1)
else:
op = 'eq'
try:
field = model._meta.get_field_by_name(lhs)
except AttributeError:
field = model._meta.get_related_field_by_name(lhs)
if field is None:
raise
if isinstance(rhs, Model):
rhs = rhs.get_pk()
if op == 'in':
if isinstance(rhs, SelectQuery):
lookup_value = rhs
operation = 'IN (%s)'
else:
if not rhs:
raise EmptyResultException
lookup_value = [field.db_value(o) for o in rhs]
operation = self.operations[op] % \
(','.join([self.interpolation for v in lookup_value]))
elif op == 'is':
if rhs is not None:
raise ValueError('__is lookups only accept None')
operation = 'IS NULL'
lookup_value = []
else:
lookup_value = field.db_value(rhs)
operation = self.operations[op]
parsed[field.name] = (operation, self.lookup_cast(op, lookup_value))
return parsed
@returns_clone
def where(self, *args, **kwargs):
self._where.setdefault(self.query_context, [])
parsed = parseq(*args, **kwargs)
if parsed:
self._where[self.query_context].append(parsed)
@returns_clone
def join(self, model, join_type=None, on=None):
if self.query_context._meta.rel_exists(model):
self._joined_models.add(model)
self._joins.setdefault(self.query_context, [])
self._joins[self.query_context].append((model, join_type, on))
self.query_context = model
else:
raise AttributeError('No foreign key found between %s and %s' % \
(self.query_context.__name__, model.__name__))
@returns_clone
def switch(self, model):
if model == self.model:
self.query_context = model
return
if model in self._joined_models:
self.query_context = model
return
raise AttributeError('You must JOIN on %s' % model.__name__)
def use_aliases(self):
return len(self._joined_models) > 0 or self.force_alias
def combine_field(self, alias, field_name):
if alias:
return '%s.%s' % (alias, field_name)
return field_name
def follow_joins(self, current, alias_map, alias_required, alias_count, seen=None):
computed = []
seen = seen or set()
if current not in self._joins:
return computed
for i, (model, join_type, on) in enumerate(self._joins[current]):
seen.add(model)
if alias_required:
alias_count += 1
alias_map[model] = 't%d' % alias_count
else:
alias_map[model] = ''
from_model = current
field = from_model._meta.get_related_field_for_model(model, on)
if field:
left_field = field.name
right_field = model._meta.pk_name
else:
field = from_model._meta.get_reverse_related_field_for_model(model, on)
left_field = from_model._meta.pk_name
right_field = field.name
if join_type is None:
if field.null and model not in self._where:
join_type = 'LEFT OUTER'
else:
join_type = 'INNER'
computed.append(
'%s JOIN %s AS %s ON %s = %s' % (
join_type,
model._meta.db_table,
alias_map[model],
self.combine_field(alias_map[from_model], left_field),
self.combine_field(alias_map[model], right_field),
)
)
computed.extend(self.follow_joins(model, alias_map, alias_required, alias_count, seen))
return computed
def compile_where(self):
alias_count = 0
alias_map = {}
alias_required = self.use_aliases()
where_with_alias = []
where_data = []
if alias_required:
alias_count += 1
alias_map[self.model] = 't%d' % alias_count
else:
alias_map[self.model] = ''
computed_joins = self.follow_joins(self.model, alias_map, alias_required, alias_count)
for model in sorted(self._where, key=lambda m: alias_map[m]):
for node in self._where[model]:
query, data = self.parse_node(node, model, alias_map)
where_with_alias.append(query)
where_data.extend(data)
return computed_joins, where_with_alias, where_data, alias_map
def convert_where_to_params(self, where_data):
flattened = []
for clause in where_data:
if isinstance(clause, (tuple, list)):
flattened.extend(clause)
else:
flattened.append(clause)
return flattened
def parse_node(self, node, model, alias_map):
query = []
query_data = []
nodes = []
for child in node.children:
if isinstance(child, Q):
parsed, data = self.parse_q(child, model, alias_map)
query.append(parsed)
query_data.extend(data)
elif isinstance(child, Node):
parsed, data = self.parse_node(child, model, alias_map)
query.append('(%s)' % parsed)
query_data.extend(data)
query.extend(nodes)
connector = ' %s ' % node.connector
query = connector.join(query)
if node.negated:
query = 'NOT (%s)' % query
return query, query_data
def parse_q(self, q, model, alias_map):
query = []
query_data = []
parsed = self.parse_query_args(model, **q.query)
for (name, lookup) in parsed.iteritems():
operation, value = lookup
if isinstance(value, SelectQuery):
sql, value = self.convert_subquery(value)
operation = operation % sql
query_data.append(value)
combined = self.combine_field(alias_map[model], name)
query.append('%s %s' % (combined, operation))
if len(query) > 1:
query = '(%s)' % (' AND '.join(query))
else:
query = query[0]
if q.negated:
query = 'NOT %s' % query
return query, query_data
def convert_subquery(self, subquery):
subquery.query, orig_query = subquery.model._meta.pk_name, subquery.query
subquery.force_alias, orig_alias = True, subquery.force_alias
sql, data = subquery.sql()
subquery.query = orig_query
subquery.force_alias = orig_alias
return sql, data
def raw_execute(self):
query, params = self.sql()
return self.database.execute(query, params, self.requires_commit)
class RawQuery(BaseQuery):
def __init__(self, model, query, *params):
self._sql = query
self._params = list(params)
super(RawQuery, self).__init__(model)
def sql(self):
return self._sql, self._params
def execute(self):
return QueryResultWrapper(self.model, self.raw_execute())
def join(self):
raise AttributeError('Raw queries do not support joining programmatically')
def where(self):
raise AttributeError('Raw queries do not support querying programmatically')
def switch(self):
raise AttributeError('Raw queries do not support switching contexts')
def __iter__(self):
return self.execute()
class SelectQuery(BaseQuery):
requires_commit = False
def __init__(self, model, query=None):
self.query = query or '*'
self._group_by = []
self._having = []
self._order_by = []
self._limit = None
self._offset = None
self._distinct = False
self._qr = None
super(SelectQuery, self).__init__(model)
def clone(self):
query = SelectQuery(self.model, self.query)
query.query_context = self.query_context
query._group_by = list(self._group_by)
query._having = list(self._having)
query._order_by = list(self._order_by)
query._limit = self._limit
query._offset = self._offset
query._distinct = self._distinct
query._qr = self._qr
query._where = self.clone_where()
query._joined_models = self._joined_models.copy()
query._joins = self.clone_joins()
return query
@returns_clone
def paginate(self, page, paginate_by=20):
if page > 0:
page -= 1
self._limit = paginate_by
self._offset = page * paginate_by
@returns_clone
def limit(self, num_rows):
self._limit = num_rows
@returns_clone
def offset(self, num_rows):
self._offset = num_rows
def count(self):
tmp_lim, tmp_off = self._limit, self._offset
self._limit = self._offset = None
tmp_query = self.query
if self.use_aliases():
self.query = 'COUNT(t1.%s)' % (self.model._meta.pk_name)
else:
self.query = 'COUNT(%s)' % (self.model._meta.pk_name)
res = self.database.execute(*self.sql())
# restore
self.query = tmp_query
self._limit = tmp_lim
self._offset = tmp_off
return res.fetchone()[0]
@returns_clone
def group_by(self, clause):
model = self.query_context
if isinstance(clause, basestring):
fields = (clause,)
elif isinstance(clause, (list, tuple)):
fields = clause
elif issubclass(clause, Model):
model = clause
fields = clause._meta.get_field_names()
self._group_by.append((model, fields))
@returns_clone
def having(self, clause):
self._having.append(clause)
@returns_clone
def distinct(self):
self._distinct = True
@returns_clone
def order_by(self, *clauses):
order_by = []
for clause in clauses:
if isinstance(clause, tuple):
if len(clause) == 3:
model, field, ordering = clause
elif len(clause) == 2:
if isinstance(clause[0], basestring):
model = self.query_context
field, ordering = clause
else:
model, field = clause
ordering = 'ASC'
else:
raise ValueError('Incorrect arguments passed in order_by clause')
else:
model = self.query_context
field = clause
ordering = 'ASC'
order_by.append(
(model, field, ordering)
)
self._order_by = order_by
def exists(self):
clone = self.paginate(1, 1)
clone.query = '(1) AS a'
curs = self.database.execute(*clone.sql())
return bool(curs.fetchone())
def get(self, *args, **kwargs):
try:
orig_ctx = self.query_context
self.query_context = self.model
obj = self.where(*args, **kwargs).paginate(1, 1).execute().next()
return obj
except StopIteration:
raise self.model.DoesNotExist('instance matching query does not exist:\nSQL: %s\nPARAMS: %s' % (
self.sql()
))
finally:
self.query_context = orig_ctx
def filter(self, *args, **kwargs):
return filter_query(self, *args, **kwargs)
def annotate(self, related_model, aggregation=None):
return annotate_query(self, related_model, aggregation)
def parse_select_query(self, alias_map):
if isinstance(self.query, (list, tuple)):
query = {self.model: self.query}
else:
query = self.query
if isinstance(query, basestring):
if query in ('*', self.model._meta.pk_name) and self.use_aliases():
return '%s.%s' % (alias_map[self.model], query)
return query
elif isinstance(query, dict):
qparts = []
aggregates = []
for model, cols in query.iteritems():
alias = alias_map.get(model, '')
for col in cols:
if isinstance(col, tuple):
func, col, col_alias = col
aggregates.append('%s(%s) AS %s' % \
(func, self.combine_field(alias, col), col_alias)
)
else:
qparts.append(self.combine_field(alias, col))
return ', '.join(qparts + aggregates)
else:
raise TypeError('Unknown type encountered parsing select query')
def sql(self):
joins, where, where_data, alias_map = self.compile_where()
table = self.model._meta.db_table
params = []
group_by = []
if self.use_aliases():
table = '%s AS %s' % (table, alias_map[self.model])
for model, clause in self._group_by:
alias = alias_map[model]
for field in clause:
group_by.append(self.combine_field(alias, field))
else:
group_by = [c[1] for c in self._group_by]
parsed_query = self.parse_select_query(alias_map)
if self._distinct:
sel = 'SELECT DISTINCT'
else:
sel = 'SELECT'
select = '%s %s FROM %s' % (sel, parsed_query, table)
joins = '\n'.join(joins)
where = ' AND '.join(where)
group_by = ', '.join(group_by)
having = ' AND '.join(self._having)
order_by = []
for piece in self._order_by:
model, field, ordering = piece
if self.use_aliases() and field in model._meta.fields:
field = '%s.%s' % (alias_map[model], field)
order_by.append('%s %s' % (field, ordering))
pieces = [select]
if joins:
pieces.append(joins)
if where:
pieces.append('WHERE %s' % where)
params.extend(self.convert_where_to_params(where_data))
if group_by:
pieces.append('GROUP BY %s' % group_by)
if having:
pieces.append('HAVING %s' % having)
if order_by:
pieces.append('ORDER BY %s' % ', '.join(order_by))
if self._limit:
pieces.append('LIMIT %d' % self._limit)
if self._offset:
pieces.append('OFFSET %d' % self._offset)
return ' '.join(pieces), params
def execute(self):
if self._dirty or not self._qr:
try:
self._qr = QueryResultWrapper(self.model, self.raw_execute())
self._dirty = False
return self._qr
except EmptyResultException:
return iter([])
else:
# call the __iter__ method directly
return iter(self._qr)
def __iter__(self):
return self.execute()
class UpdateQuery(BaseQuery):
def __init__(self, model, **kwargs):
self.update_query = kwargs
super(UpdateQuery, self).__init__(model)
def clone(self):
query = UpdateQuery(self.model, **self.update_query)
query._where = self.clone_where()
query._joined_models = self._joined_models.copy()
query._joins = self.clone_joins()
return query
def parse_update(self):
sets = {}
for k, v in self.update_query.iteritems():
try:
field = self.model._meta.get_field_by_name(k)
except AttributeError:
field = self.model._meta.get_related_field_by_name(k)
if field is None:
raise
sets[field.name] = field.db_value(v)
return sets
def sql(self):
joins, where, where_data, alias_map = self.compile_where()
set_statement = self.parse_update()
params = []
update_params = []
for k, v in set_statement.iteritems():
params.append(v)
update_params.append('%s=%s' % (k, self.interpolation))
update = 'UPDATE %s SET %s' % (
self.model._meta.db_table, ', '.join(update_params))
where = ' AND '.join(where)
pieces = [update]
if where:
pieces.append('WHERE %s' % where)
params.extend(self.convert_where_to_params(where_data))
return ' '.join(pieces), params
def join(self, *args, **kwargs):
raise AttributeError('Update queries do not support JOINs in sqlite')
def execute(self):
result = self.raw_execute()
return self.database.rows_affected(result)
class DeleteQuery(BaseQuery):
def clone(self):
query = DeleteQuery(self.model)
query._where = self.clone_where()
query._joined_models = self._joined_models.copy()
query._joins = self.clone_joins()
return query
def sql(self):
joins, where, where_data, alias_map = self.compile_where()
params = []
delete = 'DELETE FROM %s' % (self.model._meta.db_table)
where = ' AND '.join(where)
pieces = [delete]
if where:
pieces.append('WHERE %s' % where)
params.extend(self.convert_where_to_params(where_data))
return ' '.join(pieces), params
def join(self, *args, **kwargs):
raise AttributeError('Update queries do not support JOINs in sqlite')
def execute(self):
result = self.raw_execute()
return self.database.rows_affected(result)
class InsertQuery(BaseQuery):
def __init__(self, model, **kwargs):
self.insert_query = kwargs
super(InsertQuery, self).__init__(model)
def parse_insert(self):
cols = []
vals = []
for k, v in self.insert_query.iteritems():
field = self.model._meta.get_field_by_name(k)
cols.append(k)
vals.append(field.db_value(v))
return cols, vals
def sql(self):
cols, vals = self.parse_insert()
insert = 'INSERT INTO %s (%s) VALUES (%s)' % (
self.model._meta.db_table,
','.join(cols),
','.join(self.interpolation for v in vals)
)
return insert, vals
def where(self, *args, **kwargs):
raise AttributeError('Insert queries do not support WHERE clauses')
def join(self, *args, **kwargs):
raise AttributeError('Insert queries do not support JOINs')
def execute(self):
result = self.raw_execute()
return self.database.last_insert_id(result, self.model)
def model_or_select(m_or_q):
if isinstance(m_or_q, BaseQuery):
return (m_or_q.model, m_or_q)
else:
return (m_or_q, m_or_q.select())
def convert_lookup(model, joins, lookup):
operations = model._meta.database.adapter.operations
pieces = lookup.split('__')
operation = None
query_model = model
if len(pieces) > 1:
if pieces[-1] in operations:
operation = pieces.pop()
lookup = pieces.pop()
# we have some joins
if len(pieces):
query_model = model
for piece in pieces:
# piece is something like 'blog' or 'entry_set'
joined_model = None
for field in query_model._meta.get_fields():
if not isinstance(field, ForeignKeyField):
continue
if piece in (field.descriptor, field.related_name):
joined_model = field.to
if not joined_model:
try:
joined_model = query_model._meta.reverse_relations[piece]
except KeyError:
raise ValueError('Unknown relation: "%s" of "%s"' % (
piece,
query_model,
))
joins.setdefault(query_model, set())
joins[query_model].add(joined_model)
query_model = joined_model
if operation:
lookup = '%s__%s' % (lookup, operation)
return query_model, joins, lookup
def filter_query(model_or_query, *args, **kwargs):
"""
Provide a django-like interface for executing queries
"""
model, select_query = model_or_select(model_or_query)
query = {} # mapping of models to queries
joins = {} # a graph of joins needed, passed into the convert_lookup function
# due to quirks in the way where clauses are defined, Q() queries can only
# work on a single table -- this will need to be redesigned so Q() objects
# can work on multiple models
def fix_q(node_or_q, joins):
if isinstance(node_or_q, Node):
for child in node_or_q.children:
query_model = fix_q(child, joins)
elif isinstance(node_or_q, Q):
new_query = {}
for raw_lookup, value in node_or_q.query.items():
# do we have a query model at this point?
query_model, joins, lookup = convert_lookup(model, joins, raw_lookup)
new_query[lookup] = value
node_or_q.query = new_query
return query_model
for node_or_q in args:
queried_model = fix_q(node_or_q, joins)
query.setdefault(queried_model, [])
query[queried_model].append(node_or_q)
# iterate over keyword lookups and determine lookups and necessary joins
for raw_lookup, value in kwargs.items():
queried_model, joins, lookup = convert_lookup(model, joins, raw_lookup)
query.setdefault(queried_model, [])
query[queried_model].append((lookup, value))
def follow_joins(current, query):
if current in joins:
for joined_model in joins[current]:
query = query.switch(current)
if joined_model not in query._joined_models:
query = query.join(joined_model)
query = follow_joins(joined_model, query)
return query
select_query = follow_joins(model, select_query)
for model, lookups in query.items():
qargs, qkwargs = [], {}
for lookup in lookups:
if isinstance(lookup, tuple):
qkwargs[lookup[0]] = lookup[1]
else:
qargs.append(lookup)
select_query = select_query.switch(model).where(*qargs, **qkwargs)
return select_query
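# Django-style sketch (hypothetical Blog/Entry models related by a foreign
# key; double-underscores mix joins and operations, as convert_lookup parses):
#
#     filter_query(Entry, title__icontains='peewee')   # plain WHERE on Entry
#     filter_query(Entry, blog__title='python')        # follows Entry -> Blog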
def annotate_query(select_query, related_model, aggregation):
aggregation = aggregation or Count(related_model._meta.pk_name)
model = select_query.model
select_query = select_query.switch(model)
cols = select_query.query
# ensure the join is there
if related_model not in select_query._joined_models:
select_query = select_query.join(related_model).switch(model)
# query for it
if isinstance(cols, dict):
selection = cols
group_by = cols[model]
elif isinstance(cols, basestring):
selection = {model: [cols]}
if cols == '*':
group_by = model
else:
group_by = [col.strip() for col in cols.split(',')]
elif isinstance(cols, (list, tuple)):
selection = {model: cols}
group_by = cols
else:
raise ValueError('Unknown type passed in to select query: "%s"' % type(cols))
# query for the related object
selection[related_model] = [aggregation]
select_query.query = selection
return select_query.group_by(group_by)
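# Annotation sketch (same hypothetical models): count child rows per parent,
# or swap in another aggregate:
#
#     annotate_query(Blog.select(), Entry)                      # Count(pk) by default
#     annotate_query(Blog.select(), Entry, Max('id', 'max_id'))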
class Field(object):
db_field = ''
default = None
field_template = "%(column_type)s%(nullable)s"
_field_counter = 0
_order = 0
def get_attributes(self):
return {}
def __init__(self, null=False, db_index=False, unique=False, verbose_name=None,
help_text=None, *args, **kwargs):
self.null = null
self.db_index = db_index
self.unique = unique
self.attributes = self.get_attributes()
self.default = kwargs.get('default', None)
self.verbose_name = verbose_name
self.help_text = help_text
kwargs['nullable'] = ternary(self.null, '', ' NOT NULL')
self.attributes.update(kwargs)
Field._field_counter += 1
self._order = Field._field_counter
def add_to_class(self, klass, name):
self.name = name
self.model = klass
self.verbose_name = self.verbose_name or re.sub('_+', ' ', name).title()
setattr(klass, name, None)
def render_field_template(self):
col_type = self.model._meta.database.column_for_field(self.db_field)
self.attributes['column_type'] = col_type
return self.field_template % self.attributes
def to_sql(self):
rendered = self.render_field_template()
return '%s %s' % (self.name, rendered)
def null_wrapper(self, value, default=None):
if (self.null and value is None) or default is None:
return value
return value or default
def db_value(self, value):
return value
def python_value(self, value):
return value
def lookup_value(self, lookup_type, value):
return self.db_value(value)
def class_prepared(self):
pass
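# Hedged sketch (not part of the original library): the Field API above is
# extended by overriding db_field plus the db_value()/python_value() pair.
# "CSVField" is illustrative and reuses the generic 'text' column type.
class CSVField(Field):
    db_field = 'text'
    def db_value(self, value):
        return ','.join(value) if value else ''
    def python_value(self, value):
        return value.split(',') if value else []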
class CharField(Field):
db_field = 'string'
field_template = '%(column_type)s(%(max_length)d)%(nullable)s'
def get_attributes(self):
return {'max_length': 255}
def db_value(self, value):
if self.null and value is None:
return value
value = value or ''
return value[:self.attributes['max_length']]
def lookup_value(self, lookup_type, value):
if lookup_type == 'contains':
return '*%s*' % self.db_value(value)
elif lookup_type == 'icontains':
return '%%%s%%' % self.db_value(value)
else:
return self.db_value(value)
class TextField(Field):
db_field = 'text'
def db_value(self, value):
return self.null_wrapper(value, '')
def lookup_value(self, lookup_type, value):
if lookup_type == 'contains':
return '*%s*' % self.db_value(value)
elif lookup_type == 'icontains':
return '%%%s%%' % self.db_value(value)
else:
return self.db_value(value)
class DateTimeField(Field):
db_field = 'datetime'
def python_value(self, value):
if isinstance(value, basestring):
value = value.rsplit('.', 1)[0]
return datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6])
return value
class IntegerField(Field):
db_field = 'integer'
def db_value(self, value):
return self.null_wrapper(value, 0)
def python_value(self, value):
if value is not None:
return int(value)
class BooleanField(IntegerField):
db_field = 'boolean'
def db_value(self, value):
if value:
return 1
return 0
def python_value(self, value):
return bool(value)
class FloatField(Field):
db_field = 'float'
def db_value(self, value):
return self.null_wrapper(value, 0.0)
def python_value(self, value):
if value is not None:
return float(value)
class DecimalField(Field):
db_field = 'decimal'
field_template = '%(column_type)s(%(max_digits)d, %(decimal_places)d)%(nullable)s'
def get_attributes(self):
return {
'max_digits': 10,
'decimal_places': 5,
}
def db_value(self, value):
return self.null_wrapper(value, decimal.Decimal(0))
def python_value(self, value):
if value is not None:
if isinstance(value, decimal.Decimal):
return value
return decimal.Decimal(str(value))
class PrimaryKeyField(IntegerField):
db_field = 'primary_key'
field_template = "%(column_type)s NOT NULL PRIMARY KEY"
class ForeignRelatedObject(object):
def __init__(self, to, field):
self.to = to
self.field = field
self.field_name = self.field.name
self.cache_name = '_cache_%s' % self.field_name
def __get__(self, instance, instance_type=None):
if not getattr(instance, self.cache_name, None):
id = getattr(instance, self.field_name, 0)
qr = self.to.select().where(**{self.to._meta.pk_name: id})
try:
setattr(instance, self.cache_name, qr.get())
except self.to.DoesNotExist:
if not self.field.null:
raise
return getattr(instance, self.cache_name, None)
def __set__(self, instance, obj):
assert isinstance(obj, self.to), "Cannot assign %s, invalid type" % obj
setattr(instance, self.field_name, obj.get_pk())
setattr(instance, self.cache_name, obj)
class ReverseForeignRelatedObject(object):
def __init__(self, related_model, name):
self.field_name = name
self.related_model = related_model
def __get__(self, instance, instance_type=None):
query = {self.field_name: instance.get_pk()}
qr = self.related_model.select().where(**query)
return qr
class ForeignKeyField(IntegerField):
db_field = 'foreign_key'
field_template = '%(column_type)s%(nullable)s REFERENCES %(to_table)s (%(to_pk)s)%(cascade)s%(extra)s'
def __init__(self, to, null=False, related_name=None, cascade=False, extra=None, *args, **kwargs):
self.to = to
self.related_name = related_name
self.cascade = cascade
self.extra = extra
kwargs.update({
'cascade': ' ON DELETE CASCADE' if self.cascade else '',
'extra': self.extra or '',
})
super(ForeignKeyField, self).__init__(null=null, *args, **kwargs)
def add_to_class(self, klass, name):
self.descriptor = name
self.name = name + '_id'
self.model = klass
if self.to == 'self':
self.to = self.model
self.verbose_name = self.verbose_name or re.sub('_', ' ', name).title()
if self.related_name is None:
self.related_name = klass._meta.db_table + '_set'
klass._meta.rel_fields[name] = self.name
setattr(klass, self.descriptor, ForeignRelatedObject(self.to, self))
setattr(klass, self.name, None)
reverse_rel = ReverseForeignRelatedObject(klass, self.name)
setattr(self.to, self.related_name, reverse_rel)
self.to._meta.reverse_relations[self.related_name] = klass
def lookup_value(self, lookup_type, value):
if isinstance(value, Model):
return value.get_pk()
return value or None
def db_value(self, value):
if isinstance(value, Model):
return value.get_pk()
return value
def class_prepared(self):
# unfortunately because we may not know the primary key field
# at the time this field's add_to_class() method is called, we
# need to update the attributes after the class has been built
self.attributes.update({
'to_table': self.to._meta.db_table,
'to_pk': self.to._meta.pk_name,
})
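# Hedged sketch (illustrative names): ForeignKeyField installs two
# descriptors -- entry.blog lazily fetches and caches the parent row, while
# blog.entries (the related_name) yields a SelectQuery over the child table.
def _foreign_key_example():
    class Blog(Model):
        title = CharField()
    class Entry(Model):
        blog = ForeignKeyField(Blog, related_name='entries')
    # Entry stores the raw column as entry.blog_id; Blog gains 'entries'.
    return Entry, Blog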
# define a default database object in the module scope
database = SqliteDatabase(DATABASE_NAME)
class BaseModelOptions(object):
ordering = None
def __init__(self, model_class, options=None):
# configurable options
options = options or {'database': database}
for k, v in options.items():
setattr(self, k, v)
self.rel_fields = {}
self.reverse_relations = {}
self.fields = {}
self.model_class = model_class
def get_sorted_fields(self):
return sorted(self.fields.items(), key=lambda (k,v): (k == self.pk_name and 1 or 2, v._order))
def get_field_names(self):
return [f[0] for f in self.get_sorted_fields()]
def get_fields(self):
return [f[1] for f in self.get_sorted_fields()]
def get_field_by_name(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError('Field named %s not found' % name)
def get_related_field_by_name(self, name):
if name in self.rel_fields:
return self.fields[self.rel_fields[name]]
def get_related_field_for_model(self, model, name=None):
for field in self.fields.values():
if isinstance(field, ForeignKeyField) and field.to == model:
if name is None or name == field.name or name == field.descriptor:
return field
def get_reverse_related_field_for_model(self, model, name=None):
for field in model._meta.fields.values():
if isinstance(field, ForeignKeyField) and field.to == self.model_class:
if name is None or name == field.name or name == field.descriptor:
return field
def rel_exists(self, model):
return self.get_related_field_for_model(model) or \
self.get_reverse_related_field_for_model(model)
class BaseModel(type):
inheritable_options = ['database', 'ordering']
def __new__(cls, name, bases, attrs):
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
attr_dict = {}
meta = attrs.pop('Meta', None)
if meta:
attr_dict = meta.__dict__
for b in bases:
base_meta = getattr(b, '_meta', None)
if not base_meta:
continue
for (k, v) in base_meta.__dict__.items():
if k in cls.inheritable_options and k not in attr_dict:
attr_dict[k] = v
_meta = BaseModelOptions(cls, attr_dict)
if not hasattr(_meta, 'db_table'):
_meta.db_table = re.sub('[^a-z]+', '_', cls.__name__.lower())
setattr(cls, '_meta', _meta)
_meta.pk_name = None
for name, attr in cls.__dict__.items():
if isinstance(attr, Field):
attr.add_to_class(cls, name)
_meta.fields[attr.name] = attr
if isinstance(attr, PrimaryKeyField):
_meta.pk_name = attr.name
if _meta.pk_name is None:
_meta.pk_name = 'id'
pk = PrimaryKeyField()
pk.add_to_class(cls, _meta.pk_name)
_meta.fields[_meta.pk_name] = pk
_meta.model_name = cls.__name__
for field in _meta.fields.values():
field.class_prepared()
if hasattr(cls, '__unicode__'):
setattr(cls, '__repr__', lambda self: '<%s: %s>' % (
_meta.model_name, self.__unicode__()))
exception_class = type('%sDoesNotExist' % _meta.model_name, (DoesNotExist,), {})
cls.DoesNotExist = exception_class
return cls
class Model(object):
__metaclass__ = BaseModel
def __init__(self, *args, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def __eq__(self, other):
return other.__class__ == self.__class__ and \
self.get_pk() and \
other.get_pk() == self.get_pk()
def get_field_dict(self):
def get_field_val(field):
field_value = getattr(self, field.name)
if not self.get_pk() and field_value is None and field.default is not None:
if callable(field.default):
field_value = field.default()
else:
field_value = field.default
setattr(self, field.name, field_value)
return (field.name, field_value)
pairs = map(get_field_val, self._meta.fields.values())
return dict(pairs)
@classmethod
def table_exists(cls):
return cls._meta.db_table in cls._meta.database.get_tables()
@classmethod
def create_table(cls, fail_silently=False):
if fail_silently and cls.table_exists():
return
cls._meta.database.create_table(cls)
for field_name, field_obj in cls._meta.fields.items():
if isinstance(field_obj, PrimaryKeyField):
cls._meta.database.create_index(cls, field_obj.name, True)
elif isinstance(field_obj, ForeignKeyField):
cls._meta.database.create_index(cls, field_obj.name, field_obj.unique)
elif field_obj.db_index or field_obj.unique:
cls._meta.database.create_index(cls, field_obj.name, field_obj.unique)
@classmethod
def drop_table(cls, fail_silently=False):
cls._meta.database.drop_table(cls, fail_silently)
@classmethod
def filter(cls, *args, **kwargs):
return filter_query(cls, *args, **kwargs)
@classmethod
def select(cls, query=None):
select_query = SelectQuery(cls, query)
if cls._meta.ordering:
select_query = select_query.order_by(*cls._meta.ordering)
return select_query
@classmethod
def update(cls, **query):
return UpdateQuery(cls, **query)
@classmethod
def insert(cls, **query):
return InsertQuery(cls, **query)
@classmethod
def delete(cls, **query):
return DeleteQuery(cls, **query)
@classmethod
def raw(cls, sql, *params):
return RawQuery(cls, sql, *params)
@classmethod
def create(cls, **query):
inst = cls(**query)
inst.save()
return inst
@classmethod
def get_or_create(cls, **query):
try:
inst = cls.get(**query)
except cls.DoesNotExist:
inst = cls.create(**query)
return inst
@classmethod
def get(cls, *args, **kwargs):
return cls.select().get(*args, **kwargs)
def get_pk(self):
return getattr(self, self._meta.pk_name, None)
def save(self):
field_dict = self.get_field_dict()
field_dict.pop(self._meta.pk_name)
if self.get_pk():
update = self.update(
**field_dict
).where(**{self._meta.pk_name: self.get_pk()})
update.execute()
else:
insert = self.insert(**field_dict)
new_pk = insert.execute()
setattr(self, self._meta.pk_name, new_pk)
def delete_instance(self):
return self.delete().where(**{
self._meta.pk_name: self.get_pk()
}).execute()
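# Hedged end-to-end sketch of the Model API above ("Note" is an illustrative
# model, not part of the library):
def _model_crud_example():
    class Note(Model):
        message = CharField()
        created = DateTimeField(default=datetime.now)
    Note.create_table(fail_silently=True)
    note = Note.create(message='hello')    # INSERT; pk assigned by save()
    note.message = 'hello, world'
    note.save()                            # UPDATE keyed on the pk
    fetched = Note.get(id=note.get_pk())   # SELECT via get()
    fetched.delete_instance()              # DELETE keyed on the pk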
| [
"[email protected]"
] | |
5c42760ff78d4b51e4343021b7deb5b12a24f0b4 | 4f6281659c20c11468d147cef6c730a4cbad0d58 | /node_modules/live-server/node_modules/fsevents/build/config.gypi | 803b85c0269b54bd837ac7480240685904eaa6ce | [
"MIT"
] | permissive | devdaddy/storybook-ui | faa85f265d8890c6122cef96d9645a5ba3b370af | 5ddaaf5ebb58413e91ca3bcd0a21e6ea74d94542 | refs/heads/main | 2023-08-13T19:32:56.840543 | 2021-07-21T15:03:16 | 2021-07-21T15:03:16 | 330,773,110 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,667 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt67l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "67",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "7",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/ericsc/Library/Caches/node-gyp/14.15.1",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/sh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/ericsc/.npm-init.js",
"userconfig": "/Users/ericsc/.npmrc",
"cidr": "",
"node_version": "14.15.1",
"user": "502",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/ericsc/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.8 node/v14.15.1 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/tmp",
"unsafe_perm": "",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"[email protected]"
] | |
7a9966931bb8737df6d6d1b33a77d939a8ad824e | 4254d744d58b8a718a7251ca9bab7bb34c056018 | /src/social/settings.py | 2e535631740005044a97e378b40c4350e4cc2200 | [] | no_license | Eyakub/django-social-login | 85b13b552b2719b9c8b1df60a835190bf571da52 | dcf72c533689940a02b6df620235143902f1cc73 | refs/heads/master | 2022-12-21T00:21:11.212002 | 2019-08-27T06:05:13 | 2019-08-27T06:05:13 | 204,456,358 | 0 | 0 | null | 2022-12-08T06:04:55 | 2019-08-26T10:56:32 | Python | UTF-8 | Python | false | false | 3,578 | py | """
Django settings for social project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_p&lm3$1q9g6h7rc_cc*bwem0h0m84mq3j^vau2o-4=oy^d9-='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'social.todo',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'social.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'social.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTHENTICATION_BACKENDS = (
# needed to login by username in django admin, regardless of 'allauth'
'django.contrib.auth.backends.ModelBackend',
# 'allauth' specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
SITE_ID = 1
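# Hedged note (not generated by startproject): django-allauth also needs its
# URLs mounted, e.g. in social/urls.py:
#     from django.urls import include, path
#     urlpatterns = [..., path('accounts/', include('allauth.urls'))]
# SITE_ID above must match the django_site row this project uses.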
LOGIN_REDIRECT_URL = '/' | [
"[email protected]"
] | |
3d0514621d7d21aecbaad5567828f757b3050c7e | b7e924cff3940a94014f7ef83f830f31c61b1ce1 | /Assignments/Data Types and Variables/Exercise/10. Gladiator Expenses.py | bba19e3016d62c9f98abeae636dcb4500ff4f8ff | [
"MIT"
] | permissive | KaloyankerR/python-fundamentals-repository | a1406ca021819ca32390700380646f1107bf078e | b8e69523ea7e6aa352e8398f0202e283374a0f7c | refs/heads/master | 2023-04-10T05:22:43.907759 | 2021-04-20T20:45:54 | 2021-04-20T20:45:54 | 289,025,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | lost_fights_count = int(input())
helmet_price = float(input())
sword_price = float(input())
shield_price = float(input())
armor_price = float(input())
helmets_trashed = 0
swords_trashed = 0
shields_trashed = 0
armors_trashed = 0
for fight in range(1, lost_fights_count + 1):
if fight % 2 == 0:
helmets_trashed += 1
if fight % 3 == 0:
swords_trashed += 1
    if fight % 6 == 0:
        shields_trashed += 1
        # armor breaks on every second trashed shield, not on every even fight
        if shields_trashed % 2 == 0:
            armors_trashed += 1
total_expenses = (helmets_trashed * helmet_price) + (swords_trashed * sword_price) + (shields_trashed * shield_price) + (
        armors_trashed * armor_price)
print(f'Gladiator expenses: {total_expenses:.2f} aureus')
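# Worked check (hedged, sample values): for 7 lost fights with prices
# helmet=10, sword=20, shield=30, armor=50 the loop trashes 3 helmets
# (fights 2, 4, 6), 2 swords (3, 6) and 1 shield (6); armor only breaks on
# every second shield, so expenses = 3*10 + 2*20 + 1*30 = 100.00 aureus.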
| [
"[email protected]"
] | |
aee5f137c27330cda1e66924e85cebf0986034f1 | bc31e2039bead10adccbd4ff0e869ff417ffa8c4 | /external_dns/tests/common.py | a19296b56a3df6fb8fb02ab1ee0930cdabaa1028 | [] | permissive | koreissm/integrations-core | 0a70c0bb26cc534699c3f776678cef0590fc9887 | 4ab56de323dccf977c5f3e7e70b37069365912ad | refs/heads/master | 2020-11-24T23:37:03.463919 | 2019-12-13T17:40:57 | 2019-12-13T17:40:57 | 228,390,125 | 1 | 0 | BSD-3-Clause | 2019-12-16T13:10:24 | 2019-12-16T13:10:23 | null | UTF-8 | Python | false | false | 250 | py | # (C) Datadog, Inc. 2019
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from datadog_checks.dev import get_here
HERE = get_here()
FIXTURE_DIR = os.path.join(HERE, 'fixtures')
CHECK_NAME = 'external_dns'
| [
"[email protected]"
] | |
903c47ea2af331897d25c06feef451857180cf8f | 18dba2f82e17873e5e8161e74bc714ef88b09b36 | /realestate/devrep/migrations/0024_auto__chg_field_partner_note.py | ed1aaeb01c94b5280314b32aee557cdf13b875eb | [] | no_license | sanchellius/estate-agent | 8013573624b62ea3b6362fa0c22edf8371ca6966 | 53c15c2f2c970bd432ae579b5aa6f76ab2fbac49 | refs/heads/master | 2021-01-17T21:15:35.988578 | 2016-07-25T21:51:24 | 2016-07-25T21:51:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,463 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Partner.note'
db.alter_column('devrep_partner', 'note', self.gf('django.db.models.fields.CharField')(max_length=1000, null=True))
def backwards(self, orm):
# Changing field 'Partner.note'
db.alter_column('devrep_partner', 'note', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'devrep.address': {
'Meta': {'ordering': "['id']", 'object_name': 'Address'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locality': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'devrep.citizenship': {
'Meta': {'ordering': "['name']", 'object_name': 'Citizenship'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.clientpartner': {
'Meta': {'unique_together': "(('client', 'partner'),)", 'object_name': 'ClientPartner'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Client']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Partner']"}),
'partner_client_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.PartnerClientStatus']"})
},
'devrep.devprofile': {
'Meta': {'object_name': 'DevProfile'},
'bad_habits': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'coverage_localities': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'person_coverage'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['estatebase.Locality']"}),
'coverage_regions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'person_coverage'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['estatebase.Region']"}),
'experience': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Experience']", 'null': 'True', 'blank': 'True'}),
'gears': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'goods': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['devrep.Goods']", 'null': 'True', 'through': "orm['devrep.GoodsProfileM2M']", 'blank': 'True'}),
'history': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['estatebase.HistoryMeta']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pc_skills': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'progress': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'quality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Quality']", 'null': 'True', 'blank': 'True'}),
'transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'work_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['devrep.WorkType']", 'null': 'True', 'through': "orm['devrep.WorkTypeProfile']", 'blank': 'True'})
},
'devrep.experience': {
'Meta': {'ordering': "['name']", 'object_name': 'Experience'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.extraprofile': {
'Meta': {'object_name': 'ExtraProfile'},
'address': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'extra_profile'", 'unique': 'True', 'null': 'True', 'to': "orm['devrep.Address']"}),
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'birthplace': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'citizenship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Citizenship']", 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'M'", 'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'passport_number': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'passport_series': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'patronymic': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'devrep.gear': {
'Meta': {'ordering': "['name']", 'object_name': 'Gear'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'devrep.goods': {
'Meta': {'ordering': "['name']", 'object_name': 'Goods'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Measure']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150', 'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['devrep.Goods']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'G'", 'max_length': '1', 'db_index': 'True', 'blank': 'True'})
},
'devrep.goodsprofilem2m': {
'Meta': {'unique_together': "(('goods', 'dev_profile'),)", 'object_name': 'GoodsProfileM2M'},
'dev_profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.DevProfile']"}),
'goods': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Goods']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Measure']", 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'devrep.measure': {
'Meta': {'ordering': "['name']", 'object_name': 'Measure'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.partner': {
'Meta': {'ordering': "['name']", 'object_name': 'Partner'},
'address': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'partner'", 'unique': 'True', 'null': 'True', 'to': "orm['devrep.Address']"}),
'clients': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['estatebase.Client']", 'null': 'True', 'through': "orm['devrep.ClientPartner']", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'history': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['estatebase.HistoryMeta']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['devrep.Partner']"}),
'partner_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'partner'", 'to': "orm['devrep.PartnerType']"}),
'person_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'devrep.partnerclientstatus': {
'Meta': {'ordering': "['name']", 'object_name': 'PartnerClientStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.partnertype': {
'Meta': {'ordering': "['name']", 'object_name': 'PartnerType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.quality': {
'Meta': {'ordering': "['name']", 'object_name': 'Quality'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'devrep.worktype': {
'Meta': {'ordering': "['name']", 'object_name': 'WorkType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Measure']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150', 'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['devrep.WorkType']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'devrep.worktypeprofile': {
'Meta': {'unique_together': "(('work_type', 'dev_profile'),)", 'object_name': 'WorkTypeProfile'},
'dev_profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.DevProfile']"}),
'experience': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Experience']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Measure']", 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'price_max': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'price_min': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'quality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.Quality']", 'null': 'True', 'blank': 'True'}),
'work_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['devrep.WorkType']"})
},
'estatebase.client': {
'Meta': {'ordering': "['-id']", 'object_name': 'Client'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'client_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.ClientType']", 'on_delete': 'models.PROTECT'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dev_profile': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'client'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['devrep.DevProfile']", 'blank': 'True', 'unique': 'True'}),
'extra_profile': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'client'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['devrep.ExtraProfile']", 'blank': 'True', 'unique': 'True'}),
'has_dev_profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'history': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['estatebase.HistoryMeta']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'origin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Origin']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'})
},
'estatebase.clienttype': {
'Meta': {'ordering': "['name']", 'object_name': 'ClientType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.geogroup': {
'Meta': {'ordering': "['name']", 'object_name': 'GeoGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.historymeta': {
'Meta': {'object_name': 'HistoryMeta'},
'created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'creators'", 'on_delete': 'models.PROTECT', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modificated': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'updators'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['auth.User']"})
},
'estatebase.locality': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'region'),)", 'object_name': 'Locality'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locality_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.LocalityType']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'name_gent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name_loct': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.Region']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'})
},
'estatebase.localitytype': {
'Meta': {'ordering': "['name']", 'object_name': 'LocalityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'prep_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'sort_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'estatebase.origin': {
'Meta': {'ordering': "['name']", 'object_name': 'Origin'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'estatebase.region': {
'Meta': {'ordering': "['name']", 'object_name': 'Region'},
'geo_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['estatebase.GeoGroup']", 'on_delete': 'models.PROTECT'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metropolis': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'metropolis_region'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['estatebase.Locality']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'regular_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'regular_name_gent': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['devrep'] | [
"[email protected]"
] | |
1d829257f6aa9bd937c16ebe5dc899e634989812 | 0edd49b1f7a34a63bbf9ddf92fbc75fe5d46be63 | /tests/resampling/sliced_wasserstein/test_utils.py | 131dbb1425e598da9f4c013a6bac28c588ed7cb9 | [
"Apache-2.0"
] | permissive | JTT94/filterflow | 10c5a5f4cd8bf27f620c83f1b677b162eb59f5c6 | 5d8300ba247c4c17e1a301a22560c24fd0670bfe | refs/heads/master | 2023-04-17T00:24:06.027624 | 2022-10-27T09:20:11 | 2022-10-27T09:20:11 | 247,799,848 | 39 | 4 | Apache-2.0 | 2020-10-13T08:48:19 | 2020-03-16T19:28:52 | Jupyter Notebook | UTF-8 | Python | false | false | 809 | py | import tensorflow as tf
from scipy.stats.stats import _cdf_distance as st_cdf_distance
from filterflow.resampling.differentiable.loss.sliced_wasserstein import _cdf_distance
class TestUtils(tf.test.TestCase):
def setUp(self):
import numpy as np
self.x = np.random.normal(0., 1., [1, 100])
self.y = np.random.normal(0., 1., [1, 100])
self.w_x = np.random.uniform(0., 1., [1, 100])
self.w_y = np.random.uniform(0., 1., [1, 100])
self.w_x /= self.w_x.sum(1, keepdims=True)
self.w_y /= self.w_y.sum(1, keepdims=True)
def test_cdf_distance(self):
tf_res = _cdf_distance(self.x, self.y, self.w_x, self.w_y)
sc_res = st_cdf_distance(2, self.x[0], self.y[0], self.w_x[0], self.w_y[0])
self.assertAllClose(tf_res[0], sc_res)
| [
"[email protected]"
] | |
c394b74aeadad20ebc853e06278df909788eb6fa | 5bb5a722fbf9e0a29c73ff3184dcbcce1639cf44 | /cafesite/cafeyo/views.py | efcb7fdb6a1398241dea2393aef5ad3d754d8f3b | [] | no_license | software-engineering-term-project/cafe-recommendation | 969afad5ac4a8e5327b3ed50780f6c611717c0b0 | 0c3335455cbb7b87d8a432b8bb0c71dba5105761 | refs/heads/master | 2021-05-24T08:31:35.976128 | 2020-06-26T15:13:04 | 2020-06-26T15:13:04 | 253,471,528 | 0 | 2 | null | 2020-06-06T23:44:07 | 2020-04-06T11:01:30 | HTML | UTF-8 | Python | false | false | 2,981 | py | import random
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.db.models import Max
from .models import Gate, Cafe, Category, Menu
# Create your views here.
class IndexView(generic.TemplateView):
template_name = 'cafeyo/index.html'
class CafelistView(generic.ListView):
model = Cafe
template_name = 'cafeyo/cafe_list.html'
context_object_name = 'cafe_list'
def get_queryset(self):
if 'category_id' in self.kwargs:
return Cafe.objects.filter(문=self.kwargs['gate_id'], 카테고리=self.kwargs['category_id'])
else:
return Cafe.objects.filter(문=self.kwargs['gate_id'])
def get_context_data(self, **kwargs):
context = super(CafelistView, self).get_context_data(**kwargs)
max_id = Cafe.objects.all().aggregate(max_id=Max('id'))['max_id']
if 'category_id' in self.kwargs:
if Cafe.objects.filter(문=self.kwargs['gate_id'], 카테고리=self.kwargs['category_id']).exists():
while True:
rand = random.randint(1, max_id)
context['random_cafe'] = Cafe.objects.filter(
문=self.kwargs['gate_id'], 카테고리=self.kwargs['category_id'], pk=rand).first()
if context['random_cafe']:
break
else:
if Cafe.objects.filter(문=self.kwargs['gate_id']).exists():
while True:
rand = random.randint(1, max_id)
context['random_cafe'] = Cafe.objects.filter(
문=self.kwargs['gate_id'], pk=rand).first()
if context['random_cafe']:
break
context['id_param'] = self.kwargs['gate_id']
return context
class ResultsView(generic.DetailView):
model = Cafe
template_name = 'cafeyo/results.html'
    def get_context_data(self, **kwargs):
        # build the context unconditionally so the view never returns None
        context = super(ResultsView, self).get_context_data(**kwargs)
        max_id = Menu.objects.all().aggregate(max_id=Max('id'))['max_id']
        if Menu.objects.filter(카페=self.kwargs['pk'], 카페인=True).exists():
            while True:
                rand = random.randint(1, max_id)
                context['cafein_menu'] = Menu.objects.filter(
                    카페=self.kwargs['pk'], 카페인=True, pk=rand).first()
                if context['cafein_menu']:
                    break
        if Menu.objects.filter(카페=self.kwargs['pk'], 카페인=False).exists():
            while True:
                rand = random.randint(1, max_id)
                context['decafein_menu'] = Menu.objects.filter(
                    카페=self.kwargs['pk'], 카페인=False, pk=rand).first()
                if context['decafein_menu']:
                    break
        return context
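# Hedged sketch of the URLconf these views expect (route names and converter
# names are illustrative, taken from the kwargs used above):
#     path('', IndexView.as_view(), name='index'),
#     path('gate/<int:gate_id>/', CafelistView.as_view(), name='cafe_list'),
#     path('gate/<int:gate_id>/<int:category_id>/', CafelistView.as_view(),
#          name='cafe_list_by_category'),
#     path('cafe/<int:pk>/', ResultsView.as_view(), name='results'),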
| [
"[email protected]"
] | |
a5cda1f17a107696d4d83cd64a8d802ebe46b3dc | 45093e6470e866dede760bfb7a082bcbdb540adf | /venv/lib/python3.8/site-packages/urllib3/response.py | 02db0b14c4836f0acc9ab5f7518ed488b3fc97e7 | [] | no_license | rwinfield/first-bot | 0dee7441d80abdd0d93c58b73518e373a8d0af18 | ff6b2628767d8a7e8ebc6115dbf3658429764490 | refs/heads/master | 2023-08-17T01:58:41.332059 | 2023-08-04T19:25:52 | 2023-08-04T19:25:52 | 151,766,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/50/f8/0b/9a71e3e33ef56671fc8af60eca77004e27d33b0f4542e914a839dc9027 | [
"[email protected]"
] | |
7211e38c162d1e85a5d4701b938298e7b0341d7a | a7bc7593d72b85f20455ab1d67bffee2ad294011 | /TCP_Server/Multiple_Connections/tcpThreadedServerCallback.py | 9694d25d02b6b179c5140ebaf50cb48375195ea6 | [
"MIT"
] | permissive | EdgeLab-FHDO/Sliding-Window | 3e6e44e907f21006c67e36e9c6b5d93d15128795 | 8cb930202242245a8358c8cda1b8533fde3512fb | refs/heads/master | 2022-12-15T08:19:23.164307 | 2020-09-13T12:44:21 | 2020-09-13T12:44:21 | 293,291,622 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,255 | py | from datetime import datetime
from json import loads, dumps
from pprint import pprint
import socket
from threading import Thread
import time
import argparse
CLOSE_KEY="close_comm"
communication_flag=True
#Receives as an argument the ID of the vehicle
parser = argparse.ArgumentParser()
parser.add_argument("server_port", help="8008-8009...")
args = parser.parse_args()
serverport = int(args.server_port)  # TCP port this server listens on (e.g. 8008)
class ThreadedServer(Thread):
def __init__(self, host, port, timeout=60, callback=None, debug=False):
self.host = host
self.port = port
self.timeout = timeout
self.callback = callback
self.debug = debug
Thread.__init__(self)
# run by the Thread object
def run(self):
if self.debug:
print(datetime.now())
print('SERVER Starting...', '\n')
self.listen()
def listen(self):
# create an instance of socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the socket to its host and port
self.sock.bind((self.host, self.port))
if self.debug:
print(datetime.now())
print('SERVER Socket Bound', self.host, self.port, '\n')
# start listening for a client
self.sock.listen(5)
if self.debug:
print(datetime.now())
print('SERVER Listening...', '\n')
while True:
# get the client object and address
client, address = self.sock.accept()
# set a timeout
client.settimeout(self.timeout)
if self.debug:
print(datetime.now())
print('CLIENT Connected:', client, '\n')
# start a thread to listen to the client
Thread(
target=self.listenToClient,
args=(client, address, self.callback)
).start()
Thread(
target=self.sendToClient,
args=(client, address, 1)
).start()
def listenToClient(self, client, address, callback):
# set a buffer size ( could be 2048 or 4096 / power of 2 )
size = 1024
global CLOSE_KEY
global communication_flag
while communication_flag:
try:
# try to receive data from the client
data = client.recv(size).decode('utf-8')
if data:
#data = loads(data.rstrip('\0'))
if self.debug:
print(datetime.now())
print('CLIENT Data Received', client)
print('Data:')
pprint(data, width=1)
print('\n')
#if data== CLOSE_KEY:
#communication_flag=False
#client.close()
if callback is not None:
callback(client, address, data)
else:
                    raise socket.error('Client disconnected')
except:
if self.debug:
print(datetime.now())
print('CLIENT Disconnected:', client, '\n')
client.close()
return False
def sendToClient(self, client, address,time_sleep):
global communication_flag
#data="{'V_ID':'v001','det_time':'1584882567.7939332','h_ID':'v001-10','type':'ip', 'location':2302.1666666666506}"
data="hello"+str(address)
#send_data="0005hello0002Hi0004hola"
send_data=self.setdata(data, 4)
while communication_flag:
try:
                # try to send data to the client
if send_data:
if self.debug:
print(datetime.now())
print('Data to be sent', client)
client.send(send_data.encode('utf-8'))
else:
raise error('No data to be sent')
except:
if self.debug:
print(datetime.now())
print('CLIENT Disconnected:', '\n')
client.close()
return False
time.sleep(time_sleep)
    def setdata(self, data, header_length):
        # zero-pad the payload length into a fixed-width ASCII header,
        # e.g. setdata("hello", 4) -> "0005hello"
        my_header = str(len(data)).zfill(header_length)
        return my_header + data
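# Hedged client sketch (illustrative) for the fixed-width length framing that
# setdata() produces -- a 4-byte ASCII length header followed by the payload:
#     import socket
#     s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     s.connect(('127.0.0.1', 8008))
#     header = s.recv(4).decode('utf-8')             # e.g. '0014'
#     payload = s.recv(int(header)).decode('utf-8')  # one framed message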
def some_callback(client, address, data):
print('data received', data)
# send a response back to the client
res = {
'cmd': data['cmd'],
'data': data['data']
}
response = dumps(res)
client.send(response.encode('utf-8'))
if __name__ == "__main__":
ThreadedServer('127.0.0.1', serverport, timeout=86400, debug=True).start()
#ThreadedServer('127.0.0.1', 8008, timeout=86400, callback=some_callback, debug=True).start() #to enable a callback | [
"[email protected]"
] | |
d771ce2305c140ba240e770f75da36633ccc5fa2 | a2d7bea260a90b597b4afde27dfb7cc91551e5de | /qrcode.py | 4a7c0a0ffe9d48b61799fe9aea5de678ba2e41a1 | [] | no_license | znsoooo/lsx | 91af25171ab28b5edfc07b127dadf831f062bdef | 4f46815fea3816cbc25911190a1dea03686502c7 | refs/heads/master | 2022-04-26T15:20:39.286159 | 2020-03-28T01:35:22 | 2020-03-28T01:35:22 | 115,320,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | # -*- coding: utf-8 -*-
import Tkinter
from PIL import ImageTk
import qrcode
def qrmake(qrstr):
global n
n = n + 1
print qrstr
qr=qrcode.QRCode(version=19, box_size=3, border=2)
qr.add_data(qrstr)
qr.make(fit=True)
img=qr.make_image()
img.save('qrcode.png')
bm.append(ImageTk.PhotoImage(file = 'qrcode.png'))
label.configure(image = bm[n])
setCenter()
def submit():
global i, data
data = ent.get("0.0", "end")
i = -1
nextpic()
def nextpic():
global i, data
i = i + 1
qrlen = 300
qrmake(data[qrlen*i:qrlen*(i+1)])
def setCenter():
top.update_idletasks()
x = (top.winfo_screenwidth() - top.winfo_reqwidth())/2
y = (top.winfo_screenheight() - top.winfo_reqheight())/2
top.geometry("+%d+%d"%(x,y))
top = Tkinter.Tk()
n = -1
bm = []
label = Tkinter.Label(top)
ent = Tkinter.Text(top, width = 36, height = 5)
btn1 = Tkinter.Button(top, text = 'Set', command = submit)
btn2 = Tkinter.Button(top, text = 'Next', command = nextpic)
label.pack()
ent.pack()
btn1.pack()
btn2.pack()
qrmake('hello, qrc')
top.mainloop()
| [
"[email protected]"
] | |
f75884d586b309a8798fa48c923319bb4832a172 | a0773806514525e722a98c5a93dd7c06b619af54 | /gender_project/1.SystemInstallation/Boto/boto_install_VMS.py | e0203717c1b24ecf93d1156f06c15d63eacb38cd | [] | no_license | wppaul/gender_project | 432594af53813f88b2fa77a282b200aaa19ff005 | 5bdf3eb9f00e832f85a21971abe823338b19fdf8 | refs/heads/master | 2022-06-12T15:33:55.959883 | 2016-06-08T13:34:28 | 2022-06-10T06:49:47 | 60,486,657 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,599 | py | # Author : PENG WANG
# Student Number : 680868
# Supervisor : Prof. Richard Sinnott
# Subject: COMP90055 COMPUTING PROJECT
# Project Name : Gender Identification and Sentiment Analysis on Twitter through machine learning approaches
import boto
import time
from boto.ec2.regioninfo import RegionInfo
#Set up the region and Establish the connection
region=RegionInfo(name='melbourne', endpoint='nova.rc.nectar.org.au')
ec2_comm = boto.connect_ec2(aws_access_key_id='1bf4fd7557a84d559ae85a9455837b78', aws_secret_access_key='9c8d5a0fae2c4c87bd86e42d3ae9fe33', is_secure=True,
                    region=region, port=8773, path='/services/Cloud', validate_certs=False)
#This is used to store the ip addresses of Virtual Machines
VM_ips = []
print("Connection is established!")
#Check Security group and add new groups as well as rules if needed
def check_group_status(groupname):
check = False
group = ec2_comm.get_all_security_groups()
for g in group:
if g.name == groupname:
check = True
return check
def create_security_group(groupname):
check = check_group_status(groupname)
if check == False:
print("creating security group for %s!" %groupname)
security_group = ec2_comm.create_security_group(groupname,"allowing %s access!" %groupname)
if groupname == 'ssh':
print("adding the new rules for %s!" %groupname)
security_group.authorize("tcp",22,22,"0.0.0.0/0")
elif groupname == 'http':
print("adding the new rules for %s!" %groupname)
security_group.authorize("tcp",80,80,"0.0.0.0/0")
security_group.authorize("tcp",443,443,"0.0.0.0/0")
security_group.authorize("tcp",5984,5984,"0.0.0.0/0")
else:
print("This Security Group of \"%s\" is available to use!" %groupname)
# Function for Launching Instances
def launch_instance(num_of_instance):
num = num_of_instance
for i in range(num):
ec2_comm.run_instances('ami-000037b9', key_name='project_key', placement='melbourne',instance_type='m1.small', security_groups=['http','ssh'])
# Function for creating and attaching volumes based on the placement of Instance
def attach_volume(instance):
vol = ec2_comm.create_volume(50,instance.placement)
vol.attach(instance.id,"/dev/vdc")
# Verify the system status and perform the functions correspondingly
def check_status():
print("Creating instances......")
print('Waiting for instances to start......')
launch_instance(4)
reservations = ec2_comm.get_all_reservations()
for i in range(len(reservations)):
instance = reservations[i].instances[0]
status = reservations[i].instances[0].update()
while status == 'pending': # wait till instance is running
time.sleep(30)
print("VM%s is %s" %(i,status))
status = reservations[i].instances[0].update()
if status == 'running': # if instance is running then attach the volume correspondingly
instance.add_tag("Name","VM%s"%i)
VM_ips.append(instance.ip_address)
attach_volume(instance)
print("Instance VM%s is now ready to use" %i)
else:
print('Instance VM%s status:' %i + status)
#generate basic host file for ansible automation processes
def output_host_file():
info = '\n'.join(VM_ips)
path = '/Users/Paul/desktop/hosts'
user = 'ansible_user=ubuntu'
key = 'ansible_private_key_file=/Users/Paul/Desktop/project_key.pem'
with open(path,'w') as f:
f.write('[allservers]\n'+info+'\n\n[allservers:vars]\n'+ user + '\n'+ key)
print('The ansible host file is generated!')
create_security_group('http')
create_security_group('ssh')
check_status()
output_host_file()
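# Hedged follow-up (illustrative): sanity-check the generated inventory before
# running playbooks against the new instances, e.g.
#     ansible -i /Users/Paul/desktop/hosts allservers -m ping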
print("Congratulations! The systems are successfully established!!!") | [
"[email protected]"
] | |
664bf80e2b8b6e0fce11e6abe9c3f7d8b088dd15 | fd0e683a14708fd91f54fa085511988142d8f64b | /etl_user_processing_dag.py | 91e4f6c2fc9c2417b56e94f285cd96491f22c19d | [] | no_license | anhassan/User-Processing-ETL-with-Apache-Airflow | 3f7d6857bf96f26845b3a31f59dcbae814717424 | 7b6dcb4c541f93c4e56d26a663269dcaa2ab4479 | refs/heads/main | 2023-02-28T05:33:03.132505 | 2021-02-08T01:04:11 | 2021-02-08T01:04:11 | 336,905,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,485 | py | from airflow.models import DAG
from datetime import datetime, timedelta
from airflow.providers.sqlite.operators.sqlite import SqliteOperator
from airflow.providers.http.sensors.http import HttpSensor
from airflow.operators.http_operator import SimpleHttpOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
import json
import pandas as pd
def _processing_user(ti):
    user_raw = ti.xcom_pull(task_ids=['fetch_user'])[0]  # must match the SimpleHttpOperator task_id below
if user_raw is not None and "results" in user_raw:
user = user_raw['results'][0]
user_json = {"email": user['email'],
"firstname": user['name']['first'],
"lastname": user['name']['last'],
"country": user['location']['country'],
"username": user['login']['username'],
"password": user['login']['password']}
user_df = pd.DataFrame([user_json])
user_df.to_csv('/tmp/users.csv', index=False, header=False)
default_args = {'start_date': datetime(2021, 2, 5)}
dag = DAG(dag_id="user_processing",
default_args=default_args,
catchup=False,
schedule_interval=timedelta(1)
)
create_table_task = SqliteOperator(
task_id="create_table",
sqlite_conn_id='db_sqlite',
sql='''
CREATE TABLE IF NOT EXISTS users(
email TEXT NOT NULL PRIMARY KEY,
firstname TEXT NOT NULL,
lastname TEXT NOT NULL,
country TEXT NOT NULL,
username TEXT NOT NULL,
password TEXT NOT NULL
);''',
dag=dag
)
is_api_available_task = HttpSensor(
task_id="sense_api_presence",
http_conn_id="user_api",
endpoint="api/",
method="GET",
dag=dag
)
user_fetching_task = SimpleHttpOperator(
task_id="fetch_user",
http_conn_id="user_api",
endpoint="api/",
method="GET",
response_filter=lambda response: json.loads(response.text),
log_response=True,
dag=dag
)
user_filtering_task = PythonOperator(
task_id="filter_user",
python_callable=_processing_user,
dag=dag
)
user_storing_task = BashOperator(
task_id="persist_user",
bash_command='echo -e ".separator ","\n.import /tmp/users.csv users" | sqlite3 /home/airflow/airflow/airflow.db',
dag=dag
)
create_table_task >> is_api_available_task >> user_fetching_task >> user_filtering_task >> user_storing_task
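# Quick local check of the callable (hypothetical values, no real Airflow context):
#     class _FakeTI:
#         def xcom_pull(self, task_ids):
#             return [{"results": [{"email": "[email protected]",
#                                   "name": {"first": "Jane", "last": "Doe"},
#                                   "location": {"country": "US"},
#                                   "login": {"username": "jdoe", "password": "pw"}}]}]
#     _processing_user(_FakeTI())  # writes /tmp/users.csv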
| [
"[email protected]"
] | |
939811b8912ed2c6432484c973e24256f3eb4565 | a4dba9e5085611d906c3c8a0ae18210a0539c366 | /old programs/test1.py | 1bbe1e2678f8ca2f7f671457c29384c14ccc3660 | [] | no_license | itsdeepesh5/PythonProblems | 688ed9684045ab1d74c346e5683e8a05f6c7582d | c316637655a204a92c93839e4e1972db963e3b36 | refs/heads/master | 2020-04-23T05:14:40.314490 | 2019-02-15T22:04:25 | 2019-02-15T22:04:25 | 170,934,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | import mysql.connector
from mysql.connector import Error
def connect():
""" Connect to MySQL database """
    conn = None
    try:
        conn = mysql.connector.connect(host='localhost',
                                       database='sys',
                                       user='root',
                                       password='root')
        if conn.is_connected():
            print('Connected to MySQL database')
    except Error as e:
        print(e)
    finally:
        # guard: conn stays None if connect() raised before assignment
        if conn is not None and conn.is_connected():
            conn.close()
if __name__ == '__main__':
connect() | [
"[email protected]"
] | |
eac51ba7939513e8781980ad7eff33ef416da53e | 365a8edd7ce051528d7d1c95949f346dd040e6ee | /images/beta/api/models.py | aa60f755b46bf7ea11649372a77c6a7cafdfcc22 | [] | no_license | alavarello/tp-redes | 4b773d62267f89ab8025bc7c796c437fda385b05 | 276dbb7e297e52f49858094a6c3b637e7486b067 | refs/heads/master | 2022-10-26T06:03:59.115403 | 2020-06-11T15:54:58 | 2020-06-11T15:54:58 | 270,426,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
def __repr__(self):
return '<User %r>' % self.email
| [
"[email protected]"
] | |
26193cdf761f8077dc688f10883c353f61f3e148 | 07a585035614fd4ea7c34f471fc197562fb76a44 | /locust/test/mock_locustfile.py | f912e5eee49b6f220c6d4be6c0f58c79293b07af | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | voiapp/locust | e8a8bda4ad3a3428810c75eb0dfc12e31fdbe2a2 | 34cb6d1e0792a5ba4e1042a93ffb21cba87c00a8 | refs/heads/master | 2021-05-20T08:11:59.321737 | 2020-03-31T23:44:15 | 2020-03-31T23:44:30 | 252,186,479 | 0 | 0 | MIT | 2020-04-01T13:42:19 | 2020-04-01T13:42:19 | null | UTF-8 | Python | false | false | 1,224 | py | import os
import random
import time
from contextlib import contextmanager
MOCK_LOCUSTFILE_CONTENT = '''
"""This is a mock locust file for unit testing"""
from locust import HttpLocust, TaskSet, task, between
def index(l):
l.client.get("/")
def stats(l):
l.client.get("/stats/requests")
class UserTasks(TaskSet):
# one can specify tasks like this
tasks = [index, stats]
class LocustSubclass(HttpLocust):
host = "http://127.0.0.1:8089"
wait_time = between(2, 5)
task_set = UserTasks
class NotLocustSubclass():
host = "http://localhost:8000"
'''
class MockedLocustfile:
__slots__ = ["filename", "directory", "file_path"]
@contextmanager
def mock_locustfile(filename_prefix="mock_locustfile", content=MOCK_LOCUSTFILE_CONTENT):
mocked = MockedLocustfile()
mocked.directory = os.path.dirname(os.path.abspath(__file__))
mocked.filename = "%s_%s_%i.py" % (
filename_prefix,
str(time.time()).replace(".", "_"),
random.randint(0,100000),
)
mocked.file_path = os.path.join(mocked.directory, mocked.filename)
with open(mocked.file_path, 'w') as file:
file.write(content)
yield mocked
os.remove(mocked.file_path)
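# Usage sketch (hypothetical test code for this module):
#     def test_mock_locustfile():
#         with mock_locustfile() as mocked:
#             assert os.path.exists(mocked.file_path)
#         assert not os.path.exists(mocked.file_path)  # removed on exit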
| [
"[email protected]"
] | |
7d3791445dfd1896e15d32f5c4c64df6ce6269f4 | 39c16220bbbac56a06c39ff05efc4d3260c75cca | /a_b_somadosquadrados.py | 76d68347b38f6677a3e0cb8bc9170f292f16ed5a | [] | no_license | ericklennon/Exercicios-resolvidos | dafa0ca585c9aa0e9af6b0ddc37082223afeb48e | 3e1d3d9a544d6826da5ef6296e4fc79354d32f9d | refs/heads/main | 2023-05-03T11:43:21.285197 | 2021-05-18T22:35:44 | 2021-05-18T22:35:44 | 368,680,948 | 0 | 0 | null | 2021-05-18T22:27:51 | 2021-05-18T22:24:40 | null | UTF-8 | Python | false | false | 233 | py | print('Seja bem-vindo(a)')
print("Let's get started!")
a = float(input('Enter the value of "a": '))
b = float(input('Enter the value of "b": '))
sum_of_squares = a**2 + b**2
hypotenuse = sum_of_squares ** 0.5  # hypotenuse = square root of a**2 + b**2
print('The value of the hypotenuse =', hypotenuse)
| [
"[email protected]"
] | |
ac571b5be106417bf78308ae66630111171b967a | 63b79eb44cf682ece74be1fc866f7651837db448 | /test/test_game_play_coordinates.py | c482bc9cac2940b0ad284f9d969ff2faa9d1ff13 | [] | no_license | bclark86/powerplay-py | c8cc4df8acd9ada91299706b7a7113ab9c963645 | 584d754629936a93d95157356ff806a5c68438dc | refs/heads/main | 2023-07-19T04:23:16.510338 | 2021-09-02T13:17:12 | 2021-09-02T13:17:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | # coding: utf-8
"""
NHL API
Documenting the publicly accessible portions of the NHL API. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import powerplay
from powerplay.models.game_play_coordinates import GamePlayCoordinates # noqa: E501
from powerplay.rest import ApiException
class TestGamePlayCoordinates(unittest.TestCase):
"""GamePlayCoordinates unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGamePlayCoordinates(self):
"""Test GamePlayCoordinates"""
# FIXME: construct object with mandatory attributes with example values
# model = powerplay.models.game_play_coordinates.GamePlayCoordinates() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
3628a8ce50f058c736fb039f85f5847d526b6eba | bd7b5a9908103656604169db275b0804ee536635 | /Fault_Toleraant_Replication_and_Consistency/src/tempSensor/tempSensor.py | 89483b833b149945a02efcf2bdf8e741bf5ff9c2 | [] | no_license | Odey/DOS | c4eae6c318c6b1b1838565e1b06dfc5dc4b7f5d0 | 4fb67d60a4149518bdcb8ed3b5b3bb4cd20e0949 | refs/heads/master | 2020-04-04T22:01:53.630627 | 2018-11-06T02:35:17 | 2018-11-06T02:35:17 | 156,307,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,185 | py | #!/usr/bin/python
'''tempSensor.py: Returns the current temperature value to the Gateway on request from it. Pull Based.
Return types: float, int
'''
import xmlrpclib
import random
import socket
import sys
import threading
import time
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
flagFailed=0
available_gateways = [8001,9001]
gateway_connected_to = 0 ## updated dynamically
######### Temperature Sensor Process ##########################
#########...... Pull Based ..........##########################
def RPCServer():
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = ('/RPC2',)
# Create server
server = SimpleXMLRPCServer(("localhost", 8007), requestHandler=RequestHandler,logRequests=False, allow_none=True)
server.register_introspection_functions()
### Heartbeat to check if the node is alive or not
def is_alive():
return True
server.register_function(is_alive)
##Reconfigure the device in the event of fault recovery.
def reconfigure(failed):
global flagFailed
flagFailed=1
available_gateways.remove(int(failed))
server.register_function(reconfigure)
### creating Temperature sensor object with various attributes and associated get() and set() functions.
class tempSensor:
def __init__(self):
self.name ="tempSensor"
self.ID =0
self.timestamp=0
self.clock =0
self.temp =0 ## initially the temperature is set to 0
self.offset =0
def get_type(self, c): ## getting type of the node: sensor/device
if self.clock<=c[0]:
self.clock=c[0]+1
else:
self.clock = self.clock + 1
return "Sensor"
def Clock(self): ## reporting the clock in case of events
self.clock=self.clock+1
return [self.clock]
def get_state(self, c): ## getting the current temperature
if self.clock<=c[0]:
self.clock=c[0]+1
else:
self.clock = self.clock + 1
self.temp=random.randint(40,80) ## temperature is randomly generated.
self.clock_plot(c+[self.clock])
return self.temp
        def clock_plot(self,sequence): ## for reporting the logical clock for event ordering
            return 1  # plotting is disabled by this early return; the gateway calls below are unreachable
            gateway = xmlrpclib.ServerProxy('http://localhost:8001')
            gateway.eventplot(sequence)
def get_timestamp(self): ## returns synced timestamp for any event
self.timestamp = time.time()+self.offset
return self.timestamp
def correct_time(self, val): ## corrects the time of this node during clock synchronization.
self.offset = val
self.timestamp = self.timestamp + val
print "corrected time ", self.timestamp
def get_ID(self, c): ## id of this node
if self.clock<=c[0]:
self.clock=c[0]+1
else:
self.clock = self.clock + 1
return self.ID
def get_name(self, c): ## name of this node
if self.clock<=c[0]:
self.clock=c[0]+1
else:
self.clock = self.clock + 1
return self.name
def set_ID(self, ID, c): ## setting ID,happens only once during registration with the gateway.
if self.clock<=c[0]:
self.clock=c[0]+1
else:
self.clock = self.clock + 1
self.ID=ID
server.register_instance(tempSensor())
try:
server.serve_forever()
    except KeyboardInterrupt:
        print "Exiting"
        server.server_close()  # SimpleXMLRPCServer provides server_close(), not close()
        sys.exit()
def get_red(text):
    print "\033[1;31m"+text+"\033[1;m"
    return
## Temperature sensor registers itself with the gateway.
## It does not do anything on its own after that.(no push only pull)
def RPCClient():
global gateway_connected_to
for item in available_gateways:
proxyGateway = xmlrpclib.ServerProxy('http://localhost:'+str(item))
load = proxyGateway.get_load_status()
if int(load)>=3:
continue
else:
gateway_connected_to = item
break
get_red("tempSensor connected to "+str(gateway_connected_to))
temp1 = xmlrpclib.ServerProxy('http://localhost:8007')
newID=proxyGateway.register(temp1.get_type(temp1.Clock()), temp1.get_name(temp1.Clock()))
temp1.set_ID(newID, temp1.Clock())
#Lock = threading.Lock()
thread1 = threading.Thread(target=RPCServer)
thread1.start()
time.sleep(0.1)
thread2 = threading.Thread(target=RPCClient)
thread2.start()
| [
"[email protected]"
] | |
0d57683a61dfaa651ed6bf5b91d90af7036cacd0 | f258338e5ffe7a9954572d917bf483d9bcd5852c | /Term-2/Project-2-RNN-Time-Series-Prediction-and-Text-Generation/my_answers.py | 13cf6de70a846e422dd7d259231316a483645349 | [
"MIT"
] | permissive | rayheberer/udacity-AI | 8826d0ef446580b847ba0e58e83f99c5b89e87fb | 60bc03477130ba4f37dcfd30737da0fd1632d85e | refs/heads/master | 2020-03-13T00:16:42.481077 | 2018-07-12T01:21:31 | 2018-07-12T01:21:31 | 130,883,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,045 | py | import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import keras
# fill out the function below that transforms the input series
# and window-size into a set of input/output pairs for use with our RNN model
def window_transform_series(series, window_size):
# containers for input/output pairs
X = []
y = []
for i in range(len(series)-window_size):
X.append(series[i:i+window_size])
y.append(series[i+window_size])
# reshape each
X = np.asarray(X)
X.shape = (np.shape(X)[0:2])
y = np.asarray(y)
y.shape = (len(y),1)
return X,y
# build an RNN to perform regression on our time series input/output data
def build_part1_RNN(window_size):
model = Sequential()
model.add(LSTM(5, input_shape=(window_size, 1)))
model.add(Dense(1))
return model
# return the text input with only ascii lowercase and the punctuation given below included.
def cleaned_text(text):
punctuation = ['!', ',', '.', ':', ';', '?']
lowercase = 'abcdefghijklmnopqrstuvwxyz'
for char in set(text):
if char not in punctuation and char not in lowercase:
text = text.replace(char, ' ')
return text
# fill out the function below that transforms the input text and window-size into a set of input/output pairs for use with our RNN model
def window_transform_text(text, window_size, step_size):
# containers for input/output pairs
inputs = []
outputs = []
for ix in range(0, len(text)-window_size, step_size):
inputs.append(text[ix:ix+window_size])
outputs.append(text[ix+window_size])
return inputs, outputs
# build the required RNN model:
# a single LSTM hidden layer with softmax activation, categorical_crossentropy loss
def build_part2_RNN(window_size, num_chars):
model = Sequential()
model.add(LSTM(200, input_shape=(window_size, num_chars)))
model.add(Dense(num_chars, activation='softmax'))
return model
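# Usage sketch (hypothetical data; the shapes are the only point of interest):
#     series = np.arange(20)
#     X, y = window_transform_series(series, window_size=4)
#     model = build_part1_RNN(window_size=4)
#     model.compile(loss='mean_squared_error', optimizer='adam')
#     model.fit(X.reshape(X.shape[0], 4, 1), y, epochs=5, batch_size=8)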
| [
"[email protected]"
] | |
c64c5449dc8fe3ced7cb038841a9b78bb23a72a9 | 5e5413977a65a2b44b9f0494864a7f1d863cedee | /tweety/src/tweety/accounts/models.py | f3eee341b4f3919ac23651dce1fc0a3e4773a90f | [] | no_license | tiwarianup/django-projects | ad7c627a6ea4620ca3d872bd20deb815f6b726ef | 8bafa582f25d6e3185540cf09becfafdda095489 | refs/heads/master | 2020-06-04T04:19:01.128465 | 2019-07-10T04:40:58 | 2019-07-10T04:40:58 | 191,869,396 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,440 | py | from django.db import models
from django.conf import settings
from django.urls import reverse_lazy
from django.db.models.signals import post_save
# Create your models here.
class UserProfileManager(models.Manager):
use_for_related_fields = True
def all(self):
qs = self.get_queryset().all()
try:
if self.instance:
qs = qs.exclude(user=self.instance)
        except AttributeError:  # manager was not accessed via a related instance
pass
return qs
def toggleFollow(self, user, toToggleUser):
userProfile, created = UserProfile.objects.get_or_create(user=user)
if toToggleUser in userProfile.following.all():
userProfile.following.remove(toToggleUser)
added = False
else:
userProfile.following.add(toToggleUser)
added = True
return added
    def isFollowing(self, user, followedByUser):
userProfile, created = UserProfile.objects.get_or_create(user=user)
if created:
return False
if followedByUser in userProfile.following.all():
return True
return False
    def recommended(self, user, limitTo=10):
        profile = user.profile
        following = profile.get_following()
        qs = self.get_queryset().exclude(user__in=following).exclude(id=profile.id).order_by("?")[:limitTo]
        return qs
class UserProfile(models.Model):
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='profile')
following = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True, related_name='followed_by')
# user.profile.following -> users i follow
# user.followed_by -> users that follow me - reverse relationship
objects = UserProfileManager()
def __str__(self):
return str(self.user.username)
def get_following(self):
return self.following.all().exclude(username=self.user.username)
def get_follow_url(self):
return reverse_lazy("profiles:follow", kwargs={"username":self.user.username})
def get_absolute_url(self):
return reverse_lazy("profiles:detail",kwargs={"username":self.user.username})
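# Usage sketch (hypothetical view code, not part of this module):
#     added = UserProfile.objects.toggleFollow(request.user, other_user)
#     UserProfile.objects.isFollowing(request.user, other_user)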
def post_save_user_receiver(sender, instance, created, *args, **kwargs):
    if created:
        UserProfile.objects.get_or_create(user=instance)
post_save.connect(post_save_user_receiver, sender=settings.AUTH_USER_MODEL) | [
"[email protected]"
] | |
1b24ffede505ce8b2a302f8347e5af6d4abb194a | ff588c5b395c7d12171c4492d56d3ccc73f6c283 | /tsmsognScrapy/pipelines.py | 4b5f4a25d5d1ece24ab03c517adb5b7e51a3982b | [] | no_license | tsmsogn/scrapy_getting_started | c3c2db9c6c33083863c8486d655638e7ff4d4166 | 46ce687b8a440d5508ff2b35a42a3a3d332e6215 | refs/heads/master | 2021-05-11T16:20:34.911861 | 2018-01-17T01:16:24 | 2018-01-17T02:44:18 | 117,761,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class TsmsognscrapyPipeline(object):
def process_item(self, item, spider):
return item
| [
"[email protected]"
] | |
f013d68d8e05bec898c19a70247a883399f8252b | 807bb7e63bfacb4582fa61e03ff0c34d23084d00 | /latihan02.py | 993c7098607ccb19782cbf5cb9ad86db00de1028 | [] | no_license | Ibadnf/latihan02 | 3551e840e6900137ef48c1c3bb15d6f2a2dff8f6 | 4482f21f85586db719a6af1aa7228556b44de103 | refs/heads/main | 2022-12-31T15:30:55.336436 | 2020-10-21T04:28:57 | 2020-10-21T04:28:57 | 303,573,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py |
number=1+1+2+3/4.0
print(number)
mod=10%3 # remainder of dividing 10 by 3
print(mod)
square=7**2 # exponentiation
print(square)
helloworld="Hello"+" "+"World"
print(helloworld)
myName="Luke"
print(f"Hello, {myName}")
mylist=["Luke Skywalker","Anakin Skywalker","Yoda","Padme"]
print(mylist)
print(mylist[::-1])
for x in mylist:
print(x) | [
"[email protected]"
] | |
82e92e8bdfc250134fffea8b3a0a788f2edb446a | 17c2a9e1de8615be1ec1c17a6ec12a4501f290aa | /jobparser/spiders/hhru.py | bd2485e1c1ef5acbccb50a557540fb72a32eeffb | [] | no_license | Belfi-Gor/data_mining | 7ca0a73e482c767d03b1c6a0f1913d7622d7df36 | 6bf9a5f8703e2d6ddf12ad9da6931fdf38794114 | refs/heads/master | 2020-07-24T23:15:38.101896 | 2020-01-10T09:38:26 | 2020-01-10T09:38:26 | 208,079,647 | 0 | 0 | null | 2020-01-10T09:38:28 | 2019-09-12T15:09:30 | Python | UTF-8 | Python | false | false | 1,125 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.http import HtmlResponse
from jobparser.items import JobparserItem
class HhruSpider(scrapy.Spider):
name = 'hhru'
allowed_domains = ['hh.ru']
start_urls = ['https://hh.ru/search/vacancy?area=113&st=searchVacancy&text=python']
def parse(self, response:HtmlResponse):
        next_page = response.css("a.HH-Pager-Controls-Next::attr(href)").extract_first()
        if next_page:  # extract_first() returns None on the last page
            yield response.follow(next_page, callback=self.parse)
vacancy = response.css('div.vacancy-serp div.vacancy-serp-item div.vacancy-serp-item__row_header a.bloko-link::attr(href)').extract()
for link in vacancy:
yield response.follow(link, callback=self.vacancy_parse)
def vacancy_parse(self, response: HtmlResponse):
name = response.css("div.vacancy-title h1.header::text").extract_first()
salary = response.css("div.vacancy-title p.vacancy-salary::text").extract_first()
company = ''.join(response.css('a.vacancy-company-name span::text').extract())
yield JobparserItem(name=name, salary=salary, company=company)
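# Run sketch (assumes the standard Scrapy project layout that provides jobparser.items):
#     scrapy crawl hhru -o vacancies.csv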
| [
"[email protected]"
] | |
235eb291a1c42d067f9cdaccc1d16d535d9e3e62 | f39e22eb902bf884fd3a862ef2def95b916f303d | /code.py | a6b946c0657dc5f66b02af9d019f0d2fa20aebff | [
"MIT"
] | permissive | TheHockeyist/TheHockeyistBot | eeeb8298ac3b0e43a6ade0e4d25114562c5bbc43 | 6a22caab3ea68f5b4cda085c3e1fe6a0fc090486 | refs/heads/master | 2021-01-19T10:52:08.044814 | 2017-02-27T20:50:47 | 2017-02-27T20:50:47 | 82,220,724 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | # To be planned out.
# Ideas for what it should do, hmmm...
# Mathematical calculations, hockey statistics, I don't know...
| [
"[email protected]"
] | |
a335f7cb8da78ddcf79d0b037399a66f51813b90 | 00aac96ce48372a6065551fb0922bb670b427987 | /half_random_forest/code/half_random_forest.py | 20a38fee49c75b2be76065183970f65077c6c1e4 | [
"MIT"
] | permissive | Niederb/python_machine_learning | c67709b8ef9204c295f769310935baf9cd1f3d43 | 8512f419c3b8f07ef0aa12aeaddabec5129d03cd | refs/heads/master | 2021-01-10T15:06:23.892598 | 2016-02-15T20:31:25 | 2016-02-15T20:31:25 | 47,146,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,533 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 22:22:33 2015
@author: thoma
"""
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_digits
from random import sample
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from scipy.stats import mode
from sklearn.preprocessing import Imputer
digits = load_digits()
print(digits.data.shape)
test_percentage = 0.5
f_t, f_v, c_t, c_v = train_test_split(digits.data, digits.target, test_size=test_percentage)
nan_prob_t = 0.0
nan_mask_t = np.random.binomial(n=1, p=nan_prob_t, size=np.shape(f_t))
nan_f_t = f_t
nan_f_t[nan_mask_t==1] = np.nan
nan_prob_v = 0.0
nan_mask_v = np.random.binomial(n=1, p=nan_prob_v, size=np.shape(f_v))
nan_f_v = f_v
nan_f_v[nan_mask_v == 1] = np.nan
class HalfRF:
def __init__(self, data, classes, tree_features, n_trees=100):
self.n_features = np.shape(data)[1]
n_rows = np.shape(data)[0]
n_nans = np.sum(np.isnan(data), 0)
data = data[:, n_nans < n_rows]
self.n_features = np.shape(data)[1]
n_nans = np.sum(np.isnan(data), 1)
data = data[n_nans < self.n_features, :]
self.n_rows = np.shape(data)[0]
if (tree_features > self.n_features):
tree_features = self.n_features
self.col_list = np.zeros((n_trees, tree_features), dtype='int')
self.n_trees = n_trees
self.bags = []
for i in range(n_trees):
cols = sample(range(self.n_features), tree_features)
cols.sort()
self.col_list[i, :] = cols
data_temp = data[:, cols]
n_nans = np.sum(np.isnan(data_temp), 1)
data_temp = data_temp[n_nans == 0, :]
classes_temp = classes[n_nans == 0]
#bag = BaggingClassifier(n_estimators=1, max_features=tree_features)
bag = RandomForestClassifier(n_estimators=1, max_features=tree_features)
bag.fit(data_temp, classes_temp)
self.bags.append(bag)
print(np.shape(data_temp))
def classify(self, data):
nan_cols = np.arange(self.n_features)[np.isnan(data)]
decisions = []
s1 = set(nan_cols)
for i in range(self.n_trees):
cols = self.col_list[i]
s2 = set(cols)
if len(s1.intersection(s2)) > 0:
#decisions[i] = -1
continue
decisions.append(self.bags[i].predict(data[cols]))
if (len(decisions) == 0):
return (-1, 0, 0)
return (mode(decisions)[0][0][0], mode(decisions)[1][0][0], len(decisions))
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
imp.fit(f_t)
imp_f_t = imp.transform(f_t)
imp_f_v = imp.transform(f_v)
n_trees = 300
tree_features = 64
clf = HalfRF(imp_f_t, c_t, tree_features, n_trees)
n_validation = np.shape(f_v)[0]
results = np.zeros((n_validation, 3))
for i in range(n_validation):
v_item = imp_f_v[i, :]
(prediction, votes, total_votes) = clf.classify(v_item)
results[i, :] = (prediction, votes, total_votes)
#print("%f/%f" % (prediction, c_v[i]))
print(1.0* sum(results[:, 0] == c_v)/n_validation)
print(sum(results[:, 2] == 0))
print(np.mean(results[:, 2]))
imp_clf = RandomForestClassifier(n_trees, max_features=tree_features)
imp_clf.fit(imp_f_t, c_t)
imp_prediction = imp_clf.predict(imp_f_v)
print(1.0*sum(imp_prediction == c_v)/n_validation)
print("Hello World") | [
"[email protected]"
] | |
44ac30f5c599cfbc4fe9fb1aa72957353874147a | 517cefe4090a96ff042046c165ae235e840712c8 | /task2/untitled.py | 5b716c95dadbb44d8366e936e113a87e974189fc | [] | no_license | az2181036/seclass | 19ba1e292627f2da578831a9aa777e97fc540d05 | 2c66dd710104f51f4128ac1107f3ac5b284a8882 | refs/heads/master | 2020-05-05T06:09:14.096570 | 2019-05-20T13:14:04 | 2019-05-20T13:14:04 | 179,777,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,298 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'untitled.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget.setGeometry(QtCore.QRect(50, 120, 591, 401))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.rst = QtWidgets.QTextBrowser(self.gridLayoutWidget)
self.rst.setEnabled(False)
self.rst.setObjectName("rst")
self.gridLayout.addWidget(self.rst, 5, 0, 1, 1)
self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_2.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(50)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 4, 0, 1, 1)
self.num2 = QtWidgets.QTextEdit(self.gridLayoutWidget)
self.num2.setObjectName("num2")
self.gridLayout.addWidget(self.num2, 3, 0, 1, 1)
self.label = QtWidgets.QLabel(self.gridLayoutWidget)
self.label.setEnabled(False)
font = QtGui.QFont()
font.setPointSize(50)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 2, 0, 1, 1)
self.num1 = QtWidgets.QTextEdit(self.gridLayoutWidget)
self.num1.setObjectName("num1")
self.gridLayout.addWidget(self.num1, 1, 0, 1, 1)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(690, 460, 81, 51))
font = QtGui.QFont()
font.setPointSize(12)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setEnabled(False)
self.label_3.setGeometry(QtCore.QRect(170, 50, 361, 41))
font = QtGui.QFont()
font.setPointSize(26)
font.setBold(True)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 20))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.pushButton.clicked.connect(self.compute)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label_2.setText(_translate("MainWindow", "="))
self.label.setText(_translate("MainWindow", "×"))
self.pushButton.setText(_translate("MainWindow", "Compute"))
self.label_3.setText(_translate("MainWindow", "Big number multiply"))
def compute(self):
num1 = self.num1.toPlainText()
num2 = self.num2.toPlainText()
if not num1.isdigit() or not num2.isdigit():
reply = QMessageBox.warning(self.pushButton, 'Warning', 'Please input a positive integer.', \
QMessageBox.Yes)
else:
num1 = int(num1)
num2 = int(num2)
self.rst.setText(str(num1*num2))
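# Minimal runner sketch (an assumption: this generated class is launched
# standalone rather than imported by a separate entry point):
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())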
| [
"[email protected]"
] | |
e6a3c636ec00dbde2939e81839b5db363579020f | ae7c0f2ce24ee11be70b6514ed43b647a17cefff | /posts/admin.py | 3bf168e5e58edbd1cee32e3e862344db15bf7861 | [] | no_license | spinning210/dSite | f1480d6821b9d9637cfe4d245b6a484fb0a1350d | 745bad9ecc3e25e48f98caec7af70aeb42726241 | refs/heads/master | 2023-02-01T08:43:32.016324 | 2020-12-11T12:54:44 | 2020-12-11T12:54:44 | 309,000,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 530 | py | from django.contrib import admin
from .models import Location, Post
# Register your models here.
class LocationAdmin(admin.ModelAdmin):
list_display = ('id', 'name')
class PostAdmin(admin.ModelAdmin):
list_display = ('subject', 'content', 'author', 'location')
exclude = ('create_date', )
#fields = ('subject', 'content', 'author', 'location')
admin.site.register(Location, LocationAdmin) # register with the admin site (admin backend)
admin.site.register(Post, PostAdmin) # register with the admin site (admin backend) | [
"[email protected]"
] | |
7b3e65bcbef044c3c760b54c83f76f8d1890daac | 9d5a2a3971b6248844bd4e0e3905273e59ef8fb8 | /02 BuiltIn Functions/itertools_start.py | dd16551a4f212ce0b432c7322675874e0f5b93f4 | [] | no_license | jssoni14/AdvancedPython | 7c9815a1a91717f47f70cf8cf37ab24b437f424f | 5c6c16df9db72ab15898bab424dec6bc65a91575 | refs/heads/master | 2023-06-03T10:54:16.687348 | 2021-06-11T21:24:17 | 2021-06-11T21:24:17 | 375,839,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | # advanced iteration functions in the itertools package
import itertools
def testFunction(x):
return x<40
def main():
# TODO: cycle iterator can be used to cycle over a collection
seq1 = ["Joe", "John", "Mike"]
cycle1 = itertools.cycle(seq1)
print(next(cycle1))
print(next(cycle1))
print(next(cycle1))
# TODO: use count to create a simple counter
count1 = itertools.count(100,10)
print(next(count1))
print(next(count1))
print(next(count1))
print(next(count1))
# TODO: accumulate creates an iterator that accumulates values
vals = [10,20,30,40,50,40,30]
acc = itertools.accumulate(vals, max)
print(list(acc))
# TODO: use chain to connect sequences together
x = itertools.chain("ABCD","1234")
print(list(x))
# TODO: dropwhile and takewhile will return values until
# a certain condition is met that stops them
print(list(itertools.dropwhile(testFunction, vals)))
print(list(itertools.takewhile(testFunction, vals)))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c60e78dfe5a97c9bb01f35d199028d137100d2ee | 5a86ddc489b786f13ba11f0eddabbc0bbb94bfca | /template.py | 362e56931545c46e2888a9a12bf12c6094447df1 | [
"MIT"
] | permissive | tobiaslidstrom/CalibrePathTP | 86ba20ca2b541e9c781881695e02c223b38eebe3 | 9d78a9d4160ecfa86081212af733745dfac113c0 | refs/heads/main | 2023-02-17T20:55:40.768206 | 2021-01-15T19:59:24 | 2021-01-15T19:59:24 | 329,997,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | program:
tags = sublist(raw_list("tags", ","), 0, 1, ",");
author = sublist(raw_list('authors', ", "), 0, 2, ", ");
series = strcat(field("series"), " (", field("series_index"), ") - ");
title = re(field("title"), ":", " -");
if field("series") == "" then
series = ""
fi;
title_trim = strcat(substr(title, 0, 50), " […]");
title = cmp(strlen(title), 50, title, title, title_trim);
strcat(tags, "/", author, "/", series, title, "/", author, " - ", title);
| [
"[email protected]"
] | |
65d5b9bf0861114c0d692e35fa09b9aabbd749b3 | 6483f7f3989685a8dba5a5d8b670c4e4d66bd295 | /basic_system/views.py | 05a8d8360cbcc440ca61851291f650838ca3e6ec | [
"MIT"
] | permissive | JagerCox/basic-project-django2-channels2-gunicorn-dahpne | 531ec1f818ce78be8a1331cc53aa94b5961629b5 | 5329a06d251706f6f02cf202dff04959982d8214 | refs/heads/master | 2020-05-06T13:30:26.095687 | 2019-04-08T12:11:17 | 2019-04-08T12:11:17 | 180,140,830 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | import json
from django.shortcuts import render
from django.utils.safestring import mark_safe
def index(request):
return render(request, 'basic_system/index.html', {})
def room(request, room_name):
return render(request, 'basic_system/room.html', {
'room_name_json': mark_safe(json.dumps(room_name))
})
| [
"[email protected]"
] | |
7a44bea4a795b724b445971f61de8cd2ad70e5d8 | d88a920852df1d7dd7c7cfde00785021f0f4ab34 | /config/settings/test.py | 000ec7c53dc8061794e6d3ceb82289d37908a2cc | [
"MIT"
] | permissive | DavidLSO/weather_tracker | 891f24fd512941957f17e8f625d8ad3e16fa992a | 74df430341b0415f476f92aaf46ecd6c7e9f37f1 | refs/heads/master | 2022-12-11T18:23:19.572732 | 2019-05-26T17:27:23 | 2019-05-26T17:27:23 | 188,711,793 | 0 | 0 | MIT | 2022-11-22T03:51:29 | 2019-05-26T17:20:32 | Python | UTF-8 | Python | false | false | 2,044 | py | """
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="dDLoKgrOcjX4uNehPCdAojLoYIVntDrQTtkgfdBgCYDqayZJwyOEiNSpBT6w6VvN",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG # noqa F405
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = "localhost"
# https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = 1025
# Your stuff...
# ------------------------------------------------------------------------------
| [
"[email protected]"
] | |
b86346c7eb137597a3ace943be68d03eff5a74b1 | 0018a062b99c2154de254d2ad9da81125ae7bbe0 | /face_cluster.py | 7da1ef4f6f701275aec513b7e213218d29c13653 | [] | no_license | kesuosi/face_cluster | 971f042f6573641e69ce3752dab67d4444b96a63 | f7e90d658b3d264b8e8118c133b22c74a5b37edf | refs/heads/master | 2020-03-21T04:57:15.250329 | 2018-06-21T07:48:29 | 2018-06-21T07:48:29 | 138,136,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,429 | py | # coding: utf-8
"""
@author: xhb
"""
import sys
import os
import dlib
import glob
import cv2
import time
# Specify the paths
current_path = os.getcwd()
model_path = current_path + '/model/'
shape_predictor_model = model_path + '/shape_predictor_5_face_landmarks.dat'
face_rec_model = model_path + '/dlib_face_recognition_resnet_model_v1.dat'
face_folder = current_path + '/faces/'
output_folder = current_path + '/output/'
# Load the models
detector = dlib.get_frontal_face_detector()
shape_detector = dlib.shape_predictor(shape_predictor_model)
face_recognizer = dlib.face_recognition_model_v1(face_rec_model)
# A few lists to make the later steps easier
descriptors = []
images = []
# Iterate over all images in the faces folder
for f in glob.glob(os.path.join(face_folder, "*.png")):
# print('Processing file:{}'.format(f))
    # Read the image
img = cv2.imread(f)
    # Convert to the RGB color space
img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Detect faces
dets = detector(img2, 1)
# print("Number of faces detected: {}".format(len(dets)))
    # Iterate over all detected faces
for index, face in enumerate(dets):
        # Detect the facial landmarks
shape = shape_detector(img2, face)
        # Project to the 128-D descriptor space
face_descriptor = face_recognizer.compute_face_descriptor(img2, shape)
# print(type(face_descriptor))
        # Save the related information
descriptors.append(face_descriptor)
images.append((img2, shape))
# Clustering
ti1 = time.time()
labels = dlib.chinese_whispers_clustering(descriptors, 0.6)
ti2 = time.time()
delta_time = ti2 - ti1
print(labels)
print('clustering cost time: {}'.format(str(delta_time)))
print("labels: {}".format(labels))
num_classes = len(set(labels))
print("Number of clusters: {}".format(num_classes))
# Keep the results in a dict for easier handling
face_dict = {}
for i in range(num_classes):
face_dict[i] = []
print (face_dict)
for i in range(len(labels)):
face_dict[labels[i]].append(images[i])
# print(face_dict)
print (face_dict.keys())
# Iterate over the dict and save the results
for key in face_dict.keys():
file_dir = os.path.join(output_folder, str(key))
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
for index, (image, shape) in enumerate(face_dict[key]):
file_path = os.path.join(file_dir, 'face_' + str(index))
# print (file_path)
dlib.save_face_chip(image, shape, file_path, size=150, padding=0.25)
| [
"[email protected]"
] | |
f559375572f1a380d65ced85f30cdcdc69e0c335 | af8adcdab990ad84f5ac88b321c0e3fc03ddb247 | /079_Word_Search.py | 2156e8d40f1113f9a849f98fc57815632c09f523 | [] | no_license | xudong-sun/leetcode | eafe8bf0336cf3766a0e49230881bae24f8dbb6f | 6f41bc3915881e3c0431550f497b297b7b5a38fa | refs/heads/master | 2023-06-12T16:48:57.449716 | 2023-06-08T06:05:31 | 2023-06-08T06:05:31 | 88,631,317 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,357 | py | '''
Given a 2D board and a word, find if the word exists in the grid.
The word can be constructed from letters of sequentially adjacent cell, where "adjacent" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once.
For example,
Given board =
[
['A','B','C','E'],
['S','F','C','S'],
['A','D','E','E']
]
word = "ABCCED", -> returns true,
word = "SEE", -> returns true,
word = "ABCB", -> returns false.
'''
class Solution(object):
def _find(self, level, x, y):
if level == len(self.word):
self.found = True
return
if x > 0 and not self.visited[x-1][y] and self.board[x-1][y] == self.word[level]:
self.visited[x-1][y] = True
self._find(level+1, x-1, y)
self.visited[x-1][y] = False
if self.found: return
if x < self.m - 1 and not self.visited[x+1][y] and self.board[x+1][y] == self.word[level]:
self.visited[x+1][y] = True
self._find(level+1, x+1, y)
self.visited[x+1][y] = False
if self.found: return
if y > 0 and not self.visited[x][y-1] and self.board[x][y-1] == self.word[level]:
self.visited[x][y-1] = True
self._find(level+1, x, y-1)
self.visited[x][y-1] = False
if self.found: return
if y < self.n -1 and not self.visited[x][y+1] and self.board[x][y+1] == self.word[level]:
self.visited[x][y+1] = True
self._find(level+1, x, y+1)
self.visited[x][y+1] = False
def exist(self, board, word):
if len(word) == 0: return True
self.word = word
self.board = board
self.m = len(board)
if self.m == 0: return False
self.n = len(board[0])
if self.n == 0: return False
self.found = False
self.visited = [[False] * self.n for _ in xrange(self.m)]
for i in xrange(self.m):
for j in xrange(self.n):
if board[i][j] == word[0]:
self.visited[i][j] = True
self._find(1, i, j)
self.visited[i][j] = False
if self.found: return True
return self.found
if __name__ == '__main__':
#print Solution().exist(['abce','sfcs','adee'], 'q')
print Solution().exist(['aa'], 'aaa')
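    # Expected: False ("aaa" needs three adjacent a's; the board has only two).
    # Docstring example as a sketch (should print True, True, False):
    #     board = ['ABCE', 'SFCS', 'ADEE']
    #     for w in ('ABCCED', 'SEE', 'ABCB'):
    #         print Solution().exist(board, w)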
| [
"[email protected]"
] | |
f2fb741ac114ed73288bcee38cb59153a3b97a0c | a41c1a198a74536be3e46be8951f8a7ef6b8130a | /portal_radicaciones/migrations/0012_auto_20201002_1818.py | 6e74ca5179b2fd0df70159174885c97b6a158b61 | [] | no_license | ricardoqp1997/FamedicPortal | ae54dea15865da942cf63069fd467eb93ab2fbd0 | 47934f7a20e782d2287c7ca153d6cf2fa0f425e6 | refs/heads/master | 2023-07-28T10:02:32.009703 | 2021-09-15T14:33:09 | 2021-09-15T14:33:09 | 286,587,802 | 0 | 0 | null | 2020-12-10T21:52:27 | 2020-08-10T22:05:18 | HTML | UTF-8 | Python | false | false | 775 | py | # Generated by Django 3.0.3 on 2020-10-02 23:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portal_radicaciones', '0011_auto_20201002_1053'),
]
operations = [
migrations.AddField(
model_name='locacion',
name='municipio',
field=models.CharField(choices=[('ARAU', 'Arauca'), ('BOYC', 'Boyacá'), ('CASN', 'Casanare'), ('CUND', 'Cundinamarca'), ('META', 'Meta')], default='META', max_length=5, verbose_name='departamento'),
),
migrations.AlterField(
model_name='locacion',
name='locacion_name',
field=models.CharField(default='VILLAVICENCIO', max_length=50, verbose_name='municipio'),
),
]
| [
"[email protected]"
] | |
8cc7a154f4e03c2ce422a55ad82bfa3360792052 | 43b61fd5c3a041c861c29604ff26aac716c04d83 | /intro-dl/introdl.py | 59104fe3d34fd0330100bf7cf5410aa81ae210ef | [] | no_license | ENSCMA2/linghacks-2019-workshops | 126baa3f8082b6f0ccc18e612305f9c6e63b14d0 | d05f538c17364450fbab55d8addd18b5a225d333 | refs/heads/main | 2021-07-08T12:01:51.860264 | 2020-08-23T23:16:15 | 2020-08-23T23:16:15 | 178,033,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
import tensorflow
# load the dataset
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=top_words)
max_review_length = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_review_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_review_length)
# embeddings
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=max_review_length))
# lstm
model.add(LSTM(5)) # feel free to change this number, around 100 probably works better, but of course, greater number = takes more time
# actual output
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam', metrics=['accuracy'])
print(model.summary())
model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=3,batch_size=64)
# evaluation
scores = model.evaluate(X_test,y_test,verbose=0)
print("Accuracy: %.2f%%"% (scores[1]*100))
| [
"[email protected]"
] | |
a28149614442929fb3eaa922c7eb04596d00b89a | 7a724badef6a881d63d7692de6a7b94daaf820be | /counting.py | 3a6f774c02a3340d04722ad0f356fb809ee5d7be | [] | no_license | mirshahzad/python-basic | aa19641140e10e6be88944c81c927410ffc23759 | 1552be46b5890c9976fad43dba007410396ad92a | refs/heads/master | 2022-10-24T18:13:58.938338 | 2020-06-13T09:14:12 | 2020-06-13T09:14:12 | 255,885,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | current_number = 1
while current_number <= 5:
print(current_number)
current_number += 1 | [
"[email protected]"
] | |
64bbc4b3a0a65ec19bda0ca7856ad6ce2c7ad637 | 88c1f9ccb62e91d6b0574bcde1043921bdeb0126 | /test_utilities/src/d1_test/mock_api/get_system_metadata.py | 6418ea795f12a47d1fc3fe6f94c299215b30e1fb | [
"Apache-2.0"
] | permissive | jevans97utk/d1_python | 83b8de8780287c655779844f367b9189413da074 | 3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d | refs/heads/master | 2020-05-21T01:16:50.677816 | 2019-04-22T16:09:44 | 2019-04-22T16:09:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,803 | py | #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock:
CNRead.getSystemMetadata(session, pid) → SystemMetadata
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNRead.getSystemMetadata
MNRead.getSystemMetadata(session, pid) → SystemMetadata
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/MN_APIs.html#MNRead.getSystemMetadata
A DataONEException can be triggered by adding a custom header. See
d1_exception.py
"""
import logging
import re
import responses
import d1_common.const
import d1_common.url
import d1_test.instance_generator.sciobj
import d1_test.mock_api.d1_exception
import d1_test.mock_api.util
# Config
META_ENDPOINT_RX = r'v([123])/meta/(.*)'
def add_callback(base_url):
responses.add_callback(
responses.GET,
re.compile(r'^' + d1_common.url.joinPathElements(base_url, META_ENDPOINT_RX)),
callback=_request_callback,
content_type='',
)
def _request_callback(request):
logging.debug('Received callback. url="{}"'.format(request.url))
# Return DataONEException if triggered
exc_response_tup = d1_test.mock_api.d1_exception.trigger_by_header(request)
if exc_response_tup:
return exc_response_tup
# Return NotFound
pid, client = _parse_url(request.url)
if pid.startswith('<NotFound>'):
return d1_test.mock_api.d1_exception.trigger_by_status_code(request, 404)
# Return regular response
pid, sid, sciobj_bytes, sysmeta_pyxb = d1_test.instance_generator.sciobj.generate_reproducible_sciobj_with_sysmeta(
client, pid
)
header_dict = {'Content-Type': d1_common.const.CONTENT_TYPE_XML}
return 200, header_dict, sysmeta_pyxb.toxml('utf-8')
def _parse_url(url):
version_tag, endpoint_str, param_list, query_dict, client = d1_test.mock_api.util.parse_rest_url(
url
)
assert endpoint_str == 'meta'
assert (
len(param_list) == 1
), 'getSystemMetadata() accepts a single parameter, the PID'
return param_list[0], client
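# Usage sketch (hypothetical test; assumes a DataONE client such as d1_client,
# mirroring how the other mock_api modules are exercised):
#     @responses.activate
#     def test_get_system_metadata():
#         add_callback('http://mock/mn')
#         # client = d1_client.mnclient_2_0.MemberNodeClient_2_0('http://mock/mn')
#         # sysmeta_pyxb = client.getSystemMetadata('test_pid')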
| [
"[email protected]"
] | |
00b93fc5791fa20e9c520adcd54c23edeb92cc4d | 67663fe1791aacea412f8895edc3c49450761daa | /venv/Lib/site-packages/ctree/simd/types.py | b0edf994503b140dadb74c5629ba86edbdc41b36 | [] | no_license | devvirus/CsvToJson | 090be750cebdc0b62c68038c1c2843aee4c353e6 | 4a6d84e60b34a782475a2f23adf2a9bbeae8f4f3 | refs/heads/master | 2022-12-01T18:14:25.486904 | 2020-08-12T10:51:42 | 2020-08-12T10:51:42 | 286,987,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | from ctree.types import CtreeType
class SimdType(CtreeType):
"""Base class for all SIMD Types."""
def codegen(self, indent=0):
from ctree.simd.codegen import SimdCodeGen
return SimdCodeGen().visit(self)
class m256d(SimdType):
pass
| [
"[email protected]"
] | |
4c082872c00e2802c6747f84f4f54edfa26e02e0 | 2800dbea417b57aab9277dc7ddc86f8eff571b37 | /streamparse/cli/logs.py | a2a17584fcb2d788e8af8dcaebe471cdfed6f2a2 | [
"Apache-2.0"
] | permissive | gchiam/streamparse | c439f7251942d3544c1b35bb68eb5e05b2433b48 | 79d824ffedfdf32865a09d3828b9f943546ac514 | refs/heads/master | 2023-01-28T04:09:51.001040 | 2015-12-07T01:01:27 | 2015-12-07T01:01:27 | 47,520,791 | 0 | 0 | Apache-2.0 | 2023-01-18T22:56:48 | 2015-12-07T00:56:01 | Python | UTF-8 | Python | false | false | 41 | py | # XXX: Add suparser_hook and main method
| [
"[email protected]"
] | |
052bcde8f4d4a4f9c5fc05536d70f9ec4edf3900 | 05ed1cd79adb6001928cbb88bf8181a7bc7c53c6 | /Chapter6/1-text-read.py | 54f120189e3074f3e61c6869f095666f4bf2cbfc | [] | no_license | wangying2016/Web_Scraping_with_Python | 7da07923fd98759772f022625d380113a6319d9e | 75b8c61273d3a43976dca7dbf3b53084c6c929bf | refs/heads/master | 2021-08-23T08:26:10.615460 | 2017-12-04T09:15:42 | 2017-12-04T09:15:42 | 107,662,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | from urllib.request import urlopen
textPage = urlopen('http://www.pythonscraping.com/pages/warandpeace/chapter1.txt')
print(textPage.read())
| [
"[email protected]"
] | |
ece8d0af3727dc49722e10f2dff6e19722cb6b8d | 141e73826ed5bed9caab23e7a2a5a49cfa184715 | /LearningBornschein/mca-genmodel-test/rf-learning/params-20x20-dog/mca-20-0500-H12.py | 9dad02afdc5612779dac3be859fd79fec669037e | [] | no_license | haefnerlab/LIF_Sampling_Project | d407a8e8dac73d82da4f46d1ae204f033ffdb876 | e6092f6c7539564f68826d689c4170633956f5dc | refs/heads/master | 2023-05-07T11:09:56.605722 | 2021-05-27T03:51:41 | 2021-05-27T03:51:41 | 371,237,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | # -*- coding: utf-8
#
# LinCA run for the MCA/BSC Journal Paper
#
from __future__ import division
# Training-data
data_factor = 1.
datafile = "../data/patches-20-dog.h5"
N = 100000
# Model to use
from pulp.em.camodels.mmca_et import MMCA_ET
model_class=MMCA_ET
# Number of hidden causes
H = 500
# Which parameters should be learned:
#to_learn = ['W']
to_learn = ['W', 'pi', 'sigma']
# ET approximation parameters
Hprime = 12
gamma = 10
#In this case, we need an estimation for the parameters
W_init = 'estimate'
pi_init = 'estimate'
sigma_init = 'estimate'
# np.random.normal(scale=W_noise_intensity, size=(H, D)) is added after each run
W_noise = 0.0
pi_noise = 0.
sigma_noise = 0.
# Annealing:
temp_start = 1.0
temp_end = 1.0
anneal_steps = 100
anneal_start = 20
anneal_end = 80
cut_start = 1./3
cut_end = 2./3
noise_decrease = 80
noise_end = 90
anneal_prior = False
# Images used:
channel_splitted = False #This means single channel (this convention'll be reversed)
# Post-Processing:
s_p = 3.
s_m = 1.
processing = 'deconvolve'
fit = True
| [
"[email protected]"
] | |
882db86727840e2fb056a21c2c18c7845a1ccd74 | 4eac4124ea5d204a3c3026be122f8fe1e18812f5 | /tests/azure_upload.py | 34565eb7e65007f3032e251372294ae1ce179e9b | [] | no_license | webclinic017/Airflow_DAG | 74026d0f661944e5b7c3ba75c50cece1396a25fc | 1af39904ed1429ff1dce24f122343d5722e41f03 | refs/heads/master | 2023-08-03T11:33:02.474432 | 2021-09-16T05:07:54 | 2021-09-16T05:07:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, DateType, IntegerType, DecimalType
# Create Spark Session
spark = SparkSession.builder.master("local").appName("azure_upload").getOrCreate()
# Set up access key for Azure blob storage (the account key string below must be filled in)
sc = spark.sparkContext
sc._jsc.hadoopConfiguration().set("fs.wasbs.impl", "org.apache.hadoop.fs.azure.NativeAzureFileSystem")
sc._jsc.hadoopConfiguration().set("fs.azure.account.key.yahoofinancestoragerg.blob.core.windows.net", "")
# Define schema
schema = StructType([
StructField("date_time", DateType(), True),
StructField("open", DecimalType(38, 12), True),
StructField("high",DecimalType(38, 12), True),
StructField("low", DecimalType(38, 12), True),
StructField("close", DecimalType(38, 12), True),
StructField("adj_close", DecimalType(38, 12), True),
StructField("volume", IntegerType(), True),
])
# Read CSV files from Local System to DataFrames
aapl_df = spark.read.option("header", False).schema(schema).csv("/usr/local/airflow/21.7_Airflow_DAG/data/q/2021-08-14/AAPL.csv")
tsla_df = spark.read.option("header", False).schema(schema).csv("/usr/local/airflow/21.7_Airflow_DAG/data/q/2021-08-14/TSLA.csv")
# Load PARQUET files to Azure blob storage
aapl_df.write.mode("overwrite").parquet("wasbs://[email protected]/AAPL")
tsla_df.write.mode("overwrite").parquet("wasbs://[email protected]/TSLA") | [
"[email protected]"
] | |
1d342596a3c2b5fe7b4c0f02b71440fa4bb49c42 | 3e19e9f7d1a052f15bb6e9aef7db9a1fc464480f | /ScrapyProject/tiki/tiki/main.py | 7867b9a670386b5f62f0aa1c2d1e74723e5581cb | [] | no_license | Jamie33/learngit | b690ff18e3924e10eee9d7281bf8e0ea4868eccc | 063c9af279a9f7374209d08d284f8a161c76d80c | refs/heads/master | 2023-08-11T19:21:23.782192 | 2023-08-11T05:22:56 | 2023-08-11T05:22:56 | 121,004,322 | 15 | 5 | null | 2023-02-16T01:27:32 | 2018-02-10T10:00:46 | Jupyter Notebook | UTF-8 | Python | false | false | 613 | py | from scrapy.cmdline import execute # 调用此函数可以执行scrapy的脚本
import sys
import os
# Used to set the project root; without it the command-line call below cannot resolve the project
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# os.path.abspath(__file__) returns the path of the current .py file
# os.path.dirname() returns the parent directory of that path
# execute() runs a scrapy command: scrapy crawl <spider name>
#execute(['scrapy', 'crawl', 'tikiVN','-o','sendoVN_cosmetics_page20_rank.csv'])
execute(['scrapy', 'crawl', 'tikiVN','-o','tikiVN_health_page_filterClosetest.csv']) | [
"[email protected]"
] | |
5e62e5fca95e72c565b182146721fc4664fbc24e | 65df22b4a43adff0db0ba4cd80d875317c7a2f85 | /backend/le20_dev_6236/settings.py | caf138510a9bebc4dd104be49b365755a9c597ec | [] | no_license | crowdbotics-apps/le20-dev-6236 | b4cdc066fcd4751acd5068d54b46e14c939c0259 | 792f7bfa8cc92782c89023bbd1bf7691f8ee77b9 | refs/heads/master | 2022-11-12T21:03:47.778993 | 2020-06-25T10:01:54 | 2020-06-25T10:01:54 | 273,238,723 | 0 | 0 | null | 2020-06-25T10:01:56 | 2020-06-18T12:57:59 | JavaScript | UTF-8 | Python | false | false | 5,723 | py | """
Django settings for le20_dev_6236 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
]
LOCAL_APPS = [
"home",
"users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
"rest_framework",
"rest_framework.authtoken",
"rest_auth",
"rest_auth.registration",
"bootstrap4",
"allauth",
"allauth.account",
"allauth.socialaccount",
"allauth.socialaccount.providers.google",
"django_extensions",
"drf_yasg",
# start fcm_django push notifications
"fcm_django",
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "le20_dev_6236.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "le20_dev_6236.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(BASE_DIR, "db.sqlite3"),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
064343a96df7219bc7cba517033e35fe1defd3ef | a01c25608188e2bbb4ac3da8b8695fbc69946d56 | /uptodate/card.py | 8f5c06c23409b4b01994bc886dc37398975f114c | [] | no_license | wo0lien/ppc | a83ed1b7cfd606adfa060797c79a0493bffb446f | c3d112b0bc5e2d35ff94c2532e08425f2ceb3bf0 | refs/heads/master | 2020-09-30T13:40:43.198773 | 2020-01-21T14:17:33 | 2020-01-21T14:17:37 | 227,298,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | # -*-coding: utf8-*-
class GameCard:
"""
    GameCard simplifies passing card data between the client and displayer threads.
"""
def __init__(self, color, nb):
"""
        Constructor initializing a new card from two parameters.
        Parameters:
            color (string): card color, "r" for red or "b" for blue
            nb (int): card value
"""
self.color = str(color)
self.nb = int(nb)
def __str__(self):
"""Affichage d'une carte - utilisé pour le debug"""
return ("||"+str(self.color)+"|"+str(self.nb)+"||")
def tosend(self):
"""Fonction qui genere un code string pour la communication avec le board"""
return str(self.color)+"|"+str(self.nb)
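
# Small usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    card = GameCard("r", 7)
    print(card)           # ||r|7||
    print(card.tosend())  # r|7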
| [
"[email protected]"
] | |
73d61e85cc20a0f7345e4c42d7e577ee78cbcc65 | dce89ac52c2b64ae8e3310156c540583c3b617a8 | /hw18/RunDecisionTreeRegression.py | c21e5dace251f5f22c420cb19ff26322888628a2 | [] | no_license | Xi-Plus/NKUST-Parallel-Processing | b07c6f307e7619bd22969db9af505ef4589bc8d5 | 450881a20c17fef4ca7b01399055f5d7c5cd52b9 | refs/heads/master | 2022-11-08T21:12:26.767939 | 2020-06-28T14:26:30 | 2020-06-28T14:26:30 | 259,319,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,060 | py | # -*- coding: UTF-8 -*-
import findspark
findspark.init()
import sys
from time import time
import pandas as pd
import matplotlib.pyplot as plt
from pyspark import SparkConf, SparkContext
from pyspark.mllib.tree import DecisionTree
from pyspark.mllib.regression import LabeledPoint
import numpy as np
from pyspark.mllib.evaluation import RegressionMetrics
import math
def SetLogger(sc):
logger = sc._jvm.org.apache.log4j
logger.LogManager.getLogger("org"). setLevel(logger.Level.ERROR)
logger.LogManager.getLogger("akka").setLevel(logger.Level.ERROR)
logger.LogManager.getRootLogger().setLevel(logger.Level.ERROR)
def SetPath(sc):
global Path
if sc.master[0:5] == "local":
Path = "file:/home/xiplus/hw18/"
else:
Path = "hdfs://localhost:9000/user/xiplus/"
    # If you run in cluster mode (hadoop yarn or Spark standalone), follow the book's instructions and upload the files to the HDFS directory first
def extract_label(record):
return convert_float(record[-2])
# return convert_float(record[-1])
def convert_float(x):
if x in ['?', 'NaN']:
return 0
return float(x)
def extract_features(record, featureEnd):
features = [convert_float(field) for field in record[:-2]]
return features
def PrepareData(sc):
    # ----------------------1. import and transform the data-------------
    print("Loading data...")
rawDataWithHeader = sc.textFile(Path + "Bias_correction_ucl.csv")
header = rawDataWithHeader.first()
rawData = rawDataWithHeader.filter(lambda x: x != header)
lines = rawData.map(lambda x: x.split(","))
print(lines.first())
print("共計:" + str(lines.count()) + "筆")
# ----------------------2.建立訓練評估所需資料 RDD[LabeledPoint]-------------
labelpointRDD = lines.map(lambda r: LabeledPoint(
extract_label(r),
extract_features(r, len(r) - 1)))
print labelpointRDD.first()
    # ----------------------3. randomly split the data into 3 parts and return them-------------
(trainData, validationData, testData) = labelpointRDD.randomSplit([8, 1, 1])
print("將資料分trainData:" + str(trainData.count())
+ " validationData:" + str(validationData.count())
+ " testData:" + str(testData.count()))
# print labelpointRDD.first()
    return (trainData, validationData, testData)  # return the data
def PredictData(sc, model):
i = 0
    # ----------------------1. import and transform the data-------------
    print("Loading data...")
rawDataWithHeader = sc.textFile(Path + "Bias_correction_ucl.csv")
header = rawDataWithHeader.first()
rawData = rawDataWithHeader.filter(lambda x: x != header)
lines = rawData.map(lambda x: x.split(","))
#print (lines.first())
print("共計:" + str(lines.count()) + "筆")
# ----------------------2.建立訓練評估所需資料 LabeledPoint RDD-------------
labelpointRDD = lines.map(lambda r: LabeledPoint(
extract_label(r),
extract_features(r, len(r) - 1)))
    # ----------------------3. define lookup dictionaries----------------
    SeasonDict = {1: "spring", 2: "summer", 3: "autumn", 4: "winter"}
    HoildayDict = {0: "non-holiday", 1: "holiday"}
    WeekDict = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"}
    WorkDayDict = {1: "workday", 0: "non-workday"}
    WeatherDict = {1: "clear", 2: "cloudy", 3: "light rain", 4: "heavy rain"}
    # ----------------------4. run predictions and display the results--------------
for lp in labelpointRDD.take(100):
i = i + 1
predict = float(model.predict(lp.features))
label = lp.label
features = lp.features
result = ("正確" if (label == predict) else "錯誤")
error = math.fabs(label - predict)
dataDesc = str(i) + " 特徵: " + str(features) +\
" ==> 預測結果:" + str(predict) +\
" , 實際:" + str(label) + result + ", 誤差:" + str(error)
print dataDesc
def evaluateModel(model, validationData):
score = model.predict(validationData.map(lambda p: p.features))
scoreAndLabels = score.zip(validationData.map(lambda p: p.label))
metrics = RegressionMetrics(scoreAndLabels)
RMSE = metrics.rootMeanSquaredError
return(RMSE)
def trainEvaluateModel(trainData, validationData,
impurityParm, maxDepthParm, maxBinsParm):
startTime = time()
model = DecisionTree.trainRegressor(trainData,
categoricalFeaturesInfo={},
impurity=impurityParm,
maxDepth=maxDepthParm,
maxBins=maxBinsParm)
RMSE = evaluateModel(model, validationData)
duration = time() - startTime
print "訓練評估:使用參數" + \
" impurityParm= %s" % impurityParm + \
" maxDepthParm= %s" % maxDepthParm + \
" maxBinsParm = %d." % maxBinsParm + \
" 所需時間=%d" % duration + \
" 結果RMSE = %f " % RMSE
return (RMSE, duration, impurityParm, maxDepthParm, maxBinsParm, model)
def evalParameter(trainData, validationData, evaparm, impurityList, maxDepthList, maxBinsList):
metrics = [trainEvaluateModel(trainData, validationData, impurity, maxdepth, maxBins)
for impurity in impurityList
for maxdepth in maxDepthList
for maxBins in maxBinsList]
if evaparm == "impurity":
IndexList = impurityList[:]
elif evaparm == "maxDepth":
IndexList = maxDepthList[:]
elif evaparm == "maxBins":
IndexList = maxBinsList[:]
df = pd.DataFrame(metrics, index=IndexList,
columns=['RMSE', 'duration', 'impurityParm', 'maxDepthParm', 'maxBinsParm', 'model'])
showchart(df, evaparm, 'RMSE', 'duration', 0, 200)
def showchart(df, evalparm, barData, lineData, yMin, yMax):
ax = df[barData].plot(kind='bar', title=evalparm, figsize=(10, 6), legend=True, fontsize=12)
ax.set_xlabel(evalparm, fontsize=12)
ax.set_ylim([yMin, yMax])
ax.set_ylabel(barData, fontsize=12)
ax2 = ax.twinx()
ax2.plot(df[[lineData]].values, linestyle='-', marker='o', linewidth=2.0, color='r')
plt.show()
def evalAllParameter(training_RDD, validation_RDD, impurityList, maxDepthList, maxBinsList):
metrics = [trainEvaluateModel(trainData, validationData, impurity, maxdepth, maxBins)
for impurity in impurityList
for maxdepth in maxDepthList
for maxBins in maxBinsList]
Smetrics = sorted(metrics, key=lambda k: k[0])
bestParameter = Smetrics[0]
print("調校後最佳參數:impurity:" + str(bestParameter[2])
+ " ,maxDepth:" + str(bestParameter[3])
+ " ,maxBins:" + str(bestParameter[4])
+ " ,結果RMSE = " + str(bestParameter[0]))
return bestParameter[5]
def parametersEval(training_RDD, validation_RDD):
print("----- 評估maxDepth參數使用 ---------")
evalParameter(training_RDD, validation_RDD, "maxDepth",
impurityList=["variance"],
maxDepthList=[3, 5, 10, 15, 20, 25],
maxBinsList=[10])
print("----- 評估maxBins參數使用 ---------")
evalParameter(training_RDD, validation_RDD, "maxBins",
impurityList=["variance"],
maxDepthList=[10],
maxBinsList=[3, 5, 10, 50, 100, 200])
def CreateSparkContext():
sparkConf = SparkConf() \
.setAppName("RunDecisionTreeRegression") \
.set("spark.ui.showConsoleProgress", "false")
sc = SparkContext(conf=sparkConf)
print("master=" + sc.master)
SetLogger(sc)
SetPath(sc)
return (sc)
if __name__ == "__main__":
print("RunDecisionTreeRegression")
sc = CreateSparkContext()
sc.setCheckpointDir(Path + "checkpoint/")
print("==========資料準備階段===============")
(trainData, validationData, testData) = PrepareData(sc)
trainData.persist()
validationData.persist()
testData.persist()
print("==========訓練評估階段===============")
(AUC, duration, impurityParm, maxDepthParm, maxBinsParm, model) = \
trainEvaluateModel(trainData, validationData, "variance", 10, 100)
if (len(sys.argv) == 2) and (sys.argv[1] == "e"):
parametersEval(trainData, validationData)
elif (len(sys.argv) == 2) and (sys.argv[1] == "a"):
print("-----所有參數訓練評估找出最好的參數組合---------")
model = evalAllParameter(trainData, validationData,
["variance"],
[3, 5, 10, 15, 20, 25],
[3, 5, 10, 50, 100, 200])
print("==========測試階段===============")
RMSE = evaluateModel(model, testData)
print("使用testata測試最佳模型,結果 RMSE:" + str(RMSE))
print("==========預測資料===============")
PredictData(sc, model)
# print model.toDebugString()
| [
"[email protected]"
] | |
fde5e4450e2ac92a400a65b989d74f1f88271ff8 | e3a4adddc08f0006c679386ec74163d8c24ec5dd | /bipedal/bipedal.py | 5a0da03a160f15cbb4b05db94b49cf3f9b0b18af | [
"MIT"
] | permissive | anetczuk/pybraingym | 1acb8108ec16ac8fdb2545689addfe589bc30ae0 | 4f930021d7802e88c75a1a0aed135dd4de66cc1b | refs/heads/master | 2020-06-23T04:11:51.252106 | 2019-08-12T23:17:11 | 2019-08-12T23:17:11 | 198,507,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | #!/usr/bin/env python3
import gym
env = gym.make('BipedalWalker-v2')
for i_episode in range(100):
observation = env.reset()
for t in range(10000):
env.render()
print(observation)
action = env.action_space.sample()
observation, reward, done, info = env.step(action)
if done:
print("{} timesteps taken for the episode".format(t+1))
break
## fix import error (sys.meta_path)
env.env.close()
| [
"[email protected]"
] | |
0ff8ef926e518fe014f3efb91f0978235d331d59 | 71894f980d1209017837d7d02bc38ffb5dbcb22f | /multi/devicePresenseAlert/homeassistant/helpers/location.py | eb7b469a19664e6b201c7c9af0a84d43f17c0f73 | [
"MIT"
] | permissive | masomel/py-iot-apps | 0f2418f8d9327a068e5db2cdaac487c321476f97 | 6c22ff2f574a37ba40a02625d6ed68d7bc7058a9 | refs/heads/master | 2021-03-22T04:47:59.930338 | 2019-05-16T06:48:32 | 2019-05-16T06:48:32 | 112,631,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,017 | py | """Location helpers for Home Assistant."""
from .typing import Sequence
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE
from homeassistant.core import State
from homeassistant.util import location as loc_util
def has_location(state: State) -> bool:
"""Test if state contains a valid location.
Async friendly.
"""
return (isinstance(state, State) and
isinstance(state.attributes.get(ATTR_LATITUDE), float) and
isinstance(state.attributes.get(ATTR_LONGITUDE), float))
def closest(latitude: float, longitude: float,
states: Sequence[State]) -> State:
"""Return closest state to point.
Async friendly.
"""
with_location = [state for state in states if has_location(state)]
if not with_location:
return None
return min(
with_location,
key=lambda state: loc_util.distance(
latitude, longitude, state.attributes.get(ATTR_LATITUDE),
state.attributes.get(ATTR_LONGITUDE))
)
| [
"[email protected]"
] | |
1681288f44cba226e3a4e72dd23a6e23b719d69e | e34ba3f00ca72e26640c4fe030670b691026f5e3 | /Game.py | 129fb184d45b2d540af273b5f86c311fcf6e2c24 | [] | no_license | ytong3/casino-simulation | 5cb0b1aeca7ca444816a903f976702c5c15f28e7 | c8d814cd1090c52dad326925c1358044b1400ded | refs/heads/master | 2021-09-04T07:35:19.583367 | 2018-01-17T03:32:07 | 2018-01-17T03:32:07 | 116,899,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | from Outcome import Outcome
from Bin import Bin
'''
Responsibilities:
1. get bets from Playe
2. spin the Wheel
3. collects losing bets
4. pays winning bets.
'''
class Game:
    def __init__(self):
five = Outcome("00-0-1-2-3", 6)
zero = Bin([Outcome("0",35), five])
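
# Illustrative sketch only: Wheel and Player are not defined in this file, so
# the round below assumes hypothetical interfaces (wheel.next() returning the
# winning Bin, player.place_bets() returning (Outcome, amount) pairs).
#
#    def cycle(self, wheel, player):
#        bets = player.place_bets()          # 1. get bets from Player
#        winning_bin = wheel.next()          # 2. spin the Wheel
#        for outcome, amount in bets:
#            if outcome in winning_bin:      # 4. pay winning bets
#                player.win(amount * outcome.odds)
#            # 3. losing bets were already collected when staked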
| [
"[email protected]"
] | |
72d5838be1fe45e1f179972b1c3bc7d7b576c40d | 145b3dda3854f9adfe3f9628b82006994edb842e | /server.py | a3f60137ee8e9cc7f8a8ecb577530fb82af411b6 | [] | no_license | dotuan1120/pi-surveillance | 038f92cc2c939fb43233c1377e9eb540f5ed5b44 | 78c3c3ebb0fab64b1914df051686420d36331e07 | refs/heads/master | 2022-12-21T19:37:09.163411 | 2020-10-01T00:40:53 | 2020-10-01T00:40:53 | 299,993,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,251 | py | import time
import pyrebase
import os
from imutils import build_montages
import datetime
import numpy as np
import imagezmq
import argparse
import imutils
import cv2
import shutil
# convert the video from avi format to mp4 format
def convert_avi_to_mp4(avi_file_path, output_name):
pro = os.popen(
"ffmpeg -i '{input}' -ac 2 -b:v 2000k -c:a aac -c:v libx264 -b:a 160k -vprofile high -bf 0 -strict "
"experimental -f mp4 '{output}'".format(
input=avi_file_path, output=output_name))
pro.read()
return True
# Convert the video from avi format to mp4 format
# Upload videos to the Firebase Cloud Storage
# Move the videos to the local directory
def handle_outputs(video_name_avi, image_name, date, time):
# Initialize video name and paths
video_name_mp4 = "({ts}).mp4".format(ts=time)
video_cloud_path = 'videos/{date}/{vid}'.format(date=date, vid=video_name_mp4)
video_name_avi_path = "/home/tuan/Downloads/pi-surveillance/{vid}".format(vid=video_name_avi)
local_storage_path = '/home/tuan/Downloads/pi-surveillance/{loc}'.format(loc=date)
# Convert from avi to mp4 format
convert_avi_to_mp4(video_name_avi_path, video_name_mp4)
# Upload to the cloud storage and realtime database
storage.child(video_cloud_path).put(video_name_mp4)
vidRef = storage.child('videos/{date}/{vid}'.format(date=date, vid=video_name_mp4)).get_url(None)
db.child("surveillance").child(date).child(time).update({"video": vidRef})
# Move video and images to the local storage and delete the avi file
move_file(video_name_avi, video_name_mp4, image_name, local_storage_path)
# Create a local directory to store videos and images
def create_dir(date):
parent_dir = "/home/tuan/Downloads/pi-surveillance"
path = os.path.join(parent_dir, date)
try:
os.mkdir(path)
print("Directory '%s' created" % date)
except OSError as error:
print(error)
# Move the file to the destination directory
def move_file(video_name_avi, video_name_mp4, image_name, local_storage_path):
try:
os.remove(video_name_avi)
shutil.move(video_name_mp4, local_storage_path)
shutil.move(image_name, local_storage_path)
print("success")
except OSError as error:
print(error)
# Firebase configuration
config = {
"apiKey": "AIzaSyDzALNGaFzBfKTwQiEvht1brD5KxVqGyEE",
"authDomain": "pi-surveillance-9dc05.firebaseapp.com",
"databaseURL": "https://pi-surveillance-9dc05.firebaseio.com",
"projectId": "pi-surveillance-9dc05",
"storageBucket": "pi-surveillance-9dc05.appspot.com",
"messagingSenderId": "408628332478",
"appId": "1:408628332478:web:4b1ac996f34981136d9bef"
};
# Initialize the Firebase services
firebase = pyrebase.initialize_app(config)
db = firebase.database()
storage = firebase.storage()
if __name__ == '__main__':
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
help="minimum probability to filter weak detections")
ap.add_argument("-mW", "--montageW", required=True, type=int,
help="montage frame width")
ap.add_argument("-mH", "--montageH", required=True, type=int,
help="montage frame height")
args = vars(ap.parse_args())
# initialize the ImageHub object
imageHub = imagezmq.ImageHub()
# initialize the list of class labels MobileNet SSD was trained to detect,
# then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# initialize the frame dictionary
frameDict = {}
# assign montage width and height of the video interface
mW = args["montageW"]
mH = args["montageH"]
frame_counter = 0
# start looping over all the frames
while True:
# receive Pi name and frame from the RPi and send REPLY to the Pi
(rpiName, frame) = imageHub.recv_image()
imageHub.send_reply(b'OK')
# resize the frame to have a maximum width of 600 pixels
# construct a blob using the frame dimensions
frame = imutils.resize(frame, width=600)
(h, w) = frame.shape[:2]
size = (w, h)
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
0.007843, (300, 300), 127.5)
# pass the blob through the network and obtain the detections and predictions
net.setInput(blob)
detections = net.forward()
# detected variable is True when there is human in the frame, otherwise is False
detected = False
# loop over the detections
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with the prediction
confidence = detections[0, 0, i, 2]
# check if the confidence is greater than the minimum confidence
# and the detection contains human presence
if confidence > args["confidence"] and int(detections[0, 0, i, 1]) == 15:
detected = True
# extract the index of the class label from the detections
idx = int(detections[0, 0, i, 1])
# if there are 10 consecutive frames containing human(s)
if frame_counter == 10:
                # Create the date and time strings
date = datetime.datetime.now().strftime("%d-%b-%Y")
time = datetime.datetime.now().strftime("%H:%M:%S")
# Create a storage directory which its name is the date
create_dir(date)
# Upload the image first
image_name = '({ts}).jpg'.format(ts=time)
cv2.imwrite(image_name, frame)
image_cloud_path = 'images/{date}/{img}'.format(date=date, img=image_name)
storage.child(image_cloud_path).put(image_name)
imgRef = storage.child('images/{date}/{img}'.format(date=date, img=image_name)).get_url(None)
# Update name and image in Realtime Database
db.child("surveillance").child(date).child(time).update({"image": imgRef})
db.child("surveillance").child(date).child(time).update({"name": time})
# Initialize a video object
video_name_avi = "({ts}).avi".format(ts=time)
result = cv2.VideoWriter(video_name_avi, cv2.VideoWriter_fourcc(*'MJPG'), 10, size)
print("begin to record")
frame_counter += 1
# If there are more than 10 consecutive frames containing human(s), begin to record the video
elif frame_counter > 10:
result.write(frame)
# if there are more than 100 consecutive frames containing human(s),
# reset frame counter to stop recording, and handle the video
if frame_counter > 110:
frame_counter = 0
print("end recording")
# upload video to cloud storage
handle_outputs(video_name_avi, image_name, date, time)
else:
frame_counter += 1
else:
frame_counter += 1
# If there is no human in the current frame
if detected is False:
# reset frame counter to stop recording, and handle the video if the program are recording
if frame_counter > 10:
# upload video to cloud storage
handle_outputs(video_name_avi, image_name, date, time)
frame_counter = 0
# reset frame counter if there are less than 10 consecutive frames containing human
else:
frame_counter = 0
print("frame_counter = ", frame_counter)
# update the new frame in the frame dictionary
frameDict[rpiName] = frame
# build a montage using images in the frame dictionary
montages = build_montages(frameDict.values(), (w, h), (mW, mH))
# display the montage(s) on the screen
for (i, montage) in enumerate(montages):
cv2.imshow("Monitor ({})".format(i),
montage)
# detect any key pressed
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# Cleanup
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
e3ddd256f3d55736325b6627069fd7ff399b4526 | 5996c22ae79075dd9b2e583a5f5ffa18bdde6a2d | /code/accelmotor.py | 1120dfec136a789f9c41b487ad365c63b72dcee7 | [] | no_license | hnovak94/SelfBalancingRobot | a8647e2cd49bf67610f9b5d92ab77f1261b5e541 | a11cec2cf5c6bf25cc13ebee5878de716f35b34d | refs/heads/main | 2023-05-24T07:27:05.041427 | 2021-06-09T16:08:48 | 2021-06-09T16:08:48 | 335,387,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | import adafruit_lsm303
import board
import time
import pwmio
import busio
i2c = busio.I2C(board.SCL, board.SDA)
sensor = adafruit_lsm303.LSM303(i2c)
motorA1 = pwmio.PWMOut(board.D13, frequency=5000, duty_cycle=0)
motorA2 = pwmio.PWMOut(board.D12, frequency=5000, duty_cycle=0)
motorB1 = pwmio.PWMOut(board.D2, frequency=5000, duty_cycle=0)
motorB2 = pwmio.PWMOut(board.D6, frequency=5000, duty_cycle=0)
while True:
raw_accel_x, raw_accel_y, raw_accel_z = sensor.raw_acceleration
accel_x, accel_y, accel_z = sensor.acceleration
raw_mag_x, raw_mag_y, raw_mag_z = sensor.raw_magnetic
mag_x, mag_y, mag_z = sensor.magnetic
print((accel_x, accel_y))
# print('Acceleration raw: ({0:6d}, {1:6d}, {2:6d}), (m/s^2): ({3:10.3f}, {4:10.3f}, {5:10.3f})'.format(raw_accel_x, raw_accel_y, raw_accel_z, accel_x, accel_y, accel_z))
# print('Magnetometer raw: ({0:6d}, {1:6d}, {2:6d}), (gauss): ({3:10.3f}, {4:10.3f}, {5:10.3f})'.format(raw_mag_x, raw_mag_y, raw_mag_z, mag_x, mag_y, mag_z))
# print('')
time.sleep(.2)
# the difference between the two determines the speed.
# whichever pin is higher determines the direction
# max is 65535 (2^16), motor stalls when less than ~40000
# difference of 0, it stops
# difference 40000+ increases speed as difference increases
motorA1.duty_cycle = 0
motorB1.duty_cycle = 0
motorA2.duty_cycle = 60000
motorB2.duty_cycle = 60000
print("spins fast in d1")
time.sleep(1)
motorA1.duty_cycle = 0
motorB1.duty_cycle = 0
motorA2.duty_cycle = 0
motorB2.duty_cycle = 0
print("stops")
time.sleep(1)
motorA1.duty_cycle = 60000
motorB1.duty_cycle = 60000
motorA2.duty_cycle = 0
motorB2.duty_cycle = 0
print("spins fast in d2")
time.sleep(1)
# x accel
# -3 -------- 0 ---------- +3
# wheel speed
# try multiplying by 20000
# if x is 1 then diff is 20000
# if x is 2 then diff is 40000, etc
# -60000 ---- 0 ---------- +60000
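
# A sketch of the mapping described in the notes above (placed after the demo
# loop, which never exits, so this is illustration only; the 20000 gain and
# the ~40000 stall threshold are the untested guesses from the comments):
def drive_from_accel(x):
    diff = min(int(abs(x) * 20000), 60000)  # scale |accel_x| into duty range
    duty = diff if diff > 40000 else 0      # below ~40000 the motor stalls
    if x >= 0:
        motorA1.duty_cycle = 0
        motorB1.duty_cycle = 0
        motorA2.duty_cycle = duty
        motorB2.duty_cycle = duty
    else:
        motorA1.duty_cycle = duty
        motorB1.duty_cycle = duty
        motorA2.duty_cycle = 0
        motorB2.duty_cycle = 0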
| [
"[email protected]"
] | |
4ca5a17410e4aa40109deb8f261fe69e84e5f22e | 0021a45bf27dde70cfa54bf1df9d51f27b6be076 | /python/huffman-compress.py | d1a1a77bf9c919459640c14ff996763d250d7f1f | [
"MIT"
] | permissive | GraphicYan/Reference-Huffman-coding | e74a4e65edb413afa59ff5e44ba47a2d2bdbe546 | bc286358d24b81cd06287b3b062da54388e2ee67 | refs/heads/master | 2023-02-12T19:44:08.501375 | 2020-04-14T03:19:24 | 2020-04-14T03:19:24 | 350,271,132 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,698 | py | #
# Compression application using static Huffman coding
#
# Usage: python huffman-compress.py InputFile OutputFile
# Then use the corresponding huffman-decompress.py application to recreate the original input file.
# Note that the application uses an alphabet of 257 symbols - 256 symbols for the byte values
# and 1 symbol for the EOF marker. The compressed file format starts with a list of 257
# code lengths, treated as a canonical code, and then followed by the Huffman-coded data.
#
# Copyright (c) Project Nayuki
#
# https://www.nayuki.io/page/reference-huffman-coding
# https://github.com/nayuki/Reference-Huffman-coding
#
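# For example, if symbol 65 ("A") is assigned a 3-bit code, header byte 65 is
# 0x03; after the 257 length bytes comes the Huffman-coded bitstream, which
# ends with the EOF symbol (value 256).
#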
import contextlib, sys
import huffmancoding
# Command line main application function.
def main(args):
# Handle command line arguments
if len(args) != 2:
sys.exit("Usage: python huffman-compress.py InputFile OutputFile")
inputfile, outputfile = args
# Read input file once to compute symbol frequencies.
# The resulting generated code is optimal for static Huffman coding and also canonical.
freqs = get_frequencies(inputfile)
freqs.increment(256) # EOF symbol gets a frequency of 1
code = freqs.build_code_tree()
canoncode = huffmancoding.CanonicalCode(tree=code, symbollimit=freqs.get_symbol_limit())
# Replace code tree with canonical one. For each symbol,
# the code value may change but the code length stays the same.
code = canoncode.to_code_tree()
# Read input file again, compress with Huffman coding, and write output file
with open(inputfile, "rb") as inp, \
contextlib.closing(huffmancoding.BitOutputStream(open(outputfile, "wb"))) as bitout:
write_code_len_table(bitout, canoncode)
compress(code, inp, bitout)
# Returns a frequency table based on the bytes in the given file.
# Also contains an extra entry for symbol 256, whose frequency is set to 0.
def get_frequencies(filepath):
freqs = huffmancoding.FrequencyTable([0] * 257)
with open(filepath, "rb") as input:
while True:
b = input.read(1)
if len(b) == 0:
break
freqs.increment(b[0])
return freqs
def write_code_len_table(bitout, canoncode):
for i in range(canoncode.get_symbol_limit()):
val = canoncode.get_code_length(i)
# For this file format, we only support codes up to 255 bits long
if val >= 256:
raise ValueError("The code for a symbol is too long")
# Write value as 8 bits in big endian
for j in reversed(range(8)):
bitout.write((val >> j) & 1)
def compress(code, inp, bitout):
enc = huffmancoding.HuffmanEncoder(bitout)
enc.codetree = code
while True:
b = inp.read(1)
if len(b) == 0:
break
enc.write(b[0])
enc.write(256) # EOF
# Main launcher
if __name__ == "__main__":
main(sys.argv[1 : ])
| [
"[email protected]"
] | |
fd9ed163d4c9c80a30e6f861cbd14e49ab3cf4ad | f54070cd3048a3645cb25f301592a904d387a1c9 | /python_prgrams/testpython/range.py | bbf0f3ea21df573de2ae047ecff3657b38dcb360 | [] | no_license | mak705/Python_interview | 02bded60417f1e6e2d81e1f6cde6961d95da2a8e | aff2d6018fd539dbcde9e3a6b3f8a69167ffca0d | refs/heads/master | 2020-03-22T21:03:34.018919 | 2019-11-15T08:51:34 | 2019-11-15T08:51:34 | 140,653,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | num=list(range(10))
print num
| [
"[email protected]"
] | |
d43d96f3a9130aae70c839b3190ffb085dd7ca71 | 3a2fde1c89ecea37cc9476d017478933e44f44d2 | /insight_testsuite/temp/src/bi_antifraud2.py | 225d0a70699265095aa8411490bb9cfeb31fb8f6 | [] | no_license | Kebniss/Insight-code-challenge | 70715b1e0a7f9541df90f5beba6d943f21c0cc4a | e3c58de0eda297b79aca81c525e284cbdd98283a | refs/heads/master | 2020-12-24T10:40:22.273868 | 2016-11-11T06:57:09 | 2016-11-11T06:57:09 | 73,139,660 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,155 | py | import os
from features import *
from Graph import Graph
import logging
cur_path = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger()
hdlr = logging.FileHandler(os.path.join(cur_path, 'bi_antifraud2.log'))
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
input_batch = os.path.relpath('..\\paymo_input\\batch_payment_trimmed.csv', cur_path)
connections = Graph()
i = 0
with open(input_batch, 'r') as f:
for line in f:
# skip header
if i == 0:
i += 1
continue
input_line = line.split(',')
try:
user_1 = int(input_line[0].strip())
except:
logger.error('Could not parse {0} at line {1}'.format(input_line[0], i))
continue
try:
user_2 = int(input_line[1].strip())
except:
logger.error('Could not parse {0} at line {1}'.format(input_line[1], i))
continue
i += 1
connections.add_edge(user_1, user_2)
input_stream = os.path.relpath('..\\paymo_input\\stream_payment_trimmed.csv', cur_path)
i = 0
output1 = []
output_stream = os.path.relpath('..\\paymo_output\\bi_output2.txt', cur_path)
with open(output_stream, 'w') as fout:
with open(input_stream, 'r') as fin:
for line in fin:
if i == 0:
i += 1
continue
if i % 10000 == 0:
logger.info("Working on row " + str(i) )
input_line = line.split(',')
try:
user_1 = int(input_line[0].strip())
except:
logger.error('Could not parse {0} at line {1}'.format(input_line[0], i))
continue
try:
user_2 = int(input_line[1].strip())
except:
logger.error('Could not parse {0} at line {1}'.format(input_line[1], i))
continue
i += 1
fout.write(bi_feature_2(connections, user_1, user_2) + "\n")
connections.add_edge(user_1, user_2)
logger.info("Done!")
| [
"[email protected]"
] | |
3d37ae217173ffd7aa941fb1ccd8e907bdd53baf | a3fb024bd084405ea56d7aaba1a2a9b77899178a | /u2net_portrait_demo.py | 81d8feb24d319b33a26bc37018acb55d677baee4 | [
"Apache-2.0"
] | permissive | actbee/U-2-Net | 6c7d4b79e368735d2c23b10b595e36cf3c262d8c | 680f9402857837b51663fb502a1f149f7daaf256 | refs/heads/master | 2023-01-23T05:03:38.460443 | 2020-11-22T08:10:01 | 2020-11-22T08:10:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,632 | py | import cv2
import torch
from model import U2NET
from torch.autograd import Variable
import numpy as np
from glob import glob
import os
def detect_single_face(face_cascade,img):
# Convert into grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
if(len(faces)==0):
print("Warming: no face detection, the portrait u2net will run on the whole image!")
return None
# filter to keep the largest face
wh = 0
idx = 0
for i in range(0,len(faces)):
(x,y,w,h) = faces[i]
if(wh<w*h):
idx = i
wh = w*h
return faces[idx]
# crop, pad and resize face region to 512x512 resolution
def crop_face(img, face):
# no face detected, return the whole image and the inference will run on the whole image
if(face is None):
return img
(x, y, w, h) = face
height,width = img.shape[0:2]
# crop the face with a bigger bbox
hmw = h - w
hpad = int(h/2)+1
wpad = int(w/2)+1
l,r,t,b = 0,0,0,0
left = x-wpad
if(left<0):
left = 0
l = wpad-x
right = x+w+wpad
if(right>width):
right = width
r = right-width
top = y - hpad
if(top<0):
top = 0
t = hpad-y
bottom = y+h+int(hpad*0.5)
if(bottom>height):
bottom = height
b = bottom-height
im_face = img[top:bottom,left:right]
if(len(im_face.shape)==2):
im_face = np.repeat(im_face[:,:,np.newaxis],(1,1,3))
im_face = np.pad(im_face,((t,b),(l,r),(0,0)),mode='constant',constant_values=((255,255),(255,255),(255,255)))
    # pad to a square shape to avoid face deformation after resizing
hf,wf = im_face.shape[0:2]
if(hf-2>wf):
wfp = int((hf-wf)/2)
im_face = np.pad(im_face,((0,0),(wfp,wfp),(0,0)),mode='constant',constant_values=((255,255),(255,255),(255,255)))
elif(wf-2>hf):
hfp = int((wf-hf)/2)
im_face = np.pad(im_face,((hfp,hfp),(0,0),(0,0)),mode='constant',constant_values=((255,255),(255,255),(255,255)))
# resize to have 512x512 resolution
im_face = cv2.resize(im_face, (512,512), interpolation = cv2.INTER_AREA)
return im_face
def normPRED(d):
ma = torch.max(d)
mi = torch.min(d)
dn = (d-mi)/(ma-mi)
return dn
def inference(net,input):
# normalize the input
tmpImg = np.zeros((input.shape[0],input.shape[1],3))
input = input/np.max(input)
tmpImg[:,:,0] = (input[:,:,2]-0.406)/0.225
tmpImg[:,:,1] = (input[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (input[:,:,0]-0.485)/0.229
# convert BGR to RGB
tmpImg = tmpImg.transpose((2, 0, 1))
tmpImg = tmpImg[np.newaxis,:,:,:]
tmpImg = torch.from_numpy(tmpImg)
# convert numpy array to torch tensor
tmpImg = tmpImg.type(torch.FloatTensor)
if torch.cuda.is_available():
tmpImg = Variable(tmpImg.cuda())
else:
tmpImg = Variable(tmpImg)
# inference
d1,d2,d3,d4,d5,d6,d7= net(tmpImg)
# normalization
pred = 1.0 - d1[:,0,:,:]
pred = normPRED(pred)
# convert torch tensor to numpy array
pred = pred.squeeze()
pred = pred.cpu().data.numpy()
del d1,d2,d3,d4,d5,d6,d7
return pred
def main():
# get the image path list for inference
im_list = glob('./test_data/test_portrait_images/your_portrait_im/*')
print("Number of images: ",len(im_list))
# indicate the output directory
out_dir = './test_data/test_portrait_images/your_portrait_results'
if(not os.path.exists(out_dir)):
os.mkdir(out_dir)
# Load the cascade face detection model
face_cascade = cv2.CascadeClassifier('./saved_models/face_detection_cv2/haarcascade_frontalface_default.xml')
# u2net_portrait path
model_dir = './saved_models/u2net_portrait/u2net_portrait.pth'
# load u2net_portrait model
net = U2NET(3,1)
net.load_state_dict(torch.load(model_dir))
if torch.cuda.is_available():
net.cuda()
net.eval()
# do the inference one-by-one
for i in range(0,len(im_list)):
print("--------------------------")
print("inferencing ", i, "/", len(im_list), im_list[i])
# load each image
img = cv2.imread(im_list[i])
height,width = img.shape[0:2]
face = detect_single_face(face_cascade,img)
im_face = crop_face(img, face)
im_portrait = inference(net,im_face)
# save the output
cv2.imwrite(out_dir+"/"+im_list[i].split('/')[-1][0:-4]+'.png',(im_portrait*255).astype(np.uint8))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
0d023197183142a0da5adb4fbb0229f4429a8fe4 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/48/usersdata/122/16246/submittedfiles/estatistica.py | 660f88790c989a5091f5670efe17839c069c155c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 853 | py | # -*- coding: utf-8 -*-
from __future__ import division
def media(lista):
soma = 0
for i in range(0,len(lista),1):
soma = soma + lista[i]
resultado = soma/len(lista)
return resultado
#Baseado na função acima, escreva a função para calcular o desvio padrão de uma lista
def des(lista):
a=0
for i in range (0,n,1):
a=a+((lista[i]-media(lista))**2
s=((1/(n-1))*a)**0.5
return s
#Por último escreva o programa principal, que pede a entrada e chama as funções criadas.
lista_a=[]
lista_b=[]
n=input ('digite o valor de n:')
for i in range (0,n,1):
lista_a.append(input('Digite um elemento A:'))
for i in range (0,n,1):
lista_b.append(input('Digite um elemento B:'))
media_a=media(a)
media_b=media(b)
des_a=des(a)
des_b=des(b)
print media(a)
print s(a)
print media(b)
print s(b) | [
"[email protected]"
] | |
a4c9a10fbe13412bf90987b9c58ecd040195d3d8 | f083fc8981e55565ce7696ec0a875646784db8de | /tests/test_tests.py | 69e3de77a215839b86283efc83573aeb0753c84a | [] | no_license | virginiais4lovers/blumpkin | d82887071f4b8f9cf5943b9cacab27e2ce7e1460 | 049f87fe0aeb79d0a39d94c72254592b15e8e5f6 | refs/heads/master | 2020-04-01T18:07:15.174452 | 2015-01-06T21:24:39 | 2015-01-06T21:24:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 369 | py | from __future__ import unicode_literals
import mock
from . import TestCase
from blumpkin import test
class TestTestCase(TestCase):
@mock.patch('blumpkin.test.subprocess')
def test_write(self, pytest):
result = test.run(
'blumpkin', ('xml', 'term-missing'), 'tests/'
)
self.assertEqual(result, pytest.call.return_value)
| [
"[email protected]"
] | |
caeade0a67e84ba808da9bbf6202788f0039a0a0 | 532dc8a48406f6e428990eff9d356b24307f3c43 | /NER_bert.py | f9b5a97f97c6ff5a9978a55958aa953d1692a53c | [] | no_license | yexing99/BERT-NER | c5f7bea705b08f3f4e752d8e171458b3f536f30a | a0c2ebb5cd0ad5b3301ceb1270ccb59ca0121a27 | refs/heads/master | 2020-05-07T10:39:32.627827 | 2019-04-10T00:57:12 | 2019-04-10T00:57:12 | 180,426,293 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,472 | py |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from tqdm import tqdm, trange
data = pd.read_csv("./data/NER/ner_dataset.csv", encoding="latin1").fillna(method="ffill")
data.tail(10)
# In[2]:
type(data)
# In[3]:
class SentenceGetter(object):
def __init__(self, data):
self.n_sent = 1
self.data = data
self.empty = False
agg_func = lambda s: [(w, p, t) for w, p, t in zip(s["Word"].values.tolist(),
s["POS"].values.tolist(),
s["Tag"].values.tolist())]
self.grouped = self.data.groupby("Sentence #").apply(agg_func)
self.sentences = [s for s in self.grouped]
def get_next(self):
try:
s = self.grouped["Sentence: {}".format(self.n_sent)]
self.n_sent += 1
return s
except:
return None
# In[4]:
getter = SentenceGetter(data)
# ### This is how the sentences in the dataset look like.
# In[5]:
sentences = [" ".join([s[0] for s in sent]) for sent in getter.sentences]
sentences[0]
# ### The sentences are annotated with the BIO-schema and the labels look like this.
# In[6]:
labels = [[s[2] for s in sent] for sent in getter.sentences]
print(labels[0])
# In[7]:
tags_vals = list(set(data["Tag"].values))
tag2idx = {t: i for i, t in enumerate(tags_vals)}
# In[8]:
tag2idx
# ### Prepare the sentences and labels
# Before we can start fine-tuning the model, we have to prepare the data set for the use with pytorch and bert.
# In[9]:
import torch
from torch.optim import Adam
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from pytorch_pretrained_bert import BertTokenizer, BertConfig
from pytorch_pretrained_bert import BertForTokenClassification, BertAdam
# In[10]:
from pytorch_pretrained_bert.tokenization import BertTokenizer
# In[11]:
MAX_LEN = 75 ##max length of token in sequence
bs = 32 ##batch size
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
torch.cuda.get_device_name(0)
# ### The Bert implementation comes with a pretrained tokenizer and a defined vocabulary. We load the one for the smallest pre-trained model, bert-base-uncased. Try also the cased variant, since it is well suited for NER.
# In[12]:
##load BertTokenizer class
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# In[13]:
from pathlib import Path
Path.home() / '.pytorch_pretrained_bert'
# In[14]:
tokenizer
# In[15]:
### tokenize sentences
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
print(tokenized_texts[0])
# In[16]:
#cut and pad the token and label sequences to our desired length.
input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in tokenized_texts],
maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
tags = pad_sequences([[tag2idx.get(l) for l in lab] for lab in labels],
maxlen=MAX_LEN, value=tag2idx["O"], padding="post",
dtype="long", truncating="post")
# In[17]:
tokenizer.vocab.items()
# ### The Bert model supports something called attention_mask, which is similar to the masking in keras. So here we create the mask to ignore the padded elements in the sequences.
# In[18]:
attention_masks = [[float(i>0) for i in ii] for ii in input_ids]
# In[19]:
len(attention_masks)
# In[20]:
tr_inputs, val_inputs, tr_tags, val_tags = train_test_split(input_ids, tags,
random_state=2018, test_size=0.1)
tr_masks, val_masks, _, _ = train_test_split(attention_masks, input_ids,
random_state=2018, test_size=0.1)
# Since we’re operating in pytorch, we have to convert the dataset to torch tensors.
# In[21]:
tr_inputs = torch.tensor(tr_inputs)
val_inputs = torch.tensor(val_inputs)
tr_tags = torch.tensor(tr_tags)
val_tags = torch.tensor(val_tags)
tr_masks = torch.tensor(tr_masks)
val_masks = torch.tensor(val_masks)
# In[22]:
input_ids[0]
# In[23]:
sentences[0]
# ### The last step is to define the dataloaders. We shuffle the data at training time with the RandomSampler and at test time we just pass them sequentially with the SequentialSampler.
# In[24]:
train_data = TensorDataset(tr_inputs, tr_masks, tr_tags)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=bs)
valid_data = TensorDataset(val_inputs, val_masks, val_tags)
valid_sampler = SequentialSampler(valid_data)
valid_dataloader = DataLoader(valid_data, sampler=valid_sampler, batch_size=bs)
# In[25]:
train_dataloader
# ## Setup the Bert model for finetuning
# The pytorch-pretrained-bert package provides a BertForTokenClassification class for token-level predictions. BertForTokenClassification is a fine-tuning model that wraps BertModel and adds token-level classifier on top of the BertModel. The token-level classifier is a linear layer that takes as input the last hidden state of the sequence. We load the pre-trained bert-base-uncased model and provide the number of possible labels.
# In[26]:
#init BertForTokenClassification class
## from_pretrained comes from BertPreTrainedModel
model = BertForTokenClassification.from_pretrained("bert-base-uncased", num_labels=len(tag2idx))
# Now we have to pass the model parameters to the GPU. ### why?
# In[27]:
model.cuda();
# Before we can start the fine-tuning process, we have to setup the optimizer and add the parameters it should update. A common choice is the Adam optimizer. We also add some weight_decay as regularization to the main weight matrices. If you have limited resources, you can also try to just train the linear classifier on top of Bert and keep all other weights fixed. This will still give you a good performance.
# In[28]:
FULL_FINETUNING = True
if FULL_FINETUNING:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
'weight_decay_rate': 0.0}
]
else:
param_optimizer = list(model.classifier.named_parameters())
optimizer_grouped_parameters = [{"params": [p for n, p in param_optimizer]}]
optimizer = Adam(optimizer_grouped_parameters, lr=3e-5)
# In[29]:
param_optimizer = list(model.classifier.named_parameters())
### Q1: where does model.classifier come from? From BertForTokenClassification,
### though the arguments passed to the classifier differ from its declaration.
## model is inherited from BertPreTrainedModel.from_pretrained;
## classifier is self.classifier = nn.Linear in BertForTokenClassification; named_parameters comes from nn.Module
## because from_pretrained is a classmethod, model is initialized with the from_pretrained arguments and
## has properties of both (BertForTokenClassification and BertPreTrainedModel)
# In[30]:
model.classifier
# In[31]:
# is there any place used "forward" in BertForTokenClassification?
# In[32]:
# in tokenization.py, couldn't find the declaration for cls: tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
# In[33]:
param_optimizer[1] ## Q3 which part is pre-trained?
# In[34]:
param_optimizer[0]
# In[35]:
model.config ## Q4 I can't figure out where to pass this config to model
# In[36]:
# Q5 what is this cls?
##def from_pretrained(cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None,
# from_tf=False, *inputs, **kwargs):
## cls refers to the class itself
# In[37]:
#Q6: *inputs, ** kwargs, and super
#def __init__(self, config, *inputs, **kwargs):
# super(BertPreTrainedModel, self).__init__()
## super here initializes the parent class of BertPreTrainedModel, which is nn.Module
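
# In[ ]:

# A minimal illustration (not from the original notebook) of the cls/super
# notes above: a classmethod receives the class itself as `cls`, so subclasses
# build instances of themselves, and super(...).__init__ runs the parent init.
class _Base(object):
    def __init__(self):
        self.ready = True
    @classmethod
    def from_pretrained(cls, name):
        obj = cls()        # cls is the (sub)class the method was called on
        obj.name = name
        return obj

class _Child(_Base):
    def __init__(self):
        super(_Child, self).__init__()  # initialize the parent class

print(type(_Child.from_pretrained("demo")))  # -> <class '__main__._Child'>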
# ### First we define some metrics, we want to track while training. We use the f1_score from the seqeval package. You can find more details here. And we use simple accuracy on a token level comparable to the accuracy in keras.
# In[38]:
from seqeval.metrics import f1_score
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=2).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
# In[ ]:
epochs = 5
max_grad_norm = 1.0
for _ in trange(epochs, desc="Epoch"):
# TRAIN loop
model.train()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(train_dataloader):
# add batch to gpu
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
# forward pass
loss = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
# backward pass
loss.backward()
# track train loss
tr_loss += loss.item()
nb_tr_examples += b_input_ids.size(0)
nb_tr_steps += 1
# gradient clipping
torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=max_grad_norm)
# update parameters
optimizer.step()
model.zero_grad()
# print train loss per epoch
print("Train loss: {}".format(tr_loss/nb_tr_steps))
# VALIDATION on validation set
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
predictions , true_labels = [], []
for batch in valid_dataloader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
with torch.no_grad():
tmp_eval_loss = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
logits = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask)
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
true_labels.append(label_ids)
tmp_eval_accuracy = flat_accuracy(logits, label_ids)
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += b_input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss/nb_eval_steps
print("Validation loss: {}".format(eval_loss))
print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
pred_tags = [tags_vals[p_i] for p in predictions for p_i in p]
valid_tags = [tags_vals[l_ii] for l in true_labels for l_i in l for l_ii in l_i]
print("F1-Score: {}".format(f1_score(pred_tags, valid_tags)))
# Evaluation
# In[ ]:
model.eval()
predictions = []
true_labels = []
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for batch in valid_dataloader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
with torch.no_grad():
tmp_eval_loss = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask, labels=b_labels)
logits = model(b_input_ids, token_type_ids=None,
attention_mask=b_input_mask)
logits = logits.detach().cpu().numpy()
predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
label_ids = b_labels.to('cpu').numpy()
true_labels.append(label_ids)
tmp_eval_accuracy = flat_accuracy(logits, label_ids)
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += b_input_ids.size(0)
nb_eval_steps += 1
pred_tags = [[tags_vals[p_i] for p_i in p] for p in predictions]
valid_tags = [[tags_vals[l_ii] for l_ii in l_i] for l in true_labels for l_i in l ]
print("Validation loss: {}".format(eval_loss/nb_eval_steps))
print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
print("Validation F1-Score: {}".format(f1_score(pred_tags, valid_tags)))
| [
"[email protected]"
] | |
eeaf45bf3db5b3349a80e28f68148f462f520202 | 698512c01048fcefcc14583089ef2e8c7962923a | /python_work/Chapter_7/rollercoaster.py | 3b4483f592aa7d6e2e7e8cc4bc95115eecb7f32a | [] | no_license | Miguel-Tirado/Python | c76cb9846c9a2b9c6b3c4827cdb95042f4e5d447 | 227def380c64095c3040c848aa035ac46d26d079 | refs/heads/main | 2023-04-16T12:15:03.301275 | 2021-04-30T16:39:48 | 2021-04-30T16:39:48 | 346,443,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | height = input("How tall are you, in inches? ")
height = int(height)
if height > 48 :
print("\nYou're tall enouph to ride!")
else:
print("\nYou'll be able to ride when you're a liitle older.") | [
"[email protected]"
] | |
a89afd1288717c32ee23e15ec54d8047335423b4 | 8ff6c3e513e17be6c51b484bed81d03150bdd175 | /2013-04-analytic/part1/ex471a.py | 24bddb89fc53b92d0344be1df0f62932c9d0bc86 | [] | no_license | ricbit/Oldies | f1a2ac520b64e43d11c250cc372d526e9febeedd | 2d884c61ac777605f7260cd4d36a13ed5a2c6a58 | refs/heads/master | 2023-04-27T20:35:19.485763 | 2023-04-26T04:45:44 | 2023-04-26T04:45:44 | 2,050,140 | 40 | 8 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | import math
def s(n):
k0 = n**(2/3.)
ans = 0
for k in xrange(0,int(k0)):
ans += k/float(n)*math.exp(-k*k/2.0/n)
return ans
for i in xrange(6):
print s(10**i)
| [
"[email protected]"
] | |
76348930d71974586001b6d3a68f23ce2df2995a | b313f1b7b3ae2162a44f416baba8357f0d052003 | /papers/GRN-NER/data_format_util.py | aa045957029e56d2566ef406271fb1a58df92eb8 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | microsoft/vert-papers | e7cae3dc790c01447a48caa0456f555120f20e84 | c47d103d872cf3db2859410211a6083e0d0caf63 | refs/heads/master | 2023-08-18T19:42:21.218463 | 2023-08-16T02:21:25 | 2023-08-16T02:21:25 | 198,793,756 | 248 | 93 | MIT | 2023-08-16T02:21:27 | 2019-07-25T08:48:16 | Python | UTF-8 | Python | false | false | 2,161 | py | import re
def digit_to_zero(s):
"""
checked
Replace every digit in a string by a zero.
"""
return re.sub('\d', '0', s)
def iob1_to_iob2(tags):
"""
checked
Check that tags have a valid IOB or IOB2/BIO format.
Tags in IOB1 format are converted to IOB2.
"""
for i, tag in enumerate(tags):
if tag == 'O':
continue
split = tag.split('-')
if len(split) != 2 or split[0] not in ['I', 'B']:
return False
if split[0] == 'B':
continue
elif i == 0 or tags[i - 1] == 'O': # conversion IOB1 to IOB2
tags[i] = 'B' + tag[1:]
elif tags[i - 1][1:] == tag[1:]:
continue
else: # conversion IOB1 to IOB2
tags[i] = 'B' + tag[1:]
return True
def iob2_to_iobes(tags):
"""
checked
IOB2 -> IOBES
"""
new_tags = []
for i, tag in enumerate(tags):
if tag == 'O':
new_tags.append(tag)
elif tag.split('-')[0] == 'B':
if i + 1 != len(tags) and \
tags[i + 1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('B-', 'S-'))
elif tag.split('-')[0] == 'I':
if i + 1 < len(tags) and \
tags[i + 1].split('-')[0] == 'I':
new_tags.append(tag)
else:
new_tags.append(tag.replace('I-', 'E-'))
else:
raise Exception('Invalid IOB format!')
return new_tags
def iobes_to_iob2(tags):
"""
checked
IOBES -> IOB2
"""
new_tags = []
for i, tag in enumerate(tags):
if tag.split('-')[0] == 'B':
new_tags.append(tag)
elif tag.split('-')[0] == 'I':
new_tags.append(tag)
elif tag.split('-')[0] == 'S':
new_tags.append(tag.replace('S-', 'B-'))
elif tag.split('-')[0] == 'E':
new_tags.append(tag.replace('E-', 'I-'))
elif tag.split('-')[0] == 'O':
new_tags.append(tag)
else:
raise Exception('Invalid format!')
return new_tags
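
# Small self-check (illustrative, not part of the original module): round-trip
# a tag sequence through the converters defined above.
if __name__ == "__main__":
    tags = ["I-PER", "I-PER", "O", "I-LOC"]  # IOB1-style input
    assert iob1_to_iob2(tags)                # in-place -> B-PER I-PER O B-LOC
    iobes = iob2_to_iobes(tags)              # -> B-PER E-PER O S-LOC
    assert iobes_to_iob2(iobes) == tags
    print(tags, iobes)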
| [
"[email protected]"
] | |
6c11072b4da33d33523686ffbec02ddab50fc37f | 4e3d2a4198b380136a9ecc3dfc57ba7ab446b1cd | /serctl/ctlser | 74c23e941255257323d36ef885a63cd66491d881 | [] | no_license | BackupTheBerlios/ser | f14376c2cbf596c5d1cced150f2c9d89590a77f9 | ebb6af399af6a0501b3b2e29b0b479106d27e1ae | refs/heads/master | 2020-05-18T16:10:14.236498 | 2011-02-07T13:13:37 | 2011-02-07T13:13:37 | 40,253,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | #!/usr/bin/env python
# -*- encoding: UTF-8 -*-
#
# $Id: ctlser,v 1.1 2006/01/18 17:49:20 hallik Exp $
#
# Copyright (C) 2005 iptelorg GmbH
#
# This is part of SER (SIP Express Router), a free SIP server project.
# You can redistribute it and/or modify it under the terms of GNU General
# Public License as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Created: 2005/12/21
# Last update: 2005/12/27
from serctl.main import main
import sys
main(sys.argv)
| [
"hallik"
] | hallik |
|
d815646acb16240a2be7587ebb21ba0eb184e25e | 55d6f893988cb04f6abe007a2f701101b264323a | /mysite/mysite/article/migrations/0004_auto_20190324_1906.py | 5c443df00bbe7ed093f467308343b1a8a16e24b3 | [] | no_license | 371524zhouha/pythondjango_zh | 8488cf5bef98a5d0fc63ad9c56e34f8a813794cb | 468af9ca98b8e14a8c1997ab186be2f6db314c47 | refs/heads/master | 2020-05-24T09:31:58.029873 | 2019-05-17T12:21:25 | 2019-05-17T12:21:25 | 187,208,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | # Generated by Django 2.1.4 on 2019-03-24 11:06
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('article', '0003_auto_20190323_1443'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('commentator', models.CharField(max_length=90)),
('body', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ('-created',),
},
),
migrations.AlterField(
model_name='articlepost',
name='created',
field=models.DateTimeField(default=datetime.datetime(2019, 3, 24, 11, 6, 47, 994382, tzinfo=utc)),
),
migrations.AddField(
model_name='comment',
name='article',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='article.ArticlePost'),
),
]
| [
"[email protected]"
] | |
4d4eea725222a65903abd31a019b38dc94824931 | ee8c9fc247f66a661913b6e89d2abc83f73ced88 | /crawdatatxt.py | 09fd86192bef8354120fbb7fead13a7b1ea6160c | [] | no_license | nguyendong07/crawldata | 8d721d17b5da907db4e9696563b44a06b02920dd | e9366db70959f3f58d36252065c6216d7ddc2cd1 | refs/heads/master | 2023-06-01T05:00:13.673440 | 2021-06-29T03:05:28 | 2021-06-29T03:05:28 | 353,557,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,025 | py | # import textwrap
# import requests
# import json
# import pyodbc
# import pandas as pd
# import urllib
#
# # headers = {'CLIENTAPIKEY': '5ce554c2-1332-481e-97c2-5856d9612433'}
# # a = 0;
# # detail_data = []
# # all_cover_url = []
# # # for x in range (int(22199/20)):
# # URL = "https://api-smartapp.namdinh.gov.vn/api/articles/?limit=20&offset=22180"
# # r = requests.get(url=URL, headers=headers)
# # data = r.json()
# # x = json.dumps(data)
# # json_object = json.loads(x)
# # n = len(json_object["results"]["data"])
# # for i in range(n):
# # category = json_object["results"]["data"][i]["category"]
# # cover_url = json_object["results"]["data"][i]["cover_url"]
# # place_name = json_object["results"]["data"][i]["place_name"]
# # title = json_object["results"]["data"][i]["title"]
# # content = json_object["results"]["data"][i]["content"]
# # extra_info = json_object["results"]["data"][i]["extra_info"]
# # website = json_object["results"]["data"][i]["website"]
# # phone_contact = json_object["results"]["data"][i]["phone_contact"]
# # latitude = json_object["results"]["data"][i]["latitude"]
# # longitude = json_object["results"]["data"][i]["longitude"]
# # date_start = json_object["results"]["data"][i]["date_start"]
# # time_start = json_object["results"]["data"][i]["time_start"]
# # date_end = json_object["results"]["data"][i]["date_end"]
# # time_end = json_object["results"]["data"][i]["time_end"]
# # data = [category,cover_url,place_name,title,content,extra_info,website,phone_contact,latitude,longitude,date_start,time_start,date_end,time_end]
# # print(cover_url)
# # all_cover_url.append(cover_url)
# # detail_data.append(data)
# # i = i+1;
# # # a = a + 20
# # b = 0;
# # for x in range(len(all_cover_url)):
# # if all_cover_url[b] is not None:
# # img_name = all_cover_url[x].split("/")[-1]
# # urllib.request.urlretrieve(all_cover_url[x], "C:/Users/Admin/Desktop/anh/" + img_name)
# # b = b + 1;
# # else:
# # b = b + 1;
# # for index, row in enumerate(detail_data):
# # # define an insert query with place holders for the values.
# # insert_query = textwrap.dedent('''
# # INSERT INTO dia_diem (category,
# # cover_url,
# # place_name,
# # title,content,
# # extra_info,
# # website,
# # phone_contact,
# # latitude,
# # longitude,
# # date_start,
# # time_start,
# # date_end,
# # time_end)
# #                                              VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?);
# #                                              ''')
# #             # define the values
# # values = (row[0], row[1], row[2], row[3],row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11], row[12], row[13])
# # # insert the data into the database
# # cursor.execute(insert_query, values)
# # # commit the inserts.
# # cnxn.commit()
# # # grab all the rows from the table
# # cursor.execute('SELECT * FROM dia_diem')
# # # for row in cursor:
# # # print(row)
# # # close the cursor and connection
# # cursor.close()
# # cnxn.close()
# # print("done!")
# detail_data = []
# _data = []
# server = 'ADMIN'
# database = 'NNLogin'
# cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';Trusted_Connection=yes;')
# cursor = cnxn.cursor()
# with open('GT_NOTICE_BOARD_TYPE.json') as json_file:
# data = json.load(json_file)
# print(data[1]['Icon'])
# detail_data.append(data[1]['Icon'])
# print(detail_data)
# n = len(data)
# for i in range(n):
#         Type_ID = str(data[i]['Type_ID'])
#         category = data[i]['category']
#         Icon = data[i]['Icon']
#         IsDelete = str(data[i]['IsDelete'])
# # detail_data.append(data)
# # print(detail_data)
# for index, row in enumerate(detail_data):
# # define an insert query with place holders for the values.
# insert_query = textwrap.dedent('''
# INSERT INTO loai_bien_bao (Type_ID,
# category,
# Icon,
# IsDelete
# )
# VALUES (?,?,?,?);
# ''')
# # define the values`
# values = (row[0], row[1], row[2], row[3])
# # insert the data into the database
# cursor.execute(insert_query, values)
# # commit the inserts.
# cnxn.commit()
# # grab all the rows from the table
# cursor.execute('SELECT * FROM loai_bien_bao')
# # for row in cursor:
# # print(row)
# # close the cursor and connection
# cursor.close()
# cnxn.close()
# print("done!")
#
#
#
# # print(len(data))
# # print(data[1]['Type_ID'])
#
#
#
### code: traffic sign types
import textwrap
import requests
import json
import pyodbc
import pandas as pd
import urllib
server = 'ADMIN'
database = 'NNLogin'
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';Trusted_Connection=yes;')
cursor = cnxn.cursor()
# headers = {'CLIENTAPIKEY': '5ce554c2-1332-481e-97c2-5856d9612433'}
# a = 0;
# detail_data = []
# all_cover_url = []
# with open('GT_NOTICE_BOARD.json', encoding='utf-8') as json_file:
# data = json.load(json_file)
# x = json.dumps(data)
# json_object = json.loads(x)
# n = len(json_object)
# for i in range(n):
# Type_ID = json_object_category["results"]["data"][i]["Type_ID"]
# Name = json_object_category["results"]["data"][i]["Name"]
# NameEN = json_object_category["results"]["data"][i]["NameEN"]
# Detail = json_object_category["results"]["data"][i]["Detail"]
# Icon = json_object_category["results"]["data"][i]["Icon"]
# UpdateDay = json_object_category["results"]["data"][i]["UpdateDay"]
# IsDelete = json_object_category["results"]["data"][i]["IsDelete"]
#
# # if str(category) == "Toàn bộ":
# # json_object_category["results"]["data"][i]["id"] = 3
# # print( json_object_category["results"]["data"][i])
# data = [Type_ID, Name, NameEN,Detail,Icon,UpdateDay,IsDelete]
# detail_data.append(data)
# for index, row in enumerate(detail_data):
# # define an insert query with place holders for the values.
# insert_query = textwrap.dedent('''
# INSERT INTO NoticeBoardDetail (Type_ID,
# Name,
# NameEN,
# Detail,
# Icon,
# UpdateDay,
# IsDelete)
# VALUES (?,?,?,?,?,?,?);
# ''')
# # define the values
# values = (row[0], row[1], row[2], row[3], row[4], row[5], row[6])
# # insert the data into the database
# cursor.execute(insert_query, values)
# # commit the inserts.
# cnxn.commit()
# # grab all the rows from the table
# cursor.execute('SELECT * FROM NoticeBoardDetail')
# # for row in cursor:
# # print(row)
# # close the cursor and connection
# cursor.close()
# cnxn.close()
# print("done!")
### code: load the category data
# import textwrap
# import requests
# import json
# import pyodbc
# import pandas as pd
# import urllib
# server = 'ADMIN'
# database = 'NNLogin'
# cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';Trusted_Connection=yes;')
# cursor = cnxn.cursor()
#
# headers = {'CLIENTAPIKEY': '5ce554c2-1332-481e-97c2-5856d9612433'}
# a = 0;
# detail_data = []
# all_cover_url = []
# with open('GT_NOTICE_BOARD_TYPE.json', encoding='utf-8') as json_file:
# data = json.load(json_file)
# x = json.dumps(data)
# json_object = json.loads(x)
# n = len(json_object)
## alternative approach
#
headers = {'CLIENTAPIKEY': '5ce554c2-1332-481e-97c2-5856d9612433'}
a = 0
detail_data = []
all_cover_url = []
for x in range(1):
URL = "https://api-smartapp.namdinh.gov.vn/api/articles/?limit=20&offset=22180"
r = requests.get(url=URL, headers=headers)
data = r.json()
    raw = json.dumps(data)  # avoid shadowing the loop variable x
    json_object_category = json.loads(raw)
n = len(json_object_category["results"]["data"])
a = a + 20
print(n)
for i in range(n):
category = json_object_category["results"]["data"][i]["category"]
cover_url = json_object_category["results"]["data"][i]["cover_url"]
place_name = json_object_category["results"]["data"][i]["place_name"]
title = json_object_category["results"]["data"][i]["title"]
content = json_object_category["results"]["data"][i]["content"]
extra_info = json_object_category["results"]["data"][i]["extra_info"]
website = json_object_category["results"]["data"][i]["website"]
phone_contact = json_object_category["results"]["data"][i]["phone_contact"]
latitude = json_object_category["results"]["data"][i]["latitude"]
longitude = json_object_category["results"]["data"][i]["longitude"]
date_start = json_object_category["results"]["data"][i]["date_start"]
time_start = json_object_category["results"]["data"][i]["time_start"]
date_end = json_object_category["results"]["data"][i]["date_end"]
time_end = json_object_category["results"]["data"][i]["time_end"]
if str(category) == "Điểm mua sắm":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 3
elif category == "Nhà hàng":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 1
elif category == "Bệnh viện":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 2
elif category == "Điểm đỗ xe":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 4
elif category == "Địa điểm nổi tiếng":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 5
elif category == "Di tích lịch sử":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 6
elif category == "Sự kiện dịp Tết":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 7
elif category == "Lễ hội":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 8
elif category == "Danh lam thắng cảnh":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 9
elif category == "Đền":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 10
elif category == "Chùa":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 11
elif category == "Hiệu thuốc":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 12
elif category == "Phòng khám":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 13
elif category == "Khách sạn":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 14
elif category == "Điểm đen giao thông":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 15
elif category == "Trạm thu phí":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 16
elif category == "Gara ô tô":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 17
elif category == "Trạm xăng":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 18
elif category == "Điểm ứng cứu TNGT":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 19
elif category == "Điểm khuyến mại":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 20
elif category == "Nhà thờ":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 21
elif category == "Bảo trợ xã hội":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 22
elif category == "Trạm y tế":
json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = 23
        else:
            # fallback so categories missing from the mapping above do not raise a KeyError
            json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"] = None
        DanhMucTheoLinhVucId = json_object_category["results"]["data"][i]["DanhMucTheoLinhVucId"]
data = [DanhMucTheoLinhVucId,title,cover_url,time_start,place_name,latitude,longitude,phone_contact,website,extra_info,content]
detail_data.append(data)
print(detail_data)
for index, row in enumerate(detail_data):
# define an insert query with place holders for the values.
insert_query = textwrap.dedent('''
INSERT INTO DiaDiem (DanhMucTheoLinhVucId,
TenDiaDiem,
AnhDaiDien,
GioMoCua,
DiaChi,
ToaDoX,
ToaDoY,
SoDienThoai,
TrangWeb,
ThongTinThem,
MoTa)
VALUES (?,?,?,?,?,?,?,?,?,?,?);
''')
# define the values
values = (row[0], row[1], row[2], row[3], row[4],row[5],row[6],row[7],row[8],row[9],row[10])
# insert the data into the database
cursor.execute(insert_query, values)
# commit the inserts.
cnxn.commit()
# grab all the rows from the table
cursor.execute('SELECT * FROM DiaDiem')
# for row in cursor:
# print(row)
# close the cursor and connection
cursor.close()
cnxn.close()
print("done!")
| [
"[email protected]"
] | |
9fd1a14ff5d323874f4c67fbfadaa56f0d311a2b | 1a0ee89a93d0699ce3352ed228d68f414e85fc3a | /app.py | 5b0b6b1e45d1b9ddbd3ee84078e2e68f14f7f6c7 | [] | no_license | herick024/webapp | f2a15807060b6b08e0982de971de930b187df4d7 | 479473f13ed0ea5a64bec7110806bd1cabdcc9e7 | refs/heads/master | 2020-12-24T05:51:25.179751 | 2016-11-11T04:33:57 | 2016-11-11T04:33:57 | 73,430,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | import web
render = web.template.render('views/')
urls = (
'/','index',
'/about','about'
)
class index:
def GET(self):
return render.index()
class about:
def GET(self):
        return render.about()  # assumes a views/about.html template exists; returning the class itself was a bug
if __name__ == '__main__':
app = web.application(urls,globals())
web.config.debug = True
app.run()
| [
"[email protected]"
] | |
2daf908821f129104172103afa5f796ce87117f6 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/str_cmp-4.py | d5784ad9b7a06692f4f8615dcc4a2eadc7e42809 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | $TypedVar = "Hello"
b:str = "World"
c:str = "ChocoPy"
def eq(a:str, b:str) -> bool:
return a == b
def neq(a:str, b:str) -> bool:
return a != b
print(eq(a,a))
print(eq(a,b))
print(neq(a,b))
print(neq(b,b))
print(eq(c,a))
print(neq(c,b))
| [
"[email protected]"
] | |
cb62d0ce8b32f6c17d8835d07e0b8e1216e6e29e | e25d6f06e927af969c023520710533b3da9e930f | /main.py | cd34cb999a0eb1313a2aa6524f10b2609905d871 | [] | no_license | mahajanyogita/Tic-Tac-Toe-Game | 448c563687739f55fdc0eee3ac752b22aebe980d | 4873af4dc8094deac2325a06fb9d6df5b1bcc646 | refs/heads/main | 2023-05-28T06:00:15.490215 | 2021-06-08T07:34:33 | 2021-06-08T07:34:33 | 374,921,333 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,877 | py | import board_formation
import insert_inboard
import check_winner
print("Lets Play TIC TAC TOE!\n")
while True:
player1 = input("Enter Player 1 NAME: ")
char1 = input("Enter character You Want (O/X) : ")
player2 = input("Enter Player 2 Name: ")
if char1 == 'X':
char2 = 'O'
else:
char2 = 'X'
    players = {char1: player1, char2: player2}
print("\nPLAYER 1: ", player1, "YOU WILL USE CHARACTER", char1)
print("\nPLAYER 2: ", player2, "YOU WILL USE CHARACTER", char2)
playBoard = {'1': ' ', '2': ' ', '3': ' ',
'4': ' ', '5': ' ', '6': ' ',
'7': ' ', '8': ' ', '9': ' '
}
board_char = board_formation.print_board(playBoard)
max_count = 9
    match = 3
    win = (False, None)  # (has_winner, winning_char) -- safe default for ties
while board_char.count(char1)+board_char.count(char2) < max_count:
        # for player 1
p1 = insert_inboard.Insert(player1, char1, playBoard)
playBoard = p1.game()
board_char = board_formation.print_board(playBoard)
if board_char.count(char1) >= match:
win = check_winner.winner(playBoard, char1)
if win[0]:
break
if board_char.count(char1)+board_char.count(char2) == max_count:
break
        # for player 2
        p2 = insert_inboard.Insert(player2, char2, playBoard)
        playBoard = p2.game()
        board_char = board_formation.print_board(playBoard)  # keep board_char in sync for player 2's checks
if board_char.count(char2) >= match:
win = check_winner.winner(playBoard, char2)
if win[0]:
break
if win[0]:
print("Congratulations!", players[win[1]], "\nYOU ARE THE WINNER")
else:
print("GAME OVER ! Tie")
    again = input("\nWant to Play AGAIN?\t Press 'Y'\nTo Quit \t\t\t press 'N'\n")
if again == 'N':
print("THANK YOU!")
break
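# The check_winner module is not included in this snippet. A minimal sketch of
# what its winner() function might look like, given the board layout above (a
# dict mapping keys '1'..'9' to ' ', 'X' or 'O'). Hypothetical helper, defined
# here but never called:
def winner_sketch(board, char):
    lines = [('1', '2', '3'), ('4', '5', '6'), ('7', '8', '9'),  # rows
             ('1', '4', '7'), ('2', '5', '8'), ('3', '6', '9'),  # columns
             ('1', '5', '9'), ('3', '5', '7')]                   # diagonals
    for x, y, z in lines:
        if board[x] == board[y] == board[z] == char:
            return True, char  # matches the win[0], win[1] usage above
    return False, None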
| [
"[email protected]"
] | |
5386f46ab977c23908fb45a28b1bce3084a6c75d | 167face5e34f69ba36b8a8d93306387dcaa50d24 | /sorted.py | 124b09ab9a5a2973c794325b4581657815c54410 | [] | no_license | william-cirico/python-study | 4fbe20936c46af6115f0d88ad861c71e6273db71 | 5923268fea4c78707fe82f1f609535a69859d0df | refs/heads/main | 2023-04-19T03:49:23.237829 | 2021-05-03T01:24:56 | 2021-05-03T01:24:56 | 309,492,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py | """
Sorted
sorted() is used for sorting.
"""
lista = [4, 7, 8, 1, 4, 2]
lista.sort()
print(lista)
numeros = [6, 1, 8, 2]
print(sorted(numeros))  # Always returns a list
print(numeros)  # Does not modify the iterable, unlike sort()
# Adding parameters
print(sorted(numeros, reverse=True))
# We can use sorted for more complex things
usuarios = [
{"username": "samuel", "tweets": ["Eu adoro bolos", "Eu adoro pizzas"]},
{"username": "carla", "tweets": ["Eu amo meu gato", "Eu adoro pizzas"]},
{"username": "jeff", "tweets": []},
{"username": "doggo", "tweets": ["Eu gosto de cachorros"]}
]
# Sorting by username
print(sorted(usuarios, key=lambda usuario: usuario["username"]))
# Sorting by number of tweets
print(sorted(usuarios, key=lambda usuario: len(usuario["tweets"])))
# Last example
musicas = [
{'Título': 'Thunderstruck', 'tocou': 3},
{'Título': 'Dead Skin Masc', 'tocou': 2},
{'Título': 'Back in Black', 'tocou': 4}
]
# Sorts from least played to most played
print(sorted(musicas, key=lambda musica: musica['tocou']))
print(sorted(musicas, key=lambda musica: musica['tocou'], reverse=True))
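# operator.itemgetter is an idiomatic alternative to a lambda as the key
# function; this added example gives the same result as the call above:
from operator import itemgetter
print(sorted(musicas, key=itemgetter('tocou'), reverse=True))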
| [
"[email protected]"
] | |
bff58ea8daf128d4fb61ece00543dc0674ee23f5 | 497dc09ca780a2786e4414bfc5b2d7c79d9df939 | /pset7/finance/application.py | be33ddfc4016ee1ed2e23a3bf7375fea20cc2eed | [] | no_license | aaron-xyz/CS50 | c50b585e5cc79f759b2e9d6dc59cdc6eab6496ba | e860828a9271e0fa60499f8aa3cc09cbee3309df | refs/heads/master | 2021-01-18T19:57:35.792881 | 2017-12-30T20:52:41 | 2017-12-30T20:52:41 | 100,542,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,828 | py | from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session, url_for
from flask_session import Session
from passlib.apps import custom_app_context as pwd_context
from tempfile import mkdtemp
from datetime import datetime
from helpers import *
# configure application
app = Flask(__name__)
# ensure responses aren't cached
if app.config["DEBUG"]:
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# custom filter
app.jinja_env.filters["usd"] = usd
# ref: https://github.com/ansible/ansible/pull/17251/files/d132552b6eef805e194d1f30b5d29d4ee6324a58?diff=split&short_path=3e8c7a3
app.jinja_env.filters["add"] = add
# configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
@app.route("/")
@login_required
def index():
# get id of active user
active_user = session["user_id"]
# query from users the active user info
user = db.execute("SELECT cash FROM users WHERE id = :userid", userid=active_user)
c = user[0]["cash"]
# query from stocks all the rows where user_id == active_user
rows = db.execute("SELECT symbol, name, date, SUM(shares) FROM stocks WHERE user_id = :userid GROUP BY symbol", userid=active_user)
# search symbols obtained in the query with lookup and update prices in rows
for row in rows:
vals = lookup(row["symbol"])
row["price"] = vals["price"]
row["total"] = row["SUM(shares)"]*vals["price"]
# render index template
return render_template("index.html", r=rows, cash=c)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock."""
# if user reached via POST (as via a form via POST)
if request.method == "POST":
# ensure symbol is entered else apology
if not request.form.get("symbol"):
return apology("Enter a symbol")
# ensure shares is entered else apology
if not request.form.get("shares"):
return apology("enter number of shares")
# lookup for symbol
vals = lookup(request.form.get("symbol"))
# ensure symbol exist else apology
if not vals:
return apology("symbol not valid")
# get user id
active_user = session["user_id"]
# query data of active user
user = db.execute("SELECT * FROM users WHERE id = :userid", userid=active_user)
# calculate the cost (shares times price)
cost = round(int(request.form.get("shares")) * vals["price"], 2)
# ensure cash is greater than cost
if user[0]["cash"] < cost:
return apology("can't afford that cost")
#substract cost from cash and UPDATE in users
user_update = db.execute("UPDATE users SET cash = :cash WHERE id = :userid", cash=user[0]["cash"] - cost, userid=active_user)
# INSERT the data of transaction in stocks (user id, symbol, name, shares, price, date)
# datetime: https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-xiii-dates-and-times
new_stock = db.execute("INSERT INTO stocks (symbol, name, shares, price, user_id, date) VALUES(:symbol, :name, :shares, :price, :userid, :now)", symbol=vals["symbol"], name=vals["name"], shares=request.form.get("shares"), price=vals["price"], userid=active_user, now=str(datetime.utcnow()))
# redirect to index
return redirect(url_for("index"))
# else buy reached via GET (as via a link via GET)
else:
return render_template("buy.html")
@app.route("/history")
@login_required
def history():
"""Show history of transactions."""
# get id of active user
active_user = session["user_id"]
# SELECT from "stocks" all the transactions of the current user
rows = db.execute("SELECT symbol, shares, price, date FROM stocks WHERE user_id = :userid ORDER BY date DESC", userid=active_user)
# render index template
return render_template("history.html", historial=rows)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in."""
# forget any user_id
session.clear()
# if user reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# ensure username was submitted
if not request.form.get("username"):
return apology("must provide username")
# ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password")
# query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username", username=request.form.get("username"))
# ensure username exists and password is correct
if len(rows) != 1 or not pwd_context.verify(request.form.get("password"), rows[0]["hash"]):
return apology("invalid username and/or password")
# remember which user has logged in
session["user_id"] = rows[0]["id"]
# redirect user to home page
return redirect(url_for("index"))
# else if user reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out."""
# forget any user_id
session.clear()
# redirect user to login form
return redirect(url_for("login"))
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
# # if user reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# ensure symbol is typed
if not request.form.get("symbol"):
return apology("Input a symbol")
# query and store that symbol in the database
vals = lookup(request.form.get("symbol"))
# check if that symbol exist
if not vals:
return apology("symbol does not exist")
# if exist render quoted
return render_template("quoted.html", symbol=vals["symbol"], name=vals["name"], price=vals["price"])
# else GET
else:
return render_template("quote.html")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user."""
# forget any user_id
session.clear()
# if user reached via POST (as via a form via POST)
if request.method == "POST":
# ensure username is not empty
if not request.form.get("username"):
return apology("a username is necessary")
# ensure password is not empty
if not request.form.get("password"):
return apology("a password is necessary")
# ensure confirmation is not empty
if not request.form.get("confirmation"):
return apology("Type your password again")
# ensure username is avalaibable - query in database
checkname = db.execute("SELECT * FROM users WHERE username = :username", username=request.form.get("username"))
if len(checkname) > 0:
return apology("username already taken")
# ensure password and confirmation are identical
if request.form.get("password") != request.form.get("confirmation"):
return apology("password does not match")
# insert username and password (hashed) - :new will just get the primary key value and not the whole row, remember that!
new = db.execute("INSERT INTO users (username, hash) VALUES(:username, :hashed)", username=request.form.get("username"), hashed=pwd_context.hash(request.form.get("password")))
if not new:
return apology("Couldn't create username")
# pull the whole row recently created in new
rows = db.execute("SELECT * FROM users WHERE id = :userid", userid=new)
# automatic login
session["user_id"] = rows[0]["id"]
# redirect to index
return redirect(url_for("index"))
# else user reached via GET (as via a link via GET)
else:
return render_template("register.html")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock."""
# if user reach via POST (as via a form via a POST)
if request.method == "POST":
# get id of active user
active_user = session["user_id"]
# ensure a symbol was submited else apology
if not request.form.get("symbol"):
return apology("select a symbol")
# ensure a share was submited else apology
if not request.form.get("shares"):
return apology("missing number of shares")
# query from users the active user info
user = db.execute("SELECT * FROM users WHERE id = :userid", userid=active_user)
# query from stocks all the rows where user_id == active_user
stock = db.execute("SELECT symbol, SUM(shares) FROM stocks WHERE symbol = :symbol AND user_id = :userid GROUP BY symbol",
symbol=request.form.get("symbol"), userid=active_user)
# lookup for updated values fo symbol
vals = lookup(request.form.get("symbol"))
# ensure shares does not exceeds current shares else apology
if int(stock[0]["SUM(shares)"]) < int(request.form.get("shares")):
return apology("not enough shares in your pocket")
# INSERT that transaction in the database STOCKS
sell = db.execute("INSERT INTO stocks (symbol, name, shares, price, user_id, date) VALUES(:symbol, :name, :shares, :price, :userid, :date)", symbol=request.form.get("symbol"), name=vals["name"], shares=0-int(request.form.get("shares")), price=vals["price"], userid=active_user, date=str(datetime.utcnow()))
# UPDATE USERS
renew = db.execute("UPDATE users SET cash = :cash WHERE id = :userid", cash=user[0]["cash"]+int(request.form.get("shares"))*vals["price"], userid=active_user)
# redirect to index
return redirect(url_for("index"))
# else user reached via GET (as via a link via GET)
else:
# get id of active user
active_user = session["user_id"]
# query for how the different symbols given the user
stock = db.execute("SELECT symbol, SUM(shares) FROM stocks WHERE user_id = :userid GROUP BY symbol", userid=active_user)
# render sell
return render_template("sell.html", symbols=stock)
@app.route("/settings", methods=["GET", "POST"])
@login_required
def settings():
""" change some values"""
# render settings template
return render_template("settings.html")
@app.route("/change_password", methods=["GET", "POST"])
@login_required
def change_password():
"""change the current password"""
# if reached via POST (as via a form)
if request.method == "POST":
# get id of active user
active_user = session["user_id"]
# ensure password check is not empty else paology
if not request.form.get("current-password"):
return apology("type your current password")
# ensure new password is not empty else apology
if not request.form.get("new-password"):
return apology("type your new password")
# ensure new password confirmation is not empty else apology
if not request.form.get("new-pass-confirmation"):
return apology("type your new password again")
# query for current password
current = db.execute("SELECT hash FROM users WHERE id = :userid", userid=active_user)
# check current password is correct
if not pwd_context.verify(request.form.get("current-password"), current[0]["hash"]):
return apology("current password wrong")
# check new password match
if request.form.get("new-password") != request.form.get("new-pass-confirmation"):
return apology("new password does not match")
# Update new password in the database
renew = db.execute("UPDATE users SET hash = :newhash WHERE id = :userid", newhash=pwd_context.hash(request.form.get("new-password")), userid=active_user)
# redirect to index
return redirect(url_for("index"))
# else reached via GET (as via a link)
else:
return render_template("change_password.html")
@app.route("/add_cash", methods=["GET", "POST"])
@login_required
def add_cash():
"""add more cash"""
# if reached via POST (as via a form)
if request.method == "POST":
# get id of active user
active_user = session["user_id"]
# ensure a quantity was entered else apology
if not request.form.get("new-cash"):
return apology("Enter a quantity")
# UPDATE cash in "users" adding the the cash entered to the current cash
row = db.execute("SELECT cash FROM users WHERE id = :userid", userid=active_user)
renew = db.execute("UPDATE users SET cash = :cash WHERE id = :userid", cash=int(request.form.get("new-cash"))+row[0]["cash"], userid=active_user)
updated_cash = db.execute("SELECT cash FROM users WHERE id = :userid", userid=active_user)
        # redirect to new_cash -- BUG: refreshing this page re-submits and adds the money again, so it stays disabled
#return render_template("new_cash.html", old_cash=row[0]["cash"], new_cash=updated_cash[0]["cash"], added_cash=request.form.get("new-cash"))
# redirect to index
return redirect(url_for("index"))
# else reached via GET (as via a link)
else:
# render add_cash
return render_template("add_cash.html")
| [
"[email protected]"
] | |
95576700b2a1d5ca06b2d8709e277a879181eff3 | c316dfca84f396c797091311d1b20d973a8fad43 | /pg_api_master/test_common/do_excel.py | 267d1f9848bc14752d8335050d22152b2f89a7a3 | [] | no_license | huididihappay/api | ae30be6f5d58b804550c36b393a6c43b28c3eab8 | 084b90786fca27c7a7cdeea5c5bc1e343a11478b | refs/heads/master | 2020-11-28T15:06:51.692155 | 2019-12-24T02:19:32 | 2019-12-24T02:19:32 | 229,853,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,914 | py | # -*-coding:utf-8-*-
# @time :2019/4/30 16:15
# Author :lemon_youran
# @Email :[email protected]
# @File :do_excel.PY
# @Software :PyCharm
from openpyxl import load_workbook
from test_common import project_path
from test_common.read_config import ReadConfig
class DoExcel:
"""该类完成测试数据的读取,以及测试结果的写回"""
def __init__(self, file_name, sheet_name):
self.file_name = file_name
self.sheet_name = sheet_name
    def read_excel(self, section):  # section name in the config file
"""读取excel文件"""
# 从配置文件获取读取那些测试数据
case_id = ReadConfig(project_path.conf_path).get_data(section, 'case_id')
        wb = load_workbook(self.file_name)  # open the workbook
        sheet = wb[self.sheet_name]  # locate the worksheet
        tel = self.get_tel()
test_data = []
for i in range(2, sheet.max_row+1):
#print(i)
row_data = {}
row_data['CaseId'] = sheet.cell(i, 1).value
row_data['Module'] = sheet.cell(i, 2).value
row_data['Title'] = sheet.cell(i, 3).value
row_data['Url'] = sheet.cell(i, 4).value
row_data['Method'] = sheet.cell(i, 5).value
            if sheet.cell(i, 6).value.find('tel') != -1:  # note find()'s return value; the 'in' operator would also work
                row_data['Params'] = sheet.cell(i, 6).value.replace('tel', str(tel))  # substitute tel; str() because the stored value is an int
self.update_tel(int(tel)+1)
else:
row_data['Params'] = sheet.cell(i, 6).value
row_data['sql'] = sheet.cell(i, 7).value
row_data['ExpectedResult'] = sheet.cell(i, 8).value
test_data.append(row_data)
wb.close()
final_data = []
        if case_id == 'all':
final_data = test_data
else:
for i in case_id:
final_data.append(test_data[i-1])
return final_data
def get_tel(self):
"""获取excel里面的手机号码"""
wb = load_workbook(self.file_name)
sheet = wb['tel']
wb.close()
return sheet.cell(1, 2).value
def update_tel(self, new_tel):
"""写回手号码"""
wb = load_workbook(self.file_name)
sheet = wb['tel']
sheet.cell(1, 2, new_tel)
wb.save(self.file_name)
wb.close()
def write_back(self, row, col, value):
"""写回测试结果"""
wb = load_workbook(self.file_name)
sheet = wb[self.sheet_name]
        sheet.cell(row, col).value = value  # the value parameter of this method
wb.save(self.file_name)
wb.close()
if __name__ == '__main__':
file_name = r'H:\Http_test\test_case\test_api.xlsx'
sheet_name = 'add_loan' # recharge
res = DoExcel(file_name, sheet_name).read_excel('AddLOANCASE')
print(res)
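    # Hypothetical follow-up (kept commented out so the demo stays read-only):
    # write a result back into column 9 of the first data row.
    # DoExcel(file_name, sheet_name).write_back(2, 9, 'PASS')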
| [
"[email protected]"
] | |
c1439ae2e2033a1beac08b8c7ba99e807da323fa | 4428d0b54dcd7125d7001d195d9b42a4ea16403b | /tastypie/resources.py | be384dd5a672fda6697f8da5e7848bd5e42b81b7 | [] | no_license | kthakur/dojo | eb7ee433a4fa2fd98825fa8807086cface37f984 | 33b2e3e877ac1afc4063ef5cb688e78502a56221 | refs/heads/master | 2020-05-18T01:20:26.123845 | 2012-08-12T11:40:26 | 2012-08-12T11:40:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79,349 | py | from __future__ import with_statement
import logging
import warnings
import django
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse, resolve, Resolver404, get_script_prefix
from django.db import transaction
from django.db.models.sql.constants import QUERY_TERMS, LOOKUP_SEP
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.utils.cache import patch_cache_control
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError, ImmediateHttpResponse
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import is_valid_jsonp_callback_value, dict_strip_unicode_keys, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
try:
set
except NameError:
from sets import Set as set
# The ``copy`` module became function-friendly in Python 2.5 and
# ``copycompat`` was added in post-1.1.1 Django (r11901).
try:
from django.utils.copycompat import deepcopy
except ImportError:
from copy import deepcopy
# If ``csrf_exempt`` isn't present, stub it.
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
def csrf_exempt(func):
return func
class NOT_AVAILABLE:
def __str__(self):
return 'No such data is available.'
class ResourceOptions(object):
"""
A configuration class for ``Resource``.
Provides sane defaults and the logic needed to augment these settings with
the internal ``class Meta`` used on ``Resource`` subclasses.
"""
serializer = Serializer()
authentication = Authentication()
authorization = ReadOnlyAuthorization()
cache = NoCache()
throttle = BaseThrottle()
validation = Validation()
paginator_class = Paginator
allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
list_allowed_methods = None
detail_allowed_methods = None
limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
max_limit = 1000
api_name = None
resource_name = None
urlconf_namespace = None
default_format = 'application/json'
filtering = {}
ordering = []
object_class = None
queryset = None
fields = []
excludes = []
include_resource_uri = True
include_absolute_url = False
always_return_data = False
collection_name = 'objects'
def __new__(cls, meta=None):
overrides = {}
# Handle overrides.
if meta:
for override_name in dir(meta):
# No internals please.
if not override_name.startswith('_'):
overrides[override_name] = getattr(meta, override_name)
allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
if overrides.get('list_allowed_methods', None) is None:
overrides['list_allowed_methods'] = allowed_methods
if overrides.get('detail_allowed_methods', None) is None:
overrides['detail_allowed_methods'] = allowed_methods
return object.__new__(type('ResourceOptions', (cls,), overrides))
class DeclarativeMetaclass(type):
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = {}
declared_fields = {}
# Inherit any fields from parent(s).
try:
parents = [b for b in bases if issubclass(b, Resource)]
# Simulate the MRO.
parents.reverse()
for p in parents:
parent_fields = getattr(p, 'base_fields', {})
for field_name, field_object in parent_fields.items():
attrs['base_fields'][field_name] = deepcopy(field_object)
except NameError:
pass
for field_name, obj in attrs.items():
# Look for ``dehydrated_type`` instead of doing ``isinstance``,
# which can break down if Tastypie is re-namespaced as something
# else.
if hasattr(obj, 'dehydrated_type'):
field = attrs.pop(field_name)
declared_fields[field_name] = field
attrs['base_fields'].update(declared_fields)
attrs['declared_fields'] = declared_fields
new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
opts = getattr(new_class, 'Meta', None)
new_class._meta = ResourceOptions(opts)
if not getattr(new_class._meta, 'resource_name', None):
# No ``resource_name`` provided. Attempt to auto-name the resource.
class_name = new_class.__name__
name_bits = [bit for bit in class_name.split('Resource') if bit]
resource_name = ''.join(name_bits).lower()
new_class._meta.resource_name = resource_name
if getattr(new_class._meta, 'include_resource_uri', True):
if not 'resource_uri' in new_class.base_fields:
new_class.base_fields['resource_uri'] = fields.CharField(readonly=True)
elif 'resource_uri' in new_class.base_fields and not 'resource_uri' in attrs:
del(new_class.base_fields['resource_uri'])
for field_name, field_object in new_class.base_fields.items():
if hasattr(field_object, 'contribute_to_class'):
field_object.contribute_to_class(new_class, field_name)
return new_class
class Resource(object):
"""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
__metaclass__ = DeclarativeMetaclass
def __init__(self, api_name=None):
self.fields = deepcopy(self.base_fields)
if not api_name is None:
self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError(name)
def wrap_view(self, view):
"""
Wraps methods so they can be called in a more functional way as well
as handling exceptions better.
Note that if ``BadRequest`` or an exception with a ``response`` attr
are seen, there is special handling to either present a message back
to the user or return the response traveling with the exception.
"""
@csrf_exempt
def wrapper(request, *args, **kwargs):
try:
callback = getattr(self, view)
response = callback(request, *args, **kwargs)
if request.is_ajax():
# IE excessively caches XMLHttpRequests, so we're disabling
# the browser cache here.
# See http://www.enhanceie.com/ie/bugs.asp for details.
patch_cache_control(response, no_cache=True)
return response
except (BadRequest, fields.ApiFieldError), e:
return http.HttpBadRequest(e.args[0])
except ValidationError, e:
return http.HttpBadRequest(', '.join(e.messages))
except Exception, e:
if hasattr(e, 'response'):
return e.response
# A real, non-expected exception.
# Handle the case where the full traceback is more helpful
# than the serialized error.
if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False):
raise
# Re-raise the error to get a proper traceback when the error
                # happened during a test case
if request.META.get('SERVER_NAME') == 'testserver':
raise
                # Rather than re-raising, we're going to do things similar to
# what Django does. The difference is returning a serialized
# error message.
return self._handle_500(request, e)
return wrapper
def _handle_500(self, request, exception):
import traceback
import sys
the_trace = '\n'.join(traceback.format_exception(*(sys.exc_info())))
response_class = http.HttpApplicationError
NOT_FOUND_EXCEPTIONS = (NotFound, ObjectDoesNotExist, Http404)
if isinstance(exception, NOT_FOUND_EXCEPTIONS):
response_class = HttpResponseNotFound
if settings.DEBUG:
data = {
"error_message": unicode(exception),
"traceback": the_trace,
}
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format))
# When DEBUG is False, send an error message to the admins (unless it's
# a 404, in which case we check the setting).
if not isinstance(exception, NOT_FOUND_EXCEPTIONS):
log = logging.getLogger('django.request.tastypie')
log.error('Internal Server Error: %s' % request.path, exc_info=sys.exc_info(), extra={'status_code': 500, 'request':request})
if django.VERSION < (1, 3, 0) and getattr(settings, 'SEND_BROKEN_LINK_EMAILS', False):
from django.core.mail import mail_admins
subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
try:
request_repr = repr(request)
except:
request_repr = "Request repr() unavailable"
message = "%s\n\n%s" % (the_trace, request_repr)
mail_admins(subject, message, fail_silently=True)
# Prep the data going out.
data = {
"error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."),
}
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format))
def _build_reverse_url(self, name, args=None, kwargs=None):
"""
A convenience hook for overriding how URLs are built.
See ``NamespacedModelResource._build_reverse_url`` for an example.
"""
return reverse(name, args=args, kwargs=kwargs)
def base_urls(self):
"""
The standard URLs this ``Resource`` should respond to.
"""
# Due to the way Django parses URLs, ``get_multiple`` won't work without
# a trailing slash.
return [
url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name="api_get_schema"),
url(r"^(?P<resource_name>%s)/set/(?P<pk_list>\w[\w/;-]*)/$" % self._meta.resource_name, self.wrap_view('get_multiple'), name="api_get_multiple"),
url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
]
def override_urls(self):
"""
A hook for adding your own URLs or overriding the default URLs.
"""
return []
@property
def urls(self):
"""
The endpoints this ``Resource`` responds to.
Mostly a standard URLconf, this is suitable for either automatic use
when registered with an ``Api`` class or for including directly in
a URLconf should you choose to.
"""
urls = self.override_urls() + self.base_urls()
urlpatterns = patterns('',
*urls
)
return urlpatterns
def determine_format(self, request):
"""
Used to determine the desired format.
Largely relies on ``tastypie.utils.mime.determine_format`` but here
as a point of extension.
"""
return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)
def serialize(self, request, data, format, options=None):
"""
Given a request, data and a desired format, produces a serialized
version suitable for transfer over the wire.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
options = options or {}
if 'text/javascript' in format:
# get JSONP callback name. default to "callback"
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
return self._meta.serializer.serialize(data, format, options)
def deserialize(self, request, data, format='application/json'):
"""
Given a request, data and a format, deserializes the given data.
It relies on the request properly sending a ``CONTENT_TYPE`` header,
falling back to ``application/json`` if not provided.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
deserialized = self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', 'application/json'))
return deserialized
def alter_list_data_to_serialize(self, request, data):
"""
A hook to alter list data just before it gets serialized & sent to the user.
        Useful for restructuring/renaming aspects of what's going to be
sent.
Should accommodate for a list of objects, generally also including
meta data.
"""
return data
def alter_detail_data_to_serialize(self, request, data):
"""
A hook to alter detail data just before it gets serialized & sent to the user.
        Useful for restructuring/renaming aspects of what's going to be
sent.
Should accommodate for receiving a single bundle of data.
"""
return data
def alter_deserialized_list_data(self, request, data):
"""
A hook to alter list data just after it has been received from the user &
gets deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def alter_deserialized_detail_data(self, request, data):
"""
A hook to alter detail data just after it has been received from the user &
gets deserialized.
Useful for altering the user data before any hydration is applied.
"""
return data
def dispatch_list(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) over
the entire list of resources.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('list', request, **kwargs)
def dispatch_detail(self, request, **kwargs):
"""
A view for handling the various HTTP methods (GET/POST/PUT/DELETE) on
a single resource.
Relies on ``Resource.dispatch`` for the heavy-lifting.
"""
return self.dispatch('detail', request, **kwargs)
def dispatch(self, request_type, request, **kwargs):
"""
Handles the common operations (allowed HTTP method, authentication,
throttling, method lookup) surrounding most CRUD interactions.
"""
allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None)
request_method = self.method_check(request, allowed=allowed_methods)
method = getattr(self, "%s_%s" % (request_method, request_type), None)
if method is None:
raise ImmediateHttpResponse(response=http.HttpNotImplemented())
self.is_authenticated(request)
self.is_authorized(request)
self.throttle_check(request)
# All clear. Process the request.
request = convert_post_to_put(request)
response = method(request, **kwargs)
# Add the throttled request.
self.log_throttled_access(request)
# If what comes back isn't a ``HttpResponse``, assume that the
# request was accepted and that some action occurred. This also
# prevents Django from freaking out.
if not isinstance(response, HttpResponse):
return http.HttpNoContent()
return response
def remove_api_resource_names(self, url_dict):
"""
Given a dictionary of regex matches from a URLconf, removes
``api_name`` and/or ``resource_name`` if found.
This is useful for converting URLconf matches into something suitable
for data lookup. For example::
Model.objects.filter(**self.remove_api_resource_names(matches))
"""
kwargs_subset = url_dict.copy()
for key in ['api_name', 'resource_name']:
try:
del(kwargs_subset[key])
except KeyError:
pass
return kwargs_subset
def method_check(self, request, allowed=None):
"""
Ensures that the HTTP method used on the request is allowed to be
handled by the resource.
Takes an ``allowed`` parameter, which should be a list of lowercase
HTTP methods to check against. Usually, this looks like::
# The most generic lookup.
self.method_check(request, self._meta.allowed_methods)
# A lookup against what's allowed for list-type methods.
self.method_check(request, self._meta.list_allowed_methods)
# A useful check when creating a new endpoint that only handles
# GET.
self.method_check(request, ['get'])
"""
if allowed is None:
allowed = []
request_method = request.method.lower()
allows = ','.join(map(str.upper, allowed))
if request_method == "options":
response = HttpResponse(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
if not request_method in allowed:
response = http.HttpMethodNotAllowed(allows)
response['Allow'] = allows
raise ImmediateHttpResponse(response=response)
return request_method
def is_authorized(self, request, object=None):
"""
Handles checking of permissions to see if the user has authorization
to GET, POST, PUT, or DELETE this resource. If ``object`` is provided,
the authorization backend can apply additional row-level permissions
checking.
"""
auth_result = self._meta.authorization.is_authorized(request, object)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if not auth_result is True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def is_authenticated(self, request):
"""
Handles checking if the user is authenticated and dealing with
unauthenticated users.
Mostly a hook, this uses class assigned to ``authentication`` from
``Resource._meta``.
"""
# Authenticate the request as needed.
auth_result = self._meta.authentication.is_authenticated(request)
if isinstance(auth_result, HttpResponse):
raise ImmediateHttpResponse(response=auth_result)
if not auth_result is True:
raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def throttle_check(self, request):
"""
Handles checking if the user should be throttled.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
identifier = self._meta.authentication.get_identifier(request)
# Check to see if they should be throttled.
if self._meta.throttle.should_be_throttled(identifier):
# Throttle limit exceeded.
raise ImmediateHttpResponse(response=http.HttpTooManyRequests())
def log_throttled_access(self, request):
"""
Handles the recording of the user's access for throttling purposes.
Mostly a hook, this uses class assigned to ``throttle`` from
``Resource._meta``.
"""
request_method = request.method.lower()
self._meta.throttle.accessed(self._meta.authentication.get_identifier(request), url=request.get_full_path(), request_method=request_method)
def build_bundle(self, obj=None, data=None, request=None):
"""
Given either an object, a data dictionary or both, builds a ``Bundle``
for use throughout the ``dehydrate/hydrate`` cycle.
If no object is provided, an empty object from
``Resource._meta.object_class`` is created so that attempts to access
``bundle.obj`` do not fail.
"""
if obj is None:
obj = self._meta.object_class()
return Bundle(obj=obj, data=data, request=request)
def build_filters(self, filters=None):
"""
Allows for the filtering of applicable objects.
        This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return filters
def apply_sorting(self, obj_list, options=None):
"""
Allows for the sorting of objects being returned.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
return obj_list
# URL-related methods.
def get_resource_uri(self, bundle_or_obj):
"""
This needs to be implemented at the user level.
A call to ``reverse()`` should be all that would be needed::
from django.core.urlresolvers import reverse
def get_resource_uri(self, bundle):
return reverse("api_dispatch_detail", kwargs={
'resource_name': self._meta.resource_name,
'pk': bundle.data['id'],
})
If you're using the :class:`~tastypie.api.Api` class to group your
URLs, you also need to pass the ``api_name`` together with the other
kwargs.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def get_resource_list_uri(self):
"""
Returns a URL specific to this resource's list endpoint.
"""
kwargs = {
'resource_name': self._meta.resource_name,
}
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
try:
return self._build_reverse_url("api_dispatch_list", kwargs=kwargs)
except NoReverseMatch:
return None
def get_via_uri(self, uri, request=None):
"""
This pulls apart the salient bits of the URI and populates the
resource via a ``obj_get``.
Optionally accepts a ``request``.
If you need custom behavior based on other portions of the URI,
simply override this method.
"""
prefix = get_script_prefix()
chomped_uri = uri
if prefix and chomped_uri.startswith(prefix):
chomped_uri = chomped_uri[len(prefix) - 1:]
try:
view, args, kwargs = resolve(chomped_uri)
except Resolver404:
raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
return self.obj_get(request=request, **self.remove_api_resource_names(kwargs))
# Data preparation.
def full_dehydrate(self, bundle):
"""
Given a bundle with an object instance, extract the information from it
to populate the resource.
"""
# Dehydrate each field.
for field_name, field_object in self.fields.items():
# A touch leaky but it makes URI resolution work.
if getattr(field_object, 'dehydrated_type', None) == 'related':
field_object.api_name = self._meta.api_name
field_object.resource_name = self._meta.resource_name
bundle.data[field_name] = field_object.dehydrate(bundle)
# Check for an optional method to do further dehydration.
method = getattr(self, "dehydrate_%s" % field_name, None)
if method:
bundle.data[field_name] = method(bundle)
bundle = self.dehydrate(bundle)
return bundle
def dehydrate(self, bundle):
"""
A hook to allow a final manipulation of data once all fields/methods
have built out the dehydrated data.
Useful if you need to access more than one dehydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
def full_hydrate(self, bundle):
"""
Given a populated bundle, distill it and turn it back into
a full-fledged object instance.
"""
if bundle.obj is None:
bundle.obj = self._meta.object_class()
bundle = self.hydrate(bundle)
for field_name, field_object in self.fields.items():
if field_object.readonly is True:
continue
# Check for an optional method to do further hydration.
method = getattr(self, "hydrate_%s" % field_name, None)
if method:
bundle = method(bundle)
if field_object.attribute:
value = field_object.hydrate(bundle)
# NOTE: We only get back a bundle when it is related field.
if isinstance(value, Bundle) and value.errors.get(field_name):
bundle.errors[field_name] = value.errors[field_name]
if value is not None or field_object.null:
# We need to avoid populating M2M data here as that will
# cause things to blow up.
if not getattr(field_object, 'is_related', False):
setattr(bundle.obj, field_object.attribute, value)
elif not getattr(field_object, 'is_m2m', False):
if value is not None:
setattr(bundle.obj, field_object.attribute, value.obj)
elif field_object.blank:
continue
elif field_object.null:
setattr(bundle.obj, field_object.attribute, value)
return bundle
def hydrate(self, bundle):
"""
A hook to allow an initial manipulation of data before all methods/fields
have built out the hydrated data.
Useful if you need to access more than one hydrated field or want
to annotate on additional data.
Must return the modified bundle.
"""
return bundle
def hydrate_m2m(self, bundle):
"""
Populate the ManyToMany data on the instance.
"""
if bundle.obj is None:
raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self)
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
if field_object.attribute:
# Note that we only hydrate the data, leaving the instance
# unmodified. It's up to the user's code to handle this.
# The ``ModelResource`` provides a working baseline
# in this regard.
bundle.data[field_name] = field_object.hydrate_m2m(bundle)
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
method = getattr(self, "hydrate_%s" % field_name, None)
if method:
method(bundle)
return bundle
def build_schema(self):
"""
Returns a dictionary of all the fields on the resource and some
properties about those fields.
Used by the ``schema/`` endpoint to describe what will be available.
"""
data = {
'fields': {},
'default_format': self._meta.default_format,
'allowed_list_http_methods': self._meta.list_allowed_methods,
'allowed_detail_http_methods': self._meta.detail_allowed_methods,
'default_limit': self._meta.limit,
}
if self._meta.ordering:
data['ordering'] = self._meta.ordering
if self._meta.filtering:
data['filtering'] = self._meta.filtering
for field_name, field_object in self.fields.items():
data['fields'][field_name] = {
'default': field_object.default,
'type': field_object.dehydrated_type,
'nullable': field_object.null,
'blank': field_object.blank,
'readonly': field_object.readonly,
'help_text': field_object.help_text,
'unique': field_object.unique,
}
if field_object.dehydrated_type == 'related':
if getattr(field_object, 'is_m2m', False):
related_type = 'to_many'
else:
related_type = 'to_one'
data['fields'][field_name]['related_type'] = related_type
return data
def dehydrate_resource_uri(self, bundle):
"""
For the automatically included ``resource_uri`` field, dehydrate
the URI for the given bundle.
Returns empty string if no URI can be generated.
"""
try:
return self.get_resource_uri(bundle)
except NotImplementedError:
return ''
except NoReverseMatch:
return ''
def generate_cache_key(self, *args, **kwargs):
"""
Creates a unique-enough cache key.
This is based off the current api_name/resource_name/args/kwargs.
"""
smooshed = []
for key, value in kwargs.items():
smooshed.append("%s=%s" % (key, value))
# Use a list plus a ``.join()`` because it's faster than concatenation.
return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(smooshed))
# Data access methods.
def get_object_list(self, request):
"""
A hook to allow making returning the list of available objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def apply_authorization_limits(self, request, object_list):
"""
Allows the ``Authorization`` class to further limit the object list.
Also a hook to customize per ``Resource``.
"""
if hasattr(self._meta.authorization, 'apply_limits'):
object_list = self._meta.authorization.apply_limits(request, object_list)
return object_list
def can_create(self):
"""
Checks to ensure ``post`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'post' in allowed
def can_update(self):
"""
Checks to ensure ``put`` is within ``allowed_methods``.
Used when hydrating related data.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'put' in allowed
def can_delete(self):
"""
Checks to ensure ``delete`` is within ``allowed_methods``.
"""
allowed = set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
return 'delete' in allowed
def apply_filters(self, request, applicable_filters):
"""
A hook to alter how the filters are applied to the object list.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_get_list(self, request=None, **kwargs):
"""
Fetches the list of objects available on the resource.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get_list(self, request=None, **kwargs):
"""
A version of ``obj_get_list`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('list', **kwargs)
obj_list = self._meta.cache.get(cache_key)
if obj_list is None:
obj_list = self.obj_get_list(request=request, **kwargs)
self._meta.cache.set(cache_key, obj_list)
return obj_list
def obj_get(self, request=None, **kwargs):
"""
Fetches an individual object on the resource.
This needs to be implemented at the user level. If the object can not
be found, this should raise a ``NotFound`` exception.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def cached_obj_get(self, request=None, **kwargs):
"""
A version of ``obj_get`` that uses the cache as a means to get
commonly-accessed data faster.
"""
cache_key = self.generate_cache_key('detail', **kwargs)
bundle = self._meta.cache.get(cache_key)
if bundle is None:
bundle = self.obj_get(request=request, **kwargs)
self._meta.cache.set(cache_key, bundle)
return bundle
def obj_create(self, bundle, request=None, **kwargs):
"""
Creates a new object based on the provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_update(self, bundle, request=None, **kwargs):
"""
Updates an existing object (or creates a new object) based on the
provided data.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete_list(self, request=None, **kwargs):
"""
Deletes an entire list of objects.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def obj_delete(self, request=None, **kwargs):
"""
Deletes a single object.
This needs to be implemented at the user level.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
"""
Extracts the common "which-format/serialize/return-response" cycle.
Mostly a useful shortcut/hook.
"""
desired_format = self.determine_format(request)
serialized = self.serialize(request, data, desired_format)
return response_class(content=serialized, content_type=build_content_type(desired_format), **response_kwargs)
def error_response(self, errors, request):
if request:
desired_format = self.determine_format(request)
else:
desired_format = self._meta.default_format
serialized = self.serialize(request, errors, desired_format)
response = http.HttpBadRequest(content=serialized, content_type=build_content_type(desired_format))
raise ImmediateHttpResponse(response=response)
def is_valid(self, bundle, request=None):
"""
Handles checking if the data provided by the user is valid.
Mostly a hook, this uses class assigned to ``validation`` from
``Resource._meta``.
If validation fails, an error is raised with the error messages
serialized inside it.
"""
errors = self._meta.validation.is_valid(bundle, request)
if errors:
bundle.errors[self._meta.resource_name] = errors
return False
return True
def rollback(self, bundles):
"""
Given the list of bundles, delete all objects pertaining to those
bundles.
This needs to be implemented at the user level. No exceptions should
be raised if possible.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
# Views.
def get_list(self, request, **kwargs):
"""
Returns a serialized list of resources.
Calls ``obj_get_list`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
# TODO: Uncached for now. Invalidation that works for everyone may be
# impossible.
objects = self.obj_get_list(request=request, **self.remove_api_resource_names(kwargs))
sorted_objects = self.apply_sorting(objects, options=request.GET)
paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_list_uri(), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)
to_be_serialized = paginator.page()
# Dehydrate the bundles in preparation for serialization.
bundles = [self.build_bundle(obj=obj, request=request) for obj in to_be_serialized['objects']]
to_be_serialized['objects'] = [self.full_dehydrate(bundle) for bundle in bundles]
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized)
def get_detail(self, request, **kwargs):
"""
Returns a single serialized resource.
Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
try:
obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return http.HttpNotFound()
except MultipleObjectsReturned:
return http.HttpMultipleChoices("More than one resource is found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
return self.create_response(request, bundle)
def put_list(self, request, **kwargs):
"""
Replaces a collection of resources with another collection.
Calls ``obj_delete_list`` to clear out the collection, then ``obj_create``
with the provided data to create the new collection.
Return ``HttpNoContent`` (204 No Content) if
``Meta.always_return_data = False`` (default).
Return ``HttpAccepted`` (202 Accepted) if
``Meta.always_return_data = True``.
"""
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_list_data(request, deserialized)
if not 'objects' in deserialized:
raise BadRequest("Invalid data sent.")
self.obj_delete_list(request=request, **self.remove_api_resource_names(kwargs))
bundles_seen = []
for object_data in deserialized['objects']:
bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request)
# Attempt to be transactional, deleting any previously created
# objects if validation fails.
try:
self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
bundles_seen.append(bundle)
except ImmediateHttpResponse:
self.rollback(bundles_seen)
raise
if not self._meta.always_return_data:
return http.HttpNoContent()
else:
to_be_serialized = {}
to_be_serialized['objects'] = [self.full_dehydrate(bundle) for bundle in bundles_seen]
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
def put_detail(self, request, **kwargs):
"""
Either updates an existing resource or creates a new one with the
provided data.
Calls ``obj_update`` with the provided data first, but falls back to
``obj_create`` if the object does not already exist.
If a new resource is created, return ``HttpCreated`` (201 Created).
If ``Meta.always_return_data = True``, there will be a populated body
of serialized data.
If an existing resource is modified and
``Meta.always_return_data = False`` (default), return ``HttpNoContent``
(204 No Content).
If an existing resource is modified and
``Meta.always_return_data = True``, return ``HttpAccepted`` (202
Accepted).
"""
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
try:
updated_bundle = self.obj_update(bundle, request=request, **self.remove_api_resource_names(kwargs))
if not self._meta.always_return_data:
return http.HttpNoContent()
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpAccepted)
except (NotFound, MultipleObjectsReturned):
updated_bundle = self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
location = self.get_resource_uri(updated_bundle)
if not self._meta.always_return_data:
return http.HttpCreated(location=location)
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def post_list(self, request, **kwargs):
"""
Creates a new resource/object with the provided data.
Calls ``obj_create`` with the provided data and returns a response
with the new resource's location.
If a new resource is created, return ``HttpCreated`` (201 Created).
If ``Meta.always_return_data = True``, there will be a populated body
of serialized data.
"""
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
updated_bundle = self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
location = self.get_resource_uri(updated_bundle)
if not self._meta.always_return_data:
return http.HttpCreated(location=location)
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def post_detail(self, request, **kwargs):
"""
Creates a new subcollection of the resource under a resource.
This is not implemented by default because most people's data models
aren't self-referential.
If a new resource is created, return ``HttpCreated`` (201 Created).
"""
return http.HttpNotImplemented()
def delete_list(self, request, **kwargs):
"""
Destroys a collection of resources/objects.
Calls ``obj_delete_list``.
If the resources are deleted, return ``HttpNoContent`` (204 No Content).
"""
self.obj_delete_list(request=request, **self.remove_api_resource_names(kwargs))
return http.HttpNoContent()
def delete_detail(self, request, **kwargs):
"""
Destroys a single resource/object.
Calls ``obj_delete``.
If the resource is deleted, return ``HttpNoContent`` (204 No Content).
If the resource did not exist, return ``Http404`` (404 Not Found).
"""
try:
self.obj_delete(request=request, **self.remove_api_resource_names(kwargs))
return http.HttpNoContent()
except NotFound:
return http.HttpNotFound()
def patch_list(self, request, **kwargs):
"""
Updates a collection in-place.
The exact behavior of ``PATCH`` against a list resource is still a matter of
some debate in REST circles, and the ``PATCH`` RFC doesn't prescribe list
semantics. So the
behavior this method implements (described below) is something of a
stab in the dark. It's mostly cribbed from GData, with a smattering
of ActiveResource-isms and maybe even an original idea or two.
The ``PATCH`` format is one that's similar to the response returned from
a ``GET`` on a list resource::
{
"objects": [{object}, {object}, ...],
"deleted_objects": ["URI", "URI", "URI", ...],
}
For each object in ``objects``:
* If the dict does not have a ``resource_uri`` key then the item is
considered "new" and is handled like a ``POST`` to the resource list.
* If the dict has a ``resource_uri`` key and the ``resource_uri`` refers
to an existing resource then the item is an update; it's treated
like a ``PATCH`` to the corresponding resource detail.
* If the dict has a ``resource_uri`` but the resource *doesn't* exist,
then this is considered to be a create-via-``PUT``.
Each entry in ``deleted_objects`` refers to the resource URI of an existing
resource to be deleted; each is handled like a ``DELETE`` to the relevant
resource.
In any case:
* If there's a resource URI it *must* refer to a resource of this
type. It's an error to include a URI of a different resource.
* ``PATCH`` is all or nothing. If a single sub-operation fails, the
entire request will fail and all resources will be rolled back.
"""
request = convert_post_to_patch(request)
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
if "objects" not in deserialized:
raise BadRequest("Invalid data sent.")
if len(deserialized["objects"]) and 'put' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
for data in deserialized["objects"]:
# If there's a resource_uri then this is either an
# update-in-place or a create-via-PUT.
if "resource_uri" in data:
uri = data.pop('resource_uri')
try:
obj = self.get_via_uri(uri, request=request)
# The object does exist, so this is an update-in-place.
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
self.update_in_place(request, bundle, data)
except (ObjectDoesNotExist, MultipleObjectsReturned):
# The object referenced by resource_uri doesn't exist,
# so this is a create-by-PUT equivalent.
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data))
self.obj_create(bundle, request=request)
else:
# There's no resource URI, so this is a create call just
# like a POST to the list resource.
data = self.alter_deserialized_detail_data(request, data)
bundle = self.build_bundle(data=dict_strip_unicode_keys(data))
self.obj_create(bundle, request=request)
if len(deserialized.get('deleted_objects', [])) and 'delete' not in self._meta.detail_allowed_methods:
raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
for uri in deserialized.get('deleted_objects', []):
obj = self.get_via_uri(uri, request=request)
self.obj_delete(request=request, _obj=obj)
return http.HttpAccepted()
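# A concrete (illustrative) ``PATCH`` body for a list endpoint, combining
# one create, one update-in-place and one delete; URIs assume a hypothetical
# ``note`` resource:
#
#   {
#       "objects": [
#           {"title": "New note"},
#           {"resource_uri": "/api/v1/note/1/", "title": "Renamed note"}
#       ],
#       "deleted_objects": ["/api/v1/note/2/"]
#   }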
def patch_detail(self, request, **kwargs):
"""
Updates a resource in-place.
Calls ``obj_update``.
If the resource is updated, return ``HttpAccepted`` (202 Accepted).
If the resource did not exist, return ``HttpNotFound`` (404 Not Found).
"""
request = convert_post_to_patch(request)
# We want to be able to validate the update, but we can't just pass
# the partial data into the validator since all data needs to be
# present. Instead, we basically simulate a PUT by pulling out the
# original data and updating it in-place.
# So first pull out the original object. This is essentially
# ``get_detail``.
try:
obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return http.HttpNotFound()
except MultipleObjectsReturned:
return http.HttpMultipleChoices("More than one resource is found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
# Now update the bundle in-place.
deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
self.update_in_place(request, bundle, deserialized)
if not self._meta.always_return_data:
return http.HttpAccepted()
else:
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
return self.create_response(request, bundle, response_class=http.HttpAccepted)
def update_in_place(self, request, original_bundle, new_data):
"""
Update the object in original_bundle in-place using new_data.
"""
original_bundle.data.update(**dict_strip_unicode_keys(new_data))
# Now we've got a bundle with the new data sitting in it and we're
# basically in the same spot as a PUT request. So the rest of this
# function is cribbed from put_detail.
self.alter_deserialized_detail_data(request, original_bundle.data)
return self.obj_update(original_bundle, request=request, pk=original_bundle.obj.pk)
def get_schema(self, request, **kwargs):
"""
Returns a serialized form of the schema of the resource.
Calls ``build_schema`` to generate the data. This method only responds
to HTTP GET.
Should return a HttpResponse (200 OK).
"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
self.log_throttled_access(request)
return self.create_response(request, self.build_schema())
def get_multiple(self, request, **kwargs):
"""
Returns a serialized list of resources based on the identifiers
from the URL.
Calls ``obj_get`` to fetch only the objects requested. This method
only responds to HTTP GET.
Should return a HttpResponse (200 OK).
"""
self.method_check(request, allowed=['get'])
self.is_authenticated(request)
self.throttle_check(request)
# Rip apart the list then iterate.
obj_pks = kwargs.get('pk_list', '').split(';')
objects = []
not_found = []
for pk in obj_pks:
try:
obj = self.obj_get(request, pk=pk)
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
objects.append(bundle)
except ObjectDoesNotExist:
not_found.append(pk)
object_list = {
'objects': objects,
}
if len(not_found):
object_list['not_found'] = not_found
self.log_throttled_access(request)
return self.create_response(request, object_list)
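# ``get_multiple`` backs URLs such as ``/api/v1/note/set/1;3;5/`` (the exact
# pattern comes from ``base_urls``); an illustrative response where pk 5 is
# missing:
#
#   {"objects": [{...}, {...}], "not_found": ["5"]}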
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
def __new__(cls, name, bases, attrs):
meta = attrs.get('Meta')
if meta and hasattr(meta, 'queryset'):
setattr(meta, 'object_class', meta.queryset.model)
new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
include_fields = getattr(new_class._meta, 'fields', [])
excludes = getattr(new_class._meta, 'excludes', [])
field_names = new_class.base_fields.keys()
for field_name in field_names:
if field_name == 'resource_uri':
continue
if field_name in new_class.declared_fields:
continue
if len(include_fields) and not field_name in include_fields:
del(new_class.base_fields[field_name])
if len(excludes) and field_name in excludes:
del(new_class.base_fields[field_name])
# Add in the new fields.
new_class.base_fields.update(new_class.get_fields(include_fields, excludes))
if getattr(new_class._meta, 'include_absolute_url', True):
if not 'absolute_url' in new_class.base_fields:
new_class.base_fields['absolute_url'] = fields.CharField(attribute='get_absolute_url', readonly=True)
elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
del(new_class.base_fields['absolute_url'])
return new_class
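# A minimal, illustrative resource showing how the metaclass-driven field
# selection above plays out (``Note`` and its fields are assumptions):
#
#   class NoteResource(ModelResource):
#       class Meta:
#           queryset = Note.objects.all()   # also sets Meta.object_class
#           excludes = ['is_active']        # removed from base_fields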
class ModelResource(Resource):
"""
A subclass of ``Resource`` designed to work with Django's ``Models``.
This class will introspect a given ``Model`` and build a field list based
on the fields found on the model (excluding relational fields).
Given that it is aware of Django's ORM, it also handles the CRUD data
operations of the resource.
"""
__metaclass__ = ModelDeclarativeMetaclass
@classmethod
def should_skip_field(cls, field):
"""
Given a Django model field, return if it should be included in the
contributed ApiFields.
"""
# Ignore certain fields (related fields).
if getattr(field, 'rel'):
return True
return False
@classmethod
def api_field_from_django_field(cls, f, default=fields.CharField):
"""
Returns the field type that would likely be associated with each
Django type.
"""
result = default
if f.get_internal_type() in ('DateField', 'DateTimeField'):
result = fields.DateTimeField
elif f.get_internal_type() in ('BooleanField', 'NullBooleanField'):
result = fields.BooleanField
elif f.get_internal_type() in ('FloatField',):
result = fields.FloatField
elif f.get_internal_type() in ('DecimalField',):
result = fields.DecimalField
elif f.get_internal_type() in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField'):
result = fields.IntegerField
elif f.get_internal_type() in ('FileField', 'ImageField'):
result = fields.FileField
elif f.get_internal_type() == 'TimeField':
result = fields.TimeField
# TODO: Perhaps enable these via introspection. The reason they're not enabled
# by default is the very different ``__init__`` they have over
# the other fields.
# elif f.get_internal_type() == 'ForeignKey':
# result = ForeignKey
# elif f.get_internal_type() == 'ManyToManyField':
# result = ManyToManyField
return result
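# For instance (``Note`` being a hypothetical model):
#
#   >>> ModelResource.api_field_from_django_field(Note._meta.get_field('created'))
#   <class 'tastypie.fields.DateTimeField'>
#
# ``TextField`` has no branch above, so it falls through to the ``default``
# (``CharField``); relational fields never reach here because
# ``should_skip_field`` filters them out.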
@classmethod
def get_fields(cls, fields=None, excludes=None):
"""
Given any explicit fields to include and fields to exclude, add
additional fields based on the associated model.
"""
final_fields = {}
fields = fields or []
excludes = excludes or []
if not cls._meta.object_class:
return final_fields
for f in cls._meta.object_class._meta.fields:
# If the field name is already present, skip
if f.name in cls.base_fields:
continue
# If field is not present in explicit field listing, skip
if fields and f.name not in fields:
continue
# If field is in exclude list, skip
if excludes and f.name in excludes:
continue
if cls.should_skip_field(f):
continue
api_field_class = cls.api_field_from_django_field(f)
kwargs = {
'attribute': f.name,
'help_text': f.help_text,
}
if f.null is True:
kwargs['null'] = True
kwargs['unique'] = f.unique
if not f.null and f.blank is True:
kwargs['default'] = ''
kwargs['blank'] = True
if f.get_internal_type() == 'TextField':
kwargs['default'] = ''
if f.has_default():
kwargs['default'] = f.default
if getattr(f, 'auto_now', False):
kwargs['default'] = f.auto_now
if getattr(f, 'auto_now_add', False):
kwargs['default'] = f.auto_now_add
final_fields[f.name] = api_field_class(**kwargs)
final_fields[f.name].instance_name = f.name
return final_fields
def check_filtering(self, field_name, filter_type='exact', filter_bits=None):
"""
Given a field name, an optional filter type and an optional list of
additional relations, determine if a field can be filtered on.
If a filter does not meet the needed conditions, it should raise an
``InvalidFilterError``.
If the filter meets the conditions, a list of attribute names (not
field names) will be returned.
"""
if filter_bits is None:
filter_bits = []
if not field_name in self._meta.filtering:
raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)
# Check to see if it's an allowed lookup type.
if not self._meta.filtering[field_name] in (ALL, ALL_WITH_RELATIONS):
# Must be an explicit whitelist.
if not filter_type in self._meta.filtering[field_name]:
raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name))
if self.fields[field_name].attribute is None:
raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % field_name)
# Check to see if it's a relational lookup and if that's allowed.
if len(filter_bits):
if not getattr(self.fields[field_name], 'is_related', False):
raise InvalidFilterError("The '%s' field does not support relations." % field_name)
if not self._meta.filtering[field_name] == ALL_WITH_RELATIONS:
raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % field_name)
# Recursively descend through the remaining lookups in the filter,
# if any. We should ensure that all along the way, we're allowed
# to filter on that field by the related resource.
related_resource = self.fields[field_name].get_related_resource(None)
return [self.fields[field_name].attribute] + related_resource.check_filtering(filter_bits[0], filter_type, filter_bits[1:])
return [self.fields[field_name].attribute]
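# An illustrative walk-through, assuming ``user`` is a related field (with
# ``attribute = 'user'``) whose ``Meta.filtering`` entry is
# ``ALL_WITH_RELATIONS`` and whose related resource allows ``username``:
#
#   >>> resource.check_filtering('user', 'exact', ['username'])
#   ['user', 'username']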
def filter_value_to_python(self, value, field_name, filters, filter_expr,
filter_type):
"""
Turn the string ``value`` into a python object.
"""
# Simple values
if value in ['true', 'True', True]:
value = True
elif value in ['false', 'False', False]:
value = False
elif value in ('nil', 'none', 'None', None):
value = None
# Split on ',' if not empty string and either an in or range filter.
if filter_type in ('in', 'range') and len(value):
if hasattr(filters, 'getlist'):
value = []
for part in filters.getlist(filter_expr):
value.extend(part.split(','))
else:
value = value.split(',')
return value
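# Examples of the coercion above (values as they arrive from a querystring):
#
#   'true'  -> True
#   'None'  -> None
#   '1,2,3' with filter_type='in' -> ['1', '2', '3']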
def build_filters(self, filters=None):
"""
Given a dictionary of filters, create the necessary ORM-level filters.
Keys should be resource fields, **NOT** model fields.
Valid values are either a list of Django filter types (i.e.
``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the
``ALL_WITH_RELATIONS`` constant.
"""
# At the declarative level:
# filtering = {
# 'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'],
# 'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'],
# 'resource_field_name_3': ALL,
# 'resource_field_name_4': ALL_WITH_RELATIONS,
# ...
# }
# Accepts the filters as a dict. None by default, meaning no filters.
if filters is None:
filters = {}
qs_filters = {}
if hasattr(self._meta, 'queryset'):
# Get the possible query terms from the current QuerySet.
query_terms = self._meta.queryset.query.query_terms.keys()
else:
query_terms = QUERY_TERMS.keys()
for filter_expr, value in filters.items():
filter_bits = filter_expr.split(LOOKUP_SEP)
field_name = filter_bits.pop(0)
filter_type = 'exact'
if not field_name in self.fields:
# It's not a field we know about. Move along citizen.
continue
if len(filter_bits) and filter_bits[-1] in query_terms:
filter_type = filter_bits.pop()
lookup_bits = self.check_filtering(field_name, filter_type, filter_bits)
value = self.filter_value_to_python(value, field_name, filters, filter_expr, filter_type)
db_field_name = LOOKUP_SEP.join(lookup_bits)
qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
qs_filters[qs_filter] = value
return dict_strip_unicode_keys(qs_filters)
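# An end-to-end sketch, assuming a resource field ``author`` whose
# ``attribute`` is ``user`` and whose filtering allows relations:
#
#   >>> resource.build_filters({'author__username__startswith': 'ja'})
#   {'user__username__startswith': 'ja'}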
def apply_sorting(self, obj_list, options=None):
"""
Given a dictionary of options, apply some ORM-level sorting to the
provided ``QuerySet``.
Looks for the ``order_by`` key and handles either ascending (just the
field name) or descending (the field name with a ``-`` in front).
The field name should be the resource field, **NOT** model field.
"""
if options is None:
options = {}
parameter_name = 'order_by'
if not 'order_by' in options:
if not 'sort_by' in options:
# Nothing to alter the order. Return what we've got.
return obj_list
else:
warnings.warn("'sort_by' is a deprecated parameter. Please use 'order_by' instead.")
parameter_name = 'sort_by'
order_by_args = []
if hasattr(options, 'getlist'):
order_bits = options.getlist(parameter_name)
else:
order_bits = options.get(parameter_name)
if not isinstance(order_bits, (list, tuple)):
order_bits = [order_bits]
for order_by in order_bits:
order_by_bits = order_by.split(LOOKUP_SEP)
field_name = order_by_bits[0]
order = ''
if order_by_bits[0].startswith('-'):
field_name = order_by_bits[0][1:]
order = '-'
if not field_name in self.fields:
# It's not a field we know about. Move along citizen.
raise InvalidSortError("No matching '%s' field for ordering on." % field_name)
if not field_name in self._meta.ordering:
raise InvalidSortError("The '%s' field does not allow ordering." % field_name)
if self.fields[field_name].attribute is None:
raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)
order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + order_by_bits[1:])))
return obj_list.order_by(*order_by_args)
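# For instance, ``?order_by=-author__username`` (with ``author`` mapping to
# the model attribute ``user``) yields
# ``obj_list.order_by('-user__username')``, provided ``author`` is listed in
# ``Meta.ordering``.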
def apply_filters(self, request, applicable_filters):
"""
An ORM-specific implementation of ``apply_filters``.
The default simply applies the ``applicable_filters`` as ``**kwargs``,
but should make it possible to do more advanced things.
"""
return self.get_object_list(request).filter(**applicable_filters)
def get_object_list(self, request):
"""
An ORM-specific implementation of ``get_object_list``.
Returns a queryset that may have been limited by other overrides.
"""
return self._meta.queryset._clone()
def obj_get_list(self, request=None, **kwargs):
"""
An ORM-specific implementation of ``obj_get_list``.
Takes an optional ``request`` object, whose ``GET`` dictionary can be
used to narrow the query.
"""
filters = {}
if hasattr(request, 'GET'):
# Grab a mutable copy.
filters = request.GET.copy()
# Update with the provided kwargs.
filters.update(kwargs)
applicable_filters = self.build_filters(filters=filters)
try:
base_object_list = self.apply_filters(request, applicable_filters)
return self.apply_authorization_limits(request, base_object_list)
except ValueError:
raise BadRequest("Invalid resource lookup data provided (mismatched type).")
def obj_get(self, request=None, **kwargs):
"""
An ORM-specific implementation of ``obj_get``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
try:
base_object_list = self.get_object_list(request).filter(**kwargs)
object_list = self.apply_authorization_limits(request, base_object_list)
stringified_kwargs = ', '.join(["%s=%s" % (k, v) for k, v in kwargs.items()])
if len(object_list) <= 0:
raise self._meta.object_class.DoesNotExist("Couldn't find an instance of '%s' which matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
elif len(object_list) > 1:
raise MultipleObjectsReturned("More than one '%s' matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
return object_list[0]
except ValueError:
raise NotFound("Invalid resource lookup data provided (mismatched type).")
def obj_create(self, bundle, request=None, **kwargs):
"""
An ORM-specific implementation of ``obj_create``.
"""
bundle.obj = self._meta.object_class()
for key, value in kwargs.items():
setattr(bundle.obj, key, value)
bundle = self.full_hydrate(bundle)
self.is_valid(bundle, request)
if bundle.errors:
self.error_response(bundle.errors, request)
# Save FKs just in case.
self.save_related(bundle)
# Save parent
bundle.obj.save()
# Now pick up the M2M bits.
m2m_bundle = self.hydrate_m2m(bundle)
self.save_m2m(m2m_bundle)
return bundle
def obj_update(self, bundle, request=None, skip_errors=False, **kwargs):
"""
An ORM-specific implementation of ``obj_update``.
"""
if not bundle.obj or not bundle.obj.pk:
# Attempt to hydrate data from kwargs before doing a lookup for the object.
# This step is needed so certain values (like datetime) will pass model validation.
try:
bundle.obj = self.get_object_list(bundle.request).model()
bundle.data.update(kwargs)
bundle = self.full_hydrate(bundle)
lookup_kwargs = kwargs.copy()
for key in kwargs.keys():
if key == 'pk':
continue
elif getattr(bundle.obj, key, NOT_AVAILABLE) is not NOT_AVAILABLE:
lookup_kwargs[key] = getattr(bundle.obj, key)
else:
del lookup_kwargs[key]
except Exception:
# If there is trouble hydrating the data, fall back to just
# using kwargs by itself (usually it only contains a "pk" key
# and this will work fine).
lookup_kwargs = kwargs
try:
bundle.obj = self.obj_get(bundle.request, **lookup_kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
bundle = self.full_hydrate(bundle)
self.is_valid(bundle, request)
if bundle.errors and not skip_errors:
self.error_response(bundle.errors, request)
# Save FKs just in case.
self.save_related(bundle)
# Save the main object.
bundle.obj.save()
# Now pick up the M2M bits.
m2m_bundle = self.hydrate_m2m(bundle)
self.save_m2m(m2m_bundle)
return bundle
def obj_delete_list(self, request=None, **kwargs):
"""
An ORM-specific implementation of ``obj_delete_list``.
Takes optional ``kwargs``, which can be used to narrow the query.
"""
base_object_list = self.get_object_list(request).filter(**kwargs)
authed_object_list = self.apply_authorization_limits(request, base_object_list)
if hasattr(authed_object_list, 'delete'):
# It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
authed_object_list.delete()
else:
for authed_obj in authed_object_list:
authed_obj.delete()
def obj_delete(self, request=None, **kwargs):
"""
An ORM-specific implementation of ``obj_delete``.
Takes optional ``kwargs``, which are used to narrow the query to find
the instance.
"""
obj = kwargs.pop('_obj', None)
if not hasattr(obj, 'delete'):
try:
obj = self.obj_get(request, **kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
obj.delete()
def patch_list(self, request, **kwargs):
"""
An ORM-specific implementation of ``patch_list``.
Necessary because PATCH should be atomic (all-success or all-fail)
and the only way to do this neatly is at the database level.
"""
with transaction.commit_on_success():
return super(ModelResource, self).patch_list(request, **kwargs)
def rollback(self, bundles):
"""
An ORM-specific implementation of ``rollback``.
Given the list of bundles, delete all models pertaining to those
bundles.
"""
for bundle in bundles:
if bundle.obj and getattr(bundle.obj, 'pk', None):
bundle.obj.delete()
def save_related(self, bundle):
"""
Handles the saving of related non-M2M data.
Assigning ``child.parent = parent`` & then calling
``child.save()`` isn't good enough to make sure the ``parent``
is saved.
To get around this, we go through all our related fields &
call ``save`` on them if they have related, non-M2M data.
M2M data is handled by the ``ModelResource.save_m2m`` method.
"""
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_related', False):
continue
if getattr(field_object, 'is_m2m', False):
continue
if not field_object.attribute:
continue
if field_object.blank and field_name not in bundle.data:
continue
# Get the object.
try:
related_obj = getattr(bundle.obj, field_object.attribute)
except ObjectDoesNotExist:
related_obj = None
# Because sometimes it's ``None`` & that's OK.
if related_obj:
if field_object.related_name:
if not bundle.obj.pk:
bundle.obj.save()
setattr(related_obj, field_object.related_name, bundle.obj)
related_obj.save()
setattr(bundle.obj, field_object.attribute, related_obj)
def save_m2m(self, bundle):
"""
Handles the saving of related M2M data.
Due to the way Django works, the M2M data must be handled after the
main instance, which is why this isn't a part of the main ``save`` bits.
Currently slightly inefficient in that it will clear out the whole
relation and recreate the related data as needed.
"""
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_m2m', False):
continue
if not field_object.attribute:
continue
if field_object.readonly:
continue
# Get the manager.
related_mngr = getattr(bundle.obj, field_object.attribute)
if hasattr(related_mngr, 'clear'):
# Clear it out, just to be safe.
related_mngr.clear()
related_objs = []
for related_bundle in bundle.data[field_name]:
related_bundle.obj.save()
related_objs.append(related_bundle.obj)
related_mngr.add(*related_objs)
def get_resource_uri(self, bundle_or_obj):
"""
Handles generating a resource URI for a single resource.
Uses the model's ``pk`` in order to create the URI.
"""
kwargs = {
'resource_name': self._meta.resource_name,
}
if isinstance(bundle_or_obj, Bundle):
kwargs['pk'] = bundle_or_obj.obj.pk
else:
kwargs['pk'] = bundle_or_obj.id
if self._meta.api_name is not None:
kwargs['api_name'] = self._meta.api_name
return self._build_reverse_url("api_dispatch_detail", kwargs=kwargs)
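# For a bundle wrapping an object with ``pk=1`` on a hypothetical ``note``
# resource under the ``v1`` api, this reverses to something like
# ``/api/v1/note/1/`` (the prefix depends on how the ``Api`` is mounted in
# the root urlconf).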
class NamespacedModelResource(ModelResource):
"""
A ModelResource subclass that respects Django namespaces.
"""
def _build_reverse_url(self, name, args=None, kwargs=None):
namespaced = "%s:%s" % (self._meta.urlconf_namespace, name)
return reverse(namespaced, args=args, kwargs=kwargs)
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
def convert_post_to_VERB(request, verb):
"""
Force Django to process the VERB.
"""
if request.method == verb:
if hasattr(request, '_post'):
del(request._post)
del(request._files)
try:
request.method = "POST"
request._load_post_and_files()
request.method = verb
except AttributeError:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = verb
setattr(request, verb, request.POST)
return request
def convert_post_to_put(request):
return convert_post_to_VERB(request, verb='PUT')
def convert_post_to_patch(request):
return convert_post_to_VERB(request, verb='PATCH')
| [
"[email protected]"
] | |
503459a0e9fbecf95a1353bf2eee1e122a965527 | 6e1bdb4dc9e4f97ccfdd261a8110738e2aea65c8 | /source/demo/vgg16_bidirectional_lstm_hi_dim_train_predict.py | 38d143fc9562353c53199bfc10282f1c3b89b1b5 | [] | no_license | nimaaghli/keras-Image-sequence-classifier | 85d826e3e2ff87bdae044d96132d325a10cf1b43 | d4541d732926b04a865a4fd27ef500a6b91a5859 | refs/heads/master | 2020-04-19T12:01:18.712357 | 2019-01-29T23:21:32 | 2019-01-29T23:21:32 | 168,182,885 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,763 | py | import numpy as np
from keras import backend as K
import sys
import os
def main():
testsetCount = 5
accuracies = [0] * testsetCount
for testid in range(testsetCount):
print(testid)
K.set_image_dim_ordering('tf')
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from keras_video_classifier.library.recurrent_networks import VGG16BidirectionalLSTMVideoClassifier
from keras_video_classifier.library.utility.plot_utils import plot_and_save_history
from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf
data_set_name = 'AM_pics_' + str(testid)
input_dir_path = os.path.join(os.path.dirname(__file__), 'AM_data')
output_dir_path = os.path.join(os.path.dirname(__file__), 'models', data_set_name)
report_dir_path = os.path.join(os.path.dirname(__file__), 'reports', data_set_name)
np.random.seed(42)
# This line downloads the UCF-101 video files if they are not already available in the very_large_data folder.
# load_ucf(input_dir_path)
classifier = VGG16BidirectionalLSTMVideoClassifier()
history = classifier.fit(data_dir_path=input_dir_path, model_dir_path=output_dir_path, vgg16_include_top=False,
data_set_name=data_set_name, from_picture=True)
plot_and_save_history(history, VGG16BidirectionalLSTMVideoClassifier.model_name,
report_dir_path + '/' + VGG16BidirectionalLSTMVideoClassifier.model_name + '-hi-dim-history.png')
print("predicting now !!!!!")
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from keras_video_classifier.library.recurrent_networks import VGG16BidirectionalLSTMVideoClassifier
from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf, scan_ucf_with_labels
vgg16_include_top = False
data_dir_path = os.path.join(os.path.dirname(__file__), 'AM_data')
model_dir_path = os.path.join(os.path.dirname(__file__), 'models', data_set_name)
config_file_path = VGG16BidirectionalLSTMVideoClassifier.get_config_file_path(model_dir_path,
vgg16_include_top=vgg16_include_top)
weight_file_path = VGG16BidirectionalLSTMVideoClassifier.get_weight_file_path(model_dir_path,
vgg16_include_top=vgg16_include_top)
print("Reading weights from :", weight_file_path)
print("Reading Config from :", config_file_path)
np.random.seed(33)
# load_ucf(data_dir_path)
predictor = VGG16BidirectionalLSTMVideoClassifier()
predictor.load_model(config_file_path, weight_file_path)
videos = scan_ucf_with_labels(data_dir_path, [label for (label, label_index) in predictor.labels.items()], testid)
video_file_path_list = np.array([file_path for file_path in videos.keys()])
np.random.shuffle(video_file_path_list)
correct_count = 0
count = 0
for video_file_path in video_file_path_list:
label = videos[video_file_path]
predicted_label = predictor.predict(video_file_path, from_picture=True)
print('predicted: ' + predicted_label + ' actual: ' + label)
correct_count = correct_count + 1 if label == predicted_label else correct_count
count += 1
accuracy = correct_count / count
print('accuracy: ', accuracy)
accuracies[testid] = accuracy
print(accuracies)
print(sum(accuracies) / len(accuracies))
if __name__ == '__main__':
main()
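# Assumed on-disk layout for the cross-validation loop above (an educated
# guess from ``data_set_name = 'AM_pics_<testid>'``; adjust to the real
# dataset):
#
#   demo/AM_data/AM_pics_0/<class_label>/<sample frames or video>
#   demo/models/AM_pics_0/    (weights + config written by ``fit``)
#   demo/reports/AM_pics_0/   (training-history plot)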
| [
"[email protected]"
] | |
0fa1f9fb2a8b2502f1ddabc06e2fc22f77ab3e89 | e20a5139f89938536a78215852019bec1b1a049e | /homepage/urls.py | ab8fe820970e58916cdc9349a22dfe8784003001 | [] | no_license | varunnkrishna/digitalmarketing | ecb0177064c1b802c30bd2b29d0311c8cd67f7cb | 700c88a094fd11968f738850ffdcdb0cae748060 | refs/heads/master | 2022-11-06T09:04:33.763191 | 2020-06-20T09:47:43 | 2020-06-20T09:47:43 | 273,397,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.homepage_view, name='homepage_view'),
]
| [
"[email protected]"
] |