| column | dtype | range / classes |
| --- | --- | --- |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–288 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1 |
| author_id | string | length 1–132 |
e234a8079711be2c3d06150ede58ce02370a578b
|
c75ec82316ed5322c5844912ce9c528c24360b9f
|
/nsd1905/devweb/myansible/webadmin/admin.py
|
dcb81958f29fbd102b6bac0302d385ab3985e950
|
[] |
no_license
|
MrZhangzhg/nsd2019
|
a94cde22f2e4bd648bb9e56ca63827f558f3c083
|
54f6d2c7b348a69f13ad5f38f2fbdc8207528749
|
refs/heads/master
| 2021-08-22T17:38:27.697675 | 2020-02-22T08:36:21 | 2020-02-22T08:36:21 | 183,539,489 | 21 | 24 | null | 2020-05-17T12:07:55 | 2019-04-26T02:06:16 |
HTML
|
UTF-8
|
Python
| false | false | 167 |
py
|
from django.contrib import admin
from .models import HostGroup, Host, Module, Argument
for item in [HostGroup, Host, Module, Argument]:
admin.site.register(item)
|
[
"[email protected]"
] | |
111b0e3fdbd6beabd602738595a0fdf949089ff2
|
b65cfcda05fd72350c7b9e11e5995cc1d10fdd75
|
/shop/models.py
|
df6a76b04ed4d1f1473b36d641881c259f5e0b06
|
[] |
no_license
|
gauraviit1/pahal_project_2
|
f4e6a2cf1cfd613088ad27344279460bb72c9786
|
bc89c3848359ae0b95cb55c24d6fe24d637caabd
|
refs/heads/master
| 2021-04-28T21:20:44.614522 | 2017-01-01T09:57:25 | 2017-01-01T09:57:25 | 77,773,709 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,480 |
py
|
from django.db import models
from django.core.urlresolvers import reverse
from django.utils.functional import lazy
from PIL import Image
from django.contrib.postgres.fields import HStoreField
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from decimal import Decimal
# Create your models here.
class Cateogry(models.Model):
name = models.CharField(max_length=200, db_index=True)
slug = models.SlugField(db_index=True, unique=True)
class Meta:
ordering = ['name']
verbose_name = 'cateogry'
verbose_name_plural = 'cateogries'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('shop:product_list_by_cateogry', args=[self.slug])
def save(self, *args, **kwargs):
for field_name in ['name',]:
val = getattr(self, field_name, False)
if val:
setattr(self, field_name, val.capitalize())
super(Cateogry, self).save(*args, **kwargs)
class Product(models.Model):
cateogry = models.ForeignKey('Cateogry', related_name='products')
name = models.CharField(max_length=200, db_index=True)
slug = models.SlugField(max_length=200, db_index=True)
image = models.ImageField(upload_to="products/%Y/%m/%d", blank=True)
description = models.TextField(blank=True)
price = models.DecimalField(max_digits=10, decimal_places=2)
stock = models.PositiveIntegerField()
available = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['name']
index_together = [('id', 'slug')]
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('shop:product_detail', args=[self.id, self.slug])
class Attribute(models.Model):
product = models.ForeignKey('Product', related_name="patt")
weight = models.DecimalField(max_digits=7, decimal_places=3, blank=True, null=True)
waist_size = models.PositiveSmallIntegerField(blank=True, null=True)
size = models.CharField(max_length=2, blank=True, null=True)
def clean(self, *args, **kwargs):
super(Attribute, self).clean(*args, **kwargs)
if self.weight == Decimal('0.350'):
raise ValidationError({'weight': _('Cannot use this value')})
class Meta:
unique_together = ('product', 'weight')
|
[
"[email protected]"
] | |
2c4ab74cda2680598623c66912579b5d2540ef70
|
edf510cc5bbbe24469d8ff262c022b33b4d80a75
|
/tacotron2/model/tacotron2.py
|
fafca0078fcb2bc687a7f48b30a31e19137b81ac
|
[
"Apache-2.0"
] |
permissive
|
rheehot/Tacotron2
|
e8b8a4be614708800b10b9fa7829264407510fa8
|
ddbe55b426397d40cadd14f5040c55ba7c25615d
|
refs/heads/master
| 2022-12-26T14:13:39.966498 | 2020-10-06T18:34:57 | 2020-10-06T18:34:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,577 |
py
|
# -*- coding: utf-8 -*-
# Soohwan Kim @sooftware
# This source code is licensed under the Apache 2.0 License license found in the
# LICENSE file in the root directory of this source tree
import torch.nn as nn
from torch import Tensor
from typing import Optional
from tacotron2.model.encoder import Encoder
from tacotron2.model.decoder import Decoder
from tacotron2.model.postnet import PostNet
class Tacotron2(nn.Module):
""" Neural Speech-To-Text Models called Tacotron2 """
def __init__(self, args) -> None:
super(Tacotron2, self).__init__()
self.encoder = Encoder(
vocab_size=args.vocab_size,
embedding_dim=args.embedding_dim,
encoder_lstm_dim=args.encoder_lstm_dim,
num_lstm_layers=args.num_encoder_lstm_layers,
conv_dropout_p=args.conv_dropout_p,
num_conv_layers=args.num_encoder_conv_layers,
conv_kernel_size=args.encoder_conv_kernel_size,
lstm_bidirectional=args.encoder_lstm_bidirectional,
device=args.device
)
self.decoder = Decoder(
num_mel_bins=args.num_mel_bins,
prenet_dim=args.prenet_dim,
decoder_lstm_dim=args.decoder_lstm_dim,
attn_lstm_dim=args.attn_lstm_dim,
embedding_dim=args.embedding_dim,
attn_dim=args.attn_dim,
location_conv_filter_size=args.location_conv_filter_size,
location_conv_kernel_size=args.location_conv_kernel_size,
prenet_dropout_p=args.prenet_dropout_p,
attn_dropout_p=args.attn_dropout_p,
decoder_dropout_p=args.decoder_dropout_p,
max_decoding_step=args.max_decoding_step,
stop_threshold=args.stop_threshold
)
self.postnet = PostNet(
num_mel_bins=args.num_mel_bins,
postnet_dim=args.postnet_dim,
num_conv_layers=args.num_postnet_conv_layers,
kernel_size=args.postnet_conv_kernel_size,
dropout_p=args.postnet_dropout_p
)
def forward(
self,
inputs: Tensor,
input_lengths: Optional[Tensor] = None,
targets: Optional[Tensor] = None,
teacher_forcing_ratio: float = 1.0
):
encoder_outputs = self.encoder(inputs, input_lengths)
decoder_outputs = self.decoder(encoder_outputs, targets, teacher_forcing_ratio)
postnet_outputs = self.postnet(decoder_outputs["mel_outputs"])
decoder_outputs["mel_outputs"] += postnet_outputs
return decoder_outputs
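# A minimal usage sketch (comment-only): the hyperparameter names below mirror
# the attribute lookups in __init__ above, but the values and input tensors are
# illustrative assumptions, not the author's configuration.
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(vocab_size=70, embedding_dim=512, ..., device='cpu')
#   model = Tacotron2(args)
#   outputs = model(inputs, input_lengths, targets, teacher_forcing_ratio=1.0)
#   outputs["mel_outputs"]  # mel spectrogram with the PostNet residual added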
|
[
"[email protected]"
] | |
d44cd5123695d6c48fef84f95857d085ddda8775
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/2D_20200722180003.py
|
f67079f7677c6a1e5d41c5b12f694fad5f417ffe
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,400 |
py
|
def array(n, m):
    # builds m rows of n zero-filled columns
    array = [[0 for x in range(n)] for x in range(m)]
    print(array)
a = [[2, 4, 6, 8, 10], [3, 6, 9, 12, 15], [4, 8, 12, 16, 20]]
# the first index selects the row and the second selects the column
print(a[0][3])
def hourGlass(arr):
    # given a 2d array, find the maximum hourglass sum;
    # an hourglass covers arr[i][j..j+2], the single middle cell
    # arr[i+1][j+1], and arr[i+2][j..j+2]
    # maxCount keeps a record of the best sum seen so far
    maxCount = 1
    if arr != []:
        for i in range(len(arr) - 2):
            # remember j is looping through the columns of arr[i]
            for j in range(len(arr[i]) - 2):
                totalCount = arr[i][j] + arr[i][j+1] + arr[i][j+2] \
                    + arr[i+1][j+1] \
                    + arr[i+2][j] + arr[i+2][j+1] + arr[i+2][j+2]
                if totalCount > maxCount:
                    maxCount = totalCount
        return maxCount
    else:
        return 0
print(hourGlass([[1,1,1,0,0,0],[0,1,0,0,0,0],[1,1,1,0,0,0],[0,0,2,4,4,0],[0,0,0,2,0,0],[0,0,1,2,4,0]]))
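# The best hourglass in this grid is 2+4+4 on top, 2 in the middle and
# 1+2+4 on the bottom, summing to 19, so the call above prints 19.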
|
[
"[email protected]"
] | |
3fd59004b8a3ada46670dc8f08e82e5d397cce55
|
b7b5f5b52f07b576a20e74839136d397f14d0566
|
/main/admin.py
|
cd3bbcc3572a48186b2724bb94ba97c49bfe0e18
|
[] |
no_license
|
Chudische/Shabashka
|
02d7e81cb2bd317b36e73620fc197868c4d65e1c
|
c3bab797601e8509439dc6538ec1f712755eb8c9
|
refs/heads/main
| 2023-07-08T07:54:04.044559 | 2021-08-18T13:40:44 | 2021-08-18T13:40:44 | 315,249,268 | 0 | 1 | null | 2021-08-10T06:42:42 | 2020-11-23T08:34:46 |
Python
|
UTF-8
|
Python
| false | false | 4,510 |
py
|
import datetime
from django.contrib import admin
from import_export.admin import ImportExportModelAdmin
from import_export import resources
from .models import ShaUser, SubCategory, SuperCategory, Offer, AdditionalImage, Comment, ShaUserAvatar
from .models import UserReview, ChatMessage, Location
from .utilities import send_activation_notification
from .forms import SubCategoryForm
def send_activation_notifications(modeladmin, request, queryset):
    """Send activation-notification emails to the selected users."""
    for rec in queryset:
        if not rec.is_activated:
            send_activation_notification(rec)
    modeladmin.message_user(request, "Notification emails have been sent")
send_activation_notifications.short_description = 'Send activation notification emails'
class NonActivatedFilter(admin.SimpleListFilter):
    title = 'Activated?'
    parameter_name = 'actstate'
    def lookups(self, request, model_admin):
        return (
            ("activated", "Activated"),
            ("threedays", "Not activated for over 3 days"),
            ("week", "Not activated for over a week")
        )
def queryset(self, request, queryset):
if self.value() == 'activated':
return queryset.filter(is_active=True, is_activated=True)
if self.value() == 'threedays':
date = datetime.date.today() - datetime.timedelta(days=3)
return queryset.filter(is_active=False, is_activated=False, date_joined__date__lt=date)
if self.value() == 'week':
date = datetime.date.today() - datetime.timedelta(weeks=1)
return queryset.filter(is_active=False, is_activated=False, date_joined__date__lt=date)
class LocationInline(admin.TabularInline):
model = Location
class ShaUserAdmin(admin.ModelAdmin):
list_display = ('__str__', 'is_activated', 'date_joined')
search_fields = ('username', 'email', 'first_name', 'last_name')
    list_filter = (NonActivatedFilter, )
inlines = (LocationInline, )
fields = (('username', 'email'), ('first_name', 'last_name'), 'average_rating',
('send_message', 'is_active', 'is_activated'),
('is_staff', 'is_superuser'),
'groups', 'user_permissions',
('last_login', 'date_joined'),
'favorite')
readonly_fields = ('last_login', 'date_joined')
actions = (send_activation_notifications, )
class SubCategoryInline(admin.TabularInline):
model = SubCategory
class SuperCategoryAdmin(admin.ModelAdmin):
exclude = ('super_category',)
inlines = (SubCategoryInline,)
class SubCategoryAdmin(admin.ModelAdmin):
form = SubCategoryForm
class AdditionalImageInline(admin.TabularInline):
model = AdditionalImage
class OfferAdmin(admin.ModelAdmin):
list_display = ('category', 'title', 'content', 'winner','author', 'created', 'status')
fields = (('category', 'author', 'status', 'winner'), 'title', 'content', 'price', 'image', 'is_active')
inlines = (AdditionalImageInline, LocationInline,)
class CommentAdmin(admin.ModelAdmin):
list_display = ('offer', 'author', 'content', 'price', 'created', 'is_active')
fields = (('offer', 'author', 'created'), 'content', ('price', 'time_amount', 'measure'), 'is_active')
readonly_fields = ('created',)
class UserReviewAdmin(admin.ModelAdmin):
list_display = ('offer', 'author', 'reviewal', 'speed', 'cost', 'accuracy', 'content', 'created')
fields = (('offer', 'author', 'reviewal', 'created'), ('speed', 'cost', 'accuracy'), 'content')
readonly_fields = ('created',)
class ChatMessageAdmin(admin.ModelAdmin):
list_display = ('offer', 'author', 'receiver', 'content', 'created')
    fields = ('offer', ('author', 'receiver', 'created'), 'content')
readonly_fields = ('created',)
class LocationAdmin(admin.ModelAdmin):
list_display = ('search_id', 'name')
# Register your models here.
admin.site.register(ShaUser, ShaUserAdmin)
admin.site.register(SuperCategory, SuperCategoryAdmin)
admin.site.register(SubCategory, SubCategoryAdmin)
admin.site.register(Offer, OfferAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(ShaUserAvatar)
admin.site.register(UserReview, UserReviewAdmin)
admin.site.register(ChatMessage, ChatMessageAdmin)
admin.site.register(Location, LocationAdmin)
|
[
"="
] |
=
|
93b4ff666a4c0dbc1475a16fb53d3a864ecec53d
|
1e0ec4d34def6d1d31665551b4aecbb644323249
|
/disambig_creation_constants.py
|
2fca0360764a115a37e4d1aa2de947aad7ea4777
|
[] |
no_license
|
RheingoldRiver/leaguepedia_archive
|
e10615530846080446fa5a56ae2e570f9376f875
|
52703d4fb0fef2345353945044a78915d78688bf
|
refs/heads/master
| 2022-06-19T21:37:47.480986 | 2022-06-01T18:44:32 | 2022-06-01T18:44:32 | 242,654,649 | 1 | 1 | null | 2021-12-15T20:07:19 | 2020-02-24T05:33:07 |
Python
|
UTF-8
|
Python
| false | false | 928 |
py
|
originalName = 'Limit'
irlName = 'Ju Min-gyu'
newName = '{} ({})'.format(originalName,irlName)
initmove = True
blankedit = False
limit = -1
timeoutLimit = 30
listplayerTemplates = ["listplayer", "listplayer/Current"]
rosterTemplates = ["ExtendedRosterLine", "ExtendedRosterLine/MultipleRoles"]
scoreboardTemplates = ["MatchRecap/Player", "MatchRecapS4/Player",
"MatchRecapS5/Player", "MatchRecapS6/Player",
"MatchRecapS7/Player", "MatchRecapS8/Player",
"MatchRecapS6NoSMW/Player", "MatchRecapS7NoKeystones/Player"]
statTemplates = ["IPS","CareerPlayerStats","MatchHistoryPlayer"]
rosterChangeTemplates = ["RosterChangeLine","RosterRumorLine2"]
summary = "Disambiguating {} to {}".format(originalName, newName)
cssStyle = "{\n color:orange!important;\n font-weight:bold;\n}"
origNameLC = originalName[0].lower() + originalName[1:]
newNameLC = newName[0].lower() + newName[1:]
blankEditThese = []
|
[
"[email protected]"
] | |
797e2d7a43e4b15dea8e59a7e042f26e1eb14caf
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/util/bin/format/pdb2/pdbreader/symbol/LocalData32MsSymbol.pyi
|
aede84acc0cf9dc7b6e4bd9ac3bccadd11448bf2
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,189 |
pyi
|
import ghidra.app.util.bin.format.pdb2.pdbreader
import ghidra.app.util.bin.format.pdb2.pdbreader.symbol
import java.lang
class LocalData32MsSymbol(ghidra.app.util.bin.format.pdb2.pdbreader.symbol.AbstractLocalDataMsSymbol):
PDB_ID: int = 4364
def __init__(self, __a0: ghidra.app.util.bin.format.pdb2.pdbreader.AbstractPdb, __a1: ghidra.app.util.bin.format.pdb2.pdbreader.PdbByteReader): ...
def emit(self, __a0: java.lang.StringBuilder) -> None: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getName(self) -> unicode: ...
def getOffset(self) -> long: ...
def getPdbId(self) -> int: ...
def getSegment(self) -> int: ...
def getTypeRecordNumber(self) -> ghidra.app.util.bin.format.pdb2.pdbreader.RecordNumber: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def pdbId(self) -> int: ...
|
[
"[email protected]"
] | |
b6f56697fb41c5e23e58b13a4e63f3780c4b41ea
|
db338cf7720a0ecbf181f7077b0dcf22b499d822
|
/src/mobot_client/migrations/0003_auto_20210902_2106.py
|
0f96c067c7d54e446c9525c7deab02ba86fffdfe
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
isabella232/mobot
|
94a7e33755cdf3b1916b6642ee7dc9bdfdebf84d
|
8a1fc884351211b4730e7de1c0bad1e18a1b1c8f
|
refs/heads/main
| 2023-08-31T17:00:21.341368 | 2021-09-16T00:55:35 | 2021-09-16T04:49:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,800 |
py
|
# Generated by Django 3.2.7 on 2021-09-02 21:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mobot_client', '0002_auto_20210902_2053'),
]
operations = [
migrations.AlterField(
model_name='bonuscoin',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='chatbotsettings',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='customer',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='customerdroprefunds',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='customerstorepreferences',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='drop',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='dropsession',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='item',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='message',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='order',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='sku',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='store',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
[
"[email protected]"
] | |
2c615eeec86ee49817a3513724374a206511e132
|
060fbf2a69a90ad92de5fc877521d5ea6b298007
|
/test/vanilla/Expected/AcceptanceTests/BodyComplex/bodycomplex/models/double_wrapper.py
|
598e2b460d799b8c9b576803570caa93bfc99961
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
iscai-msft/autorest.python
|
db47a8f00253148fbc327fe0ae1b0f7921b397c6
|
a9f38dd762fbc046ce6197bfabea2f56045d2957
|
refs/heads/master
| 2021-08-02T13:06:34.768117 | 2018-11-21T00:29:31 | 2018-11-21T00:29:31 | 161,554,205 | 0 | 0 |
MIT
| 2018-12-12T22:42:14 | 2018-12-12T22:42:14 | null |
UTF-8
|
Python
| false | false | 1,552 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DoubleWrapper(Model):
"""DoubleWrapper.
:param field1:
:type field1: float
:param
field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose:
:type
field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose:
float
"""
_attribute_map = {
'field1': {'key': 'field1', 'type': 'float'},
'field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose': {'key': 'field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose', 'type': 'float'},
}
def __init__(self, **kwargs):
super(DoubleWrapper, self).__init__(**kwargs)
self.field1 = kwargs.get('field1', None)
self.field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose = kwargs.get('field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose', None)
|
[
"[email protected]"
] | |
b1dc65782f757d291f0b3c8796390124c41932ae
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_204/285.py
|
05697ee8e397c0a3ab1f3fbfedb01bf8d507a112
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,058 |
py
|
from __future__ import print_function, division
from math import ceil, floor
import numpy as np
# Fernando Gonzalez del Cueto. Code Jam 2017
#infile = 'test2.in'
infile = 'B-small-attempt2.in'
outfile = infile.replace('.in', '.out')
fid = open(infile, 'r')
n_cases = int(fid.readline().strip())
f_out = open(outfile, 'w')
def solver(rata_q, p):
assert isinstance(rata_q, np.ndarray)
assert isinstance(p, np.ndarray)
n_ingredients, n_packages = p.shape
taken = np.zeros_like(p, dtype=bool)
lb = int(floor(np.min(np.min(0.9*p / rata_q, axis=1))))
ub = int(ceil(np.max(np.max(1.1*p / rata_q, axis=1))))
kits = 0
for q in range(lb, ub+1):
if (p==0).all():
return kits
t = (p >= rata_q * (q * 0.9)) & (p <= rata_q * (q * 1.1))
can_make = t.astype(np.uint8).sum(axis=1)
max_kits = can_make.min()
if max_kits.min() > 0:
kits += max_kits
if test_case==88:
pass
for row in range(p.shape[0]):
eliminated = 0
for col in range(p.shape[1]):
if t[row,col]:
p[row,col] = 0 # used, take them out
eliminated += 1
if eliminated >= max_kits:
break
return kits
for test_case in range(1,n_cases+1):
n_ingredients, n_packages = map(int, fid.readline().strip().split())
rata_q = map(int, fid.readline().strip().split())
r = np.array(rata_q).reshape((n_ingredients,1))
l = []
for i_ing in range(n_ingredients):
l.append(map(int, fid.readline().strip().split()))
a = np.array(l, dtype=np.float64)
print('Case %i' % test_case)
print(n_ingredients, n_packages)
print(rata_q)
print(a)
if test_case == 5:
pass
sol = solver(r, a)
print(sol)
l = 'Case #%i: %i\n' % (test_case, sol)
print(l)
f_out.write(l)
f_out.close()
|
[
"[email protected]"
] | |
f095f493b8c86691cddc688e4d19ccaf71870c88
|
c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34
|
/source/Clarification/Backtracking/52.N皇后2.py
|
0850985c798d542a6dbf9bbf340bfa76bed00408
|
[
"MIT"
] |
permissive
|
zhangwang0537/LeetCode-Notebook
|
73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1
|
1dbd18114ed688ddeaa3ee83181d373dcc1429e5
|
refs/heads/master
| 2022-11-13T21:08:20.343562 | 2020-04-09T03:11:51 | 2020-04-09T03:11:51 | 277,572,643 | 0 | 0 |
MIT
| 2020-07-06T14:59:57 | 2020-07-06T14:59:56 | null |
UTF-8
|
Python
| false | false | 1,700 |
py
|
# The n-queens puzzle asks how to place n queens on an n x n chessboard
# so that no two queens can attack each other.
#
# (The original page showed a picture of one solution to the 8-queens
# puzzle here.)
#
# Given an integer n, return the number of distinct solutions to the
# n-queens puzzle.
#
# Example:
#
# Input: 4
# Output: 2
# Explanation: There are two distinct solutions to the 4-queens puzzle:
# [
#  [".Q..",  // solution 1
#   "...Q",
#   "Q...",
#   "..Q."],
#
#  ["..Q.",  // solution 2
#   "Q...",
#   "...Q",
#   ".Q.."]
# ]
class Solution:
    def totalNQueens(self, n: int) -> int:
        def is_not_under_attack(row, col):
            return not (rows[col] or hills[row - col] or dales[row + col])
        # place a queen
        def place_queen(row, col):
            rows[col] = 1
            hills[row - col] = 1  # main diagonal
            dales[row + col] = 1  # anti-diagonal
        # remove a queen
        def remove_queen(row, col):
            rows[col] = 0
            hills[row - col] = 0  # main diagonal
            dales[row + col] = 0  # anti-diagonal
        # backtracking
        def backtrack(row=0, count=0):
            for col in range(n):
                if is_not_under_attack(row, col):
                    place_queen(row, col)
                    if row + 1 == n:  # all n queens placed: one more solution
                        count += 1
                    else:
                        count = backtrack(row + 1, count)
                    remove_queen(row, col)
            return count
        rows = [0] * n
        hills = [0] * (2 * n - 1)  # main diagonals
        dales = [0] * (2 * n - 1)  # anti-diagonals
        return backtrack()
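# Quick check against the example in the problem statement above:
# print(Solution().totalNQueens(4))  # -> 2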
|
[
"[email protected]"
] | |
f60622ab5bd5f34311c951a2a60f776f25a2aa47
|
33a50bb13812090a36257078522b798762978c66
|
/top/api/rest/SimbaNonsearchAllplacesGetRequest.py
|
6f07639d12207a871566ac06ac186d09de431e25
|
[] |
no_license
|
aa3632840/quanlin
|
52ac862073608cd5b977769c14a7f6dcfb556678
|
2890d35fa87367d77e295009f2d911d4b9b56761
|
refs/heads/master
| 2021-01-10T22:05:14.076949 | 2014-10-25T02:28:15 | 2014-10-25T02:28:15 | 23,178,087 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 300 |
py
|
'''
Created by auto_sdk on 2014-09-08 16:48:02
'''
from top.api.base import RestApi
class SimbaNonsearchAllplacesGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
def getapiname(self):
return 'taobao.simba.nonsearch.allplaces.get'
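# Usage sketch (RestApi, imported from top.api.base above, is assumed to
# supply the actual request plumbing):
#   req = SimbaNonsearchAllplacesGetRequest()
#   req.getapiname()  # -> 'taobao.simba.nonsearch.allplaces.get'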
|
[
"[email protected]"
] | |
9d9ed5f5767b7fd951eb6ad1a2a01ca63fc8e5ed
|
56b63ee537f872af0fc028016d1508b4c1dd5c60
|
/school/migrations/0267_auto_20210317_1657.py
|
926120cf7259917f9c79aaa27206e75ae9e960a4
|
[] |
no_license
|
jacknjillsolutionsrevanth/EMS1
|
01fc571120f765b0fbfe3aa654b15ff578d6e9b9
|
db14d8e6c15669b5938aa9276c5e22006218814a
|
refs/heads/main
| 2023-08-03T19:40:50.073133 | 2021-10-01T07:02:37 | 2021-10-01T07:02:37 | 410,202,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 591 |
py
|
# Generated by Django 3.1.4 on 2021-03-17 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('school', '0266_auto_20210315_1612'),
]
operations = [
migrations.AddField(
model_name='daily_data',
name='routename',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='milkdata',
name='branch',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
[
"[email protected]"
] | |
1f6e97b6fae3bcc121943a41542b27b69deeafab
|
8c77dcc0fd3e497194e572c8641200f08b32dc97
|
/general/function_factory.py
|
f2c923b5d537856aae039428a6462973dfd14e56
|
[
"MIT"
] |
permissive
|
bpuderer/python-snippets
|
633a1e382f7c9812621d61ec16a15e106d1d5fc8
|
3277b76b03f3ceb11e4571be4cecae68051aac45
|
refs/heads/master
| 2021-12-27T09:19:27.212312 | 2021-12-22T13:08:56 | 2021-12-22T13:08:56 | 46,539,064 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
def raise_to(exp):
def raise_to_exp(x):
return pow(x, exp)
return raise_to_exp
cube = raise_to(3)
print(cube(4))
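# the factory can be reused: each closure captures its own exponent
square = raise_to(2)
print(square(5))  # prints 25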
|
[
"[email protected]"
] | |
c10fbf1e704a93a27b39f55a903786ffa970dab7
|
f9d942b2fed83e9d6c101ebaedc1d4b36dee2754
|
/logistics/tests/util.py
|
b392e9568f05b3984dcf23beedbd376f6d40b26b
|
[] |
no_license
|
unicefuganda/rapidsms-logistics
|
7cde229ac2619366d253d099c0f222eb96b1468e
|
7d9609a7b9d6fa3f4502aba52ab56acc23a6e928
|
refs/heads/master
| 2020-12-25T05:26:59.459389 | 2012-12-17T12:00:52 | 2012-12-17T12:00:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 291 |
py
|
from rapidsms.tests.scripted import TestScript
from logistics import loader as logi_loader
def load_test_data():
logi_loader.init_reports()
logi_loader.init_supply_point_types()
logi_loader.init_test_location_and_supplypoints()
logi_loader.init_test_product_and_stock()
|
[
"[email protected]"
] | |
05d8af9bcacd6c3653138d5f6101b153625fb68c
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Docs/Controlling the Keyboard and Mouse/typer/docs_src/commands/callback/tutorial001.py
|
5438b3ead7b25f5be9db58189b57b8e6e3f410a3
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null |
UTF-8
|
Python
| false | false | 128 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:f0921ed2e19ca15b1c6e7817d91c7dbefd69a5a39c79520e0d5e519b11cdd10c
size 756
|
[
"[email protected]"
] | |
2c2eba017b299584cc34574addc7412cb5c9635b
|
8ed4bf9fbead471c9e5f88e4d18ac432ec3d628b
|
/hackerrank/algorithm/string/gem_stones.py
|
e3ff22b12568fff4bf1fd684c35dd47da7151f2d
|
[] |
no_license
|
hizbul25/programming_problem
|
9bf26e49ed5bb8c9c829d00e765c9401222fb35c
|
2acca363704b993ffe5f6c2b00f81a4f4eca7204
|
refs/heads/master
| 2021-01-10T22:28:26.105787 | 2018-01-21T16:45:45 | 2018-01-21T16:45:45 | 65,394,734 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 170 |
py
|
#URL: https://www.hackerrank.com/challenges/gem-stones
n = int(input())
all_elem = set(input())
for g in range(n - 1):
all_elem &= set(input())
print(len(all_elem))
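# Example (sketch): for n=3 with rocks "abc", "abd" and "bcd", the running
# intersection is {'b'}, so the program prints 1.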
|
[
"[email protected]"
] | |
9df035da71a8354e73397ed5fd9483a3a837b5d5
|
62e985b6bc2cd04be506c9f4b586f6a0bd5a8b1c
|
/docs/_docs
|
2e46ca4836023863c54a487374eead67897a2d9d
|
[
"MIT"
] |
permissive
|
delfick/nose-focus
|
ece09553d26ce4323e449b5e50f98e63a21d1699
|
89ceae691fabb27c35d4a67f0edf8dec17737f3f
|
refs/heads/master
| 2023-07-10T22:44:29.271678 | 2023-06-23T06:36:00 | 2023-06-23T06:36:00 | 20,155,739 | 0 | 3 | null | 2019-11-06T22:59:43 | 2014-05-25T13:57:39 |
Python
|
UTF-8
|
Python
| false | false | 512 |
#!/usr/bin/env python3
from venvstarter import ignite
import runpy
import os
this_dir = os.path.dirname(__file__)
nose_focus_version = runpy.run_path(
os.path.join(this_dir, "..", "nose_focus", "__init__.py")
)["VERSION"]
with open(os.path.join(this_dir, "requirements.txt"), "r") as fle:
deps = [line.strip() for line in fle.readlines() if line.strip()]
deps.append(f"-e file:{this_dir}/..#egg=nose_focus=={nose_focus_version}")
ignite(this_dir, "sphinx-build", deps=deps, min_python_version=3.6)
|
[
"[email protected]"
] | ||
e842d47c65b49f7baf66ad14c86f7b7c9b1e413b
|
24d8cf871b092b2d60fc85d5320e1bc761a7cbe2
|
/BitPim/rev3177-3237/right-branch-3237/midifile.py
|
2c8d9d84bbc3c8f068fe079043d3fd8f31067e0b
|
[] |
no_license
|
joliebig/featurehouse_fstmerge_examples
|
af1b963537839d13e834f829cf51f8ad5e6ffe76
|
1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad
|
refs/heads/master
| 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,795 |
py
|
import common
import fileinfo
module_debug=False
class MIDIEvent(object):
META_EVENT=0
SYSEX_EVENT=1
SYSEX1_EVENT=2
MIDI_EVENT=3
LAST_MIDI_EVENT=4
type_str=('Meta', 'SYSEX', 'SYSEX cont', 'MIDI', 'Last MIDI')
def __init__(self, file, offset, last_cmd=None):
self.__f=file
self.__start=self.__ofs=offset
self.__time_delta=self.__get_var_len()
b=self.__get_int()
if b==0xff:
self.__get_meta_event()
elif b==0xf0 or b==0xf7:
self.__get_sysex_event(b)
else:
self.__get_midi_event(b, last_cmd)
self.__total_len=self.__ofs-self.__start
def __get_int(self):
i=int(self.__f.GetByte(self.__ofs))
self.__ofs+=1
return i
def __get_bytes(self, len):
data=self.__f.GetBytes(self.__ofs, len)
self.__ofs+=len
return data
def __get_var_len(self):
t=0
b=self.__get_int()
while (b&0x80):
t=(t<<7)|(b&0x7f)
b=self.__get_int()
return (t<<7)|(b&0x7f)
def __get_meta_event(self):
self.__type=self.META_EVENT
self.__cmd=self.__get_int()
self.__len=self.__get_var_len()
if self.__len:
self.__param1=self.__get_bytes(self.__len)
else:
self.__param1=None
self.__param2=None
def __get_sysex_event(self, cmd):
if cmd==0xf0:
self.__type=self.SYSEX_EVENT
else:
self.__type=self.SYSEX1_EVENT
self.__cmd=cmd
self.__len=self.__get_var_len()
if self.__len:
self.__param1=self.__get_bytes(self.__len)
else:
self.__param1=None
self.__param2=None
def __get_midi_event(self, cmd, last_cmd):
if cmd&0x80:
i=cmd
self.__type=self.MIDI_EVENT
self.__param1=self.__get_int()
else:
i=last_cmd
self.__type=self.LAST_MIDI_EVENT
self.__param1=cmd
self.__cmd=(i&0xf0)>>4
self.__midi_channel=i&0x0f
if self.__cmd==0x0c or self.__cmd==0x0d:
self.__len=1
self.__param2=None
else:
self.__len=2
self.__param2=self.__get_int()
def __get_type(self):
return self.__type
type=property(fget=__get_type)
def __get_time_delta(self):
return self.__time_delta
time_delta=property(fget=__get_time_delta)
def __get_total_len(self):
return self.__total_len
total_len=property(fget=__get_total_len)
def __get_cmd(self):
return self.__cmd
cmd=property(fget=__get_cmd)
def __get_midi_channel(self):
return self.__midi_channel
midi_channel=property(fget=__get_midi_channel)
def __get_param_len(self):
return self.__len
param_len=property(fget=__get_param_len)
def __get_params(self):
return self.__param1, self.__param2
params=property(fget=__get_params)
def __str__(self):
if self.type==self.MIDI_EVENT or \
self.type==self.LAST_MIDI_EVENT:
return '0x%04x: %s cmd: 0x%x, Channel: %d, Len: %d'%\
(self.time_delta, self.type_str[self.type],
self.cmd, self.midi_channel, self.param_len)
else:
return '0x%04x: %s cmd: 0x%x, Len: %d'%\
(self.time_delta, self.type_str[self.type],
self.cmd, self.param_len)
class MIDITrack(object):
def __init__(self, file, offset):
self.__f=file
self.__ofs=offset
if module_debug:
print 'New Track @ ofs:', offset
if self.__f.GetBytes(self.__ofs, 4)!='MTrk':
            raise TypeError, 'not a MIDI track'
self.__len=self.__f.GetMSBUint32(self.__ofs+4)
ofs=self.__ofs+8
ofs_end=ofs+self.__len
last_cmd=None
self.__time_delta=0
self.__mpqn=None
while ofs<ofs_end:
e=MIDIEvent(file, ofs, last_cmd)
if module_debug:
print e
ofs+=e.total_len
self.__time_delta+=e.time_delta
if e.type==e.META_EVENT:
if e.cmd==0x51:
p1, p2=e.params
self.__mpqn=(ord(p1[0])<<16)|(ord(p1[1])<<8)|ord(p1[2])
if e.type==e.MIDI_EVENT or e.type==e.LAST_MIDI_EVENT:
last_cmd=(e.cmd<<4)|e.midi_channel
else:
last_cmd=e.cmd
self.__total_len=ofs-self.__ofs
if module_debug:
print 'self.__ofs', self.__ofs+8, 'self.__len:', self.__len, 'ofs: ', ofs
print 'time delta:', self.__time_delta, 'MPQN: ', self.__mpqn
def __get_time_delta(self):
return self.__time_delta
time_delta=property(fget=__get_time_delta)
def __get_total_len(self):
return self.__total_len
total_len=property(fget=__get_total_len)
def __get_mpqn(self):
return self.__mpqn
mpqn=property(fget=__get_mpqn)
class MIDIFile(object):
def __init__(self, file_wraper):
try:
self.__valid=False
self.__file=file_wraper
if self.__file.GetBytes(0, 4)!='MThd' or \
self.__file.GetMSBUint32(4)!=6:
return
self.__valid=True
self.__type=self.__file.GetMSBUint16(8)
self.__num_tracks=self.__file.GetMSBUint16(10)
self.__time_division=self.__file.GetMSBUint16(12)
self.__tracks=[]
self.__mpqn=2000000
file_ofs=14
time_delta=0
for i in range(self.__num_tracks):
trk=MIDITrack(self.__file, file_ofs)
self.__tracks.append(trk)
file_ofs+=trk.total_len
time_delta=max(time_delta, trk.time_delta)
if trk.mpqn is not None:
self.__mpqn=trk.mpqn
self.__duration=(self.__mpqn*time_delta/self.__time_division)/1000000.0
if module_debug:
print 'type:', self.__type
print 'time division:', self.__time_division
print 'num of tracks:', self.__num_tracks
print 'MPQN:', self.__mpqn
print 'longest time delta: ', time_delta
print 'duration:', self.__duration
except:
self.__valid=False
def __get_valid(self):
return self.__valid
valid=property(fget=__get_valid)
def __get_type(self):
return self.__type
type=property(fget=__get_type)
def __get_num_tracks(self):
return self.__num_tracks
num_tracks=property(fget=__get_num_tracks)
def __get_duration(self):
return self.__duration
duration=property(fget=__get_duration)
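# Usage sketch (assumes a fileinfo-style wrapper exposing the GetBytes,
# GetMSBUint16 and GetMSBUint32 methods used above):
#   f=MIDIFile(wrapper)
#   if f.valid:
#       print f.num_tracks, f.duration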
|
[
"[email protected]"
] | |
33490b42c85fee01be3f6432c411c486ae7157e5
|
aca253ff1a97c96a1a0a9a5802aa623789662bb1
|
/p034/statue_bar.py
|
ebd6c7ce45949c996b6d834401e27a09c8df4f7f
|
[] |
no_license
|
KD-huhu/PyQt5
|
a6128a34b93f6e2da7216d5818f66dc9614216bc
|
1c33a6549c2fcf663168256553d8c24e25d9a69c
|
refs/heads/master
| 2022-07-03T07:37:29.837547 | 2020-05-17T14:54:39 | 2020-05-17T14:54:39 | 261,768,854 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,052 |
py
|
import sys, math
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class StatusBar(QMainWindow):
    def __init__(self):
        super(StatusBar, self).__init__()
        self.initUI()
    def initUI(self):
        self.setWindowTitle("Status bar demo")
        self.resize(300, 200)
        bar = self.menuBar()  # create the menu bar
        file = bar.addMenu("File")  # add a menu
        file.addAction("show")
        file.triggered.connect(self.processTrigger)  # connect the slot
        self.setCentralWidget(QTextEdit())
        self.statusBar = QStatusBar()  # create the status bar
        self.setStatusBar(self.statusBar)  # install it on the window
    def processTrigger(self, q):
        if q.text() == "show":
            self.statusBar.showMessage(q.text() + " menu item was clicked", 5000)
if __name__ == '__main__':
    app = QApplication(sys.argv)
    main = StatusBar()
    main.show()
    sys.exit(app.exec_())
|
[
"[email protected]"
] | |
c73005c81aaec8e7c0613dea2e18f7b12afbb9dd
|
f45cc0049cd6c3a2b25de0e9bbc80c25c113a356
|
/LeetCode/双指针(two points)/16. 3Sum Closest.py
|
999c518d3f03cb923594bff7a42b551b460d21fb
|
[] |
no_license
|
yiming1012/MyLeetCode
|
4a387d024969bfd1cdccd4f581051a6e4104891a
|
e43ee86c5a8cdb808da09b4b6138e10275abadb5
|
refs/heads/master
| 2023-06-17T06:43:13.854862 | 2021-07-15T08:54:07 | 2021-07-15T08:54:07 | 261,663,876 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,263 |
py
|
'''
Given an array nums of n integers and an integer target, find three integers in nums such that the sum is closest to target. Return the sum of the three integers. You may assume that each input would have exactly one solution.
Example:
Given array nums = [-1, 2, 1, -4], and target = 1.
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
'''
from typing import List
class Solution:
    def threeSumClosest(self, nums: List[int], target: int) -> int:
        """
        Approach: two pointers.
        1. The gains come from pruning.
        2. Sort the array first.
        3. Skip duplicate anchors: if i > 0 and nums[i] == nums[i - 1]: continue
        4. If the three smallest numbers from index i already reach target,
           the closest achievable sum for this and every later i is known.
        5. If the three largest numbers for this i are still below target,
           record them and continue.
        """
        nums.sort()
        n = len(nums)
        res = nums[0] + nums[1] + nums[2]
        for i in range(n - 2):
            l, r = i + 1, n - 1
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            threeSum = nums[i] + nums[i + 1] + nums[i + 2]
            if threeSum >= target:
                if abs(threeSum - target) < abs(res - target):
                    res = threeSum
                return res
            if nums[i] + nums[-1] + nums[-2] < target:
                res = nums[i] + nums[-1] + nums[-2]
                continue
            while l < r:
                threeSum = nums[i] + nums[l] + nums[r]
                if threeSum < target:
                    if abs(threeSum - target) < abs(res - target):
                        res = threeSum
                    l += 1
                    # skip runs of equal values
                    while l < r and nums[l] == nums[l - 1]:
                        l += 1
                elif threeSum > target:
                    if abs(threeSum - target) < abs(res - target):
                        res = threeSum
                    r -= 1
                    # skip runs of equal values
                    while l < r and nums[r] == nums[r + 1]:
                        r -= 1
                else:
                    return target
        return res
if __name__ == '__main__':
    nums = [-1, 2, 1, -4]
    target = 1
    print(Solution().threeSumClosest(nums, target))
|
[
"[email protected]"
] | |
70add22be9a70d8ceca4e71014665764dd5f5aff
|
bc2945c99f828083ca78b3bfcfe220a134fbd8b0
|
/users/migrations/0010_auto_20200725_1159.py
|
c0fe7d7e4a714113bfff57bc26a2b57875bf0f3a
|
[] |
no_license
|
Kyeza/web_system
|
5bde9231551b7a94b535fe707db99ade351bd4fb
|
686a701469b13454d39e4f0c6b342b22befdb345
|
refs/heads/uganda-master
| 2022-12-14T13:43:17.833502 | 2020-12-11T07:23:19 | 2020-12-11T07:23:19 | 176,704,006 | 2 | 1 | null | 2022-12-08T11:07:51 | 2019-03-20T09:55:33 |
Python
|
UTF-8
|
Python
| false | false | 612 |
py
|
# Generated by Django 3.0.6 on 2020-07-25 11:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reports', '0014_auto_20200605_0638'),
('users', '0009_auto_20200721_0727'),
]
operations = [
migrations.AlterField(
model_name='payrollprocessors',
name='summary_report',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='earning_or_deduction', to='reports.ExtraSummaryReportInfo'),
),
]
|
[
"[email protected]"
] | |
14ec29e30beb9428142b51e4d0cb06ebde3e6971
|
a23ec1e8470f87d1b3fa34b01506d6bdd63f6569
|
/algorithms/282. Expression Add Operators.py
|
3f93dd1ffb26e5d220725678cb98469a2ceaaf91
|
[] |
no_license
|
xiaohai0520/Algorithm
|
ae41d2137e085a30b2ac1034b8ea00e6c9de3ef1
|
96945ffadd893c1be60c3bde70e1f1cd51edd834
|
refs/heads/master
| 2023-04-14T17:41:21.918167 | 2021-04-20T13:57:09 | 2021-04-20T13:57:09 | 156,438,761 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,107 |
py
|
from typing import List
class Solution(object):
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
res, self.target = [], target
for i in range(1,len(num)+1):
if i == 1 or (i > 1 and num[0] != "0"): # prevent "00*" as a number
self.dfs(num[i:], num[:i], int(num[:i]), int(num[:i]), res) # this step put first number in the string
return res
def dfs(self, num, temp, cur, last, res):
if not num:
if cur == self.target:
res.append(temp)
return
for i in range(1, len(num)+1):
val = num[:i]
if i == 1 or (i > 1 and num[0] != "0"): # prevent "00*" as a number
self.dfs(num[i:], temp + "+" + val, cur+int(val), int(val), res)
self.dfs(num[i:], temp + "-" + val, cur-int(val), -int(val), res)
self.dfs(num[i:], temp + "*" + val, cur-last+last*int(val), last*int(val), res)
class Solution:
def addOperators(self, num: str, target: int) -> List[str]:
results = []
self.helper(num, 0, target, 0, 0, "", results)
return results
def helper(self, string, start, target, sum_so_far, last, path, results):
if start == len(string) and sum_so_far == target:
results.append(path)
for end in range(start+1, len(string)+1):
sub_string = string[start:end]
if len(sub_string) > 1 and sub_string[0] == '0':
break
cur = int(sub_string)
if start == 0:
self.helper(string, end, target, sum_so_far + cur, cur, path + sub_string, results)
else:
self.helper(string, end, target, sum_so_far + cur, cur, path + "+" + sub_string, results)
self.helper(string, end, target, sum_so_far - cur, -cur, path + "-" + sub_string, results)
self.helper(string, end, target, sum_so_far - last + cur * last, cur * last, path + "*" + sub_string, results)
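# Quick check with the classic example (a sketch; result order may vary):
# print(Solution().addOperators("123", 6))  # -> ['1+2+3', '1*2*3']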
|
[
"[email protected]"
] | |
3487f385af9cf1c3384d8a9a9c5360459fd67f89
|
93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3
|
/python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py
|
cb485609b55ec330ad7dff0ed4d10d8a13a8f865
|
[
"Apache-2.0"
] |
permissive
|
hutuxian/Paddle
|
f8b7693bccc6d56887164c1de0b6f6e91cffaae8
|
a1b640bc66a5cc9583de503e7406aeba67565e8d
|
refs/heads/develop
| 2023-08-29T19:36:45.382455 | 2020-09-09T09:19:07 | 2020-09-09T09:19:07 | 164,977,763 | 8 | 27 |
Apache-2.0
| 2023-06-16T09:47:39 | 2019-01-10T02:50:31 |
Python
|
UTF-8
|
Python
| false | false | 1,978 |
py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from pass_test import PassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
class FCFusePassTest(PassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[32, 128], dtype="float32", lod_level=0)
tmp_0 = fluid.layers.fc(input=data,
size=128,
num_flatten_dims=1,
act="relu")
tmp_1 = fluid.layers.fc(input=tmp_0, size=32, num_flatten_dims=1)
tmp_2 = fluid.layers.softmax(input=tmp_1)
self.feeds = {"data": np.random.random((32, 128)).astype("float32")}
self.fetch_list = [tmp_0, tmp_1, tmp_2]
self.pass_names = "fc_fuse_pass"
self.fused_op_type = "fc"
self.num_fused_ops = 2
def test_check_output(self):
use_gpu_set = [False]
if core.is_compiled_with_cuda():
use_gpu_set.append(True)
for use_gpu in use_gpu_set:
self.pass_attrs = {"fc_fuse_pass": {"use_gpu": use_gpu}}
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
self.check_output_with_place(place, startup_on_cpu=True)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
b20a8ceb62e68cea4660e241d323d08b5c8a9a34
|
b05b89e1f6378905bbb62e2a2bf2d4f8e3187932
|
/contiguousSubarrayWithMaxSum.py
|
cca7da339ae673798a2108e9eca5e36101113136
|
[
"MIT"
] |
permissive
|
anishmo99/Daily-Interview-Pro
|
c959cd336209132aebad67a409df685e654cfdfc
|
d8724e8feec558ab1882d22c9ca63b850b767753
|
refs/heads/master
| 2023-04-10T08:09:46.089227 | 2021-04-27T07:27:38 | 2021-04-27T07:27:38 | 269,157,996 | 1 | 1 |
MIT
| 2020-06-08T07:09:19 | 2020-06-03T17:57:21 |
C++
|
UTF-8
|
Python
| false | false | 257 |
py
|
from typing import List
class Solution:
def maxSubArraySum(self, arr: List[int]) -> int:
cur_sum,max_sum=arr[0],arr[0]
for i in range(1,len(arr)):
cur_sum = max(arr[i],arr[i]+cur_sum)
max_sum = max(cur_sum,max_sum)
return max_sum
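# Quick check: the best run below is 42 + 14 - 5 + 86 = 137.
print(Solution().maxSubArraySum([34, -50, 42, 14, -5, 86]))  # prints 137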
|
[
"[email protected]"
] | |
aaac6a94ac555dc58dda780437398f3de9ad0d12
|
8ae5c8bd19fe77c44b8485f646ff78db2605522a
|
/control/test.py
|
fd1e4612d0f2926ef1c3bc836f63ac6f6fbc1337
|
[] |
no_license
|
yunshengtian/pendular-codesign
|
8bec44de67401d8db9b3e19b9afe4808e6eb84bd
|
7f939bb0b00907b367a9ad89a5004ecb3a6aad78
|
refs/heads/main
| 2023-05-03T22:54:07.379998 | 2021-05-23T04:57:25 | 2021-05-23T04:57:25 | 368,543,614 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,015 |
py
|
import gym
import numpy as np
from argparse import ArgumentParser
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import env
from env.utils import utils
from control import get_control
parser = ArgumentParser()
parser.add_argument('--env', type=str, default='pendulum', choices=['acrobot', 'pendulum'])
parser.add_argument('--control', type=str, default='ilqr', choices=['ilqr', 'mppi'])
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
np.random.seed(args.seed)
env = gym.make(f'{args.env}-v0')
Control = get_control(args.control)
control = Control(env)
x_trj, u_trj, info = control.solve()
if args.control == 'ilqr':
cost_trace = info['cost_trace']
final_cost = cost_trace[-1]
elif args.control == 'mppi':
final_cost = info['cost']
print(f'Final cost: {final_cost}')
design = env.sim.get_design_params(env.sim.design)
Animation = utils[args.env]['animate']
animation = Animation()
animation.show(design, x_trj)
|
[
"[email protected]"
] | |
64618a6ac65022117f48efe65d74d536eb1d4461
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/request/AntfortuneEquityShopCustrelationQueryRequest.py
|
fa4fa39402e4465d04ccbe7c01ba6dec5c1768fa
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 4,021 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AntfortuneEquityShopCustrelationQueryModel import AntfortuneEquityShopCustrelationQueryModel
class AntfortuneEquityShopCustrelationQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AntfortuneEquityShopCustrelationQueryModel):
self._biz_content = value
else:
self._biz_content = AntfortuneEquityShopCustrelationQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'antfortune.equity.shop.custrelation.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
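# Usage sketch (P_METHOD and P_VERSION come from ParamConstants above; the
# URL below is a placeholder):
#   req = AntfortuneEquityShopCustrelationQueryRequest()
#   req.notify_url = 'https://example.com/notify'
#   req.get_params()  # dict carrying the method name, version and notify_url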
|
[
"[email protected]"
] | |
7a90a3c285d5b1d163f9550befa75c5b01f6fdc4
|
0b3c5260cd5c33a1beccc5710a5d0fd097a5ea15
|
/anchore_engine/services/policy_engine/engine/policy/gates/npm_check.py
|
40e0d49fe309d0fdfc2a14343f4df6cec46099e9
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
omerlh/anchore-engine
|
fb2d7cb3d8bd259f6c973b450fbaa2c2e00497f0
|
669a0327f8baaee3f5c7c64b482909fe38830d80
|
refs/heads/master
| 2021-09-02T12:48:51.661648 | 2018-01-02T19:26:47 | 2018-01-02T19:26:47 | 116,236,136 | 1 | 0 | null | 2018-01-04T08:41:39 | 2018-01-04T08:41:39 | null |
UTF-8
|
Python
| false | false | 7,044 |
py
|
from anchore_engine.services.policy_engine.engine.policy.gate import Gate, BaseTrigger
from anchore_engine.services.policy_engine.engine.policy.utils import NameVersionListValidator, CommaDelimitedStringListValidator, barsplit_comma_delim_parser, delim_parser
from anchore_engine.db import NpmMetadata
from anchore_engine.services.policy_engine.engine.logs import get_logger
from anchore_engine.services.policy_engine.engine.feeds import DataFeeds
log = get_logger()
# TODO: generalize these for any feed, with base classes and children per feed type
FEED_KEY = 'npm'
NPM_LISTING_KEY = 'npms'
NPM_MATCH_KEY = 'matched_feed_npms'
class NotLatestTrigger(BaseTrigger):
__trigger_name__ = 'NPMNOTLATEST'
__description__ = 'triggers if an installed NPM is not the latest version according to NPM data feed'
def evaluate(self, image_obj, context):
"""
Fire for any npm in the image that is in the official npm feed but is not the latest version.
Mutually exclusive to NPMNOTOFFICIAL and NPMBADVERSION
"""
feed_npms = context.data.get(NPM_MATCH_KEY)
img_npms = context.data.get(NPM_LISTING_KEY)
        if not feed_npms or not img_npms:
return
feed_names = {p.name: p.latest for p in feed_npms}
for npm, versions in img_npms.items():
if npm not in feed_names:
continue # Not an official
for v in versions:
if v and v != feed_names.get(npm):
self._fire("NPMNOTLATEST Package ("+npm+") version ("+v+") installed but is not the latest version ("+feed_names[npm]['latest']+")")
class NotOfficialTrigger(BaseTrigger):
__trigger_name__ = 'NPMNOTOFFICIAL'
__description__ = 'triggers if an installed NPM is not in the official NPM database, according to NPM data feed'
def evaluate(self, image_obj, context):
"""
Fire for any npm that is not in the official npm feed data set.
Mutually exclusive to NPMNOTLATEST and NPMBADVERSION
:param image_obj:
:param context:
:return:
"""
feed_npms = context.data.get(NPM_MATCH_KEY)
img_npms = context.data.get(NPM_LISTING_KEY)
        if not feed_npms or not img_npms:
return
feed_names = {p.name: p.versions_json for p in feed_npms}
for npm in img_npms.keys():
if npm not in feed_names:
self._fire(msg="NPMNOTOFFICIAL Package ("+str(npm)+") in container but not in official NPM feed.")
class BadVersionTrigger(BaseTrigger):
__trigger_name__ = 'NPMBADVERSION'
__description__ = 'triggers if an installed NPM version is not listed in the official NPM feed as a valid version'
def evaluate(self, image_obj, context):
"""
Fire for any npm that is in the official npm set but is not one of the official versions.
Mutually exclusive to NPMNOTOFFICIAL and NPMNOTLATEST
:param image_obj:
:param context:
:return:
"""
feed_npms = context.data.get(NPM_MATCH_KEY)
img_npms = context.data.get(NPM_LISTING_KEY)
        if not feed_npms or not img_npms:
return
feed_names = {p.name: p.versions_json for p in feed_npms}
for npm, versions in img_npms.items():
if npm not in feed_names:
continue
non_official_versions = set(versions).difference(set(feed_names.get(npm, [])))
for v in non_official_versions:
self._fire(msg="NPMBADVERSION Package ("+npm+") version ("+v+") installed but version is not in the official feed for this package ("+str(feed_names.get(npm, '')) + ")")
class PkgFullMatchTrigger(BaseTrigger):
__trigger_name__ = 'NPMPKGFULLMATCH'
__description__ = 'triggers if the evaluated image has an NPM package installed that matches one in the list given as a param (package_name|vers)'
__params__ = {
'BLACKLIST_NPMFULLMATCH': NameVersionListValidator()
}
def evaluate(self, image_obj, context):
"""
Fire for any npm that is on the blacklist with a full name + version match
:param image_obj:
:param context:
:return:
"""
npms = image_obj.npms
if not npms:
return
pkgs = context.data.get(NPM_LISTING_KEY)
if not pkgs:
return
for pkg, vers in barsplit_comma_delim_parser(self.eval_params.get('BLACKLIST_NPMFULLMATCH', '')).items():
try:
if pkg in pkgs and vers in pkgs.get(pkg, []):
self._fire(msg='NPMPKGFULLMATCH Package is blacklisted: '+pkg+"-"+vers)
except Exception as e:
continue
class PkgNameMatchTrigger(BaseTrigger):
__trigger_name__ = 'NPMPKGNAMEMATCH'
__description__ = 'triggers if the evaluated image has an NPM package installed that matches one in the list given as a param (package_name)'
__params__ = {
'BLACKLIST_NPMNAMEMATCH': CommaDelimitedStringListValidator()
}
def evaluate(self, image_obj, context):
npms = image_obj.npms
if not npms:
return
pkgs = context.data.get(NPM_LISTING_KEY)
if not pkgs:
return
for match_val in delim_parser(self.eval_params.get('BLACKLIST_NPMNAMEMATCH', '')):
if match_val and match_val in pkgs:
self._fire(msg='NPMPKGNAMEMATCH Package is blacklisted: ' + match_val)
class NoFeedTrigger(BaseTrigger):
__trigger_name__ = 'NPMNOFEED'
__description__ = 'triggers if anchore does not have access to the NPM data feed'
def evaluate(self, image_obj, context):
try:
feed_meta = DataFeeds.instance().packages.group_by_name(FEED_KEY)
if feed_meta and feed_meta[0].last_sync:
return
except Exception as e:
log.exception('Error determining feed presence for npms. Defaulting to firing trigger')
self._fire()
return
class NpmCheckGate(Gate):
__gate_name__ = "NPMCHECK"
__triggers__ = [
NotLatestTrigger,
NotOfficialTrigger,
BadVersionTrigger,
PkgFullMatchTrigger,
PkgNameMatchTrigger,
NoFeedTrigger
]
def prepare_context(self, image_obj, context):
"""
Prep the npm names and versions
:param image_obj:
:param context:
:return:
"""
if not image_obj.npms:
return context
context.data[NPM_LISTING_KEY] = {p.name: p.versions_json for p in image_obj.npms}
npms = context.data[NPM_LISTING_KEY].keys()
context.data[NPM_MATCH_KEY] = []
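        # query the feed table in batches of 100 names so the SQL IN()
        # clause stays a manageable size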
chunks = [npms[i: i+100] for i in xrange(0, len(npms), 100)]
for key_range in chunks:
context.data[NPM_MATCH_KEY] += context.db.query(NpmMetadata).filter(NpmMetadata.name.in_(key_range)).all()
return context
|
[
"[email protected]"
] | |
f711172c3480c5580dd6594014f2a13fb124054c
|
f26dd860c8d764fc7a47bde656f393795cd8d763
|
/david13.py
|
f0f78ee556259290f4fcefbd2eb9801ee2858e03
|
[] |
no_license
|
chokkuu1998/david
|
8e9fa162f657c8b9bb55502f1cdd730a08ff0235
|
4dc999cdb73383b5a5d7ed3d98b2c1a4d6b5f7ee
|
refs/heads/master
| 2020-03-28T17:05:04.046963 | 2019-07-16T08:07:37 | 2019-07-16T08:07:37 | 148,756,731 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 354 |
py
|
AA,BB=map(int,input().split())
CC=list(map(int,input().split()))
pp=list(map(int,input().split()))
qq=[]
rr=0
for i in range(AA):
    x=pp[i]/CC[i]          # profit-to-cost ratio of item i
    qq.append(x)
while BB>=0 and len(qq)>0:
    mindex=qq.index(max(qq))   # index of the best remaining ratio
    if BB>=CC[mindex]:
        rr=rr+pp[mindex]
        BB=BB-CC[mindex]
CC.pop(mindex)
pp.pop(mindex)
qq.pop(mindex)
print(rr)
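# e.g. with input lines "3 10", "4 5 6", "8 5 3" the greedy takes the two
# items with the best profit/cost ratio (costs 4 and 5) and prints 13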
|
[
"[email protected]"
] | |
e8e08e4b4c84e23d22c92940cf1d38e721e9617e
|
dc80f94c1a244002db468fc7242d5fcaafe439dc
|
/powerdns_client/api/stats_api.py
|
865ce4494cac1c4a36ceedb5e0f8587189c76576
|
[
"MIT"
] |
permissive
|
sanvu88/python-powerdns-client
|
f675e1ee162bb76190b41ddf0cfc34e2305a757b
|
57dd0460995a5407c6f5c963553b4df0f4859667
|
refs/heads/master
| 2023-02-04T07:05:31.095951 | 2020-12-15T16:48:15 | 2020-12-15T16:48:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,803 |
py
|
# coding: utf-8
"""
PowerDNS Authoritative HTTP API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.0.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from powerdns_client.api_client import ApiClient
class StatsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_stats(self, server_id, **kwargs): # noqa: E501
"""Query statistics. # noqa: E501
Query PowerDNS internal statistics. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_stats(server_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str statistic: When set to the name of a specific statistic, only this value is returned. If no statistic with that name exists, the response has a 422 status and an error message.
:param bool includerings: “true” (default) or “false”, whether to include the Ring items, which can contain thousands of log messages or queried domains. Setting this to ”false” may make the response a lot smaller.
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_stats_with_http_info(server_id, **kwargs) # noqa: E501
else:
(data) = self.get_stats_with_http_info(server_id, **kwargs) # noqa: E501
return data
def get_stats_with_http_info(self, server_id, **kwargs): # noqa: E501
"""Query statistics. # noqa: E501
Query PowerDNS internal statistics. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_stats_with_http_info(server_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str statistic: When set to the name of a specific statistic, only this value is returned. If no statistic with that name exists, the response has a 422 status and an error message.
:param bool includerings: “true” (default) or “false”, whether to include the Ring items, which can contain thousands of log messages or queried domains. Setting this to ”false” may make the response a lot smaller.
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['server_id', 'statistic', 'includerings'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_stats" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'server_id' is set
if ('server_id' not in params or
params['server_id'] is None):
raise ValueError("Missing the required parameter `server_id` when calling `get_stats`") # noqa: E501
collection_formats = {}
path_params = {}
if 'server_id' in params:
path_params['server_id'] = params['server_id'] # noqa: E501
query_params = []
if 'statistic' in params:
query_params.append(('statistic', params['statistic'])) # noqa: E501
if 'includerings' in params:
query_params.append(('includerings', params['includerings'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/servers/{server_id}/statistics', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[object]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
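# Example usage (a sketch, not part of the generated client; assumes a
# reachable PowerDNS API with the key already configured on the default
# ApiClient, and uses 'localhost', the conventional server_id for the
# local authoritative server):
#   api = StatsApi()
#   stats = api.get_stats('localhost', statistic='uptime')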
|
[
"[email protected]"
] | |
6229e7231c45038a0d515693de51d6b3b5ee16fe
|
9b10d8482a7af9c90766747f5f2ddc343871d5fa
|
/Gemtek/AutoTest/DropAP/WRTM-326ACN-DropAP2/premises/library/test.py
|
f158319fe329093f6d1fd74a233f2a489a42b9b0
|
[] |
no_license
|
DarcyChang/MyProjects
|
86d33f5cf8bdfd4b21e64922e4eb25c1afc3c135
|
47efb2dfe13ace264f8943b59b701f39f23c4c17
|
refs/heads/master
| 2021-05-12T12:43:39.255082 | 2020-09-23T06:42:03 | 2020-09-23T06:42:03 | 117,419,269 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 609 |
py
|
__author__ = 'alu'
import re
import time
import cafe
from cafe.resp.response_map import ResponseMap
from collections import OrderedDict
from demo.alu_demo.User_Cases.test_lib import Teststeplib_e7 as e7_lib
res = "ONT Subscriber Info Status" \
"---------- ------------------------------------------------ ---------------" \
"205 <no subscriber ID> enabled" \
" Last Location: 2/1"
r = ResponseMap(res)
table1 = r.table_match_by_delimiter()
print"table1:",table1[-1]
print type(table1[-1])
|
[
"[email protected]"
] | |
d95f0b89899c28fd7e790e02a64cba46aff3d59d
|
1ad2ae0383341f2b92fe38173612be5d9c4970e8
|
/polls/models.py
|
75a460d4f5e68fc9d5052737ed7677900239b83f
|
[
"MIT"
] |
permissive
|
pizzapanther/ppp
|
9b0df90ddf2e52ffdaf43394026613dbd884c0e9
|
3286f39f8e90f3473841a154ff7189a3efd9ca94
|
refs/heads/master
| 2021-09-23T03:52:27.915606 | 2020-03-04T18:04:08 | 2020-03-04T18:04:08 | 222,154,111 | 0 | 0 |
MIT
| 2021-09-22T18:10:01 | 2019-11-16T20:16:27 |
Python
|
UTF-8
|
Python
| false | false | 1,400 |
py
|
from django.conf import settings
from django.db import models
from django.contrib.postgres.fields import ArrayField
class Presentation(models.Model):
title = models.CharField(max_length=100)
slug = models.SlugField(max_length=100)
def __str__(self):
return self.title
def current(self):
return self.poll_set.filter(live=True).first()
class Poll(models.Model):
question = models.CharField(max_length=254)
choices = ArrayField(models.CharField(max_length=254))
live = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
presentation = models.ForeignKey(Presentation, on_delete=models.SET_NULL, blank=True, null=True)
class Meta:
ordering = ('-created',)
def __str__(self):
return self.question
def json_data(self):
votes = []
for (i, choice) in enumerate(self.choices):
votes.append(self.vote_set.filter(choice=i).count())
return {
'id': self.id,
'slug': self.presentation.slug,
'question': self.question,
'choices': self.choices,
'votes': votes,
'total': self.vote_set.all().count(),
}
class Vote(models.Model):
poll = models.ForeignKey(Poll, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
choice = models.PositiveSmallIntegerField()
def __str__(self):
return f'{self.poll} - {self.user}'
|
[
"[email protected]"
] | |
2b24ec034a34c513b9c6b1bd086580ec9964d106
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Geometry/HcalEventSetup/python/CaloTowerGeometryDBWriter_cfi.py
|
dc75ba33a6cedd5c4191026f97719656397c89c3
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 |
Apache-2.0
| 2023-09-14T19:14:28 | 2013-06-26T14:09:07 |
C++
|
UTF-8
|
Python
| false | false | 531 |
py
|
import FWCore.ParameterSet.Config as cms
CaloTowerHardcodeGeometryEP = cms.ESProducer( "CaloTowerHardcodeGeometryEP" ,
appendToDataLabel = cms.string("_master")
)
CaloTowerGeometryToDBEP = cms.ESProducer( "CaloTowerGeometryToDBEP" ,
applyAlignment = cms.bool(False) ,
appendToDataLabel = cms.string("_toDB")
)
|
[
"[email protected]"
] | |
35a47b027566248963ff354a2a07b0ef7377d61c
|
1bccf0b1374dcfddfc3e320fd5b6af499334df2d
|
/scripts/hashtagUserCounts.py
|
4a780d5a584536af79f7279e772bc4f2cc89c7c9
|
[
"Unlicense"
] |
permissive
|
chebee7i/twitter
|
6b245f5a7b7510089b62d48567e6208e1fe8a1db
|
ec1d772c3ef7d2288ac8051efb8637378f3ec195
|
refs/heads/master
| 2021-01-01T16:25:13.242941 | 2015-06-24T19:39:24 | 2015-06-24T19:39:24 | 23,846,593 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,533 |
py
|
"""
Insert the number of users that tweeted each hashtag.
"""
import twitterproj
import pymongo
from collections import defaultdict
import itertools
import json
import os
import io
db = twitterproj.connect()
def add_user_counts(bot_filtered=True):
collection = db.tweets.with_hashtags
if bot_filtered:
skip_users = twitterproj.subcollections.get_skip_users()
target = db.hashtags.bot_filtered
else:
skip_users = set([])
target = db.hashtags
counts = defaultdict(int)
users = defaultdict(set)
for i, tweet in enumerate(collection.find()):
user_id = tweet['user']['id']
if user_id in skip_users:
continue
for hashtag in tweet['hashtags']:
counts[hashtag] += 1
users[hashtag].add(user_id)
for i, (hashtag, count) in enumerate(counts.iteritems()):
target.update({'_id': hashtag, 'count': count},
{"$set": {'user_count': len(users[hashtag])}},
upsert=False)
def to_json(filename, mincount=1000, bot_filtered=True):
if bot_filtered:
collection = db.hashtags.bot_filtered
else:
collection = db.hashtags
rows = []
if mincount is not None:
it = collection.find({'user_count': {'$gte': mincount}})
else:
        it = collection.find()
for doc in it:
row = [doc['_id'], doc['count'], doc['user_count']]
rows.append(row)
data = {'data': rows}
with open(filename, 'w') as fobj:
json.dump(data, fobj)
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
def to_csv(filename, mincount=1000, bot_filtered=True):
"""
Writes hashtags to CSV, filtering hashtags that were not mentioned by
some minimum number of users.
"""
if bot_filtered:
collection = db.hashtags.bot_filtered
else:
collection = db.hashtags
rows = []
if mincount is not None:
it = collection.find({'user_count': {'$gte': mincount}})
else:
        it = collection.find()
it = it.sort('user_count', pymongo.DESCENDING)
basename, ext = os.path.splitext(filename)
if not ext:
ext = '.csv'
data = """
This file contains information regarding the UTF-8 encoded CSV file:
{0}{1}
Each line of that file contains 3 pieces of information, separated by commas:
1. hashtag
2. number of times the hashtag was tweeted
3. number of users who tweeted the hashtag
Lines are sorted, descendingly, according to column 3.
Counts are tabulated wrt geotagged tweets in the contiguous states.
{2}
Hashtags were included only if they were tweeted by at least {3} users across all regions.
"""
if bot_filtered:
text = 'Tweets from users determined to be robots were excluded from the counting process.'
else:
text = ''
data = data.format(basename, ext, text, mincount)
with open(basename + '.txt', 'w') as fobj:
fobj.write(data)
with io.open(basename + ext, 'w', encoding='utf-8') as fobj:
for docs in grouper(10000, it):
rows = []
for doc in docs:
if doc is None:
break
row = [doc['_id'], str(doc['count']), str(doc['user_count'])]
rows.append(','.join(row))
fobj.write(u'\n'.join(rows))
fobj.write(u'\n') # So groups are separated.
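# Example usage (hypothetical filename): to_csv('hashtags.csv', mincount=1000)
# writes the UTF-8 CSV plus a 'hashtags.txt' sidecar describing its columns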
|
[
"[email protected]"
] | |
ff8f86292617a8597edc809076063b0f6261283c
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/SiamFC/src/dataset.py
|
83c8e8c6e3e8ce864c6d87af664d12aa08b25bc6
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 |
Apache-2.0
| 2023-05-17T11:22:28 | 2021-10-15T06:38:37 |
Python
|
UTF-8
|
Python
| false | false | 5,068 |
py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""VID dataset"""
import os
import pickle
import hashlib
import cv2
import numpy as np
from src.config import config
class ImagnetVIDDataset():
"""
used in GeneratorDataset to deal with image pair
Args:
db : lmdb file
video_names : all video name
data_dir : the location of image pair
z_transforms : the transforms list used in exemplar
x_transforms : the transforms list used in instance
training : status of training
"""
def __init__(self, db, video_names, data_dir, z_transforms, x_transforms, training=True):
self.video_names = video_names
self.data_dir = data_dir
self.z_transforms = z_transforms
self.x_transforms = x_transforms
meta_data_path = os.path.join(data_dir, 'meta_data.pkl')
self.meta_data = pickle.load(open(meta_data_path, 'rb'))
self.meta_data = {x[0]: x[1] for x in self.meta_data}
for key in self.meta_data.keys():
trajs = self.meta_data[key]
for trkid in list(trajs.keys()):
if len(trajs[trkid]) < 2:
del trajs[trkid]
self.txn = db.begin(write=False)
self.num = len(self.video_names) if config.num_per_epoch is None or not \
training else config.num_per_epoch
def imread(self, path):
"""
        read an image from the lmdb store according to its path
        Args:
path : the image path
"""
key = hashlib.md5(path.encode()).digest()
img_buffer = self.txn.get(key)
img_buffer = np.frombuffer(img_buffer, np.uint8)
img = cv2.imdecode(img_buffer, cv2.IMREAD_COLOR)
return img
def _sample_weights(self, center, low_idx, high_idx, s_type='uniform'):
"""
According to the center image to pick another image,setting the weights
will be used in different type distribution
Args:
center : the position of center image
low_idx : the minimum of id
high_idx : the max of id
s_type : choose different distribution. "uniform", "sqrt", "linear"
can be chosen
"""
weights = list(range(low_idx, high_idx))
weights.remove(center)
weights = np.array(weights)
if s_type == 'linear':
weights = abs(weights - center)
elif s_type == 'sqrt':
weights = np.sqrt(abs(weights - center))
elif s_type == 'uniform':
weights = np.ones_like(weights)
return weights / sum(weights)
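    # e.g. _sample_weights(2, 0, 5, 'linear') weights the candidate ids
    # [0, 1, 3, 4] by their distance from 2 -> [2, 1, 1, 2] / 6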
def __getitem__(self, idx):
idx = idx % len(self.video_names)
video = self.video_names[idx]
trajs = self.meta_data[video]
trkid = np.random.choice(list(trajs.keys()))
traj = trajs[trkid]
assert len(traj) > 1, "video_name: {}".format(video)
exemplar_idx = np.random.choice(list(range(len(traj))))
exemplar_name = os.path.join(self.data_dir, video,
traj[exemplar_idx] + ".{:02d}.x.jpg".format(trkid))
exemplar_img = self.imread(exemplar_name)
exemplar_img = cv2.cvtColor(exemplar_img, cv2.COLOR_BGR2RGB)
# sample instance
low_idx = max(0, exemplar_idx - config.frame_range)
up_idx = min(len(traj), exemplar_idx + config.frame_range)
weights = self._sample_weights(exemplar_idx, low_idx, up_idx, config.sample_type)
instance = np.random.choice(traj[low_idx:exemplar_idx] + traj[exemplar_idx + 1:up_idx],
p=weights)
instance_name = os.path.join(self.data_dir, video, instance + ".{:02d}.x.jpg".format(trkid))
instance_img = self.imread(instance_name)
instance_img = cv2.cvtColor(instance_img, cv2.COLOR_BGR2RGB)
if np.random.rand(1) < config.gray_ratio:
exemplar_img = cv2.cvtColor(exemplar_img, cv2.COLOR_RGB2GRAY)
exemplar_img = cv2.cvtColor(exemplar_img, cv2.COLOR_GRAY2RGB)
instance_img = cv2.cvtColor(instance_img, cv2.COLOR_RGB2GRAY)
instance_img = cv2.cvtColor(instance_img, cv2.COLOR_GRAY2RGB)
exemplar_img = self.z_transforms(exemplar_img)
instance_img = self.x_transforms(instance_img)
return exemplar_img, instance_img
def __len__(self):
return self.num
|
[
"[email protected]"
] | |
8eeaa0ca64e1bf2b2d43b5a3ce16af064f666d4a
|
67a442ecabcdca9f54f5920874d0095d57f98ede
|
/gewittergefahr/gg_utils/dilation_test.py
|
ffb5a2725b02b9dd7ffdc08e4b856685a7be3f54
|
[
"MIT"
] |
permissive
|
thunderhoser/GewitterGefahr
|
58ba3446c1cc154f56c12c4354dff05b34c12b13
|
1835a71ababb7ad7e47bfa19e62948d466559d56
|
refs/heads/master
| 2022-07-23T06:47:13.883598 | 2022-07-15T12:43:48 | 2022-07-15T12:43:48 | 104,016,785 | 29 | 13 |
MIT
| 2020-12-18T20:44:33 | 2017-09-19T02:37:21 |
Python
|
UTF-8
|
Python
| false | false | 2,662 |
py
|
"""Unit tests for dilation.py."""
import unittest
import numpy
from gewittergefahr.gg_utils import dilation
TOLERANCE = 1e-6
SMALL_PERCENTILE = 12.5
LARGE_PERCENTILE = 87.5
DILATION_HALF_WIDTH_IN_PIXELS = 1
INPUT_MATRIX = numpy.array(
[[-20., -15., -10., -5., 0.],
[-10., -5., 0., 5., 10.],
[0., 5., 10., numpy.nan, numpy.nan],
[10., 15., 20., numpy.nan, numpy.nan]])
OUTPUT_MATRIX_SMALL_PERCENTILE = numpy.array(
[[-15., -15., -10., -5., numpy.nan],
[-15., -15., -10., -5., numpy.nan],
[-5., -5., numpy.nan, numpy.nan, numpy.nan],
[numpy.nan, numpy.nan, numpy.nan, numpy.nan, numpy.nan]])
OUTPUT_MATRIX_LARGE_PERCENTILE = numpy.array(
[[numpy.nan, numpy.nan, numpy.nan, 5., 5.],
[numpy.nan, 5., 5., 10., 5.],
[10., 15., 15., 10., 5.],
[10., 15., 15., 10., numpy.nan]])
OUTPUT_MATRIX_LARGEST_ABS_VALUE = numpy.array(
[[-15., -15., -10., 5., 5.],
[-15., -15., -10., 10., 5.],
[10., 15., 15., 10., 5.],
[10., 15., 15., 10., numpy.nan]])
class DilationTests(unittest.TestCase):
"""Each method is a unit test for dilation.py."""
def test_dilate_2d_matrix_small_percentile(self):
"""Ensures correct output from dilate_2d_matrix with small prctile."""
this_output_matrix = dilation.dilate_2d_matrix(
INPUT_MATRIX, percentile_level=SMALL_PERCENTILE,
half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS)
self.assertTrue(numpy.allclose(
this_output_matrix, OUTPUT_MATRIX_SMALL_PERCENTILE, atol=TOLERANCE,
equal_nan=True))
def test_dilate_2d_matrix_large_percentile(self):
"""Ensures correct output from dilate_2d_matrix with large prctile."""
this_output_matrix = dilation.dilate_2d_matrix(
INPUT_MATRIX, percentile_level=LARGE_PERCENTILE,
half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS)
self.assertTrue(numpy.allclose(
this_output_matrix, OUTPUT_MATRIX_LARGE_PERCENTILE, atol=TOLERANCE,
equal_nan=True))
def test_dilate_2d_matrix_take_largest_abs_value(self):
"""Ensures correct output from dilate_2d_matrix.
In this case, take_largest_absolute_value = True.
"""
this_output_matrix = dilation.dilate_2d_matrix(
INPUT_MATRIX, percentile_level=LARGE_PERCENTILE,
half_width_in_pixels=DILATION_HALF_WIDTH_IN_PIXELS,
take_largest_absolute_value=True)
self.assertTrue(numpy.allclose(
this_output_matrix, OUTPUT_MATRIX_LARGEST_ABS_VALUE, atol=TOLERANCE,
equal_nan=True))
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
2282c152c06546a82f340c485eafb5b3ed595424
|
f124cb2443577778d8708993c984eafbd1ae3ec3
|
/saleor/graphql/checkout/mutations/__init__.py
|
7718967b325810e573ce4fd4fe617dcc87905933
|
[
"BSD-3-Clause"
] |
permissive
|
quangtynu/saleor
|
ac467193a7779fed93c80251828ac85d92d71d83
|
5b0e5206c5fd30d81438b6489d0441df51038a85
|
refs/heads/master
| 2023-03-07T19:41:20.361624 | 2022-10-20T13:19:25 | 2022-10-20T13:19:25 | 245,860,106 | 1 | 0 |
BSD-3-Clause
| 2023-03-06T05:46:25 | 2020-03-08T17:44:18 |
Python
|
UTF-8
|
Python
| false | false | 1,556 |
py
|
from .checkout_add_promo_code import CheckoutAddPromoCode
from .checkout_billing_address_update import CheckoutBillingAddressUpdate
from .checkout_complete import CheckoutComplete
from .checkout_create import CheckoutCreate
from .checkout_customer_attach import CheckoutCustomerAttach
from .checkout_customer_detach import CheckoutCustomerDetach
from .checkout_delivery_method_update import CheckoutDeliveryMethodUpdate
from .checkout_email_update import CheckoutEmailUpdate
from .checkout_language_code_update import CheckoutLanguageCodeUpdate
from .checkout_line_delete import CheckoutLineDelete
from .checkout_lines_add import CheckoutLinesAdd
from .checkout_lines_delete import CheckoutLinesDelete
from .checkout_lines_update import CheckoutLinesUpdate
from .checkout_remove_promo_code import CheckoutRemovePromoCode
from .checkout_shipping_address_update import CheckoutShippingAddressUpdate
from .checkout_shipping_method_update import CheckoutShippingMethodUpdate
from .order_create_from_checkout import OrderCreateFromCheckout
__all__ = [
"CheckoutAddPromoCode",
"CheckoutBillingAddressUpdate",
"CheckoutComplete",
"CheckoutCreate",
"CheckoutCustomerAttach",
"CheckoutCustomerDetach",
"CheckoutDeliveryMethodUpdate",
"CheckoutEmailUpdate",
"CheckoutLanguageCodeUpdate",
"CheckoutLineDelete",
"CheckoutLinesAdd",
"CheckoutLinesDelete",
"CheckoutLinesUpdate",
"CheckoutRemovePromoCode",
"CheckoutShippingAddressUpdate",
"CheckoutShippingMethodUpdate",
"OrderCreateFromCheckout",
]
|
[
"[email protected]"
] | |
838f2f8902ca4fdcf743b209c0a1ff7c7ab3412d
|
229ed0dad61f9e855de604c230d034a0bd9b3882
|
/EdabitPractice/evenOddCounter.py
|
06c675245b08e406cfb9c3b1124f90e1dd4de379
|
[] |
no_license
|
Darrenrodricks/EdabitPythonPractice
|
987d534dd149ddaef6219df381df850eabbe80b2
|
c1be8b10a6fcc1085640a1128f022c05fb2890a9
|
refs/heads/main
| 2023-07-17T00:36:43.772435 | 2021-08-31T16:24:07 | 2021-08-31T16:24:07 | 400,630,327 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 300 |
py
|
# Python program to count Even and Odd numbers in a List
# Input: list1 = [2, 7, 5, 64, 14]
# Output: Even = 3, odd = 2
a = 0
b = 0
list1 = [2, 7, 5, 64, 14]
for i in range(0, len(list1)):
    if list1[i] % 2 == 0:   # test the element itself, not its index
a += 1
else:
b += 1
print("There are {} Even, and {} Odd".format(a, b))
|
[
"[email protected]"
] | |
9cca242910678dbdb4fce620cc6f69091f65087c
|
539b031a4edd1aec31af5b6658f25a0de03776a4
|
/strings_and_text/sub_re_groups1.py
|
b91fbc11289d3b5f5a17a2b714d35dde5bec785c
|
[] |
no_license
|
leogtzr/python-cookbook-code-snippets
|
c517e7f14e468e1aa8def71d3389348150d43085
|
a3f189c26ba38bc982dd140b3b4d6326b39671dc
|
refs/heads/main
| 2023-01-23T07:16:30.292456 | 2020-11-28T04:29:42 | 2020-11-28T04:29:42 | 309,217,104 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 477 |
py
|
import re
from calendar import month_abbr
def change_date(m):
mon_name = month_abbr[int(m.group(1))]
return '[{}] ({}) |{}|'.format(m.group(2), mon_name, m.group(3))
datepat = re.compile(r'(\d+)/(\d+)/(\d+)')
text = 'Today is 11/27/2012. PyCon starts 3/13/2013.'
# a substitution callback function
print(datepat.sub(change_date, text))
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
# print(month_abbr[12])
for month_num in range(1, 13):
print(month_abbr[month_num])
|
[
"[email protected]"
] | |
0998ba915d80be9aaf99b9cef30acdb467528d1c
|
2e145222a18d4509d937951f5cec4df0e26ee86f
|
/vas/gemfire/CacheServerInstances.py
|
9405a719dd10335d0ccd1c552ba07c8a6ef1c57d
|
[
"Apache-2.0"
] |
permissive
|
vdreamakitex/vas-python-api
|
7627b7e3fcf76c16b1ea8b9fb670fdb708eff083
|
ce7148a2044863e078e78b47abbaafc426f732ee
|
refs/heads/master
| 2021-01-18T05:13:25.459916 | 2012-11-05T09:58:45 | 2012-11-05T09:58:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,867 |
py
|
# vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vas.shared.Instance import Instance
from vas.shared.MutableCollection import MutableCollection
from vas.util.LinkUtils import LinkUtils
class CacheServerInstances(MutableCollection):
"""Used to enumerate, create, and delete cache server instances
:ivar `vas.shared.Security.Security` security: The resource's security
"""
def __init__(self, client, location):
super(CacheServerInstances, self).__init__(client, location, 'cache-server-group-instances',
CacheServerInstance)
def create(self, installation, name):
"""Creates a new cache server instance
:param `vas.gemfire.Installations.Installation` installation: The installation to be used by the instance
:param str name: The name of the instance
:rtype: :class:`vas.gemfire.CacheServerInstances.CacheServerInstance`
:return: The new cache server instance
"""
payload = {'installation': installation._location, 'name': name}
return self._create(payload, 'cache-server-group-instance')
class CacheServerInstance(Instance):
"""A cache server instance
:ivar `vas.gemfire.Groups.Group` group: The group that contains this instance
:ivar `vas.gemfire.Installations.Installation` installation: The installation that this instance is using
:ivar `vas.gemfire.LiveApplicationCodes.LiveApplicationCodes` live_application_code: The instance's live
application code
:ivar `vas.gemfire.CacheServerLiveConfigurations.CacheServerLiveConfigurations` live_configurations: The instance's live
configurations
:ivar str name: The instance's name
:ivar list node_instances: The instance's individual node instances
:ivar `vas.gemfire.PendingApplicationCodes.PendingApplicationCodes` pending_application_code: The instance's
pending application
code
:ivar `vas.gemfire.CacheServerPendingConfigurations.CacheServerPendingConfigurations` pending_configurations: The instance's
pending configurations
:ivar `vas.shared.Security.Security` security: The resource's security
:ivar str state: Retrieves the state of the resource from the server.
Will be one of:
* ``STARTING``
* ``STARTED``
* ``STOPPING``
* ``STOPPED``
"""
__live_application_code = None
__pending_application_code = None
@property
def live_application_code(self):
self.__live_application_code = self.__live_application_code or LiveApplicationCodes(self._client,
self.__live_application_code_location)
return self.__live_application_code
@property
def pending_application_code(self):
self.__pending_application_code = self.__pending_application_code or PendingApplicationCodes(self._client,
self.__pending_application_code_location)
return self.__pending_application_code
def __init__(self, client, location):
super(CacheServerInstance, self).__init__(client, location, Group, Installation, CacheServerLiveConfigurations,
CacheServerPendingConfigurations, CacheServerNodeInstance, 'cache-server-node-instance')
self.__live_application_code_location = LinkUtils.get_link_href(self._details, 'live-application-code')
self.__pending_application_code_location = LinkUtils.get_link_href(self._details, 'pending-application-code')
def update(self, installation):
"""Updates the instance to use a different installation
:param `vas.gemfire.Installations.Installation` installation: The installation that the instance should use
"""
self._client.post(self._location, {'installation': installation._location})
self.reload()
from vas.gemfire.CacheServerLiveConfigurations import CacheServerLiveConfigurations
from vas.gemfire.CacheServerNodeInstances import CacheServerNodeInstance
from vas.gemfire.CacheServerPendingConfigurations import CacheServerPendingConfigurations
from vas.gemfire.Groups import Group
from vas.gemfire.Installations import Installation
from vas.gemfire.LiveApplicationCodes import LiveApplicationCodes
from vas.gemfire.PendingApplicationCodes import PendingApplicationCodes
|
[
"[email protected]"
] | |
53bc8edebb6fabc73a2cacad23ca6d8b08fa9b0a
|
16450d59c820298f8803fd40a1ffa2dd5887e103
|
/baekjoon/2667.py
|
b85ee7659586696418e866ced977042046429337
|
[] |
no_license
|
egyeasy/TIL_public
|
f78c11f81d159eedb420f5fa177c05d310c4a039
|
e2f40eda09cb0a65cc064d9ba9b0e2fa7cbbcb38
|
refs/heads/master
| 2021-06-21T01:22:16.516777 | 2021-02-02T13:16:21 | 2021-02-02T13:16:21 | 167,803,551 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,257 |
py
|
"""
As shown in <Figure 1>, there is a square map where 1 marks a cell with a house and 0 marks an empty cell. Cheolsu wants to group connected houses into complexes and number each complex. Houses count as connected only when another house is adjacent horizontally or vertically; diagonal neighbours are not connected. <Figure 2> shows <Figure 1> numbered by complex. Given the map, write a program that prints the number of complexes and the number of houses in each complex in ascending order.
> input
The first line contains the map size N (the map is square, so width equals height; 5 ≤ N ≤ 25). Each of the next N lines contains N digits (0 or 1).
7
0110100
0110101
1110101
0000111
0100000
0111110
0111000
> output
Print the total number of complexes on the first line, then the number of houses in each complex in ascending order, one per line.
3
7
8
9
"""
import sys
sys.stdin = open('2667.txt', 'r')
each_cnt = 0
def DFS(s):
global each_cnt
visited[s[0]][s[1]] = 1
each_cnt += 1
go_list = [[-1, 0], [0, 1], [1, 0], [0, -1]]
for go in go_list:
if matrix[s[0] + go[0]][s[1] + go[1]] == 1 and not visited[s[0] + go[0]][s[1] + go[1]]:
DFS([s[0] + go[0], s[1] + go[1]])
m = int(input())
matrix = [[0] * (m + 2) for i in range(m + 2)]
visited = [[0] * (m + 2) for i in range(m + 2)]
for i in range(m):
aline = list(map(int, input()))
for j in range(m):
matrix[i + 1][j + 1] = aline[j]
# for i in matrix:
# print(i)
total_cnt = 0
each_cnt = 0
cnts = [0] * (m**2)
idx = 0
for i in range(1, m + 2):
for j in range(1, m + 2):
if matrix[i][j] == 1 and not visited[i][j]:
each_cnt = 0
total_cnt += 1
DFS([i, j])
# print(each_cnt)
cnts[idx] = each_cnt
idx += 1
print(total_cnt)
for i in sorted(cnts[:total_cnt]):
print(i)
# idea
# 1. A flood-fill problem: DFS with a little extra bookkeeping for component sizes.
# 2. Most important step: recognizing this as a connected-components (DFS) problem.
|
[
"[email protected]"
] | |
49f6016496073d31808c5ceda4ff0bb6ac102c09
|
974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184
|
/sdk/ml/azure-ai-ml/tests/compute/unittests/test_compute_operations.py
|
ecfc11bd07cf4aafd9e8a34abaa324d6be10f0af
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
gaoyp830/azure-sdk-for-python
|
4816f04c554dcffb7510a6b7044b0c86a2dd32e1
|
1c66defa502b754abcc9e5afa444ca03c609342f
|
refs/heads/master
| 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 |
MIT
| 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null |
UTF-8
|
Python
| false | false | 4,501 |
py
|
from typing import Callable
from unittest.mock import Mock
import pytest
import vcr
from pytest_mock import MockFixture
from azure.ai.ml import load_compute
from azure.ai.ml._scope_dependent_operations import OperationConfig, OperationScope
from azure.ai.ml.entities import AmlCompute, Compute, ComputeInstance, IdentityConfiguration, UserAssignedIdentity
from azure.ai.ml.operations import ComputeOperations
from azure.identity import DefaultAzureCredential
@pytest.fixture
def mock_compute_operation(
mock_workspace_scope: OperationScope, mock_operation_config: OperationConfig, mock_aml_services_2021_10_01: Mock
) -> ComputeOperations:
yield ComputeOperations(
operation_scope=mock_workspace_scope,
operation_config=mock_operation_config,
service_client=mock_aml_services_2021_10_01,
)
class funny:
def __init__(self):
self.location = "somelocation"
@pytest.mark.unittest
class TestComputeOperation:
def test_list(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.list()
mock_compute_operation._operation.list.assert_called_once()
def test_create_compute_instance(
self, mock_compute_operation: ComputeOperations, mocker: MockFixture
) -> None:
mocker.patch(
"azure.ai.ml._restclient.v2021_10_01.workspaces.get",
return_value=funny(),
)
mocker.patch(
"azure.ai.ml.entities.Compute._from_rest_object",
return_value=ComputeInstance(name="name", resource_id="test_resource_id"),
)
compute = load_compute("./tests/test_configs/compute/compute-ci-unit.yaml")
mock_compute_operation.begin_create_or_update(compute=compute)
mock_compute_operation._operation.begin_create_or_update.assert_called_once()
def test_create_aml_compute(
self, mock_compute_operation: ComputeOperations, mocker: MockFixture
) -> None:
mocker.patch("azure.ai.ml._restclient.v2021_10_01.workspaces.get", return_value=funny())
compute = load_compute("./tests/test_configs/compute/compute-aml.yaml")
mock_compute_operation.begin_create_or_update(compute=compute)
mock_compute_operation._operation.begin_create_or_update.assert_called_once()
def test_delete(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_delete("randstr")
mock_compute_operation._operation.begin_delete.assert_called_once()
def test_show(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.get("randstr")
mock_compute_operation._operation.get.assert_called_once()
def test_start(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_start("randstr")
mock_compute_operation._operation.begin_start.assert_called_once()
def test_stop(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_stop("randstr")
mock_compute_operation._operation.begin_stop.assert_called_once()
def test_restart(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_restart("randstr")
mock_compute_operation._operation.begin_restart.assert_called_once()
def test_update_aml_compute(
self, mock_compute_operation: ComputeOperations, mocker: MockFixture
) -> None:
compute = AmlCompute(
name="name",
tags={"key1": "value1", "key2": "value2"},
min_instances=0,
max_instances=10,
idle_time_before_scale_down=100,
identity=IdentityConfiguration(
type="UserAssigned",
user_assigned_identities=[
UserAssignedIdentity(
resource_id="/subscriptions/b17253fa-f327-42d6-9686-f3e553e24763/resourcegroups/MC_banibatch_bani-aks_eastus/providers/Microsoft.ManagedIdentity/userAssignedIdentities/omsagent-bani-aks"
)
],
),
)
mock_compute_operation.begin_update(compute)
mock_compute_operation._operation.begin_create_or_update.assert_called_once()
def test_detach(self, mock_compute_operation: ComputeOperations) -> None:
mock_compute_operation.begin_delete(
name="randstr",
action="Detach",
)
mock_compute_operation._operation.begin_delete.assert_called_once()
|
[
"[email protected]"
] | |
6229d71ac4298b44124dd4b8e60fbc94f362f721
|
22f57701df31b3182f3bcb83da729ecc584f8fb6
|
/December-12/py_anuppriya_revsinglylinkedlist.py
|
eca33e40110bce5169894193a53714e455c02d79
|
[] |
no_license
|
Prashant-Bharaj/A-December-of-Algorithms
|
e88640c711abbe2e6cac71cb4652dac243984484
|
7bbd56572f4ddc9648e90615ee810765544c56e4
|
refs/heads/master
| 2023-08-05T15:37:20.362561 | 2021-09-19T05:51:53 | 2021-09-19T05:51:53 | 287,055,360 | 0 | 0 | null | 2020-08-12T15:53:05 | 2020-08-12T15:53:04 | null |
UTF-8
|
Python
| false | false | 995 |
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class LinkedList:
def __init__(self):
self.head = None
def reverse(self):
prev = None
current = self.head
while(current is not None):
next = current.next
current.next = prev
prev = current
current = next
self.head = prev
def push(self, new_data):
new_node = Node(new_data)
new_node.next = self.head
self.head = new_node
def printList(self):
temp = self.head
while(temp):
print (temp.data)
temp = temp.next
llist = LinkedList()
llist.push(13)
llist.push(18)
llist.push(22)
llist.push(48)
print( "Given Linked List")
llist.printList()
llist.reverse()
print ("\nReversed Linked List")
llist.printList()
|
[
"[email protected]"
] | |
e8c2786ad69cfccec2ad37b66382443519baed1a
|
59fb17c240b261040026d713a6ac9c97d6a9f265
|
/gym/gym/envs/registration.py
|
18519749167fe193d8d2cb3b3348653ae837fd17
|
[
"MIT"
] |
permissive
|
dmeger/TeachingImitation
|
3fb97499e76929959913266f127154f6ae5a8e99
|
5f4dba7e49987924c3d55cd27579cad4c71ef7a4
|
refs/heads/master
| 2023-03-28T13:25:01.307382 | 2021-04-06T15:07:08 | 2021-04-06T15:07:08 | 355,223,500 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,883 |
py
|
import re
import copy
import importlib
import warnings
from gym import error, logger
# This format is true today, but it's *not* an official spec.
# [username/](env-name)-v(version) env-name is group 1, version is group 2
#
# 2016-10-31: We're experimentally expanding the environment ID format
# to include an optional username.
env_id_re = re.compile(r'^(?:[\w:-]+\/)?([\w:.-]+)-v(\d+)$')
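# e.g. env_id_re.search('CartPole-v1').groups() == ('CartPole', '1')
#      env_id_re.search('username/Ant-v2').groups() == ('Ant', '2')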
def load(name):
mod_name, attr_name = name.split(":")
mod = importlib.import_module(mod_name)
fn = getattr(mod, attr_name)
return fn
class EnvSpec(object):
"""A specification for a particular instance of the environment. Used
to register the parameters for official evaluations.
Args:
id (str): The official environment ID
entry_point (Optional[str]): The Python entrypoint of the environment class (e.g. module.name:Class)
reward_threshold (Optional[int]): The reward threshold before the task is considered solved
nondeterministic (bool): Whether this environment is non-deterministic even after seeding
max_episode_steps (Optional[int]): The maximum number of steps that an episode can consist of
kwargs (dict): The kwargs to pass to the environment class
"""
def __init__(self, id, entry_point=None, reward_threshold=None, nondeterministic=False, max_episode_steps=None, kwargs=None):
self.id = id
self.entry_point = entry_point
self.reward_threshold = reward_threshold
self.nondeterministic = nondeterministic
self.max_episode_steps = max_episode_steps
self._kwargs = {} if kwargs is None else kwargs
match = env_id_re.search(id)
if not match:
raise error.Error('Attempted to register malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id, env_id_re.pattern))
self._env_name = match.group(1)
def make(self, **kwargs):
"""Instantiates an instance of the environment with appropriate kwargs"""
if self.entry_point is None:
raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))
_kwargs = self._kwargs.copy()
_kwargs.update(kwargs)
if callable(self.entry_point):
env = self.entry_point(**_kwargs)
else:
cls = load(self.entry_point)
env = cls(**_kwargs)
# Make the environment aware of which spec it came from.
spec = copy.deepcopy(self)
spec._kwargs = _kwargs
env.unwrapped.spec = spec
return env
def __repr__(self):
return "EnvSpec({})".format(self.id)
class EnvRegistry(object):
"""Register an env by ID. IDs remain stable over time and are
guaranteed to resolve to the same environment dynamics (or be
desupported). The goal is that results on a particular environment
should always be comparable, and not depend on the version of the
code that was running.
"""
def __init__(self):
self.env_specs = {}
def make(self, path, **kwargs):
if len(kwargs) > 0:
logger.info('Making new env: %s (%s)', path, kwargs)
else:
logger.info('Making new env: %s', path)
spec = self.spec(path)
env = spec.make(**kwargs)
# We used to have people override _reset/_step rather than
# reset/step. Set _gym_disable_underscore_compat = True on
# your environment if you use these methods and don't want
# compatibility code to be invoked.
if hasattr(env, "_reset") and hasattr(env, "_step") and not getattr(env, "_gym_disable_underscore_compat", False):
patch_deprecated_methods(env)
if env.spec.max_episode_steps is not None:
from gym.wrappers.time_limit import TimeLimit
env = TimeLimit(env, max_episode_steps=env.spec.max_episode_steps)
return env
def all(self):
return self.env_specs.values()
def spec(self, path):
if ':' in path:
mod_name, _sep, id = path.partition(':')
try:
importlib.import_module(mod_name)
# catch ImportError for python2.7 compatibility
except ImportError:
raise error.Error('A module ({}) was specified for the environment but was not found, make sure the package is installed with `pip install` before calling `gym.make()`'.format(mod_name))
else:
id = path
match = env_id_re.search(id)
if not match:
raise error.Error('Attempted to look up malformed environment ID: {}. (Currently all IDs must be of the form {}.)'.format(id.encode('utf-8'), env_id_re.pattern))
try:
return self.env_specs[id]
except KeyError:
# Parse the env name and check to see if it matches the non-version
# part of a valid env (could also check the exact number here)
env_name = match.group(1)
matching_envs = [valid_env_name for valid_env_name, valid_env_spec in self.env_specs.items()
if env_name == valid_env_spec._env_name]
if matching_envs:
raise error.DeprecatedEnv('Env {} not found (valid versions include {})'.format(id, matching_envs))
else:
raise error.UnregisteredEnv('No registered env with id: {}'.format(id))
def register(self, id, **kwargs):
if id in self.env_specs:
raise error.Error('Cannot re-register id: {}'.format(id))
self.env_specs[id] = EnvSpec(id, **kwargs)
# Have a global registry
registry = EnvRegistry()
def register(id, **kwargs):
return registry.register(id, **kwargs)
def make(id, **kwargs):
return registry.make(id, **kwargs)
def spec(id):
return registry.spec(id)
warn_once = True
def patch_deprecated_methods(env):
"""
Methods renamed from '_method' to 'method', render() no longer has 'close' parameter, close is a separate method.
For backward compatibility, this makes it possible to work with unmodified environments.
"""
global warn_once
if warn_once:
logger.warn("Environment '%s' has deprecated methods '_step' and '_reset' rather than 'step' and 'reset'. Compatibility code invoked. Set _gym_disable_underscore_compat = True to disable this behavior." % str(type(env)))
warn_once = False
env.reset = env._reset
env.step = env._step
env.seed = env._seed
def render(mode):
return env._render(mode, close=False)
def close():
env._render("human", close=True)
env.render = render
env.close = close
|
[
"[email protected]"
] | |
5a34b1c2505774cc28123bf7867e9d5b84e9422c
|
ea5de3d347ef4e1dcac9ee37da2d9850888d9ecc
|
/pawn_brotherhood.py
|
57460c2b269a4526698cd78561b3aa401f2e81a2
|
[] |
no_license
|
skoriy88/Chekio
|
4d50c18c54741c425d468a80a24ceb526a13dabe
|
fcbc291ca624cb9d5415128e605ea27d5e50983e
|
refs/heads/master
| 2020-03-18T11:26:09.966384 | 2018-05-25T13:52:54 | 2018-05-25T13:52:54 | 134,671,092 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 403 |
py
|
lst = {"b4", "d4", "f4", "c3", "e3", "g5", "d2"}
def safe_pawns(inp):
new = {(ord(i[0]), int(i[1])) for i in inp}
safe = sum(1 for pawn in new if(pawn[0]-1, pawn[1]-1) in new or (pawn[0]+1, pawn[1]-1) in new)
#print(safe)
return safe
safe_pawns(lst)
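# for the sample set above safe_pawns returns 6: every pawn except "d2"
# is covered diagonally by a pawn one row below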
'''
print(ord('a'))
print(ord('b'))
print(ord('c'))
print(ord('d'))
print(ord('e'))
print(ord('f'))
print(ord('g'))
print(ord('h'))
'''
|
[
"[email protected]"
] | |
0c952f5626c7a7187c2ce0175469a5ae5d62cbc9
|
26a0941b02286518e382fe86daa0dd5c0f596a9a
|
/stage_scenes.py
|
26bf98175d74488c0e99843bcaa5d0d4709e9ced
|
[
"MIT"
] |
permissive
|
Gargaran/videos
|
729c3c7e91cb20e5377b5e397b3b90ea91e3f8a1
|
26458da42fc665eb4ae844168c16ebb0526cc231
|
refs/heads/master
| 2023-08-22T16:36:33.235479 | 2021-10-06T22:48:08 | 2021-10-06T22:48:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,104 |
py
|
#!/usr/bin/env python
import inspect
import os
import sys
import importlib
from manimlib.config import get_module
from manimlib.extract_scene import is_child_scene
def get_sorted_scene_classes(module_name):
module = get_module(module_name)
if hasattr(module, "SCENES_IN_ORDER"):
return module.SCENES_IN_ORDER
# Otherwise, deduce from the order in which
# they're defined in a file
importlib.import_module(module.__name__)
line_to_scene = {}
name_scene_list = inspect.getmembers(
module,
lambda obj: is_child_scene(obj, module)
)
for name, scene_class in name_scene_list:
if inspect.getmodule(scene_class).__name__ != module.__name__:
continue
lines, line_no = inspect.getsourcelines(scene_class)
line_to_scene[line_no] = scene_class
return [
line_to_scene[index]
for index in sorted(line_to_scene.keys())
]
def stage_scenes(module_name):
scene_classes = get_sorted_scene_classes(module_name)
if len(scene_classes) == 0:
print("There are no rendered animations from this module")
return
# TODO, fix this
animation_dir = os.path.join(
os.path.expanduser('~'),
"Dropbox/3Blue1Brown/videos/2021/poly_fractal/videos"
)
#
files = os.listdir(animation_dir)
sorted_files = []
for scene_class in scene_classes:
scene_name = scene_class.__name__
clips = [f for f in files if f.startswith(scene_name + ".")]
for clip in clips:
sorted_files.append(os.path.join(animation_dir, clip))
# Partial movie file directory
# movie_dir = get_movie_output_directory(
# scene_class, **output_directory_kwargs
# )
# if os.path.exists(movie_dir):
# for extension in [".mov", ".mp4"]:
# int_files = get_sorted_integer_files(
# pmf_dir, extension=extension
# )
# for file in int_files:
# sorted_files.append(os.path.join(pmf_dir, file))
# else:
# animation_subdir = os.path.dirname(animation_dir)
count = 0
while True:
staged_scenes_dir = os.path.join(
animation_dir,
os.pardir,
"staged_scenes_{}".format(count)
)
if not os.path.exists(staged_scenes_dir):
os.makedirs(staged_scenes_dir)
break
# Otherwise, keep trying new names until
# there is a free one
count += 1
for count, f in reversed(list(enumerate(sorted_files))):
# Going in reversed order means that when finder
# sorts by date modified, it shows up in the
# correct order
symlink_name = os.path.join(
staged_scenes_dir,
"Scene_{:03}_{}".format(
count, f.split(os.sep)[-1]
)
)
os.symlink(f, symlink_name)
if __name__ == "__main__":
if len(sys.argv) < 2:
raise Exception("No module given.")
module_name = sys.argv[1]
stage_scenes(module_name)
|
[
"[email protected]"
] | |
852411151db8afff623d48a858ba720238508dd7
|
faaf12ab18978082233c09628b815a69e73868e4
|
/codechef/practice/easy/lebombs.py
|
3c70653a1ff04cd448e8e83575cc876a870c045a
|
[
"WTFPL"
] |
permissive
|
ferhatelmas/algo
|
6826bcf0be782cb102c1ee20dce8d4345e1fd6d2
|
7b867f6d2c8a9fb896f464168b50dfc115617e56
|
refs/heads/master
| 2023-08-18T19:59:58.435696 | 2023-08-14T10:16:00 | 2023-08-14T10:16:00 | 3,813,734 | 27 | 16 |
WTFPL
| 2020-10-25T23:00:16 | 2012-03-23T23:43:31 |
Java
|
UTF-8
|
Python
| false | false | 229 |
py
|
from sys import stdin
from itertools import groupby
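# CodeChef LEBOMBS: each '1' is a bomb that destroys itself and both
# neighbours. Padding the line with '0' on each side lets every maximal
# run of zeros shed its (up to) two bomb-adjacent endpoints uniformly,
# so max(len(run) - 2, 0) buildings survive per run.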
for i, ln in enumerate(stdin):
if i > 0 and i % 2 == 0:
s = "0" + ln.rstrip() + "0"
print(sum(max(len(list(g)) - 2, 0) for k, g in groupby(s) if k == "0"))
|
[
"[email protected]"
] | |
0271e2bd69581d5e5dc88b564ddc46e9e59ed80e
|
06289aabd78e6a0e5e5ab8360fffbf9a8504d615
|
/api/budget/serializers/expense_serializer.py
|
0f5fd044d6751912c22d145c7efad0ab32499638
|
[] |
no_license
|
jtclayt/finance_planner_api
|
6ca8130c761999abc01e03429a0676c0c803b640
|
06cd592e479145cbeb6acad4574021ef7515b33b
|
refs/heads/main
| 2023-08-15T02:20:34.455483 | 2021-09-22T16:15:49 | 2021-09-22T16:15:49 | 409,044,817 | 0 | 0 | null | 2021-09-22T05:08:48 | 2021-09-22T02:54:50 |
Python
|
UTF-8
|
Python
| false | false | 627 |
py
|
from rest_framework import serializers
from ..models.budget import Budget
from ..models.expense import Expense
class ExpenseSerializer(serializers.HyperlinkedModelSerializer):
'''Serializer for list view of expenses'''
url = serializers.HyperlinkedIdentityField(view_name='budget:expense-detail')
budget = serializers.PrimaryKeyRelatedField(queryset=Budget.objects.all())
class Meta:
model = Expense
fields = (
'id', 'url', 'description', 'annual_amount', 'monthly_amount',
'budget', 'user_id', 'created_at', 'updated_at'
)
read_only_fields = ('id',)
|
[
"[email protected]"
] | |
5ef953377a82188de0c437031ecd64571429c4dd
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/special-array-with-x-elements-greater-than-or-equal-x.py
|
01c11c68d89db61b02579b98174c1831b10e0923
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566 | 2023-08-28T10:11:12 | 2023-08-28T10:11:12 | 152,631,182 | 4,549 | 1,651 |
MIT
| 2023-05-31T06:10:33 | 2018-10-11T17:38:35 |
C++
|
UTF-8
|
Python
| false | false | 3,409 |
py
|
# Time: O(n)
# Space: O(1)
# counting sort solution
class Solution(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
n = len(nums)
for i in xrange(len(count)):
if i == n:
return i
n -= count[i]
return -1
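# e.g. Solution().specialArray([3, 5]) == 2: exactly two values are >= 2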
# Time: O(n)
# Space: O(1)
# counting sort + binary search solution
class Solution2(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
def inplace_counting_sort(nums, reverse=False): # Time: O(n)
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
for i in reversed(xrange(len(nums))): # inplace but unstable sort
while nums[i] >= 0:
count[nums[i]] -= 1
j = count[nums[i]]
nums[i], nums[j] = nums[j], ~nums[i]
for i in xrange(len(nums)):
nums[i] = ~nums[i] # restore values
if reverse: # unstable sort
nums.reverse()
inplace_counting_sort(nums, reverse=True)
left, right = 0, len(nums)-1
while left <= right: # Time: O(logn)
mid = left + (right-left)//2
if nums[mid] <= mid:
right = mid-1
else:
left = mid+1
return -1 if left < len(nums) and nums[left] == left else left
# Time: O(n)
# Space: O(n)
# counting sort + binary search solution
class Solution3(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MAX_NUM = 1000
def counting_sort(nums, reverse=False): # Time: O(n), Space: O(n)
count = [0]*(MAX_NUM+1)
for num in nums:
count[num] += 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
result = [0]*len(nums)
if not reverse:
for num in reversed(nums): # stable sort
count[num] -= 1
result[count[num]] = num
else:
for num in nums: # stable sort
count[num] -= 1
result[count[num]] = num
result.reverse()
return result
nums = counting_sort(nums, reverse=True) # extra O(n) space for stable sort
left, right = 0, len(nums)-1
while left <= right: # Time: O(logn)
mid = left + (right-left)//2
if nums[mid] <= mid:
right = mid-1
else:
left = mid+1
return -1 if left < len(nums) and nums[left] == left else left
# Time: O(nlogn)
# Space: O(1)
# sort solution
class Solution4(object):
def specialArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort(reverse=True) # Time: O(nlogn)
for i in xrange(len(nums)): # Time: O(n)
if nums[i] <= i:
break
else:
i += 1
return -1 if i < len(nums) and nums[i] == i else i
|
[
"[email protected]"
] | |
d17776b6855dfcc141feea8086af080f6d09fc11
|
8bcf5bf18f6e9c1d5871ef8a88ef5921e03e9b02
|
/koldunov/api/urls.py
|
a4efa9857dea75f924535c42e25c43c5803313cc
|
[] |
no_license
|
volgoweb/rest_example
|
73f5fc26cce45c0aae49247768f74ffa2f4c01d4
|
7ee8b87914d6c69c80158e7e22a6b454c3e7f76b
|
refs/heads/master
| 2021-01-10T01:32:44.098668 | 2017-09-05T12:42:00 | 2017-09-05T12:42:00 | 51,444,569 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 346 |
py
|
# -*- coding: utf-8 -*-
from rest_framework import routers
from .views.product_views import CategoryViewSet, ItemViewSet
from .views.stat_views import StatViewSet
router = routers.SimpleRouter()
router.register(r'category', CategoryViewSet)
router.register(r'item', ItemViewSet)
router.register(r'stat', StatViewSet)
urlpatterns = router.urls
|
[
"[email protected]"
] | |
fb28d6bd690ff888dfd3ea29b317ae4cf3d2be7a
|
8eca0a7a9ae207113f9f9ed98d093cbe21ffcd8a
|
/Maths_based_problems/grid_unique_paths/solution2_dp.py
|
eed4c34ad0482d36605fb37189b7a03c658eb218
|
[] |
no_license
|
Ranjit007ai/-Interviewbit-Maths
|
3238c720bb5e0765eef0e0e1a39549eff1ba788d
|
044627422fc38ee3e5aaa9cbfc8f00398d1f9bb5
|
refs/heads/main
| 2023-03-25T06:06:14.206384 | 2021-03-27T14:38:16 | 2021-03-27T14:38:16 | 352,065,531 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 568 |
py
|
def unique_paths(m,n):
dp = [[0]*n for _ in range(0,m)]
    # each dp cell stores the number of ways to reach that position
    # the first row and first column are all 1, since there is exactly one
    # way to reach any cell when moving through a single row or column
for row in range(0,m):
dp[row][0] = 1
for col in range(0,n):
dp[0][col] = 1
for row in range(1,m):
for col in range(1,n):
dp[row][col] = dp[row-1][col] + dp[row][col-1]
return dp[m-1][n-1]
m = 3
n = 3  # assumption: the source omitted this value; any positive size works
ans = unique_paths(m, n)
print(ans)
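# with m = n = 3 this prints 6 (right/down paths in a 3x3 grid)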
|
[
"[email protected]"
] | |
b3096c51d5f0148b23157700f003b048c28d4cb6
|
efd6c1d24b0a392a177679429d53dd2f515d0d95
|
/bi_auth/migrations/0001_initial.py
|
fd577d05d960ad8b413cd4b9a52c1be60fe0f81b
|
[] |
no_license
|
mitshel/TakedaAnalitic
|
5ccfb4aa83a056cbeaebce03df41819c7ece7985
|
b04b08fb053bff238a1ce68df423f99314827b48
|
refs/heads/master
| 2021-07-02T21:27:10.023816 | 2019-02-14T14:32:18 | 2019-02-14T14:32:42 | 153,908,777 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 979 |
py
|
# Generated by Django 2.1.2 on 2018-12-15 11:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_orgadmin', models.BooleanField(verbose_name='Администратор организации')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='user_profile', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Профиль',
'verbose_name_plural': 'Профили',
},
),
]
|
[
"[email protected]"
] | |
5fe19acc4de946e7408e43a378612cded89edc88
|
aa91f6e8d59286f65e7f6ed065823c80b7694439
|
/scripts/analysis/baostock/yangxian/yangxian.py
|
adf483a1883a24304215af71aef322817a97af98
|
[
"MIT"
] |
permissive
|
davidyuqiwei/davidyu_stock
|
7f93bcc2c50a0e2c356e3b517dbf7e2e7467093f
|
c234911f49d5980f2dff651333f8ca957333e094
|
refs/heads/master
| 2023-01-07T02:08:04.330559 | 2023-01-02T16:31:37 | 2023-01-02T16:31:37 | 163,694,812 | 13 | 2 | null | 2022-12-18T03:55:10 | 2018-12-31T20:07:30 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,725 |
py
|
from davidyu_cfg import *
from functions.data_dir import *
from functions.get_datetime import *
from functions.run_combine_all_csv import *
from functions.colNames import *
from functions.day_history.kLines import klineDate
from functions.LinearReg import *
from functions.common.dfProcess import *
from functions.common.loadModule.load_module_kdj import *
from scipy.stats import linregress
def stock_data(stock_index,start_date,end_date):
df_dir = os.path.join(data_path,"history_data","baostock","2020-12-17")
df1 = pd.read_csv(os.path.join(df_dir,stock_index+".csv"))
df1 = df1[(df1["dt"]>=start_date)&(df1["dt"]<=end_date)]
df1 = df1.drop_duplicates()
df1 = df1.sort_values("date")
df1["stock_index"] = [ x[3:9] for x in df1["code"]]
return df1
def get_3_pos_line(df1):
df1["line"] = df1["close"] - df1["open"]
df1["line"][df1["line"]>0]=1
df1["line"][df1["line"]<=0]=0
df1['mv_close'] = df1.close.rolling(window=3).mean()
df1['mv_close_120'] = df1.close.rolling(window=120).mean()
df1['mv_close_250'] = df1.close.rolling(window=250).mean()
df1['line_check_5'] = df1.line.rolling(window=5).sum()
df1['line_check_3'] = df1.line.rolling(window=3).sum()
df2 = df1[(df1["line_check_3"]==3)&(df1["close"]<df1['mv_close_250'])]
return df2
if __name__ =='__main__':
stock_index = sys.argv[1]
start_date = '2017-01-01'
end_date = '2020-12-17'
try:
df1 = stock_data(stock_index,start_date,end_date)
df2 = get_3_pos_line(df1)
#df3 = df2.tail(1)
#print("{},{}".format(df2['date'].values,df2['code'].values))
print(df2[["date","code"]].to_string(index=False,header=None))
    except Exception:
pass
|
[
"[email protected]"
] | |
b468d83e6f86299cc5a6da5cc3813594132a55dc
|
30b232051b10753e9103a70d88a387dfa1aca63f
|
/164.py
|
2f3c183e1682f60665db64f6c436ac296f2bf23b
|
[] |
no_license
|
samrithasudhagar/guvi2
|
fe6d7af8a73cef515991524d7abad754c3700dc5
|
f7eb8a8b2cd701c2708c414939cc139414d3310d
|
refs/heads/master
| 2020-04-20T12:27:47.748859 | 2019-05-26T09:45:42 | 2019-05-26T09:45:42 | 168,843,977 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 175 |
py
|
n,k=map(int,input().split())
l=list(map(int,input().split()))
m=0
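# print k itself if it appears in the list; otherwise print the largest element smaller than k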
if k in l:
print(k)
else:
for i in l:
if i>m and i <k:
m=i
print(m)
|
[
"[email protected]"
] | |
8fb33330b1462f23987648fc31eb06140b7e5caa
|
1e03cd80d27d35ffdc8f68f70a36a461eaae4b9d
|
/apps/common/views.py
|
9da3528729470edad1fb1663ca5e9291ee3c0179
|
[] |
no_license
|
paddy375691/flask_zlbbs
|
bee8f15497c58bd5f1f614d6a686b93301f93d0a
|
90fb142b33aecca6ff66013953ecf6e3e39b6139
|
refs/heads/master
| 2023-02-04T07:55:00.798789 | 2020-12-25T08:51:13 | 2020-12-25T08:51:13 | 324,265,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 144 |
py
|
from flask import Blueprint
bp = Blueprint('common', __name__, url_prefix='/common')
@bp.route('/')
def index():
return 'common index'
|
[
"[email protected]"
] | |
3ad52e8c095b3ad50975940c78e30707311ab01e
|
32ba9f1c35ae916d33b121daeeea8e1910a447d7
|
/utils/tag.py
|
99083b27f2ee3987fa2f8e96f2665c4f0533c66f
|
[
"MIT"
] |
permissive
|
rituparna/glTools
|
8b02fa2751e1b997f7a202c7df8a3dd3d3032722
|
c512a96c20ba7a4ee93a123690b626bb408a8fcd
|
refs/heads/master
| 2020-03-19T19:23:47.684580 | 2018-06-10T23:53:58 | 2018-06-10T23:53:58 | 136,853,456 | 0 | 0 | null | 2018-06-10T23:46:54 | 2018-06-10T23:46:53 | null |
UTF-8
|
Python
| false | false | 2,893 |
py
|
import maya.cmds as mc
import glTools.tools.namingConvention
class Tag( object ):
def __init__(self):
'''
'''
self.nameTagAttr = 'nameTag'
def addNameTag(self,control,tag):
'''
Set the name tag value for the specified control
'''
# Check control
if not mc.objExists(control): raise Exception('Object '+control+' does not exist!')
		# Add tag attribute if it does not already exist
		if not mc.objExists(control+'.'+self.nameTagAttr):
			mc.addAttr(control,ln=self.nameTagAttr,dt='string')
mc.setAttr(control+'.'+self.nameTagAttr,tag,type='string')
def getNameTag(self,control):
'''
Return the name tag value of the specified control
'''
# Check control
if not mc.objExists(control): raise Exception('Object '+control+' does not exist!')
# Check tag attribute
if not mc.objExists(control+'.'+self.nameTagAttr): raise Exception('Object '+control+' does not have a "'+self.nameTagAttr+'" attribute!')
# Return tag string value
return mc.getAttr(control+'.'+self.nameTagAttr)
def guessNameTag(self,control,side=True,part=True,optSide=True,subPart=True,node=False):
'''
Return a best guess name tag based on a controls current name.
Uses name element comparison to our naming convention module.
'''
tag = ''
# Get naming convention dictionaries
nameConvention = glTools.tools.namingConvention.NamingConvention()
sideDict = dict((value, key) for key, value in nameConvention.side.iteritems())
partDict = dict((value, key) for key, value in nameConvention.part.iteritems())
subPartDict = dict((value, key) for key, value in nameConvention.subPart.iteritems())
nodeDict = dict((value, key) for key, value in nameConvention.node.iteritems())
# Get name elements
controlElem = control.split(nameConvention.delineator)
controlElemCnt = len(controlElem)
controlElemInd = 0
# Check number of elements
if controlElemCnt < 3: print 'Warning: Name pattern does not match naming convention'
# Get side
if side and sideDict.has_key(controlElem[controlElemInd]):
if controlElem[controlElemInd] != nameConvention.side['center']:
tag += sideDict[controlElem[controlElemInd]].capitalize()
controlElemInd += 1
else: return
# Get part
if part and partDict.has_key(controlElem[controlElemInd][0:-2]):
tag += partDict[controlElem[controlElemInd][0:-2]].capitalize()
controlElemInd += 1
else: return
# Get optional side
if optSide and sideDict.has_key(controlElem[controlElemInd][0:-2]):
tag += sideDict[controlElem[controlElemInd][0:-2]].capitalize()
controlElemInd += 1
# Get sub-part
if subPart and subPartDict.has_key(controlElem[controlElemInd][0:-2]):
tag += subPartDict[controlElem[controlElemInd][0:-2]].capitalize()
controlElemInd += 1
# Get type
if node and nodeDict.has_key(controlElem[controlElemInd]):
tag += nodeDict[controlElem[controlElemInd]].capitalize()
return tag
|
[
"[email protected]"
] | |
e037ce0f746846b6294b60c884db7209be1e7464
|
efbc8c73e9ac5cbcb9321518ab06b3965369a5f0
|
/SWEA/D2/1974_스도쿠 검증.py
|
5c70e3a29c30f7e85436a3f0b4edb86ade4466a6
|
[] |
no_license
|
AshOil/APS
|
56b9395dcbb8eeec87a047407d4326b879481612
|
fe5a2cd63448fcc4b11b5e5bc060976234ed8eea
|
refs/heads/master
| 2023-07-15T17:32:20.684742 | 2021-08-23T13:04:05 | 2021-08-23T13:04:05 | 283,709,661 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,389 |
py
|
import sys
sys.stdin = open('input_data/1974.txt',"r")
num_dict = {}
T = int(input())
for t in range(1, T+1):
for tt in range(1,10):
        num_dict[tt] = list(map(int, input().split()))
result = True
# 가로부터 검사하자
for hori in num_dict.values():
if sorted(hori) != [1, 2, 3, 4, 5, 6, 7, 8, 9]:
result = False
#세로 검사
for num in range(9):
verti_check = []
for verti in num_dict.values():
verti_check.append(verti[num])
if sorted(verti_check) != [1, 2, 3, 4, 5, 6, 7, 8, 9]:
result = False
#블록검사
line_start = 0
line_end = 3
block_list = list(num_dict.values())
for __ in range(3):
turn_block_list = block_list[line_start:line_end]
block_start = 0
block_end = 3
for _ in range(3):
block_check = []
for turn in range(3):
for block in turn_block_list[turn][block_start:block_end]:
block_check.append(block)
block_start += 3
block_end += 3
if sorted(block_check) != [1, 2, 3, 4, 5, 6, 7, 8, 9]:
result = False
line_start += 3
line_end += 3
if result:
print('#{} 1'.format(t))
else:
print('#{} 0'.format(t))
|
[
"[email protected]"
] | |
49adb1a0d02abd33be4f5345e463f2839479722a
|
b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a
|
/examples/pwr_run/checkpointing/short/max_pwr/job18.py
|
8d86f952590173e5884246a93a2efab702b53071
|
[
"MIT"
] |
permissive
|
boringlee24/keras_old
|
3bf7e3ef455dd4262e41248f13c04c071039270e
|
1e1176c45c4952ba1b9b9e58e9cc4df027ab111d
|
refs/heads/master
| 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,216 |
py
|
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.007
args_model = 'densenet121'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_pwr/' + job_name + '*'
total_epochs = 19
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_max_pwr/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# creates an file if job qualified for checkpoint
open('ckpt_qual/' + job_name + '.txt', 'a').close()
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
while True:
if os.path.exists('finish.json'):
try:
os.rename('finish.json', 'finish_lock.json')
break
except Exception:
pass
else:
time.sleep(1)
with open('finish_lock.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
json_file2 = json.dumps(finish_dict)
with open('finish_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('finish_lock.json', 'finish.json')
|
[
"[email protected]"
] | |
1fc74891fa1324f804b07585e2b154d9b49afdf6
|
de681ebfa95a07c04fbb1280bf722847b06ee548
|
/migrations/versions/3fd0d7bc25ea_create_tag_model.py
|
c5297e733152e5145f95a89eca64b85173b984bb
|
[] |
no_license
|
Dzhoker/flask-lessons
|
156957ed29a674df474cfc6b8cdca12adae021d7
|
590e436516dbd8a3a9af4ad33aafbc854088a6aa
|
refs/heads/master
| 2023-03-18T09:44:19.431920 | 2021-02-18T02:52:07 | 2021-02-18T02:52:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 761 |
py
|
"""create Tag model
Revision ID: 3fd0d7bc25ea
Revises: 1b2fd89e61b5
Create Date: 2021-01-12 07:40:03.728879
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3fd0d7bc25ea'
down_revision = '1b2fd89e61b5'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tag',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=32), server_default='', nullable=False),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tag')
# ### end Alembic commands ###
|
[
"[email protected]"
] | |
1bd246b511bcb25535f008e43dec5d7633a97690
|
2c112f781016f2022dc7ff1c616b1f57185fe8f8
|
/tests/conftest.py
|
34a2935ba0d6d69229c0b0455e16b60a8fcb1f85
|
[] |
no_license
|
dominicgs/Website
|
c15312a5b081b42db880b99df6811c8c04777824
|
fc3587daacff20ec3ab590df121c9f693f09a8ce
|
refs/heads/master
| 2020-03-21T16:49:07.492309 | 2018-06-26T21:41:50 | 2018-06-26T21:41:50 | 64,015,414 | 0 | 1 | null | 2016-07-23T12:33:16 | 2016-07-23T12:33:16 | null |
UTF-8
|
Python
| false | false | 2,265 |
py
|
" PyTest Config. This contains global-level pytest fixtures. "
import os
import os.path
import pytest
import shutil
from models.user import User
from main import create_app, db as db_obj, Mail
from utils import CreateBankAccounts, CreateTickets
@pytest.fixture(scope="module")
def app():
""" Fixture to provide an instance of the app.
This will also create a Flask app_context and tear it down.
This fixture is scoped to the module level to avoid too much
Postgres teardown/creation activity which is slow.
"""
if 'SETTINGS_FILE' not in os.environ:
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
os.environ['SETTINGS_FILE'] = os.path.join(root, 'config', 'test.cfg')
tmpdir = os.environ.get('TMPDIR', '/tmp')
prometheus_dir = os.path.join(tmpdir, 'emf_test_prometheus')
os.environ['prometheus_multiproc_dir'] = prometheus_dir
if os.path.exists(prometheus_dir):
shutil.rmtree(prometheus_dir)
if not os.path.exists(prometheus_dir):
os.mkdir(prometheus_dir)
app = create_app()
with app.app_context():
try:
db_obj.session.close()
        except Exception:
pass
db_obj.drop_all()
db_obj.create_all()
CreateBankAccounts().run()
CreateTickets().run()
yield app
db_obj.session.close()
db_obj.drop_all()
@pytest.fixture
def client(app):
" Yield a test HTTP client for the app "
yield app.test_client()
@pytest.fixture
def db(app):
" Yield the DB object "
yield db_obj
@pytest.fixture
def request_context(app):
" Run the test in an app request context "
with app.test_request_context('/') as c:
yield c
@pytest.fixture
def user(db):
" Yield a test user. Note that this user will be identical across all tests in a module. "
email = '[email protected]'
user = User.query.filter(User.email == email).one_or_none()
if not user:
user = User(email, 'Test User')
db.session.add(user)
db.session.commit()
yield user
@pytest.fixture
def outbox(app):
" Capture mail and yield the outbox. "
mail_obj = Mail()
with mail_obj.record_messages() as outbox:
yield outbox
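# Minimal usage sketch (hypothetical test module relying on the fixtures above):
# def test_user_email(user):
#     assert user.email == '[email protected]'
# def test_outbox_starts_empty(outbox):
#     assert outbox == []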
|
[
"[email protected]"
] | |
c80abae38d1dabb5dfaa1cc1b9606faa528421bd
|
13b72e5c48f5f7213d9a569f699dc1554bc363dd
|
/demo/libdemo/list_git__repos.py
|
35f238aa8d2d69b1030b7d8cfefa92fded15d932
|
[] |
no_license
|
srikanthpragada/PYTHON_02_MAR_2021
|
6997fece4ad627bb767c0bca5a5e166369087e68
|
5dfd0c471378bd22379ac0d66f8785d4d315017b
|
refs/heads/master
| 2023-04-04T20:28:22.738042 | 2021-04-17T14:19:48 | 2021-04-17T14:19:48 | 344,498,123 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 362 |
py
|
import requests
user = "srikanthpragada"
response = requests.get(f"https://api.github.com/users/{user}/repos")
if response.status_code != 200:
print(f"Sorry! Could not get details for {user} from github!")
exit()
repos = response.json()  # parse the JSON response into a list of repo dicts
for repo in repos:
print(repo['name'])
print(repo['description'])
print('-' * 50)
|
[
"[email protected]"
] | |
e1f046bf1125c305df03d5d353029498f0cbe882
|
56d41bbc6b5d831ba699ad4a44f5880ba3d195c8
|
/thread_sync.py
|
bcd0c1730eb098adb4c5bb1a0e3dc4d595662b6d
|
[] |
no_license
|
pigliangliang/2018-07-05-08
|
4635e4dc1926f3f17eae7f607a0b188f6aaf9f43
|
ba95331f4b0cc0316377a5c67f86d03e8cc257b8
|
refs/heads/master
| 2020-03-22T08:51:08.209718 | 2018-07-08T07:58:06 | 2018-07-08T07:58:06 | 139,795,833 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 606 |
py
|
#author_by zhuxiaoliang
#2018-07-05 3:39 PM
"""
A. Semaphore
In multithreaded programming, to keep different threads from modifying a shared
resource (such as a global variable) at the same time, the number of simultaneous
accesses (usually 1) must be limited. Semaphore synchronization is based on an
internal counter: each acquire() call decrements the counter by 1 and each
release() call increments it by 1; when the counter is 0, acquire() blocks.
"""
import time
from random import random
from threading import Thread,Semaphore,enumerate
sema = Semaphore(3)
def foo(tid):
with sema:
        print('thread %s acquired the semaphore' % tid)
        time.sleep(random())
        print('thread %s done' % tid)
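
# Minimal usage sketch (demo values assumed): five workers contend for three slots.
threads = [Thread(target=foo, args=(i,)) for i in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()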
|
[
"[email protected]"
] | |
6844ce56ffa18f4d971b348110a9f410a1502c7e
|
a3c662a5eda4e269a8c81c99e229879b946a76f6
|
/.venv/lib/python3.7/site-packages/pylint/test/input/func_noerror_yield_assign_py25.py
|
f40d8d96e837e9022fc2596b23ce8733990a450c
|
[
"MIT"
] |
permissive
|
ahmadreza-smdi/ms-shop
|
0c29da82c58b243507575672bbc94fb6e8068aeb
|
65ba3f3061e2ac5c63115b08dadfe7d67f645fb6
|
refs/heads/master
| 2023-04-27T19:51:34.858182 | 2019-11-24T20:57:59 | 2019-11-24T20:57:59 | 223,616,552 | 6 | 2 |
MIT
| 2023-04-21T20:51:21 | 2019-11-23T16:09:03 |
Python
|
UTF-8
|
Python
| false | false | 387 |
py
|
"""http://www.logilab.org/ticket/8771"""
from __future__ import print_function
def generator():
"""yield as assignment"""
yield 45
xxxx = yield 123
print(xxxx)
def generator_fp1(seq):
"""W0631 false positive"""
for val in seq:
pass
for val in seq:
yield val
def generator_fp2():
"""E0601 false positive"""
xxxx = 12
yield xxxx
|
[
"[email protected]"
] | |
40dbe20a67504c37c5be2abfab99add67569df21
|
585bac463cb1919ac697391ff130bbced73d6307
|
/36_ValidSudoku/solution1.py
|
91569e66f6dee678a57f5dd30000308804bcd22a
|
[] |
no_license
|
llgeek/leetcode
|
ce236cf3d3e3084933a7a4a5e8c7766f7f407285
|
4d340a45fb2e9459d47cbe179ebfa7a82e5f1b8c
|
refs/heads/master
| 2021-01-22T23:44:13.318127 | 2020-03-11T00:59:05 | 2020-03-11T00:59:05 | 85,667,214 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 769 |
py
|
from typing import List

class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
seen = set()
for i in range(len(board)):
for j in range(len(board[0])):
val = board[i][j]
if val == '.': continue
if str(val) + 'in row ' + str(i) in seen:
return False
seen.add(str(val) + 'in row ' + str(i))
if str(val) + 'in column ' + str(j) in seen:
return False
seen.add(str(val) + 'in column ' + str(j))
if str(val) + 'in grid ' + str(i // 3) + ' ' + str(j // 3) in seen:
return False
seen.add(str(val) + 'in grid ' + str(i // 3) + ' ' + str(j // 3))
return True
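# Minimal usage sketch (board values hypothetical; '.' marks an empty cell):
# board = [["5", "3", "."] + ["."] * 6] + [["."] * 9 for _ in range(8)]
# print(Solution().isValidSudoku(board))  # -> True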
|
[
"[email protected]"
] | |
bf439e9862b4ae08f44e047b1d51ff58c9ae6f67
|
c6666d0235d1d03ed9a5a2d1a3cfa9ccc9d9e88c
|
/webcore/migrations/0001_initial.py
|
bc167afd1bfb17d3738481c8cc02fc4ac5b3fcf0
|
[] |
no_license
|
boiyelove/savingsensei
|
67dc8a5690c7599dd126159837af6e567852aa73
|
8acd46ebd770b9e18f64e14ff08bfd2ddbcc0edc
|
refs/heads/master
| 2021-10-20T01:32:10.775234 | 2019-02-25T03:27:31 | 2019-02-25T03:27:31 | 172,426,033 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,812 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-20 12:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Banner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('desc', models.CharField(max_length=60)),
('btn_link', models.URLField()),
('btn_title', models.CharField(max_length=18)),
('created', models.DateTimeField(auto_now=True)),
('updated', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('subject', models.CharField(max_length=30)),
('content', models.TextField()),
('created', models.DateTimeField(auto_now=True)),
('updated', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='Newsletter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('created', models.DateTimeField(auto_now=True)),
('updated', models.DateTimeField(auto_now_add=True)),
],
),
]
|
[
"[email protected]"
] | |
4b1ecbe8bfc1dfb288e7e30b8ba859c26d6a53c9
|
b13ca274b4463c9900840ee6516094b7509b6041
|
/empower/lvapp/lvaphandler.py
|
b5c42204161a1e3cc0f451116cafa63d304a1803
|
[
"Apache-2.0"
] |
permissive
|
imec-idlab/sdn_wifi_manager
|
09d206f2f649aa715752d3c44e011d3f54faf592
|
eda52649f855722fdec1d02e25a28c61a8fbda06
|
refs/heads/master
| 2021-06-23T08:03:22.482931 | 2020-12-03T11:30:10 | 2020-12-03T11:30:10 | 162,106,793 | 0 | 0 |
Apache-2.0
| 2019-03-27T16:23:31 | 2018-12-17T09:33:47 |
Python
|
UTF-8
|
Python
| false | false | 3,522 |
py
|
#!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LVAPs Handerler."""
import tornado.web
import tornado.httpserver
from empower.datatypes.etheraddress import EtherAddress
from empower.restserver.apihandlers import EmpowerAPIHandler
from empower.core.resourcepool import ResourceBlock
from empower.main import RUNTIME
class LVAPHandler(EmpowerAPIHandler):
"""LVAP handler. Used to view LVAPs (controller-wide)."""
HANDLERS = [r"/api/v1/lvaps/?",
r"/api/v1/lvaps/([a-zA-Z0-9:]*)/?"]
def get(self, *args, **kwargs):
""" Get all LVAPs or just the specified one.
Args:
lvap_id: the lvap address
Example URLs:
GET /api/v1/lvaps
GET /api/v1/lvaps/11:22:33:44:55:66
"""
try:
if len(args) > 1:
raise ValueError("Invalid URL")
if not args:
self.write_as_json(RUNTIME.lvaps.values())
else:
lvap = EtherAddress(args[0])
self.write_as_json(RUNTIME.lvaps[lvap])
except KeyError as ex:
self.send_error(404, message=ex)
except ValueError as ex:
self.send_error(400, message=ex)
self.set_status(200, None)
def put(self, *args, **kwargs):
""" Set the WTP for a given LVAP, effectivelly hands-over the LVAP to
another WTP
Args:
lvap_id: the lvap address
Request:
version: the protocol version (1.0)
Example URLs:
PUT /api/v1/lvaps/11:22:33:44:55:66
"""
try:
if len(args) != 1:
raise ValueError("Invalid URL")
request = tornado.escape.json_decode(self.request.body)
if "version" not in request:
raise ValueError("missing version element")
lvap_addr = EtherAddress(args[0])
lvap = RUNTIME.lvaps[lvap_addr]
if "wtp" in request:
wtp_addr = EtherAddress(request['wtp'])
wtp = RUNTIME.wtps[wtp_addr]
lvap.wtp = wtp
elif "blocks" in request:
pool = []
for block in request["blocks"]:
wtp_addr = EtherAddress(block['wtp'])
wtp = RUNTIME.wtps[wtp_addr]
hwaddr = EtherAddress(block['hwaddr'])
channel = int(block['channel'])
band = int(block['band'])
r_block = ResourceBlock(wtp, hwaddr, channel, band)
pool.append(r_block)
lvap.blocks = pool
if "encap" in request:
encap = EtherAddress(request["encap"])
lvap.encap = encap
except KeyError as ex:
self.send_error(404, message=ex)
except ValueError as ex:
self.send_error(400, message=ex)
self.set_status(204, None)
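# Example hand-over request (addresses hypothetical):
#   PUT /api/v1/lvaps/11:22:33:44:55:66
#   {"version": "1.0", "wtp": "aa:bb:cc:dd:ee:ff"}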
|
[
"[email protected]"
] | |
1e273a85868f0f6b461bfd41551779c6a908e717
|
eab72229ae04d1160704cbf90a08a582802a739c
|
/pipeline.py
|
951739aed5ac7ad0818e105dbff2397a48108344
|
[
"MIT"
] |
permissive
|
megatazm/Crowd-Counting
|
444d39b0e3d6e98995f53badf4c073829038b6b7
|
647a055baccee2c3b6b780f38930e2ffd14d1664
|
refs/heads/master
| 2022-04-01T04:49:16.409675 | 2020-01-31T21:24:02 | 2020-01-31T21:24:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 518 |
py
|
import os
# Crop area
#os.system("python3 crop.py")
## APPROACH 1 MCNN
os.system("python3 put_zero_image.py")
os.system("python3 test.py")
os.system("python3 put_zero_den.py")
os.system("python3 find_people.py")
os.system("python3 position.py")
## APPROACH 2 - RNN
#os.system("python3 tiny_face_eval.py --weight_file_path weight --prob_thresh 0.04 --nms_thresh 0.0")
## TRACKING
# Put heads into file
#os.system("python3 get_heads.py")
# Track heads among videos
#os.system("python3 track_video.py")
|
[
"[email protected]"
] | |
934e6966fbd17ae8a420204911909a52151bbaf6
|
8d5f49fa1fda8ffc473e7f5a62786c77838a5820
|
/website/load_tests/drawquest/test_scripts/utils.py
|
e305eef730b14c15bd7911f0cf1ade88885204ff
|
[
"BSD-3-Clause"
] |
permissive
|
MichaelBechHansen/drawquest-web
|
dfc6f5d9541860a5df23db678e82564a230bd42e
|
8d8f9149b6efeb65202809a5f8916386f58a1b3b
|
refs/heads/master
| 2021-01-14T10:30:10.861222 | 2015-11-10T03:13:42 | 2015-11-10T03:13:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,724 |
py
|
import json
import uuid
import requests
PASSWORD = 'testpassword'
#QUEST_ID = 658
#QUEST_ID = 926 #staging smilie
QUEST_ID = 7004
PLAYBACK_DATA = ''
TEST_USERNAME = 'test_account__'
TEST_PASSWORD = 'testaccount'
class ApiError(Exception):
pass
class HttpError(Exception):
pass
class ApiConsumer(object):
def __init__(self):
self.session_id = None
def call(self, endpoint, params={}):
payload = json.dumps(params)
headers = {
'content-type': 'application/json',
}
if self.session_id:
headers['X-SESSIONID'] = self.session_id
ret = requests.post('http://api.staging.example.com/' + endpoint, data=payload, headers=headers)
if ret.status_code != 200:
raise HttpError(ret.status_code)
if not ret.json.get('success'):
raise ApiError(ret.json)
return ret.json
def signup(self, username=None):
if not username:
username = '_TEST_' + str(uuid.uuid4())[-10:].replace('-', '_')
ret = self.call('auth/signup', {
'username': username,
'email': '{}@example.example'.format(username),
'password': PASSWORD,
})
self.session_id = ret['sessionid']
def heavy_state_sync(self):
return self.call('heavy_state_sync')
def onboarding_quest(self):
return self.call('quests/onboarding')
def quest_comments(self, quest_id):
return self.call('quests/comments', {'quest_id': quest_id})
class DrawquestTransaction(object):
def __init__(self):
self.custom_timers = {}
def main(trans_cls):
trans = trans_cls()
trans.run()
print trans.custom_timers
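# Minimal usage sketch (assumes the staging API above is reachable):
# consumer = ApiConsumer()
# consumer.signup()                       # registers a throwaway _TEST_ user
# state = consumer.heavy_state_sync()
# comments = consumer.quest_comments(QUEST_ID)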
|
[
"[email protected]"
] | |
5dbd16bad92c13444eb77d53b650fba51d099460
|
7f8cebd9315129bcdb7ef220dc449cda26a19ce4
|
/models/aetanh.py
|
bcff65d94ee5b2f960314125e4beb4f15db6e754
|
[] |
no_license
|
KaiqianZhang/dpcca_v8
|
75477b1768905b6c41838c8da9ff77fba13b5a45
|
1b65fc0c3ec6b182907ba070e859c1d92fc98942
|
refs/heads/master
| 2020-08-30T09:32:58.485684 | 2019-11-11T17:34:55 | 2019-11-11T17:34:55 | 218,334,012 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,639 |
py
|
"""=============================================================================
Autoencoder.
============================================================================="""
import numpy as np
from torch import nn
# ------------------------------------------------------------------------------
class AETanH(nn.Module):
def __name__(self):
return 'AE'
# ------------------------------------------------------------------------------
def __init__(self, cfg):
super(AETanH, self).__init__()
assert cfg.GENE_EMBED_DIM < 12
self.nc = cfg.N_CHANNELS
self.w = cfg.IMG_SIZE
self.input_dim = cfg.N_GENES
self.encoder = nn.Sequential(
nn.Linear(self.input_dim, 128),
nn.Tanh(),
nn.Linear(128, 64),
nn.Tanh(),
nn.Linear(64, cfg.GENE_EMBED_DIM)
)
self.decoder = nn.Sequential(
nn.Linear(cfg.GENE_EMBED_DIM, 64),
nn.Tanh(),
nn.Linear(64, 128),
nn.Tanh(),
nn.Linear(128, self.input_dim)
)
# ------------------------------------------------------------------------------
def encode(self, x):
x = x.view(-1, np.prod(x.shape[1:]))
return self.encoder(x)
# ------------------------------------------------------------------------------
def decode(self, z):
x = self.decoder(z)
return x.view(-1, self.input_dim)
# ------------------------------------------------------------------------------
def forward(self, x):
x = self.encode(x)
x = self.decode(x)
return x
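# Minimal usage sketch (hypothetical cfg object carrying the attributes used above):
# import torch
# class Cfg: N_CHANNELS = 1; IMG_SIZE = 28; N_GENES = 500; GENE_EMBED_DIM = 8
# model = AETanH(Cfg)
# out = model(torch.randn(4, 500))  # (batch, N_GENES) in, same shape out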
|
[
"[email protected]"
] | |
5824f026706f22fed9333ce3b0f3cdc2674fb5cf
|
afb7d4d6013b6a9022d707d5835a3dd578214b2e
|
/Bite_172.py
|
d38f7db655c51e87afd6b54e249df6347f9a2efa
|
[] |
no_license
|
JB0925/Bites
|
86f0bd49d8b53376257c14df280ae0a9643139a2
|
f884ce4ffd7ce39afcea5b86a80cec14c607a4f0
|
refs/heads/master
| 2023-03-29T21:48:42.849729 | 2021-03-29T01:37:48 | 2021-03-29T01:37:48 | 316,419,350 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 375 |
py
|
from functools import partial
# create 2 partials:
# - 'rounder_int' rounds to int (0 places)
# - 'rounder_detailed' rounds to 4 places
rounder_int = 0
rounder_detailed = 0
def round_to_int(num, places):
return round(num, places)
rounder_int = partial(round_to_int, places=0)
rounder_detailed = partial(round_to_int, places=4)
print(rounder_detailed(10.4232567))
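# e.g. rounder_int(10.4232567) -> 10.0, while the line above prints 10.4233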
|
[
"[email protected]"
] | |
6bb7901debec9f9ddd547ba4fb9d52462ca74c58
|
fa45fe7eaba7ef7c27ecf95db7c460ca189ce0d4
|
/everydays/BookBeingRead/python高级编程/day12.1.py
|
0d2033516213b11dfa91ea44119d6e37e17ceb4c
|
[] |
no_license
|
jake20001/Hello
|
be1a2bb5331f2ad4c1d8f30c6a9a530aff79e605
|
08217871bb17152eb09e68cd154937ebe5d59d2c
|
refs/heads/master
| 2021-07-10T09:48:15.883716 | 2021-04-23T14:49:03 | 2021-04-23T14:49:03 | 56,282,358 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 774 |
py
|
# -*- coding:utf-8 -*-
# -------------------------------
# ProjectName : autoDemo
# Author : zhangjk
# CreateTime : 2020/12/18 11:05
# FileName : day12.1
# Description :
# --------------------------------
import os
class DublinCoreAdapter(object):
def __init__(self,filename):
self._filename = filename
def title(self):
return os.path.splitext(self._filename)[0]
    def creator(self):
return "Someone"
def language(self):
return ('en',)
class DublinCoreInfo(object):
def summary(self,dc_ob):
print('Title %s'%dc_ob.title())
        print('Creator %s'%dc_ob.creator())
        print('Language %s'%','.join(dc_ob.language()))
adapter = DublinCoreAdapter('1.txt')
infos = DublinCoreInfo()
infos.summary(adapter)
|
[
"[email protected]"
] | |
12036ced2dc9a7de9f3d4d79fc1ad4e7fbcbe6cd
|
8fef8af953e8dafde78c671e8ee9813d08ab2d60
|
/trees/BST/LowestCommAncestor.py
|
f1e104c3496e69731c8a6af520b879abc8aa4736
|
[
"MIT"
] |
permissive
|
htrahddis-hub/DSA-Together-HacktoberFest
|
037b009c744863070e0f1b61167c18f9101335f2
|
a5c6165c449c5b5b91e56815f2a38d5fd23bf354
|
refs/heads/main
| 2023-08-23T18:52:55.654386 | 2021-10-17T15:45:14 | 2021-10-17T15:45:14 | 418,180,825 | 1 | 0 |
MIT
| 2021-10-17T15:56:21 | 2021-10-17T15:56:21 | null |
UTF-8
|
Python
| false | false | 2,808 |
py
|
# Link to the problem :https://practice.geeksforgeeks.org/problems/lowest-common-ancestor-in-a-bst/1#
#Function to find the lowest common ancestor in a BST.
# We are looking for a node which is closest to both the nodes
def LCA(root, n1, n2):
#code here.
while(root):
# If the root is greater than both nodes , then we are looking for something smaller , so go to left
if(root.data > n1 and root.data > n2):
root = root.left
# If the root is smaller than both nodes , then we are looking for something greater than this and go to right
elif(root.data < n1 and root.data < n2):
root = root.right
#If the root is not greater or smaller then we have found something closest to both the nodes , so returns the root
else:
break
return root
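# Worked example: for a BST rooted at 5 with children 3 and 7, LCA(root, 3, 7)
# stops at 5, since 5 is neither greater than both targets nor smaller than
# both -- the split point is the lowest common ancestor.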
#{
# Driver Code Starts
#Initial Template for Python 3
from collections import deque
# Tree Node
class Node:
def __init__(self, val):
self.right = None
self.data = val
self.left = None
# Function to Build Tree
def buildTree(s):
#Corner Case
if(len(s)==0 or s[0]=="N"):
return None
# Creating list of strings from input
# string after spliting by space
ip=list(map(str,s.split()))
# Create the root of the tree
root=Node(int(ip[0]))
size=0
q=deque()
# Push the root to the queue
q.append(root)
size=size+1
# Starting from the second element
i=1
while(size>0 and i<len(ip)):
# Get and remove the front of the queue
currNode=q[0]
q.popleft()
size=size-1
# Get the current node's value from the string
currVal=ip[i]
# If the left child is not null
if(currVal!="N"):
# Create the left child for the current node
currNode.left=Node(int(currVal))
# Push it to the queue
q.append(currNode.left)
size=size+1
# For the right child
i=i+1
if(i>=len(ip)):
break
currVal=ip[i]
# If the right child is not null
if(currVal!="N"):
# Create the right child for the current node
currNode.right=Node(int(currVal))
# Push it to the queue
q.append(currNode.right)
size=size+1
i=i+1
return root
if __name__=="__main__":
t=int(input())
for _ in range(0,t):
s=input()
root=buildTree(s)
n1,n2=list(map(int,input().split()))
print(LCA(root,n1,n2).data);
# } Driver Code Ends
|
[
"[email protected]"
] | |
1eba6ca236ff4f6105330a8c2c4442d3537a21a8
|
00b762e37ecef30ed04698033f719f04be9c5545
|
/scripts/test_results/scikit-learn_test_results/conflicts/52_bench_sgd_covertype_actual.py
|
f88cf076b4bf0da384e6c9ba249ccf1ec8f143b1
|
[] |
no_license
|
kenji-nicholson/smerge
|
4f9af17e2e516333b041727b77b8330e3255b7c2
|
3da9ebfdee02f9b4c882af1f26fe2e15d037271b
|
refs/heads/master
| 2020-07-22T02:32:03.579003 | 2018-06-08T00:40:53 | 2018-06-08T00:40:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,491 |
py
|
"""
================================
Covertype dataset with dense SGD
================================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset of
Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is low-
dimensional with 54 features and a sparsity of approx. 23%. Here, we consider
the task of predicting class 1 (spruce/fir). The classification performance of
SGD is competitive with Liblinear while being two orders of magnitude faster to
train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
Liblinear 11.8977s 0.0285s 0.2305
GaussianNB 3.5931s 0.6645s 0.6367
SGD 0.2924s 0.0114s 0.2300
CART 39.9829s 0.0345s 0.0476
RandomForest 794.6232s 1.0526s 0.0249
Extra-Trees 1401.7051s 1.1181s 0.0230
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
To run this example use your favorite python shell::
% ipython benchmark/bench_sgd_covertype.py
"""
from __future__ import division
print __doc__
# Author: Peter Prettenhoer <[email protected]>
# License: BSD Style.
# $Id$
from time import time
import os
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier
from sklearn import metrics
######################################################################
## Download the data, if not already on disk
if not os.path.exists('covtype.data.gz'):
# Download the data
import urllib
print "Downloading data, Please Wait (11MB)..."
opener = urllib.urlopen(
'http://archive.ics.uci.edu/ml/'
'machine-learning-databases/covtype/covtype.data.gz')
open('covtype.data.gz', 'wb').write(opener.read())
######################################################################
## Load dataset
print("Loading dataset...")
import gzip
f = gzip.open('covtype.data.gz')
X = np.fromstring(f.read().replace(",", " "), dtype=np.float64, sep=" ",
count=-1)
X = X.reshape((581012, 55))
f.close()
# class 1 vs. all others.
y = np.ones(X.shape[0]) * -1
y[np.where(X[:, -1] == 1)] = 1
X = X[:, :-1]
######################################################################
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
train_idx = idx[:522911]
test_idx = idx[522911:]
X_train = X[train_idx]
y_train = y[train_idx]
X_test = X[test_idx]
y_test = y[test_idx]
# free memory
del X
del y
######################################################################
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
######################################################################
## Print dataset statistics
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25),
X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25),
np.unique(y_train).shape[0]))
print("%s %d (%d, %d)" % ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == -1)))
print("%s %d (%d, %d)" % ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == -1)))
print("")
print("Training classifiers...")
print("")
######################################################################
## Benchmark classifiers
def benchmark(clf):
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
err = metrics.zero_one(y_test, pred) / float(pred.shape[0])
return err, train_time, test_time
######################################################################
## Train Liblinear model
liblinear_parameters = {
'loss': 'l2',
'penalty': 'l2',
'C': 1000,
'dual': False,
'tol': 1e-3,
}
liblinear_res = benchmark(LinearSVC(**liblinear_parameters))
liblinear_err, liblinear_train_time, liblinear_test_time = liblinear_res
######################################################################
## Train GaussianNB model
gnb_err, gnb_train_time, gnb_test_time = benchmark(GaussianNB())
######################################################################
## Train SGD model
sgd_parameters = {
'alpha': 0.001,
'n_iter': 2,
}
sgd_err, sgd_train_time, sgd_test_time = benchmark(SGDClassifier(
**sgd_parameters))
## Train CART model
<<<<<<< REMOTE
cart_err, cart_train_time, cart_test_time = benchmark(
DecisionTreeClassifier(min_split=5,
max_depth=None))
=======
## print("Training GB model")
>>>>>>> LOCAL
<<<<<<< REMOTE
=======
## gb_err, gb_train_time, gb_test_time = benchmark(
>>>>>>> LOCAL
<<<<<<< REMOTE
######################################################################
=======
## GradientBoostingClassifier(min_split=5, max_depth=10, n_iter=20,
>>>>>>> LOCAL
<<<<<<< REMOTE
## Train RandomForest model
=======
## learn_rate=.8, subsample=0.5))
>>>>>>> LOCAL
<<<<<<< REMOTE
print("")
=======
>>>>>>> LOCAL
## print_row("GB", gb_train_time, gb_test_time, gb_err)
######################################################################
## Print classification performance
print_row("RandomForest", rf_train_time, rf_test_time, rf_err)
print_row("Extra-Trees", et_train_time, et_test_time, et_err)
print("Classification performance:")
print("===========================")
print("")
def print_row(clf_type, train_time, test_time, err):
print("%s %s %s %s" % (clf_type.ljust(12),
("%.4fs" % train_time).center(10),
("%.4fs" % test_time).center(10),
("%.4f" % err).center(10)))
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"error-rate"))
print("-" * 44)
print_row("Liblinear", liblinear_train_time, liblinear_test_time,
liblinear_err)
print_row("GaussianNB", gnb_train_time, gnb_test_time, gnb_err)
print_row("SGD", sgd_train_time, sgd_test_time, sgd_err)
print_row("CART", cart_train_time, cart_test_time, cart_err)
print("")
print("")
|
[
"[email protected]"
] | |
516ddce9995ee16a9c3d14b282864b36283da25f
|
0805420ce1890c36aa9e0cc1a782945464433ef6
|
/client/eve/common/lib/eveLocalization/__init__.py
|
a26d9acf4f873ae1332caf2913e0b18ee75e8119
|
[] |
no_license
|
cnrat/dec-eve-serenity
|
4ebc3b2ab8faa6e6714dbb72b7ebcf92c4b2d75c
|
37519e66a5fbb0d7c417d5cf9778636991efbed8
|
refs/heads/master
| 2021-01-21T03:39:48.969227 | 2016-08-10T05:25:07 | 2016-08-10T05:25:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 211 |
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\common\lib\eveLocalization\__init__.py
from _evelocalization import *
|
[
"[email protected]"
] | |
9107cd52b4f5cb29c06fa7c3b10e07dbb89fe3a2
|
e230e3c1d6935d36b7074390f096d782cabd75af
|
/dailyfresh/settings.py
|
520e1cbe63fe0018a6d3e7702bc98f883808c38e
|
[] |
no_license
|
PeterZhangxing/dailyfresh_ori
|
603e7e42457d27ffefb6a4601f9b6826a3a55a6f
|
19b6d667d6f49a528aeb6f4430e2537c933936f0
|
refs/heads/master
| 2020-12-02T01:41:32.160278 | 2019-12-30T04:24:50 | 2019-12-30T04:24:50 | 230,846,590 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,607 |
py
|
"""
Django settings for dailyfresh project.
Generated by 'django-admin startproject' using Django 2.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,os.path.join(BASE_DIR,'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h2)2bq3(3=-9a#8m$t-ci9t91o*tr%xs%@3g2^e-4^)i$(335l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
    'tinymce',  # rich-text editor
    'haystack',  # register the full-text search framework
    'user',  # user module
    'goods',  # goods module
    'cart',  # shopping-cart module
    'order',  # order module
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dailyfresh.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dailyfresh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dailyfresh',
'USER': 'zx2005',
'PASSWORD': 'redhat',
'HOST': '10.1.1.128',
'PORT':3306,
}
}
# Tell Django's built-in auth system which user model class to use
AUTH_USER_MODEL='user.User'
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'  # localization
TIME_ZONE = 'Asia/Shanghai'  # localization
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static'),
]
# Rich-text editor configuration
TINYMCE_DEFAULT_CONFIG = {
'theme': 'advance',
'width': 600,
'height': 400,
}
# Email sending configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# SMTP server address
EMAIL_HOST = 'smtp.qq.com'
EMAIL_PORT = 25
# mailbox the mail is sent from
EMAIL_HOST_USER = '[email protected]'
# client authorization password configured in the mailbox
EMAIL_HOST_PASSWORD = 'cdbnlajjhfctbjhb'
# sender name shown to recipients
EMAIL_FROM = '天天吃屎<[email protected]>'
# Django cache configuration
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://10.1.1.128:6379/9",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
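# Minimal usage sketch of the cache configured above (assumes django_redis is installed):
# from django.core.cache import cache
# cache.set('greeting', 'hello', timeout=60)
# cache.get('greeting')  # -> 'hello'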
# Store sessions in the cache configured above
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
# Default redirect URL when Django's built-in authentication fails
LOGIN_URL='/user/login'
# Django file storage class
DEFAULT_FILE_STORAGE='utils.fdfs.storage.FdfsStorage'
# Path of the client.conf file used by fdfs
FDFS_CLIENT_CONF='./utils/fdfs/client.conf'
# IP and port of nginx on the fdfs storage server
FDFS_URL='http://10.1.1.128:8888/'
# Full-text search framework configuration
HAYSTACK_CONNECTIONS = {
'default': {
        # use the whoosh engine
        # 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
        'ENGINE': 'haystack.backends.whoosh_cn_backend.WhooshEngine',
        # index file path
        'PATH': os.path.join(BASE_DIR, 'whoosh_index'),
}
}
# Rebuild the index automatically whenever data is added, modified or deleted
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
# Number of search results displayed per page
HAYSTACK_SEARCH_RESULTS_PER_PAGE=1
|
[
"[email protected]"
] | |
c564381b8a3786274c292ddc6a57ed24ad1e6895
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03681/s311284212.py
|
d8b25fb6e7dd6d5384882806daa8e1c440d5c178
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 298 |
py
|
n, m = map(int, input().split())
mod = 10 ** 9 + 7
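# Alternating arrangements of two groups of sizes n and m exist only when
# |n - m| <= 1: the count is n! * m! when |n - m| == 1, and 2 * n! * m!
# when n == m (either group may take the first position).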
if abs(n - m) >= 2:
print(0)
else:
res = 1
for i in range(1, n+1):
res = res * i % mod
for i in range(1, m+1):
res = res * i % mod
if abs(n - m) == 1:
print(res)
else:
print(res * 2 % mod)
|
[
"[email protected]"
] | |
fbc9661d70e561d78342cfa587b4a738aa00e9e6
|
c85ec43e50f81f8e20c883eae9e06a5c8c621f8e
|
/caldera/utils/__init__.py
|
b2967128d628a732cece629c50a123db23a166f8
|
[
"MIT"
] |
permissive
|
jvrana/caldera
|
b6cc0faed560df6bfa15a3f460fed4ea18b8a55a
|
a346324e77f20739e00a82f97530dda4906f59dd
|
refs/heads/master
| 2023-04-27T04:19:05.499430 | 2021-03-09T16:37:50 | 2021-03-09T16:37:50 | 266,161,720 | 0 | 0 |
MIT
| 2020-08-12T01:40:48 | 2020-05-22T16:49:35 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,795 |
py
|
r"""
Caldera utility functions.
.. autosummary::
:toctree: generated/
dict_join
# pairwise
Indexing
--------
.. autosummary::
:toctree: generated/
reindex_tensor
unravel_index
Tensor
------
Utilities for :class:`torch.Tensor`
.. autosummary::
:toctree: generated/
scatter_coo
scatter_indices
torch_coo_to_scipy_coo
deterministic_seed
long_isin
same_storage
stable_arg_sort_long
tensor_is_empty
torch_scatter_group
Functional
----------
Functional programming module.
.. autosummary::
:toctree: generated/
:recursive:
functional
Networkx Utilities
------------------
Extra :mod:`networkx` utilities
.. autosummary::
:toctree: generated/
:recursive:
"""
from ._dict_join import dict_join
from ._iteration import _first
from ._iteration import pairwise
from caldera.utils.indexing import reindex_tensor
from caldera.utils.indexing import unravel_index
from caldera.utils.np import replace_nan_with_inf
from caldera.utils.sparse import scatter_coo
from caldera.utils.sparse import scatter_indices
from caldera.utils.sparse import torch_coo_to_scipy_coo
from caldera.utils.tensor import deterministic_seed
from caldera.utils.tensor import long_isin
from caldera.utils.tensor import same_storage
from caldera.utils.tensor import stable_arg_sort_long
from caldera.utils.tensor import tensor_is_empty
from caldera.utils.tensor import torch_scatter_group
__all__ = [
"reindex_tensor",
"unravel_index",
"scatter_coo",
"scatter_indices",
"torch_coo_to_scipy_coo",
"deterministic_seed",
"long_isin",
"same_storage",
"stable_arg_sort_long",
"tensor_is_empty",
"torch_scatter_group",
"dict_join",
"pairwise",
"_first",
"replace_nan_with_inf",
]
|
[
"[email protected]"
] | |
db704e6bc73086f4b513638afc26cfca69671862
|
4618c0152d45bcb5f54e298661a1479c643353f4
|
/pyengine/api/v1/GetProductDetail.py
|
d47126c2ec1ccde23882d215ccd30c526680ccaf
|
[] |
no_license
|
pyengine/catalog
|
07312fb7606f6ff0b7e55359740af4a4e5d509f4
|
2403563c1f93d988466a12b870ce25475b0d1d92
|
refs/heads/master
| 2021-01-10T04:47:24.200088 | 2016-04-13T15:04:47 | 2016-04-13T15:04:47 | 55,772,134 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 454 |
py
|
from pyengine.lib.error import *
from pyengine.lib.command import Command
class GetProductDetail(Command):
# Request Parameter Info
req_params = {
'uuid': ('r', 'str'),
}
def __init__(self, api_request):
        super(GetProductDetail, self).__init__(api_request)
def execute(self):
mgr = self.locator.getManager('ProductManager')
info = mgr.getProductDetail(self.params)
return info.result()
|
[
"[email protected]"
] | |
0f40308406e38359eb00bd87c471b5f1ff5f6778
|
0fba89a7703d883231decbb5b748d4df22832e6a
|
/recipe_scrapers/_decorators.py
|
87fb968ea2b97ba5373f8906f7b9acf4f19879ef
|
[
"MIT"
] |
permissive
|
tobiaghiraldini/recipe-scrapers
|
c66f1fb448f6e696677ec95d43a595be8470e890
|
1ced80d25dcc6e88877c26187990f112f3134e67
|
refs/heads/master
| 2022-07-04T20:31:07.114353 | 2020-05-20T10:42:26 | 2020-05-20T10:42:26 | 262,996,294 | 0 | 0 |
MIT
| 2020-05-11T09:23:45 | 2020-05-11T09:23:45 | null |
UTF-8
|
Python
| false | false | 1,895 |
py
|
import functools
from language_tags import tags
from ._schemaorg import SchemaOrgException
class Decorators:
@staticmethod
def schema_org_priority(decorated):
"""
Use SchemaOrg parser with priority (if there's data in it)
On exception raised - continue by default.
If there's no data (no schema implemented on the site) - continue by default
"""
@functools.wraps(decorated)
def schema_org_priority_wrapper(self, *args, **kwargs):
            function = getattr(self.schema, decorated.__name__, None)
            if not function:
                raise SchemaOrgException(
                    "Function '{}' not found in schema"
                    .format(decorated.__name__)
)
if not self.schema.data:
return decorated(self, *args, **kwargs)
try:
value = function(*args, **kwargs)
except SchemaOrgException:
return decorated(self, *args, **kwargs)
return value or decorated(self, *args, **kwargs)
return schema_org_priority_wrapper
@staticmethod
def og_image_get(decorated):
@functools.wraps(decorated)
def og_image_get_wrapper(self, *args, **kwargs):
try:
image = self.soup.find(
'meta',
{'property': 'og:image', 'content': True}
)
return image.get('content')
except AttributeError:
return decorated(self, *args, **kwargs)
return og_image_get_wrapper
@staticmethod
def bcp47_validate(decorated):
@functools.wraps(decorated)
def bcp47_validate_wrapper(self, *args, **kwargs):
tag = tags.tag(decorated(self, *args, **kwargs))
return str(tag) if tag.valid else None
return bcp47_validate_wrapper
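# Minimal usage sketch (hypothetical scraper class; attribute names assumed):
# class MyScraper:
#     schema = ...  # object exposing .data and a title() method
#     soup = ...    # BeautifulSoup document
#     @Decorators.schema_org_priority
#     def title(self):
#         return self.soup.find('h1').get_text()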
|
[
"[email protected]"
] | |
d9e5e750b84c63450d958537f59dbc8b3863f3b4
|
2194df5490666825d382e6e47bd33139b1faf0df
|
/vtools/videotoimage.py
|
ff6b9cb5e919adadbff64930f5eb8a56adafd551
|
[] |
no_license
|
aiporre/video_tools
|
a88a3134c6148bd384c71e846aeab49da6bfab8e
|
f955c22fc7259a4b45592f522bb80f0533e6093d
|
refs/heads/master
| 2021-08-02T21:03:53.344844 | 2021-07-28T16:45:57 | 2021-07-28T16:45:57 | 213,970,927 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,568 |
py
|
import cv2
import argparse
import os
from tqdm import tqdm
class VideoToImage(object):
def __init__(self, src=0, output_path = './', extension = '.jpg', prefix='frame_', padding=-1):
# Create a VideoCapture object
self.capture = cv2.VideoCapture(src)
self.output_path = output_path
self.frame_counter = 0
# resolution of the video
self.frame_width = int(self.capture.get(3))
self.frame_height = int(self.capture.get(4))
self.n_frames = int(self.capture.get(7))
self.extension = extension
self.prefix = prefix
self.padding = padding
def update(self):
# Read the next frame
if self.capture.isOpened():
(self.status, self.frame) = self.capture.read()
self.frame_counter +=1
def show_frame(self):
# Convert to grayscale and display frames
if self.status:
cv2.imshow('frame', self.frame)
# Press 'q' on keyboard to stop recording
key = cv2.waitKey(1)
if key == ord('q'):
self.capture.release()
cv2.destroyAllWindows()
exit(1)
def save_frame(self):
# Save grayscale frame into video output file
if self.status: # self.capture.isOpened():
if self.padding > 0:
filename = os.path.join(self.output_path, self.prefix + "{1:0{0}}".format(self.padding,self.frame_counter) + self.extension)
else:
filename = os.path.join(self.output_path, self.prefix + str(self.frame_counter) + self.extension)
cv2.imwrite(filename, self.frame)
    def close(self, should_exit=False):
        # Release the capture and close any open preview windows.
        # (Renamed the parameter: the original `exit=False` shadowed the
        # builtin exit(), so `exit(1)` would have raised a TypeError.)
        self.capture.release()
        cv2.destroyAllWindows()
        if should_exit:
            exit(1)
class VideoToGrayImage(VideoToImage):
def __init__(self, src=0, output_path = './', extension = '.jpg', prefix='frame_', padding=-1):
super(VideoToGrayImage,self).__init__(src=src, output_path = output_path, extension = extension, prefix=prefix, padding=padding)
def update(self):
super().update()
if self.status:
self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
def run(video_src, output_path=None, extension ='.png', plot='n', prefix='frame_', padding=-1, gray = 'y'):
'''
run default video to image
'''
if output_path is None:
output_path = os.path.dirname(video_src)
output_path = os.path.join(output_path,'video_images')
if not os.path.exists(output_path):
os.mkdir(output_path)
if gray == 'y':
video_stream_widget = VideoToGrayImage(video_src, output_path = output_path, extension = extension, prefix=prefix, padding=padding)
else:
video_stream_widget = VideoToImage(video_src, output_path=output_path, extension=extension, prefix=prefix, padding=padding)
if plot == 'y':
        print('stop conversion by pressing q')
for _ in tqdm(range(video_stream_widget.n_frames)):
if video_stream_widget.capture.isOpened():
try:
video_stream_widget.update()
if plot == 'y':
video_stream_widget.show_frame()
video_stream_widget.save_frame()
except AttributeError:
pass
else:
video_stream_widget.close()
video_stream_widget.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert to gray avi videos.')
parser.add_argument('--target', metavar='target', type=str,
help='target avi video full path')
parser.add_argument('--output', metavar='output', type=str,
help='output path where the images are saved')
    parser.add_argument('--plot', metavar='plot', type=str, default='y',
                        help='show video during conversion flag (y (default) or n)')
    parser.add_argument('--extension', metavar='extension', type=str, default='.jpg',
                        help='extension of the image output (default: .jpg)')
args = parser.parse_args()
video_src = args.target
print(video_src)
video_stream_widget = VideoToGrayImage(video_src, output_path = args.output, extension = args.extension)
    print('stop conversion by pressing q')
while video_stream_widget.capture.isOpened():
try:
video_stream_widget.update()
if args.plot == 'y':
video_stream_widget.show_frame()
video_stream_widget.save_frame()
except AttributeError:
pass
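# --- Editor's sketch: programmatic use of run() (illustrative; the video file
# name is an assumption) ---
#
#     from vtools.videotoimage import run
#     run('recording.avi', extension='.png', plot='n', padding=5, gray='y')
#
# With output_path left as None this creates a video_images/ folder next to
# the video and writes zero-padded grayscale frames (frame_00001.png, ...).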
|
[
"[email protected]"
] | |
dab5a55c04a4f4242ed5725c95704470f8d27791
|
aa30891b324f86fe9c6a3eeeb6a9b8ae64b7d81d
|
/ex043.py
|
3f7ab5d16e10be268d9e4f0765ca04086af2ad88
|
[] |
no_license
|
JoamirS/Exercicios-Python
|
0055c5f73b9d0fb2d5d780c620bb0c4840c7d1b8
|
09b74babdfdf7142254a8d14132859e52f7b52b6
|
refs/heads/master
| 2023-08-31T13:21:03.310332 | 2023-08-26T03:42:58 | 2023-08-26T03:42:58 | 178,745,611 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 874 |
py
|
'''
Write a program that reads a person's weight and height, computes their BMI (IMC)
and shows their status according to the table below:
- Below 18.5: underweight | 18.5 to 25: ideal weight | 25 to 30: overweight | 30 to 40: obese
- Above 40: morbidly obese
'''
# Declaring the variables
print('\033[31mExemplo: KG 70\033[0;0m')
weight = float(input('Digite seu peso: KG '))
print('\033[31mExemplo: M 1.85\033[0;0m')
height = float(input('Digite sua altura: M '))
imc = weight / (height ** 2)
print('O IMC desta pessoa é {:.1f}'.format(imc))
# Declaring the conditions
if imc < 18.5:
print('Você está abaixo do peso')
elif 18.5 <= imc < 25:
print('Você está na faixa de peso ideal')
elif 25 <= imc < 30:
print('Sobrepeso')
elif 30 <= imc < 40:
print('Obesidade')
elif imc >= 40:
print('Obesidade Mórbida')
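# Worked example (editor's note): weight 70 kg and height 1.85 m give
# imc = 70 / 1.85 ** 2 ≈ 20.5, which lands in the 18.5-25 band ("Peso ideal").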
|
[
"[email protected]"
] | |
caa5d7f22e33db8b41abcb461289fd84c5a814ee
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/40/usersdata/78/24413/submittedfiles/main.py
|
eab508f3756a8f0f59276fbd4bed79017c152c6b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 332 |
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import funcoes
# START HERE (COMECE AQUI)
m = int(input('digite o valor de m:'))
e = float(input('digite o valor de epsilon:'))
m=funcoes.absoluto(m)
pi=funcoes.pi(m)
cosseno=funcoes.cosseno(pi/5,e)
razaoaurea=funcoes.razaoaurea(m,e)
print('%.15f' %pi)
print('%.15f' %razaoaurea)
|
[
"[email protected]"
] | |
b0b53b387467c7290b49d7c01a16691b782d9100
|
951b605ea41da28dccba6d3de63fb9211b7ad5b1
|
/Mains/main.py
|
f3cabc1b8e650a5af81217b1b118a57e8a7327f4
|
[
"MIT"
] |
permissive
|
tiangeluo/DefectiveCNN
|
99296f7a86efd3c4d044701f4e94388989cbd66a
|
fdbf5235adffa846630fadb4ff910de50870c077
|
refs/heads/master
| 2022-01-29T14:23:10.198712 | 2022-01-08T22:20:54 | 2022-01-08T22:20:54 | 222,830,775 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,648 |
py
|
'''Train CIFAR10 with PyTorch.'''
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
#from models import *
from resnet import ResNet18
#from resnet_drop import ResNet18
from utils import progress_bar
from torch.optim.lr_scheduler import MultiStepLR
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Model
print('==> Building model..')
#net = VGG('VGG19')
#net = ResNet18()
# net = PreActResNet18()
#net = GoogLeNet()
#net = DenseNet121()
#net = ResNet50()
#net = ResNeXt29_2x64d()
# net = MobileNet()
#net = MobileNetV2()
#net = DPN92()
# net = ShuffleNetG2()
#net = SENet18()
net = ResNet18()
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.t7')
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
scheduler = MultiStepLR(optimizer, milestones=[150,250], gamma=0.1)
# Training
def train(epoch):
scheduler.step()
print('\nEpoch: %d' % epoch)
net.train()
train_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
def test(epoch):
global best_acc
net.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = net(inputs)
loss = criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
% (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
# Save checkpoint.
acc = 100.*correct/total
if acc > best_acc:
print('Saving..')
state = {
'net': net.state_dict(),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
        torch.save(state, './checkpoint/ckpt.t7')  # keep in sync with the --resume load path
best_acc = acc
for epoch in range(start_epoch, start_epoch+350):
train(epoch)
if epoch % 5 == 0:
test(epoch)
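# --- Editor's note: typical invocations (sketch, not part of the original) ---
#
#     python main.py --lr 0.1     # train from scratch
#     python main.py --resume     # continue from ./checkpoint/ckpt.t7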
|
[
"[email protected]"
] | |
13b2b9c390f93b4c58274db5a361c530327c3a2b
|
bbe74f172bf1f1cca1c77bd249c6f9a97ca897a4
|
/probs11-20/prob13.py
|
0e84fcee2846a2e45db91e51d3eefd773b8d39cf
|
[] |
no_license
|
kruthar/euler
|
5b32b7780502ff82e855c0c9670c91aff3938c5d
|
18a59531f2108074de3a7db29a77017663753abc
|
refs/heads/master
| 2021-01-13T13:19:54.723543 | 2016-02-22T14:53:45 | 2016-02-22T14:53:45 | 52,280,655 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,357 |
py
|
__author__ = 'kruthar'
'''
Large Sum
Work out the first ten digits of the sum of the following one-hundred 50-digit numbers.
37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690
'''
f = open('../data/data-prob13.txt', 'r')
total = 0
for line in f.readlines():
total += int(line)
print str(total)[0:10]
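# Editor's note: for the 100 numbers above this prints 5537376230, the
# published Project Euler #13 answer.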
|
[
"[email protected]"
] | |
19a5eb94d0a3c8ccb52b085d6825e08f5a8062ca
|
51f2492a5c207e3664de8f6b2d54bb93e313ca63
|
/atcoder/soundhound2018-summer-qual/c.py
|
93091c2550ea9792540a7ddf7fe97eb7d9c2060f
|
[
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
abeaumont/competitive-programming
|
23c5aabd587d7bb15a61efd3428838cb934233dd
|
a24c9b89941a59d344b51dc1010de66522b1a0dd
|
refs/heads/master
| 2023-09-01T09:50:58.267361 | 2023-07-31T18:00:10 | 2023-07-31T18:00:10 | 117,589,708 | 618 | 262 |
WTFPL
| 2023-07-12T17:36:20 | 2018-01-15T20:00:56 |
C++
|
UTF-8
|
Python
| false | false | 296 |
py
|
#!/usr/bin/env python3
# https://soundhound2018-summer-qual.contest.atcoder.jp/tasks/soundhound2018_summer_qual_c
n, m, d = map(int, input().split())
if d == 0: print('{:.10f}'.format((m - 1) / n))
else:
t = n * (n - 1) // 2
print('{:.10f}'.format((m - 1) * (n - 1) * (n - d) / (t * n)))
|
[
"[email protected]"
] | |
8a2eb862ad50edda68a729d3dc9f11fc97df64e8
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/examples/docs_snippets/docs_snippets_tests/concepts_tests/resources_tests/test_resources.py
|
5e07b899452a7f25971b2a9d834e8dd7bb8a8a0f
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794 | 2023-09-05T19:54:52 | 2023-09-05T19:54:52 | 131,619,646 | 8,565 | 1,154 |
Apache-2.0
| 2023-09-14T21:57:37 | 2018-04-30T16:30:04 |
Python
|
UTF-8
|
Python
| false | false | 1,581 |
py
|
from dagster import build_init_resource_context, build_op_context
from docs_snippets.concepts.resources.resources import (
cereal_fetcher,
connect,
db_connection,
db_resource,
do_database_stuff_dev,
do_database_stuff_job,
do_database_stuff_prod,
op_requires_resources,
test_cm_resource,
test_my_resource,
test_my_resource_with_context,
use_db_connection,
uses_db_connection,
)
def test_cereal_fetcher():
assert cereal_fetcher(None)
def test_database_resource():
class BasicDatabase:
def execute_query(self, query):
pass
op_requires_resources(build_op_context(resources={"database": BasicDatabase()}))
def test_resource_testing_examples():
test_my_resource()
test_my_resource_with_context()
test_cm_resource()
def test_resource_deps_job():
result = connect.execute_in_process()
assert result.success
def test_resource_config_example():
dbconn = db_resource(build_init_resource_context(config={"connection": "foo"}))
assert dbconn.connection == "foo"
def test_jobs():
assert do_database_stuff_job.execute_in_process().success
assert do_database_stuff_dev.execute_in_process().success
assert do_database_stuff_prod.execute_in_process().success
def test_cm_resource_example():
with db_connection() as db_conn:
assert db_conn
def test_cm_resource_op():
with build_op_context(resources={"db_connection": db_connection}) as context:
use_db_connection(context)
def test_build_resources_example():
uses_db_connection()
|
[
"[email protected]"
] | |
1856c7c864ac34d62c6c9bc7de93fbbd76a236f0
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2009.1/x11/terminal/xterm/actions.py
|
0bc996b345bff3ffad1468eeeffc1e93bc0c3d83
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,072 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
shelltools.export("CC", get.CC())
autotools.configure(" \
--disable-full-tgetent \
--with-app-defaults=/usr/share/X11/app-defaults \
--disable-desktop \
--with-utempter \
--with-tty-group=tty \
--enable-256-color \
--enable-exec-xterm \
--enable-freetype \
--enable-luit \
--enable-wide-chars \
--enable-warnings \
")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.removeDir("/usr/share/pixmaps")
pisitools.dodoc("README.i18n", "xterm.log.html", "ctlseqs.txt", "16colors.txt")
|
[
"[email protected]"
] | |
0a53ab68989d286f013da079bf2fa922a9c6acde
|
8dd000d05a29cece1460fd48c4f6b12c56281ca1
|
/ugly/default_settings.py
|
e3dea2571f19db51dc2da821e5e99b015d7ad1a8
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
imclab/ugly
|
3e2717982b6e999c99d3e884d7f4d48d08fc7609
|
bc09834849184552619ee926d7563ed37630accb
|
refs/heads/master
| 2021-01-15T19:35:38.835572 | 2014-01-08T20:24:56 | 2014-01-08T20:24:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 398 |
py
|
# Flask stuff.
DEBUG = False
TESTING = False
SECRET_KEY = "development key"
# App stuff.
ADMIN_EMAIL = "Ugly Reader <[email protected]>"
BASE_MAILBOX = "[Ugly Reader]"
AES_KEY = b"test AES key... change this in production"
MAX_FEEDS = 100
# Database stuff.
SQLALCHEMY_DATABASE_URI = "postgresql://localhost/ugly"
# Google OAuth stuff.
GOOGLE_OAUTH2_CLIENT_ID = None
GOOGLE_OAUTH2_CLIENT_SECRET = None
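# --- Editor's sketch: how a Flask app would typically consume this module
# (illustrative; the app object and the UGLY_SETTINGS env var are assumptions) ---
#
#     app.config.from_object('ugly.default_settings')
#     app.config.from_envvar('UGLY_SETTINGS', silent=True)  # production overrides
#
# SECRET_KEY and AES_KEY above are development placeholders and must be
# overridden in production.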
|
[
"[email protected]"
] | |
8f55e7fc73404cd650b20ca669fd313db96f1b3c
|
4c67112b8e4c1ed7fd2f636a0dcee4972eeb79e6
|
/deployment/GPT2/encoder.py
|
f6508e866e80f4de9aaa34474e404aae72cbb3bd
|
[
"MIT"
] |
permissive
|
t04glovern/gpt2-k8s-cloud-run
|
700cc8da97e8b42ca39fb0aed9a26f7edebb090b
|
687a20f76c3e53f917ea9553e569be52deb323d6
|
refs/heads/master
| 2023-06-04T14:07:50.532901 | 2022-09-03T12:58:48 | 2022-09-03T12:58:48 | 180,802,919 | 8 | 1 |
MIT
| 2023-05-22T21:56:35 | 2019-04-11T13:53:44 |
Python
|
UTF-8
|
Python
| false | false | 4,156 |
py
|
"""Byte pair encoding utilities"""
import os
import json
import regex as re
from functools import lru_cache
@lru_cache()
def bytes_to_unicode():
"""
    Returns a list of utf-8 bytes and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
class Encoder:
def __init__(self, encoder, bpe_merges, errors='replace'):
self.encoder = encoder
self.decoder = {v:k for k,v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # 'first' not found in the remainder of the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def get_encoder():
with open('./GPT2/encoder.json', 'r') as f:
encoder = json.load(f)
with open('./GPT2/vocab.bpe', 'r', encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
)
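# --- Editor's sketch: a round-trip through the BPE codec (illustrative;
# requires the ./GPT2/encoder.json and ./GPT2/vocab.bpe files above) ---
#
#     enc = get_encoder()
#     ids = enc.encode("Hello world")
#     assert enc.decode(ids) == "Hello world"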
|
[
"[email protected]"
] | |
dfea14f587580d86c76f3dbc73c65587e1154af8
|
faaf12ab18978082233c09628b815a69e73868e4
|
/leetcode/algorithms/easy/keep_multiplying_found_values_by_two.py
|
9d03b8b29a25813877514664235bcbeb70bc846b
|
[
"WTFPL"
] |
permissive
|
ferhatelmas/algo
|
6826bcf0be782cb102c1ee20dce8d4345e1fd6d2
|
7b867f6d2c8a9fb896f464168b50dfc115617e56
|
refs/heads/master
| 2023-08-18T19:59:58.435696 | 2023-08-14T10:16:00 | 2023-08-14T10:16:00 | 3,813,734 | 27 | 16 |
WTFPL
| 2020-10-25T23:00:16 | 2012-03-23T23:43:31 |
Java
|
UTF-8
|
Python
| false | false | 212 |
py
|
from typing import List
class Solution:
def findFinalValue(self, nums: List[int], original: int) -> int:
s = set(nums)
o = original
while o in s:
o *= 2
return o
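# Example (editor's note): findFinalValue([5, 3, 6, 1, 12], 3) doubles
# 3 -> 6 -> 12 -> 24; 24 is not in nums, so 24 is returned.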
|
[
"[email protected]"
] | |
5a43f55a19e3c63e780c242dc3f5a1013c94a070
|
a951ccc03e99ae61178ab85f6db0fd5968709280
|
/prefix_sums/genomic_range.py
|
04f8f6028e9ab8f7e8919e44da513188dc5cd481
|
[] |
no_license
|
mmanishh/codilitysolution
|
37142e66c25f786ef7bedaebbe0b164e50ff7804
|
d3487be50e52861cc59d3651e996d4d23cb32613
|
refs/heads/master
| 2021-07-07T12:58:07.651699 | 2020-08-07T10:00:21 | 2020-08-07T10:00:21 | 163,286,608 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 429 |
py
|
def genomic_range(S,P,Q):
S = list(S)
new_s = []
result = []
impact = {'A':1,'C':2,'G':3,'T':4}
for s in S:
new_s.append(impact[s])
for i in range(len(P)):
l ,r = P[i] , Q[i]
sliced = new_s[l:r+1]
result.append(min(sliced))
return result
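
# Editor's sketch: the O(n + m) prefix-sum variant that the folder name hints
# at (an assumption; the original genomic_range above slices per query).
def genomic_range_prefix(S, P, Q):
    # counts[c][i] = number of occurrences of nucleotide c in S[:i]
    counts = {c: [0] * (len(S) + 1) for c in 'ACG'}
    for i, ch in enumerate(S):
        for c in 'ACG':
            counts[c][i + 1] = counts[c][i] + (ch == c)
    result = []
    for l, r in zip(P, Q):
        for impact, c in enumerate('ACG', start=1):
            # any occurrence of c inside S[l:r+1]?
            if counts[c][r + 1] - counts[c][l] > 0:
                result.append(impact)
                break
        else:
            result.append(4)  # only 'T' remains in the slice
    return result
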
if __name__ == '__main__':
S = 'CAGCCTA'
P = [2,5,0]
Q = [4,5,6]
print(genomic_range(S,P,Q))
|
[
"[email protected]"
] | |
b53210f45388c5820faf0c133ad6ef73039b955b
|
9a034b12c845d01f36aff2e5fdbf8486a9e8a642
|
/faketrudy/trudy_api/migrations/0005_child_tweets.py
|
58e5ce1a352b4425e107065b667d213e62e02fbe
|
[] |
no_license
|
piyush6191996/Django-Rest-Framework
|
2d1cd89de700e7aa68f93f9104418c05c70e800a
|
3950a72bed52fd4bcbec3de439fe9f1130df10f9
|
refs/heads/master
| 2020-03-15T06:00:31.362680 | 2018-05-07T19:09:17 | 2018-05-07T19:09:17 | 131,998,569 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,464 |
py
|
# Generated by Django 2.0.2 on 2018-04-10 08:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('trudy_api', '0004_auto_20180410_1229'),
]
operations = [
migrations.CreateModel(
name='Child',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('age', models.IntegerField()),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
('twitter_token', models.CharField(blank=True, max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Tweets',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tweets', models.TextField()),
('sentiment', models.CharField(max_length=255)),
('child', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='trudy_api.Child')),
],
),
]
|
[
"[email protected]"
] | |
fac948d696d4a82b62dca8ce6557a6b4e27a4e6e
|
0ecb1763b4cab08a1fb80234639e46afc8921e2f
|
/further/routing_1.py
|
882cf1231be2c220621e4dd32a8a4aea3cdd9566
|
[] |
no_license
|
mach8686devops/pyside6-demo
|
4eed3713288ec21b0ec4b8561290f87925693b89
|
848302ff9c1536034cf5f225fa953944d011c2a4
|
refs/heads/main
| 2023-05-05T11:12:20.711846 | 2021-05-28T13:44:41 | 2021-05-28T13:44:41 | 371,714,201 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,491 |
py
|
import sys
from PySide6.QtCore import QSize, Qt
from PySide6.QtWidgets import QApplication, QLabel, QMainWindow
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.label = QLabel("Click in this window")
self.status = self.statusBar()
self.setFixedSize(QSize(200, 100))
self.setCentralWidget(self.label)
def mouseMoveEvent(self, e):
self.label.setText("mouseMoveEvent")
def mousePressEvent(self, e):
button = e.button()
if button == Qt.LeftButton:
self.label.setText("mousePressEvent LEFT")
if e.x() < 100:
self.status.showMessage("Left click on left")
self.move(self.x() - 10, self.y())
else:
self.status.showMessage("Left click on right")
self.move(self.x() + 10, self.y())
elif button == Qt.MiddleButton:
self.label.setText("mousePressEvent MIDDLE")
elif button == Qt.RightButton:
self.label.setText("mousePressEvent RIGHT")
if e.x() < 100:
self.status.showMessage("Right click on left")
print("Something else here.")
self.move(10, 10)
else:
self.status.showMessage("Right click on right")
self.move(400, 400)
app = QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec_()
|
[
"[email protected]"
] | |
f9a3bff56e5ed0ba4f874a6571ecf9e908e79f95
|
de1f9d660cfb738afdb66e4a2d63a4577c07d9c6
|
/xcube/webapi/defaults.py
|
e2f0580e213aeaa838812aab943976b33b2c918e
|
[
"MIT"
] |
permissive
|
rabaneda/xcube
|
db47eb416db85df891a924063482a7943cae9d4f
|
0d38ca513987184dbc4a37da1616e4076964d0f1
|
refs/heads/master
| 2020-11-24T00:11:17.107630 | 2020-02-11T10:11:34 | 2020-02-11T10:11:34 | 227,877,138 | 0 | 0 |
MIT
| 2019-12-13T16:14:51 | 2019-12-13T16:14:50 | null |
UTF-8
|
Python
| false | false | 1,831 |
py
|
# The MIT License (MIT)
# Copyright (c) 2019 by the xcube development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
SERVER_NAME = 'xcube Server'
SERVER_DESCRIPTION = f'WMTS, catalogue, data access, tile, feature, time-series services for' \
' xarray-enabled data cubes'
DEFAULT_ADDRESS = 'localhost'
DEFAULT_PORT = 8080
DEFAULT_TILE_CACHE_SIZE = "512M"
DEFAULT_UPDATE_PERIOD = 2.
DEFAULT_LOG_PREFIX = 'xcube-serve.log'
DEFAULT_TILE_COMP_MODE = 0
DEFAULT_TRACE_PERF = False
DEFAULT_CMAP_NAME = 'viridis'
DEFAULT_CMAP_VMIN = 0.
DEFAULT_CMAP_VMAX = 1.
DEFAULT_CMAP_WIDTH = 1
DEFAULT_CMAP_HEIGHT = 5
_GIGAS = 1000 * 1000 * 1000
FILE_TILE_CACHE_CAPACITY = 20 * _GIGAS
FILE_TILE_CACHE_ENABLED = False
FILE_TILE_CACHE_PATH = './image-cache'
MEM_TILE_CACHE_CAPACITY = 2 * _GIGAS
|
[
"[email protected]"
] | |
16788fb6c4d87a3d199099337d60a972ac10c1d0
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5631989306621952_1/Python/gvalli/2016-1A-A-lastword.py
|
becda986965852bb63622f5a8164983cb9663cf1
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 444 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
fh = open(sys.argv[1], 'r')
T = int(fh.readline()) # number of test cases
for t in range(T):
S = fh.readline().split()[0] # string of letters
res = ''
oldval = -1
for c in S:
val = ord(c)
if val >= oldval:
res = c + res
oldval = ord(c)
else:
res = res + c
print('Case #{:d}: {}'.format(t + 1, res))
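# Usage (editor's note): python 2016-1A-A-lastword.py input.txt, where
# input.txt holds T on the first line followed by T lines of letters.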
|
[
"[email protected]"
] | |
def6c18b46463b5c3cd481ceefdafb7b8c4e49d6
|
98a936d5372294ed892a9bf9cf98646c72af515c
|
/usage/lab/explorer_usage.py
|
fd4a3b0be9636dbe6d5abd61ffe6a45858e3c81c
|
[
"MIT"
] |
permissive
|
edublancas/pipeline
|
f6d22ad07b134be98c139d1de6ca7d8321072ba8
|
5bef04d77fdadc1dc4ec22b9b346f0a062cca1ce
|
refs/heads/master
| 2021-05-15T01:09:50.072378 | 2016-12-29T05:45:48 | 2016-12-29T05:45:48 | 59,692,708 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,169 |
py
|
from pipeline import ExperimentExplorer
# load everything
explorer = ExperimentExplorer()
# just load results from my_experiment_a
explorer = ExperimentExplorer('my_experiment_a')
# load results from my_experiment_a and my_experiment_b
explorer = ExperimentExplorer(['my_experiment_a', 'my_experiment_b'])
# compute new metric for every model
explorer.apply(lambda m: m.compute_new_metric)
# store this new metric for every model affected
explorer.save()
# after plotting, analyzing results, I want to get the
# trained model
model = explorer.get('some_id')
metric = model.compute_metric()
print 'metric is {}'.format(metric)
# the problem is: should I pickle models? I should NOT pickle everything
# but if the logger is smart enough I may be able to just pickle the top models
# another option is to just re-train the model...
# independent of the options the API should be transparent for the user
# since he does not need to know and just be able to recover the object
# - problem with re-training: I need the data. Assuming the data is still the
# same I can do that, but if the numbers have changed and the columns
# are named the same I'm gonna have a baaad time
|
[
"[email protected]"
] | |
c5a4840e2abacff143dd7d855e796d90b83c83fe
|
d9eef8dd3489682c8db41f2311e3058d1f369780
|
/.history/abel-network-files/metis_transf_20180709124830.py
|
42a9fae8f327a0df02f62926b8ffe1d5dacf3f19
|
[] |
no_license
|
McKenzie-Lamb/Gerrymandering
|
93fe4a49fe39a0b307ed341e46ba8620ea1225be
|
b7a7c4129d6b0fcd760ba8952de51eafa701eac3
|
refs/heads/master
| 2021-01-25T06:06:43.824339 | 2018-10-16T14:27:01 | 2018-10-16T14:27:01 | 93,526,515 | 0 | 0 | null | 2018-07-12T19:07:35 | 2017-06-06T14:17:47 |
Python
|
UTF-8
|
Python
| false | false | 2,331 |
py
|
# Author: Abel Gonzalez
# Date: 06/26/18
#
# Description:
# This program uses the .shp file to create a network graph where each node
# represents a census tract and the edge represents adjacency between each
# tract, usign graph-tool instead of networkx
import graph_tool.all as gt
import metis
from pathlib import Path
# Paths
main_folder = Path("abel-network-files/")
data_folder = Path("abel-network-files/data/")
images_folder = Path("abel-network-files/images/")
# Loading the previous created Graph and creating the prop maps
graph = gt.load_graph(str(data_folder / "tmp_graph100.gt"))
name = graph.new_vertex_property('string')
color = graph.new_vertex_property('string')
adjlist_pop = []
nodew_pop = []
for i in graph.vertices():
neighbors = tuple([j for j in i.all_neighbors()])
adjlist_pop.append(neighbors)
#print(graph.vp.data[i]['PERSONS'])
    weights = (graph.vp.data[i]['PERSONS'], int(graph.vp.data[i]['CONREP14']/graph.vp.data[i]['CONDEM14']))
nodew_pop.append(weights)
metis_graph = metis.adjlist_to_metis(adjlist_pop, nodew=nodew_pop)
objval, parts = metis.part_graph(metis_graph, nparts=4)
for i in range(len(parts)):
name[graph.vertex(i)] = parts[i]
if graph.vp.data[graph.vertex(i)]['CONREP14'] > graph.vp.data[graph.vertex(i)]['CONDEM14']:
color[graph.vertex(i)] = 'red'
else:
color[graph.vertex(i)] = 'blue'
gt.graph_draw(graph, pos=graph.vp.pos, vertex_text=name, output=str(main_folder / 'tmp_metis_init.png'))
adjlist = []
nodew = []
for i in graph.vertices():
neighbors = tuple([j for j in i.all_neighbors()])
adjlist.append(neighbors)
#print(graph.vp.data[i]['PERSONS'])
weights = (graph.vp.data[i]['PERSONS'], int(graph.vp.data[i]['CONREP14']/graph.vp.data[i]['CONDEM14']))
nodew.append(weights)
metis_graph = metis.adjlist_to_metis(adjlist, nodew=nodew)
objval, parts = metis.part_graph(metis_graph, nparts=4, tpwgts=[(0.25,0.50),(0.25,0.10),(0.25, 0.30),(0.25, 0.10)])
for i in range(len(parts)):
name[graph.vertex(i)] = parts[i]
if graph.vp.data[graph.vertex(i)]['CONREP14'] > graph.vp.data[graph.vertex(i)]['CONDEM14']:
color[graph.vertex(i)] = 'red'
else:
color[graph.vertex(i)] = 'blue'
gt.graph_draw(graph, pos=graph.vp.pos, vertex_text=name, output=str(main_folder / 'tmp_metis_fin.png'))
|
[
"[email protected]"
] | |
cde74c8664798c8237fa5329c575a705974c6f41
|
34c5a03855ab0aca39acea941be520157f7d0b74
|
/lib/ansible/modules/cloud/alicloud/ali_slb_vsg_info.py
|
72e35f09e490e814c2cd95556da2fa6bd18f6359
|
[
"Apache-2.0"
] |
permissive
|
lixue323/ansible-provider
|
1260d1bc17a2fa7bf4c0f387a33dd942059850ed
|
aae2658532afcbcdf471609fae0e2108fb57af3b
|
refs/heads/master
| 2020-08-11T21:44:37.685788 | 2019-12-13T03:11:23 | 2019-12-13T04:00:45 | 214,633,323 | 0 | 1 |
Apache-2.0
| 2019-10-12T11:12:07 | 2019-10-12T11:12:07 | null |
UTF-8
|
Python
| false | false | 6,239 |
py
|
#!/usr/bin/python
# Copyright (c) 2017-present Alibaba Group Holding Limited. He Guimin <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see http://www.gnu.org/licenses/.
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ali_slb_vsg_info
version_added: "2.8"
short_description: Gather facts on virtual server group of Alibaba Cloud SLB.
description:
- This module fetches virtual server groups data from the Open API in Alibaba Cloud.
options:
load_balancer_id:
description:
- ID of server load balancer.
required: true
aliases: ["lb_id"]
vserver_group_ids:
description:
- A list of SLB vserver group ids.
required: false
aliases: ["group_ids", "ids"]
name_prefix:
description:
        - Use a virtual server group name prefix to filter vserver groups.
author:
- "He Guimin (@xiaozhu36)"
requirements:
- "python >= 2.6"
- "footmark >= 1.9.0"
extends_documentation_fragment:
- alicloud
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the Alibaba Cloud Guide for details.
- name: Retrieving vsgs using slb id
ali_slb_vsg_info:
lb_id: '{{item}}'
with_items: '{{slbs.ids}}'
- name: Filter vsg using name_regex
ali_slb_vsg_info:
name_prefix: 'ansible-foo'
lb_id: 'lb-cn3cn34'
'''
RETURN = '''
ids:
description: List ids of being fetched virtual server group.
returned: when success
type: list
sample: ["rsp-2zehblhcv", "rsp-f22c4lhcv"]
names:
description: List name of being fetched virtual server group.
returned: when success
type: list
sample: ["ansible-1", "ansible-2"]
vserver_groups:
description:
- info about the virtual server group that was created or deleted.
returned: on present
type: complex
contains:
address:
            description: The IP address of the load balancer
returned: always
type: string
sample: "47.94.26.126"
backend_servers:
description: The load balancer's backend servers
returned: always
type: complex
contains:
port:
description: The backend server port
returned: always
type: int
sample: 22
server_id:
description: The backend server id
returned: always
type: string
sample: "i-vqunci342"
type:
description: The backend server type, ecs or eni
returned: always
type: string
sample: "ecs"
weight:
description: The backend server weight
returned: always
type: int
sample: 100
id:
            description: The ID of the virtual server group that was created. Same as vserver_group_id.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_id:
            description: The ID of the virtual server group that was created.
returned: always
type: string
sample: "rsp-2zehblhcv"
vserver_group_name:
            description: The name of the virtual server group that was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
name:
            description: The name of the virtual server group that was created.
returned: always
type: string
sample: "ansible-ali_slb_vsg"
tags:
description: The load balancer tags
returned: always
type: complex
sample: {}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.alicloud_ecs import ecs_argument_spec, slb_connect
HAS_FOOTMARK = False
try:
from footmark.exception import SLBResponseError
HAS_FOOTMARK = True
except ImportError:
HAS_FOOTMARK = False
def main():
argument_spec = ecs_argument_spec()
argument_spec.update(dict(
load_balancer_id=dict(type='str', aliases=['lb_id'], required=True),
vserver_group_ids=dict(type='list', aliases=['group_ids', 'ids']),
name_prefix=dict(type='str')
))
module = AnsibleModule(argument_spec=argument_spec)
if HAS_FOOTMARK is False:
module.fail_json(msg="Package 'footmark' required for this module.")
vsg_ids = module.params['vserver_group_ids']
name_prefix = module.params['name_prefix']
ids = []
vsgs = []
names = []
try:
slb = slb_connect(module)
groups = slb.describe_vserver_groups(**{'load_balancer_id': module.params['load_balancer_id']})
if groups:
for group in groups:
if vsg_ids and group.id not in vsg_ids:
continue
if name_prefix and not str(group.name).startswith(name_prefix):
continue
vsgs.append(group.read())
ids.append(group.id)
names.append(group.name)
module.exit_json(changed=False, vserver_groups=vsgs, ids=ids, names=names)
except Exception as e:
module.fail_json(msg=str("Unable to describe slb vserver groups, error:{0}".format(e)))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
4067eaa4a5851aa47554afb318aa9f0825522d89
|
c9490d7bb9c3add1a5e71b06c9180260ffc1fff5
|
/web_dynamic/2-hbnb.py
|
2eadee48a25ba93f32aa643310baf9dfb56b7b2c
|
[
"MIT"
] |
permissive
|
PierreBeaujuge/AirBnB_clone_v4
|
54a255023587e6e291f41410f124da8089f2a5b7
|
f93bb1f22660f4497fb942abe120a5e69815affc
|
refs/heads/master
| 2021-01-04T15:00:01.541582 | 2020-10-08T09:04:29 | 2020-10-08T09:04:29 | 240,601,631 | 0 | 1 |
MIT
| 2020-02-18T02:25:15 | 2020-02-14T21:28:36 |
HTML
|
UTF-8
|
Python
| false | false | 1,351 |
py
|
#!/usr/bin/python3
"""
Flask App that integrates with AirBnB static HTML Template
"""
from flask import Flask, render_template, url_for
from models import storage
import uuid
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
port = 5000
host = '0.0.0.0'
# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
"""
after each request, this method calls .close() (i.e. .remove()) on
the current SQLAlchemy Session
"""
storage.close()
@app.route('/2-hbnb/')
def hbnb_filters(the_id=None):
"""
    handles requests to the custom template with states, cities & amenities
"""
state_objs = storage.all('State').values()
states = dict([state.name, state] for state in state_objs)
amens = storage.all('Amenity').values()
places = storage.all('Place').values()
users = dict([user.id, "{} {}".format(user.first_name, user.last_name)]
for user in storage.all('User').values())
cache_id = uuid.uuid4()
return render_template('2-hbnb.html',
states=states,
amens=amens,
places=places,
users=users,
cache_id=cache_id)
if __name__ == "__main__":
"""
MAIN Flask App"""
app.run(host=host, port=port)
|
[
"[email protected]"
] | |
d891da04d501abe4b1f6da6ca84babc9ccac723d
|
d7fb8eacd8a1aae8fe6eb49111f93090b7e87ce0
|
/backend/tstcr2020102701_dev_14091/settings.py
|
e74bd1f3e5a9f57d885d8b38f60ca2550b592ad3
|
[] |
no_license
|
crowdbotics-apps/tstcr2020102701-dev-14091
|
4d5bcfc2b0aa29e67cebcd8948258b75e8ad9c6b
|
cc6ba4999444c7e93943f76af75c2506048bf2b6
|
refs/heads/master
| 2023-01-03T05:09:02.457778 | 2020-10-28T21:59:17 | 2020-10-28T21:59:17 | 307,772,734 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,125 |
py
|
"""
Django settings for tstcr2020102701_dev_14091 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tstcr2020102701_dev_14091.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tstcr2020102701_dev_14091.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"[email protected]"
] |