blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
00ae17b2c630ccf0d4036a300ee15ed0a9356121 | 4e3c976773526fd610d64ffb83589bccfaee5e68 | /sponge-app/sponge-app-demo-service/sponge/sponge_demo_depending.py | ff40131a936612f5da6d6cd33534a1a8234f44d8 | [
"Apache-2.0"
] | permissive | softelnet/sponge | 2313d2328953fcff49a002e727bb803757870627 | 7190f23ae888bbef49d0fbb85157444d6ea48bcd | refs/heads/master | 2022-10-28T16:19:55.619882 | 2021-09-16T19:50:08 | 2021-09-16T19:50:08 | 95,256,030 | 10 | 2 | Apache-2.0 | 2022-10-04T23:55:09 | 2017-06-23T20:58:49 | Java | UTF-8 | Python | false | false | 2,884 | py | """
Sponge Knowledge Base
Demo
"""
class DependingArgumentsAction(Action):
def onConfigure(self):
self.withLabel("Depending arguments")
self.withArgs([
StringType("continent").withLabel("Continent").withProvided(ProvidedMeta().withValueSet()),
StringType("country").withLabel("Country").withProvided(ProvidedMeta().withValueSet().withDependency("continent")),
StringType("city").withLabel("City").withProvided(ProvidedMeta().withValueSet().withDependency("country")),
StringType("river").withLabel("River").withProvided(ProvidedMeta().withValueSet().withDependency("continent")),
StringType("weather").withLabel("Weather").withProvided(ProvidedMeta().withValueSet())
]).withResult(StringType().withLabel("Sentences"))
self.withFeatures({"icon":"flag", "showClear":True, "showCancel":True})
def onCall(self, continent, country, city, river, weather):
return "There is a city {} in {} in {}. The river {} flows in {}. It's {}.".format(city, country, continent, river, continent, weather.lower())
def onInit(self):
self.countries = {
"Africa":["Nigeria", "Ethiopia", "Egypt"],
"Asia":["China", "India", "Indonesia"],
"Europe":["Russia", "Germany", "Turkey"]
}
self.cities = {
"Nigeria":["Lagos", "Kano", "Ibadan"],
"Ethiopia":["Addis Ababa", "Gondar", "Mek'ele"],
"Egypt":["Cairo", "Alexandria", "Giza"],
"China":["Guangzhou", "Shanghai", "Chongqing"],
"India":["Mumbai", "Delhi", "Bangalore"],
"Indonesia":["Jakarta", "Surabaya", "Medan"],
"Russia":["Moscow", "Saint Petersburg", "Novosibirsk"],
"Germany":["Berlin", "Hamburg", "Munich"],
"Turkey":["Istanbul", "Ankara", "Izmir"]
}
self.rivers = {
"Africa":["Nile", "Chambeshi", "Niger"],
"Asia":["Yangtze", "Yellow River", "Mekong"],
"Europe":["Volga", "Danube", "Dnepr"]
}
def onProvideArgs(self, context):
if "continent" in context.provide:
context.provided["continent"] = ProvidedValue().withValueSet(["Africa", "Asia", "Europe"])
if "country" in context.provide:
context.provided["country"] = ProvidedValue().withValueSet(self.countries.get(context.current["continent"], []))
if "city" in context.provide:
context.provided["city"] = ProvidedValue().withValueSet(self.cities.get(context.current["country"], []))
if "river" in context.provide:
context.provided["river"] = ProvidedValue().withValueSet(self.rivers.get(context.current["continent"], []))
if "weather" in context.provide:
context.provided["weather"] = ProvidedValue().withValueSet(["Sunny", "Cloudy", "Raining", "Snowing"])
| [
"[email protected]"
] | |
b55fd799bada92e8f1cd6d17a26da62618bdf02a | f6a8d93c0b764f84b9e90eaf4415ab09d8060ec8 | /Lists Advanced/the_office.py | de39a3b8b66d23417344eae1ded709f3c883b3b7 | [] | no_license | DimoDimchev/SoftUni-Python-Fundamentals | 90c92f6e8128b62954c4f9c32b01ff4fbb405a02 | 970360dd6ffd54b852946a37d81b5b16248871ec | refs/heads/main | 2023-03-18T17:44:11.856197 | 2021-03-06T12:00:32 | 2021-03-06T12:00:32 | 329,729,960 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | employees_list = [int(x) for x in (input().split(" "))]
HIF = int(input()) # happiness improvement factor
increased_happiness_list = list(map(lambda employee: employee * HIF, employees_list))
average_happiness = sum(increased_happiness_list) / len(increased_happiness_list)
happy_list = list(filter(lambda employee: employee >= average_happiness, increased_happiness_list))
happy_count = len(happy_list)
if happy_count >= len(employees_list)/2:
print(f"Score: {happy_count}/{len(employees_list)}. Employees are happy!")
else:
print(f"Score: {happy_count}/{len(employees_list)}. Employees are not happy!") | [
"[email protected]"
] | |
8bc823c166c4a65c4048e30e2d7438e795a32306 | 018d804d6b53cc544e0adf8c38656bf27152706c | /ucsd_catalog_order.py | ed744750f3c2affb71f43eccdfbf1a19bb0c13f8 | [] | no_license | luisroco/cisco_cloud | c664520eb1021c7b36577a08d23dbf1b8dd7bd75 | 6bbf7c4f0c0af47860170835cfebc924f1b4c867 | refs/heads/master | 2021-01-09T20:11:19.048918 | 2017-02-07T19:06:58 | 2017-02-07T19:06:58 | 81,242,442 | 0 | 0 | null | 2017-02-07T18:53:53 | 2017-02-07T18:53:53 | null | UTF-8 | Python | false | false | 3,208 | py | #! /usr/bin/env python
'''
Command Line Utility to order a Catalog option
'''
import requests
import json
from ucsd_library import catalog_order
if __name__ == '__main__':
import sys
from pprint import pprint
from argparse import ArgumentParser, FileType
p = ArgumentParser()
p.add_argument('catalog', # Name stored in namespace
metavar = 'UCSD Catalog', # Arguement name displayed to user
help = 'The UCSD Catalog to order',
type = str
)
p.add_argument('-v', '--vdc', # Name stored in namespace
metavar = 'UCSD VDC', # Arguement name displayed to user
help = 'The UCSD VDC to place the cVM in',
type = str
)
p.add_argument('-c', '--comment', # Name stored in namespace
metavar = 'UCSD Comment', # Arguement name displayed to user
help = 'The comment to record - default blank',
type = str, default=""
)
p.add_argument('-g', '--group', # Name stored in namespace
metavar = 'UCSD Group', # Arguement name displayed to user
help = 'The group to order on behalf of',
type = str, default=""
)
p.add_argument('-n', '--vmname', # Name stored in namespace
metavar = 'UCSD VMname', # Arguement name displayed to user
help = 'The VM Name or prefix',
type = str, default=""
)
p.add_argument('--vcpus', # Name stored in namespace
metavar = 'vCPU Count', # Arguement name displayed to user
help = 'The number of vCPUs. Only used if vDC allows',
type = str, default="0"
)
p.add_argument('--vram', # Name stored in namespace
metavar = 'vRAM Count', # Arguement name displayed to user
help = 'The amount of vRAM. Only used if vDC allows',
type = str, default="0"
)
p.add_argument('--datastores', # Name stored in namespace
metavar = 'Datastore details', # Arguement name displayed to user
help = 'The datastore details. Only used if vDC allows.',
type = str, default=""
)
p.add_argument('--vnics', # Name stored in namespace
metavar = 'vNIC Details', # Arguement name displayed to user
help = 'The details for vNICS. Only used if vDC allows',
type = str, default=""
)
ns = p.parse_args()
result = catalog_order(ns.catalog, ns.vdc, ns.group, ns.comment, ns.vmname, ns.vcpus, ns.vram, ns.datastores, ns.vnics)
pprint (result)
| [
"[email protected]"
] | |
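A minimal invocation sketch for `ucsd_catalog_order.py` above; the catalog/VDC names, group, and sizing values are placeholders, and only `catalog_order` from the repo's own `ucsd_library` (imported by the script itself) is assumed:

```python
# CLI form (hypothetical names):
#   python ucsd_catalog_order.py "Linux VM" -v Gold-VDC -g DevGroup -n demo-vm
# Library form, mirroring what the __main__ block does:
from ucsd_library import catalog_order  # helper assumed from the same repo

result = catalog_order(
    "Linux VM",    # catalog item to order
    "Gold-VDC",    # target VDC
    "DevGroup",    # group ordered on behalf of
    "smoke test",  # comment
    "demo-vm",     # VM name prefix
    "2", "4096",   # vCPUs / vRAM, only honored if the vDC allows overrides
    "", "",        # datastore / vNIC details left at defaults
)
print(result)
```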
362cdc331020a5268fd371e1eac03259c7a14bba | f3d01659c2a4465cdf7a5903d18058da008f1aac | /src/sentry/models/groupbookmark.py | f6cee4369c180e59d520ca7fe8093daee2869739 | [
"BSD-2-Clause"
] | permissive | Mattlk13/sentry-1 | f81a1e5dc5d02a07e5c6bbcdb5e1ce53f24f53c1 | 19b0870916b80250f3cb69277641bfdd03320415 | refs/heads/master | 2023-08-30T21:49:49.319791 | 2019-07-30T19:23:07 | 2019-07-30T19:23:07 | 81,418,058 | 0 | 1 | BSD-3-Clause | 2023-04-04T00:22:49 | 2017-02-09T06:36:41 | Python | UTF-8 | Python | false | false | 1,064 | py | from __future__ import absolute_import
from django.conf import settings
from django.db import models
from django.utils import timezone
from sentry.db.models import FlexibleForeignKey, Model, BaseManager, sane_repr
class GroupBookmark(Model):
"""
Identifies a bookmark relationship between a user and an
aggregated event (Group).
"""
__core__ = False
project = FlexibleForeignKey('sentry.Project', related_name="bookmark_set")
group = FlexibleForeignKey('sentry.Group', related_name="bookmark_set")
# namespace related_name on User since we don't own the model
user = FlexibleForeignKey(settings.AUTH_USER_MODEL, related_name="sentry_bookmark_set")
date_added = models.DateTimeField(default=timezone.now, null=True)
objects = BaseManager()
class Meta:
app_label = 'sentry'
db_table = 'sentry_groupbookmark'
# composite index includes project for efficient queries
unique_together = (('project', 'user', 'group'), )
__repr__ = sane_repr('project_id', 'group_id', 'user_id')
| [
"[email protected]"
] | |
7c25ff18b341cd872a8a25f0dcfbf1023a780010 | 48ca6f9f041a1e9f563500c8a7fa04dbb18fa949 | /pygears/typing/qround.py | ea79fe0998313278f899a4b014df440c38f3cbb8 | [
"MIT"
] | permissive | bogdanvuk/pygears | 71404e53d4689ec9cdd9db546bfc0f229a7e02da | 705b11ab6de79868b25753fa9d0ce7128791b346 | refs/heads/master | 2023-07-08T11:38:54.625172 | 2022-03-07T12:29:00 | 2022-03-07T12:29:00 | 124,890,922 | 146 | 16 | MIT | 2022-08-15T07:57:08 | 2018-03-12T13:10:06 | Python | UTF-8 | Python | false | false | 1,286 | py | from .cast import value_cast, type_cast
from .fixp import Fixp, Ufixp
from .uint import Uint, Bool, Int, code
def get_out_type(val_type, fract):
if get_cut_bits(val_type, fract) <= 0:
raise TypeError(
f'Cannot qround type "{val_type}" with "{val_type.fract}" '
f'fractional bits, to produce the type with more fractional '
f'bits "fract={fract}"'
)
if fract != 0:
return val_type.base[val_type.integer + 1, val_type.integer + fract + 1]
else:
return (Int if val_type.signed else Uint)[val_type.integer + 1]
def get_cut_bits(val_type, fract):
return val_type.fract - fract
def qround(val, fract=0):
cut_bits = get_cut_bits(type(val), fract)
out_type = get_out_type(type(val), fract)
val_coded = code(val, Int) if type(val).signed else code(val)
res = val_coded + (Bool(1) << (cut_bits - 1))
return out_type.decode(res[cut_bits:])
def qround_even(val, fract=0):
cut_bits = get_cut_bits(type(val), fract)
out_type = get_out_type(type(val), fract)
val_coded = code(val, Int) if type(val).signed else code(val)
round_bit = val_coded[cut_bits]
res = val_coded + Uint([round_bit] + [~round_bit] * (cut_bits - 1))
return out_type.decode(res[cut_bits:])
| [
"[email protected]"
] | |
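The two tie-breaking rules in `qround.py` above, restated with plain integers — a sketch of the arithmetic only, independent of the pygears `Fixp`/`Uint` value types:

```python
# A fixed-point value with cut_bits fractional bits is just an integer v;
# rounding drops the low cut_bits bits after adding an offset.

def qround_int(v, cut_bits):
    # add one half, then truncate -- ties round up, like qround()
    return (v + (1 << (cut_bits - 1))) >> cut_bits

def qround_even_int(v, cut_bits):
    # ties round to the nearest even integer, like qround_even()
    q = v >> cut_bits
    half = 1 << (cut_bits - 1)
    frac = v & ((1 << cut_bits) - 1)
    if frac > half or (frac == half and (q & 1)):
        q += 1
    return q

assert qround_int(0b1010, 2) == 3       # 2.5 -> 3
assert qround_even_int(0b1010, 2) == 2  # 2.5 -> 2 (ties go to even)
```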
673ab9861bcae85a1a55c3ed742550710ec90195 | 99d7a6448a15e7770e3b6f3859da043300097136 | /src/hardware/core/i_core_device.py | 653c0e71ab0666d2da9b754da7fe944a400daac1 | [] | no_license | softtrainee/arlab | 125c5943f83b37bc7431ae985ac7b936e08a8fe4 | b691b6be8214dcb56921c55daed4d009b0b62027 | refs/heads/master | 2020-12-31T07:54:48.447800 | 2013-05-06T02:49:12 | 2013-05-06T02:49:12 | 53,566,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | #===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
#============= enthought library imports =======================
from traits.api import Interface
#============= standard library imports ========================
#============= local library imports ==========================
class ICoreDevice(Interface):
def get(self):
'''
'''
def set(self, *args, **kw):
'''
'''
#============= views ===================================
#============= EOF ====================================
| [
"jirhiker@localhost"
] | jirhiker@localhost |
1e8c67d8c6ce32961276b4ea876788f030175bf7 | d9b2805a8b39f147bd77e35c8e96e0cbd5eaa726 | /flask공부/flaskTest/bin/pip | 7eb65fc06f5c5c461cfe88d74e5a3c61d6549aab | [] | no_license | LeeInHaeng/Study | ca8e3e2d4111dc3f742eefea541a67739d729e75 | 96bdb1d224702cebb8a6de6bbd596b075ee33f7b | refs/heads/master | 2020-03-28T11:03:03.848316 | 2019-04-20T08:33:26 | 2019-04-20T08:33:26 | 148,172,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | #!/home/lih/pythonTest/flaskTest/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
2f1f274dd1ad0310608a42872e14fff7fbf05b1f | c65dfb808e23263b8f3f703a4f31ea7e153b4efd | /tockstats.py | 62fa54d729eb6e180e8686f52ea5921fa2030dd9 | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | 18F/quick-stats | 68fcd3bc9fde390f1a74a370f232dd8086865b07 | 400b48bcebea242ac574dd30f870ed1687c3b863 | refs/heads/master | 2021-01-17T06:08:00.304550 | 2016-07-27T16:09:22 | 2016-07-27T16:09:22 | 64,323,703 | 0 | 2 | null | 2016-08-08T15:25:00 | 2016-07-27T16:15:43 | Python | UTF-8 | Python | false | false | 1,030 | py | """Hours statistics from Tock exports"""
from collections import Counter
from csv import DictReader
import sys
def file_to_counter(filename):
"""Read CSV, convert it to a counter of hours by project"""
counter = Counter()
with open(filename) as csvfile:
reader = DictReader(csvfile)
for row in reader:
counter[row['Project']] += float(row['Number of Hours'])
return counter
def merge_counters(counters):
totals = Counter()
for counter in counters:
for key, value in counter.items():
totals[key] += value
return totals
def print_totals(totals):
total = sum(totals.values())
for project, amount in totals.most_common(20):
print("{}: {}/{} = {}".format(project, amount, total, amount/total))
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python tockstats.py FILE.csv [FILE2.csv ...]")
else:
counters = [file_to_counter(f) for f in sys.argv[1:]]
print_totals(merge_counters(counters))
| [
"[email protected]"
] | |
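A quick in-memory illustration of the merge logic in `tockstats.py` above (project names invented; no CSV files needed):

```python
from collections import Counter

week1 = Counter({"Project X": 12.0, "Overhead": 4.0})
week2 = Counter({"Project X": 8.0, "Project Y": 16.0})

totals = Counter()
for counter in (week1, week2):          # same loop as merge_counters()
    for project, hours in counter.items():
        totals[project] += hours

assert totals["Project X"] == 20.0
print(totals.most_common(3))
```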
ba92d4f9f437fcf74daf2e0b5f28089408f310c4 | aaa06c63f0fba6c5aad5121d83715d0be828ce4e | /OpenStreetMap/models.py | 6746038957e195d82202ad40ba008a0f5667564b | [] | no_license | scotm/Comrade | b023b338f0daf5d083ae37e2e3a73d3d424f8a7c | c7186f00cd20916a78cc2282ea201f440102ebb7 | refs/heads/master | 2020-05-18T06:49:01.411310 | 2014-07-25T08:13:10 | 2014-07-25T08:13:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,658 | py | from django.contrib.gis.db import models
class BaseOsmModel(models.Model):
access = models.TextField(blank=True)
addr_housename = models.TextField(db_column='addr:housename', blank=True)
addr_housenumber = models.TextField(db_column='addr:housenumber', blank=True)
addr_interpolation = models.TextField(db_column='addr:interpolation', blank=True)
admin_level = models.TextField(blank=True)
aerialway = models.TextField(blank=True)
aeroway = models.TextField(blank=True)
amenity = models.TextField(blank=True)
area = models.TextField(blank=True)
barrier = models.TextField(blank=True)
bicycle = models.TextField(blank=True)
boundary = models.TextField(blank=True)
brand = models.TextField(blank=True)
bridge = models.TextField(blank=True)
building = models.TextField(blank=True)
construction = models.TextField(blank=True)
covered = models.TextField(blank=True)
culvert = models.TextField(blank=True)
cutting = models.TextField(blank=True)
denomination = models.TextField(blank=True)
disused = models.TextField(blank=True)
embankment = models.TextField(blank=True)
foot = models.TextField(blank=True)
generator_source = models.TextField(db_column='generator:source', blank=True)
harbour = models.TextField(blank=True)
highway = models.TextField(blank=True)
historic = models.TextField(blank=True)
horse = models.TextField(blank=True)
intermittent = models.TextField(blank=True)
junction = models.TextField(blank=True)
landuse = models.TextField(blank=True)
layer = models.TextField(blank=True)
leisure = models.TextField(blank=True)
lock = models.TextField(blank=True)
man_made = models.TextField(blank=True)
military = models.TextField(blank=True)
motorcar = models.TextField(blank=True)
name = models.TextField(blank=True)
natural = models.TextField(blank=True)
office = models.TextField(blank=True)
oneway = models.TextField(blank=True)
operator = models.TextField(blank=True)
place = models.TextField(blank=True)
population = models.TextField(blank=True)
power = models.TextField(blank=True)
power_source = models.TextField(blank=True)
public_transport = models.TextField(blank=True)
railway = models.TextField(blank=True)
ref = models.TextField(blank=True)
religion = models.TextField(blank=True)
route = models.TextField(blank=True)
service = models.TextField(blank=True)
shop = models.TextField(blank=True)
sport = models.TextField(blank=True)
surface = models.TextField(blank=True)
toll = models.TextField(blank=True)
tourism = models.TextField(blank=True)
tower_type = models.TextField(db_column='tower:type', blank=True)
tunnel = models.TextField(blank=True)
water = models.TextField(blank=True)
waterway = models.TextField(blank=True)
wetland = models.TextField(blank=True)
width = models.TextField(blank=True)
wood = models.TextField(blank=True)
z_order = models.IntegerField(blank=True, null=True)
class Meta:
abstract = True
# Create your models here.
class PlanetOsmLine(BaseOsmModel):
osm_id = models.BigIntegerField(blank=True, primary_key=True)
way_area = models.FloatField(blank=True, null=True)
way = models.LineStringField(srid=900913, blank=True, null=True)
objects = models.GeoManager()
class Meta:
managed = False
db_table = 'planet_osm_line'
class PlanetOsmPoint(BaseOsmModel):
osm_id = models.BigIntegerField(blank=True, primary_key=True)
capital = models.TextField(blank=True)
ele = models.TextField(blank=True)
poi = models.TextField(blank=True)
way = models.PointField(srid=900913, blank=True, null=True)
objects = models.GeoManager()
class Meta:
managed = False
db_table = 'planet_osm_point'
class PlanetOsmPolygon(BaseOsmModel):
osm_id = models.BigIntegerField(blank=True, primary_key=True)
tracktype = models.TextField(blank=True)
way_area = models.FloatField(blank=True, null=True)
way = models.GeometryField(srid=900913, blank=True, null=True)
objects = models.GeoManager()
class Meta:
managed = False
db_table = 'planet_osm_polygon'
class PlanetOsmRoads(BaseOsmModel):
osm_id = models.BigIntegerField(blank=True, primary_key=True)
tracktype = models.TextField(blank=True)
way_area = models.FloatField(blank=True, null=True)
way = models.LineStringField(srid=900913, blank=True, null=True)
objects = models.GeoManager()
class Meta:
managed = False
db_table = 'planet_osm_roads'
| [
"[email protected]"
] | |
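A hypothetical query against the models above, assuming a Django project pointed at the same osm2pgsql database (the tables are declared `managed = False`, so the data must already exist):

```python
from OpenStreetMap.models import PlanetOsmPoint  # import path assumed

# Named pubs stored as point features:
pubs = PlanetOsmPoint.objects.filter(amenity="pub").exclude(name="")
for poi in pubs[:10]:
    # `way` is a GEOS geometry in the 900913 (Web Mercator) projection
    print(poi.osm_id, poi.name, poi.way.wkt)
```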
97fe4b22a0d5bd7822f3f5e943e4fad93fc6b66d | 9de9e636cf845c681fdbf1c6c058cc69d5d05da5 | /IO/Writer.py | 5ed136de1753e4ebcc60d562cf59aef0e316b217 | [] | no_license | dxcv/Portfolio-Management-1 | 4278eebb5c91a3a02ea76398b681ef9dc5beeb1f | 9f188aeab3177d0a13bae32e3a318a4f18642a3c | refs/heads/master | 2020-12-05T21:48:37.690004 | 2019-01-03T01:34:41 | 2019-01-03T01:34:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,408 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 12:51:44 2018
Write to excel function
@author: ACM05
"""
import pandas as pd
import IO.IO_Tools_Func as IO_TF
class Writer():
def __init__( self,
f_name ):
""" Writer object for
defined format
"""
self.f_name = f_name
self.writer = pd.ExcelWriter( f_name,
engine='xlsxwriter',
options={'nan_inf_to_errors': True})
self.book = self.writer.book
""" Loading all format settings
"""
self.header_format = self.book.add_format(IO_TF.get_format())
self.ticker_format = self.book.add_format(IO_TF.get_ticker_format())
self.thousand_format = self.book.add_format(IO_TF.get_num_k_format())
self.bold_format = self.book.add_format(IO_TF.get_format_bold())
self.pct_format = self.book.add_format(IO_TF.get_num_pct_format())
self.BPS_format = self.book.add_format(IO_TF.get_num_BPS_format())
def add_sheet( self,
s_name ):
""" Add sheets into this workbook
Please pre define all worksheet names
"""
workbook = self.writer.book
worksheet = workbook.add_worksheet( s_name )
self.writer.sheets[s_name] = worksheet
def write_ticker( self,
s_name,
i_row,
i_col,
i_string ):
""" Write tickers with defined formatting
"""
worksheet = self.writer.sheets[s_name]
worksheet.write( i_row, i_col,
i_string, self.ticker_format )
def write_raw( self,
s_name,
i_row,
i_col,
i_string ):
""" Write string into given file with sheet name
raw data without design
"""
worksheet = self.writer.sheets[s_name]
worksheet.write( i_row, i_col,
i_string, self.bold_format )
def write_df( self,
i_row,
i_col,
df,
s_name ):
""" Write to excel given
file name and sheet name
"""
""" Step one load formatting
"""
worksheet = self.writer.sheets[s_name]
""" Step two write df into this work sheet
"""
df = df.reset_index()
df = IO_TF.Add_Sum_Row_df(df, "ALL")
df.to_excel( self.writer,
s_name,
startrow = i_row,
startcol = i_col,
index = False )
for col, value in enumerate(df.columns.values):
worksheet.write( i_row, col+i_col,
value, self.header_format )
for col, value in enumerate(df.iloc[-1]):
if value == value:
worksheet.write( i_row+df.shape[0], col+i_col,
value, self.bold_format )
else:
worksheet.write( i_row+df.shape[0], col+i_col,
"", self.bold_format )
def close( self ):
self.writer.save()
| [
"[email protected]"
] | |
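A minimal usage sketch for the `Writer` wrapper above; column names are invented, and the sibling `IO_Tools_Func` module is assumed importable exactly as in the source:

```python
import pandas as pd
from IO.Writer import Writer  # import path assumed from the repo layout

df = pd.DataFrame({"Ticker": ["AAA", "BBB"], "Weight": [0.6, 0.4]})
df = df.set_index("Ticker")

w = Writer("report.xlsx")
w.add_sheet("Summary")                 # sheets must be added before writing
w.write_raw("Summary", 0, 0, "Portfolio weights")
w.write_df(2, 0, df, "Summary")        # also appends a bold "ALL" sum row
w.close()
```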
d0fd9ae97dd8894464641a2387bc5db31a6ea3a3 | 04bd3387ed96a9856c14f76e3022171305203a72 | /GetPopuler.py | 348fc46c31c5691ec2af8fdeaedfdaec2f02e79d | [] | no_license | Yashwant94308/ATG-Selenium | bb3fff41b642951db3b5ab605d524ddcee4794f1 | 39424bee93e49f752105dd35311c2569e1a2de43 | refs/heads/master | 2023-05-26T04:36:58.998935 | 2021-05-29T08:34:26 | 2021-05-29T08:34:26 | 371,921,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | import requests, json
response = requests.get(
'https://www.flickr.com/services/rest/?method=flickr.photos.getPopular&api_key=22a1377a56b4c384b61b723a80a73492'
'&user_id=193065083%40N04&format=json&nojsoncallback=1')
print(response.json())
| [
"[email protected]"
] | |
5ddbda28127ab2fb18249701f06df9c1649219a4 | 8fe781f8ac5b1c1d5214ac5a87c5ad855f791a6d | /src/clean_data.py | 90720e0134fea7776aa816fbd08598bb52e51b1b | [] | no_license | ternaus/kaggle_ultrasound | fabf45b89f5ab0888bb22e9b5205d90b14ce8f06 | 2d688d0cea8e2b1651980e972b1d6400b797c70b | refs/heads/master | 2021-01-11T15:48:19.835115 | 2016-08-20T01:47:37 | 2016-08-20T01:47:37 | 64,818,757 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,349 | py | from __future__ import division
"""
This script makes the training data less noisy: it finds clusters of
near-identical images and assigns the maximal mask to each cluster.
"""
import networkx as nx
import os
import pandas as pd
from tqdm import tqdm
from PIL import Image
import glob
import cv2
import numpy as np
from pylab import *
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import pdist, squareform
image_rows = 420
image_cols = 580
data_path = '../data'
train_data_path = os.path.join(data_path, 'train')
images = os.listdir(train_data_path)
total = len(images) / 2
imgs = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)
imgs_mask = np.ndarray((total, 1, image_rows, image_cols), dtype=np.uint8)
i = 0
print('-'*30)
print('Creating training images...')
print('-'*30)
for image_name in tqdm(images):
if 'mask' in image_name:
continue
image_mask_name = image_name.split('.')[0] + '_mask.tif'
img = cv2.imread(os.path.join(train_data_path, image_name), cv2.IMREAD_GRAYSCALE)
img_mask = cv2.imread(os.path.join(train_data_path, image_mask_name), cv2.IMREAD_GRAYSCALE)
img = np.array([img])
img_mask = np.array([img_mask])
imgs[i] = img
imgs_mask[i] = img_mask
i += 1
print('Loading done.')
train_ids = [x for x in images if 'mask' not in x]
train = pd.DataFrame()
train['subject'] = map(lambda x: int(x.split('_')[0]), train_ids)
train['filename'] = train_ids
train['image_num'] = map(lambda x: int(x.split('.')[0].split('_')[1]), train_ids)
imgs_flat = np.reshape(imgs, (5635, 420*580))
for subject in train['subject'].unique():
a = imgs_flat[(train['subject'] == subject).astype(int).values == 1]
b = squareform(pdist(a))
    graph = []
    n_imgs = b.shape[0]
    for i in range(n_imgs):
        for j in range(i + 1, n_imgs):
            if b[i, j] < 5000:
                graph += [(i, j)]
G = nx.Graph()
G.add_edges_from(graph)
connected_components = list(map(list, nx.connected_component_subgraphs(G)))
    clusters = pd.DataFrame(zip(range(len(connected_components)), connected_components),
                            columns=['cluster_name', 'components'])
temp = pd.DataFrame()
temp['image_num'] = train.loc[(train['subject'] == subject), 'image_num']
temp['subject'] = subject
| [
"[email protected]"
] | |
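The grouping idea from `clean_data.py` in miniature: threshold the pairwise distance matrix and take connected components as clusters of near-duplicate images (toy data; the 0.01 threshold is chosen for it):

```python
import networkx as nx
import numpy as np
from scipy.spatial.distance import pdist, squareform

rng = np.random.RandomState(0)
flat = rng.rand(6, 16)            # six "images", flattened
flat[3] = flat[0] + 1e-4          # make image 3 a near-duplicate of image 0

d = squareform(pdist(flat))
G = nx.Graph()
G.add_nodes_from(range(len(flat)))
i, j = np.where(np.triu(d < 0.01, k=1))
G.add_edges_from(zip(i, j))

print([sorted(c) for c in nx.connected_components(G)])  # 0 and 3 together
```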
ac8bb2b49f625d413a32f8fef679bc03ce802ab6 | ade22d64b99e7306eaeaf06684cc9c4f2d539881 | /oscquintette/tests/v1/test_plugin.py | 36de987851507a942e89237853e783acf38e25f1 | [
"Apache-2.0"
] | permissive | dtroyer/osc-quintette | 59204e4ad2e25be237fb3ec13cbb5087518197d6 | e37585936b1db9e87ab52e11e714afaf167a0039 | refs/heads/master | 2020-04-04T22:57:54.745055 | 2015-01-15T06:42:16 | 2015-01-15T06:42:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,266 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from oscquintette.tests import base
from oscquintette.tests import fakes
from oscquintette.v1 import plugin
# Load the plugin init module for the plugin list and show commands
import oscquintette.plugin
plugin_name = 'oscquintette'
plugin_client = 'oscquintette.plugin'
class FakePluginV1Client(object):
def __init__(self, **kwargs):
#self.servers = mock.Mock()
#self.servers.resource_class = fakes.FakeResource(None, {})
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
class TestPluginV1(base.TestCommand):
def setUp(self):
super(TestPluginV1, self).setUp()
self.app.client_manager.oscquintette = FakePluginV1Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
# Get a shortcut to the Service Catalog Mock
#self.catalog_mock = self.app.client_manager.identity.service_catalog
#self.catalog_mock.reset_mock()
class TestPluginList(TestPluginV1):
def setUp(self):
super(TestPluginList, self).setUp()
self.app.ext_modules = [
sys.modules[plugin_client],
]
# Get the command object to test
self.cmd = plugin.ListPlugin(self.app, None)
def test_plugin_list(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
collist = ('Name', 'Versions', 'Module')
self.assertEqual(columns, collist)
datalist = ((
plugin_name,
oscquintette.plugin.API_VERSIONS.keys(),
plugin_client,
), )
self.assertEqual(tuple(data), datalist)
class TestPluginShow(TestPluginV1):
def setUp(self):
super(TestPluginShow, self).setUp()
self.app.ext_modules = [
sys.modules[plugin_client],
]
# Get the command object to test
self.cmd = plugin.ShowPlugin(self.app, None)
def test_plugin_show(self):
arglist = [
plugin_name,
]
verifylist = [
('name', plugin_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
collist = ('1', 'module', 'name')
self.assertEqual(columns, collist)
datalist = (
oscquintette.plugin.API_VERSIONS['1'],
plugin_client,
plugin_name,
)
self.assertEqual(data, datalist)
| [
"[email protected]"
] | |
13cd0c8c3642448ab20d30e377b9836c2e2b3b0f | 7a4ed01a40e8d79126b26f5e8fca43c8e61e78fd | /Python Built-in Modules/Python Itertools Module/1.Infinite Iterators/1.3.repeat()/1.Example_Of_repeat.py | b1c087d52f7e27726d3536cbc8a9c1f00b84432f | [] | no_license | satyam-seth-learnings/python_learning | 5a7f75bb613dcd7fedc31a1567a434039b9417f8 | 7e76c03e94f5c314dcf1bfae6f26b4a8a6e658da | refs/heads/main | 2023-08-25T14:08:11.423875 | 2021-10-09T13:00:49 | 2021-10-09T13:00:49 | 333,840,032 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | from itertools import repeat
print ("Printing the numbers repeatedly : ")
print (list(repeat(25, 4))) | [
"[email protected]"
] | |
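Beyond plain repetition, `repeat()` also serves as a constant-argument stream for `map()` — a small companion example:

```python
from itertools import repeat

# pair each number with the constant exponent 2
print(list(map(pow, range(5), repeat(2))))  # [0, 1, 4, 9, 16]
```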
dd84a0764d1cd38b85cddd32caf67859a5427497 | 4ac77337083c7fdb28a901831003cfd0e0ef7bf1 | /any_urlfield/models/fields.py | 2d6a67b84cb2f887d661bd1a22600a432304957f | [
"Apache-2.0"
] | permissive | borgstrom/django-any-urlfield | deb6a10b87c26f53bb3ca5085d486238ab6c2a6c | 3f97bfd628a5770268b715ee8f796aaab89cf841 | refs/heads/master | 2020-12-11T02:13:14.725873 | 2013-12-12T21:55:12 | 2013-12-12T21:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,655 | py | """
Custom model fields to link to CMS content.
"""
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.db import models
from any_urlfield.models.values import AnyUrlValue
from any_urlfield.registry import UrlTypeRegistry
class AnyUrlField(models.CharField):
"""
A CharField that can either refer to a CMS page ID, or external URL.
.. figure:: /images/anyurlfield1.*
:width: 363px
:height: 74px
:alt: AnyUrlField, with external URL input.
.. figure:: /images/anyurlfield2.*
:width: 290px
:height: 76px
:alt: AnyUrlField, with internal page input.
By default, the ``AnyUrlField`` only supports linking to external pages.
To add support for your own models (e.g. an ``Article`` model),
include the following code in :file:`models.py`:
.. code-block:: python
from any_urlfield.models import AnyUrlField
AnyUrlField.register_model(Article)
Now, the ``AnyUrlField`` offers users a dropdown field to directly select an article.
By default, it uses a :class:`django.forms.ModelChoiceField` field with a :class:`django.forms.Select` widget
to render the field. This can be customized using the ``form_field`` and ``widget`` parameters:
.. code-block:: python
from any_urlfield.models import AnyUrlField
from any_urlfield.forms import SimpleRawIdWidget
AnyUrlField.register_model(Article, widget=SimpleRawIdWidget(Article))
Now, the ``Article`` model will be displayed as raw input field with a browse button.
"""
__metaclass__ = models.SubfieldBase
_static_registry = UrlTypeRegistry() # Also accessed by AnyUrlValue as internal field.
def __init__(self, *args, **kwargs):
if not kwargs.has_key('max_length'):
kwargs['max_length'] = 300
super(AnyUrlField, self).__init__(*args, **kwargs)
@classmethod
def register_model(cls, ModelClass, form_field=None, widget=None, title=None, prefix=None):
"""
Register a model to use in the URL field.
This function needs to be called once for every model
that should be selectable in the URL field.
:param ModelClass: The model to register.
:param form_field: The form field class used to render the field.
:param widget: The widget class, can be used instead of the form field.
:param title: The title of the model, by default it uses the models ``verbose_name``.
:param prefix: A custom prefix for the model in the serialized database format. By default it uses "appname.modelname".
"""
cls._static_registry.register(ModelClass, form_field, widget, title, prefix)
def formfield(self, **kwargs):
# Associate formfield.
# Import locally to avoid circular references.
from any_urlfield.forms.fields import AnyUrlField as AnyUrlFormField
kwargs['form_class'] = AnyUrlFormField
kwargs['url_type_registry'] = self._static_registry
if kwargs.has_key('widget'):
del kwargs['widget']
return super(AnyUrlField, self).formfield(**kwargs)
def to_python(self, value):
if isinstance(value, AnyUrlValue):
return value
# Convert the string value
if value is None:
return None
return AnyUrlValue.from_db_value(value, self._static_registry)
def get_prep_value(self, value):
if isinstance(value, basestring):
# Happens with south migration
return value
elif value is None:
return None if self.null else ''
else:
# Convert back to string
return value.to_db_value()
def value_to_string(self, obj):
# For dumpdata
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
def validate(self, value, model_instance):
# Final validation of the field, before storing in the DB.
super(AnyUrlField, self).validate(value, model_instance)
if value:
if value.type_prefix == 'http':
validate_url = URLValidator()
validate_url(value.type_value)
elif value.type_value:
if not value.exists():
raise ValidationError(self.error_messages['invalid_choice'] % value.type_value)
# Tell South how to create custom fields
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], [
"^" + __name__.replace(".", "\.") + "\.AnyUrlField",
])
except ImportError:
pass
| [
"[email protected]"
] | |
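A hypothetical model using the field above, in the spirit of the docstring's `Article` example (the `Banner` model is illustrative):

```python
from django.db import models
from any_urlfield.models import AnyUrlField

class Banner(models.Model):
    title = models.CharField(max_length=100)
    link = AnyUrlField("Link", blank=True)

# banner.link holds an AnyUrlValue: either an external URL or, after
# AnyUrlField.register_model(Article), a "<prefix>://<pk>" style reference
# (the prefix defaults to "appname.modelname", per register_model's docs).
```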
b5ffffc779bb9a663018d7d61a89415f02b6c32a | 3f9163cc45befbc20b19a19bf1fd875b483c2965 | /python/paddle/distributed/passes/auto_parallel_gradient_merge.py | bc40dad8ac0d9a69f37ef6d6704bd644b87522cc | [
"Apache-2.0"
] | permissive | forschumi/Paddle | 27926b1ddb76be08dc3f768df787fc9a4078f8e4 | 58d2949d6d2a1689e17527fb501d69c3501adf9f | refs/heads/develop | 2022-07-04T03:07:52.446858 | 2022-06-05T11:44:04 | 2022-06-05T11:44:04 | 83,878,029 | 0 | 0 | null | 2017-03-04T08:11:50 | 2017-03-04T08:11:49 | null | UTF-8 | Python | false | false | 14,250 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from collections import OrderedDict
from typing import List, Tuple, Dict, Any
import paddle
from paddle.framework import core
from paddle.fluid.framework import program_guard, device_guard
from paddle.fluid import unique_name, layers
from paddle.fluid.clip import append_gradient_clip_ops
from .pass_base import PassBase, PassType, register_pass
from paddle.distributed.auto_parallel.utils import set_var_dist_attr
from paddle.distributed.auto_parallel.utils import naive_set_dist_op_attr_for_program_by_mesh_and_mapping
from paddle.distributed.auto_parallel.process_group import get_world_process_group
world_process_group = get_world_process_group()
def _is_the_backward_op(op):
OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
OpRole = core.op_proto_and_checker_maker.OpRole
return OP_ROLE_KEY in op.attr_names and \
int(op.all_attrs()[OP_ROLE_KEY]) & int(OpRole.Backward)
def _is_the_optimizer_op(op):
OP_ROLE_KEY = core.op_proto_and_checker_maker.kOpRoleAttrName()
OpRole = core.op_proto_and_checker_maker.OpRole
return OP_ROLE_KEY in op.attr_names and \
int(op.all_attrs()[OP_ROLE_KEY]) & int(OpRole.Optimize)
def _remove_and_get_optimizer_op(main_program, dist_context):
# 1 create tmp block
# 2 mv optimizer op from global program to tmp block
# 3 del the op from dist_context
from paddle.distributed.fleet.meta_optimizers.common import OpRole
main_block = main_program.global_block()
temp_block = main_program._create_block()
removed_op_idx = []
optimize_ops_desc = []
for idx, op in enumerate(main_block.ops):
if _is_the_optimizer_op(op):
# append optimizer op to tmp block
new_op_desc = temp_block.desc.append_op()
new_op_desc.copy_from(op.desc)
optimize_ops_desc.append(new_op_desc)
removed_op_idx.append(idx)
# del op from dist_context
if dist_context:
dist_context.del_dist_op_for_program(op)
for idx in removed_op_idx[::-1]:
main_block._remove_op(idx)
return optimize_ops_desc
def _remove_op_role_var(param, grad):
op_maker = core.op_proto_and_checker_maker
op = grad.op
if op.has_attr(op_maker.kOpRoleVarAttrName()):
op._remove_attr(op_maker.kOpRoleVarAttrName())
def _get_gm_cond_var(main_program, k_steps, dist_context):
main_block = main_program.global_block()
# Add const var
k_step_var = layers.create_global_var(name="gradient_merge_k",
shape=[1],
value=int(k_steps),
dtype='int32',
persistable=True,
force_cpu=True)
set_var_dist_attr(dist_context, k_step_var, [-1], world_process_group.ranks)
zero_var = layers.create_global_var(name="gradient_merge_zero",
shape=[1],
value=int(0),
dtype='int32',
persistable=True,
force_cpu=True)
set_var_dist_attr(dist_context, zero_var, [-1], world_process_group.ranks)
# Add step var & cond var
step_var = layers.create_global_var(name="gradient_merge_step",
shape=[1],
value=int(0),
dtype='int32',
persistable=True,
force_cpu=True)
set_var_dist_attr(dist_context, step_var, [-1], world_process_group.ranks)
cond_var = main_block.create_var(name="gradient_merge_cond",
shape=[1],
dtype='bool')
set_var_dist_attr(dist_context, cond_var, [-1], world_process_group.ranks)
with device_guard("cpu"):
# step_var = (step_var + 1) % k_step
layers.increment(x=step_var, value=1.0, in_place=True)
elementwise_mod_op = main_block.append_op(type='elementwise_mod',
inputs={
'X': step_var,
'Y': k_step_var
},
outputs={'Out': step_var},
attrs={
'axis': -1,
'use_mkldnn': False
})
naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
elementwise_mod_op, world_process_group.ranks, [-1], dist_context)
# cond_var = (step_var == 0)
equal_op = main_block.append_op(type='equal',
inputs={
'X': step_var,
'Y': zero_var
},
outputs={'Out': cond_var})
naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
equal_op, world_process_group.ranks, [-1], dist_context)
return cond_var
def _append_gradient_merge_backward_op(
main_program, startup_program, params_grads: List[Tuple[Any, Any]],
cond_var_name: str,
dist_context) -> Tuple[List[Tuple[Any, Any]], Dict[str, Any]]:
main_block = main_program.global_block()
startup_block = startup_program.global_block()
# step1: remove grad.op's op_role_var
for param, grad in params_grads:
assert (
param.type != core.VarDesc.VarType.SELECTED_ROWS
), "SELECTED_ROWS is not supported in GradientMergeOptimizer for now"
_remove_op_role_var(param, grad)
param_to_gradient_merge = {}
new_params_to_grads = []
# step2: create gradient_merge var and init with 0
for param, grad in params_grads:
param_name = param.name
param_var = main_block.var(param_name)
assert (param_var is not None)
ref_dist_attr = dist_context.get_tensor_dist_attr_for_program(param_var)
assert ref_dist_attr is not None
gradient_merge_var = main_block.create_var(name=param_name +
"@GRAD@GradientMerge",
shape=param_var.shape,
dtype=param_var.dtype,
persistable=True)
param_to_gradient_merge[param_name] = gradient_merge_var
ref_process_mesh = ref_dist_attr.process_mesh
ref_dims_mapping = ref_dist_attr.dims_mapping
set_var_dist_attr(dist_context, gradient_merge_var, ref_dims_mapping,
ref_process_mesh)
startup_gradient_merge_var = startup_block.create_var(
name=param_name + "@GRAD@GradientMerge",
shape=param_var.shape,
dtype=param_var.dtype,
persistable=True)
startup_block.append_op(type="fill_constant",
outputs={"Out": startup_gradient_merge_var},
attrs={
"shape": param_var.shape,
"dtype": param_var.dtype,
"value": float(0),
})
# grad_merge += grad
new_grad_op = main_block.append_op(type="elementwise_add",
inputs={
'X': grad,
'Y': gradient_merge_var
},
outputs={'Out': gradient_merge_var},
attrs={
'axis': -1,
'use_mkldnn': False
})
new_params_to_grads.append([param, gradient_merge_var])
naive_set_dist_op_attr_for_program_by_mesh_and_mapping(
new_grad_op, ref_process_mesh, ref_dims_mapping, dist_context)
return new_params_to_grads, param_to_gradient_merge
def _create_cond_block_and_update_optimizer(
main_program, cond_var, new_params_to_grads: List[Tuple[Any, Any]],
param_to_gradient_merge: Dict[str, Any], optimize_ops_desc: List[Any],
k_steps, avg):
def true_apply_gradient():
cur_block_idx = main_program.current_block_idx
cur_block = main_program.current_block()
# cur_block's forward_block & backward_block is itself
cur_block._set_forward_block_idx(cur_block_idx)
op_maker = core.op_proto_and_checker_maker
if avg:
for param, new_grad in new_params_to_grads:
# grad /= k_steps
cur_block.append_op(type='scale',
inputs={'X': new_grad},
outputs={'Out': new_grad},
attrs={
'scale': 1.0 / k_steps,
'bias': 0.0,
'bias_after_scale': False
})
new_grad.op._set_attr(op_maker.kOpRoleAttrName(),
op_maker.OpRole.Optimize)
# append optimizer ops
for op_desc in optimize_ops_desc:
new_op_desc = cur_block.desc.append_op()
new_op_desc.copy_from(op_desc)
#update input/output
for input_name in new_op_desc.input_arg_names():
if input_name in new_params_to_grads:
new_op_desc._rename_input(input_name,
new_params_to_grads[input_name])
for output_name in new_op_desc.output_arg_names():
if output_name in new_params_to_grads:
new_op_desc._rename_output(output_name,
new_params_to_grads[output_name])
# remove op_role_var
if new_op_desc.has_attr(op_maker.kOpRoleVarAttrName()):
new_op_desc.remove_attr(op_maker.kOpRoleVarAttrName())
# op's update Grad
if core.grad_var_suffix() in new_op_desc.input_arg_names():
grad_value = new_op_desc.input("Grad")[0]
# TODO FIXME(xym) support fp16
grad_merge_value = grad_value + '@GradientMerge'
new_op_desc.set_input("Grad", [grad_merge_value])
main_program.global_block()._sync_with_cpp()
cur_block._sync_with_cpp()
# clear gradient_merge_vars
for param, new_grad in new_params_to_grads:
layers.fill_constant(shape=new_grad.shape,
dtype=new_grad.dtype,
value=0.0,
out=new_grad)
new_grad.op._set_attr(op_maker.kOpRoleAttrName(),
op_maker.OpRole.Optimize)
layers.cond(cond_var, true_fn=true_apply_gradient, false_fn=None)
def parse_program(main_program, startup_program, params_grads, k_steps, avg,
dist_context):
# 1 create gradient_merge_cond
cond_var = _get_gm_cond_var(main_program, k_steps, dist_context)
# 2 remove optimizer_op from main_program
optimize_ops_desc = _remove_and_get_optimizer_op(main_program, dist_context)
# back to block 0
main_program._rollback()
# 3 append gradient merge backward op to main_program
new_params_to_grads, param_to_gradient_merge = _append_gradient_merge_backward_op(
main_program, startup_program, params_grads, cond_var.name,
dist_context)
# 4 create ConditionalBlock and append gradient merge optimizer ops
_create_cond_block_and_update_optimizer(main_program, cond_var,
new_params_to_grads,
param_to_gradient_merge,
optimize_ops_desc, k_steps, avg)
@register_pass("auto_parallel_gradient_merge_pass")
class GradientMergePass(PassBase):
def __init__(self):
super(GradientMergePass, self).__init__()
self.set_attr("k_steps", -1)
self.set_attr("avg", True)
self.set_attr("inner_optimizer", None)
def _check_self(self):
if self.get_attr("k_steps") < 1:
return False
return True
def _check_conflict(self, other_pass):
return True
def _type(self):
return PassType.COMM_OPT
def _apply_single_impl(self, main_program, startup_program, context):
k_steps = self.get_attr("k_steps", -1)
avg = self.get_attr("avg", False)
dist_context = self.get_attr("dist_context")
params_grads = self.get_attr("params_grads")
with paddle.static.program_guard(main_program, startup_program):
parse_program(main_program, startup_program, params_grads, k_steps,
avg, dist_context)
main_program._sync_with_cpp()
| [
"[email protected]"
] | |
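A framework-agnostic sketch of the schedule this pass wires into the graph: accumulate gradients for `k_steps`, then optionally average and run a single optimizer update (NumPy stands in for the inserted ops):

```python
import numpy as np

k_steps, avg, lr = 4, True, 0.1
param = np.array([1.0])
grad_buf = np.zeros_like(param)   # the @GRAD@GradientMerge buffer
step = 0                          # the gradient_merge_step counter

for micro_step in range(8):
    grad = np.array([0.5])        # stand-in for one backward pass
    grad_buf += grad              # the inserted elementwise_add
    step = (step + 1) % k_steps   # increment + elementwise_mod
    if step == 0:                 # the equal -> cond block
        if avg:
            grad_buf /= k_steps   # the inserted scale op
        param -= lr * grad_buf    # the relocated optimizer ops
        grad_buf[:] = 0.0         # the fill_constant reset
print(param)                      # two averaged updates -> [0.9]
```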
40a3d067d1e3b7a8dc8e422b14866b6111bd77a8 | 3e9ac661325657664f3f7fa26ff2edf5310a8341 | /python/demo100/15.py | 82e514ce0d7a6957012d7aafb52d784906df006e | [] | no_license | JollenWang/study | 47d1c22a6e15cb82d0ecfc6f43e32e3c61fbad36 | 660a47fd60dd1415f71da362232d710b322b932f | refs/heads/master | 2020-06-15T23:53:37.625988 | 2017-04-21T11:18:20 | 2017-04-21T11:18:20 | 75,257,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | #!/usr/bin/python
#-*- coding:utf-8 -*-
#author : Jollen Wang
#date : 2016/05/10
#version: 1.0
'''
Task: use nested conditional expressions to solve this problem: students scoring >= 90 get an A, scores from 60 to 89 get a B, and scores below 60 get a C.
'''
def main():
score = int(raw_input("$>Enter the score:"))
print "grade=",
if score >= 90:
print "A"
elif score >= 60:
print "B"
else:
print "C"
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
f502a1ab4fbd3fb3c402deb9bcb0a91171c04ca9 | 3ea75e35408de10bba250f52120b5424bd50fdd9 | /py/plotSigzFunc.py | 21812552da7cc7dfcdf7898e0e091f012d051cf2 | [] | no_license | jobovy/segue-maps | 9848fe59ee24a11a751df4f8855c40f2480aef23 | ed20b1058a98618700a20da5aa9b5ebd2ea7719b | refs/heads/main | 2022-11-30T15:27:08.079999 | 2016-12-20T04:28:26 | 2016-12-20T04:28:26 | 40,663,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,199 | py | import os, os.path
import math
import numpy
import cPickle as pickle
from matplotlib import pyplot
from optparse import OptionParser
from scipy import optimize, special
from galpy.util import bovy_coords, bovy_plot
def plotSigzFunc(parser):
(options,args)= parser.parse_args()
if len(args) == 0:
parser.print_help()
return
if os.path.exists(args[0]):#Load savefile
savefile= open(args[0],'rb')
params1= pickle.load(savefile)
samples1= pickle.load(savefile)
savefile.close()
else:
print "Need to give filename ..."
if os.path.exists(args[1]):#Load savefile
savefile= open(args[1],'rb')
        params2= pickle.load(savefile)
samples2= pickle.load(savefile)
savefile.close()
else:
print "Need to give filename ..."
#First one
zs= numpy.linspace(0.3,1.2,1001)
xrange= [0.,1.3]
yrange= [0.,60.]
#Now plot the mean and std-dev from the posterior
zmean= numpy.zeros(len(zs))
nsigs= 3
zsigs= numpy.zeros((len(zs),2*nsigs))
fs= numpy.zeros((len(zs),len(samples1)))
ds= zs-0.5
for ii in range(len(samples1)):
thisparams= samples1[ii]
fs[:,ii]= math.exp(thisparams[1])+thisparams[2]*ds+thisparams[3]*ds**2.
#Record mean and std-devs
zmean[:]= numpy.mean(fs,axis=1)
bovy_plot.bovy_print()
xlabel=r'$|z|\ [\mathrm{kpc}]$'
ylabel=r'$\sigma_z\ [\mathrm{km\ s}^{-1}]$'
bovy_plot.bovy_plot(zs,zmean,'k-',xrange=xrange,yrange=yrange,
xlabel=xlabel,
ylabel=ylabel)
for ii in range(nsigs):
for jj in range(len(zs)):
thisf= sorted(fs[jj,:])
thiscut= 0.5*special.erfc((ii+1.)/math.sqrt(2.))
zsigs[jj,2*ii]= thisf[int(math.floor(thiscut*len(samples1)))]
thiscut= 1.-thiscut
zsigs[jj,2*ii+1]= thisf[int(math.floor(thiscut*len(samples1)))]
colord, cc= (1.-0.75)/nsigs, 1
nsigma= nsigs
pyplot.fill_between(zs,zsigs[:,0],zsigs[:,1],color='0.75')
while nsigma > 1:
pyplot.fill_between(zs,zsigs[:,cc+1],zsigs[:,cc-1],
color='%f' % (.75+colord*cc))
pyplot.fill_between(zs,zsigs[:,cc],zsigs[:,cc+2],
color='%f' % (.75+colord*cc))
cc+= 1.
nsigma-= 1
bovy_plot.bovy_plot(zs,zmean,'k-',overplot=True)
#Second one
zmean= numpy.zeros(len(zs))
zsigs= numpy.zeros((len(zs),2*nsigs))
fs= numpy.zeros((len(zs),len(samples2)))
for ii in range(len(samples2)):
thisparams= samples2[ii]
fs[:,ii]= math.exp(thisparams[1])+thisparams[2]*ds+thisparams[3]*ds**2.
#Record mean and std-devs
zmean[:]= numpy.mean(fs,axis=1)
for ii in range(nsigs):
for jj in range(len(zs)):
thisf= sorted(fs[jj,:])
thiscut= 0.5*special.erfc((ii+1.)/math.sqrt(2.))
zsigs[jj,2*ii]= thisf[int(math.ceil(thiscut*len(samples2)))]
thiscut= 1.-thiscut
zsigs[jj,2*ii+1]= thisf[int(math.ceil(thiscut*len(samples2)))]
colord, cc= (1.-0.75)/nsigs, 1
nsigma= nsigs
pyplot.fill_between(zs,zsigs[:,0],zsigs[:,1],color='0.75')
while nsigma > 1:
pyplot.fill_between(zs,zsigs[:,cc+1],zsigs[:,cc-1],
color='%f' % (.75+colord*cc))
pyplot.fill_between(zs,zsigs[:,cc],zsigs[:,cc+2],
color='%f' % (.75+colord*cc))
cc+= 1.
nsigma-= 1
bovy_plot.bovy_plot(zs,zmean,'k-',overplot=True)
bovy_plot.bovy_text(r'$-0.4 < [\mathrm{Fe/H}] < 0.5\,, \ \ -0.25 < [\alpha/\mathrm{Fe}] < 0.2$',bottom_right=True)
bovy_plot.bovy_text(r'$-1.5 < [\mathrm{Fe/H}] < -0.5\,, \ \ 0.25 < [\alpha/\mathrm{Fe}] < 0.5$',top_left=True)
bovy_plot.bovy_end_print(options.plotfile)
return None
def get_options():
usage = "usage: %prog [options] <savefilename>\n\nsavefilename= name of the file that the fit/samples will be saved to"
parser = OptionParser(usage=usage)
parser.add_option("-o",dest='plotfile',
help="Name of file for plot")
return parser
if __name__ == '__main__':
plotSigzFunc(get_options())
| [
"[email protected]"
] | |
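The credible-band construction above, in miniature: evaluate each posterior sample as a curve on the grid, then take per-point tail quantiles (synthetic samples here):

```python
import numpy as np
from scipy import special

rng = np.random.RandomState(0)
samples = rng.randn(200, 3)       # 200 draws of (log-amp, slope, curvature)
zs = np.linspace(0.3, 1.2, 50)
ds = zs - 0.5
fs = np.exp(samples[:, :1]) + samples[:, 1:2] * ds + samples[:, 2:3] * ds ** 2

cut = 0.5 * special.erfc(1.0 / np.sqrt(2.0))      # one-sigma tail mass
lo, hi = np.quantile(fs, [cut, 1.0 - cut], axis=0)
# fill_between(zs, lo, hi), then repeat with the 2- and 3-sigma cuts
```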
28bf8e32b2fc71691571cc473c7d4d6c7cefcf3a | fe98f7502a5724be0ec7ec3ae73ff4703d299d6e | /Neural Tree/data.py | 1b85e274b45f66319d308125e39f23e90bf4375f | [] | no_license | SoumitraAgarwal/BTP | 92ab095aacf3dd374148f40b9e777bb49c4253f1 | 07df960ad7e8680680a9d3494c8a860b394867d1 | refs/heads/master | 2020-03-16T12:39:13.548988 | 2018-05-09T06:09:11 | 2018-05-09T06:09:11 | 132,671,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,513 | py | import random
import math
import pandas as pd
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d, Axes3D
plt.switch_backend('agg')
random.seed(311)
def generate(radius, centre):
alpha = 2 * math.pi * random.random()
r = radius*random.random()
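	# note: r = R*u concentrates samples toward the centre; for a uniform
	# density over the disc one would draw r = R*sqrt(u).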
x = r*math.cos(alpha) + centre[0]
y = r*math.sin(alpha) + centre[1]
return [x,y]
k = 10
n = 600
ranger = 500
C = []
X = []
Y = []
for j in range(k):
T = [random.uniform(0, ranger), random.uniform(0, ranger)]
temp = []
C.append([[j*ranger + random.uniform(0, ranger), ranger*random.uniform(0, k)], 400*random.uniform(0, 1)])
for i in range(n):
temp.append(generate(C[j][1], C[j][0]))
temp = np.asarray(temp)
Y.append(np.matmul(temp,T))
X.append(temp)
X = np.asarray(X)
Y = np.asarray(Y)
fig = plt.figure()
ax1 = fig.add_subplot(111)
colors = cm.rainbow(np.linspace(0, 1, len(Y)))
for i in range(k):
x1, y1 = X[i].T
ax1.scatter( x1,y1, s = 3, marker="o", label='target', color=colors[i])
plt.savefig('Data.png')
X1 = []
X2 = []
for i in range(k):
x1,x2 = X[i].T
X1.append(x1)
X2.append(x2)
X1 = np.asarray(X1)
X2 = np.asarray(X2)
Y = Y.ravel()
X1 = X1.ravel()
X2 = X2.ravel()
X1 = preprocessing.scale(X1)
X2 = preprocessing.scale(X2)
Y = preprocessing.scale(Y)
data = pd.DataFrame(data = {
'X1':X1,
'X2':X2,
'Y' :Y
})
data = data.sample(frac=1).reset_index(drop=True)
data.to_csv('data.csv', index = False) | [
"[email protected]"
] | |
ef3d990361a736c2c8243ef71653066e995e9f04 | a1c7b21d96d6326790831b2b3115fcd2563655a4 | /pylidc/__init__.py | 95c187456f43a5b9aafdc6d2673def316432c058 | [
"MIT"
] | permissive | jovsa/pylidc | 3837b17fbe02bc60817081a349681612f24b2f81 | bd378a60a4b0e6dfb569afb25c3dfcbbcd169550 | refs/heads/master | 2021-06-13T02:45:41.359793 | 2017-03-10T23:14:57 | 2017-03-10T23:14:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,068 | py | """
--------------------------------------------------------
Author: Matt Hancock, [email protected]
--------------------------------------------------------
This python module implements an (ORM) object relational mapping
to an sqlite database containing the annotation information from
the XML files provided by the LIDC dataset. The purpose of this
module is to make for easier data querying and to include
functional aspects of the data models in addition to pure
attribute information, e.g., computing nodule centroids from
contour attribtues.
The ORM is implemented using sqlalchemy. There are three data models:
Scan, Annotation, and Contour
The relationships are "one to many" for each model going left
to right, i.e., scans have many annotations and annotations
have many contours.
For more information, see the model classes themselves.
"""
from __future__ import print_function as _pf
__version__ = '0.1.2'
# Hidden stuff.
import os as _os
import pkg_resources as _pr
from sqlalchemy import create_engine as _create_engine
from sqlalchemy.orm import sessionmaker as _sessionmaker
_dbpath = _pr.resource_filename('pylidc', 'pylidc.sqlite')
_engine = _create_engine('sqlite:///'+_dbpath)
_session = _sessionmaker(bind=_engine)()
# Public stuff.
from .Scan import Scan
from .Scan import dicompath
from .Annotation import Annotation
from .Contour import Contour
def query(*args):
"""
Wraps the sqlalchemy session object. Some example usage:
>>> import pylidc as pl
>>> qu = pl.query(pl.Scan).filter(pl.Scan.slice_thickness <= 1.)
>>> print qu.count()
>>> # => 97
>>> scan = qu.first()
>>> print len(scan.annotations)
>>> # => 11
>>> qu = pl.query(pl.Annotation).filter((pl.Annotation.malignancy > 3), (pl.Annotation.spiculation < 3))
>>> print qu.count()
>>> # => 1083
>>> annotation = qu.first()
>>> print annotation.estimate_volume()
>>> # => 5230.33874999
"""
return _session.query(*args)
| [
"[email protected]"
] | |
8c4a5643db05aa582d6890691f9259ba39448975 | 455ce0c304e4a9f080862cb8459066ac741f3d38 | /day06/funcAsVar.py | abdfe02a2e1454e4e58302d9b697a4725542a345 | [] | no_license | venkatram64/python3_work | 23a835b0f8f690ca167e74bbbe94f46e3bd8c99f | 754f42f6fa2be4446264a8e2532abd55213af4df | refs/heads/master | 2021-06-15T22:41:31.031925 | 2021-02-16T04:56:50 | 2021-02-16T04:56:50 | 153,217,220 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | def square(num):
return num * num
s = square # function is assigned to a variable, and later can be executed
print(s(5)) | [
"[email protected]"
] | |
d476c12d19016fedb10bf55bbe245feb207b93ac | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_Lag1Trend_NoCycle_NoAR.py | ff7ef11eee723d83fe871324617d9665f298f2bc | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 154 | py | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['Lag1Trend'] , ['NoCycle'] , ['NoAR'] ); | [
"[email protected]"
] | |
5be51dbb88aa58f10058062d78de161544e789e6 | cc2fcc1a0c5ea9789f98ec97614d7b25b03ba101 | /st2common/tests/unit/test_configs_registrar.py | e23dfe74262ed4c55b95ba299c1a0f50fbeb08c9 | [
"Apache-2.0"
] | permissive | Junsheng-Wu/st2 | 6451808da7de84798641882ca202c3d1688f8ba8 | c3cdf657f7008095f3c68b4132b9fe76d2f52d81 | refs/heads/master | 2022-04-30T21:32:44.039258 | 2020-03-03T07:03:57 | 2020-03-03T07:03:57 | 244,301,363 | 0 | 0 | Apache-2.0 | 2022-03-29T22:04:26 | 2020-03-02T06:53:58 | Python | UTF-8 | Python | false | false | 4,577 | py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from st2common.content import utils as content_utils
from st2common.bootstrap.configsregistrar import ConfigsRegistrar
from st2common.persistence.pack import Pack
from st2common.persistence.pack import Config
from st2tests.api import SUPER_SECRET_PARAMETER
from st2tests.base import CleanDbTestCase
from st2tests import fixturesloader
__all__ = [
'ConfigsRegistrarTestCase'
]
PACK_1_PATH = os.path.join(fixturesloader.get_fixtures_packs_base_path(), 'dummy_pack_1')
PACK_6_PATH = os.path.join(fixturesloader.get_fixtures_packs_base_path(), 'dummy_pack_6')
class ConfigsRegistrarTestCase(CleanDbTestCase):
def test_register_configs_for_all_packs(self):
# Verify DB is empty
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_dbs), 0)
registrar = ConfigsRegistrar(use_pack_cache=False)
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_1': PACK_1_PATH}
packs_base_paths = content_utils.get_packs_base_paths()
registrar.register_from_packs(base_dirs=packs_base_paths)
# Verify pack and schema have been registered
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 1)
self.assertEqual(len(config_dbs), 1)
config_db = config_dbs[0]
self.assertEqual(config_db.values['api_key'], '{{st2kv.user.api_key}}')
self.assertEqual(config_db.values['api_secret'], SUPER_SECRET_PARAMETER)
self.assertEqual(config_db.values['region'], 'us-west-1')
def test_register_all_configs_invalid_config_no_config_schema(self):
        # validate_configs is disabled and no ConfigSchema exists for this pack,
        # so registration should proceed without validation
# Verify DB is empty
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_dbs), 0)
registrar = ConfigsRegistrar(use_pack_cache=False, validate_configs=False)
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_6': PACK_6_PATH}
packs_base_paths = content_utils.get_packs_base_paths()
registrar.register_from_packs(base_dirs=packs_base_paths)
# Verify pack and schema have been registered
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 1)
self.assertEqual(len(config_dbs), 1)
def test_register_all_configs_with_config_schema_validation_validation_failure(self):
# Verify DB is empty
pack_dbs = Pack.get_all()
config_dbs = Config.get_all()
self.assertEqual(len(pack_dbs), 0)
self.assertEqual(len(config_dbs), 0)
registrar = ConfigsRegistrar(use_pack_cache=False, fail_on_failure=True,
validate_configs=True)
registrar._pack_loader.get_packs = mock.Mock()
registrar._pack_loader.get_packs.return_value = {'dummy_pack_6': PACK_6_PATH}
# Register ConfigSchema for pack
registrar._register_pack_db = mock.Mock()
registrar._register_pack(pack_name='dummy_pack_5', pack_dir=PACK_6_PATH)
packs_base_paths = content_utils.get_packs_base_paths()
expected_msg = ('Failed validating attribute "regions" in config for pack "dummy_pack_6" '
'(.*?): 1000 is not of type u\'array\'')
self.assertRaisesRegexp(ValueError, expected_msg,
registrar.register_from_packs,
base_dirs=packs_base_paths)
| [
"[email protected]"
] | |
196964f8812712d14c761353096cc995312f630d | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/containsDuplicate_20200907093833.py | a13711fe822495f778880bcdac9e84cd2d398e7d | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | def duplicate(nums,k,t):
    # Assumed intent (LeetCode 220, "Contains Duplicate III"): return True if
    # there are indices i < j with j - i <= k and |nums[i] - nums[j]| <= t.
    for i in range(len(nums)):
        for j in range(i + 1, min(i + k + 1, len(nums))):
            if abs(nums[i] - nums[j]) <= t:
                return True
    return False
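# Quick check with assumed example values: nums=[1,2,3,1], k=3, t=0 -> True
print(duplicate([1, 2, 3, 1], 3, 0))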
| [
"[email protected]"
] | |
286901b3a6a7ec15eaad0c29b53851f0e00a3e81 | 599db5e2e3c4d6c296de25a8ef8e95a862df032b | /OpenAI Gym/(clear)breakout-ramdeterministic-v4/model6/14000epi/modelPlay.py | 7e5537269ecb2dd11378115b992616635625fad7 | [] | no_license | wantyouring/ML-practice-code | bb7577e99f22587c7ca016c1c4d067175e5ce9d9 | a3efbb4d252bacc831c5d7a01daf6476e7a755e4 | refs/heads/master | 2020-05-14T17:45:17.735081 | 2019-06-30T14:43:25 | 2019-06-30T14:43:25 | 181,898,074 | 0 | 1 | null | 2019-06-15T05:52:44 | 2019-04-17T13:31:24 | Python | UTF-8 | Python | false | false | 3,233 | py | # 학습모델 play. random action과 학습model 비교.
import gym
import pylab
import numpy as np
import gym.wrappers as wrappers
from doubleDQN2 import DoubleDQNAgent
EPISODES = 1 # 처음은 random으로 수행, 나중에는 학습model로 수행
global_step = 0
def change_action(action):
if action == 0:
return 0
elif action == 1:
return 2
elif action == 2:
return 3
elif action == 3:
return 3
if __name__ == "__main__":
env = gym.make('Breakout-ramDeterministic-v4')
env = wrappers.Monitor(env,"./results",force = True)
state_size = 128
action_size = 3
agent = DoubleDQNAgent(state_size, action_size)
agent.load_model()
agent.epsilon = -1 # Q value에 의해서만 움직이게끔.
agent.render = True
scores, episodes = [], []
random_success_cnt = 0
model_success_cnt = 0
# 랜덤액션 진행시
for e in range(EPISODES):
done = False
score = 0
life = 5
env.reset()
for i in range(5):
env.step(1) # 시작 action.
while not done:
action = env.action_space.sample()
_, reward, done, info = env.step(change_action(action))
score += reward
if done:
if score > 0:
random_success_cnt += 1
print("episode:", e, " score:", score)
scores.append(score)
episodes.append(e)
break
if life != info['ale.lives']:
life = info['ale.lives']
for i in range(5):
state, _, _, _ = env.step(1)
state = np.reshape(state, [1, 128])
# 학습모델 play
for e in range(EPISODES,EPISODES*2):
done = False
life = 5
score = 0
state = env.reset()
for i in range(5):
state, _, _, _ = env.step(1) # 시작 action.
state = np.reshape(state,[1,128])
while not done:
global_step += 1
if agent.render:
env.render()
# 현재 s에서 a취해 s`, r, done 정보 얻기.
action = agent.get_action(state)
next_state, reward, done, info = env.step(change_action(action))
score += reward
state = next_state
state = np.reshape(state, [1, 128])
if done:
if score > 0 :
model_success_cnt += 1
print("episode:", e, " score:", score)
scores.append(score)
episodes.append(e)
if e % 5 == 0:
pylab.plot(episodes, scores)
pylab.savefig("./play_score.png")
break
if life != info['ale.lives']:
life = info['ale.lives']
for i in range(5):
state, _, _, _ = env.step(1)
state = np.reshape(state, [1, 128])
env.close()
print("random : {}/{} success. rate : {}".format(random_success_cnt,EPISODES,random_success_cnt/EPISODES))
print("model : {}/{} success. rate : {}".format(model_success_cnt,EPISODES,model_success_cnt/EPISODES)) | [
"[email protected]"
] | |
4a18ef0719c0058c463c0200d66e76acbe62ccfd | e49b654d3db99773390c5b9686df9c99fbf92b2a | /linked_lists/is_palindrome.py | 703a4960dc6cfbe7e741efde1dd056a7ede1b2cc | [] | no_license | hao89/diary_of_programming_puzzles | 467e8264d0ad38768ba5ac3cfb45301293d79943 | 0e05d3716f28075f99bbd7b433d16a383209e57c | refs/heads/master | 2021-01-16T00:49:38.956102 | 2015-08-25T13:44:53 | 2015-08-25T13:44:53 | 41,692,587 | 1 | 0 | null | 2015-08-31T18:20:38 | 2015-08-31T18:20:36 | Python | UTF-8 | Python | false | false | 831 | py | """
Implement a function to check if a linked list is a palindrome
"""
import random
from linked_list import LinkedListNode
def is_palindrome1(linked_list):
    # reverse and compare (assumed completion of this stub)
    values = []
    while linked_list is not None:
        values.append(linked_list.data)
        linked_list = linked_list.next
    return values == values[::-1]
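# Hypothetical pairing with the builder defined below:
#   print(is_palindrome1(build_palindrome_list()))  # expected: True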
def build_palindrome_list():
root = LinkedListNode(5)
previous_node = root
for i in range(0, 2):
new_node = LinkedListNode(random.randint(0, 9))
previous_node.next = new_node
previous_node = new_node
stack = []
current_node = root
while current_node.next != None: # all but the last one
stack.append(current_node.data)
current_node = current_node.next
while len(stack) != 0:
data = stack.pop()
new_node = LinkedListNode(data)
previous_node.next = new_node
previous_node = new_node
return root
def build_random_list():
pass | [
"[email protected]"
] | |
07f54965bf19a638d7de2870978fd0fccb3c3b59 | 635670997e25d7fd578701995fe0422dd5671528 | /src/models_VAE/best_models/vae/encoder.py | 48db109dad68d468093e78e6d9e4cbd35e10fc19 | [] | no_license | QuangNamVu/thesis | 5126c0281d93e7a5c2c3a5784363d7f6c6baadfd | 01a404de2dfb70f13f3e61a9a8f3b73c88d93502 | refs/heads/master | 2022-12-24T10:08:33.472729 | 2019-12-21T16:27:07 | 2019-12-21T16:27:07 | 174,741,015 | 0 | 3 | null | 2022-12-14T06:56:36 | 2019-03-09T20:09:03 | Jupyter Notebook | UTF-8 | Python | false | false | 1,892 | py | import tensorflow as tf
from tensorpack import *
from tf_utils.ar_layers import *
from tf_utils.common import *
def encoder(self, x):
is_training = get_current_tower_context().is_training
# [M, T, D] => [M, T, f0]
fc_l1 = gaussian_dense(name='encode_l1', inputs=x, out_C=self.hps.f[0])
activate_l1 = tf.nn.elu(fc_l1)
out_l1 = tf.layers.dropout(inputs=activate_l1, rate=self.hps.dropout_rate, training=is_training)
# [M, T, f0] => [M, T, f1]
fc_l2 = gaussian_dense(name='encode_l2', inputs=out_l1, out_C=self.hps.f[0])
activate_l2 = tf.nn.tanh(fc_l2)
out_l2 = tf.layers.dropout(inputs=activate_l2, rate=self.hps.dropout_rate, training=is_training)
cell = tf.nn.rnn_cell.LSTMCell(num_units=self.hps.lstm_units, state_is_tuple=True)
# z: [M, T, o]
# h: [M, o]
# c: [M, o]
# [M, T, f1] => [M, T, o]
outputs, state = tf.nn.dynamic_rnn(cell, out_l2, sequence_length=[self.hps.T] * self.hps.M, dtype=tf.float32,
parallel_iterations=64)
# [M, T, o] => [M, T * o] => [M, n_z]
next_seq = tf.reshape(outputs, shape=[-1, self.hps.T * self.hps.lstm_units])
state_c = state.c
if self.hps.is_VDE:
# z_lst = tf.contrib.layers.fully_connected(inputs=next_seq, out_C=2 * self.hps.n_z)
z_lst = gaussian_dense(name='encode_fc1', inputs=next_seq, out_C=2 * self.hps.n_z)
else:
rs_l3 = tf.reshape(out_l2, [-1, self.hps.T * self.hps.f[1]])
z_lst = gaussian_dense(name='encode_fc2', inputs=rs_l3, out_C=2 * self.hps.n_z)
z_mu, z_std1 = split(z_lst, split_dim=1, split_sizes=[self.hps.n_z, self.hps.n_z])
z_std = 1e-10 + tf.nn.softplus(z_std1)
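    # Reparameterization note (added): sampling z = z_mu + eps * z_std with
    # eps ~ N(0, 1) keeps z differentiable w.r.t. z_mu and z_std, which is
    # what lets the VAE branch below train by backprop.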
if self.hps.is_VAE:
noise = tf.random_normal(shape=tf.shape(z_mu), mean=0.0, stddev=1.0)
z = z_mu + noise * z_std
else:
z = z_mu
return z_mu, z_std, z, state_c
| [
"[email protected]"
] | |
9a985c189a5328bcc149258b1faf5e97c967615f | ab66cdd15bb1ad964e21ce236e3e524b1eebb58a | /build/toolchain/win/setup_toolchain.py | 66e840bb9bee4e29ff2d61f9afdf05d8d839e3f3 | [
"BSD-3-Clause"
] | permissive | tainyiPeter/build-gn | 729659f3af90318d8ca80caa0a2f72d9bbfc595e | 59376ea32237f28525173e25fe1ce4a5c19ad659 | refs/heads/master | 2020-04-19T09:52:23.499802 | 2018-12-18T10:43:26 | 2018-12-18T10:56:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,687 | py | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Copies the given "win tool" (which the toolchain uses to wrap compiler
# invocations) and the environment blocks for the 32-bit and 64-bit builds on
# Windows to the build directory.
#
# The arguments are the visual studio install location and the location of the
# win tool. The script assumes that the root build directory is the current dir
# and the files will be written to the current directory.
import errno
import json
import os
import re
import subprocess
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
import gn_helpers
SCRIPT_DIR = os.path.dirname(__file__)
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
# This occasionally happens and leads to misleading SYSTEMROOT error messages
# if not caught here.
if output_of_set.count('=') == 0:
raise Exception('Invalid output_of_set. Value is:\n%s' % output_of_set)
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules and actions in Chromium rely on python being in the
# path. Add the path to this python here so that if it's not in the
# path when ninja is run later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting.lower()
break
if sys.platform in ('win32', 'cygwin'):
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _DetectVisualStudioPath():
"""Return path to the GYP_MSVS_VERSION of Visual Studio.
"""
# Use the code in build/vs_toolchain.py to avoid duplicating code.
chromium_dir = os.path.abspath(os.path.join(SCRIPT_DIR, '..', '..', '..'))
sys.path.append(os.path.join(chromium_dir, 'build'))
import vs_toolchain
return vs_toolchain.DetectVisualStudioPath()
def _LoadEnvFromBat(args):
"""Given a bat command, runs it and returns env vars set by it."""
args = args[:]
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
if popen.returncode != 0:
raise Exception('"%s" failed with error %d' % (args, popen.returncode))
return variables
def _LoadToolchainEnv(cpu, sdk_dir):
"""Returns a dictionary with environment variables that must be set while
running binaries from the toolchain (e.g. INCLUDE and PATH for cl.exe)."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |cpu| should be either
# 'x86' or 'x64'.
assert cpu in ('x86', 'x64')
# PATCH(build-gn): Do not assume depot_tools by default.
if bool(int(os.environ.get('DEPOT_TOOLS_WIN_TOOLCHAIN', 0))) and sdk_dir:
# Load environment from json file.
env = os.path.normpath(os.path.join(sdk_dir, 'bin/SetEnv.%s.json' % cpu))
env = json.load(open(env))['env']
for k in env:
entries = [os.path.join(*([os.path.join(sdk_dir, 'bin')] + e))
for e in env[k]]
# clang-cl wants INCLUDE to be ;-separated even on non-Windows,
# lld-link wants LIB to be ;-separated even on non-Windows. Path gets :.
# The separator for INCLUDE here must match the one used in main() below.
sep = os.pathsep if k == 'PATH' else ';'
env[k] = sep.join(entries)
# PATH is a bit of a special case, it's in addition to the current PATH.
env['PATH'] = env['PATH'] + os.pathsep + os.environ['PATH']
# Augment with the current env to pick up TEMP and friends.
for k in os.environ:
if k not in env:
env[k] = os.environ[k]
varlines = []
for k in sorted(env.keys()):
varlines.append('%s=%s' % (str(k), str(env[k])))
variables = '\n'.join(varlines)
# Check that the json file contained the same environment as the .cmd file.
if sys.platform in ('win32', 'cygwin'):
script = os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.cmd'))
assert _ExtractImportantEnvironment(variables) == \
_ExtractImportantEnvironment(_LoadEnvFromBat([script, '/' + cpu]))
else:
if 'GYP_MSVS_OVERRIDE_PATH' not in os.environ:
os.environ['GYP_MSVS_OVERRIDE_PATH'] = _DetectVisualStudioPath()
# We only support x64-hosted tools.
script_path = os.path.normpath(os.path.join(
os.environ['GYP_MSVS_OVERRIDE_PATH'],
'VC/vcvarsall.bat'))
if not os.path.exists(script_path):
# vcvarsall.bat for VS 2017 fails if run after running vcvarsall.bat from
# VS 2013 or VS 2015. Fix this by clearing the vsinstalldir environment
# variable.
if 'VSINSTALLDIR' in os.environ:
del os.environ['VSINSTALLDIR']
other_path = os.path.normpath(os.path.join(
os.environ['GYP_MSVS_OVERRIDE_PATH'],
'VC/Auxiliary/Build/vcvarsall.bat'))
if not os.path.exists(other_path):
raise Exception('%s is missing - make sure VC++ tools are installed.' %
script_path)
script_path = other_path
# Chromium requires the 10.0.15063.468 SDK - previous versions don't have
# all of the required declarations and 10.0.16299.0 has some
# incompatibilities (crbug.com/773476).
args = [script_path, 'amd64_x86' if cpu == 'x86' else 'amd64',
'10.0.15063.0']
variables = _LoadEnvFromBat(args)
return _ExtractImportantEnvironment(variables)
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
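# Example (added sketch): _FormatAsEnvironmentBlock({'A': '1', 'B': '2'})
# returns 'A=1\x00B=2\x00\x00' (key order follows dict iteration order).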
def main():
if len(sys.argv) != 6:
print('Usage setup_toolchain.py '
'<visual studio path> <win sdk path> '
'<runtime dirs> <target_cpu> <goma_disabled>')
sys.exit(2)
win_sdk_path = sys.argv[2]
runtime_dirs = sys.argv[3]
target_cpu = sys.argv[4]
goma_disabled = sys.argv[5]
cpus = ('x86', 'x64')
assert target_cpu in cpus
vc_bin_dir = ''
include = ''
# TODO(scottmg|goma): Do we need an equivalent of
# ninja_use_custom_environment_files?
for cpu in cpus:
# Extract environment variables for subprocesses.
env = _LoadToolchainEnv(cpu, win_sdk_path)
env['PATH'] = runtime_dirs + os.pathsep + env['PATH']
env['GOMA_DISABLED'] = goma_disabled
if cpu == target_cpu:
for path in env['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, 'cl.exe')):
vc_bin_dir = os.path.realpath(path)
break
# The separator for INCLUDE here must match the one used in
# _LoadToolchainEnv() above.
include = [p.replace('"', r'\"') for p in env['INCLUDE'].split(';') if p]
include_I = ' '.join(['"/I' + i + '"' for i in include])
include_imsvc = ' '.join(['"-imsvc' + i + '"' for i in include])
env_block = _FormatAsEnvironmentBlock(env)
with open('environment.' + cpu, 'wb') as f:
f.write(env_block)
# Create a store app version of the environment.
if 'LIB' in env:
env['LIB'] = env['LIB'] .replace(r'\VC\LIB', r'\VC\LIB\STORE')
if 'LIBPATH' in env:
env['LIBPATH'] = env['LIBPATH'].replace(r'\VC\LIB', r'\VC\LIB\STORE')
env_block = _FormatAsEnvironmentBlock(env)
with open('environment.winrt_' + cpu, 'wb') as f:
f.write(env_block)
assert vc_bin_dir
print 'vc_bin_dir = ' + gn_helpers.ToGNString(vc_bin_dir)
assert include_I
print 'include_flags_I = ' + gn_helpers.ToGNString(include_I)
assert include_imsvc
print 'include_flags_imsvc = ' + gn_helpers.ToGNString(include_imsvc)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a96bac8257857719d4e612c36e2dc88f720a5690 | ad212b92beac17c4d061848c1dcd443d02a168c8 | /monthly_challenge/202008/19_goat_latin.py | e866084a3ba4569f5efdb64fd6aa23d3416e864d | [] | no_license | 21eleven/leetcode-solutions | 5ec97e4391c8ebaa77f4404a1155f3ef464953b3 | 35c91e6f5f5ed348186b8641e6fc49c825322d32 | refs/heads/master | 2023-03-03T10:22:41.726612 | 2021-02-13T21:02:13 | 2021-02-13T21:02:13 | 260,374,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | """
A sentence S is given, composed of words separated by spaces. Each word consists of lowercase and uppercase letters only.
We would like to convert the sentence to "Goat Latin" (a made-up language similar to Pig Latin.)
The rules of Goat Latin are as follows:
If a word begins with a vowel (a, e, i, o, or u), append "ma" to the end of the word.
For example, the word 'apple' becomes 'applema'.
If a word begins with a consonant (i.e. not a vowel), remove the first letter and append it to the end, then add "ma".
For example, the word "goat" becomes "oatgma".
Add one letter 'a' to the end of each word per its word index in the sentence, starting with 1.
For example, the first word gets "a" added to the end, the second word gets "aa" added to the end and so on.
Return the final sentence representing the conversion from S to Goat Latin.
Example 1:
Input: "I speak Goat Latin"
Output: "Imaa peaksmaaa oatGmaaaa atinLmaaaaa"
"""
class Solution:
def toGoatLatin(self, S: str) -> str:
words = S.split()
vowels = set(["a", "e", "i", "o", "u"])
goat = []
idx = 2
a = 'a'
for w in words:
if w[0].lower() in vowels:
goat.append(f"{w}m{a*idx}")
else:
goat.append(f"{w[1:]}{w[0]}m{a*idx}")
idx += 1
return ' '.join(goat)
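# Quick check against the docstring example (added sketch):
if __name__ == "__main__":
    assert Solution().toGoatLatin("I speak Goat Latin") == \
        "Imaa peaksmaaa oatGmaaaa atinLmaaaaa"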
| [
"[email protected]"
] | |
2caa36497292851a2824c6d22461f476df9e29db | 8d113f0a487dab55c733ff63da5bba9e20f69b69 | /config/settings.py | bca76446204a4d8a3e3373d62517eb9c85a8dc70 | [
"MIT"
] | permissive | AktanKasymaliev/django-video-hosting | c33d341a7709a21869c44a15eb6a3b6e9a783f54 | b201ed3421025da22b43405452bde617ea26a90f | refs/heads/main | 2023-07-18T08:10:00.289537 | 2021-09-02T20:15:41 | 2021-09-02T20:15:41 | 387,730,116 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,039 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("DJANGO_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
# Parse the flag explicitly instead of calling eval() on untrusted env input
# (assumes DJANGO_DEBUG holds "True" or "False").
DEBUG = os.environ.get("DJANGO_DEBUG", "False") == "True"
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
"videoApp",
"channels",
"django_cleanup",
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
ASGI_APPLICATION = 'config.asgi.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
'hosts': [('127.0.0.1', 6379)],
}
}
}
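# Note (added): the Redis channel layer above assumes a Redis server on
# 127.0.0.1:6379; for local tests without Redis, Django Channels also ships
# 'channels.layers.InMemoryChannelLayer'.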
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get("DATABASE_NAME"),
'USER': os.environ.get("DATABASE_USER"),
'PASSWORD': os.environ.get("DATABASE_PASSW"),
'HOST': os.environ.get("DATABASE_HOST"),
'PORT': os.environ.get("DATABASE_PORT"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_DIR = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [STATIC_DIR]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"[email protected]"
] | |
b5915b18fbbba281d99d4d188ad3de150336d99e | dbaad22aa8aa6f0ebdeacfbe9588b281e4e2a106 | /0423 Pandas/1-複習-pandas/Pandas1_csv_-plot1.py | 58fb6438f52cd86d1347b1d57e7b87de2c826879 | [
"MIT"
] | permissive | ccrain78990s/Python-Exercise | b4ecec6a653afd90de855a64fbf587032705fa8f | a9d09d5f3484efc2b9d9a53b71307257a51be160 | refs/heads/main | 2023-07-18T08:31:39.557299 | 2021-09-06T15:26:19 | 2021-09-06T15:26:19 | 357,761,471 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,289 | py | #!/usr/bin/env python
# -*- coding=utf-8 -*-
__author__ = "Chen"
# 0423 practice
"""
資料出處:
公開發行公司股票發行概況統計表_New
https://data.gov.tw/dataset/103533
主要欄位說明:
年月、上市家數、上市資本額_十億元、上市成長率、上市面值_十億元、上市市值_十億元、
上櫃家數、上櫃資本額_十億元、上櫃成長率、上櫃面值_十億元、上櫃市值_十億元、
未上市上櫃家數、未上市上櫃資本額_十億元、公告日期
"""
import pandas as pd
df = pd.read_csv('每月_公開發行公司股票發行概況統計表.csv')
print(df.head())
print(type(df))
# inspect dataframe info
print("===== data info =====")
print(df.info())
# data dimensions
print("===== data shape =====")
print(df.shape)
print("===== column names =====")
print(df.columns)
print("===== index =====")
print(df.index)
print("===== summary statistics =====")
print(df.describe())
print("**小練習***************************************************************")
print("========1.把 [年月,上市家數,上市資本額_十億元] 印出來========")
print(df[['年月','上市家數','上市資本額_十億元']])
#df2=df[['年月','上市家數','上市資本額_十億元']]
print("========2.找出 2019 年的資料========")
print(df[(df['年月']<=201999) & (df['年月']>=201900) ])
print("========3.找出 上市成長率 最高的年月========")
#print(df.上市成長率.max())
#print(df.上市成長率.idxmax())
max1=df.上市成長率.idxmax()
print(df[max1:max1+1])
print("========4.找出 2019 年的[上市成長率] 最高的月份========")
df2=df[(df['年月']<=201999) & (df['年月']>=201900) ]
max2=df2.上市成長率.idxmax()
print(df[max2:max2+1])
print("========5.找出 2018 年的資料========")
print(df[(df['年月']<=201899) & (df['年月']>=201800) ])
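# Added sketch for exercise 6 (assumed approach): align the 2017 and 2018 rows
# on the month number, then take the difference of 上市資本額_十億元.
df_2017 = df[(df['年月'] >= 201700) & (df['年月'] <= 201799)].copy()
df_2018 = df[(df['年月'] >= 201800) & (df['年月'] <= 201899)].copy()
df_2017['month'] = df_2017['年月'] % 100
df_2018['month'] = df_2018['年月'] % 100
cmp_df = pd.merge(df_2017[['month', '上市資本額_十億元']],
                  df_2018[['month', '上市資本額_十億元']],
                  on='month', suffixes=('_2017', '_2018'))
cmp_df['diff'] = cmp_df['上市資本額_十億元_2018'] - cmp_df['上市資本額_十億元_2017']
print(cmp_df)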
"""
Unfinished; see the teacher's solution for reference.
print("======== 6. compare 上市資本額_十億元 between 2017 and 2018 (difference) ========")
df3=df[(df['年月']<=201799) & (df['年月']>=201700) ]
df4=df[(df['年月']<=201899) & (df['年月']>=201800) ]
df5=df3[['年月','上市資本額_十億元']]
df6=df4[['年月','上市資本額_十億元']]
#df7=pd.concat([df5, df6], ignore_index=True)
df7=pd.merge(df5, df6,how='')
print(df7)
""" | [
"[email protected]"
] | |
473edc044398c5b3eca2579faca5a7c518d2a277 | 10e5b1b2e42a2ff6ec998ed900071e8b5da2e74e | /array/0509_fibonacci_number/0509_fibonacci_number.py | 5d815145881d946e9ff8a001d2a66e9ff2dcd44e | [] | no_license | zdyxry/LeetCode | 1f71092d687316de1901156b74fbc03588f0b0a5 | b149d1e8a83b0dfc724bd9dc129a1cad407dd91f | refs/heads/master | 2023-01-29T11:59:14.162531 | 2023-01-26T03:20:23 | 2023-01-26T03:20:23 | 178,754,208 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | # -*- coding: utf-8 -*-
class Solution(object):
def fib(self, N):
if N <= 1:
return N
return self.fib(N - 1) + self.fib(N - 2)
def fib2(self, N):
        array = [i for i in range(N + 1)]  # Py3 range (not xrange); N+1 slots so array[N] exists
        for i in range(2, N + 1):
            array[i] = array[i-1] + array[i-2]
        return array[N]
def fib3(self, N):
        if N <= 1:
            return N
        left = 0
        right = 1
for i in range(2,N+1):
left, right = right, left + right
return right
def fib4(self, N):
array =[i for i in range(N+1)]
return self.fibola(array, N)
def fibola(self, array, N):
if N <= 1:
return N
array[N] = self.fibola(array, N-1) + array[N-2]
return array[N]
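# Quick self-check (added sketch): with fib2's off-by-one fixed, all four
# implementations should agree on small inputs.
if __name__ == "__main__":
    s = Solution()
    assert all(s.fib(n) == s.fib2(n) == s.fib3(n) == s.fib4(n) for n in range(10))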
print(Solution().fib4(6)) | [
"[email protected]"
] | |
0a6f99febf158ce23215714249263dc107358160 | 2724412db1fc69b67b74a7d1c4ca4731962908d3 | /tests/test_streams.py | fd7c66a0bdc95eac88148387db0573a5c90b4496 | [
"BSD-3-Clause"
] | permissive | Tijani-Dia/websockets | a981267685e681df822307bce4ec7eb781e9927d | ed9a7b446c7147f6f88dbeb1d86546ad754e435e | refs/heads/main | 2023-08-23T13:10:16.030126 | 2021-10-08T20:18:24 | 2021-10-28T20:17:30 | 425,114,573 | 1 | 0 | BSD-3-Clause | 2021-11-05T23:56:39 | 2021-11-05T23:56:39 | null | UTF-8 | Python | false | false | 6,055 | py | from websockets.streams import StreamReader
from .utils import GeneratorTestCase
class StreamReaderTests(GeneratorTestCase):
def setUp(self):
self.reader = StreamReader()
def test_read_line(self):
self.reader.feed_data(b"spam\neggs\n")
gen = self.reader.read_line(32)
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"spam\n")
gen = self.reader.read_line(32)
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"eggs\n")
def test_read_line_need_more_data(self):
self.reader.feed_data(b"spa")
gen = self.reader.read_line(32)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"m\neg")
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"spam\n")
gen = self.reader.read_line(32)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"gs\n")
line = self.assertGeneratorReturns(gen)
self.assertEqual(line, b"eggs\n")
def test_read_line_not_enough_data(self):
self.reader.feed_data(b"spa")
self.reader.feed_eof()
gen = self.reader.read_line(32)
with self.assertRaises(EOFError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"stream ends after 3 bytes, before end of line",
)
def test_read_line_too_long(self):
self.reader.feed_data(b"spam\neggs\n")
gen = self.reader.read_line(2)
with self.assertRaises(RuntimeError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"read 5 bytes, expected no more than 2 bytes",
)
def test_read_line_too_long_need_more_data(self):
self.reader.feed_data(b"spa")
gen = self.reader.read_line(2)
with self.assertRaises(RuntimeError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"read 3 bytes, expected no more than 2 bytes",
)
def test_read_exact(self):
self.reader.feed_data(b"spameggs")
gen = self.reader.read_exact(4)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
gen = self.reader.read_exact(4)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"eggs")
def test_read_exact_need_more_data(self):
self.reader.feed_data(b"spa")
gen = self.reader.read_exact(4)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"meg")
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
gen = self.reader.read_exact(4)
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"gs")
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"eggs")
def test_read_exact_not_enough_data(self):
self.reader.feed_data(b"spa")
self.reader.feed_eof()
gen = self.reader.read_exact(4)
with self.assertRaises(EOFError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"stream ends after 3 bytes, expected 4 bytes",
)
def test_read_to_eof(self):
gen = self.reader.read_to_eof(32)
self.reader.feed_data(b"spam")
self.assertGeneratorRunning(gen)
self.reader.feed_eof()
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
def test_read_to_eof_at_eof(self):
self.reader.feed_eof()
gen = self.reader.read_to_eof(32)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"")
def test_read_to_eof_too_long(self):
gen = self.reader.read_to_eof(2)
self.reader.feed_data(b"spam")
with self.assertRaises(RuntimeError) as raised:
next(gen)
self.assertEqual(
str(raised.exception),
"read 4 bytes, expected no more than 2 bytes",
)
def test_at_eof_after_feed_data(self):
gen = self.reader.at_eof()
self.assertGeneratorRunning(gen)
self.reader.feed_data(b"spam")
eof = self.assertGeneratorReturns(gen)
self.assertFalse(eof)
def test_at_eof_after_feed_eof(self):
gen = self.reader.at_eof()
self.assertGeneratorRunning(gen)
self.reader.feed_eof()
eof = self.assertGeneratorReturns(gen)
self.assertTrue(eof)
def test_feed_data_after_feed_data(self):
self.reader.feed_data(b"spam")
self.reader.feed_data(b"eggs")
gen = self.reader.read_exact(8)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spameggs")
gen = self.reader.at_eof()
self.assertGeneratorRunning(gen)
def test_feed_eof_after_feed_data(self):
self.reader.feed_data(b"spam")
self.reader.feed_eof()
gen = self.reader.read_exact(4)
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"spam")
gen = self.reader.at_eof()
eof = self.assertGeneratorReturns(gen)
self.assertTrue(eof)
def test_feed_data_after_feed_eof(self):
self.reader.feed_eof()
with self.assertRaises(EOFError) as raised:
self.reader.feed_data(b"spam")
self.assertEqual(
str(raised.exception),
"stream ended",
)
def test_feed_eof_after_feed_eof(self):
self.reader.feed_eof()
with self.assertRaises(EOFError) as raised:
self.reader.feed_eof()
self.assertEqual(
str(raised.exception),
"stream ended",
)
def test_discard(self):
gen = self.reader.read_to_eof(32)
self.reader.feed_data(b"spam")
self.reader.discard()
self.assertGeneratorRunning(gen)
self.reader.feed_eof()
data = self.assertGeneratorReturns(gen)
self.assertEqual(data, b"")
| [
"[email protected]"
] | |
4b14a84a25716004baaf55a0e43796fab1a29293 | a137466dbaa5d704cd5a15ab9dfd17907b24be04 | /algo2/mrdqn/agent.py | 21a44e7aca50bb7bc677d14406d87263a932f502 | [
"Apache-2.0"
] | permissive | xlnwel/g2rl | 92c15b8b9d0cd75b6d2dc8df20e6717e1a621ff6 | e1261fdd2ce70724a99ddd174616cf013917b241 | refs/heads/master | 2023-08-30T10:29:44.169523 | 2021-11-08T07:50:43 | 2021-11-08T07:50:43 | 422,582,891 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,158 | py | import tensorflow as tf
from utility.tf_utils import softmax, log_softmax, explained_variance
from utility.rl_utils import *
from utility.rl_loss import retrace
from core.decorator import override
from algo.mrdqn.base import RDQNBase, get_data_format, collect
class Agent(RDQNBase):
""" MRDQN methods """
@tf.function
def _learn(self, obs, action, reward, discount, mu, mask,
IS_ratio=1, state=None, prev_action=None, prev_reward=None):
obs, action, mu, mask, target, state, add_inp, terms = \
self._compute_target_and_process_data(
obs, action, reward, discount, mu, mask,
state, prev_action, prev_reward)
with tf.GradientTape() as tape:
x, _ = self._compute_embed(obs, mask, state, add_inp)
qs = self.q(x)
q = tf.reduce_sum(qs * action, -1)
error = target - q
value_loss = tf.reduce_mean(.5 * error**2, axis=-1)
value_loss = tf.reduce_mean(IS_ratio * value_loss)
terms['value_loss'] = value_loss
tf.debugging.assert_shapes([
[q, (None, self._sample_size)],
[target, (None, self._sample_size)],
[error, (None, self._sample_size)],
[IS_ratio, (None,)],
[value_loss, ()]
])
terms['value_norm'] = self._value_opt(tape, value_loss)
if 'actor' in self.model:
with tf.GradientTape() as tape:
pi, logpi = self.actor.train_step(x)
pi_a = tf.reduce_sum(pi * action, -1)
reinforce = tf.minimum(1. / mu, self._loo_c) * error * pi_a
v = tf.reduce_sum(qs * pi, axis=-1)
regularization = -tf.reduce_sum(pi * logpi, axis=-1)
loo_loss = -(self._v_pi_coef * v + self._reinforce_coef * reinforce)
tf.debugging.assert_shapes([
[pi, (None, self._sample_size, self._action_dim)],
[qs, (None, self._sample_size, self._action_dim)],
[v, (None, self._sample_size)],
[reinforce, (None, self._sample_size)],
[regularization, (None, self._sample_size)],
])
loo_loss = tf.reduce_mean(loo_loss, axis=-1)
regularization = tf.reduce_mean(regularization, axis=-1)
actor_loss = loo_loss - self._tau * regularization
actor_loss = tf.reduce_mean(IS_ratio * actor_loss)
terms.update(dict(
reinforce=reinforce,
v=v,
loo_loss=loo_loss,
regularization=regularization,
actor_loss=actor_loss,
ratio=tf.reduce_mean(pi_a / mu),
pi_min=tf.reduce_min(pi),
pi_std=tf.math.reduce_std(pi)
))
terms['actor_norm'] = self._actor_opt(tape, actor_loss)
if self._is_per:
priority = self._compute_priority(tf.abs(error))
terms['priority'] = priority
terms.update(dict(
q=q,
q_std=tf.math.reduce_std(q),
error=error,
error_std=tf.math.reduce_std(error),
mu_min=tf.reduce_min(mu),
mu=mu,
mu_inv=tf.reduce_mean(1/mu),
mu_std=tf.math.reduce_std(mu),
target=target,
explained_variance_q=explained_variance(target, q)
))
return terms
@override(RDQNBase)
def _compute_target(self, obs, action, reward, discount,
mu, mask, state, add_inp):
terms = {}
x, _ = self._compute_embed(obs, mask, state, add_inp, online=False)
if self._burn_in_size:
bis = self._burn_in_size
ss = self._sample_size
_, reward = tf.split(reward, [bis, ss], 1)
_, discount = tf.split(discount, [bis, ss], 1)
_, next_mu_a = tf.split(mu, [bis+1, ss], 1)
_, next_x = tf.split(x, [bis+1, ss], 1)
_, next_action = tf.split(action, [bis+1, ss], 1)
else:
_, next_mu_a = tf.split(mu, [1, self._sample_size], 1)
_, next_x = tf.split(x, [1, self._sample_size], 1)
_, next_action = tf.split(action, [1, self._sample_size], 1)
next_qs = self.target_q(next_x)
regularization = None
if 'actor' in self.model:
next_pi, next_logpi = self.target_actor.train_step(next_x)
if self._probabilistic_regularization == 'entropy':
regularization = tf.reduce_sum(
self._tau * next_pi * next_logpi, axis=-1)
else:
if self._probabilistic_regularization is None:
if self._double: # don't suggest to use double Q here, but implement it anyway
online_x, _ = self._compute_embed(obs, mask, state, add_inp)
next_online_x = tf.split(online_x, [bis+1, ss-1], 1)
next_online_qs = self.q(next_online_x)
next_pi = self.q.compute_greedy_action(next_online_qs, one_hot=True)
else:
next_pi = self.target_q.compute_greedy_action(next_qs, one_hot=True)
elif self._probabilistic_regularization == 'prob':
next_pi = softmax(next_qs, self._tau)
elif self._probabilistic_regularization == 'entropy':
next_pi = softmax(next_qs, self._tau)
next_logpi = log_softmax(next_qs, self._tau)
regularization = tf.reduce_sum(next_pi * next_logpi, axis=-1)
terms['next_entropy'] = - regularization / self._tau
else:
raise ValueError(self._probabilistic_regularization)
discount = discount * self._gamma
target = retrace(
reward, next_qs, next_action,
next_pi, next_mu_a, discount,
lambda_=self._lambda,
axis=1, tbo=self._tbo,
regularization=regularization)
return target, terms
| [
"[email protected]"
] | |
b77ad5adbfe3bdc3c5a57d4185371cc854289ac2 | a07124716edd86159dff277010132ba9c5cd0f75 | /Text-Based Browser/task/tests.py | 3ca883bce21f71b1767281f280b941e8d1d999d1 | [
"MIT"
] | permissive | drtierney/hyperskill-TextBasedBrowser-Python | 27a15fa0bd44a927a9552d4815a0b4ab69375710 | a4f2ac60643559e580b75a02078a679e5f1f0a2c | refs/heads/main | 2023-08-28T04:24:51.693648 | 2021-10-25T17:34:58 | 2021-10-25T17:34:58 | 415,304,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,637 | py | from hstest.stage_test import StageTest
from hstest.test_case import TestCase
from hstest.check_result import CheckResult
import os
import shutil
import sys
if sys.platform.startswith("win"):
import _locale
# pylint: disable=protected-access
_locale._getdefaultlocale = (lambda *args: ['en_US', 'utf8'])
CheckResult.correct = lambda: CheckResult(True, '')
CheckResult.wrong = lambda feedback: CheckResult(False, feedback)
class TextBasedBrowserTest(StageTest):
def generate(self):
return [
TestCase(
stdin='bloomberg.com\nbloomberg\nexit',
attach=('Bloomberg', 'New York Times', 'bloomberg'),
args=['tb_tabs']
),
TestCase(
stdin='nytimes.com\nnytimes\nexit',
attach=('New York Times', 'Bloomberg', 'nytimes'),
args=['tb_tabs']
),
TestCase(
stdin='nytimescom\nexit',
args=['tb_tabs']
),
TestCase(
stdin='blooomberg.com\nexit',
args=['tb_tabs']
),
TestCase(
stdin='blooomberg.com\nnytimes.com\nexit',
attach=(None, 'New York Times', 'Bloomberg', 'nytimes'),
args=['tb_tabs']
),
TestCase(
stdin='nytimescom\nbloomberg.com\nexit',
attach=(None, 'Bloomberg', 'New York Times', 'bloomberg'),
args=['tb_tabs']
),
TestCase(
stdin='bloomberg.com\nnytimes.com\nback\nexit',
attach={
'This New Liquid Is Magnetic, and Mesmerizing': (1, 'New York Times'),
'The Space Race: From Apollo 11 to Elon Musk': (2, 'Bloomberg')
},
args=['tb_tabs']
),
TestCase(
stdin='nytimes.com\nbloomberg.com\nback\nexit',
attach={
'This New Liquid Is Magnetic, and Mesmerizing': (2, 'New York Times'),
'The Space Race: From Apollo 11 to Elon Musk': (1, 'Bloomberg')
},
args=['tb_tabs']
),
]
def _check_files(self, path_for_tabs: str, right_word: str) -> int:
"""
Helper which checks that browser saves visited url in files and
provides access to them.
:param path_for_tabs: directory which must contain saved tabs
:param right_word: Word-marker which must be in right tab
:return: True, if right_words is present in saved tab
"""
for path, dirs, files in os.walk(path_for_tabs):
for file in files:
with open(os.path.join(path_for_tabs, file), 'r') as tab:
try:
content = tab.read()
except UnicodeDecodeError:
return -1
if right_word in content:
return 1
break
return 0
def check(self, reply, attach):
# Incorrect URL
if attach is None:
if 'error' in reply.lower():
return CheckResult.correct()
else:
return CheckResult.wrong('There was no "error" word, but should be.')
# Correct URL
if isinstance(attach, tuple):
if len(attach) == 4:
_, *attach = attach
if 'error' not in reply.lower():
return CheckResult.wrong('There was no "error" word, but should be.')
right_word, wrong_word, correct_file_name = attach
path_for_tabs = 'tb_tabs'
if not os.path.isdir(path_for_tabs):
return CheckResult.wrong(
"Can't find a directory \"" + path_for_tabs + "\" "
"in which you should save your web pages.")
check_files_result = self._check_files(path_for_tabs, right_word)
if not check_files_result:
return CheckResult.wrong(
"Seems like you did\'n save the web page "
"\"" + right_word + "\" into the "
"directory \"" + path_for_tabs + "\". "
"This file with page should be named \"" + correct_file_name + "\"")
elif check_files_result == -1:
return CheckResult.wrong('An error occurred while reading your saved tab. '
'Perhaps you used the wrong encoding?')
try:
shutil.rmtree(path_for_tabs)
except PermissionError:
return CheckResult.wrong("Impossible to remove the directory for tabs. Perhaps you haven't closed some file?")
if wrong_word in reply:
return CheckResult.wrong('It seems like you printed wrong variable')
if right_word in reply:
return CheckResult.correct()
return CheckResult.wrong('You printed neither bloomberg_com nor nytimes_com')
if isinstance(attach, dict):
for key, value in attach.items():
count, site = value
real_count = reply.count(key)
if reply.count(key) != count:
return CheckResult.wrong(
f'The site "{site}" should be displayed {count} time(s).\n'
f'Actually displayed: {real_count} time(s).'
)
return CheckResult.correct()
TextBasedBrowserTest().run_tests()
| [
"[email protected]"
] | |
4918810498af75369329a2204c7cccbe0e40efb1 | 40dd8330e5f78c4348bbddc2c5acfd59d793dd51 | /tools/model_converters/twins2mmseg.py | 647d41784aa07468be4b3f2e183064ad55266ad1 | [
"Apache-2.0"
] | permissive | open-mmlab/mmsegmentation | 0d12092312e2c465ede1fd7dd9847b6f2b37049c | 30a3f94f3e2916e27fa38c67cc3b8c69c1893fe8 | refs/heads/main | 2023-09-04T10:54:52.299711 | 2023-07-24T07:28:21 | 2023-07-24T07:28:21 | 272,133,018 | 6,534 | 2,375 | Apache-2.0 | 2023-09-14T01:22:32 | 2020-06-14T04:32:33 | Python | UTF-8 | Python | false | false | 2,764 | py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from collections import OrderedDict
import mmengine
import torch
from mmengine.runner import CheckpointLoader
def convert_twins(args, ckpt):
new_ckpt = OrderedDict()
for k, v in list(ckpt.items()):
new_v = v
if k.startswith('head'):
continue
elif k.startswith('patch_embeds'):
if 'proj.' in k:
new_k = k.replace('proj.', 'projection.')
else:
new_k = k
elif k.startswith('blocks'):
# Union
if 'attn.q.' in k:
new_k = k.replace('q.', 'attn.in_proj_')
new_v = torch.cat([v, ckpt[k.replace('attn.q.', 'attn.kv.')]],
dim=0)
elif 'mlp.fc1' in k:
new_k = k.replace('mlp.fc1', 'ffn.layers.0.0')
elif 'mlp.fc2' in k:
new_k = k.replace('mlp.fc2', 'ffn.layers.1')
# Only pcpvt
elif args.model == 'pcpvt':
if 'attn.proj.' in k:
new_k = k.replace('proj.', 'attn.out_proj.')
else:
new_k = k
# Only svt
else:
if 'attn.proj.' in k:
k_lst = k.split('.')
if int(k_lst[2]) % 2 == 1:
new_k = k.replace('proj.', 'attn.out_proj.')
else:
new_k = k
else:
new_k = k
new_k = new_k.replace('blocks.', 'layers.')
elif k.startswith('pos_block'):
new_k = k.replace('pos_block', 'position_encodings')
if 'proj.0.' in new_k:
new_k = new_k.replace('proj.0.', 'proj.')
else:
new_k = k
if 'attn.kv.' not in k:
new_ckpt[new_k] = new_v
return new_ckpt
def main():
parser = argparse.ArgumentParser(
description='Convert keys in timm pretrained vit models to '
'MMSegmentation style.')
parser.add_argument('src', help='src model path or url')
# The dst path must be a full path of the new checkpoint.
parser.add_argument('dst', help='save path')
parser.add_argument('model', help='model: pcpvt or svt')
args = parser.parse_args()
checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')
if 'state_dict' in checkpoint:
# timm checkpoint
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
weight = convert_twins(args, state_dict)
mmengine.mkdir_or_exist(osp.dirname(args.dst))
torch.save(weight, args.dst)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c436a852bf1b29fdd43c22fec676f7de2348174a | da7a165522daea7c346693c5f32850017c482967 | /abc51-100/abc066/c.py | 09ed9795009df321637516a4eee2dcfb604ef0b8 | [] | no_license | SShayashi/ABC | 19f8750919208c5ff8935638dbaab941c255f914 | 3cbfee0c5251c1bb0df6306166d8d4b33bf7bb2c | refs/heads/master | 2021-05-04T21:06:10.720367 | 2020-07-11T13:59:16 | 2020-07-11T13:59:29 | 119,886,572 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | def main():
n = int(input())
a = list(map(int, input().split()))
even = a[1::2]
odd = a[::2]
ans = []
if n % 2 == 0:
even.reverse()
ans = even + odd
else:
odd.reverse()
ans = odd + even
to_str = map(str, ans)
return " ".join(to_str)
print(main()) | [
"[email protected]"
] | |
16f56f1f208c469a2d92b843ac849f98a7111d08 | 7087a5dd1772c9456f098bc024a894dcaeef5432 | /backup/build/new-calkube/kubernetes-6.0.0_snapshot-py2.7.egg/kubernetes/client/models/v1_delete_options.py | 575bcf443ee81e1402ef6bba7a8c440f8590df0a | [] | no_license | santhoshchami/kubecctl-python | 5be7a5a17cc6f08ec717b3eb1c11719ef7653aba | cd45af465e25b0799d65c573e841e2acb983ee68 | refs/heads/master | 2021-06-23T11:00:43.615062 | 2019-07-10T16:57:06 | 2019-07-10T16:57:06 | 145,669,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,530 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1DeleteOptions(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'grace_period_seconds': 'int',
'kind': 'str',
'orphan_dependents': 'bool',
'preconditions': 'V1Preconditions',
'propagation_policy': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'grace_period_seconds': 'gracePeriodSeconds',
'kind': 'kind',
'orphan_dependents': 'orphanDependents',
'preconditions': 'preconditions',
'propagation_policy': 'propagationPolicy'
}
def __init__(self, api_version=None, grace_period_seconds=None, kind=None, orphan_dependents=None, preconditions=None, propagation_policy=None):
"""
V1DeleteOptions - a model defined in Swagger
"""
self._api_version = None
self._grace_period_seconds = None
self._kind = None
self._orphan_dependents = None
self._preconditions = None
self._propagation_policy = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if grace_period_seconds is not None:
self.grace_period_seconds = grace_period_seconds
if kind is not None:
self.kind = kind
if orphan_dependents is not None:
self.orphan_dependents = orphan_dependents
if preconditions is not None:
self.preconditions = preconditions
if propagation_policy is not None:
self.propagation_policy = propagation_policy
@property
def api_version(self):
"""
Gets the api_version of this V1DeleteOptions.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1DeleteOptions.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1DeleteOptions.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1DeleteOptions.
:type: str
"""
self._api_version = api_version
@property
def grace_period_seconds(self):
"""
Gets the grace_period_seconds of this V1DeleteOptions.
The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:return: The grace_period_seconds of this V1DeleteOptions.
:rtype: int
"""
return self._grace_period_seconds
@grace_period_seconds.setter
def grace_period_seconds(self, grace_period_seconds):
"""
Sets the grace_period_seconds of this V1DeleteOptions.
The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param grace_period_seconds: The grace_period_seconds of this V1DeleteOptions.
:type: int
"""
self._grace_period_seconds = grace_period_seconds
@property
def kind(self):
"""
Gets the kind of this V1DeleteOptions.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1DeleteOptions.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1DeleteOptions.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1DeleteOptions.
:type: str
"""
self._kind = kind
@property
def orphan_dependents(self):
"""
Gets the orphan_dependents of this V1DeleteOptions.
Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:return: The orphan_dependents of this V1DeleteOptions.
:rtype: bool
"""
return self._orphan_dependents
@orphan_dependents.setter
def orphan_dependents(self, orphan_dependents):
"""
Sets the orphan_dependents of this V1DeleteOptions.
Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param orphan_dependents: The orphan_dependents of this V1DeleteOptions.
:type: bool
"""
self._orphan_dependents = orphan_dependents
@property
def preconditions(self):
"""
Gets the preconditions of this V1DeleteOptions.
Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.
:return: The preconditions of this V1DeleteOptions.
:rtype: V1Preconditions
"""
return self._preconditions
@preconditions.setter
def preconditions(self, preconditions):
"""
Sets the preconditions of this V1DeleteOptions.
Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.
:param preconditions: The preconditions of this V1DeleteOptions.
:type: V1Preconditions
"""
self._preconditions = preconditions
@property
def propagation_policy(self):
"""
Gets the propagation_policy of this V1DeleteOptions.
Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: The propagation_policy of this V1DeleteOptions.
:rtype: str
"""
return self._propagation_policy
@propagation_policy.setter
def propagation_policy(self, propagation_policy):
"""
Sets the propagation_policy of this V1DeleteOptions.
Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param propagation_policy: The propagation_policy of this V1DeleteOptions.
:type: str
"""
self._propagation_policy = propagation_policy
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1DeleteOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
] | |
5a1800a557704e33d4f51badeae781b4ae00bcca | c3a01f8bcece48f94a347b92694f90227708f507 | /pyvisa/testsuite/test_constants.py | 8c5add8034b3b0c9c0686b60af1742adea537ea8 | [
"MIT"
] | permissive | panlun/pyvisa | e16a6cdaae47bc69d932538f14c62015d17be7ab | 124c46bd2ad89e49031339d6181255c2808fecbc | refs/heads/master | 2022-11-21T13:07:29.280849 | 2020-06-24T22:23:27 | 2020-06-24T22:23:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # -*- coding: utf-8 -*-
"""Test objects from constants.
This file is part of PyVISA.
:copyright: 2019-2020 by PyVISA Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from pyvisa.constants import DataWidth
from . import BaseTestCase
class TestDataWidth(BaseTestCase):
def test_conversion_from_literal(self):
for v, e in zip(
(8, 16, 32, 64),
(DataWidth.bit_8, DataWidth.bit_16, DataWidth.bit_32, DataWidth.bit_64),
):
self.assertEqual(DataWidth.from_literal(v), e)
with self.assertRaises(ValueError):
DataWidth.from_literal(0)
| [
"[email protected]"
] | |
8dcbda8c71b778dae427d765d9b4621c3b6fc340 | 00af09f4ac6f98203910d86c3791c152184ace9a | /Lib/ctypes/test/test_arrays.py | ad3451f5cf9457610f9655188ac524bc3ba9104d | [] | no_license | orf53975/CarnosOS | 621d641df02d742a2452fde2f28a28c74b32695a | d06849064e4e9f30ef901ad8cf90960e1bec0805 | refs/heads/master | 2023-03-24T08:06:48.274566 | 2017-01-05T16:41:01 | 2017-01-05T16:41:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,024 | py | <<<<<<< HEAD
from ctypes import *
from ctypes.test import need_symbol
formats = "bBhHiIlLqQfd"
formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
c_long, c_ulonglong, c_float, c_double, c_longdouble
class ArrayTestCase(unittest.TestCase):
def test_simple(self):
# create classes holding simple numeric types, and check
# various properties.
init = list(range(15, 25))
for fmt in formats:
alen = len(init)
int_array = ARRAY(fmt, alen)
ia = int_array(*init)
# length of instance ok?
self.assertEqual(len(ia), alen)
# slot values ok?
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, init)
# change the items
from operator import setitem
new_values = list(range(42, 42+alen))
[setitem(ia, n, new_values[n]) for n in range(alen)]
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, new_values)
# are the items initialized to 0?
ia = int_array()
values = [ia[i] for i in range(len(init))]
self.assertEqual(values, [0] * len(init))
# Too many initializers should be caught
self.assertRaises(IndexError, int_array, *range(alen*2))
CharArray = ARRAY(c_char, 3)
ca = CharArray(b"a", b"b", b"c")
# Should this work? It doesn't:
# CharArray("abc")
self.assertRaises(TypeError, CharArray, "abc")
self.assertEqual(ca[0], b"a")
self.assertEqual(ca[1], b"b")
self.assertEqual(ca[2], b"c")
self.assertEqual(ca[-3], b"a")
self.assertEqual(ca[-2], b"b")
self.assertEqual(ca[-1], b"c")
self.assertEqual(len(ca), 3)
# cannot delete items
from operator import delitem
self.assertRaises(TypeError, delitem, ca, 0)
def test_numeric_arrays(self):
alen = 5
numarray = ARRAY(c_int, alen)
na = numarray()
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0] * alen)
na = numarray(*[c_int()] * alen)
values = [na[i] for i in range(alen)]
self.assertEqual(values, [0]*alen)
na = numarray(1, 2, 3, 4, 5)
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
na = numarray(*map(c_int, (1, 2, 3, 4, 5)))
values = [i for i in na]
self.assertEqual(values, [1, 2, 3, 4, 5])
def test_classcache(self):
self.assertIsNot(ARRAY(c_int, 3), ARRAY(c_int, 4))
self.assertIs(ARRAY(c_int, 3), ARRAY(c_int, 3))
def test_from_address(self):
# Failed with 0.9.8, reported by JUrner
p = create_string_buffer(b"foo")
sz = (c_char * 3).from_address(addressof(p))
self.assertEqual(sz[:], b"foo")
self.assertEqual(sz[::], b"foo")
self.assertEqual(sz[::-1], b"oof")
self.assertEqual(sz[::3], b"f")
self.assertEqual(sz[1:4:2], b"o")
self.assertEqual(sz.value, b"foo")
@need_symbol('create_unicode_buffer')
def test_from_addressW(self):
p = create_unicode_buffer("foo")
sz = (c_wchar * 3).from_address(addressof(p))
self.assertEqual(sz[:], "foo")
self.assertEqual(sz[::], "foo")
self.assertEqual(sz[::-1], "oof")
self.assertEqual(sz[::3], "f")
self.assertEqual(sz[1:4:2], "o")
self.assertEqual(sz.value, "foo")
def test_cache(self):
# Array types are cached internally in the _ctypes extension,
# in a WeakValueDictionary. Make sure the array type is
# removed from the cache when the itemtype goes away. This
# test will not fail, but will show a leak in the testsuite.
# Create a new type:
class my_int(c_int):
pass
# Create a new array type based on it:
t1 = my_int * 1
t2 = my_int * 1
self.assertIs(t1, t2)
def test_subclass(self):
class T(Array):
_type_ = c_int
_length_ = 13
class U(T):
pass
class V(U):
pass
class W(V):
pass
class X(T):
_type_ = c_short
class Y(T):
_length_ = 187
for c in [T, U, V, W]:
self.assertEqual(c._type_, c_int)
self.assertEqual(c._length_, 13)
self.assertEqual(c()._type_, c_int)
self.assertEqual(c()._length_, 13)
self.assertEqual(X._type_, c_short)
self.assertEqual(X._length_, 13)
self.assertEqual(X()._type_, c_short)
self.assertEqual(X()._length_, 13)
self.assertEqual(Y._type_, c_int)
self.assertEqual(Y._length_, 187)
self.assertEqual(Y()._type_, c_int)
self.assertEqual(Y()._length_, 187)
def test_bad_subclass(self):
import sys
with self.assertRaises(AttributeError):
class T(Array):
pass
with self.assertRaises(AttributeError):
class T(Array):
_type_ = c_int
with self.assertRaises(AttributeError):
class T(Array):
_length_ = 13
with self.assertRaises(OverflowError):
class T(Array):
_type_ = c_int
_length_ = sys.maxsize * 2
with self.assertRaises(AttributeError):
class T(Array):
_type_ = c_int
_length_ = 1.87
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
ccc0c33067aa23f9329f7727f8ce57f7f5cf29b1 | fff24c6c6123e5e90ac2fae26536150449140c6d | /setup.py | 0b74facb61fe0ac8600893175528a1d17392e7ab | [
"ISC"
] | permissive | binaryf/demosys-py | 83da9f9ddd8d1672413f89153012ab6bb7fae6ab | f11b09cb6502adfaa437c8cbe780039c49b72524 | refs/heads/master | 2020-03-22T16:30:16.767030 | 2018-07-24T11:19:22 | 2018-07-24T11:19:22 | 140,331,208 | 1 | 0 | null | 2018-07-09T19:12:49 | 2018-07-09T19:12:48 | null | UTF-8 | Python | false | false | 1,375 | py | from setuptools import setup
setup(
name="demosys-py",
version="1.0.4",
description="Modern OpenGL 3.3+ Framework inspired by Django",
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url="https://github.com/Contraz/demosys-py",
author="Einar Forselv",
author_email="[email protected]",
maintainer="Einar Forselv",
maintainer_email="[email protected]",
packages=['demosys'],
include_package_data=True,
    keywords=['opengl', 'framework', 'demoscene'],
classifiers=[
'Programming Language :: Python',
'Environment :: MacOS X',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'Topic :: Multimedia :: Graphics',
'License :: OSI Approved :: ISC License (ISCL)',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
install_requires=[
'PyOpenGL==3.1.0',
'glfw==1.6.0',
'moderngl==5.3.0',
'pyrr==0.9.2',
'Pillow==5.1.0',
'pyrocket==0.2.7',
'PyWavefront==0.3.2',
# 'pygame==1.9.3',
],
entry_points={'console_scripts': [
'demosys-admin = demosys.core.management:execute_from_command_line',
]},
)
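# Illustrative install-and-run sketch (added by the editor; the subcommand name
# is a placeholder, not taken from this repository):
#
#     pip install .
#     demosys-admin <subcommand>   # console script declared in entry_points above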
| [
"[email protected]"
] | |
dfc119e744be40778ca5cd17e33454a7d7001076 | fd18ce27b66746f932a65488aad04494202e2e0d | /day34/farms/categories/categories/categories/pipelines.py | dba029c921f259cfcbba84dba0b24d192b7fa697 | [] | no_license | daofeng123/ClassCodes | 1acbd843836e550c9cebf67ef21dfca9f6b9fc87 | fbcd1f24d79b8bb56ad0669b07ad118064609612 | refs/heads/master | 2020-06-24T12:34:28.148197 | 2019-08-15T03:56:40 | 2019-08-15T03:56:40 | 198,963,469 | 3 | 0 | null | 2019-07-26T06:53:45 | 2019-07-26T06:53:44 | null | UTF-8 | Python | false | false | 558 | py | # -*- coding: utf-8 -*-
import json
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from categories.dbs.redismq import RedisMQ
class CategoriesPipeline(object):
redis_mq = RedisMQ()
def process_item(self, item, spider):
        # Do some simple JSON formatting
content = json.dumps(dict(item), ensure_ascii=False)
        # Push the crawl task onto the Redis queue
self.redis_mq.push_task(content)
return item
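# Illustrative activation sketch (the settings module path and priority value
# are assumptions, not taken from this project):
#
#     # settings.py
#     ITEM_PIPELINES = {'categories.pipelines.CategoriesPipeline': 300}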
| [
"[email protected]"
] | |
f7b22c64ab658985f221cf7076cee8fc91505b98 | a360a22af5e0b385db438b1324564ef317ff2f38 | /idex_module/views.py | a846edfb5592c73af23acdf636aeb55d68b6c4af | [] | no_license | ogglin/exchange_comparison | 3eb2d849e731f94e67509e4ce9130e33bb37bbaf | f3feae64aff26b574f7ecd24e6f7aff7bb95ec65 | refs/heads/master | 2023-04-26T07:45:06.229584 | 2021-05-31T18:52:29 | 2021-05-31T18:52:29 | 287,036,194 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | from rest_framework.response import Response
from rest_framework.views import APIView
from .functions import idex_profits
# Create your views here.
class idex(APIView):
def get(self, request):
# hotbit_result = hotbit_profits()
idex_result = idex_profits()
results = []
# idex_init()
# exchanges_init()
# for result in hotbit_result:
# results.append(result)
for result in idex_result:
results.append(result)
return Response(results)
| [
"[email protected]"
] | |
8a6af64bda1660fee7c263541b1c3e8425df645e | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/AlipayOpenMiniInstantiateQueryRequest.py | 2e24f412ecf6d93999b8fecab84c0b693832af9e | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 3,979 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOpenMiniInstantiateQueryModel import AlipayOpenMiniInstantiateQueryModel
class AlipayOpenMiniInstantiateQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayOpenMiniInstantiateQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayOpenMiniInstantiateQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.mini.instantiate.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
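# Minimal usage sketch (added for illustration; the biz model fields are
# placeholders, not a verified Alipay call):
#
#     request = AlipayOpenMiniInstantiateQueryRequest()
#     request.biz_content = AlipayOpenMiniInstantiateQueryModel()
#     params = request.get_params()  # dict ready for signing and transport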
| [
"[email protected]"
] | |
7fa407813dc0e9f4324ea6fa68186ad55071a769 | fda201d7cca34e216a17d97665c8457c72e66cb2 | /register/tests/test_center_csv.py | 5106fe3734607be0a86be712d190cf444751a602 | [
"Apache-2.0"
] | permissive | SmartElect/SmartElect | 94ab192beb32320e9ae8ae222f90ee531037c1c6 | d6d35f2fa8f60e756ad5247f8f0a5f05830e92f8 | refs/heads/develop | 2020-12-26T04:04:42.753741 | 2019-07-17T17:08:25 | 2019-07-17T17:08:25 | 44,687,036 | 24 | 12 | Apache-2.0 | 2020-06-06T07:16:48 | 2015-10-21T15:47:07 | Python | UTF-8 | Python | false | false | 28,774 | py | import os
import shutil
import tempfile
from django.test import TestCase
from django.urls import reverse
from ..models import RegistrationCenter, Office, Constituency, SubConstituency
from .. import utils
from .factories import OfficeFactory, ConstituencyFactory, SubConstituencyFactory, \
RegistrationCenterFactory
from libya_elections.constants import NO_NAMEDTHING
from staff.tests.base import StaffUserMixin
def get_copy_center_base_csv():
"""Return the base CSV for copy centers as a lists of lists (rows & columns)"""
current_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(current_dir, 'uploads', 'copy_center_base.csv')
with open(file_path, 'rb') as f:
lines = f.read().decode('utf-8').split('\n')
return [line.split(',') for line in lines if line]
class CSVColumnConstants(object):
"""Constants mapping CSV columns to ints"""
CENTER_ID = 0
NAME = 1
COPY_OF_ID = 2
CENTER_TYPE = 12
class CenterFileTestMixin(object):
def setUp(self):
super(CenterFileTestMixin, self).setUp()
self.url = reverse('upload-centers-csv')
RegistrationCenterFactory(name="Deleted center", deleted=True)
def tearDown(self):
if hasattr(self, 'file'):
self.file.close()
def get_csv_file(self, filename):
# generates a simple csv we can use for tests
current_dir = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(current_dir, 'uploads', filename)
self.file = open(file_path, 'rb')
return self.file
@staticmethod
def get_messages(response):
messages = response.context['messages']
return [str(msg) for msg in messages]
def upload_csv(self, filename='valid_ecc.csv', follow=True):
csv_file = self.get_csv_file(filename)
response = self.client.post(self.url, data={'csv': csv_file}, follow=follow)
return response
class CenterFileUpload(CenterFileTestMixin, StaffUserMixin, TestCase):
# tests for the ecc file upload functionality
permissions = ['add_registrationcenter']
model = RegistrationCenter
@classmethod
def setUpClass(klass): # Files only
# Create a temp dir for CSV files created on the fly.
klass.temp_dir = tempfile.mkdtemp()
@classmethod
def tearDownClass(klass): # Files only
# Clean up temp CSV files.
shutil.rmtree(klass.temp_dir)
def setUp(self):
super(CenterFileUpload, self).setUp()
# Create some things
for id in [1, NO_NAMEDTHING]:
# create one test instance and one special 'no-named-thing' instance (999)
if not Office.objects.filter(id=id).exists():
OfficeFactory(id=id)
if not Constituency.objects.filter(id=id).exists():
ConstituencyFactory(id=id)
if not SubConstituency.objects.filter(id=id).exists():
SubConstituencyFactory(id=id)
def write_csv(self, rows):
"""Given a list of lists, write them as a CSV to a temp file and return the filename.
The list of lists should be rows and columns as returned by get_copy_center_base_csv().
"""
fh, filename = tempfile.mkstemp(suffix='.csv', dir=self.temp_dir)
os.close(fh)
with open(filename, 'wb') as f:
f.write('\n'.join([','.join(row) for row in rows]).encode('utf-8'))
return filename
def test_upload_page_works(self):
        # requesting the upload page works and the right template is used
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'register/upload_centers_csv.html')
def test_empty_upload(self):
        # the form does not validate if an empty form is submitted.
        # the same template as the one we landed on is used and the form
        # has an error.
response = self.client.post(self.url, data={})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'register/upload_centers_csv.html')
self.assertFormError(response, 'form', 'csv', 'This field is required.')
def test_success_upload_page(self):
# after successfully uploading a file we are presented with a
# success template.
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'register/upload_centers_csv.html')
def test_upload_new_centers(self):
# Uploading a csv file with new center information creates new entries
# in the database.
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 3)
messages = self.get_messages(response)
self.assertIn(
utils.STATUS_MESSAGE.format(created=3, updated=0, dupes=0, blank=0),
messages
)
def test_upload_dupes(self):
# Upload does not create or update records if they did not change.
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 3)
response = self.upload_csv()
messages = self.get_messages(response)
self.assertIn(
utils.STATUS_MESSAGE.format(created=0, updated=0, dupes=3, blank=0),
messages
)
def test_upload_after_delete(self):
# Upload, mark records deleted, upload again
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 3)
RegistrationCenter.objects.all().update(deleted=True)
response = self.upload_csv()
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 3)
messages = self.get_messages(response)
self.assertIn(
utils.STATUS_MESSAGE.format(created=3, updated=0, dupes=0, blank=0),
messages
)
def test_upload_update(self):
# CSV updates a record if its attributes differ from those in the db.
RegistrationCenter.objects.create(center_id=11001, name="Center 3")
RegistrationCenter.objects.create(center_id=11001, name="Center 3", deleted=True)
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
reg_center = RegistrationCenter.objects.get(center_id=11001)
self.assertNotEqual(reg_center.name, "Center 3")
messages = self.get_messages(response)
self.assertIn(
utils.STATUS_MESSAGE.format(created=2, updated=1, dupes=0, blank=0),
messages
)
def test_non_csv(self):
        # A non-CSV file should generate a specific error.
response = self.upload_csv(filename='icon_clock.gif')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(utils.COULD_NOT_PARSE_ERROR, messages)
def test_bad_formatted_csv(self):
# CSV files that contain rows with the wrong number of columns are not accepted.
# Even compliant rows are not imported.
response = self.upload_csv(filename='too_many_columns.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# file contained one valid center but it should not have been imported
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.PARSING_ERROR.format(line_number=2, columns=", ".join(utils.CSV_FIELDS)),
messages[0]
)
def test_too_many_headers(self):
# If the number of headers exceeds the number of columns expected,
# fail gracefully and inform the user that their file has the wrong format
response = self.upload_csv(filename='too_many_headers.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Too many headers ==> entire file is rejected
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.PARSING_ERROR.format(line_number=1, columns=", ".join(utils.CSV_FIELDS)),
messages[0]
)
def test_too_few_headers(self):
        # If the number of headers is less than the number of columns expected,
# fail gracefully and inform the user that their file has the wrong format
response = self.upload_csv(filename='too_few_headers.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Too few headers ==> entire file is rejected
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.PARSING_ERROR.format(line_number=1, columns=", ".join(utils.CSV_FIELDS)),
messages[0]
)
def test_wrong_file_headers(self):
# Uploading a csv file with columns in the wrong order should fail
response = self.upload_csv(filename='wrong_headers.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# no centers were created because we encountered an error on line 1.
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.PARSING_ERROR.format(line_number=1, columns=", ".join(utils.CSV_FIELDS)),
messages
)
def test_blank_csv(self):
# Uploading a blank csv file should not create any centers
response = self.upload_csv(filename='blank.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# No records were created
self.assertEqual(centers.count(), 0)
def test_blank_inbetween_csv(self):
# Blank lines are valid in between two rows
response = self.upload_csv(filename='blank_inbetween.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 5)
messages = self.get_messages(response)
self.assertIn(
utils.STATUS_MESSAGE.format(created=5, updated=0, dupes=0, blank=3),
messages
)
def test_noninteger_center_id_csv(self):
# center id should be able to be cast into an integer otherwise a
# parsing error will occur and a message indicating the line number
# where the error occurred will be presented to the user.
response = self.upload_csv(filename='noninteger_center_id.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(field_name="center_id", value="110A1", line_number=2,
error='Enter a whole number.'),
messages[0]
)
def test_wrong_length_center_id_csv(self):
response = self.upload_csv(filename='wrong_length_center_id.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(field_name="center_id", value="110001", line_number=2,
error='Ensure this value is less than or equal to'),
messages[0]
)
def test_bad_office_id_csv(self):
# office id should be able to be cast into an integer otherwise a
# parsing error will occur and a message indicating the line number
# where the error occurred will be presented to the user.
response = self.upload_csv(filename='bad_office_id.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(field_name="office_id", value="", line_number=2,
error='This field is required.'),
messages[0]
)
def test_centers_not_associated_with_office_con_subcon_csv(self):
# Some Centers are not associated with offices, cons or subcons. For this purpose,
# each of these NamedThing models has a special instance with an ID of NO_NAMEDTHING
# (999) to represent the 'Absence of an associated NamedThing'.
# https://github.com/hnec-vr/libya-elections/issues/949
response = self.upload_csv(filename='no_associated_namedthings.csv')
self.assertEqual(response.status_code, 200)
# 1 center was created
ecc = RegistrationCenter.objects.get()
self.assertEqual(NO_NAMEDTHING, ecc.office.id)
self.assertEqual(NO_NAMEDTHING, ecc.constituency.id)
self.assertEqual(NO_NAMEDTHING, ecc.subconstituency.id)
def test_bad_constituency_id_csv(self):
# constituency id should be able to be cast into an integer otherwise a
# parsing error will occur and a message indicating the line number
# where the error occurred will be presented to the user.
response = self.upload_csv(filename='bad_constituency_id.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(field_name="constituency_id", value="x", line_number=2,
error='Enter a whole number.'),
messages[0]
)
def test_bad_subconstituency_id_csv(self):
# subconstituency id should be able to be cast into an integer otherwise a
# parsing error will occur and a message indicating the line number
# where the error occurred will be presented to the user.
response = self.upload_csv(filename='bad_subconstituency_id.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(field_name="subconstituency_id", value="x", line_number=2,
error='Enter a whole number.'),
messages[0]
)
def test_just_one_latlong(self):
# Providing just one of lat, long is an error
response = self.upload_csv(filename='just_one_latlong.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(line_number=2,
error='Either set both latitude and longitude or neither'),
messages[0]
)
def test_invalid_lat(self):
# Invalid latitude
response = self.upload_csv(filename='invalid_lat.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='center_lat', value="1234",
error='Ensure that there are no more than 3 digits before the decimal'),
messages[0]
)
def test_nonexistent_office(self):
response = self.upload_csv(filename='nonexistent_office.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='office_id', value='22',
error='Office does not exist.'),
messages[0]
)
def test_nonexistent_constituency(self):
response = self.upload_csv(filename='nonexistent_constituency.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='constituency_id', value='22',
error='Constituency does not exist.'),
messages[0]
)
def test_nonexistent_subconstituency(self):
response = self.upload_csv(filename='nonexistent_subconstituency.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='subconstituency_id', value='22',
error='Subconstituency does not exist.'),
messages[0]
)
def test_blank_center_name(self):
response = self.upload_csv(filename='blank_center_name.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='name', value='',
error='This field is required.'),
messages[0]
)
def test_newline_in_center_name(self):
response = self.upload_csv(filename='newline_center_name.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='name', value='new\nline',
error='Newlines are not allowed.'),
messages[0]
)
def test_reg_open_field_set_to_true(self):
# The 'reg_open' field is not included in the CSV file.
# We should ensure that it is set to True (the model default)
response = self.upload_csv()
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 3)
for ecc in centers:
self.assertEqual(ecc.reg_open, True)
def test_simple_copy_center_ok(self):
# test that simple copy center creation works
RegistrationCenterFactory(center_id=70001)
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.COPY_OF_ID] = '70001'
csv[1][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['ar'][RegistrationCenter.Types.COPY]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(len(centers), 2)
self.assertEqual(centers[0].copy_of, centers[1])
self.assertEqual(list(centers[1].copied_by.all()), [centers[0]])
def test_copy_center_same_file_reference_ok(self):
# test that a copy center can reference an original created in the same file
csv = get_copy_center_base_csv()
# Duplicate the data row and make row the 2nd data row refer to the first.
csv.append(csv[1][::])
csv[2][CSVColumnConstants.CENTER_ID] = '70002'
csv[2][CSVColumnConstants.COPY_OF_ID] = '70000'
csv[2][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['ar'][RegistrationCenter.Types.COPY]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(len(centers), 2)
self.assertEqual(centers[1].copy_of, centers[0])
self.assertEqual(list(centers[0].copied_by.all()), [centers[1]])
def test_copy_center_failed_reference(self):
# test that one can't create a copy center that refers to a non-existent center.
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.COPY_OF_ID] = '70001'
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
# Due to error, no centers were created
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='copy_of_id', value='70001',
error='Copy centre does not exist.'),
messages[0]
)
def test_copy_center_read_only(self):
# test that copy centers are read only
original_center = RegistrationCenterFactory(center_id=70000)
copy_center = RegistrationCenterFactory(center_id=70001)
copy_center.copy_of = original_center
copy_center.save()
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.CENTER_ID] = '70001'
csv[1][CSVColumnConstants.NAME] = 'different_name_to_trigger_an_attempt_to_edit'
csv[1][CSVColumnConstants.COPY_OF_ID] = '70000'
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertListEqual([center.center_id for center in centers], [70000, 70001])
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(
line_number=2, error='Copy centres are read-only.'),
messages[0]
)
def test_existing_center_cant_become_copy_center(self):
# test that an existing center can't be turned into a copy center.
RegistrationCenterFactory(center_id=70000)
RegistrationCenterFactory(center_id=70001)
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.COPY_OF_ID] = '70001'
csv[1][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['en'][RegistrationCenter.Types.COPY]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
# No new centers should have been created
centers = RegistrationCenter.objects.all()
self.assertListEqual([center.center_id for center in centers], [70000, 70001])
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(
line_number=2, error='A centre may not be changed to a copy centre.'),
messages[0]
)
def test_existing_center_must_remain_copy_center(self):
# test that an existing copy center can't become a non-copy center.
original_center = RegistrationCenterFactory(center_id=70000)
copy_center = RegistrationCenterFactory(center_id=70001)
copy_center.copy_of = original_center
copy_center.save()
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.CENTER_ID] = '70001'
csv[1][CSVColumnConstants.COPY_OF_ID] = ''
csv[1][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['en'][RegistrationCenter.Types.GENERAL]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
# No new centers should have been created
centers = RegistrationCenter.objects.all()
self.assertListEqual([center.center_id for center in centers], [70000, 70001])
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(
line_number=2, error='Copy centres are read-only.'),
messages[0]
)
def test_center_type_valid(self):
# In the CSV file, 'center_type' is an arabic string field. We should
# parse it and convert to a corresponding integer from RegistrationCenter.Types.CHOICES.
response = self.upload_csv(filename='valid_center_types.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 13)
# The first 6 centers in the test CSV have Arabic names. (At present we don't have have
# an Arabic translation for "Split" so there's no point in testing it.)
for i, center in enumerate(centers[:6]):
self.assertEqual(center.center_type, RegistrationCenter.Types.CHOICES[i][0])
# The last 7 centers in the test CSV have English names.
for i, center in enumerate(centers[6:]):
self.assertEqual(center.center_type, RegistrationCenter.Types.CHOICES[i][0])
def test_center_type_invalid(self):
# If we don't recognize the value in the 'center_type' field, then return an error.
response = self.upload_csv(filename='invalid_center_types.csv')
self.assertEqual(response.status_code, 200)
centers = RegistrationCenter.objects.all()
self.assertEqual(centers.count(), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_FIELD_ERROR.format(
line_number=2, field_name='center_type', value='invalid_center_type',
error='That is not a valid center_type'),
messages[0]
)
def test_center_type_copy_required_for_copy_centers(self):
# Copy centers must have the copy center type
RegistrationCenterFactory(center_id=70000)
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.CENTER_ID] = '70001'
csv[1][CSVColumnConstants.COPY_OF_ID] = '70000'
csv[1][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['ar'][RegistrationCenter.Types.OIL]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
# No new centers should have been created
centers = RegistrationCenter.objects.all()
self.assertListEqual([center.center_id for center in centers], [70000])
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(
line_number=2, error='Copy centre type must be "copy".'),
messages[0]
)
def test_center_type_copy_rejected_for_noncopy_centers(self):
# Non-copy centers may not have the copy center type
csv = get_copy_center_base_csv()
csv[1][CSVColumnConstants.CENTER_TYPE] = \
RegistrationCenter.Types.NAMES['ar'][RegistrationCenter.Types.COPY]
response = self.upload_csv(filename=self.write_csv(csv))
self.assertEqual(response.status_code, 200)
# No new centers should have been created
centers = RegistrationCenter.objects.all()
self.assertEqual(len(centers), 0)
messages = self.get_messages(response)
self.assertIn(
utils.FORM_ERROR.format(
line_number=2, error='Centre type "copy" requires copy centre information.'),
messages[0]
)
| [
"[email protected]"
] | |
33f88b3804973bb17c410c2bdf24456d89324c34 | 9bcfbdf23c9ac156e0cdf5b5b5e06f18a1ad6fae | /pre_code/stock/xueqiuPawer.py | 1a9ff34bb4d43402fbd96230452d2828ff831e48 | [] | no_license | haoson888/vnpy_future | a7576513b7ecf50c36d730c647263c6d1e44f3a6 | 89df2d5079a2e6d3782531369675248e38b2ff00 | refs/heads/origin | 2020-09-04T15:04:55.368725 | 2017-12-10T10:37:47 | 2017-12-10T10:37:47 | 219,762,816 | 0 | 1 | null | 2019-11-05T15:12:32 | 2019-11-05T14:18:18 | null | UTF-8 | Python | false | false | 6,873 | py | #coding: utf-8
from selenium import webdriver
import re
from urllib import request as urllib2
import sys
import os
from datetime import datetime
from datetime import timedelta
from datetime import date
import xlrd
from xlrd import open_workbook
from xlutils.copy import copy
#import nltk
import time
description_id = 1
#windows
#browser = webdriver.Chrome(executable_path='F:\chromedriver_win32\chromedriver.exe')
#mac
browser = webdriver.Chrome(executable_path='/Users/daitechang/Documents/stock/chromedriver')
def start(url, d, today, vstock):
# try:
global description_id
global browser
url = url
try:
browser.get(url)
t = browser.page_source
pn = re.compile(r'(.*)"statuses":(.*?)}]', re.S)
match = pn.match(t)
if not match:
# browser.close()
# browser.quit()
return 0
result = match.group(2)
result = result + '}]'
decode = json.loads(result)
startDetect = time.time()
st = int(time.mktime(datetime.strptime(datetime.strftime(today, "%Y-%m-%d"), "%Y-%m-%d").timetuple()))
ed = int(time.mktime(datetime.strptime(datetime.strftime(today + timedelta(days = 1), "%Y-%m-%d"), "%Y-%m-%d").timetuple()))
st = str(st) + '000'
print(st)
ed = str(ed) + '000'
print(ed)
s_today = datetime.strftime(today, "%Y-%m-%d")
for i in range(len(vstock)):
for item in decode:
if item['mark'] == 1:
continue
#print item['created_at'], st, ed
#print item['description'].encode('utf-8'), vstock[i]._name
if str(item['created_at']) > st and str(item['created_at']) < ed:
if item['text'].encode('utf-8').find(vstock[i]._name) != -1:
                        ff = open('corpus/' + s_today + '_' + str(description_id) + '.txt', 'w', encoding='utf-8')
                        ff.write(item['text'])
ff.close()
description_id += 1
#print vstock[i]._name, item['description'].encode('utf-8')
                        if i in d:
d[i] = d[i] + 1
else:
d[i] = 1
elif str(item['created_at']) < st and i == len(vstock) -1:
#print 1
# browser.close()
# browser.quit()
#if i == len(vstock) -1:
return 0
#print array[0], array[1]
# print decode[0]['description'].encode('utf-8')
# browser.close()
# browser.quit()
return 1
except Exception as e:
print(e)
# browser.close()
# browser.quit()
return 0
import json
#获取热门用户列表
def get_id():
f = open('id.txt', 'w')
for i in range(25):
url = 'http://xueqiu.com/recommend/user/industry.json?detail=1&index=' + str(i)
#browser.get(url)
#t = browser.page_source
print(url)
# print t.encode('utf-8')
cookie = '''s=10ht15dh2y; xq_a_token=5e47e2777e3b08d99725fe0f9f78815eb1cb8374; xqat=5e47e2777e3b08d99725fe0f9f78815eb1cb8374; xq_r_token=c38fedb2680c6b923eb4c87f16ebf19f574c3eca; xq_is_login=1; u=6585534947; xq_token_expire=Sun%20Nov%2015%202015%2009%3A14%3A02%20GMT%2B0800%20(CST); bid=73fe343eeb79fd513ae47464f938acf9_ig040t46; snbim_minify=true; __utmt=1; __utma=1.2082135748.1445390046.1445497172.1445504051.8; __utmb=1.14.10.1445504051; __utmc=1; __utmz=1.1445390046.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); Hm_lvt_1db88642e346389874251b5a1eded6e3=1445390044; Hm_lpvt_1db88642e346389874251b5a1eded6e3=1445506132'''
headers = {"User-Agent":"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6","Cookie":cookie }
req = urllib2.Request( url, headers = headers)
try:
content = urllib2.urlopen(req).read()
except Exception as e:
print(e)
#return
js = json.loads(content)
#print js
industries = js['industries']
#print industries
if industries:
for industry in industries:
for user in industry['users']:
                    print(user['id'], user['screen_name'])
                    f.write(str(user['id']) + ' ' + user['screen_name'] + '\n')
#f.write(p[0].get('value').encode('utf-8') + ' ' + p[1].get('value').encode('utf-8') + '\n')
class stock:
_id = ''
_name = ''
_industry = ''
def __init__(self, id, name, industry):
self._id = id
self._name = name
self._industry = industry
def pawner(day, t2):
today = date.today()
delta = -1
os.mkdir('corpus')
while 1:
f = open('id.txt', 'r')
delta += 1
if delta >= t2:
break
yesterday1 = today - timedelta(days = day - delta)
yesterday = datetime.strftime(yesterday1, "%Y-%m-%d")
score_file = 'score' + yesterday + '.txt'
industry_file = 'industry' + yesterday + '.txt'
#ff = open('score' + yesterday + '.txt', 'r')
d = {}
        print(score_file)
vstock = []
#ff = open('stock.txt', 'r')
wb = xlrd.open_workbook('stock.xls')
sh = wb.sheet_by_name('stock')
for rownum in range(sh.nrows):
if rownum < 2:
continue
s = stock(str(sh.cell(rownum, 0).value), sh.cell(rownum, 1).value.encode('utf-8'), sh.cell(rownum, 2).value.encode('utf-8'))
vstock.append(s)
print(len(vstock))
print(repr(vstock[0]._name))
while 1:
try:
line = f.readline()
# user = str(i)
if not line:
break
array = line[:-1].split(' ')
user = array[0]
print(array[0], array[1])
#user = "1676206424"
page = 1
while 1:
url = "http://xueqiu.com/" + user + "?page=" + str(page)
ret = start(url, d, yesterday1, vstock)
if ret == 0:
#print i
break
page = page + 1
time.sleep(2)
except Exception as e:
print(e)
continue
#break
#i = i + 1
#if i >=9999999999:
# break
f.close()
ff = open(score_file, 'w')
industry_p = open(industry_file, 'w')
rb = open_workbook('stock.xls')
rs = rb.sheet_by_name('stock')
wb = copy(rb)
ws = wb.get_sheet(0)
ncol = rs.ncols
ws.write(1, ncol, yesterday)
industry_d = {}
        t = sorted(d.items(), key=lambda x: x[1], reverse=True)
for key in t:
print(str(vstock[key[0]]._name) + '%' + str(vstock[key[0]]._industry) + '%'+ str(key[1]) + '\n')
ff.write(str(vstock[key[0]]._name) + '%' + str(vstock[key[0]]._industry) + '%'+ str(key[1]) + '\n')
            if vstock[key[0]]._industry in industry_d:
industry_d[vstock[key[0]]._industry] += 1
else:
industry_d[vstock[key[0]]._industry] = 1
ws.write(key[0] + 2, ncol, key[1])
        t = sorted(industry_d.items(), key=lambda x: x[1], reverse=True)
for key in t:
print(str(key[0]) + '%' + str(key[1]) + '\n')
industry_p.write(str(key[0]) + '%' + str(key[1]) + '\n')
print(industry_d)
wb.save('stock.xls')
browser.close()
browser.quit()
# timer = threading.Timer(7200, pawner)
# timer.start()
if __name__ == "__main__":
#nltk.download()
#negids = movie_reviews.fileids('neg')
#posids = movie_reviews.fileids('pos')
#print 1
## timer = threading.Timer(7200, pawner)
# timer.start()
t = int(sys.argv[1])
t2 = int(sys.argv[2])
#get_id()
pawner(t, t2)
| [
"[email protected]"
] | |
291f1107e0a99ce49de7bd1a42bab6e7fa9b9073 | ffae55f50f9eb0ae028d9f46cebea565f3700585 | /18/VAJets/PKUTreeMaker/test/CrabJobsSrc/MC/crab3_analysisWZ_v1.py | 7d3a2cd976ff0802af00a8aafe4ae252256a8d2a | [] | no_license | JINGFFF/test | 57a92eb2c3143bcfa5776fc87d3ff16ff7cdc04b | d48c2be6387dfaff3eb37e28ff116c91c3eaf67e | refs/heads/master | 2021-02-06T21:00:52.184508 | 2020-04-26T04:35:04 | 2020-04-26T04:35:04 | 243,942,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | from WMCore.Configuration import Configuration
config = Configuration()
config.section_("General")
config.General.requestName = 'WZ_v1_2'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.maxMemoryMB = 3000
config.JobType.inputFiles = ['Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt']
config.JobType.psetName = 'analysis_mc.py'
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
##config.Data.outputPrimaryDataset = 'VBS_WGAMMA_94X'
config.Data.inputDataset = '/WZ_TuneCUETP8M1_13TeV-pythia8/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3_ext1-v2/MINIAODSIM'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 2
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outputDatasetTag = 'WZ_v1_2'
config.section_("Site")
config.Site.storageSite = 'T2_CN_Beijing'
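# Illustrative submission (standard CRAB3 CLI usage, assumed rather than taken
# from this repository):  crab submit -c crab3_analysisWZ_v1.py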
| [
"[email protected]"
] | |
5396d59485edcffb1060921d5fc348209d891fe0 | b13a326c8aac68f72c71169187a4aa8d4fe1438f | /environment/envs/icra.py | eaa3aafeecc909022ff8d9a459423e63e37e2866 | [] | no_license | zy10zm/Pulsar | 9f1d9abdf90d94e80c6dba2a02630bfe4b4e2115 | 714ee2d78577e59077af7c0f890e639879490eb8 | refs/heads/master | 2023-02-22T20:26:42.995175 | 2021-01-23T04:35:38 | 2021-01-23T04:35:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,882 | py | import os, sys
import numpy as np
import logging
from copy import deepcopy
from mujoco_worldgen import Floor
from mujoco_worldgen import ObjFromXML
from mujoco_worldgen.util.sim_funcs import qpos_idxs_from_joint_prefix, qvel_idxs_from_joint_prefix
from environment.worldgen.battlefield import Battlefield
from environment.worldgen.builder import WorldBuilder
from environment.worldgen.core import WorldParams
from environment.worldgen.env import Env
from environment.module.agents import Agents
from environment.wrappers.util_w import DiscardMujocoExceptionEpisodes, DiscretizeActionWrapper, AddConstantObservationsWrapper, ConcatenateObsWrapper
from environment.wrappers.lidar import Lidar
from environment.wrappers.multi_agent import SplitMultiAgentActions, SplitObservations, SelectKeysWrapper
from environment.wrappers.line_of_sight import AgentAgentObsMask2D
from environment.wrappers.buff import BuffWrapper
from environment.wrappers.collision import CollisionWrapper
from environment.wrappers.health import HealthWrapper
from environment.wrappers.prep import PrepWrapper
from environment.wrappers.projectile import ProjectileWrapper
from environment.wrappers.outcome import OutcomeWrapper
from environment.wrappers.no_enter_zone import NoEnterZoneWrapper
from environment.objects.lidarsites import LidarSites
class IcraBase(Env):
'''
Icra base environment.
Args:
horizon (int): Number of steps agent gets to act
n_substeps (int): Number of mujoco simulation steps per outer environment time-step
n_agents (int): number of agents in the environment
mjco_ts (float): seconds for one mujoco simulation step
action_lims (float tuple): lower and upper limit of mujoco actions
deterministic_mode (bool): if True, seeds are incremented rather than randomly sampled.
meshdir (string): directory for meshes
texturedir (string): directory for textures
set_action (function): function for setting actions
env_no (int): number for environment file
'''
def __init__(self, horizon=250, n_substeps=3, n_agents=2, mjco_ts=0.002,
action_lims=(-200.0, 200.0), deterministic_mode=False,
meshdir="assets/stls", texturedir="assets/texture",
set_action=None,
env_no=1, **kwargs):
super().__init__(get_sim=self._get_sim,
get_obs=self._get_obs,
action_space=tuple(action_lims),
horizon=horizon,
set_action=set_action,
deterministic_mode=deterministic_mode)
self.env_no = env_no
self.mjco_ts = mjco_ts
self.n_agents = n_agents
self.metadata['n_actors'] = n_agents
self.horizon = horizon
self.n_substeps = n_substeps
self.kwargs = kwargs
self.modules = []
self.meshdir = meshdir
self.texturedir = texturedir
self.placement_size = (8080, 4480)
def add_module(self, module):
self.modules.append(module)
def _get_obs(self, sim):
'''
Loops through modules, calls their observation_step functions, and
adds the result to the observation dictionary.
'''
obs = {}
for module in self.modules:
obs.update(module.observation_step(self, self.sim))
return obs
def _get_sim(self, seed):
'''
Calls build_world_step and then modify_sim_step for each module. If
a build_world_step failed, then restarts.
'''
world_params = WorldParams(size=(self.placement_size[0], self.placement_size[1], 100),
num_substeps=self.n_substeps)
successful_placement = False
failures = 0
while not successful_placement:
if (failures + 1) % 10 == 0:
logging.warning(f"Failed {failures} times in creating environment")
builder = WorldBuilder(world_params, self.meshdir, self.texturedir, seed, env_no=self.env_no)
battlefield = Battlefield()
builder.append(battlefield)
self.placement_grid = np.zeros((self.placement_size[0], self.placement_size[1]))
successful_placement = np.all([module.build_world_step(self, battlefield, self.placement_size)
for module in self.modules])
failures += 1
sim = builder.get_sim()
for module in self.modules:
module.modify_sim_step(self, sim)
return sim
def get_ts(self):
return self.t
def get_horizon(self):
return self.horizon
def secs_to_steps(self, secs):
return int(secs / (self.mjco_ts * self.n_substeps))
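    # Example (a sketch, using the values make_env() below passes in:
    # mjco_ts=0.002, n_substeps=1): secs_to_steps(0.02) == int(0.02 / 0.002) == 10.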
def make_env(deterministic_mode=False, n_agents=4, env_no=1, add_bullets_visual=False):
'''
    Response time = 0.02 seconds
    Game time = 180 seconds
    Decisions = 180 / 0.02 = 9000
    Seconds per simulated step = 0.002 seconds
    Simulated steps per decision = 0.02 / 0.002 = 10
    Total steps (horizon) = 9000 * 10 = 90000
    Simulated seconds per run = 90000 * 0.002 = 180 seconds
'''
mjco_ts = 0.002
n_substeps = 1
horizon = 90000
# Setup action functions
motor_trans_max, motor_forw_max, motor_z_max = 2000.0, 3000.0, 47123.9
action_scale = (motor_trans_max, motor_forw_max, motor_z_max)
action_lims = (-1.0, 1.0)
def icra_ctrl_set_action(sim, action):
"""
For velocity actuators it copies the action into mujoco ctrl field.
"""
if sim.model.nmocap > 0:
_, action = np.split(action, (sim.model.nmocap * 7, ))
if sim.data.ctrl is not None:
for a_idx in range(n_agents):
for as_idx in range(3):
sim.data.ctrl[a_idx*3 + as_idx] = action[a_idx*3 + as_idx] * action_scale[as_idx]
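        # e.g. with n_agents=4 the flat action vector is laid out as
        # [a0_trans, a0_forw, a0_z, a1_trans, a1_forw, a1_z, ...]; each entry
        # lies in action_lims = (-1, 1) and is rescaled by action_scale above.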
# Create base environment for battlefield
env = IcraBase(n_agents=n_agents,
n_substeps=n_substeps,
horizon=horizon,
mjco_ts=mjco_ts,
action_lims=action_lims,
deterministic_mode=deterministic_mode,
env_no=env_no,
set_action=icra_ctrl_set_action,
meshdir=os.path.join(os.getcwd(), "environment", "assets", "stls"),
texturedir=os.path.join(os.getcwd(), "environment", "assets", "textures"))
# Add bullets just for visualization
nbullets = 25
env.add_module(Agents(n_agents, action_scale=action_scale, add_bullets_visual=add_bullets_visual, nbullets=nbullets))
env.reset()
# PrepWrapper must always be on-top
env = PrepWrapper(env)
env = BuffWrapper(env)
env = CollisionWrapper(env)
env = ProjectileWrapper(env, add_bullets_visual, nbullets)
env = NoEnterZoneWrapper(env)
# OutcomeWrapper must always be lowest, after HealthWrapper
env = HealthWrapper(env)
env = OutcomeWrapper(env)
keys_self = ['agent_qpos_qvel']
global_obs = ['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'Agent:buff', 'colli_dmg',
'proj_dmg', 'nprojectiles', 'agents_health', 'agent_teams',
'agent_local_qvel']
keys_external = deepcopy(global_obs)
keys_copy = deepcopy(global_obs)
keys_mask_self = []
keys_mask_external = []
env = SplitMultiAgentActions(env)
#env = DiscretizeActionWrapper(env, 'action_movement')
env = SplitObservations(env, keys_self + keys_mask_self, keys_copy=keys_copy)
env = DiscardMujocoExceptionEpisodes(env)
env = SelectKeysWrapper(env, keys_self=keys_self,
keys_external=keys_external,
keys_mask=keys_mask_self + keys_mask_external,
flatten=False)
return env
| [
"[email protected]"
] | |
38cce542df0415d2d792a37b8355ec7ce0f789d3 | 9e2d467de2d665f41dc94799f0acb98479571922 | /_error.py | cf399a395d6e832d683a0de18251cbd067d4a2f6 | [] | no_license | pytsite/plugin-geo_ip | c63ecd12c95004c05fdae76b20a9343b52fb923f | db71e67651eb57b6ca76136d0014eaadf2cb6ffb | refs/heads/master | 2021-10-23T09:29:32.580289 | 2019-03-16T22:04:28 | 2019-03-16T22:04:28 | 112,030,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | """PytSite GeoIP Errors.
"""
__author__ = 'Oleksandr Shepetko'
__email__ = '[email protected]'
__license__ = 'MIT'
class ResolveError(Exception):
pass
| [
"[email protected]"
] | |
eb3a8d5c498c7474673b63e103c93f49315218fa | 3ff9821b1984417a83a75c7d186da9228e13ead9 | /No_0122_Best Time to Buy and Sell Stock II/by_dynamic_programming.py | 5874db8b00a7a87dcea7b16d8be839baf34edc99 | [
"MIT"
] | permissive | brianchiang-tw/leetcode | fd4df1917daef403c48cb5a3f5834579526ad0c2 | 6978acfb8cb767002cb953d02be68999845425f3 | refs/heads/master | 2023-06-11T00:44:01.423772 | 2023-06-01T03:52:00 | 2023-06-01T03:52:00 | 222,939,709 | 41 | 12 | null | null | null | null | UTF-8 | Python | false | false | 2,646 | py | '''
Description:
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete as many transactions as you like (i.e., buy one and sell one share of the stock multiple times).
Note: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).
Example 1:
Input: [7,1,5,3,6,4]
Output: 7
Explanation: Buy on day 2 (price = 1) and sell on day 3 (price = 5), profit = 5-1 = 4.
Then buy on day 4 (price = 3) and sell on day 5 (price = 6), profit = 6-3 = 3.
Example 2:
Input: [1,2,3,4,5]
Output: 4
Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.
Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are
engaging multiple transactions at the same time. You must sell before buying again.
Example 3:
Input: [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
'''
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
# It is impossible to sell stock on first day, set -infinity as initial value for cur_hold
cur_hold, cur_not_hold = -float('inf'), 0
for stock_price in prices:
prev_hold, prev_not_hold = cur_hold, cur_not_hold
# either keep hold, or buy in stock today at stock price
cur_hold = max( prev_hold, prev_not_hold - stock_price )
# either keep not-hold, or sell out stock today at stock price
cur_not_hold = max( prev_not_hold, prev_hold + stock_price )
# maximum profit must be in not-hold state
return cur_not_hold if prices else 0
# n : the length of input list, prices.
## Time Complexity: O( n )
#
#   The overhead in time is the cost of the for loop, which is O( n )
## Space Complexity: O( 1 )
#
#   The overhead in space is the storage for the loop index and temporary variables, which is O( 1 )
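# A hand-worked trace of the hold / not-hold transition above, using Example 1
# (illustrative, not part of the original solution):
#   prices = [7,1,5,3,6,4]
#   price 7: hold = max(-inf, 0-7) = -7,  not_hold = 0
#   price 1: hold = max(-7, 0-1)  = -1,   not_hold = 0
#   price 5: hold = -1,                   not_hold = max(0, -1+5) = 4
#   price 3: hold = max(-1, 4-3)  = 1,    not_hold = 4
#   price 6: hold = 1,                    not_hold = max(4, 1+6) = 7
#   price 4: hold = max(1, 7-4)   = 3,    not_hold = 7   -> returns 7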
from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'stock_sequence')
def test_bench():
test_data = [
TestEntry( stock_sequence = [7,1,5,3,6,4] ),
TestEntry( stock_sequence = [1,2,3,4,5] ),
TestEntry( stock_sequence = [7,6,4,3,1] ),
]
# expected output:
'''
7
4
0
'''
for t in test_data:
print( Solution().maxProfit( prices = t.stock_sequence) )
return
if __name__ == '__main__':
test_bench()
| [
"[email protected]"
] | |
a87804d2d25c07f79802384fc10580b459fae10e | 96dcea595e7c16cec07b3f649afd65f3660a0bad | /tests/components/media_source/test_local_source.py | bc637caab808964792012429bc925ee473d435b8 | [
"Apache-2.0"
] | permissive | home-assistant/core | 3455eac2e9d925c92d30178643b1aaccf3a6484f | 80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743 | refs/heads/dev | 2023-08-31T15:41:06.299469 | 2023-08-31T14:50:53 | 2023-08-31T14:50:53 | 12,888,993 | 35,501 | 20,617 | Apache-2.0 | 2023-09-14T21:50:15 | 2013-09-17T07:29:48 | Python | UTF-8 | Python | false | false | 10,965 | py | """Test Local Media Source."""
from collections.abc import AsyncGenerator
from http import HTTPStatus
import io
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest.mock import patch
import pytest
from homeassistant.components import media_source, websocket_api
from homeassistant.components.media_source import const
from homeassistant.config import async_process_ha_core_config
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockUser
from tests.typing import ClientSessionGenerator, WebSocketGenerator
@pytest.fixture
async def temp_dir(hass: HomeAssistant) -> AsyncGenerator[str, None]:
"""Return a temp dir."""
with TemporaryDirectory() as tmpdirname:
target_dir = Path(tmpdirname) / "another_subdir"
target_dir.mkdir()
await async_process_ha_core_config(
hass, {"media_dirs": {"test_dir": str(target_dir)}}
)
assert await async_setup_component(hass, const.DOMAIN, {})
yield str(target_dir)
async def test_async_browse_media(hass: HomeAssistant) -> None:
"""Test browse media."""
local_media = hass.config.path("media")
await async_process_ha_core_config(
hass, {"media_dirs": {"local": local_media, "recordings": local_media}}
)
await hass.async_block_till_done()
assert await async_setup_component(hass, const.DOMAIN, {})
await hass.async_block_till_done()
# Test path not exists
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/local/test/not/exist"
)
assert str(excinfo.value) == "Path does not exist."
# Test browse file
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/local/test.mp3"
)
assert str(excinfo.value) == "Path is not a directory."
# Test invalid base
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/invalid/base"
)
assert str(excinfo.value) == "Unknown source directory."
# Test directory traversal
with pytest.raises(media_source.BrowseError) as excinfo:
await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/local/../configuration.yaml"
)
assert str(excinfo.value) == "Invalid path."
# Test successful listing
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}"
)
assert media
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/local/."
)
assert media
media = await media_source.async_browse_media(
hass, f"{const.URI_SCHEME}{const.DOMAIN}/recordings/."
)
assert media
async def test_media_view(
hass: HomeAssistant, hass_client: ClientSessionGenerator
) -> None:
"""Test media view."""
local_media = hass.config.path("media")
await async_process_ha_core_config(
hass, {"media_dirs": {"local": local_media, "recordings": local_media}}
)
await hass.async_block_till_done()
assert await async_setup_component(hass, const.DOMAIN, {})
await hass.async_block_till_done()
client = await hass_client()
# Protects against non-existent files
resp = await client.get("/media/local/invalid.txt")
assert resp.status == HTTPStatus.NOT_FOUND
resp = await client.get("/media/recordings/invalid.txt")
assert resp.status == HTTPStatus.NOT_FOUND
# Protects against non-media files
resp = await client.get("/media/local/not_media.txt")
assert resp.status == HTTPStatus.NOT_FOUND
# Protects against unknown local media sources
resp = await client.get("/media/unknown_source/not_media.txt")
assert resp.status == HTTPStatus.NOT_FOUND
# Fetch available media
resp = await client.get("/media/local/test.mp3")
assert resp.status == HTTPStatus.OK
resp = await client.get("/media/local/Epic Sax Guy 10 Hours.mp4")
assert resp.status == HTTPStatus.OK
resp = await client.get("/media/recordings/test.mp3")
assert resp.status == HTTPStatus.OK
async def test_upload_view(
hass: HomeAssistant,
hass_client: ClientSessionGenerator,
temp_dir: str,
tmp_path: Path,
hass_admin_user: MockUser,
) -> None:
"""Allow uploading media."""
# We need a temp dir that's not under tempdir fixture
extra_media_dir = tmp_path
hass.config.media_dirs["another_path"] = temp_dir
img = (Path(__file__).parent.parent / "image_upload/logo.png").read_bytes()
def get_file(name):
pic = io.BytesIO(img)
pic.name = name
return pic
client = await hass_client()
# Test normal upload
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": "media-source://media_source/test_dir/.",
"file": get_file("logo.png"),
},
)
assert res.status == 200
assert (Path(temp_dir) / "logo.png").is_file()
# Test with bad media source ID
for bad_id in (
# Subdir doesn't exist
"media-source://media_source/test_dir/some-other-dir",
# Main dir doesn't exist
"media-source://media_source/test_dir2",
# Location is invalid
"media-source://media_source/test_dir/..",
# Domain != media_source
"media-source://nest/test_dir/.",
# Other directory
f"media-source://media_source/another_path///{extra_media_dir}/",
# Completely something else
"http://bla",
):
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": bad_id,
"file": get_file("bad-source-id.png"),
},
)
assert res.status == 400, bad_id
assert not (Path(temp_dir) / "bad-source-id.png").is_file()
# Test invalid POST data
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": "media-source://media_source/test_dir/.",
"file": get_file("invalid-data.png"),
"incorrect": "format",
},
)
assert res.status == 400
assert not (Path(temp_dir) / "invalid-data.png").is_file()
# Test invalid content type
text_file = io.BytesIO(b"Hello world")
text_file.name = "hello.txt"
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": "media-source://media_source/test_dir/.",
"file": text_file,
},
)
assert res.status == 400
assert not (Path(temp_dir) / "hello.txt").is_file()
# Test invalid filename
with patch(
"aiohttp.formdata.guess_filename", return_value="../invalid-filename.png"
):
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": "media-source://media_source/test_dir/.",
"file": get_file("../invalid-filename.png"),
},
)
assert res.status == 400
assert not (Path(temp_dir) / "../invalid-filename.png").is_file()
# Remove admin access
hass_admin_user.groups = []
res = await client.post(
"/api/media_source/local_source/upload",
data={
"media_content_id": "media-source://media_source/test_dir/.",
"file": get_file("no-admin-test.png"),
},
)
assert res.status == 401
assert not (Path(temp_dir) / "no-admin-test.png").is_file()
async def test_remove_file(
hass: HomeAssistant,
hass_ws_client: WebSocketGenerator,
temp_dir: str,
hass_admin_user: MockUser,
) -> None:
"""Allow uploading media."""
msg_count = 0
file_count = 0
def msgid():
nonlocal msg_count
msg_count += 1
return msg_count
def create_file():
nonlocal file_count
file_count += 1
to_delete_path = Path(temp_dir) / f"to_delete_{file_count}.txt"
to_delete_path.touch()
return to_delete_path
client = await hass_ws_client(hass)
to_delete = create_file()
await client.send_json(
{
"id": msgid(),
"type": "media_source/local_source/remove",
"media_content_id": f"media-source://media_source/test_dir/{to_delete.name}",
}
)
msg = await client.receive_json()
assert msg["success"]
assert not to_delete.exists()
# Test with bad media source ID
extra_id_file = create_file()
for bad_id, err in (
# Not exists
(
"media-source://media_source/test_dir/not_exist.txt",
websocket_api.ERR_NOT_FOUND,
),
# Only a dir
("media-source://media_source/test_dir", websocket_api.ERR_NOT_SUPPORTED),
# File with extra identifiers
(
f"media-source://media_source/test_dir/bla/../{extra_id_file.name}",
websocket_api.ERR_INVALID_FORMAT,
),
# Location is invalid
("media-source://media_source/test_dir/..", websocket_api.ERR_INVALID_FORMAT),
# Domain != media_source
("media-source://nest/test_dir/.", websocket_api.ERR_INVALID_FORMAT),
# Completely something else
("http://bla", websocket_api.ERR_INVALID_FORMAT),
):
await client.send_json(
{
"id": msgid(),
"type": "media_source/local_source/remove",
"media_content_id": bad_id,
}
)
msg = await client.receive_json()
assert not msg["success"]
assert msg["error"]["code"] == err
assert extra_id_file.exists()
# Test error deleting
to_delete_2 = create_file()
with patch("pathlib.Path.unlink", side_effect=OSError):
await client.send_json(
{
"id": msgid(),
"type": "media_source/local_source/remove",
"media_content_id": f"media-source://media_source/test_dir/{to_delete_2.name}",
}
)
msg = await client.receive_json()
assert not msg["success"]
assert msg["error"]["code"] == websocket_api.ERR_UNKNOWN_ERROR
# Test requires admin access
to_delete_3 = create_file()
hass_admin_user.groups = []
await client.send_json(
{
"id": msgid(),
"type": "media_source/local_source/remove",
"media_content_id": f"media-source://media_source/test_dir/{to_delete_3.name}",
}
)
msg = await client.receive_json()
assert not msg["success"]
assert to_delete_3.is_file()
| [
"[email protected]"
] | |
155e6f8d2612353259928900fac73b905ca32da0 | e5d8b15cbd899283d6ead4742334e997db06d6e0 | /web/config/settings/base.py | 37124bc82aab5552b2646ceca937c109e33f6676 | [] | no_license | Maliaotw/dashboard-django | 628d777d88b61dad7c3c551b72979b38c2065e15 | cabbc3e6e8156510dd4ba91ffe1066c9cb040eac | refs/heads/main | 2023-02-16T02:52:02.169754 | 2021-01-12T03:13:55 | 2021-01-12T03:13:55 | 289,612,737 | 0 | 0 | null | 2021-01-12T03:13:56 | 2020-08-23T04:05:36 | JavaScript | UTF-8 | Python | false | false | 5,390 | py | """
Django settings for web project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
from .conf import load_user_config
from pathlib import Path
CONFIG = load_user_config()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent.parent
# vsphere_monitor/
APPS_DIR = ROOT_DIR / "web"
# BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# PROJECT_DIR = os.path.dirname(BASE_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = CONFIG.DEBUG
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# SECURITY WARNING: keep the secret key used in production secret!
# from django.core.management.utils import get_random_secret_key
# get_random_secret_key()
SECRET_KEY = CONFIG.SECRET_KEY
ALLOWED_HOSTS = ['*']
# Application definition
DJANGO_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
# "django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
]
THIRD_PARTY_APPS = [
'rest_framework',
'django_filters',
'widget_tweaks',
]
LOCAL_APPS = [
'app.apps.AppConfig',
'common.apps.CommonConfig',
'authentication.apps.AuthenticationConfig',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
"DIRS": [str(APPS_DIR / "templates")],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
"NAME": str(APPS_DIR / "db.sqlite3"),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
str(APPS_DIR / "static"),
)
STATIC_ROOT = str(APPS_DIR / "data" / "static")
# Media files (File, ImageField) will be save these
MEDIA_URL = '/media/'
# MEDIA_ROOT = os.path.join(BASE_DIR, "data", 'media')
MEDIA_ROOT = str(APPS_DIR / "data" / "media")
LOGIN_URL = "/login/"
# SESSION
SESSION_COOKIE_AGE = 60 * 60  # session expires after 60 minutes (3600 seconds)
SESSION_EXPIRE_AT_BROWSER_CLOSE = True  # expire the session when the browser closes; this does not delete session rows from the database
SESSION_SAVE_EVERY_REQUEST = True  # save the session on every request
# LOGGING
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'normal': {
'format': '[%(levelname)s] %(asctime)s | %(name)s:%(lineno)d | %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler', # Default logs to stderr
'formatter': 'normal', # use the above "normal" formatter
}
},
'loggers': {
'': { # means "root logger"
'handlers': ['console'], # use the above "console" handler
'level': 'DEBUG', # logging level
},
},
}
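
# Example of a line produced by the 'normal' formatter above (illustrative):
#   import logging
#   logging.getLogger('app.views').debug('loaded')
#   # -> [DEBUG] 2021-01-12 03:13:55,123 | app.views:42 | loaded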
| [
"[email protected]"
] | |
eecdc90449ea3bbc47e90548ca8004f0872498f7 | ac03d9f3a8c2e6209940ae30900e9b2e32084dce | /main.py | 9ef8df5eafeff0357882459573d9ee1b460c71e4 | [
"Apache-2.0"
] | permissive | cls1991/github-projects-remover | 29f28e0a23b596a7e07b0c07b65092626b42de05 | d924100fedccbb0fd6e20365d4f4df98bf04b292 | refs/heads/master | 2022-12-11T12:31:59.498180 | 2019-10-23T14:22:14 | 2019-10-23T14:22:14 | 84,054,255 | 0 | 0 | Apache-2.0 | 2019-10-23T14:22:16 | 2017-03-06T09:25:51 | Python | UTF-8 | Python | false | false | 1,042 | py | # coding: utf8
import os
# Switch the working directory to the project root
project = os.path.split(os.path.realpath(__file__))[0]
os.chdir(project)
from core.github import GithubSample
if __name__ == '__main__':
gs = GithubSample('8709c9b9d01ec8e7388378c3992eff61aa7df813')
# pretty_print(gs.query_api_info())
# pretty_print(gs.query_user_info('cls1991'))
# pretty_print(gs.query_user_repos('cls1991'))
# print(gs.star_repo('torvalds', 'linux'))
"""
    Star the upstream source of every forked repo, then delete the fork; for personal use!
"""
user_repos = gs.query_user_repos('cls1991', page=1, per_page=50)
# pretty_print(user_repos)
for repo in user_repos:
if repo['fork']:
repo_info = gs.query_repo_info('cls1991', repo['name'])
if 'source' not in repo_info:
continue
status_code = gs.star_repo(repo_info['source']['owner']['login'], repo['name'])
print(status_code)
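            # GitHub's "star a repository" endpoint returns HTTP 204 (No Content)
            # on success, which is why 204 is treated as the success case below.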
if status_code == 204:
gs.remove_repo('cls1991', repo['name'])
| [
"[email protected]"
] | |
4bd4bebebcee12e2cf64dd1eacd1a163512bbeee | 78c76c8ec506080ff83edd7a3619a6b1e709a4e5 | /apps/courses/__init__.py | 3b3a526b16976686d4850ba0f61ebd17bc4992e1 | [] | no_license | wadayu/mxonline | dd0a08d21b858a49f2107974ba13b6e283a1f01f | 58e808b3415e51935c15b1e5f7b30461c879d861 | refs/heads/master | 2021-01-20T23:47:55.025272 | 2018-02-08T01:41:06 | 2018-02-08T01:41:06 | 101,853,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | #xadmin后台中文显示
default_app_config = 'courses.apps.CoursesConfig' | [
"[email protected]"
] | |
f9d8898f58752cd3781b1c1101eefbc33a20667c | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/perf/CascadeMaskRCNN_iflytek_for_PyTorch/configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py | 853289e67b4a5019eddfc1bbefb0b44e53dd49e2 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 3,714 | py | _base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
pretrained='torchvision://resnet101',
backbone=dict(depth=101),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
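                # NB: 'tempearture' matches the parameter name as spelled in
                # mmdetection's NormedLinear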
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
evaluation = dict(interval=24, metric=['bbox', 'segm'])
| [
"[email protected]"
] | |
3d825b0e036a2c4f6a56c755ea8fe0225bc2d1f8 | 6610ebe9141f00678851a6f068ec1e5458bf050c | /code/graph_keyboard.py | 19a6ffbf2f3e96351320d674a186a385b8d5dedc | [
"MIT"
] | permissive | iamrajee/bio_medical | e9cec1d9e12c04d87b893d0c12c92d3a1b8fb963 | 8d91cd3838f46685faa057f93f5d22f8e6c4187b | refs/heads/master | 2020-05-04T23:47:45.595827 | 2019-09-17T17:14:19 | 2019-09-17T17:14:19 | 179,555,562 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | import matplotlib.pyplot as plt
import networkx as nx
G=nx.Graph()
# G.add_nodes_from([1,2,3,4,5,6,7,8,9,0],key="A")
# # G.add_edges_from([(1,2),(2,3),(3,4),(5,8),(9,1),(2,3),(4,6),(8,2),(7,3)])
# G.add_weighted_edges_from([(1,2,1),(2,3,2),(3,4,3),(5,8,4),(9,1,5),(2,3,6),(4,6,7),(8,2,8),(7,3,9)])
# keyboard_config = [
# ('1','2','3','4','5','6','7','8','9','0'),
# ('q','w','e','r','t','y','u','i','o','p'),
# ('a','s','d','f','g','h','j','k','l'),
# ('z','x','c','v','b','n','m'),
# ('\t\tspace\t\t','backspace','enter','save')
# ]
keyboard_config = [
('1','2','3'),
('q','w','e'),
('a','s','d'),
]
for t in keyboard_config:
    # add every key in the row as a node
    G.add_nodes_from(t)
    # connect neighbouring keys in the row with weight-1 edges,
    # then close the row into a cycle (first key <-> last key)
    e = [(t[i], t[i + 1], 1) for i in range(len(t) - 1)]
    e.append((t[0], t[-1], 1))
    G.add_weighted_edges_from(e)

print(G.nodes(data=True))
nx.draw(G)
plt.show() | [
"[email protected]"
] | |
e234a8079711be2c3d06150ede58ce02370a578b | c75ec82316ed5322c5844912ce9c528c24360b9f | /nsd1905/devweb/myansible/webadmin/admin.py | dcb81958f29fbd102b6bac0302d385ab3985e950 | [] | no_license | MrZhangzhg/nsd2019 | a94cde22f2e4bd648bb9e56ca63827f558f3c083 | 54f6d2c7b348a69f13ad5f38f2fbdc8207528749 | refs/heads/master | 2021-08-22T17:38:27.697675 | 2020-02-22T08:36:21 | 2020-02-22T08:36:21 | 183,539,489 | 21 | 24 | null | 2020-05-17T12:07:55 | 2019-04-26T02:06:16 | HTML | UTF-8 | Python | false | false | 167 | py | from django.contrib import admin
from .models import HostGroup, Host, Module, Argument
for item in [HostGroup, Host, Module, Argument]:
admin.site.register(item)
| [
"[email protected]"
] | |
42a3440f5055af7250f46ea5da4734991bae627f | b30d7e28932056d69b3a3dba4e9c0c552ac19029 | /model_evaluation_utils.py | 2b1870d19748d7fd2e3cd4fd9a62a61b72ecc57d | [] | no_license | rsawin/transfer_learning | 19b9818a9f978ddc31080354bdc3538f32b870e3 | d2a28f7947f481f3466f425e3fd21011c0c91243 | refs/heads/master | 2021-07-08T23:43:55.211542 | 2020-08-03T21:33:47 | 2020-08-03T21:33:47 | 174,645,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,201 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 20:05:23 2017
@author: DIP
@Copyright: Dipanjan Sarkar
"""
from sklearn import metrics
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.base import clone
from sklearn.preprocessing import label_binarize
from scipy import interp
from sklearn.metrics import roc_curve, auc
def get_metrics(true_labels, predicted_labels):
print('Accuracy:', np.round(
metrics.accuracy_score(true_labels,
predicted_labels),
4))
print('Precision:', np.round(
metrics.precision_score(true_labels,
predicted_labels,
average='weighted'),
4))
print('Recall:', np.round(
metrics.recall_score(true_labels,
predicted_labels,
average='weighted'),
4))
print('F1 Score:', np.round(
metrics.f1_score(true_labels,
predicted_labels,
average='weighted'),
4))
def train_predict_model(classifier,
train_features, train_labels,
test_features, test_labels):
# build model
classifier.fit(train_features, train_labels)
# predict using model
predictions = classifier.predict(test_features)
return predictions
def display_confusion_matrix(true_labels, predicted_labels, classes=[1, 0]):
total_classes = len(classes)
level_labels = [total_classes * [0], list(range(total_classes))]
cm = metrics.confusion_matrix(y_true=true_labels, y_pred=predicted_labels,
labels=classes)
cm_frame = pd.DataFrame(data=cm,
columns=pd.MultiIndex(levels=[['Predicted:'], classes],
labels=level_labels),
index=pd.MultiIndex(levels=[['Actual:'], classes],
labels=level_labels))
print(cm_frame)
def display_classification_report(true_labels, predicted_labels, classes=[1, 0]):
report = metrics.classification_report(y_true=true_labels,
y_pred=predicted_labels,
labels=classes)
print(report)
def display_model_performance_metrics(true_labels, predicted_labels, classes=[1, 0]):
print('Model Performance metrics:')
print('-' * 30)
get_metrics(true_labels=true_labels, predicted_labels=predicted_labels)
print('\nModel Classification report:')
print('-' * 30)
display_classification_report(true_labels=true_labels, predicted_labels=predicted_labels,
classes=classes)
print('\nPrediction Confusion Matrix:')
print('-' * 30)
display_confusion_matrix(true_labels=true_labels, predicted_labels=predicted_labels,
classes=classes)
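# Minimal usage sketch (illustrative; the classifier and data splits are placeholders):
#   from sklearn.linear_model import LogisticRegression
#   preds = train_predict_model(LogisticRegression(), X_train, y_train, X_test, y_test)
#   display_model_performance_metrics(y_test, preds, classes=[1, 0])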
def plot_model_decision_surface(clf, train_features, train_labels,
plot_step=0.02, cmap=plt.cm.RdYlBu,
markers=None, alphas=None, colors=None):
if train_features.shape[1] != 2:
raise ValueError("X_train should have exactly 2 columnns!")
x_min, x_max = train_features[:, 0].min() - plot_step, train_features[:, 0].max() + plot_step
y_min, y_max = train_features[:, 1].min() - plot_step, train_features[:, 1].max() + plot_step
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
clf_est = clone(clf)
clf_est.fit(train_features, train_labels)
if hasattr(clf_est, 'predict_proba'):
Z = clf_est.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
else:
Z = clf_est.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
le = LabelEncoder()
y_enc = le.fit_transform(train_labels)
n_classes = len(le.classes_)
plot_colors = ''.join(colors) if colors else [None] * n_classes
label_names = le.classes_
markers = markers if markers else [None] * n_classes
alphas = alphas if alphas else [None] * n_classes
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y_enc == i)
plt.scatter(train_features[idx, 0], train_features[idx, 1], c=color,
label=label_names[i], cmap=cmap, edgecolors='black',
marker=markers[i], alpha=alphas[i])
plt.legend()
plt.show()
def plot_model_roc_curve(clf, features, true_labels, label_encoder=None, class_names=None):
## Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
if hasattr(clf, 'classes_'):
class_labels = clf.classes_
elif label_encoder:
class_labels = label_encoder.classes_
elif class_names:
class_labels = class_names
else:
raise ValueError('Unable to derive prediction classes, please specify class_names!')
n_classes = len(class_labels)
y_test = label_binarize(true_labels, classes=class_labels)
if n_classes == 2:
if hasattr(clf, 'predict_proba'):
prob = clf.predict_proba(features)
y_score = prob[:, prob.shape[1] - 1]
elif hasattr(clf, 'decision_function'):
prob = clf.decision_function(features)
y_score = prob[:, prob.shape[1] - 1]
else:
raise AttributeError("Estimator doesn't have a probability or confidence scoring system!")
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='ROC curve (area = {0:0.2f})'
''.format(roc_auc),
linewidth=2.5)
elif n_classes > 2:
if hasattr(clf, 'predict_proba'):
y_score = clf.predict_proba(features)
elif hasattr(clf, 'decision_function'):
y_score = clf.decision_function(features)
else:
raise AttributeError("Estimator doesn't have a probability or confidence scoring system!")
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
## Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
## Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
## Plot ROC curves
plt.figure(figsize=(6, 4))
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]), linewidth=3)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]), linewidth=3)
for i, label in enumerate(class_labels):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(label, roc_auc[i]),
linewidth=2, linestyle=':')
else:
        raise ValueError('Number of classes should be at least 2')
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend(loc="lower right")
plt.show()
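# Usage sketch (illustrative; assumes clf was fitted beforehand):
#   plot_model_roc_curve(clf, X_test, y_test)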
| [
"[email protected]"
] | |
03fea34e3c11916cfd291a566610c03c8d3de9fc | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17r_2_00/interface/ethernet/qos/flowcontrol/link_level_flowcontrol/__init__.py | 3960a6dbe85e3ab3c78e7385317b495bc048176a | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,751 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class link_level_flowcontrol(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/ethernet/qos/flowcontrol/link-level-flowcontrol. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__flowcontrol_tx','__flowcontrol_rx',)
_yang_name = 'link-level-flowcontrol'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__flowcontrol_tx = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-tx", rest_name="tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause generation', u'display-when': u'((/local-node/swbd-number = "3000") or (/local-node/swbd-number = "3001") or (/local-node/swbd-number = "163") or (/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001") or (/local-node/swbd-number = "4000"))', u'alt-name': u'tx', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
self.__flowcontrol_rx = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-rx", rest_name="rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause reception', u'alt-name': u'rx', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'ethernet', u'qos', u'flowcontrol', u'link-level-flowcontrol']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Ethernet', u'qos', u'flowcontrol']
def _get_flowcontrol_tx(self):
"""
Getter method for flowcontrol_tx, mapped from YANG variable /interface/ethernet/qos/flowcontrol/link_level_flowcontrol/flowcontrol_tx (enumeration)
"""
return self.__flowcontrol_tx
def _set_flowcontrol_tx(self, v, load=False):
"""
Setter method for flowcontrol_tx, mapped from YANG variable /interface/ethernet/qos/flowcontrol/link_level_flowcontrol/flowcontrol_tx (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_flowcontrol_tx is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flowcontrol_tx() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-tx", rest_name="tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause generation', u'display-when': u'((/local-node/swbd-number = "3000") or (/local-node/swbd-number = "3001") or (/local-node/swbd-number = "163") or (/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001") or (/local-node/swbd-number = "4000"))', u'alt-name': u'tx', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """flowcontrol_tx must be of a type compatible with enumeration""",
'defined-type': "brocade-qos-mls:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-tx", rest_name="tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause generation', u'display-when': u'((/local-node/swbd-number = "3000") or (/local-node/swbd-number = "3001") or (/local-node/swbd-number = "163") or (/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001") or (/local-node/swbd-number = "4000"))', u'alt-name': u'tx', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)""",
})
self.__flowcontrol_tx = t
if hasattr(self, '_set'):
self._set()
def _unset_flowcontrol_tx(self):
self.__flowcontrol_tx = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-tx", rest_name="tx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause generation', u'display-when': u'((/local-node/swbd-number = "3000") or (/local-node/swbd-number = "3001") or (/local-node/swbd-number = "163") or (/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001") or (/local-node/swbd-number = "4000"))', u'alt-name': u'tx', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
def _get_flowcontrol_rx(self):
"""
Getter method for flowcontrol_rx, mapped from YANG variable /interface/ethernet/qos/flowcontrol/link_level_flowcontrol/flowcontrol_rx (enumeration)
"""
return self.__flowcontrol_rx
def _set_flowcontrol_rx(self, v, load=False):
"""
Setter method for flowcontrol_rx, mapped from YANG variable /interface/ethernet/qos/flowcontrol/link_level_flowcontrol/flowcontrol_rx (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_flowcontrol_rx is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_flowcontrol_rx() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-rx", rest_name="rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause reception', u'alt-name': u'rx', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """flowcontrol_rx must be of a type compatible with enumeration""",
'defined-type': "brocade-qos-mls:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-rx", rest_name="rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause reception', u'alt-name': u'rx', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)""",
})
self.__flowcontrol_rx = t
if hasattr(self, '_set'):
self._set()
def _unset_flowcontrol_rx(self):
self.__flowcontrol_rx = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'on': {'value': 1}, u'off': {'value': 0}},), is_leaf=True, yang_name="flowcontrol-rx", rest_name="rx", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Pause reception', u'alt-name': u'rx', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos-mls', defining_module='brocade-qos-mls', yang_type='enumeration', is_config=True)
flowcontrol_tx = __builtin__.property(_get_flowcontrol_tx, _set_flowcontrol_tx)
flowcontrol_rx = __builtin__.property(_get_flowcontrol_rx, _set_flowcontrol_rx)
_pyangbind_elements = {'flowcontrol_tx': flowcontrol_tx, 'flowcontrol_rx': flowcontrol_rx, }
| [
"[email protected]"
] | |
69a1634d445e07945fcf4295399a9402133a27b5 | 2cf543b38f17b1fc7b9c20d19d2da7fde235abca | /docs/conf.py | e41b0aa24138b21a5d5e76f11cd9ab6762e0e5b7 | [
"MIT"
] | permissive | arvimal/ceph_check | 0de8b93462a8e1449b2cddbbe008ed256aa1a072 | 7e82b7838de247568e64ef84a0fcdaf20e6c1728 | refs/heads/master | 2020-04-16T02:17:36.186334 | 2017-12-25T16:45:34 | 2017-12-25T16:45:34 | 46,115,573 | 6 | 0 | null | 2017-09-07T09:20:02 | 2015-11-13T10:28:24 | Python | UTF-8 | Python | false | false | 8,414 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ceph_check documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import ceph_check
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ceph_check'
copyright = u"2017, Vimal A.R"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = ceph_check.__version__
# The full version, including alpha/beta/rc tags.
release = ceph_check.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ceph_checkdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'ceph_check.tex',
u'ceph_check Documentation',
u'Vimal A.R', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ceph_check',
u'ceph_check Documentation',
[u'Vimal A.R'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ceph_check',
u'ceph_check Documentation',
u'Vimal A.R',
'ceph_check',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"[email protected]"
] | |
111b0e3fdbd6beabd602738595a0fdf949089ff2 | b65cfcda05fd72350c7b9e11e5995cc1d10fdd75 | /shop/models.py | df6a76b04ed4d1f1473b36d641881c259f5e0b06 | [] | no_license | gauraviit1/pahal_project_2 | f4e6a2cf1cfd613088ad27344279460bb72c9786 | bc89c3848359ae0b95cb55c24d6fe24d637caabd | refs/heads/master | 2021-04-28T21:20:44.614522 | 2017-01-01T09:57:25 | 2017-01-01T09:57:25 | 77,773,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,480 | py | from django.db import models
from django.core.urlresolvers import reverse
from django.utils.functional import lazy
from PIL import Image
from django.contrib.postgres.fields import HStoreField
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from decimal import Decimal
# Create your models here.
class Cateogry(models.Model):
name = models.CharField(max_length=200, db_index=True)
slug = models.SlugField(db_index=True, unique=True)
class Meta:
ordering = ['name']
        verbose_name = 'category'
        verbose_name_plural = 'categories'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('shop:product_list_by_cateogry', args=[self.slug])
def save(self, *args, **kwargs):
for field_name in ['name',]:
val = getattr(self, field_name, False)
if val:
setattr(self, field_name, val.capitalize())
super(Cateogry, self).save(*args, **kwargs)
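    # Note (added): save() capitalizes each listed field before persisting,
    # so e.g. "shoes" is stored as "Shoes"; callers need not normalize input.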
class Product(models.Model):
cateogry = models.ForeignKey('Cateogry', related_name='products')
name = models.CharField(max_length=200, db_index=True)
slug = models.SlugField(max_length=200, db_index=True)
image = models.ImageField(upload_to="products/%Y/%m/%d", blank=True)
description = models.TextField(blank=True)
price = models.DecimalField(max_digits=10, decimal_places=2)
stock = models.PositiveIntegerField()
available = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
ordering = ['name']
index_together = [('id', 'slug')]
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('shop:product_detail', args=[self.id, self.slug])
class Attribute(models.Model):
product = models.ForeignKey('Product', related_name="patt")
weight = models.DecimalField(max_digits=7, decimal_places=3, blank=True, null=True)
waist_size = models.PositiveSmallIntegerField(blank=True, null=True)
size = models.CharField(max_length=2, blank=True, null=True)
def clean(self, *args, **kwargs):
super(Attribute, self).clean(*args, **kwargs)
if self.weight == Decimal('0.350'):
raise ValidationError({'weight': _('Cannot use this value')})
class Meta:
unique_together = ('product', 'weight')
| [
"[email protected]"
] | |
2c4ab74cda2680598623c66912579b5d2540ef70 | edf510cc5bbbe24469d8ff262c022b33b4d80a75 | /tacotron2/model/tacotron2.py | fafca0078fcb2bc687a7f48b30a31e19137b81ac | [
"Apache-2.0"
] | permissive | rheehot/Tacotron2 | e8b8a4be614708800b10b9fa7829264407510fa8 | ddbe55b426397d40cadd14f5040c55ba7c25615d | refs/heads/master | 2022-12-26T14:13:39.966498 | 2020-10-06T18:34:57 | 2020-10-06T18:34:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py | # -*- coding: utf-8 -*-
# Soohwan Kim @sooftware
# This source code is licensed under the Apache 2.0 License license found in the
# LICENSE file in the root directory of this source tree
import torch.nn as nn
from torch import Tensor
from typing import Optional
from tacotron2.model.encoder import Encoder
from tacotron2.model.decoder import Decoder
from tacotron2.model.postnet import PostNet
class Tacotron2(nn.Module):
""" Neural Speech-To-Text Models called Tacotron2 """
def __init__(self, args) -> None:
super(Tacotron2, self).__init__()
self.encoder = Encoder(
vocab_size=args.vocab_size,
embedding_dim=args.embedding_dim,
encoder_lstm_dim=args.encoder_lstm_dim,
num_lstm_layers=args.num_encoder_lstm_layers,
conv_dropout_p=args.conv_dropout_p,
num_conv_layers=args.num_encoder_conv_layers,
conv_kernel_size=args.encoder_conv_kernel_size,
lstm_bidirectional=args.encoder_lstm_bidirectional,
device=args.device
)
self.decoder = Decoder(
num_mel_bins=args.num_mel_bins,
prenet_dim=args.prenet_dim,
decoder_lstm_dim=args.decoder_lstm_dim,
attn_lstm_dim=args.attn_lstm_dim,
embedding_dim=args.embedding_dim,
attn_dim=args.attn_dim,
location_conv_filter_size=args.location_conv_filter_size,
location_conv_kernel_size=args.location_conv_kernel_size,
prenet_dropout_p=args.prenet_dropout_p,
attn_dropout_p=args.attn_dropout_p,
decoder_dropout_p=args.decoder_dropout_p,
max_decoding_step=args.max_decoding_step,
stop_threshold=args.stop_threshold
)
self.postnet = PostNet(
num_mel_bins=args.num_mel_bins,
postnet_dim=args.postnet_dim,
num_conv_layers=args.num_postnet_conv_layers,
kernel_size=args.postnet_conv_kernel_size,
dropout_p=args.postnet_dropout_p
)
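    # forward() below runs encoder -> attention decoder -> postnet; the
    # postnet output is added back onto the decoder mel spectrogram as a
    # residual correction (the standard Tacotron 2 arrangement).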
def forward(
self,
inputs: Tensor,
input_lengths: Optional[Tensor] = None,
targets: Optional[Tensor] = None,
teacher_forcing_ratio: float = 1.0
):
encoder_outputs = self.encoder(inputs, input_lengths)
decoder_outputs = self.decoder(encoder_outputs, targets, teacher_forcing_ratio)
postnet_outputs = self.postnet(decoder_outputs["mel_outputs"])
decoder_outputs["mel_outputs"] += postnet_outputs
return decoder_outputs
| [
"[email protected]"
] | |
d44cd5123695d6c48fef84f95857d085ddda8775 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/2D_20200722180003.py | f67079f7677c6a1e5d41c5b12f694fad5f417ffe | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py | def array(n,m):
    # n is the number of rows and m is the number of columns
    array = [[0 for _ in range(m)] for _ in range(n)]
print(array)
a = [[2, 4, 6, 8, 10], [3, 6, 9, 12, 15], [4, 8, 12, 16, 20]]
# the first argument is the row index and the second is the column index
print(a[0][3])
def hourGlass(arr):
    # given a 2D array, find the maximum "hourglass" sum
    # an hourglass is the 7-cell pattern:
    #   a b c
    #     d
    #   e f g
    # so each hourglass is identified by its top-left corner (i, j)
    maxCount = float("-inf")  # grid values may be negative, so don't seed with 1
    if arr != []:
        for i in range(len(arr)-2):
            # j loops over the columns of arr[i]
            for j in range(len(arr[i])-2):
                # top row + middle cell + bottom row
                totalCount = arr[i][j] + arr[i][j+1] + arr[i][j+2] + arr[i+1][j+1] + arr[i+2][j] + arr[i+2][j+1] + arr[i+2][j+2]
                if totalCount > maxCount:
                    maxCount = totalCount
        return maxCount
    else:
        return 0
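# With the corrected middle cell (arr[i+1][j+1]), the sample call below should
# print 19 -- the widely cited answer for this classic grid (stated as an
# expectation, not taken from the original source).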
print(hourGlass([[1,1,1,0,0,0],[0,1,0,0,0,0],[1,1,1,0,0,0],[0,0,2,4,4,0],[0,0,0,2,0,0],[0,0,1,2,4,0]])) | [
"[email protected]"
] | |
3fd59004b8a3ada46670dc8f08e82e5d397cce55 | b7b5f5b52f07b576a20e74839136d397f14d0566 | /main/admin.py | cd3bbcc3572a48186b2724bb94ba97c49bfe0e18 | [] | no_license | Chudische/Shabashka | 02d7e81cb2bd317b36e73620fc197868c4d65e1c | c3bab797601e8509439dc6538ec1f712755eb8c9 | refs/heads/main | 2023-07-08T07:54:04.044559 | 2021-08-18T13:40:44 | 2021-08-18T13:40:44 | 315,249,268 | 0 | 1 | null | 2021-08-10T06:42:42 | 2020-11-23T08:34:46 | Python | UTF-8 | Python | false | false | 4,510 | py | import datetime
from django.contrib import admin
from import_export.admin import ImportExportModelAdmin
from import_export import resources
from .models import ShaUser, SubCategory, SuperCategory, Offer, AdditionalImage, Comment, ShaUserAvatar
from .models import UserReview, ChatMessage, Location
from .utilities import send_activation_notification
from .forms import SubCategoryForm
def send_activation_notifications(modeladmin, request, queryset):
""" Sending a messages with activation notification"""
for rec in queryset:
if not rec.is_activated:
send_activation_notification(rec)
    modeladmin.message_user(request, "Письма с оповещением отправлены")  # "Notification emails sent" - once, after the loop
send_activation_notifications.short_description = 'Отправка писем с оповещением об активации'  # "Send activation notification emails"
class NonativatedFilter(admin.SimpleListFilter):
    title = 'Прошли активацию?'  # "Completed activation?"
    parameter_name = 'actstate'
    def lookups(self, request, model_admin):
        return (
            ("activated", "Прошли активацию"),        # activated
            ("threedays", "Не прошли более 3 дней"),  # not activated for over 3 days
            ("week", "Не прошли более недели")        # not activated for over a week
        )
def queryset(self, request, queryset):
if self.value() == 'activated':
return queryset.filter(is_active=True, is_activated=True)
if self.value() == 'threedays':
date = datetime.date.today() - datetime.timedelta(days=3)
return queryset.filter(is_active=False, is_activated=False, date_joined__date__lt=date)
if self.value() == 'week':
date = datetime.date.today() - datetime.timedelta(weeks=1)
return queryset.filter(is_active=False, is_activated=False, date_joined__date__lt=date)
class LocationInline(admin.TabularInline):
model = Location
class ShaUserAdmin(admin.ModelAdmin):
list_display = ('__str__', 'is_activated', 'date_joined')
search_fields = ('username', 'email', 'first_name', 'last_name')
list_filter = (NonativatedFilter, )
inlines = (LocationInline, )
fields = (('username', 'email'), ('first_name', 'last_name'), 'average_rating',
('send_message', 'is_active', 'is_activated'),
('is_staff', 'is_superuser'),
'groups', 'user_permissions',
('last_login', 'date_joined'),
'favorite')
readonly_fields = ('last_login', 'date_joined')
actions = (send_activation_notifications, )
class SubCategoryInline(admin.TabularInline):
model = SubCategory
class SuperCategoryAdmin(admin.ModelAdmin):
exclude = ('super_category',)
inlines = (SubCategoryInline,)
class SubCategoryAdmin(admin.ModelAdmin):
form = SubCategoryForm
class AdditionalImageInline(admin.TabularInline):
model = AdditionalImage
class OfferAdmin(admin.ModelAdmin):
list_display = ('category', 'title', 'content', 'winner','author', 'created', 'status')
fields = (('category', 'author', 'status', 'winner'), 'title', 'content', 'price', 'image', 'is_active')
inlines = (AdditionalImageInline, LocationInline,)
class CommentAdmin(admin.ModelAdmin):
list_display = ('offer', 'author', 'content', 'price', 'created', 'is_active')
fields = (('offer', 'author', 'created'), 'content', ('price', 'time_amount', 'measure'), 'is_active')
readonly_fields = ('created',)
class UserReviewAdmin(admin.ModelAdmin):
list_display = ('offer', 'author', 'reviewal', 'speed', 'cost', 'accuracy', 'content', 'created')
fields = (('offer', 'author', 'reviewal', 'created'), ('speed', 'cost', 'accuracy'), 'content')
readonly_fields = ('created',)
class ChatMessageAdmin(admin.ModelAdmin):
list_display = ('offer', 'author', 'receiver', 'content', 'created')
    fields = ('offer', ('author', 'receiver', 'created'), 'content')
readonly_fields = ('created',)
class LocationAdmin(admin.ModelAdmin):
list_display = ('search_id', 'name')
# Register your models here.
admin.site.register(ShaUser, ShaUserAdmin)
admin.site.register(SuperCategory, SuperCategoryAdmin)
admin.site.register(SubCategory, SubCategoryAdmin)
admin.site.register(Offer, OfferAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(ShaUserAvatar)
admin.site.register(UserReview, UserReviewAdmin)
admin.site.register(ChatMessage, ChatMessageAdmin)
admin.site.register(Location, LocationAdmin)
| [
"="
] | = |
93b4ff666a4c0dbc1475a16fb53d3a864ecec53d | 1e0ec4d34def6d1d31665551b4aecbb644323249 | /disambig_creation_constants.py | 2fca0360764a115a37e4d1aa2de947aad7ea4777 | [] | no_license | RheingoldRiver/leaguepedia_archive | e10615530846080446fa5a56ae2e570f9376f875 | 52703d4fb0fef2345353945044a78915d78688bf | refs/heads/master | 2022-06-19T21:37:47.480986 | 2022-06-01T18:44:32 | 2022-06-01T18:44:32 | 242,654,649 | 1 | 1 | null | 2021-12-15T20:07:19 | 2020-02-24T05:33:07 | Python | UTF-8 | Python | false | false | 928 | py | originalName = 'Limit'
irlName = 'Ju Min-gyu'
newName = '{} ({})'.format(originalName,irlName)
initmove = True
blankedit = False
limit = -1
timeoutLimit = 30
listplayerTemplates = ["listplayer", "listplayer/Current"]
rosterTemplates = ["ExtendedRosterLine", "ExtendedRosterLine/MultipleRoles"]
scoreboardTemplates = ["MatchRecap/Player", "MatchRecapS4/Player",
"MatchRecapS5/Player", "MatchRecapS6/Player",
"MatchRecapS7/Player", "MatchRecapS8/Player",
"MatchRecapS6NoSMW/Player", "MatchRecapS7NoKeystones/Player"]
statTemplates = ["IPS","CareerPlayerStats","MatchHistoryPlayer"]
rosterChangeTemplates = ["RosterChangeLine","RosterRumorLine2"]
summary = "Disambiguating {} to {}".format(originalName, newName)
cssStyle = "{\n color:orange!important;\n font-weight:bold;\n}"
origNameLC = originalName[0].lower() + originalName[1:]
newNameLC = newName[0].lower() + newName[1:]
blankEditThese = [] | [
"[email protected]"
] | |
797e2d7a43e4b15dea8e59a7e042f26e1eb14caf | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/util/bin/format/pdb2/pdbreader/symbol/LocalData32MsSymbol.pyi | aede84acc0cf9dc7b6e4bd9ac3bccadd11448bf2 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | pyi | import ghidra.app.util.bin.format.pdb2.pdbreader
import ghidra.app.util.bin.format.pdb2.pdbreader.symbol
import java.lang
class LocalData32MsSymbol(ghidra.app.util.bin.format.pdb2.pdbreader.symbol.AbstractLocalDataMsSymbol):
PDB_ID: int = 4364
def __init__(self, __a0: ghidra.app.util.bin.format.pdb2.pdbreader.AbstractPdb, __a1: ghidra.app.util.bin.format.pdb2.pdbreader.PdbByteReader): ...
def emit(self, __a0: java.lang.StringBuilder) -> None: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getName(self) -> unicode: ...
def getOffset(self) -> long: ...
def getPdbId(self) -> int: ...
def getSegment(self) -> int: ...
def getTypeRecordNumber(self) -> ghidra.app.util.bin.format.pdb2.pdbreader.RecordNumber: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def pdbId(self) -> int: ...
| [
"[email protected]"
] | |
b6f56697fb41c5e23e58b13a4e63f3780c4b41ea | db338cf7720a0ecbf181f7077b0dcf22b499d822 | /src/mobot_client/migrations/0003_auto_20210902_2106.py | 0f96c067c7d54e446c9525c7deab02ba86fffdfe | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | isabella232/mobot | 94a7e33755cdf3b1916b6642ee7dc9bdfdebf84d | 8a1fc884351211b4730e7de1c0bad1e18a1b1c8f | refs/heads/main | 2023-08-31T17:00:21.341368 | 2021-09-16T00:55:35 | 2021-09-16T04:49:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,800 | py | # Generated by Django 3.2.7 on 2021-09-02 21:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mobot_client', '0002_auto_20210902_2053'),
]
operations = [
migrations.AlterField(
model_name='bonuscoin',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='chatbotsettings',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='customer',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='customerdroprefunds',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='customerstorepreferences',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='drop',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='dropsession',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='item',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='message',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='order',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='sku',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='store',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| [
"[email protected]"
] | |
2c615eeec86ee49817a3513724374a206511e132 | 060fbf2a69a90ad92de5fc877521d5ea6b298007 | /test/vanilla/Expected/AcceptanceTests/BodyComplex/bodycomplex/models/double_wrapper.py | 598e2b460d799b8c9b576803570caa93bfc99961 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | iscai-msft/autorest.python | db47a8f00253148fbc327fe0ae1b0f7921b397c6 | a9f38dd762fbc046ce6197bfabea2f56045d2957 | refs/heads/master | 2021-08-02T13:06:34.768117 | 2018-11-21T00:29:31 | 2018-11-21T00:29:31 | 161,554,205 | 0 | 0 | MIT | 2018-12-12T22:42:14 | 2018-12-12T22:42:14 | null | UTF-8 | Python | false | false | 1,552 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DoubleWrapper(Model):
"""DoubleWrapper.
:param field1:
:type field1: float
:param
field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose:
:type
field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose:
float
"""
_attribute_map = {
'field1': {'key': 'field1', 'type': 'float'},
'field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose': {'key': 'field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose', 'type': 'float'},
}
def __init__(self, **kwargs):
super(DoubleWrapper, self).__init__(**kwargs)
self.field1 = kwargs.get('field1', None)
self.field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose = kwargs.get('field_56_zeros_after_the_dot_and_negative_zero_before_dot_and_this_is_a_long_field_name_on_purpose', None)
| [
"[email protected]"
] | |
b1dc65782f757d291f0b3c8796390124c41932ae | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_204/285.py | 05697ee8e397c0a3ab1f3fbfedb01bf8d507a112 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | from __future__ import print_function, division
from math import ceil, floor
import numpy as np
# Fernando Gonzalez del Cueto. Code Jam 2017
#infile = 'test2.in'
infile = 'B-small-attempt2.in'
outfile = infile.replace('.in', '.out')
fid = open(infile, 'r')
n_cases = int(fid.readline().strip())
f_out = open(outfile, 'w')
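# This appears to be Code Jam 2017 Round 1A problem B ("Ratatouille"): a
# package can serve q portions of an ingredient when its amount lies within
# 10% of q times the per-serving requirement -- hence the 0.9/1.1 window
# used in solver() below.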
def solver(rata_q, p):
assert isinstance(rata_q, np.ndarray)
assert isinstance(p, np.ndarray)
n_ingredients, n_packages = p.shape
taken = np.zeros_like(p, dtype=bool)
lb = int(floor(np.min(np.min(0.9*p / rata_q, axis=1))))
ub = int(ceil(np.max(np.max(1.1*p / rata_q, axis=1))))
kits = 0
for q in range(lb, ub+1):
if (p==0).all():
return kits
t = (p >= rata_q * (q * 0.9)) & (p <= rata_q * (q * 1.1))
can_make = t.astype(np.uint8).sum(axis=1)
max_kits = can_make.min()
if max_kits.min() > 0:
kits += max_kits
if test_case==88:
pass
for row in range(p.shape[0]):
eliminated = 0
for col in range(p.shape[1]):
if t[row,col]:
p[row,col] = 0 # used, take them out
eliminated += 1
if eliminated >= max_kits:
break
return kits
for test_case in range(1,n_cases+1):
n_ingredients, n_packages = map(int, fid.readline().strip().split())
rata_q = map(int, fid.readline().strip().split())
r = np.array(rata_q).reshape((n_ingredients,1))
l = []
for i_ing in range(n_ingredients):
l.append(map(int, fid.readline().strip().split()))
a = np.array(l, dtype=np.float64)
print('Case %i' % test_case)
print(n_ingredients, n_packages)
print(rata_q)
print(a)
if test_case == 5:
pass
sol = solver(r, a)
print(sol)
l = 'Case #%i: %i\n' % (test_case, sol)
print(l)
f_out.write(l)
f_out.close() | [
"[email protected]"
] | |
f095f493b8c86691cddc688e4d19ccaf71870c88 | c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34 | /source/Clarification/Backtracking/52.N皇后2.py | 0850985c798d542a6dbf9bbf340bfa76bed00408 | [
"MIT"
] | permissive | zhangwang0537/LeetCode-Notebook | 73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1 | 1dbd18114ed688ddeaa3ee83181d373dcc1429e5 | refs/heads/master | 2022-11-13T21:08:20.343562 | 2020-04-09T03:11:51 | 2020-04-09T03:11:51 | 277,572,643 | 0 | 0 | MIT | 2020-07-06T14:59:57 | 2020-07-06T14:59:56 | null | UTF-8 | Python | false | false | 1,700 | py | # n 皇后问题研究的是如何将 n 个皇后放置在 n×n 的棋盘上,并且使皇后彼此之间不能相互攻击。
#
# The n-queens puzzle is the problem of placing n queens on an n×n chessboard
# so that no two queens attack each other.
#
# (The figure referenced above showed one solution to the 8-queens puzzle.)
#
# Given an integer n, return the number of distinct solutions to the
# n-queens puzzle.
#
# Example:
#
# Input: 4
# Output: 2
# Explanation: There are two distinct solutions to the 4-queens puzzle:
# [
#  [".Q..", // Solution 1
#   "...Q",
#   "Q...",
#   "..Q."],
#
#  ["..Q.", // Solution 2
#   "Q...",
#   "...Q",
#   ".Q.."]
# ]
class Solution:
def totalNQueens(self, n: int) -> int:
def is_not_under_attack(row,col):
return not (rows[col] or hills[row - col] or dales[row + col])
        # Place a queen: mark its column and both diagonals as occupied
        def place_queen(row,col):
            rows[col] = 1
            hills[row - col] = 1 # "hills": main diagonals (row - col is constant)
            dales[row + col] = 1 # "dales": anti-diagonals (row + col is constant)
        # Remove the queen: free its column and diagonals again
        def remove_queen(row,col):
            rows[col] = 0
            hills[row - col] = 0 # main diagonal
            dales[row + col] = 0 # anti-diagonal
        # Backtrack row by row
        def backtrack(row = 0,count = 0):
for col in range(n):
if is_not_under_attack(row, col):
place_queen(row, col)
                    if row + 1 == n: # all n queens placed - found one complete solution
count += 1
else:
count = backtrack(row + 1,count)
remove_queen(row, col)
return count
        rows = [0] * n
        hills = [0] * (2 * n - 1) # main diagonals
        dales = [0] * (2 * n - 1) # anti-diagonals
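        # Added sanity check: Solution().totalNQueens(4) should return 2,
        # matching the example in the problem statement above.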
return backtrack() | [
"[email protected]"
] | |
e78baff9bdff094df6a9bde81c9da4513c0aa5b9 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/jyb.py | ca9d3c9ec4d59763f4fd3413bf1ece65baed5ad0 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'jYB':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
f60622ab5bd5f34311c951a2a60f776f25a2aa47 | 33a50bb13812090a36257078522b798762978c66 | /top/api/rest/SimbaNonsearchAllplacesGetRequest.py | 6f07639d12207a871566ac06ac186d09de431e25 | [] | no_license | aa3632840/quanlin | 52ac862073608cd5b977769c14a7f6dcfb556678 | 2890d35fa87367d77e295009f2d911d4b9b56761 | refs/heads/master | 2021-01-10T22:05:14.076949 | 2014-10-25T02:28:15 | 2014-10-25T02:28:15 | 23,178,087 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | '''
Created by auto_sdk on 2014-09-08 16:48:02
'''
from top.api.base import RestApi
class SimbaNonsearchAllplacesGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
def getapiname(self):
return 'taobao.simba.nonsearch.allplaces.get'
| [
"[email protected]"
] | |
9d9ed5f5767b7fd951eb6ad1a2a01ca63fc8e5ed | 56b63ee537f872af0fc028016d1508b4c1dd5c60 | /school/migrations/0267_auto_20210317_1657.py | 926120cf7259917f9c79aaa27206e75ae9e960a4 | [] | no_license | jacknjillsolutionsrevanth/EMS1 | 01fc571120f765b0fbfe3aa654b15ff578d6e9b9 | db14d8e6c15669b5938aa9276c5e22006218814a | refs/heads/main | 2023-08-03T19:40:50.073133 | 2021-10-01T07:02:37 | 2021-10-01T07:02:37 | 410,202,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | # Generated by Django 3.1.4 on 2021-03-17 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('school', '0266_auto_20210315_1612'),
]
operations = [
migrations.AddField(
model_name='daily_data',
name='routename',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='milkdata',
name='branch',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| [
"[email protected]"
] | |
1f6e97b6fae3bcc121943a41542b27b69deeafab | 8c77dcc0fd3e497194e572c8641200f08b32dc97 | /general/function_factory.py | f2c923b5d537856aae039428a6462973dfd14e56 | [
"MIT"
] | permissive | bpuderer/python-snippets | 633a1e382f7c9812621d61ec16a15e106d1d5fc8 | 3277b76b03f3ceb11e4571be4cecae68051aac45 | refs/heads/master | 2021-12-27T09:19:27.212312 | 2021-12-22T13:08:56 | 2021-12-22T13:08:56 | 46,539,064 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | def raise_to(exp):
def raise_to_exp(x):
return pow(x, exp)
return raise_to_exp
cube = raise_to(3)
print(cube(4))
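# The inner function closes over exp, so each factory call yields an
# independent power function -- a small added check:
square = raise_to(2)
print(square(4))  # 16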
| [
"[email protected]"
] | |
c10fbf1e704a93a27b39f55a903786ffa970dab7 | f9d942b2fed83e9d6c101ebaedc1d4b36dee2754 | /logistics/tests/util.py | b392e9568f05b3984dcf23beedbd376f6d40b26b | [] | no_license | unicefuganda/rapidsms-logistics | 7cde229ac2619366d253d099c0f222eb96b1468e | 7d9609a7b9d6fa3f4502aba52ab56acc23a6e928 | refs/heads/master | 2020-12-25T05:26:59.459389 | 2012-12-17T12:00:52 | 2012-12-17T12:00:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | from rapidsms.tests.scripted import TestScript
from logistics import loader as logi_loader
def load_test_data():
logi_loader.init_reports()
logi_loader.init_supply_point_types()
logi_loader.init_test_location_and_supplypoints()
logi_loader.init_test_product_and_stock()
| [
"[email protected]"
] | |
05d8af9bcacd6c3653138d5f6101b153625fb68c | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Docs/Controlling the Keyboard and Mouse/typer/docs_src/commands/callback/tutorial001.py | 5438b3ead7b25f5be9db58189b57b8e6e3f410a3 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f0921ed2e19ca15b1c6e7817d91c7dbefd69a5a39c79520e0d5e519b11cdd10c
size 756
| [
"[email protected]"
] | |
4df9ea69048a2ad0b8f86d6c9a2ba6a5b4c33d67 | e06f94c1cf7748352516e15a983df38e0693319d | /venv/lib/python3.9/site-packages/web3/pm.py | 5257db22bd20bce2db4460aaffcd9d35406a644c | [
"MIT"
] | permissive | Ruben1701/Blockchain-for-Permission-Management | 3fcf2c2fad62a1219715cb106ef11aed857d8a71 | f063f3da2dc9c12d4c68332e309e402a67fd7a8b | refs/heads/main | 2023-06-05T14:41:05.237873 | 2021-06-26T21:13:38 | 2021-06-26T21:13:38 | 347,065,325 | 0 | 1 | MIT | 2021-05-26T11:49:34 | 2021-03-12T12:51:50 | Python | UTF-8 | Python | false | false | 21,146 | py | from abc import (
ABC,
abstractmethod,
)
import json
from pathlib import (
Path,
)
from typing import (
Any,
Dict,
Iterable,
NamedTuple,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from eth_typing import (
URI,
Address,
ChecksumAddress,
ContractName,
Manifest,
)
from eth_utils import (
is_canonical_address,
is_checksum_address,
to_checksum_address,
to_text,
to_tuple,
)
from ethpm import (
ASSETS_DIR,
Package,
)
from ethpm.uri import (
is_supported_content_addressed_uri,
resolve_uri_contents,
)
from ethpm.validation.manifest import (
validate_manifest_against_schema,
validate_raw_manifest_format,
)
from ethpm.validation.package import (
validate_package_name,
validate_package_version,
)
from web3 import Web3
from web3._utils.ens import (
is_ens_name,
)
from web3.exceptions import (
InvalidAddress,
ManifestValidationError,
NameNotFound,
PMError,
)
from web3.module import (
Module,
)
from web3.types import (
ENS,
)
# Package Management is still in alpha, and its API is likely to change, so it
# is not automatically available on a web3 instance. To use the `PM` module,
# please enable the package management API on an individual web3 instance.
#
# >>> from web3.auto import w3
# >>> w3.pm
# AttributeError: The Package Management feature is disabled by default ...
# >>> w3.enable_unstable_package_management_api()
# >>> w3.pm
# <web3.pm.PM at 0x....>
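# A minimal end-to-end sketch of the flow (added; assumes a funded default
# account and an IPFS-pinned manifest -- the package name, version and URI
# below are illustrative, not from this module):
#
# >>> w3.enable_unstable_package_management_api()
# >>> w3.pm.deploy_and_set_registry()
# >>> release_id = w3.pm.release_package("owned", "1.0.0", "ipfs://Qm...")
# >>> pkg = w3.pm.get_package("owned", "1.0.0")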
T = TypeVar("T")
class ReleaseData(NamedTuple):
package_name: str
version: str
manifest_uri: URI
class ERC1319Registry(ABC):
"""
The ERC1319Registry class is a base class for all registry implementations to inherit from. It
defines the methods specified in `ERC 1319 <https://github.com/ethereum/EIPs/issues/1319>`__.
All of these methods are prefixed with an underscore, since they are not intended to be
accessed directly, but rather through the methods on ``web3.pm``. They are unlikely to change,
but must be implemented in a `ERC1319Registry` subclass in order to be compatible with the
    `PM` module. Any custom methods (e.g. not defined in ERC1319) in a subclass
should *not* be prefixed with an underscore.
All of these methods must be implemented in any subclass in order to work with `web3.pm.PM`.
Any implementation specific logic should be handled in a subclass.
"""
@abstractmethod
def __init__(self, address: Address, w3: Web3) -> None:
"""
Initializes the class with the on-chain address of the registry, and a web3 instance
connected to the chain where the registry can be found.
Must set the following properties...
* ``self.registry``: A `web3.contract` instance of the target registry.
* ``self.address``: The address of the target registry.
* ``self.w3``: The *web3* instance connected to the chain where the registry can be found.
"""
pass
#
# Write API
#
@abstractmethod
def _release(self, package_name: str, version: str, manifest_uri: str) -> bytes:
"""
Returns the releaseId created by successfully adding a release to the registry.
* Parameters:
            * ``package_name``: Valid package name according to the spec.
* ``version``: Version identifier string, can conform to any versioning scheme.
* ``manifest_uri``: URI location of a manifest which details the release contents
"""
pass
#
# Read API
#
@abstractmethod
def _get_package_name(self, package_id: bytes) -> str:
"""
Returns the package name associated with the given package id, if the
package id exists on the connected registry.
* Parameters:
* ``package_id``: 32 byte package identifier.
"""
pass
@abstractmethod
def _get_all_package_ids(self) -> Iterable[bytes]:
"""
Returns a tuple containing all of the package ids found on the connected registry.
"""
pass
@abstractmethod
def _get_release_id(self, package_name: str, version: str) -> bytes:
"""
Returns the 32 bytes release id associated with the given package name and version,
if the release exists on the connected registry.
* Parameters:
            * ``package_name``: Valid package name according to the spec.
* ``version``: Version identifier string, can conform to any versioning scheme.
"""
pass
@abstractmethod
def _get_all_release_ids(self, package_name: str) -> Iterable[bytes]:
"""
        Returns a tuple containing all of the release ids belonging to the given package name,
if the package has releases on the connected registry.
* Parameters:
            * ``package_name``: Valid package name according to the spec.
"""
pass
@abstractmethod
def _get_release_data(self, release_id: bytes) -> ReleaseData:
"""
Returns a tuple containing (package_name, version, manifest_uri) for the given release id,
if the release exists on the connected registry.
* Parameters:
* ``release_id``: 32 byte release identifier.
"""
pass
@abstractmethod
def _generate_release_id(self, package_name: str, version: str) -> bytes:
"""
Returns the 32 byte release identifier that *would* be associated with the given
package name and version according to the registry's hashing mechanism.
The release *does not* have to exist on the connected registry.
* Parameters:
            * ``package_name``: Valid package name according to the spec.
* ``version``: Version identifier string, can conform to any versioning scheme.
"""
pass
@abstractmethod
def _num_package_ids(self) -> int:
"""
Returns the number of packages that exist on the connected registry.
"""
pass
@abstractmethod
def _num_release_ids(self, package_name: str) -> int:
"""
Returns the number of releases found on the connected registry,
that belong to the given package name.
* Parameters:
            * ``package_name``: Valid package name according to the spec.
"""
pass
@classmethod
@abstractmethod
def deploy_new_instance(cls: Type[T], w3: Web3) -> T:
"""
Class method that returns a newly deployed instance of ERC1319Registry.
* Parameters:
* ``w3``: Web3 instance on which to deploy the new registry.
"""
pass
BATCH_SIZE = 100
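# Registry reads below are paged, BATCH_SIZE ids per call, presumably to
# bound the size of each eth_call response.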
class SimpleRegistry(ERC1319Registry):
"""
This class represents an instance of the `Solidity Reference Registry implementation
<https://github.com/ethpm/solidity-registry>`__.
"""
def __init__(self, address: ChecksumAddress, w3: Web3) -> None:
abi = get_simple_registry_manifest()["contractTypes"]["PackageRegistry"][
"abi"
]
self.registry = w3.eth.contract(address=address, abi=abi)
self.address = address
self.w3 = w3
def _release(self, package_name: str, version: str, manifest_uri: str) -> bytes:
tx_hash = self.registry.functions.release(
package_name, version, manifest_uri
).transact()
self.w3.eth.wait_for_transaction_receipt(tx_hash)
return self._get_release_id(package_name, version)
def _get_package_name(self, package_id: bytes) -> str:
package_name = self.registry.functions.getPackageName(package_id).call()
return package_name
@to_tuple
def _get_all_package_ids(self) -> Iterable[bytes]:
num_packages = self._num_package_ids()
pointer = 0
while pointer < num_packages:
new_ids, new_pointer = self.registry.functions.getAllPackageIds(
pointer,
(pointer + BATCH_SIZE)
).call()
if not new_pointer > pointer:
break
yield from reversed(new_ids)
pointer = new_pointer
def _get_release_id(self, package_name: str, version: str) -> bytes:
return self.registry.functions.getReleaseId(package_name, version).call()
@to_tuple
def _get_all_release_ids(self, package_name: str) -> Iterable[bytes]:
num_releases = self._num_release_ids(package_name)
pointer = 0
while pointer < num_releases:
new_ids, new_pointer = self.registry.functions.getAllReleaseIds(
package_name,
pointer,
(pointer + BATCH_SIZE)
).call()
if not new_pointer > pointer:
break
yield from reversed(new_ids)
pointer = new_pointer
def _get_release_data(self, release_id: bytes) -> ReleaseData:
name, version, uri = self.registry.functions.getReleaseData(release_id).call()
return ReleaseData(name, version, uri)
def _generate_release_id(self, package_name: str, version: str) -> bytes:
return self.registry.functions.generateReleaseId(package_name, version).call()
def _num_package_ids(self) -> int:
return self.registry.functions.numPackageIds().call()
def _num_release_ids(self, package_name: str) -> int:
return self.registry.functions.numReleaseIds(package_name).call()
@classmethod
def deploy_new_instance(cls, w3: Web3) -> 'SimpleRegistry':
manifest = get_simple_registry_manifest()
registry_package = Package(manifest, w3)
registry_factory = registry_package.get_contract_factory(ContractName("PackageRegistry"))
tx_hash = registry_factory.constructor().transact()
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
return cls(tx_receipt["contractAddress"], w3)
class PM(Module):
"""
The PM module will work with any subclass of ``ERC1319Registry``, tailored to a particular
implementation of `ERC1319 <https://github.com/ethereum/EIPs/issues/1319>`__, set as
its ``registry`` attribute.
"""
def get_package_from_manifest(self, manifest: Manifest) -> Package:
"""
Returns a `Package <https://github.com/ethpm/py-ethpm/blob/master/ethpm/package.py>`__
instance built with the given manifest.
* Parameters:
* ``manifest``: A dict representing a valid manifest
"""
return Package(manifest, self.web3)
def get_package_from_uri(self, manifest_uri: URI) -> Package:
"""
Returns a `Package <https://github.com/ethpm/py-ethpm/blob/master/ethpm/package.py>`__
instance built with the Manifest stored at the URI.
If you want to use a specific IPFS backend, set ``ETHPM_IPFS_BACKEND_CLASS``
to your desired backend. Defaults to Infura IPFS backend.
* Parameters:
* ``uri``: Must be a valid content-addressed URI
"""
return Package.from_uri(manifest_uri, self.web3)
def get_local_package(self, package_name: str, ethpm_dir: Path = None) -> Package:
"""
Returns a `Package <https://github.com/ethpm/py-ethpm/blob/master/ethpm/package.py>`__
instance built with the Manifest found at the package name in your local ethpm_dir.
* Parameters:
* ``package_name``: Must be the name of a package installed locally.
* ``ethpm_dir``: Path pointing to the target ethpm directory (optional).
"""
if not ethpm_dir:
ethpm_dir = Path.cwd() / '_ethpm_packages'
if not ethpm_dir.name == "_ethpm_packages" or not ethpm_dir.is_dir():
raise PMError(f"{ethpm_dir} is not a valid ethPM packages directory.")
local_packages = [pkg.name for pkg in ethpm_dir.iterdir() if pkg.is_dir()]
if package_name not in local_packages:
raise PMError(
f"Package: {package_name} not found in {ethpm_dir}. "
f"Available packages include: {local_packages}."
)
target_manifest = json.loads(
(ethpm_dir / package_name / "manifest.json").read_text()
)
return self.get_package_from_manifest(target_manifest)
def set_registry(self, address: Union[Address, ChecksumAddress, ENS]) -> None:
"""
Sets the current registry used in ``web3.pm`` functions that read/write to an on-chain
registry. This method accepts checksummed/canonical addresses or ENS names. Addresses
must point to an on-chain instance of an ERC1319 registry implementation.
To use an ENS domain as the address, make sure a valid ENS instance set as ``web3.ens``.
* Parameters:
* ``address``: Address of on-chain Registry.
"""
if is_canonical_address(address):
addr_string = to_text(address)
self.registry = SimpleRegistry(to_checksum_address(addr_string), self.web3)
elif is_checksum_address(address):
self.registry = SimpleRegistry(cast(ChecksumAddress, address), self.web3)
elif is_ens_name(address):
self._validate_set_ens()
addr_lookup = self.web3.ens.address(str(address))
if not addr_lookup:
raise NameNotFound(
"No address found after ENS lookup for name: {0}.".format(address)
)
self.registry = SimpleRegistry(addr_lookup, self.web3)
else:
raise PMError(
"Expected a canonical/checksummed address or ENS name for the address, "
"instead received {0}.".format(type(address))
)
def deploy_and_set_registry(self) -> ChecksumAddress:
"""
Returns the address of a freshly deployed instance of `SimpleRegistry`
and sets the newly deployed registry as the active registry on ``web3.pm.registry``.
To tie your registry to an ENS name, use web3's ENS module, ie.
.. code-block:: python
w3.ens.setup_address(ens_name, w3.pm.registry.address)
"""
self.registry = SimpleRegistry.deploy_new_instance(self.web3)
return to_checksum_address(self.registry.address)
def release_package(
self, package_name: str, version: str, manifest_uri: URI
) -> bytes:
"""
Returns the release id generated by releasing a package on the current registry.
Requires ``web3.PM`` to have a registry set. Requires ``web3.eth.default_account``
to be the registry owner.
* Parameters:
* ``package_name``: Must be a valid package name, matching the given manifest.
* ``version``: Must be a valid package version, matching the given manifest.
* ``manifest_uri``: Must be a valid content-addressed URI. Currently, only IPFS
and Github content-addressed URIs are supported.
"""
validate_is_supported_manifest_uri(manifest_uri)
raw_manifest = to_text(resolve_uri_contents(manifest_uri))
validate_raw_manifest_format(raw_manifest)
manifest = json.loads(raw_manifest)
validate_manifest_against_schema(manifest)
if package_name != manifest["name"]:
raise ManifestValidationError(
f"Provided package name: {package_name} does not match the package name "
f"found in the manifest: {manifest['name']}."
)
if version != manifest["version"]:
raise ManifestValidationError(
f"Provided package version: {version} does not match the package version "
f"found in the manifest: {manifest['version']}."
)
self._validate_set_registry()
return self.registry._release(package_name, version, manifest_uri)
@to_tuple
def get_all_package_names(self) -> Iterable[str]:
"""
Returns a tuple containing all the package names available on the current registry.
"""
self._validate_set_registry()
package_ids = self.registry._get_all_package_ids()
for package_id in package_ids:
yield self.registry._get_package_name(package_id)
def get_package_count(self) -> int:
"""
Returns the number of packages available on the current registry.
"""
self._validate_set_registry()
return self.registry._num_package_ids()
def get_release_count(self, package_name: str) -> int:
"""
Returns the number of releases of the given package name available on the current registry.
"""
validate_package_name(package_name)
self._validate_set_registry()
return self.registry._num_release_ids(package_name)
def get_release_id(self, package_name: str, version: str) -> bytes:
"""
Returns the 32 byte identifier of a release for the given package name and version,
if they are available on the current registry.
"""
validate_package_name(package_name)
validate_package_version(version)
self._validate_set_registry()
return self.registry._get_release_id(package_name, version)
@to_tuple
def get_all_package_releases(self, package_name: str) -> Iterable[Tuple[str, str]]:
"""
        Returns a tuple of release data (version, manifest_uri) for every release of the
given package name available on the current registry.
"""
validate_package_name(package_name)
self._validate_set_registry()
release_ids = self.registry._get_all_release_ids(package_name)
for release_id in release_ids:
release_data = self.registry._get_release_data(release_id)
yield (release_data.version, release_data.manifest_uri)
def get_release_id_data(self, release_id: bytes) -> ReleaseData:
"""
Returns ``(package_name, version, manifest_uri)`` associated with the given
release id, *if* it is available on the current registry.
* Parameters:
* ``release_id``: 32 byte release identifier
"""
self._validate_set_registry()
return self.registry._get_release_data(release_id)
def get_release_data(self, package_name: str, version: str) -> ReleaseData:
"""
Returns ``(package_name, version, manifest_uri)`` associated with the given
package name and version, *if* they are published to the currently set registry.
* Parameters:
* ``name``: Must be a valid package name.
* ``version``: Must be a valid package version.
"""
validate_package_name(package_name)
validate_package_version(version)
self._validate_set_registry()
release_id = self.registry._get_release_id(package_name, version)
return self.get_release_id_data(release_id)
def get_package(self, package_name: str, version: str) -> Package:
"""
Returns a ``Package`` instance, generated by the ``manifest_uri`` associated with the
given package name and version, if they are published to the currently set registry.
* Parameters:
* ``name``: Must be a valid package name.
* ``version``: Must be a valid package version.
"""
validate_package_name(package_name)
validate_package_version(version)
self._validate_set_registry()
release_data = self.get_release_data(package_name, version)
return self.get_package_from_uri(URI(release_data.manifest_uri))
def _validate_set_registry(self) -> None:
try:
self.registry
except AttributeError:
raise PMError(
"web3.pm does not have a set registry. "
"Please set registry with either: "
"web3.pm.set_registry(address) or "
"web3.pm.deploy_and_set_registry()"
)
if not isinstance(self.registry, ERC1319Registry):
raise PMError(
"web3.pm requires an instance of a subclass of ERC1319Registry "
"to be set as the web3.pm.registry attribute. Instead found: "
f"{type(self.registry)}."
)
def _validate_set_ens(self) -> None:
if not self.web3:
raise InvalidAddress(
"Could not look up ENS address because no web3 " "connection available"
)
elif not self.web3.ens:
raise InvalidAddress(
"Could not look up ENS address because web3.ens is " "set to None"
)
def get_simple_registry_manifest() -> Dict[str, Any]:
return json.loads((ASSETS_DIR / "simple-registry" / "v3.json").read_text())
def validate_is_supported_manifest_uri(uri: URI) -> None:
if not is_supported_content_addressed_uri(uri):
raise ManifestValidationError(
f"URI: {uri} is not a valid content-addressed URI. "
"Currently only IPFS and Github content-addressed URIs are supported."
)
| [
"[email protected]"
] | |
2c2eba017b299584cc34574addc7412cb5c9635b | 8ed4bf9fbead471c9e5f88e4d18ac432ec3d628b | /hackerrank/algorithm/string/gem_stones.py | e3ff22b12568fff4bf1fd684c35dd47da7151f2d | [] | no_license | hizbul25/programming_problem | 9bf26e49ed5bb8c9c829d00e765c9401222fb35c | 2acca363704b993ffe5f6c2b00f81a4f4eca7204 | refs/heads/master | 2021-01-10T22:28:26.105787 | 2018-01-21T16:45:45 | 2018-01-21T16:45:45 | 65,394,734 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | #URL: https://www.hackerrank.com/challenges/gem-stones
n = int(input())
all_elem = set(input())
for g in range(n - 1):
all_elem &= set(input())
print(len(all_elem))
| [
"[email protected]"
] | |
9df035da71a8354e73397ed5fd9483a3a837b5d5 | 62e985b6bc2cd04be506c9f4b586f6a0bd5a8b1c | /docs/_docs | 2e46ca4836023863c54a487374eead67897a2d9d | [
"MIT"
] | permissive | delfick/nose-focus | ece09553d26ce4323e449b5e50f98e63a21d1699 | 89ceae691fabb27c35d4a67f0edf8dec17737f3f | refs/heads/master | 2023-07-10T22:44:29.271678 | 2023-06-23T06:36:00 | 2023-06-23T06:36:00 | 20,155,739 | 0 | 3 | null | 2019-11-06T22:59:43 | 2014-05-25T13:57:39 | Python | UTF-8 | Python | false | false | 512 | #!/usr/bin/env python3
from venvstarter import ignite
import runpy
import os
this_dir = os.path.dirname(__file__)
nose_focus_version = runpy.run_path(
os.path.join(this_dir, "..", "nose_focus", "__init__.py")
)["VERSION"]
with open(os.path.join(this_dir, "requirements.txt"), "r") as fle:
deps = [line.strip() for line in fle.readlines() if line.strip()]
deps.append(f"-e file:{this_dir}/..#egg=nose_focus=={nose_focus_version}")
ignite(this_dir, "sphinx-build", deps=deps, min_python_version=3.6)
| [
"[email protected]"
] | ||
e842d47c65b49f7baf66ad14c86f7b7c9b1e413b | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /BitPim/rev3177-3237/right-branch-3237/midifile.py | 2c8d9d84bbc3c8f068fe079043d3fd8f31067e0b | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,795 | py | import common
import fileinfo
module_debug=False
class MIDIEvent(object):
META_EVENT=0
SYSEX_EVENT=1
SYSEX1_EVENT=2
MIDI_EVENT=3
LAST_MIDI_EVENT=4
type_str=('Meta', 'SYSEX', 'SYSEX cont', 'MIDI', 'Last MIDI')
def __init__(self, file, offset, last_cmd=None):
self.__f=file
self.__start=self.__ofs=offset
self.__time_delta=self.__get_var_len()
b=self.__get_int()
if b==0xff:
self.__get_meta_event()
elif b==0xf0 or b==0xf7:
self.__get_sysex_event(b)
else:
self.__get_midi_event(b, last_cmd)
self.__total_len=self.__ofs-self.__start
def __get_int(self):
i=int(self.__f.GetByte(self.__ofs))
self.__ofs+=1
return i
def __get_bytes(self, len):
data=self.__f.GetBytes(self.__ofs, len)
self.__ofs+=len
return data
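    # MIDI delta-times are variable-length quantities: 7 data bits per byte,
    # with the high bit set on every byte except the last. __get_var_len
    # below accumulates those 7-bit groups (a standard VLQ decode).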
def __get_var_len(self):
t=0
b=self.__get_int()
while (b&0x80):
t=(t<<7)|(b&0x7f)
b=self.__get_int()
return (t<<7)|(b&0x7f)
def __get_meta_event(self):
self.__type=self.META_EVENT
self.__cmd=self.__get_int()
self.__len=self.__get_var_len()
if self.__len:
self.__param1=self.__get_bytes(self.__len)
else:
self.__param1=None
self.__param2=None
def __get_sysex_event(self, cmd):
if cmd==0xf0:
self.__type=self.SYSEX_EVENT
else:
self.__type=self.SYSEX1_EVENT
self.__cmd=cmd
self.__len=self.__get_var_len()
if self.__len:
self.__param1=self.__get_bytes(self.__len)
else:
self.__param1=None
self.__param2=None
def __get_midi_event(self, cmd, last_cmd):
if cmd&0x80:
i=cmd
self.__type=self.MIDI_EVENT
self.__param1=self.__get_int()
else:
i=last_cmd
self.__type=self.LAST_MIDI_EVENT
self.__param1=cmd
self.__cmd=(i&0xf0)>>4
self.__midi_channel=i&0x0f
if self.__cmd==0x0c or self.__cmd==0x0d:
self.__len=1
self.__param2=None
else:
self.__len=2
self.__param2=self.__get_int()
def __get_type(self):
return self.__type
type=property(fget=__get_type)
def __get_time_delta(self):
return self.__time_delta
time_delta=property(fget=__get_time_delta)
def __get_total_len(self):
return self.__total_len
total_len=property(fget=__get_total_len)
def __get_cmd(self):
return self.__cmd
cmd=property(fget=__get_cmd)
def __get_midi_channel(self):
return self.__midi_channel
midi_channel=property(fget=__get_midi_channel)
def __get_param_len(self):
return self.__len
param_len=property(fget=__get_param_len)
def __get_params(self):
return self.__param1, self.__param2
params=property(fget=__get_params)
def __str__(self):
if self.type==self.MIDI_EVENT or \
self.type==self.LAST_MIDI_EVENT:
return '0x%04x: %s cmd: 0x%x, Channel: %d, Len: %d'%\
(self.time_delta, self.type_str[self.type],
self.cmd, self.midi_channel, self.param_len)
else:
return '0x%04x: %s cmd: 0x%x, Len: %d'%\
(self.time_delta, self.type_str[self.type],
self.cmd, self.param_len)
class MIDITrack(object):
def __init__(self, file, offset):
self.__f=file
self.__ofs=offset
if module_debug:
print 'New Track @ ofs:', offset
if self.__f.GetBytes(self.__ofs, 4)!='MTrk':
            raise TypeError, 'not a MIDI track'
self.__len=self.__f.GetMSBUint32(self.__ofs+4)
ofs=self.__ofs+8
ofs_end=ofs+self.__len
last_cmd=None
self.__time_delta=0
self.__mpqn=None
while ofs<ofs_end:
e=MIDIEvent(file, ofs, last_cmd)
if module_debug:
print e
ofs+=e.total_len
self.__time_delta+=e.time_delta
if e.type==e.META_EVENT:
if e.cmd==0x51:
p1, p2=e.params
self.__mpqn=(ord(p1[0])<<16)|(ord(p1[1])<<8)|ord(p1[2])
if e.type==e.MIDI_EVENT or e.type==e.LAST_MIDI_EVENT:
last_cmd=(e.cmd<<4)|e.midi_channel
else:
last_cmd=e.cmd
self.__total_len=ofs-self.__ofs
if module_debug:
print 'self.__ofs', self.__ofs+8, 'self.__len:', self.__len, 'ofs: ', ofs
print 'time delta:', self.__time_delta, 'MPQN: ', self.__mpqn
def __get_time_delta(self):
return self.__time_delta
time_delta=property(fget=__get_time_delta)
def __get_total_len(self):
return self.__total_len
total_len=property(fget=__get_total_len)
def __get_mpqn(self):
return self.__mpqn
mpqn=property(fget=__get_mpqn)
class MIDIFile(object):
def __init__(self, file_wraper):
try:
self.__valid=False
self.__file=file_wraper
if self.__file.GetBytes(0, 4)!='MThd' or \
self.__file.GetMSBUint32(4)!=6:
return
self.__valid=True
self.__type=self.__file.GetMSBUint16(8)
self.__num_tracks=self.__file.GetMSBUint16(10)
self.__time_division=self.__file.GetMSBUint16(12)
self.__tracks=[]
self.__mpqn=2000000
file_ofs=14
time_delta=0
for i in range(self.__num_tracks):
trk=MIDITrack(self.__file, file_ofs)
self.__tracks.append(trk)
file_ofs+=trk.total_len
time_delta=max(time_delta, trk.time_delta)
if trk.mpqn is not None:
self.__mpqn=trk.mpqn
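            # mpqn is microseconds per quarter note; time_division is ticks
            # per quarter note, so duration in seconds = ticks * mpqn /
            # time_division / 1e6.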
self.__duration=(self.__mpqn*time_delta/self.__time_division)/1000000.0
if module_debug:
print 'type:', self.__type
print 'time division:', self.__time_division
print 'num of tracks:', self.__num_tracks
print 'MPQN:', self.__mpqn
print 'longest time delta: ', time_delta
print 'duration:', self.__duration
except:
self.__valid=False
def __get_valid(self):
return self.__valid
valid=property(fget=__get_valid)
def __get_type(self):
return self.__type
type=property(fget=__get_type)
def __get_num_tracks(self):
return self.__num_tracks
num_tracks=property(fget=__get_num_tracks)
def __get_duration(self):
return self.__duration
duration=property(fget=__get_duration)
| [
"[email protected]"
] | |
33490b42c85fee01be3f6432c411c486ae7157e5 | aca253ff1a97c96a1a0a9a5802aa623789662bb1 | /p034/statue_bar.py | ebd6c7ce45949c996b6d834401e27a09c8df4f7f | [] | no_license | KD-huhu/PyQt5 | a6128a34b93f6e2da7216d5818f66dc9614216bc | 1c33a6549c2fcf663168256553d8c24e25d9a69c | refs/heads/master | 2022-07-03T07:37:29.837547 | 2020-05-17T14:54:39 | 2020-05-17T14:54:39 | 261,768,854 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | import sys, math
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class StatusBar(QMainWindow):
def __init__(self):
super(StatusBar, self).__init__()
self.initUI()
def initUI(self):
self.setWindowTitle("状态栏演示")
self.resize(300, 200)
        bar = self.menuBar() # create the menu bar object
        file = bar.addMenu("File") # add a menu to it
        file.addAction("show")
        file.triggered.connect(self.processTrigger) # connect the slot
        self.setCentralWidget(QTextEdit())
        self.statusBar = QStatusBar() # create the status bar object
        self.setStatusBar(self.statusBar) # install the status bar
def processTrigger(self, q):
if q.text() == "show":
            self.statusBar.showMessage(q.text() + " 菜单被点击了", 5000)  # show "menu was clicked" for 5000 ms
if __name__ == '__main__':
app = QApplication(sys.argv)
main = StatusBar()
main.show()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
c73005c81aaec8e7c0613dea2e18f7b12afbb9dd | f45cc0049cd6c3a2b25de0e9bbc80c25c113a356 | /LeetCode/双指针(two points)/16. 3Sum Closest.py | 999c518d3f03cb923594bff7a42b551b460d21fb | [] | no_license | yiming1012/MyLeetCode | 4a387d024969bfd1cdccd4f581051a6e4104891a | e43ee86c5a8cdb808da09b4b6138e10275abadb5 | refs/heads/master | 2023-06-17T06:43:13.854862 | 2021-07-15T08:54:07 | 2021-07-15T08:54:07 | 261,663,876 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,263 | py | '''
Given an array nums of n integers and an integer target, find three integers in nums such that the sum is closest to target. Return the sum of the three integers. You may assume that each input would have exactly one solution.
Example:
Given array nums = [-1, 2, 1, -4], and target = 1.
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
'''
from typing import List
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> int:
"""
        Approach: two pointers.
        1. The key point is the pruning.
        2. Sort the array first.
        3. Skip duplicate anchors: if i > 0 and nums[i] == nums[i-1]: continue
        4. If the smallest possible sum for this anchor is already >= target,
           no later triple can get closer, so we can stop early.
        5. If the largest possible sum for this anchor is still < target,
           record it and continue to the next anchor.
"""
nums.sort()
n = len(nums)
res = nums[0] + nums[1] + nums[2]
for i in range(n - 2):
l, r = i + 1, n - 1
if i > 0 and nums[i] == nums[i - 1]:
continue
threeSum = nums[i] + nums[i + 1] + nums[i + 2]
if threeSum >= target:
if abs(threeSum - target) < abs(res - target):
res = threeSum
return res
if nums[i] + nums[-1] + nums[-2] < target:
res = nums[i] + nums[-1] + nums[-2]
continue
while l < r:
threeSum = nums[i] + nums[l] + nums[r]
if threeSum < target:
if abs(threeSum - target) < abs(res - target):
res = threeSum
l += 1
                    # skip over consecutive equal values
while l < r and nums[l] == nums[l - 1]:
l += 1
elif threeSum > target:
if abs(threeSum - target) < abs(res - target):
res = threeSum
r -= 1
                    # skip over consecutive equal values
while l < r and nums[r] == nums[r + 1]:
r -= 1
else:
return target
return res
if __name__ == '__main__':
nums = [-1, 2, 1, -4]
target = 1
print(Solution().threeSumClosest(nums, target))
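    # Expected output: 2  (-1 + 2 + 1, per the problem statement above)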
| [
"[email protected]"
] | |
70add22be9a70d8ceca4e71014665764dd5f5aff | bc2945c99f828083ca78b3bfcfe220a134fbd8b0 | /users/migrations/0010_auto_20200725_1159.py | c0fe7d7e4a714113bfff57bc26a2b57875bf0f3a | [] | no_license | Kyeza/web_system | 5bde9231551b7a94b535fe707db99ade351bd4fb | 686a701469b13454d39e4f0c6b342b22befdb345 | refs/heads/uganda-master | 2022-12-14T13:43:17.833502 | 2020-12-11T07:23:19 | 2020-12-11T07:23:19 | 176,704,006 | 2 | 1 | null | 2022-12-08T11:07:51 | 2019-03-20T09:55:33 | Python | UTF-8 | Python | false | false | 612 | py | # Generated by Django 3.0.6 on 2020-07-25 11:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reports', '0014_auto_20200605_0638'),
('users', '0009_auto_20200721_0727'),
]
operations = [
migrations.AlterField(
model_name='payrollprocessors',
name='summary_report',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='earning_or_deduction', to='reports.ExtraSummaryReportInfo'),
),
]
| [
"[email protected]"
] | |
00c870418471d3cbd644cbea12a5c9bdb8ff5530 | f8a114b410803515195a156bc678d76a990d1f83 | /config/settings/base.py | 58f39d4a9fcd2124f896be5e4d4bfe7177a1ba1d | [
"MIT"
] | permissive | mahidul-islam/chatbot_cookiecutter | 86c03941f56e815d4b27fbfec2e7499aa675b065 | 52c8cdf01147b647e076d5dbe5fa1cc13b0bf1ee | refs/heads/master | 2022-12-08T14:20:55.851457 | 2020-08-28T16:27:54 | 2020-08-28T16:27:54 | 291,094,465 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,971 | py | """
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# chatbot/
APPS_DIR = ROOT_DIR / "chatbot"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "Asia/Dhaka"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
"default": env.db("DATABASE_URL", default="postgres:///chatbot")
}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"allauth",
"allauth.account",
"allauth.socialaccount",
]
LOCAL_APPS = [
"chatbot.users.apps.UsersConfig",
# Your stuff: custom apps go here
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "chatbot.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"chatbot.utils.context_processors.settings_context",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.smtp.EmailBackend"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""zihan""", "[email protected]")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "chatbot.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "chatbot.users.adapters.SocialAccountAdapter"
# Your stuff...
# ------------------------------------------------------------------------------
| [
"[email protected]"
] | |
14ec29e30beb9428142b51e4d0cb06ebde3e6971 | a23ec1e8470f87d1b3fa34b01506d6bdd63f6569 | /algorithms/282. Expression Add Operators.py | 3f93dd1ffb26e5d220725678cb98469a2ceaaf91 | [] | no_license | xiaohai0520/Algorithm | ae41d2137e085a30b2ac1034b8ea00e6c9de3ef1 | 96945ffadd893c1be60c3bde70e1f1cd51edd834 | refs/heads/master | 2023-04-14T17:41:21.918167 | 2021-04-20T13:57:09 | 2021-04-20T13:57:09 | 156,438,761 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | class Solution(object):
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
res, self.target = [], target
for i in range(1,len(num)+1):
if i == 1 or (i > 1 and num[0] != "0"): # prevent "00*" as a number
                self.dfs(num[i:], num[:i], int(num[:i]), int(num[:i]), res) # seed the recursion with the first operand
return res
def dfs(self, num, temp, cur, last, res):
if not num:
if cur == self.target:
res.append(temp)
return
for i in range(1, len(num)+1):
val = num[:i]
if i == 1 or (i > 1 and num[0] != "0"): # prevent "00*" as a number
self.dfs(num[i:], temp + "+" + val, cur+int(val), int(val), res)
self.dfs(num[i:], temp + "-" + val, cur-int(val), -int(val), res)
self.dfs(num[i:], temp + "*" + val, cur-last+last*int(val), last*int(val), res)
from typing import List
class Solution:  # alternative implementation (shadows the class above)
def addOperators(self, num: str, target: int) -> List[str]:
results = []
self.helper(num, 0, target, 0, 0, "", results)
return results
def helper(self, string, start, target, sum_so_far, last, path, results):
if start == len(string) and sum_so_far == target:
results.append(path)
for end in range(start+1, len(string)+1):
sub_string = string[start:end]
if len(sub_string) > 1 and sub_string[0] == '0':
break
cur = int(sub_string)
if start == 0:
self.helper(string, end, target, sum_so_far + cur, cur, path + sub_string, results)
else:
self.helper(string, end, target, sum_so_far + cur, cur, path + "+" + sub_string, results)
self.helper(string, end, target, sum_so_far - cur, -cur, path + "-" + sub_string, results)
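                # same precedence trick as in the first solution: roll back
                # `last` and add back `cur * last` for the '*' branch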
self.helper(string, end, target, sum_so_far - last + cur * last, cur * last, path + "*" + sub_string, results)
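if __name__ == '__main__':
    # quick sanity check (illustrative): "232" with target 8 should produce
    # the expressions "2*3+2" and "2+3*2" (order may vary)
    print(Solution().addOperators("232", 8))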
| [
"[email protected]"
] | |
3487f385af9cf1c3384d8a9a9c5360459fd67f89 | 93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3 | /python/paddle/fluid/tests/unittests/ir/test_ir_fc_fuse_pass.py | cb485609b55ec330ad7dff0ed4d10d8a13a8f865 | [
"Apache-2.0"
] | permissive | hutuxian/Paddle | f8b7693bccc6d56887164c1de0b6f6e91cffaae8 | a1b640bc66a5cc9583de503e7406aeba67565e8d | refs/heads/develop | 2023-08-29T19:36:45.382455 | 2020-09-09T09:19:07 | 2020-09-09T09:19:07 | 164,977,763 | 8 | 27 | Apache-2.0 | 2023-06-16T09:47:39 | 2019-01-10T02:50:31 | Python | UTF-8 | Python | false | false | 1,978 | py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from pass_test import PassTest
import paddle.fluid as fluid
import paddle.fluid.core as core
class FCFusePassTest(PassTest):
def setUp(self):
with fluid.program_guard(self.main_program, self.startup_program):
data = fluid.data(
name="data", shape=[32, 128], dtype="float32", lod_level=0)
tmp_0 = fluid.layers.fc(input=data,
size=128,
num_flatten_dims=1,
act="relu")
tmp_1 = fluid.layers.fc(input=tmp_0, size=32, num_flatten_dims=1)
tmp_2 = fluid.layers.softmax(input=tmp_1)
self.feeds = {"data": np.random.random((32, 128)).astype("float32")}
self.fetch_list = [tmp_0, tmp_1, tmp_2]
self.pass_names = "fc_fuse_pass"
self.fused_op_type = "fc"
self.num_fused_ops = 2
def test_check_output(self):
use_gpu_set = [False]
if core.is_compiled_with_cuda():
use_gpu_set.append(True)
for use_gpu in use_gpu_set:
self.pass_attrs = {"fc_fuse_pass": {"use_gpu": use_gpu}}
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
self.check_output_with_place(place, startup_on_cpu=True)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
7024ae05ff1844eca1d1409094787f626f186dca | 98ebce5d5fbf3eb36642b3ffefe51ffcb81e9d5a | /uv/asgi.py | 3a64a8aef60b33aa8102ee73af4ce975123cecea | [
"CC0-1.0"
] | permissive | codesankalp/E-commerce-Website | cf5d07587e790761b07e68b47669f7cf1f25269b | 9091a7e27a9e63242b9067377a697196879fc707 | refs/heads/master | 2023-01-19T23:29:51.071061 | 2020-11-25T07:35:09 | 2020-11-25T07:35:09 | 286,974,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | """
ASGI config for uv project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uv.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
3e6274f68a32a64cdaad8f145058730bafa63415 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Sklearn_scipy_numpy/source/numpy/distutils/system_info.py | d7eb49ecd6b2ad37af5555202623dde6a903977b | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 85,533 | py | #!/bin/env python
"""
This file defines a set of system_info classes for getting
information about various resources (libraries, library directories,
include directories, etc.) in the system. Currently, the following
classes are available:
atlas_info
atlas_threads_info
atlas_blas_info
atlas_blas_threads_info
lapack_atlas_info
lapack_atlas_threads_info
atlas_3_10_info
atlas_3_10_threads_info
  atlas_3_10_blas_info
  atlas_3_10_blas_threads_info
lapack_atlas_3_10_info
lapack_atlas_3_10_threads_info
blas_info
lapack_info
openblas_info
blas_opt_info # usage recommended
lapack_opt_info # usage recommended
fftw_info,dfftw_info,sfftw_info
fftw_threads_info,dfftw_threads_info,sfftw_threads_info
djbfft_info
x11_info
lapack_src_info
blas_src_info
numpy_info
numarray_info
numpy_info
boost_python_info
agg2_info
wx_info
gdk_pixbuf_xlib_2_info
gdk_pixbuf_2_info
gdk_x11_2_info
gtkp_x11_2_info
gtkp_2_info
xft_info
freetype2_info
umfpack_info
Usage:
info_dict = get_info(<name>)
where <name> is a string 'atlas','x11','fftw','lapack','blas',
'lapack_src', 'blas_src', etc. For a complete list of allowed names,
see the definition of get_info() function below.
Returned info_dict is a dictionary which is compatible with
distutils.setup keyword arguments. If info_dict == {}, then the
asked resource is not available (system_info could not find it).
Several *_info classes use an environment variable to specify
the locations of software. If the corresponding environment
variable is set to 'None', the software will be ignored, even
when it is available in the system.
Global parameters:
system_info.search_static_first - search static libraries (.a)
      in preference to shared ones (.so, .sl) if enabled.
system_info.verbosity - output the results to stdout if enabled.
The file 'site.cfg' is looked for in
1) Directory of main setup.py file being run.
2) Home directory of user running the setup.py file as ~/.numpy-site.cfg
3) System wide directory (location of this file...)
The first one found is used to get system configuration options. The
format is that used by ConfigParser (i.e., Windows .INI style). The
section ALL has options that are the default for each section. The
available sections are fftw, atlas, and x11. Appropriate defaults are
used if nothing is specified.
The order of finding the locations of resources is the following:
1. environment variable
2. section in site.cfg
3. ALL section in site.cfg
Only the first complete match is returned.
Example:
----------
[ALL]
library_dirs = /usr/lib:/usr/local/lib:/opt/lib
include_dirs = /usr/include:/usr/local/include:/opt/include
src_dirs = /usr/local/src:/opt/src
# search static libraries (.a) in preference to shared ones (.so)
search_static_first = 0
[fftw]
fftw_libs = rfftw, fftw
fftw_opt_libs = rfftw_threaded, fftw_threaded
# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs
[atlas]
library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
# for overriding the names of the atlas libraries
atlas_libs = lapack, f77blas, cblas, atlas
[x11]
library_dirs = /usr/X11R6/lib
include_dirs = /usr/X11R6/include
----------
Authors:
Pearu Peterson <[email protected]>, February 2002
David M. Cooke <[email protected]>, April 2002
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import re
import copy
import warnings
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
from ConfigParser import NoOptionError, ConfigParser
else:
from configparser import NoOptionError, ConfigParser
from distutils.errors import DistutilsError
from distutils.dist import Distribution
import distutils.sysconfig
from distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import \
find_executable, exec_command, get_pythonexe
from numpy.distutils.misc_util import is_sequence, is_string, \
get_shared_lib_extension
from numpy.distutils.command.config import config as cmd_config
from numpy.distutils.compat import get_exception
import distutils.ccompiler
import tempfile
import shutil
# Determine number of bits
import platform
_bits = {'32bit': 32, '64bit': 64}
platform_bits = _bits[platform.architecture()[0]]
def libpaths(paths, bits):
"""Return a list of library paths valid on 32 or 64 bit systems.
Inputs:
paths : sequence
A sequence of strings (typically paths)
bits : int
An integer, the only valid values are 32 or 64. A ValueError exception
is raised otherwise.
Examples:
Consider a list of directories
>>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']
For a 32-bit platform, this is already valid:
>>> np.distutils.system_info.libpaths(paths,32)
['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']
On 64 bits, we prepend the '64' postfix
>>> np.distutils.system_info.libpaths(paths,64)
['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',
'/usr/lib64', '/usr/lib']
"""
if bits not in (32, 64):
raise ValueError("Invalid bit size in libpaths: 32 or 64 only")
# Handle 32bit case
if bits == 32:
return paths
# Handle 64bit case
out = []
for p in paths:
out.extend([p + '64', p])
return out
if sys.platform == 'win32':
default_lib_dirs = ['C:\\',
os.path.join(distutils.sysconfig.EXEC_PREFIX,
'libs')]
default_runtime_dirs = []
default_include_dirs = []
default_src_dirs = ['.']
default_x11_lib_dirs = []
default_x11_include_dirs = []
else:
default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',
'/opt/local/lib', '/sw/lib'], platform_bits)
default_runtime_dirs = []
default_include_dirs = ['/usr/local/include',
'/opt/include', '/usr/include',
# path of umfpack under macports
'/opt/local/include/ufsparse',
'/opt/local/include', '/sw/include',
'/usr/include/suitesparse']
default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']
default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',
'/usr/lib'], platform_bits)
default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include',
'/usr/include']
if os.path.exists('/usr/lib/X11'):
globbed_x11_dir = glob('/usr/lib/*/libX11.so')
if globbed_x11_dir:
x11_so_dir = os.path.split(globbed_x11_dir[0])[0]
default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])
default_x11_include_dirs.extend(['/usr/lib/X11/include',
'/usr/include/X11'])
import subprocess as sp
tmp = None
try:
# Explicitly open/close file to avoid ResourceWarning when
# tests are run in debug mode Python 3.
tmp = open(os.devnull, 'w')
p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE,
stderr=tmp)
except (OSError, DistutilsError):
# OSError if gcc is not installed, or SandboxViolation (DistutilsError
# subclass) if an old setuptools bug is triggered (see gh-3160).
pass
else:
triplet = str(p.communicate()[0].decode().strip())
if p.returncode == 0:
# gcc supports the "-print-multiarch" option
default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
finally:
if tmp is not None:
tmp.close()
if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
default_include_dirs.append(os.path.join(sys.prefix, 'include'))
default_src_dirs.append(os.path.join(sys.prefix, 'src'))
default_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]
default_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]
default_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]
default_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]
so_ext = get_shared_lib_extension()
def get_standard_file(fname):
"""Returns a list of files named 'fname' from
1) System-wide directory (directory-location of this module)
2) Users HOME directory (os.environ['HOME'])
3) Local directory
"""
# System-wide file
filenames = []
try:
f = __file__
except NameError:
f = sys.argv[0]
else:
sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],
fname)
if os.path.isfile(sysfile):
filenames.append(sysfile)
# Home directory
# And look for the user config file
try:
f = os.path.expanduser('~')
except KeyError:
pass
else:
user_file = os.path.join(f, fname)
if os.path.isfile(user_file):
filenames.append(user_file)
# Local file
if os.path.isfile(fname):
filenames.append(os.path.abspath(fname))
return filenames
def get_info(name, notfound_action=0):
"""
notfound_action:
0 - do nothing
1 - display warning message
2 - raise error
"""
cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead
'atlas_threads': atlas_threads_info, # ditto
'atlas_blas': atlas_blas_info,
'atlas_blas_threads': atlas_blas_threads_info,
'lapack_atlas': lapack_atlas_info, # use lapack_opt instead
'lapack_atlas_threads': lapack_atlas_threads_info, # ditto
'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead
'atlas_3_10_threads': atlas_3_10_threads_info, # ditto
'atlas_3_10_blas': atlas_3_10_blas_info,
'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead
'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto
'mkl': mkl_info,
# openblas which may or may not have embedded lapack
'openblas': openblas_info, # use blas_opt instead
# openblas with embedded lapack
'openblas_lapack': openblas_lapack_info, # use blas_opt instead
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
'fftw2': fftw2_info,
'fftw3': fftw3_info,
'dfftw': dfftw_info,
'sfftw': sfftw_info,
'fftw_threads': fftw_threads_info,
'dfftw_threads': dfftw_threads_info,
'sfftw_threads': sfftw_threads_info,
'djbfft': djbfft_info,
'blas': blas_info, # use blas_opt instead
'lapack': lapack_info, # use lapack_opt instead
'lapack_src': lapack_src_info,
'blas_src': blas_src_info,
'numpy': numpy_info,
'f2py': f2py_info,
'Numeric': Numeric_info,
'numeric': Numeric_info,
'numarray': numarray_info,
'numerix': numerix_info,
'lapack_opt': lapack_opt_info,
'blas_opt': blas_opt_info,
'boost_python': boost_python_info,
'agg2': agg2_info,
'wx': wx_info,
'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,
'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,
'gdk_pixbuf_2': gdk_pixbuf_2_info,
'gdk-pixbuf-2.0': gdk_pixbuf_2_info,
'gdk': gdk_info,
'gdk_2': gdk_2_info,
'gdk-2.0': gdk_2_info,
'gdk_x11_2': gdk_x11_2_info,
'gdk-x11-2.0': gdk_x11_2_info,
'gtkp_x11_2': gtkp_x11_2_info,
'gtk+-x11-2.0': gtkp_x11_2_info,
'gtkp_2': gtkp_2_info,
'gtk+-2.0': gtkp_2_info,
'xft': xft_info,
'freetype2': freetype2_info,
'umfpack': umfpack_info,
'amd': amd_info,
}.get(name.lower(), system_info)
return cl().get_info(notfound_action)
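# Minimal usage sketch (illustrative only): the dict returned by get_info()
# plugs directly into numpy.distutils setup keywords, e.g.
#
#     info = get_info('lapack_opt')
#     # -> {} if nothing was found, otherwise keys such as 'libraries',
#     #    'library_dirs', 'include_dirs', 'define_macros', ...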
class NotFoundError(DistutilsError):
"""Some third-party program or library is not found."""
class AtlasNotFoundError(NotFoundError):
"""
Atlas (http://math-atlas.sourceforge.net/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [atlas]) or by setting
the ATLAS environment variable."""
class LapackNotFoundError(NotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [lapack]) or by setting
the LAPACK environment variable."""
class LapackSrcNotFoundError(LapackNotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) sources not found.
Directories to search for the sources can be specified in the
numpy/distutils/site.cfg file (section [lapack_src]) or by setting
the LAPACK_SRC environment variable."""
class BlasNotFoundError(NotFoundError):
"""
Blas (http://www.netlib.org/blas/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [blas]) or by setting
the BLAS environment variable."""
class BlasSrcNotFoundError(BlasNotFoundError):
"""
Blas (http://www.netlib.org/blas/) sources not found.
Directories to search for the sources can be specified in the
numpy/distutils/site.cfg file (section [blas_src]) or by setting
the BLAS_SRC environment variable."""
class FFTWNotFoundError(NotFoundError):
"""
FFTW (http://www.fftw.org/) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [fftw]) or by setting
the FFTW environment variable."""
class DJBFFTNotFoundError(NotFoundError):
"""
DJBFFT (http://cr.yp.to/djbfft.html) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [djbfft]) or by setting
the DJBFFT environment variable."""
class NumericNotFoundError(NotFoundError):
"""
Numeric (http://www.numpy.org/) module not found.
Get it from above location, install it, and retry setup.py."""
class X11NotFoundError(NotFoundError):
"""X11 libraries not found."""
class UmfpackNotFoundError(NotFoundError):
"""
UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/)
not found. Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [umfpack]) or by setting
the UMFPACK environment variable."""
class system_info(object):
""" get_info() is the only public method. Don't use others.
"""
section = 'ALL'
dir_env_var = None
search_static_first = 0 # XXX: disabled by default, may disappear in
# future unless it is proved to be useful.
verbosity = 1
saved_results = {}
notfounderror = NotFoundError
def __init__(self,
default_lib_dirs=default_lib_dirs,
default_include_dirs=default_include_dirs,
verbosity=1,
):
self.__class__.info = {}
self.local_prefixes = []
defaults = {}
defaults['library_dirs'] = os.pathsep.join(default_lib_dirs)
defaults['include_dirs'] = os.pathsep.join(default_include_dirs)
defaults['runtime_library_dirs'] = os.pathsep.join(default_runtime_dirs)
defaults['rpath'] = ''
defaults['src_dirs'] = os.pathsep.join(default_src_dirs)
defaults['search_static_first'] = str(self.search_static_first)
defaults['extra_compile_args'] = ''
defaults['extra_link_args'] = ''
self.cp = ConfigParser(defaults)
self.files = []
self.files.extend(get_standard_file('.numpy-site.cfg'))
self.files.extend(get_standard_file('site.cfg'))
self.parse_config_files()
if self.section is not None:
self.search_static_first = self.cp.getboolean(
self.section, 'search_static_first')
assert isinstance(self.search_static_first, int)
def parse_config_files(self):
self.cp.read(self.files)
if not self.cp.has_section(self.section):
if self.section is not None:
self.cp.add_section(self.section)
def calc_libraries_info(self):
libs = self.get_libraries()
dirs = self.get_lib_dirs()
# The extensions use runtime_library_dirs
r_dirs = self.get_runtime_lib_dirs()
# Intrinsic distutils use rpath, we simply append both entries
# as though they were one entry
r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))
info = {}
for lib in libs:
i = self.check_libs(dirs, [lib])
if i is not None:
dict_append(info, **i)
else:
log.info('Library %s was not found. Ignoring' % (lib))
i = self.check_libs(r_dirs, [lib])
if i is not None:
# Swap library keywords found to runtime_library_dirs
# the libraries are insisting on the user having defined
# them using the library_dirs, and not necessarily by
# runtime_library_dirs
del i['libraries']
i['runtime_library_dirs'] = i.pop('library_dirs')
dict_append(info, **i)
else:
log.info('Runtime library %s was not found. Ignoring' % (lib))
return info
def set_info(self, **info):
if info:
lib_info = self.calc_libraries_info()
dict_append(info, **lib_info)
# Update extra information
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
self.saved_results[self.__class__.__name__] = info
def has_info(self):
return self.__class__.__name__ in self.saved_results
def calc_extra_info(self):
""" Updates the information in the current information with
respect to these flags:
extra_compile_args
extra_link_args
"""
info = {}
for key in ['extra_compile_args', 'extra_link_args']:
# Get values
opt = self.cp.get(self.section, key)
if opt:
tmp = {key : [opt]}
dict_append(info, **tmp)
return info
def get_info(self, notfound_action=0):
""" Return a dictonary with items that are compatible
with numpy.distutils.setup keyword arguments.
"""
flag = 0
if not self.has_info():
flag = 1
log.info(self.__class__.__name__ + ':')
if hasattr(self, 'calc_info'):
self.calc_info()
if notfound_action:
if not self.has_info():
if notfound_action == 1:
warnings.warn(self.notfounderror.__doc__)
elif notfound_action == 2:
raise self.notfounderror(self.notfounderror.__doc__)
else:
raise ValueError(repr(notfound_action))
if not self.has_info():
log.info(' NOT AVAILABLE')
self.set_info()
else:
log.info(' FOUND:')
res = self.saved_results.get(self.__class__.__name__)
if self.verbosity > 0 and flag:
for k, v in res.items():
v = str(v)
if k in ['sources', 'libraries'] and len(v) > 270:
v = v[:120] + '...\n...\n...' + v[-120:]
log.info(' %s = %s', k, v)
log.info('')
return copy.deepcopy(res)
def get_paths(self, section, key):
dirs = self.cp.get(section, key).split(os.pathsep)
env_var = self.dir_env_var
if env_var:
if is_sequence(env_var):
e0 = env_var[-1]
for e in env_var:
if e in os.environ:
e0 = e
break
if not env_var[0] == e0:
log.info('Setting %s=%s' % (env_var[0], e0))
env_var = e0
if env_var and env_var in os.environ:
d = os.environ[env_var]
if d == 'None':
log.info('Disabled %s: %s',
self.__class__.__name__, '(%s is None)'
% (env_var,))
return []
if os.path.isfile(d):
dirs = [os.path.dirname(d)] + dirs
l = getattr(self, '_lib_names', [])
if len(l) == 1:
b = os.path.basename(d)
b = os.path.splitext(b)[0]
if b[:3] == 'lib':
log.info('Replacing _lib_names[0]==%r with %r' \
% (self._lib_names[0], b[3:]))
self._lib_names[0] = b[3:]
else:
ds = d.split(os.pathsep)
ds2 = []
for d in ds:
if os.path.isdir(d):
ds2.append(d)
for dd in ['include', 'lib']:
d1 = os.path.join(d, dd)
if os.path.isdir(d1):
ds2.append(d1)
dirs = ds2 + dirs
default_dirs = self.cp.get(self.section, key).split(os.pathsep)
dirs.extend(default_dirs)
ret = []
for d in dirs:
if not os.path.isdir(d):
warnings.warn('Specified path %s is invalid.' % d)
continue
if d not in ret:
ret.append(d)
log.debug('( %s = %s )', key, ':'.join(ret))
return ret
def get_lib_dirs(self, key='library_dirs'):
return self.get_paths(self.section, key)
def get_runtime_lib_dirs(self, key='runtime_library_dirs'):
return self.get_paths(self.section, key)
def get_include_dirs(self, key='include_dirs'):
return self.get_paths(self.section, key)
def get_src_dirs(self, key='src_dirs'):
return self.get_paths(self.section, key)
def get_libs(self, key, default):
try:
libs = self.cp.get(self.section, key)
except NoOptionError:
if not default:
return []
if is_string(default):
return [default]
return default
return [b for b in [a.strip() for a in libs.split(',')] if b]
def get_libraries(self, key='libraries'):
return self.get_libs(key, '')
def library_extensions(self):
static_exts = ['.a']
if sys.platform == 'win32':
static_exts.append('.lib') # .lib is used by MSVC
if self.search_static_first:
exts = static_exts + [so_ext]
else:
exts = [so_ext] + static_exts
if sys.platform == 'cygwin':
exts.append('.dll.a')
if sys.platform == 'darwin':
exts.append('.dylib')
# Debian and Ubuntu added a g3f suffix to shared library to deal with
# g77 -> gfortran ABI transition
# XXX: disabled, it hides more problem than it solves.
#if sys.platform[:5] == 'linux':
# exts.append('.so.3gf')
return exts
def check_libs(self, lib_dirs, libs, opt_libs=[]):
"""If static or shared libraries are available then return
their info dictionary.
Checks for all libraries as shared libraries first, then
static (or vice versa if self.search_static_first is True).
"""
exts = self.library_extensions()
info = None
for ext in exts:
info = self._check_libs(lib_dirs, libs, opt_libs, [ext])
if info is not None:
break
if not info:
log.info(' libraries %s not found in %s', ','.join(libs),
lib_dirs)
return info
def check_libs2(self, lib_dirs, libs, opt_libs=[]):
"""If static or shared libraries are available then return
their info dictionary.
Checks each library for shared or static.
"""
exts = self.library_extensions()
info = self._check_libs(lib_dirs, libs, opt_libs, exts)
if not info:
log.info(' libraries %s not found in %s', ','.join(libs),
lib_dirs)
return info
def _lib_list(self, lib_dir, libs, exts):
assert is_string(lib_dir)
liblist = []
# under windows first try without 'lib' prefix
if sys.platform == 'win32':
lib_prefixes = ['', 'lib']
else:
lib_prefixes = ['lib']
# for each library name, see if we can find a file for it.
for l in libs:
for ext in exts:
for prefix in lib_prefixes:
p = self.combine_paths(lib_dir, prefix + l + ext)
if p:
break
if p:
assert len(p) == 1
# ??? splitext on p[0] would do this for cygwin
# doesn't seem correct
if ext == '.dll.a':
l += '.dll'
liblist.append(l)
break
return liblist
def _check_libs(self, lib_dirs, libs, opt_libs, exts):
"""Find mandatory and optional libs in expected paths.
Missing optional libraries are silently forgotten.
"""
# First, try to find the mandatory libraries
if is_sequence(lib_dirs):
found_libs, found_dirs = [], []
for dir_ in lib_dirs:
found_libs1 = self._lib_list(dir_, libs, exts)
# It's possible that we'll find the same library in multiple
# directories. It's also possible that we'll find some
                # libraries in one directory, and some in another. So the
# obvious thing would be to use a set instead of a list, but I
# don't know if preserving order matters (does it?).
for found_lib in found_libs1:
if found_lib not in found_libs:
found_libs.append(found_lib)
if dir_ not in found_dirs:
found_dirs.append(dir_)
else:
found_libs = self._lib_list(lib_dirs, libs, exts)
found_dirs = [lib_dirs]
if len(found_libs) > 0 and len(found_libs) == len(libs):
info = {'libraries': found_libs, 'library_dirs': found_dirs}
# Now, check for optional libraries
if is_sequence(lib_dirs):
for dir_ in lib_dirs:
opt_found_libs = self._lib_list(dir_, opt_libs, exts)
if opt_found_libs:
if dir_ not in found_dirs:
found_dirs.extend(dir_)
found_libs.extend(opt_found_libs)
else:
opt_found_libs = self._lib_list(lib_dirs, opt_libs, exts)
if opt_found_libs:
found_libs.extend(opt_found_libs)
return info
else:
return None
def combine_paths(self, *args):
"""Return a list of existing paths composed by all combinations
of items from the arguments.
"""
return combine_paths(*args, **{'verbosity': self.verbosity})
class fft_opt_info(system_info):
def calc_info(self):
info = {}
fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')
djbfft_info = get_info('djbfft')
if fftw_info:
dict_append(info, **fftw_info)
if djbfft_info:
dict_append(info, **djbfft_info)
self.set_info(**info)
return
class fftw_info(system_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}]
def calc_ver_info(self, ver_param):
"""Returns True on successful version detection, else False"""
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
incl_dir = None
libs = self.get_libs(self.section + '_libs', ver_param['libs'])
info = self.check_libs(lib_dirs, libs)
if info is not None:
flag = 0
for d in incl_dirs:
if len(self.combine_paths(d, ver_param['includes'])) \
== len(ver_param['includes']):
dict_append(info, include_dirs=[d])
flag = 1
incl_dirs = [d]
break
if flag:
dict_append(info, define_macros=ver_param['macros'])
else:
info = None
if info is not None:
self.set_info(**info)
return True
else:
log.info(' %s not found' % (ver_param['name']))
return False
def calc_info(self):
for i in self.ver_info:
if self.calc_ver_info(i):
break
class fftw2_info(fftw_info):
#variables to override
section = 'fftw'
dir_env_var = 'FFTW'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw2',
'libs':['rfftw', 'fftw'],
'includes':['fftw.h', 'rfftw.h'],
'macros':[('SCIPY_FFTW_H', None)]}
]
class fftw3_info(fftw_info):
#variables to override
section = 'fftw3'
dir_env_var = 'FFTW3'
notfounderror = FFTWNotFoundError
ver_info = [{'name':'fftw3',
'libs':['fftw3'],
'includes':['fftw3.h'],
'macros':[('SCIPY_FFTW3_H', None)]},
]
class dfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'dfftw',
'libs':['drfftw', 'dfftw'],
'includes':['dfftw.h', 'drfftw.h'],
'macros':[('SCIPY_DFFTW_H', None)]}]
class sfftw_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw',
'libs':['srfftw', 'sfftw'],
'includes':['sfftw.h', 'srfftw.h'],
'macros':[('SCIPY_SFFTW_H', None)]}]
class fftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'fftw threads',
'libs':['rfftw_threads', 'fftw_threads'],
'includes':['fftw_threads.h', 'rfftw_threads.h'],
'macros':[('SCIPY_FFTW_THREADS_H', None)]}]
class dfftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'dfftw threads',
'libs':['drfftw_threads', 'dfftw_threads'],
'includes':['dfftw_threads.h', 'drfftw_threads.h'],
'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]
class sfftw_threads_info(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw threads',
'libs':['srfftw_threads', 'sfftw_threads'],
'includes':['sfftw_threads.h', 'srfftw_threads.h'],
'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]
class djbfft_info(system_info):
section = 'djbfft'
dir_env_var = 'DJBFFT'
notfounderror = DJBFFTNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend(self.combine_paths(d, ['djbfft']) + [d])
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
info = None
for d in lib_dirs:
p = self.combine_paths(d, ['djbfft.a'])
if p:
info = {'extra_objects': p}
break
p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext])
if p:
info = {'libraries': ['djbfft'], 'library_dirs': [d]}
break
if info is None:
return
for d in incl_dirs:
if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2:
dict_append(info, include_dirs=[d],
define_macros=[('SCIPY_DJBFFT_H', None)])
self.set_info(**info)
return
return
class mkl_info(system_info):
section = 'mkl'
dir_env_var = 'MKL'
_lib_mkl = ['mkl', 'vml', 'guide']
def get_mkl_rootdir(self):
mklroot = os.environ.get('MKLROOT', None)
if mklroot is not None:
return mklroot
paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
ld_so_conf = '/etc/ld.so.conf'
if os.path.isfile(ld_so_conf):
for d in open(ld_so_conf, 'r'):
d = d.strip()
if d:
paths.append(d)
intel_mkl_dirs = []
for path in paths:
path_atoms = path.split(os.sep)
for m in path_atoms:
if m.startswith('mkl'):
d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])
intel_mkl_dirs.append(d)
break
for d in paths:
dirs = glob(os.path.join(d, 'mkl', '*'))
dirs += glob(os.path.join(d, 'mkl*'))
for d in dirs:
if os.path.isdir(os.path.join(d, 'lib')):
return d
return None
def __init__(self):
mklroot = self.get_mkl_rootdir()
if mklroot is None:
system_info.__init__(self)
else:
from .cpuinfo import cpu
l = 'mkl' # use shared library
if cpu.is_Itanium():
plt = '64'
#l = 'mkl_ipf'
elif cpu.is_Xeon():
plt = 'intel64'
#l = 'mkl_intel64'
else:
plt = '32'
#l = 'mkl_ia32'
if l not in self._lib_mkl:
self._lib_mkl.insert(0, l)
system_info.__init__(
self,
default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],
default_include_dirs=[os.path.join(mklroot, 'include')])
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
mkl_libs = self.get_libs('mkl_libs', self._lib_mkl)
info = self.check_libs2(lib_dirs, mkl_libs)
if info is None:
return
dict_append(info,
define_macros=[('SCIPY_MKL_H', None),
('HAVE_CBLAS', None)],
include_dirs=incl_dirs)
if sys.platform == 'win32':
pass # win32 has no pthread library
else:
dict_append(info, libraries=['pthread'])
self.set_info(**info)
class lapack_mkl_info(mkl_info):
def calc_info(self):
mkl = get_info('mkl')
if not mkl:
return
if sys.platform == 'win32':
lapack_libs = self.get_libs('lapack_libs', ['mkl_lapack'])
else:
lapack_libs = self.get_libs('lapack_libs',
['mkl_lapack32', 'mkl_lapack64'])
info = {'libraries': lapack_libs}
dict_append(info, **mkl)
self.set_info(**info)
class blas_mkl_info(mkl_info):
pass
class atlas_info(system_info):
section = 'atlas'
dir_env_var = 'ATLAS'
_lib_names = ['f77blas', 'cblas']
if sys.platform[:7] == 'freebsd':
_lib_atlas = ['atlas_r']
_lib_lapack = ['alapack_r']
else:
_lib_atlas = ['atlas']
_lib_lapack = ['lapack']
notfounderror = AtlasNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',
'sse', '3dnow', 'sse2']) + [d])
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names + self._lib_atlas)
lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)
atlas = None
lapack = None
atlas_1 = None
for d in lib_dirs:
atlas = self.check_libs2(d, atlas_libs, [])
lapack_atlas = self.check_libs2(d, ['lapack_atlas'], [])
if atlas is not None:
lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])
lapack = self.check_libs2(lib_dirs2, lapack_libs, [])
if lapack is not None:
break
if atlas:
atlas_1 = atlas
log.info(self.__class__)
if atlas is None:
atlas = atlas_1
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
if lapack is not None:
dict_append(info, **lapack)
dict_append(info, **atlas)
elif 'lapack_atlas' in atlas['libraries']:
dict_append(info, **atlas)
dict_append(info,
define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])
self.set_info(**info)
return
else:
dict_append(info, **atlas)
dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])
message = """
*********************************************************************
Could not find lapack library within the ATLAS installation.
*********************************************************************
"""
warnings.warn(message)
self.set_info(**info)
return
# Check if lapack library is complete, only warn if it is not.
lapack_dir = lapack['library_dirs'][0]
lapack_name = lapack['libraries'][0]
lapack_lib = None
lib_prefixes = ['lib']
if sys.platform == 'win32':
lib_prefixes.append('')
for e in self.library_extensions():
for prefix in lib_prefixes:
fn = os.path.join(lapack_dir, prefix + lapack_name + e)
if os.path.exists(fn):
lapack_lib = fn
break
if lapack_lib:
break
if lapack_lib is not None:
sz = os.stat(lapack_lib)[6]
if sz <= 4000 * 1024:
message = """
*********************************************************************
Lapack library (from ATLAS) is probably incomplete:
size of %s is %sk (expected >4000k)
Follow the instructions in the KNOWN PROBLEMS section of the file
numpy/INSTALL.txt.
*********************************************************************
""" % (lapack_lib, sz / 1024)
warnings.warn(message)
else:
info['language'] = 'f77'
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(info, **atlas_extra_info)
self.set_info(**info)
class atlas_blas_info(atlas_info):
_lib_names = ['f77blas', 'cblas']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names + self._lib_atlas)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(atlas, **atlas_extra_info)
dict_append(info, **atlas)
self.set_info(**info)
return
class atlas_threads_info(atlas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['ptf77blas', 'ptcblas']
class atlas_blas_threads_info(atlas_blas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['ptf77blas', 'ptcblas']
class lapack_atlas_info(atlas_info):
_lib_names = ['lapack_atlas'] + atlas_info._lib_names
class lapack_atlas_threads_info(atlas_threads_info):
_lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names
class atlas_3_10_info(atlas_info):
_lib_names = ['satlas']
_lib_atlas = _lib_names
_lib_lapack = _lib_names
class atlas_3_10_blas_info(atlas_3_10_info):
_lib_names = ['satlas']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
atlas_libs = self.get_libs('atlas_libs',
self._lib_names)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
include_dirs = self.get_include_dirs()
h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])
h = h[0]
if h:
h = os.path.dirname(h)
dict_append(info, include_dirs=[h])
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
atlas_version, atlas_extra_info = get_atlas_version(**atlas)
dict_append(atlas, **atlas_extra_info)
dict_append(info, **atlas)
self.set_info(**info)
return
class atlas_3_10_threads_info(atlas_3_10_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
    #if sys.platform[:7] == 'freebsd':
## I don't think freebsd supports 3.10 at this time - 2014
_lib_atlas = _lib_names
_lib_lapack = _lib_names
class atlas_3_10_blas_threads_info(atlas_3_10_blas_info):
dir_env_var = ['PTATLAS', 'ATLAS']
_lib_names = ['tatlas']
class lapack_atlas_3_10_info(atlas_3_10_info):
pass
class lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):
pass
class lapack_info(system_info):
section = 'lapack'
dir_env_var = 'LAPACK'
_lib_names = ['lapack']
notfounderror = LapackNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
lapack_libs = self.get_libs('lapack_libs', self._lib_names)
info = self.check_libs(lib_dirs, lapack_libs, [])
if info is None:
return
info['language'] = 'f77'
self.set_info(**info)
class lapack_src_info(system_info):
section = 'lapack_src'
dir_env_var = 'LAPACK_SRC'
notfounderror = LapackSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'dgesv.f')):
src_dir = d
break
if not src_dir:
            #XXX: Get sources from netlib. Maybe ask first.
return
# The following is extracted from LAPACK-3.0/SRC/Makefile.
# Added missing names from lapack-lite-3.1.1/SRC/Makefile
# while keeping removed names for Lapack-3.0 compatibility.
allaux = '''
ilaenv ieeeck lsame lsamen xerbla
iparmq
''' # *.f
laux = '''
bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1
laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2
lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre
larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4
lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1
lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf
stebz stedc steqr sterf
larra larrc larrd larr larrk larrj larrr laneg laisnan isnan
lazq3 lazq4
''' # [s|d]*.f
lasrc = '''
gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak
gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv
gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2
geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd
gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal
gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd
ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein
hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0
lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb
lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp
laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv
lartv larz larzb larzt laswp lasyf latbs latdf latps latrd
latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv
pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2
potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri
pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs
spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv
sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2
tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs
trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs
tzrqf tzrzf
lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5
''' # [s|c|d|z]*.f
sd_lasrc = '''
laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l
org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr
orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3
ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx
sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd
stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd
sygvx sytd2 sytrd
''' # [s|d]*.f
cz_lasrc = '''
bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev
heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv
hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd
hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf
hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7
laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe
laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv
spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 unglq
ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2
unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr
''' # [c|z]*.f
#######
sclaux = laux + ' econd ' # s*.f
dzlaux = laux + ' secnd ' # d*.f
slasrc = lasrc + sd_lasrc # s*.f
dlasrc = lasrc + sd_lasrc # d*.f
clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f
zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f
oclasrc = ' icmax1 scsum1 ' # *.f
ozlasrc = ' izmax1 dzsum1 ' # *.f
sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \
+ ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \
+ ['c%s.f' % f for f in (clasrc).split()] \
+ ['z%s.f' % f for f in (zlasrc).split()] \
+ ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]
sources = [os.path.join(src_dir, f) for f in sources]
# Lapack 3.1:
src_dir2 = os.path.join(src_dir, '..', 'INSTALL')
sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']
# Lapack 3.2.1:
sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']
sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']
# Should we check here actual existence of source files?
# Yes, the file listing is different between 3.0 and 3.1
# versions.
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
atlas_version_c_text = r'''
/* This file is generated from numpy/distutils/system_info.py */
void ATL_buildinfo(void);
int main(void) {
ATL_buildinfo();
return 0;
}
'''
_cached_atlas_version = {}
def get_atlas_version(**config):
libraries = config.get('libraries', [])
library_dirs = config.get('library_dirs', [])
key = (tuple(libraries), tuple(library_dirs))
if key in _cached_atlas_version:
return _cached_atlas_version[key]
c = cmd_config(Distribution())
atlas_version = None
info = {}
try:
s, o = c.get_output(atlas_version_c_text,
libraries=libraries, library_dirs=library_dirs,
use_tee=(system_info.verbosity > 0))
if s and re.search(r'undefined reference to `_gfortran', o, re.M):
s, o = c.get_output(atlas_version_c_text,
libraries=libraries + ['gfortran'],
library_dirs=library_dirs,
use_tee=(system_info.verbosity > 0))
if not s:
warnings.warn("""
*****************************************************
Linkage with ATLAS requires gfortran. Use
python setup.py config_fc --fcompiler=gnu95 ...
when building extension libraries that use ATLAS.
Make sure that -lgfortran is used for C++ extensions.
*****************************************************
""")
dict_append(info, language='f90',
define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
except Exception: # failed to get version from file -- maybe on Windows
# look at directory name
for o in library_dirs:
m = re.search(r'ATLAS_(?P<version>\d+[.]\d+[.]\d+)_', o)
if m:
atlas_version = m.group('version')
if atlas_version is not None:
break
# final choice --- look at ATLAS_VERSION environment
# variable
if atlas_version is None:
atlas_version = os.environ.get('ATLAS_VERSION', None)
if atlas_version:
dict_append(info, define_macros=[(
'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
])
else:
dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
return atlas_version or '?.?.?', info
if not s:
m = re.search(r'ATLAS version (?P<version>\d+[.]\d+[.]\d+)', o)
if m:
atlas_version = m.group('version')
if atlas_version is None:
if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):
atlas_version = '3.2.1_pre3.3.6'
else:
log.info('Status: %d', s)
log.info('Output: %s', o)
if atlas_version == '3.2.1_pre3.3.6':
dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
else:
dict_append(info, define_macros=[(
'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
])
result = _cached_atlas_version[key] = atlas_version, info
return result
class lapack_opt_info(system_info):
notfounderror = LapackNotFoundError
def calc_info(self):
openblas_info = get_info('openblas_lapack')
if openblas_info:
self.set_info(**openblas_info)
return
lapack_mkl_info = get_info('lapack_mkl')
if lapack_mkl_info:
self.set_info(**lapack_mkl_info)
return
atlas_info = get_info('atlas_3_10_threads')
if not atlas_info:
atlas_info = get_info('atlas_3_10')
if not atlas_info:
atlas_info = get_info('atlas_threads')
if not atlas_info:
atlas_info = get_info('atlas')
if sys.platform == 'darwin' and not atlas_info:
# Use the system lapack from Accelerate or vecLib under OSX
args = []
link_args = []
if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
'x86_64' in get_platform() or \
'i386' in platform.platform():
intel = 1
else:
intel = 0
if os.path.exists('/System/Library/Frameworks'
'/Accelerate.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
elif os.path.exists('/System/Library/Frameworks'
'/vecLib.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
if args:
self.set_info(extra_compile_args=args,
extra_link_args=link_args,
define_macros=[('NO_ATLAS_INFO', 3),
('HAVE_CBLAS', None)])
return
#atlas_info = {} ## uncomment for testing
need_lapack = 0
need_blas = 0
info = {}
if atlas_info:
l = atlas_info.get('define_macros', [])
if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
or ('ATLAS_WITHOUT_LAPACK', None) in l:
need_lapack = 1
info = atlas_info
else:
warnings.warn(AtlasNotFoundError.__doc__)
need_blas = 1
need_lapack = 1
dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
if need_lapack:
lapack_info = get_info('lapack')
#lapack_info = {} ## uncomment for testing
if lapack_info:
dict_append(info, **lapack_info)
else:
warnings.warn(LapackNotFoundError.__doc__)
lapack_src_info = get_info('lapack_src')
if not lapack_src_info:
warnings.warn(LapackSrcNotFoundError.__doc__)
return
dict_append(info, libraries=[('flapack_src', lapack_src_info)])
if need_blas:
blas_info = get_info('blas')
#blas_info = {} ## uncomment for testing
if blas_info:
dict_append(info, **blas_info)
else:
warnings.warn(BlasNotFoundError.__doc__)
blas_src_info = get_info('blas_src')
if not blas_src_info:
warnings.warn(BlasSrcNotFoundError.__doc__)
return
dict_append(info, libraries=[('fblas_src', blas_src_info)])
self.set_info(**info)
return
class blas_opt_info(system_info):
notfounderror = BlasNotFoundError
def calc_info(self):
blas_mkl_info = get_info('blas_mkl')
if blas_mkl_info:
self.set_info(**blas_mkl_info)
return
openblas_info = get_info('openblas')
if openblas_info:
self.set_info(**openblas_info)
return
atlas_info = get_info('atlas_3_10_blas_threads')
if not atlas_info:
atlas_info = get_info('atlas_3_10_blas')
if not atlas_info:
atlas_info = get_info('atlas_blas_threads')
if not atlas_info:
atlas_info = get_info('atlas_blas')
if sys.platform == 'darwin' and not atlas_info:
# Use the system BLAS from Accelerate or vecLib under OSX
args = []
link_args = []
if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \
'x86_64' in get_platform() or \
'i386' in platform.platform():
intel = 1
else:
intel = 0
if os.path.exists('/System/Library/Frameworks'
'/Accelerate.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])
elif os.path.exists('/System/Library/Frameworks'
'/vecLib.framework/'):
if intel:
args.extend(['-msse3'])
else:
args.extend(['-faltivec'])
args.extend([
'-I/System/Library/Frameworks/vecLib.framework/Headers'])
link_args.extend(['-Wl,-framework', '-Wl,vecLib'])
if args:
self.set_info(extra_compile_args=args,
extra_link_args=link_args,
define_macros=[('NO_ATLAS_INFO', 3),
('HAVE_CBLAS', None)])
return
need_blas = 0
info = {}
if atlas_info:
info = atlas_info
else:
warnings.warn(AtlasNotFoundError.__doc__)
need_blas = 1
dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
if need_blas:
blas_info = get_info('blas')
if blas_info:
dict_append(info, **blas_info)
else:
warnings.warn(BlasNotFoundError.__doc__)
blas_src_info = get_info('blas_src')
if not blas_src_info:
warnings.warn(BlasSrcNotFoundError.__doc__)
return
dict_append(info, libraries=[('fblas_src', blas_src_info)])
self.set_info(**info)
return
class blas_info(system_info):
section = 'blas'
dir_env_var = 'BLAS'
_lib_names = ['blas']
notfounderror = BlasNotFoundError
def calc_info(self):
lib_dirs = self.get_lib_dirs()
blas_libs = self.get_libs('blas_libs', self._lib_names)
info = self.check_libs(lib_dirs, blas_libs, [])
if info is None:
return
if platform.system() == 'Windows':
# The check for windows is needed because has_cblas uses the
# same compiler that was used to compile Python and msvc is
# often not installed when mingw is being used. This rough
# treatment is not desirable, but windows is tricky.
info['language'] = 'f77' # XXX: is it generally true?
else:
lib = self.has_cblas(info)
if lib is not None:
info['language'] = 'c'
info['libraries'] = [lib]
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
def has_cblas(self, info):
# primitive cblas check by looking for the header and trying to link
# cblas or blas
res = False
c = distutils.ccompiler.new_compiler()
tmpdir = tempfile.mkdtemp()
s = """#include <cblas.h>
int main(int argc, const char *argv[])
{
double a[4] = {1,2,3,4};
double b[4] = {5,6,7,8};
return cblas_ddot(4, a, 1, b, 1) > 10;
}"""
src = os.path.join(tmpdir, 'source.c')
try:
with open(src, 'wt') as f:
f.write(s)
try:
# check we can compile (find headers)
obj = c.compile([src], output_dir=tmpdir,
include_dirs=self.get_include_dirs())
# check we can link (find library)
# some systems have separate cblas and blas libs. First
# check for cblas lib, and if not present check for blas lib.
try:
c.link_executable(obj, os.path.join(tmpdir, "a.out"),
libraries=["cblas"],
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
res = "cblas"
except distutils.ccompiler.LinkError:
c.link_executable(obj, os.path.join(tmpdir, "a.out"),
libraries=["blas"],
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
res = "blas"
except distutils.ccompiler.CompileError:
res = None
finally:
shutil.rmtree(tmpdir)
return res
class openblas_info(blas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
notfounderror = BlasNotFoundError
def check_embedded_lapack(self, info):
return True
def calc_info(self):
lib_dirs = self.get_lib_dirs()
openblas_libs = self.get_libs('libraries', self._lib_names)
if openblas_libs == self._lib_names: # backward compat with 1.8.0
openblas_libs = self.get_libs('openblas_libs', self._lib_names)
info = self.check_libs(lib_dirs, openblas_libs, [])
if info is None:
return
# Add extra info for OpenBLAS
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
if not self.check_embedded_lapack(info):
return
info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
class openblas_lapack_info(openblas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
notfounderror = BlasNotFoundError
def check_embedded_lapack(self, info):
res = False
c = distutils.ccompiler.new_compiler()
tmpdir = tempfile.mkdtemp()
s = """void zungqr();
int main(int argc, const char *argv[])
{
zungqr_();
return 0;
}"""
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
# Add the additional "extra" arguments
try:
extra_args = info['extra_link_args']
        except KeyError:
extra_args = []
try:
with open(src, 'wt') as f:
f.write(s)
obj = c.compile([src], output_dir=tmpdir)
try:
c.link_executable(obj, out, libraries=info['libraries'],
library_dirs=info['library_dirs'],
extra_postargs=extra_args)
res = True
except distutils.ccompiler.LinkError:
res = False
finally:
shutil.rmtree(tmpdir)
return res
class blas_src_info(system_info):
section = 'blas_src'
dir_env_var = 'BLAS_SRC'
notfounderror = BlasSrcNotFoundError
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['blas']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'daxpy.f')):
src_dir = d
break
if not src_dir:
#XXX: Get sources from netlib. May be ask first.
return
blas1 = '''
caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot
dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2
srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg
dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax
snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap
scabs1
'''
blas2 = '''
cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv
chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv
dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv
sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger
stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc
zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2
ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv
'''
blas3 = '''
cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k
dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm
ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm
'''
sources = [os.path.join(src_dir, f + '.f') \
for f in (blas1 + blas2 + blas3).split()]
#XXX: should we check here actual existence of source files?
sources = [f for f in sources if os.path.isfile(f)]
info = {'sources': sources, 'language': 'f77'}
self.set_info(**info)
class x11_info(system_info):
section = 'x11'
notfounderror = X11NotFoundError
def __init__(self):
system_info.__init__(self,
default_lib_dirs=default_x11_lib_dirs,
default_include_dirs=default_x11_include_dirs)
def calc_info(self):
if sys.platform in ['win32']:
return
lib_dirs = self.get_lib_dirs()
include_dirs = self.get_include_dirs()
x11_libs = self.get_libs('x11_libs', ['X11'])
info = self.check_libs(lib_dirs, x11_libs, [])
if info is None:
return
inc_dir = None
for d in include_dirs:
if self.combine_paths(d, 'X11/X.h'):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
self.set_info(**info)
class _numpy_info(system_info):
section = 'Numeric'
modulename = 'Numeric'
notfounderror = NumericNotFoundError
def __init__(self):
include_dirs = []
try:
module = __import__(self.modulename)
prefix = []
for name in module.__file__.split(os.sep):
if name == 'lib':
break
prefix.append(name)
# Ask numpy for its own include path before attempting
# anything else
try:
include_dirs.append(getattr(module, 'get_include')())
except AttributeError:
pass
include_dirs.append(distutils.sysconfig.get_python_inc(
prefix=os.sep.join(prefix)))
except ImportError:
pass
py_incl_dir = distutils.sysconfig.get_python_inc()
include_dirs.append(py_incl_dir)
py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
if py_pincl_dir not in include_dirs:
include_dirs.append(py_pincl_dir)
for d in default_include_dirs:
d = os.path.join(d, os.path.basename(py_incl_dir))
if d not in include_dirs:
include_dirs.append(d)
system_info.__init__(self,
default_lib_dirs=[],
default_include_dirs=include_dirs)
def calc_info(self):
try:
module = __import__(self.modulename)
except ImportError:
return
info = {}
macros = []
for v in ['__version__', 'version']:
vrs = getattr(module, v, None)
if vrs is None:
continue
macros = [(self.modulename.upper() + '_VERSION',
'"\\"%s\\""' % (vrs)),
(self.modulename.upper(), None)]
break
## try:
## macros.append(
## (self.modulename.upper()+'_VERSION_HEX',
## hex(vstr2hex(module.__version__))),
## )
## except Exception as msg:
## print msg
dict_append(info, define_macros=macros)
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
if self.combine_paths(d,
os.path.join(self.modulename,
'arrayobject.h')):
inc_dir = d
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir])
if info:
self.set_info(**info)
return
class numarray_info(_numpy_info):
section = 'numarray'
modulename = 'numarray'
class Numeric_info(_numpy_info):
section = 'Numeric'
modulename = 'Numeric'
class numpy_info(_numpy_info):
section = 'numpy'
modulename = 'numpy'
class numerix_info(system_info):
section = 'numerix'
def calc_info(self):
which = None, None
if os.getenv("NUMERIX"):
which = os.getenv("NUMERIX"), "environment var"
# If all the above fail, default to numpy.
if which[0] is None:
which = "numpy", "defaulted"
try:
import numpy
which = "numpy", "defaulted"
except ImportError:
msg1 = str(get_exception())
try:
import Numeric
which = "numeric", "defaulted"
except ImportError:
msg2 = str(get_exception())
try:
import numarray
which = "numarray", "defaulted"
except ImportError:
msg3 = str(get_exception())
log.info(msg1)
log.info(msg2)
log.info(msg3)
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric' "
"or 'numarray' or 'numpy' but the value obtained"
" from the %s was '%s'." % (which[1], which[0]))
os.environ['NUMERIX'] = which[0]
self.set_info(**get_info(which[0]))
class f2py_info(system_info):
def calc_info(self):
try:
import numpy.f2py as f2py
except ImportError:
return
f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src')
self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')],
include_dirs=[f2py_dir])
return
class boost_python_info(system_info):
section = 'boost_python'
dir_env_var = 'BOOST'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['boost*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',
'module.cpp')):
src_dir = d
break
if not src_dir:
return
py_incl_dirs = [distutils.sysconfig.get_python_inc()]
py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
if py_pincl_dir not in py_incl_dirs:
py_incl_dirs.append(py_pincl_dir)
srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))
bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))
info = {'libraries': [('boost_python_src',
{'include_dirs': [src_dir] + py_incl_dirs,
'sources':bpl_srcs}
)],
'include_dirs': [src_dir],
}
if info:
self.set_info(**info)
return
class agg2_info(system_info):
section = 'agg2'
dir_env_var = 'AGG2'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['agg2*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):
src_dir = d
break
if not src_dir:
return
if sys.platform == 'win32':
agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',
'win32', 'agg_win32_bmp.cpp'))
else:
agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))
agg2_srcs += [os.path.join(src_dir, 'src', 'platform',
'X11',
'agg_platform_support.cpp')]
info = {'libraries':
[('agg2_src',
{'sources': agg2_srcs,
'include_dirs': [os.path.join(src_dir, 'include')],
}
)],
'include_dirs': [os.path.join(src_dir, 'include')],
}
if info:
self.set_info(**info)
return
class _pkg_config_info(system_info):
section = None
config_env_var = 'PKG_CONFIG'
default_config_exe = 'pkg-config'
append_config_exe = ''
version_macro_name = None
release_macro_name = None
version_flag = '--modversion'
cflags_flag = '--cflags'
def get_config_exe(self):
if self.config_env_var in os.environ:
return os.environ[self.config_env_var]
return self.default_config_exe
def get_config_output(self, config_exe, option):
cmd = config_exe + ' ' + self.append_config_exe + ' ' + option
s, o = exec_command(cmd, use_tee=0)
if not s:
return o
def calc_info(self):
config_exe = find_executable(self.get_config_exe())
if not config_exe:
log.warn('File not found: %s. Cannot determine %s info.' \
% (config_exe, self.section))
return
info = {}
macros = []
libraries = []
library_dirs = []
include_dirs = []
extra_link_args = []
extra_compile_args = []
version = self.get_config_output(config_exe, self.version_flag)
if version:
macros.append((self.__class__.__name__.split('.')[-1].upper(),
'"\\"%s\\""' % (version)))
if self.version_macro_name:
macros.append((self.version_macro_name + '_%s'
% (version.replace('.', '_')), None))
if self.release_macro_name:
release = self.get_config_output(config_exe, '--release')
if release:
macros.append((self.release_macro_name + '_%s'
% (release.replace('.', '_')), None))
opts = self.get_config_output(config_exe, '--libs')
if opts:
for opt in opts.split():
if opt[:2] == '-l':
libraries.append(opt[2:])
elif opt[:2] == '-L':
library_dirs.append(opt[2:])
else:
extra_link_args.append(opt)
opts = self.get_config_output(config_exe, self.cflags_flag)
if opts:
for opt in opts.split():
if opt[:2] == '-I':
include_dirs.append(opt[2:])
elif opt[:2] == '-D':
if '=' in opt:
n, v = opt[2:].split('=')
macros.append((n, v))
else:
macros.append((opt[2:], None))
else:
extra_compile_args.append(opt)
if macros:
dict_append(info, define_macros=macros)
if libraries:
dict_append(info, libraries=libraries)
if library_dirs:
dict_append(info, library_dirs=library_dirs)
if include_dirs:
dict_append(info, include_dirs=include_dirs)
if extra_link_args:
dict_append(info, extra_link_args=extra_link_args)
if extra_compile_args:
dict_append(info, extra_compile_args=extra_compile_args)
if info:
self.set_info(**info)
return
class wx_info(_pkg_config_info):
section = 'wx'
config_env_var = 'WX_CONFIG'
default_config_exe = 'wx-config'
append_config_exe = ''
version_macro_name = 'WX_VERSION'
release_macro_name = 'WX_RELEASE'
version_flag = '--version'
cflags_flag = '--cxxflags'
class gdk_pixbuf_xlib_2_info(_pkg_config_info):
section = 'gdk_pixbuf_xlib_2'
append_config_exe = 'gdk-pixbuf-xlib-2.0'
version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'
class gdk_pixbuf_2_info(_pkg_config_info):
section = 'gdk_pixbuf_2'
append_config_exe = 'gdk-pixbuf-2.0'
version_macro_name = 'GDK_PIXBUF_VERSION'
class gdk_x11_2_info(_pkg_config_info):
section = 'gdk_x11_2'
append_config_exe = 'gdk-x11-2.0'
version_macro_name = 'GDK_X11_VERSION'
class gdk_2_info(_pkg_config_info):
section = 'gdk_2'
append_config_exe = 'gdk-2.0'
version_macro_name = 'GDK_VERSION'
class gdk_info(_pkg_config_info):
section = 'gdk'
append_config_exe = 'gdk'
version_macro_name = 'GDK_VERSION'
class gtkp_x11_2_info(_pkg_config_info):
section = 'gtkp_x11_2'
append_config_exe = 'gtk+-x11-2.0'
version_macro_name = 'GTK_X11_VERSION'
class gtkp_2_info(_pkg_config_info):
section = 'gtkp_2'
append_config_exe = 'gtk+-2.0'
version_macro_name = 'GTK_VERSION'
class xft_info(_pkg_config_info):
section = 'xft'
append_config_exe = 'xft'
version_macro_name = 'XFT_VERSION'
class freetype2_info(_pkg_config_info):
section = 'freetype2'
append_config_exe = 'freetype2'
version_macro_name = 'FREETYPE2_VERSION'
class amd_info(system_info):
section = 'amd'
dir_env_var = 'AMD'
_lib_names = ['amd']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
amd_libs = self.get_libs('amd_libs', self._lib_names)
info = self.check_libs(lib_dirs, amd_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, 'amd.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_AMD_H', None)],
swig_opts=['-I' + inc_dir])
self.set_info(**info)
return
class umfpack_info(system_info):
section = 'umfpack'
dir_env_var = 'UMFPACK'
notfounderror = UmfpackNotFoundError
_lib_names = ['umfpack']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
umfpack_libs = self.get_libs('umfpack_libs', self._lib_names)
info = self.check_libs(lib_dirs, umfpack_libs, [])
if info is None:
return
include_dirs = self.get_include_dirs()
inc_dir = None
for d in include_dirs:
p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')
if p:
inc_dir = os.path.dirname(p[0])
break
if inc_dir is not None:
dict_append(info, include_dirs=[inc_dir],
define_macros=[('SCIPY_UMFPACK_H', None)],
swig_opts=['-I' + inc_dir])
        amd = get_info('amd')
        dict_append(info, **amd)
self.set_info(**info)
return
## def vstr2hex(version):
## bits = []
## n = [24,16,8,4,0]
## r = 0
## for s in version.split('.'):
## r |= int(s) << n[0]
## del n[0]
## return r
#--------------------------------------------------------------------
def combine_paths(*args, **kws):
""" Return a list of existing paths composed by all combinations of
items from arguments.
"""
r = []
for a in args:
if not a:
continue
if is_string(a):
a = [a]
r.append(a)
args = r
if not args:
return []
if len(args) == 1:
result = reduce(lambda a, b: a + b, map(glob, args[0]), [])
elif len(args) == 2:
result = []
for a0 in args[0]:
for a1 in args[1]:
result.extend(glob(os.path.join(a0, a1)))
else:
result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))
verbosity = kws.get('verbosity', 1)
log.debug('(paths: %s)', ','.join(result))
return result
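# Illustrative example (hypothetical paths, added for clarity):
#   combine_paths('/usr', ['lib', 'lib64'], 'libblas*')
# globs /usr/lib/libblas* and /usr/lib64/libblas* and returns only the
# paths that actually exist on disk.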
language_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}
inv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}
def dict_append(d, **kws):
languages = []
for k, v in kws.items():
if k == 'language':
languages.append(v)
continue
if k in d:
if k in ['library_dirs', 'include_dirs',
'extra_compile_args', 'extra_link_args',
'runtime_library_dirs', 'define_macros']:
                for vv in v:
                    if vv not in d[k]:
                        d[k].append(vv)
else:
d[k].extend(v)
else:
d[k] = v
if languages:
l = inv_language_map[max([language_map.get(l, 0) for l in languages])]
d['language'] = l
return
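# Illustrative sketch (added for clarity; not part of the original module) of
# dict_append's merge semantics:
def _dict_append_demo():
    info = {'libraries': ['blas']}
    dict_append(info, libraries=['lapack'], language='c')
    dict_append(info, language='f77')
    # 'libraries' is extended as-is (keys like 'include_dirs' would be
    # de-duplicated), and the richest 'language' seen wins (c < c++ < f77 < f90):
    assert info == {'libraries': ['blas', 'lapack'], 'language': 'f77'}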
def parseCmdLine(argv=(None,)):
import optparse
parser = optparse.OptionParser("usage: %prog [-v] [info objs]")
parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
default=False,
help='be verbose and print more messages')
opts, args = parser.parse_args(args=argv[1:])
return opts, args
def show_all(argv=None):
import inspect
if argv is None:
argv = sys.argv
opts, args = parseCmdLine(argv)
if opts.verbose:
log.set_threshold(log.DEBUG)
else:
log.set_threshold(log.INFO)
show_only = []
for n in args:
if n[-5:] != '_info':
n = n + '_info'
show_only.append(n)
show_all = not show_only
_gdict_ = globals().copy()
for name, c in _gdict_.items():
if not inspect.isclass(c):
continue
if not issubclass(c, system_info) or c is system_info:
continue
if not show_all:
if name not in show_only:
continue
del show_only[show_only.index(name)]
conf = c()
conf.verbosity = 2
r = conf.get_info()
if show_only:
log.info('Info classes not defined: %s', ','.join(show_only))
if __name__ == "__main__":
show_all()
| [
"[email protected]"
] | |
b20a8ceb62e68cea4660e241d323d08b5c8a9a34 | b05b89e1f6378905bbb62e2a2bf2d4f8e3187932 | /contiguousSubarrayWithMaxSum.py | cca7da339ae673798a2108e9eca5e36101113136 | [
"MIT"
] | permissive | anishmo99/Daily-Interview-Pro | c959cd336209132aebad67a409df685e654cfdfc | d8724e8feec558ab1882d22c9ca63b850b767753 | refs/heads/master | 2023-04-10T08:09:46.089227 | 2021-04-27T07:27:38 | 2021-04-27T07:27:38 | 269,157,996 | 1 | 1 | MIT | 2020-06-08T07:09:19 | 2020-06-03T17:57:21 | C++ | UTF-8 | Python | false | false | 257 | py | from typing import List
class Solution:
def maxSubArraySum(self, arr: List[int]) -> int:
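        # Kadane's algorithm: cur_sum is the best sum of a subarray ending at
        # the current index; e.g. [34, -50, 42, 14, -5, 86] -> 137 (42+14-5+86).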
cur_sum,max_sum=arr[0],arr[0]
for i in range(1,len(arr)):
cur_sum = max(arr[i],arr[i]+cur_sum)
max_sum = max(cur_sum,max_sum)
return max_sum | [
"[email protected]"
] | |
2d7ea85777003a35886a2ed9a54c7eacb02feeac | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/4/kfa.py | fa58bfbad8c36ae3ec8f5d8bfeacb3f8cb899cab | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'kFA':
printFunction(data[1:])
else:
print 'ERROR'
return
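# Illustrative input file (hypothetical, e.g. prog.kfa) for this interpreter:
#   kFA " hello world "
#   kFA " "
# The quote characters must be standalone tokens: the first line prints
# "hello world", the second prints a blank line, and any line whose first
# token is not 'kFA' prints ERROR and stops processing.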
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
aaac6a94ac555dc58dda780437398f3de9ad0d12 | 8ae5c8bd19fe77c44b8485f646ff78db2605522a | /control/test.py | fd1e4612d0f2926ef1c3bc836f63ac6f6fbc1337 | [] | no_license | yunshengtian/pendular-codesign | 8bec44de67401d8db9b3e19b9afe4808e6eb84bd | 7f939bb0b00907b367a9ad89a5004ecb3a6aad78 | refs/heads/main | 2023-05-03T22:54:07.379998 | 2021-05-23T04:57:25 | 2021-05-23T04:57:25 | 368,543,614 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | import gym
import numpy as np
from argparse import ArgumentParser
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import env
from env.utils import utils
from control import get_control
parser = ArgumentParser()
parser.add_argument('--env', type=str, default='pendulum', choices=['acrobot', 'pendulum'])
parser.add_argument('--control', type=str, default='ilqr', choices=['ilqr', 'mppi'])
parser.add_argument('--seed', type=int, default=0)
args = parser.parse_args()
np.random.seed(args.seed)
env = gym.make(f'{args.env}-v0')
Control = get_control(args.control)
control = Control(env)
x_trj, u_trj, info = control.solve()
if args.control == 'ilqr':
cost_trace = info['cost_trace']
final_cost = cost_trace[-1]
elif args.control == 'mppi':
final_cost = info['cost']
print(f'Final cost: {final_cost}')
design = env.sim.get_design_params(env.sim.design)
Animation = utils[args.env]['animate']
animation = Animation()
animation.show(design, x_trj)
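# Typical invocation (illustrative): python test.py --env pendulum --control ilqr
# Note: the seemingly unused `import env` above is what presumably registers the
# pendulum-v0 / acrobot-v0 environments with gym before gym.make() is called.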
| [
"[email protected]"
] | |
64618a6ac65022117f48efe65d74d536eb1d4461 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/request/AntfortuneEquityShopCustrelationQueryRequest.py | fa4fa39402e4465d04ccbe7c01ba6dec5c1768fa | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 4,021 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AntfortuneEquityShopCustrelationQueryModel import AntfortuneEquityShopCustrelationQueryModel
class AntfortuneEquityShopCustrelationQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AntfortuneEquityShopCustrelationQueryModel):
self._biz_content = value
else:
self._biz_content = AntfortuneEquityShopCustrelationQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'antfortune.equity.shop.custrelation.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| [
"[email protected]"
] | |
7a90a3c285d5b1d163f9550befa75c5b01f6fdc4 | 0b3c5260cd5c33a1beccc5710a5d0fd097a5ea15 | /anchore_engine/services/policy_engine/engine/policy/gates/npm_check.py | 40e0d49fe309d0fdfc2a14343f4df6cec46099e9 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | omerlh/anchore-engine | fb2d7cb3d8bd259f6c973b450fbaa2c2e00497f0 | 669a0327f8baaee3f5c7c64b482909fe38830d80 | refs/heads/master | 2021-09-02T12:48:51.661648 | 2018-01-02T19:26:47 | 2018-01-02T19:26:47 | 116,236,136 | 1 | 0 | null | 2018-01-04T08:41:39 | 2018-01-04T08:41:39 | null | UTF-8 | Python | false | false | 7,044 | py | from anchore_engine.services.policy_engine.engine.policy.gate import Gate, BaseTrigger
from anchore_engine.services.policy_engine.engine.policy.utils import NameVersionListValidator, CommaDelimitedStringListValidator, barsplit_comma_delim_parser, delim_parser
from anchore_engine.db import NpmMetadata
from anchore_engine.services.policy_engine.engine.logs import get_logger
from anchore_engine.services.policy_engine.engine.feeds import DataFeeds
log = get_logger()
# TODO; generalize these for any feed, with base classes and children per feed type
FEED_KEY = 'npm'
NPM_LISTING_KEY = 'npms'
NPM_MATCH_KEY = 'matched_feed_npms'
class NotLatestTrigger(BaseTrigger):
__trigger_name__ = 'NPMNOTLATEST'
__description__ = 'triggers if an installed NPM is not the latest version according to NPM data feed'
def evaluate(self, image_obj, context):
"""
Fire for any npm in the image that is in the official npm feed but is not the latest version.
Mutually exclusive to NPMNOTOFFICIAL and NPMBADVERSION
"""
feed_npms = context.data.get(NPM_MATCH_KEY)
img_npms = context.data.get(NPM_LISTING_KEY)
        if not feed_npms or not img_npms:
return
feed_names = {p.name: p.latest for p in feed_npms}
for npm, versions in img_npms.items():
if npm not in feed_names:
continue # Not an official
for v in versions:
if v and v != feed_names.get(npm):
self._fire("NPMNOTLATEST Package ("+npm+") version ("+v+") installed but is not the latest version ("+feed_names[npm]['latest']+")")
class NotOfficialTrigger(BaseTrigger):
__trigger_name__ = 'NPMNOTOFFICIAL'
__description__ = 'triggers if an installed NPM is not in the official NPM database, according to NPM data feed'
def evaluate(self, image_obj, context):
"""
Fire for any npm that is not in the official npm feed data set.
Mutually exclusive to NPMNOTLATEST and NPMBADVERSION
:param image_obj:
:param context:
:return:
"""
feed_npms = context.data.get(NPM_MATCH_KEY)
img_npms = context.data.get(NPM_LISTING_KEY)
        if not feed_npms or not img_npms:
return
feed_names = {p.name: p.versions_json for p in feed_npms}
for npm in img_npms.keys():
if npm not in feed_names:
self._fire(msg="NPMNOTOFFICIAL Package ("+str(npm)+") in container but not in official NPM feed.")
class BadVersionTrigger(BaseTrigger):
__trigger_name__ = 'NPMBADVERSION'
__description__ = 'triggers if an installed NPM version is not listed in the official NPM feed as a valid version'
def evaluate(self, image_obj, context):
"""
Fire for any npm that is in the official npm set but is not one of the official versions.
Mutually exclusive to NPMNOTOFFICIAL and NPMNOTLATEST
:param image_obj:
:param context:
:return:
"""
feed_npms = context.data.get(NPM_MATCH_KEY)
img_npms = context.data.get(NPM_LISTING_KEY)
        if not feed_npms or not img_npms:
return
feed_names = {p.name: p.versions_json for p in feed_npms}
for npm, versions in img_npms.items():
if npm not in feed_names:
continue
non_official_versions = set(versions).difference(set(feed_names.get(npm, [])))
for v in non_official_versions:
self._fire(msg="NPMBADVERSION Package ("+npm+") version ("+v+") installed but version is not in the official feed for this package ("+str(feed_names.get(npm, '')) + ")")
class PkgFullMatchTrigger(BaseTrigger):
__trigger_name__ = 'NPMPKGFULLMATCH'
__description__ = 'triggers if the evaluated image has an NPM package installed that matches one in the list given as a param (package_name|vers)'
__params__ = {
'BLACKLIST_NPMFULLMATCH': NameVersionListValidator()
}
def evaluate(self, image_obj, context):
"""
Fire for any npm that is on the blacklist with a full name + version match
:param image_obj:
:param context:
:return:
"""
npms = image_obj.npms
if not npms:
return
pkgs = context.data.get(NPM_LISTING_KEY)
if not pkgs:
return
for pkg, vers in barsplit_comma_delim_parser(self.eval_params.get('BLACKLIST_NPMFULLMATCH', '')).items():
try:
if pkg in pkgs and vers in pkgs.get(pkg, []):
self._fire(msg='NPMPKGFULLMATCH Package is blacklisted: '+pkg+"-"+vers)
except Exception as e:
continue
class PkgNameMatchTrigger(BaseTrigger):
__trigger_name__ = 'NPMPKGNAMEMATCH'
__description__ = 'triggers if the evaluated image has an NPM package installed that matches one in the list given as a param (package_name)'
__params__ = {
'BLACKLIST_NPMNAMEMATCH': CommaDelimitedStringListValidator()
}
def evaluate(self, image_obj, context):
npms = image_obj.npms
if not npms:
return
pkgs = context.data.get(NPM_LISTING_KEY)
if not pkgs:
return
for match_val in delim_parser(self.eval_params.get('BLACKLIST_NPMNAMEMATCH', '')):
if match_val and match_val in pkgs:
self._fire(msg='NPMPKGNAMEMATCH Package is blacklisted: ' + match_val)
class NoFeedTrigger(BaseTrigger):
__trigger_name__ = 'NPMNOFEED'
__description__ = 'triggers if anchore does not have access to the NPM data feed'
def evaluate(self, image_obj, context):
try:
feed_meta = DataFeeds.instance().packages.group_by_name(FEED_KEY)
if feed_meta and feed_meta[0].last_sync:
return
except Exception as e:
log.exception('Error determining feed presence for npms. Defaulting to firing trigger')
self._fire()
return
class NpmCheckGate(Gate):
__gate_name__ = "NPMCHECK"
__triggers__ = [
NotLatestTrigger,
NotOfficialTrigger,
BadVersionTrigger,
PkgFullMatchTrigger,
PkgNameMatchTrigger,
NoFeedTrigger
]
def prepare_context(self, image_obj, context):
"""
Prep the npm names and versions
:param image_obj:
:param context:
:return:
"""
if not image_obj.npms:
return context
context.data[NPM_LISTING_KEY] = {p.name: p.versions_json for p in image_obj.npms}
npms = context.data[NPM_LISTING_KEY].keys()
context.data[NPM_MATCH_KEY] = []
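        # Query the npm feed metadata in batches of 100 names so each
        # SQL IN (...) clause stays small.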
chunks = [npms[i: i+100] for i in xrange(0, len(npms), 100)]
for key_range in chunks:
context.data[NPM_MATCH_KEY] += context.db.query(NpmMetadata).filter(NpmMetadata.name.in_(key_range)).all()
return context
| [
"[email protected]"
] | |
f711172c3480c5580dd6594014f2a13fb124054c | f26dd860c8d764fc7a47bde656f393795cd8d763 | /david13.py | f0f78ee556259290f4fcefbd2eb9801ee2858e03 | [] | no_license | chokkuu1998/david | 8e9fa162f657c8b9bb55502f1cdd730a08ff0235 | 4dc999cdb73383b5a5d7ed3d98b2c1a4d6b5f7ee | refs/heads/master | 2020-03-28T17:05:04.046963 | 2019-07-16T08:07:37 | 2019-07-16T08:07:37 | 148,756,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | AA,BB=map(int,input().split())
CC=list(map(int,input().split()))
pp=list(map(int,input().split()))
qq=[]
rr=0
# Greedy by profit/cost ratio: repeatedly take the densest affordable item,
# discarding each considered item, until the budget BB is exhausted.
for i in range(AA):
    x = pp[i] / CC[i]
    qq.append(x)
while BB >= 0 and len(qq) > 0:
    mindex = qq.index(max(qq))
    if BB >= CC[mindex]:
        rr = rr + pp[mindex]
        BB = BB - CC[mindex]
    CC.pop(mindex)
    pp.pop(mindex)
    qq.pop(mindex)
print(rr)
| [
"[email protected]"
] | |
b70385e17427bd7ad30abd8179b7962f293e20f5 | 5837e04e53e0434c8b10eb9647804901d3a6ee7a | /pyseries/metrics/__init__.py | 77d6184c5b0dba8ce8889cd431ef362b1d01afb2 | [
"BSD-3-Clause"
] | permissive | nubiofs/pyseries | b26fd4dff4b55cc3b338a2ebee9260b91d6fa902 | 59c8a321790d2398d71305710b7d322ce2d8eaaf | refs/heads/master | 2020-04-05T14:18:35.453540 | 2014-10-27T17:47:54 | 2014-10-27T17:47:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | # -*- coding: utf8
from __future__ import division, print_function
'''
Array and time series metrics package
'''
| [
"[email protected]"
] | |
e8e08e4b4c84e23d22c92940cf1d38e721e9617e | dc80f94c1a244002db468fc7242d5fcaafe439dc | /powerdns_client/api/stats_api.py | 865ce4494cac1c4a36ceedb5e0f8587189c76576 | [
"MIT"
] | permissive | sanvu88/python-powerdns-client | f675e1ee162bb76190b41ddf0cfc34e2305a757b | 57dd0460995a5407c6f5c963553b4df0f4859667 | refs/heads/master | 2023-02-04T07:05:31.095951 | 2020-12-15T16:48:15 | 2020-12-15T16:48:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,803 | py | # coding: utf-8
"""
PowerDNS Authoritative HTTP API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.0.13
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from powerdns_client.api_client import ApiClient
class StatsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_stats(self, server_id, **kwargs): # noqa: E501
"""Query statistics. # noqa: E501
Query PowerDNS internal statistics. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_stats(server_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str statistic: When set to the name of a specific statistic, only this value is returned. If no statistic with that name exists, the response has a 422 status and an error message.
:param bool includerings: “true” (default) or “false”, whether to include the Ring items, which can contain thousands of log messages or queried domains. Setting this to ”false” may make the response a lot smaller.
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_stats_with_http_info(server_id, **kwargs) # noqa: E501
else:
(data) = self.get_stats_with_http_info(server_id, **kwargs) # noqa: E501
return data
def get_stats_with_http_info(self, server_id, **kwargs): # noqa: E501
"""Query statistics. # noqa: E501
Query PowerDNS internal statistics. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_stats_with_http_info(server_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str server_id: The id of the server to retrieve (required)
:param str statistic: When set to the name of a specific statistic, only this value is returned. If no statistic with that name exists, the response has a 422 status and an error message.
:param bool includerings: “true” (default) or “false”, whether to include the Ring items, which can contain thousands of log messages or queried domains. Setting this to ”false” may make the response a lot smaller.
:return: list[object]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['server_id', 'statistic', 'includerings'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_stats" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'server_id' is set
if ('server_id' not in params or
params['server_id'] is None):
raise ValueError("Missing the required parameter `server_id` when calling `get_stats`") # noqa: E501
collection_formats = {}
path_params = {}
if 'server_id' in params:
path_params['server_id'] = params['server_id'] # noqa: E501
query_params = []
if 'statistic' in params:
query_params.append(('statistic', params['statistic'])) # noqa: E501
if 'includerings' in params:
query_params.append(('includerings', params['includerings'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/servers/{server_id}/statistics', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[object]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"[email protected]"
] | |
6229e7231c45038a0d515693de51d6b3b5ee16fe | 9b10d8482a7af9c90766747f5f2ddc343871d5fa | /Gemtek/AutoTest/DropAP/WRTM-326ACN-DropAP2/premises/library/test.py | f158319fe329093f6d1fd74a233f2a489a42b9b0 | [] | no_license | DarcyChang/MyProjects | 86d33f5cf8bdfd4b21e64922e4eb25c1afc3c135 | 47efb2dfe13ace264f8943b59b701f39f23c4c17 | refs/heads/master | 2021-05-12T12:43:39.255082 | 2020-09-23T06:42:03 | 2020-09-23T06:42:03 | 117,419,269 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 609 | py | __author__ = 'alu'
import re
import time
import cafe
from cafe.resp.response_map import ResponseMap
from collections import OrderedDict
from demo.alu_demo.User_Cases.test_lib import Teststeplib_e7 as e7_lib
res = "ONT Subscriber Info Status" \
"---------- ------------------------------------------------ ---------------" \
"205 <no subscriber ID> enabled" \
" Last Location: 2/1"
r = ResponseMap(res)
table1 = r.table_match_by_delimiter()
print"table1:",table1[-1]
print type(table1[-1])
| [
"[email protected]"
] | |
d95f0b89899c28fd7e790e02a64cba46aff3d59d | 1ad2ae0383341f2b92fe38173612be5d9c4970e8 | /polls/models.py | 75a460d4f5e68fc9d5052737ed7677900239b83f | [
"MIT"
] | permissive | pizzapanther/ppp | 9b0df90ddf2e52ffdaf43394026613dbd884c0e9 | 3286f39f8e90f3473841a154ff7189a3efd9ca94 | refs/heads/master | 2021-09-23T03:52:27.915606 | 2020-03-04T18:04:08 | 2020-03-04T18:04:08 | 222,154,111 | 0 | 0 | MIT | 2021-09-22T18:10:01 | 2019-11-16T20:16:27 | Python | UTF-8 | Python | false | false | 1,400 | py | from django.conf import settings
from django.db import models
from django.contrib.postgres.fields import ArrayField
class Presentation(models.Model):
title = models.CharField(max_length=100)
slug = models.SlugField(max_length=100)
def __str__(self):
return self.title
def current(self):
return self.poll_set.filter(live=True).first()
class Poll(models.Model):
question = models.CharField(max_length=254)
choices = ArrayField(models.CharField(max_length=254))
live = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
presentation = models.ForeignKey(Presentation, on_delete=models.SET_NULL, blank=True, null=True)
class Meta:
ordering = ('-created',)
def __str__(self):
return self.question
def json_data(self):
votes = []
for (i, choice) in enumerate(self.choices):
votes.append(self.vote_set.filter(choice=i).count())
return {
'id': self.id,
'slug': self.presentation.slug,
'question': self.question,
'choices': self.choices,
'votes': votes,
'total': self.vote_set.all().count(),
}
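# Illustrative shape of Poll.json_data() (values are hypothetical):
#   {'id': 1, 'slug': 'pycon', 'question': 'Lunch?', 'choices': ['yes', 'no'],
#    'votes': [3, 1], 'total': 4}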
class Vote(models.Model):
poll = models.ForeignKey(Poll, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
choice = models.PositiveSmallIntegerField()
def __str__(self):
return f'{self.poll} - {self.user}'
| [
"[email protected]"
] | |
2b24ec034a34c513b9c6b1bd086580ec9964d106 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /Geometry/HcalEventSetup/python/CaloTowerGeometryDBWriter_cfi.py | dc75ba33a6cedd5c4191026f97719656397c89c3 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 531 | py | import FWCore.ParameterSet.Config as cms
CaloTowerHardcodeGeometryEP = cms.ESProducer( "CaloTowerHardcodeGeometryEP" ,
appendToDataLabel = cms.string("_master")
)
CaloTowerGeometryToDBEP = cms.ESProducer( "CaloTowerGeometryToDBEP" ,
applyAlignment = cms.bool(False) ,
appendToDataLabel = cms.string("_toDB")
)
| [
"[email protected]"
] | |
35a47b027566248963ff354a2a07b0ef7377d61c | 1bccf0b1374dcfddfc3e320fd5b6af499334df2d | /scripts/hashtagUserCounts.py | 4a780d5a584536af79f7279e772bc4f2cc89c7c9 | [
"Unlicense"
] | permissive | chebee7i/twitter | 6b245f5a7b7510089b62d48567e6208e1fe8a1db | ec1d772c3ef7d2288ac8051efb8637378f3ec195 | refs/heads/master | 2021-01-01T16:25:13.242941 | 2015-06-24T19:39:24 | 2015-06-24T19:39:24 | 23,846,593 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,533 | py | """
Insert the number of users that tweeted each hashtag.
"""
import twitterproj
import pymongo
from collections import defaultdict
import itertools
import json
import os
import io
db = twitterproj.connect()
def add_user_counts(bot_filtered=True):
collection = db.tweets.with_hashtags
if bot_filtered:
skip_users = twitterproj.subcollections.get_skip_users()
target = db.hashtags.bot_filtered
else:
skip_users = set([])
target = db.hashtags
counts = defaultdict(int)
users = defaultdict(set)
for i, tweet in enumerate(collection.find()):
user_id = tweet['user']['id']
if user_id in skip_users:
continue
for hashtag in tweet['hashtags']:
counts[hashtag] += 1
users[hashtag].add(user_id)
for i, (hashtag, count) in enumerate(counts.iteritems()):
target.update({'_id': hashtag, 'count': count},
{"$set": {'user_count': len(users[hashtag])}},
upsert=False)
def to_json(filename, mincount=1000, bot_filtered=True):
if bot_filtered:
collection = db.hashtags.bot_filtered
else:
collection = db.hashtags
rows = []
if mincount is not None:
it = collection.find({'user_count': {'$gte': mincount}})
else:
        it = collection.find()
for doc in it:
row = [doc['_id'], doc['count'], doc['user_count']]
rows.append(row)
data = {'data': rows}
with open(filename, 'w') as fobj:
json.dump(data, fobj)
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return itertools.izip_longest(fillvalue=fillvalue, *args)
def to_csv(filename, mincount=1000, bot_filtered=True):
"""
Writes hashtags to CSV, filtering hashtags that were not mentioned by
some minimum number of users.
"""
if bot_filtered:
collection = db.hashtags.bot_filtered
else:
collection = db.hashtags
rows = []
if mincount is not None:
it = collection.find({'user_count': {'$gte': mincount}})
else:
        it = collection.find()
it = it.sort('user_count', pymongo.DESCENDING)
basename, ext = os.path.splitext(filename)
if not ext:
ext = '.csv'
data = """
This file contains information regarding the UTF-8 encoded CSV file:
{0}{1}
Each line of that file contains 3 pieces of information, separated by commas:
1. hashtag
2. number of times the hashtag was tweeted
3. number of users who tweeted the hashtag
Lines are sorted, descendingly, according to column 3.
Counts are tabulated wrt geotagged tweets in the contiguous states.
{2}
Hashtags were included only if they were tweeted by at least {3} users across all regions.
"""
if bot_filtered:
text = 'Tweets from users determined to be robots were excluded from the counting process.'
else:
text = ''
data = data.format(basename, ext, text, mincount)
with open(basename + '.txt', 'w') as fobj:
fobj.write(data)
with io.open(basename + ext, 'w', encoding='utf-8') as fobj:
for docs in grouper(10000, it):
rows = []
for doc in docs:
if doc is None:
break
row = [doc['_id'], str(doc['count']), str(doc['user_count'])]
rows.append(','.join(row))
fobj.write(u'\n'.join(rows))
fobj.write(u'\n') # So groups are separated.
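if __name__ == '__main__':
    # Illustrative driver (added; not part of the original script): refresh the
    # per-hashtag user counts, then export hashtags tweeted by >= 1000 users.
    add_user_counts(bot_filtered=True)
    to_csv('hashtags_bot_filtered.csv', mincount=1000, bot_filtered=True)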
| [
"[email protected]"
] |