anchor
stringlengths 2
528
| positive
stringlengths 4
6k
| negative
stringlengths 6
6k
|
---|---|---|
์์ ์ฌ๋ฆ์ ๋ชฉ์ ์ ๋ฌด์์ด์์ต๋๊น?
|
์์ ์ฌ๋ฆ์ ๋ํ ์ ๊ตญ ๊ตํ ํ์ํ์ ์ง์์ ๋ฐ์์ผ๋ฉฐ, ์ฌ๋ฆ ๋์ ์ธ๊ถ์ ์ํ ์๋ฃ ์์ํ์ ์์๋ด์ฌ์๋ค๊ณผ ๋ค์ํ ๋จ์ฒด์ ๋ณํธ์ฌ๋ค์ด ๋ฏธ์์ํผ์์ ํ๋ํ๋ค. ์์ ์ฌ๋ฆ์ ์ฅ๊ธฐ์ ์ธ ๋ชฉํ๋ ๋ฏธ์์ํผ์ ๊ถ๋ ฅ ๊ตฌ์กฐ๋ฅผ ๋ณํ์ํค๋ ๊ฒ์ด์๋ค.
|
์์ ์ ์ฌ๋ฆ์ ๋ํ ์ ๊ตญ ๊ตํ ํ์ํ์ ์ง์์ ๋ฐ์์ผ๋ฉฐ, ์ฌ๋ฆ ๋์ ์ธ๊ถ์ ์ํ ์๋ฃ ์์ํ์ ์์๋ด์ฌ์๋ค๊ณผ ๋ค์ํ ๋จ์ฒด์ ๋ณํธ์ฌ๋ค์ด ๋ฏธ์์ํผ์์ ํ๋ํ์ต๋๋ค. ์์ ์ ์ฌ๋ฆ์ ์ฅ๊ธฐ์ ์ธ ๋ชฉํ๋ ๋ฏธ์์ํผ์ ๊ถ๋ ฅ ๊ตฌ์กฐ๋ฅผ ๋ณํ์ํค๋ ๊ฒ์ด์์ต๋๋ค.
|
End do_enable; called after state changes but before command acknowledged. This method connects to the HVAC server.
|
async def end_enable(self, id_data: salobj.BaseDdsDataType) -> None:
    """End of the "enable" command; runs after state changes but before the
    command is acknowledged.

    Connects to the HVAC server if there is no connection yet, then defers
    to the base-class implementation.

    Parameters
    ----------
    id_data : salobj.BaseDdsDataType
        Data for the enable command.
    """
    if not self.connected:
        await self.connect()
    await super().end_enable(id_data)
|
def on_enable(self):
    """Reset ``i_err`` to zero when the component is enabled.

    # NOTE(review): i_err presumably counts consecutive errors — confirm
    # against the rest of the class.
    """
    self.i_err = 0
|
Function returns a list of creation datetimes for the files in the list of jpg files paths
|
def get_creation_times(list_of_jpg):
    """Return the EXIF creation datetime for each file in *list_of_jpg*.

    For each path, reads EXIF tag 36867 (DateTimeOriginal). Entries are
    ``None`` when the file cannot be opened, is not a valid image, or has
    no such tag, so the returned list is always the same length as the
    input — best-effort, mirroring the original behavior.

    Parameters
    ----------
    list_of_jpg : iterable of str
        Paths to JPEG files.

    Returns
    -------
    list
        Creation datetimes (EXIF strings) or None, one per input path.
    """
    list_of_creation_times = []
    for jpg in list_of_jpg:
        creation_time = None
        try:
            # Context manager ensures the file handle is closed promptly
            # (the original leaked the open file object).
            with Image.open(jpg) as image:
                # 36867 == EXIF DateTimeOriginal
                creation_time = image.getexif().get(36867)
        except (OSError, ValueError, AttributeError):
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            creation_time = None
        list_of_creation_times.append(creation_time)
    return list_of_creation_times
|
def image_timestamp_list(self) -> Dict[str, List[int]]:
    """Return {camera: [timestamps]} for the current log.

    The full {log: {camera: [int timestamps]}} mapping is built lazily on
    first access by parsing the trailing ``_<timestamp>`` token of each
    image filename (extension stripped), and cached afterwards.
    """
    assert self.image_list is not None
    assert self._image_list is not None
    if self._image_timestamp_list is None:
        # Build the whole cache in one pass with nested comprehensions.
        self._image_timestamp_list = {
            log: {
                camera: [
                    int(path.split("/")[-1][:-4].split("_")[-1])
                    for path in self._image_list[log][camera]
                ]
                for camera in CAMERA_LIST
            }
            for log in self.log_list
        }
    return self._image_timestamp_list[self.current_log]
|
Add a new snapshot node. It adds an entry in self.snapshotsCached and creates a new Checkbox object
|
def __addNewSnapshot__(self, snapshotNode):
    """Add a new snapshot node to the UI and the cache.

    Creates a checked QCheckBox for the node, adds it to the snapshots
    layout, records the node in ``self.snapshotsCached`` and installs a
    modified-observer so renames are tracked.

    Parameters
    ----------
    snapshotNode
        Node exposing GetID/GetName/GetSnapshotDescription — presumably a
        Slicer MRML snapshot node; confirm with the caller.
    """
    nodeID = snapshotNode.GetID()
    print("Added new node " + nodeID)
    name = snapshotNode.GetName()
    description = snapshotNode.GetSnapshotDescription()
    ckb = qt.QCheckBox()
    ckb.checked = True
    ckb.text = name
    ckb.toolTip = "%s. Uploaded to Picasa: NO" % description
    # Add the checkbox to the layout
    self.currentSnapshotsInnerLayout.addWidget(ckb)
    # Add a new snapshot node to the cached collection (Name, Description, Uploaded, Widget)
    self.snapshotsCached[nodeID] = [name, description, False, ckb]
    # Add an observer in case the node is modified (example: renamed)
    self.__addModifiedObserver__(snapshotNode)
    # Remove no items label if visible
    self.noItemsLabel.hide()
|
def push_snapshot(self):
|
Performs a commit: commits known offsets for read partitions and subscription positions for untouched partitions.
|
async def _do_commit(self):
offsets = self._subscriptions.subscription \
.assignment.all_consumed_offsets()
offsets.update(self._offsets)
if offsets: await self._consumer.commit(offsets)
|
def _do_update_update(self, data, offset):
    """Prepare a servermap update for writing *data* at byte *offset*.

    Computes the range of segments touched by the write (stored on
    ``self._start_segment`` / ``self._end_segment``) and requests a
    servermap update in MODE_WRITE over that range.

    Parameters
    ----------
    data : IMutableUploadable
        The data to write.
    offset : int
        Byte offset of the write; ``offset == self.get_size()`` means
        appending to the file.

    Returns
    -------
    Whatever ``self._update_servermap`` returns (presumably a Deferred —
    confirm with the class).
    """
    assert IMutableUploadable.providedBy(data)
    assert self.is_mutable()
    # offset == self.get_size() is valid and means that we are
    # appending data to the file.
    assert offset <= self.get_size()
    # Segment size is stored as the 4th element of the version tuple.
    segsize = self._version[3]
    # We'll need the segment that the data starts in, regardless of
    # what we'll do later.
    start_segment = offset // segsize
    # We only need the end segment if the data we append does not go
    # beyond the current end-of-file.
    end_segment = start_segment
    if offset + data.get_size() < self.get_size():
        end_data = offset + data.get_size()
        # The last byte we touch is the end_data'th byte, which is actually
        # byte end_data - 1 because bytes are zero-indexed.
        end_data -= 1
        end_segment = end_data // segsize
    self._start_segment = start_segment
    self._end_segment = end_segment
    # Now ask for the servermap to be updated in MODE_WRITE with
    # this update range.
    return self._update_servermap(update_range=(start_segment,
                                                end_segment))
|
Return a pandas dataframe Goes through all the events and tallies up the points of the students.
|
def parse_sheets(event_attendance, member_list):
    """Tally each member's points across all events and return a DataFrame.

    Builds an attendance table with one row per member (full name from
    first + last name) and one column per event, summing per-event points
    into "Total Spark Points:".

    Parameters
    ----------
    event_attendance : iterable of DataFrame
        One DataFrame per event; the last column name is the event name and
        its first cell holds the points awarded for attending.
    member_list : DataFrame
        Must contain "First Name" and "Last Name" columns.

    Returns
    -------
    pandas.DataFrame
        The populated attendance table.

    # NOTE(review): relies on module-level `student_list` (a dict) and
    # `check_attendance` defined elsewhere — confirm they exist before use.
    """
    attendance = pd.DataFrame()
    #parse_bonus_points(attendance, member_list)
    attendance["Name:"] = member_list["First Name"].str.strip() + " " + member_list["Last Name"].str.strip()
    attendance["Total Spark Points:"] = 0
    print(attendance)
    print(member_list)
    #Go through all events in the folder and check if the person attended. If yes, add points
    for index, member in attendance.iterrows():
        #print(event_attendance)
        #print(member)
        name = str(member["Name:"].strip()).lower()
        # "NA" acts as a sentinel for "not registered yet".
        valid = student_list.get(name, "NA")
        #Add a new student to the dictionary of student_list if not already there
        print(valid)
        if(valid == 'NA'):
            print("Added student " + name)
            # name is already lowercased above, so .lower() here is a no-op.
            student_list[name.lower()] = {'EID' : 0, 'Points' : 0}
        for event in event_attendance:
            eventdata = list(event.columns)
            event_name = eventdata[-1]
            #print(event_name)
            # Points for the event live in the first row of its last column.
            points_to_add = event.iat[0, len(eventdata)-1]
            attendance.at[index, event_name] = 0
            #If the person is in the database and at the meeting, sum up each person's spark points and add to their total
            if check_attendance(member["Name:"].lower(), event):
                attendance.at[index, event_name] += points_to_add
                attendance.at[index, "Total Spark Points:"] += points_to_add
    #parse_shop()
    return attendance
|
def frame_info_to_df(obj_info_aggregated, frame_ind, camera_id, date_time):
    """Build a per-frame DataFrame of detected objects.

    Each row of *obj_info_aggregated* supplies (obj_bounds,
    obj_classification, confidence); the frame index, camera id and video
    upload datetime are attached as constant columns.
    """
    detections = pd.DataFrame(
        obj_info_aggregated,
        columns=["obj_bounds", "obj_classification", "confidence"],
    )
    return detections.assign(
        frame_id=frame_ind,
        camera_id=camera_id,
        video_upload_datetime=date_time,
    )
|
์๋๋ ๋์ด ์ธ๊ฐ์๊ฒ ๋ฏธ์น ์ ์๋ ์ํฅ์ ๋ฌด์์ธ๊ฐ?
|
์๋๋ ๋์ ์ํํ๋ฏผ์ผ๋ก, ์ฃผ์๋ ฅ ๊ฒฐํ ๊ณผ๋ค ํ๋ ์ฅ์ (ADHD)์ ๊ฐ์ ์ฅ์ ๋ฅผ ์น๋ฃํ๋ ๋ฐ ์ฌ์ฉ๋๋ ์ค์ถ ์ ๊ฒฝ๊ณ ์๊ทน์ ์
๋๋ค. ๋ฏธ๊ตญ์์ ์ผ๋ฐ์ ์ผ๋ก ์ฌ์ฉ๋๊ณ ์ฒ๋ฐฉ๋๋ ์ํํ๋ฏผ์๋ ๋ฑ์คํธ๋ก์ํํ๋ฏผ์ด ํฌํจ๋์ด ์์ต๋๋ค. ์๋๋ ๋์ ์ํํ๋ฏผ๊ณผ ๋ฑ์คํธ๋ก์ํํ๋ฏผ์ ์กฐํฉ์ผ๋ก, ADHD์ ๊ธฐ๋ฉด์ฆ ์น๋ฃ์ ๋๋ฆฌ ์ฒ๋ฐฉ๋๋ ์์ฝํ์
๋๋ค. ์๋๋ ๋์ด ํ๋ชจ๋ฅผ ์ ๋ฐํฉ๋๊น? ๊ทธ๋ ๋ค๋ฉด ์ด๋ป๊ฒ ์ ๋ฐํฉ๋๊น? ์ผ๋ฐ์ ์ผ๋ก ์ํํ๋ฏผ์ ์ธ์ฒด์์ ์ ๊ฒฌ๋์ง๋ง ์ผ๋ถ ๋ถ์์ฉ์ด ์์ ์ ์์ต๋๋ค. ์ฅ๊ธฐ๊ฐ ์ฌ์ฉ ๋ฐ ์ค๋
์ ๋ถ์์ฉ์ด ํจ์ฌ ๋ ์ปค์ง๋ฉฐ, ์ฌ๊ธฐ์๋ ๋ชจ๋ฐ์ด ๊ฐ๋์ด์ง๊ฑฐ๋ ํ๋ชจ๊ฐ ํฌํจ๋ฉ๋๋ค. ํ๋ชจ๋ ์ผ๋ฐ์ ์ผ๋ก ๋ํผ ์ ๋ฐ์ ๊ฑธ์ณ ํผ์ ธ ์์ผ๋ฉฐ ํน์ ํ ํ ์ง์ญ์๋ง ์ง์ค๋์ง ์์ต๋๋ค.
|
์ ๋๋ ๋์๋ ์ธ๊ฐ ์ฑ์ฅ ํธ๋ฅด๋ชฌ์ด ํฌํจ๋์ด ์์ต๋๊น? ์ ๋๋ ๋๊ณผ ์ธ๊ฐ ์ฑ์ฅ ํธ๋ฅด๋ชฌ์ ํจ๊ป ๋ณต์ฉํ ์ ์์ต๋๊น? Treato์์ ์ ๋๋ ๋๊ณผ ์ธ๊ฐ ์ฑ์ฅ ํธ๋ฅด๋ชฌ์ ๋ํ 5๊ฐ์ง ๋
ผ์ ๋ด ๋ถ๋งํฌ
|
์ ์ค, ์, ์ธ์ด, ๋๋ถ๋ถ, ์ฌ๋, ์กด์ฌ, ๋ฌด์ญ๋ค
|
์ ์ค ์ ์ธ์ด๋ ๋๋ถ๋ถ ์ฃ ์๋ ์ฌ๋์ ํด์น๋ ๋ฌด์์ด ์กด์ฌ์ด๋ค.
|
์๋ ๋ค๊ณผ์ ์ฐจ๋ฆผ์ ๋
น์ฐจ, ๋ฌด์ง๊ฐ๋ก, ๋ฐค๋ค์, ์๋, ์จ๋, ์กฐ๋ ๋ฑ์ด๋ค.
|
Retrieves the pixel value from image Will keep requested pixel values within the bounds of the image
|
def get_pixel(image, x, y):
    """Return the pixel value at (x, y), clamping out-of-range coordinates
    to the nearest edge of the image.

    *image* is a dict with 'width', 'height' and a flat row-major 'pixels'
    list.
    """
    # Clamp the column and row independently, then index the flat buffer.
    col = min(max(x, 0), image['width'] - 1)
    row = min(max(y, 0), image['height'] - 1)
    return image['pixels'][col + row * image['width']]
|
def get_input(self, idx):
    """Load the idx-th input image as an RGB PIL Image.

    Joins ``self.data_dir`` with the relative path stored at
    ``self._input_array[idx]`` and forces conversion to RGB so grayscale
    or palette images come back with three channels.
    """
    img_filename = os.path.join(
        self.data_dir,
        self._input_array[idx])
    x = Image.open(img_filename).convert('RGB')
    return x
|
Loads a specific dataset file from the csv. Split refers to the EMNIST split in the dataset.
|
def load_split(self, split, stage):
    """Load one EMNIST CSV (for the given split and stage) as a TensorDataset.

    Parameters
    ----------
    split : str
        EMNIST split name; must be in ``self.splits``.
    stage : str
        Stage name (e.g. train/test); must be in ``self.stages``.

    Returns
    -------
    torch.utils.data.TensorDataset or None
        (images, labels) dataset; None (with a printed message) when split
        or stage is invalid.
    """
    if not split in self.splits:
        print(split, "is not a valid EMNIST split")
        return
    if not stage in self.stages:
        print(stage, "is not a valid stage")
        return
    csv_filename = self.data_folder + "/emnist-" + split + "-" + stage + ".csv"
    print("Loading", csv_filename)
    # NOTE(review): dtype=np.int8 wraps pixel values above 127 (EMNIST pixels
    # are 0-255) — confirm whether np.uint8 was intended here.
    data_frame = pd.read_csv(csv_filename, header=None, dtype=np.int8)
    tensor_x = torch.Tensor(data_frame.iloc[:,1:].values.reshape((-1, 1, 28, 28))).float() # Load the images
    tensor_y = torch.Tensor(data_frame.iloc[:,0].values).long() # Load the labels
    return torch.utils.data.TensorDataset(tensor_x, tensor_y)
|
def load_data(self):
    """Load the rugged-terrain GDP dataset and split it by continent flag.

    Reads ``self.URL`` as CSV, keeps (cont_africa, rugged, rgdppc_2000),
    log-transforms GDP and drops non-finite rows. Returns
    ``[[x, y] for flag in (0, 1)]`` where x is ruggedness and y is log-GDP
    as numpy arrays.
    """
    df = pd.read_csv(self.URL, encoding='ISO-8859-1')
    df = df[['cont_africa', 'rugged', 'rgdppc_2000']]
    df = df.assign(rgdppc_2000=np.log(df.rgdppc_2000))
    df = df[np.isfinite(df.rgdppc_2000)]
    return [
        [subset.rugged.to_numpy(), subset.rgdppc_2000.to_numpy()]
        for subset in (df[df.cont_africa == flag] for flag in (0, 1))
    ]
|
Create a new variable in the model, annotated with the given term, and in the given units.
|
def _create_annotated_variable(self, prefixed_name, units):
    """Create a new model variable annotated with the given term, in the given units.

    Parameters
    ----------
    prefixed_name : str
        Ontology term in ``prefix:local_name`` form; the prefix must be a
        key of ``self._protocol_namespaces``.
    units
        Units for the new variable.

    Returns
    -------
    The new variable, annotated with a ``bqbiol:is`` RDF triple pointing
    at the term.
    """
    #1903 TODO: Be more careful to create unique local names and ids
    prefix, local_name = prefixed_name.split(':')
    var = self.add_variable(self._get_protocol_component(), local_name, units, id=prefix + '_' + local_name)
    var.add_rdf_annotation(('bqbiol:is', NSS['bqbiol']), (prefixed_name, self._protocol_namespaces[prefix]))
    return var
|
def add_term(self, term, cursor=None, loci_cursor=None):
    """Insert *term* — its name, description, attrs and loci — into the DB.

    When *cursor* / *loci_cursor* are not supplied, fresh cursors are
    created and an explicit transaction is begun on each; the terms
    transaction is ended at the bottom only when this method opened it.

    Parameters
    ----------
    term
        Object with ``name``, ``desc``, ``attrs`` (key -> iterable of
        values) and ``loci`` (iterable of loci).
    cursor : optional
        Existing cursor on the terms database to reuse.
    loci_cursor : optional
        Existing cursor on the loci database to reuse.

    Raises
    ------
    ValueError
        If the inserted term was not assigned a row id.
    """
    if not cursor:
        cur = self.m80.db.cursor()
        cur.execute("BEGIN TRANSACTION")
    else:
        cur = cursor
    if not loci_cursor:
        lcur = self.loci.m80.db.cursor()
        lcur.execute("BEGIN TRANSACTION")
    else:
        lcur = loci_cursor
    # Add the term id and description
    cur.execute(
        """
        INSERT OR ABORT INTO terms (name, desc)
        VALUES (?, ?)""",
        (term.name, term.desc),
    )
    (TID,) = cur.execute("SELECT last_insert_rowid()").fetchone()
    if TID is None: # pragma: no cover
        # I dont know when this would happen without another exception being thrown
        raise ValueError(f"{term} was not assigned a valid TID!")
    # One term_attrs row per (key, value) pair; attr values are iterables.
    for key, val in term.attrs.items():
        cur.executemany("""
            INSERT INTO term_attrs
            (TID, key, val)
            VALUES (?,?,?)
            """, ((TID,key,xval) for xval in val)
        )
    # separate the new loci from the existing loci
    new_LIDs = []
    existing_LIDs = []
    for l in term.loci:
        try:
            existing_LIDs.append(self.loci._get_LID(l))
        except MissingLocusError:
            new_LIDs.append(self.loci.add_locus(l, cur=lcur))
    for LID in new_LIDs + existing_LIDs:
        cur.execute(
            """
            INSERT INTO term_loci
            (TID,LID)
            VALUES (?,?)
            """,
            (TID, LID),
        )
    # NOTE(review): the loci transaction begun on lcur above is never
    # explicitly ended here — confirm the caller/connection commits it.
    if not cursor:
        cur.execute("END TRANSACTION")
|
Attaches to the gdbserver, running locally or portforwarded. If |remote_address| is set, it is used for ssh.
|
def _attach_bare_metal_gdb(
    remote_address, plugin_pid, ssh_options, nacl_helper_nonsfi_path, gdb_type):
    """Attach gdb to the (local or port-forwarded) bare-metal gdbserver.

    Waits for the gdbserver port for *plugin_pid* to open, assembles the
    gdb command line (target remote, python init scripts, runtime paths)
    and launches the plugin gdb.

    Parameters
    ----------
    remote_address
        If set, used for ssh access to the remote host.
    plugin_pid
        PID of the plugin process being debugged; also keys the lock file.
    ssh_options
        Options forwarded to the gdb support script for ssh.
    nacl_helper_nonsfi_path
        Optional path to the nacl_helper binary, passed as gdb's program.
    gdb_type
        Flavor of gdb front-end to launch (forwarded to _launch_plugin_gdb).
    """
    gdb_port = _get_bare_metal_gdb_port(plugin_pid)
    # Before launching 'gdb', we wait for that the target port is opened.
    _wait_by_busy_loop(
        lambda: _is_remote_port_open(_LOCAL_HOST, gdb_port))
    gdb_args = []
    if nacl_helper_nonsfi_path:
        gdb_args.append(nacl_helper_nonsfi_path)
    gdb_args.extend([
        '-ex', 'target remote %s:%d' % (_LOCAL_HOST, gdb_port)])
    gdb_args.extend(get_gdb_python_init_args())
    library_path = os.path.abspath(build_common.get_load_library_path())
    gdb_args.extend(get_gdb_python_script_init_args(
        'bare_metal_support',
        arc_nexe=os.path.join(
            library_path,
            os.path.basename(build_common.get_runtime_main_nexe())),
        library_path=library_path,
        runnable_ld_path=os.path.join(library_path, 'runnable-ld.so'),
        lock_file=os.path.join(_BARE_METAL_GDB_LOCK_DIR, str(plugin_pid)),
        remote_address=remote_address,
        ssh_options=ssh_options))
    gdb_args.extend(['-ex', r'echo To start: c or cont\n'])
    _launch_plugin_gdb(gdb_args, gdb_type)
|
def create_server_socket(self, local_address, peer, remote_address):
    """Create a server socket for *local_address*, register it in
    ``self.server_sockets`` and start its worker thread."""
    sock = server_socket(self, local_address, peer, remote_address)
    self.server_sockets[local_address] = sock
    sock.run_thread.start()
|
ํธ์ฃผ๋ ์์์ง ์๊ฐ์ ๊ณต๋ฌด์์๊ฒ ๋ณธ๋ด ์ธ์ ๋ณด์๋ฅผ ์ด๋ป๊ฒ ์ฃผ๊ณ ์์ง
|
3) ํธ์ฃผ
ํธ์ฃผ์ ๊ณต๋ฌด์ ์์ฉ์ ์ ๊ท์ง ์ ์ผ์ , ์ ๊ท์ง ์๊ฐ์ ๊ณต๋ฌด์, ์์์ง ์ ์ผ์ , ์์์ง ์๊ฐ์ ๋ฑ 4๊ฐ์ง๋ก ๋ถ๋ฅ๋๋ค. ์ ์ผ์ ๋ ์ฃผ 35์๊ฐ ์ด์, ์๊ฐ์ ๋ ์ฃผ 35์๊ฐ ๋ฏธ๋ง ๊ทผ๋ฌดํ๋ ๊ฒ์ ์์น์ผ๋ก ํ๋ฉฐ, ์ ๊ท์ง ์๊ฐ์ ๊ณต๋ฌด์์ ๊ฒฝ์ฐ ๊ธ์ฌ, ์๋น, ๋ณต์งํ์, ํด๊ฐ ๋ฑ ์ ๊ท์ง ์ ์ผ์ ๊ณต๋ฌด์๊ณผ ๋๋ฑํ๋ค. ์ฃผ๋น๊ทผ๋ฌด์๊ฐ์ด ์ ์ผ๋ฏ๋ก ๊ธ์ฌ๋ ์๋น ๋ฑ์ด ์ ์ผ์ ์ ๋นํด ์ ๋ค.
์์์ง ์๊ฐ์ ๊ณต๋ฌด์์ ๊ฒฝ์ฐ ๋ค๋ฅธ ์ ๊ท์ง(์ ์ผ์ , ์๊ฐ์ ํฌํจ) ๊ณต๋ฌด์๊ณผ ์์์ง ์ ์ผ์ ๊ณต๋ฌด์์ ์๊ธ, ์๋น, ํด๊ฐ, ๊ณตํด์ผ ๊ท์ ์ด ์ ์ฉ๋์ง ์๋๋ค. ๊ธฐ๋ณธ๊ธ ์ด์ธ์ ์๋น์ด ์ง๊ธ๋ ๊ฒฝ์ฐ ๊ธฐ๋ณธ๊ธ์ 15% ๋ด์ธ๋ก ์ง๊ธ๋๋ค.
์ ์ฒด ๊ณต๋ฌด์์ 14%๊ฐ ์๊ฐ์ ๊ณต๋ฌด์์ธ ๊ฒ์ผ๋ก ๋ํ๋๊ณ ์๋ค.
|
โก ๋ํ ๊ณ ๊ฐ์ผ๋ก๋ถํฐ ๋ณด์๋ฅผ ์์ทจํ ๋์๋ ์๋ฌธ์๋น์ค๋ฅผ ์ ๊ณตํ๊ธฐ ์ด์ ์ ๋ณด์๊ตฌ์กฐ๋ฅผ ๋ช
์ํ๋๋ก ํ๊ณ ์ด๋ฅผ ๋ฐํ์ผ๋ก ๊ณ ๊ฐ๊ณผ ํ์ํ์ฌ ์๋ฌธ๋ณด์๋ฅผ ๊ฒฐ์ ํ๋๋ก ํจ.
โ ์๊ตญ์ ๊ฒฝ์ฐ services and costs disclosure document๋ combined initial disclosure document ์ค ์ ์ ํ ๊ฒ์ ์ ํํ์ฌ ์๋น์ค ์ ๊ณต์ด์ ์ ๊ณ ๊ฐ์๊ฒ ์ ์ํ๋๋ก ํ๊ณ ์์.
โ ๋ํ ์๊ตญ์ ์ด์๋ฌธ๋น์ฉ ๊ณต์์ ๋ํ ์ค์น์ ์ ํฉ์ฑ ๋ณด๊ณ ์์ ์ ๊ณตํ๋๋ก ๊ท์ ํ๊ณ ์์.
โ ๊ณ ๊ฐ์๊ฒ ์ ์ํ ๋ณด์๊ตฌ์กฐ ์ธ์ ์ฌํ ๋น์ฉ์ ๋ถ๊ณผํ ์ ์์ผ๋ฉฐ ๋ณด์๊ตฌ์กฐ๋ฅผ ์ค๋ช
ํ๋ ๋ด์ฉ์ ๊ฑฐ์ง์ด ์์ด์ผ ํ๋ค๋ ์ ๋ ๊ท์
โ ํธ์ฃผ์ ๊ฒฝ์ฐ์๋ ์์๋ฃ๋ฅผ ํฌํจํ ๋ณด์๋ ๊ธ์ ์ ์ด์ต์ ๋ํ ์ ๋ณด ๋ฑ์ Financial service guides(๊ณ ๊ฐ์ด ์๋ฌธ์
์๋ฅผ ์ ํํ ๋ ์ ๊ณต๋ฐ๋ ์๋ฃ), Statement of Advice(์๋ฌธ์ด ์ ๊ณต๋ ๋ ์ ๊ณต ๋ฐ๋ ์๋ฃ)๋ฑ์ ๊ณต์ํ๋๋ก ํจ.
โก ๋์๊ฐ ์ ์ง๊ด๋ฆฌ์ ๊ด๋ จ๋ ๋ณด์์ ๊ฒฝ์ฐ, ์๋ฌธ์
์๊ฐ ์๋น์ค๋ ์ ๋๋ก ์ ๊ณตํ์ง ์์ผ๋ฉด์ ๊ด๋ จ ๋ณด์๋ง์ ์์ทจํ๋ ๊ฒฝ์ฐ๊ฐ ๋ฐ์ํ์ง ์๋๋ก ๊ด๋ จ ๋ด์ฉ์ ๋ช
ํํ ๊ณต์ํ๊ณ ์ค๋ช
ํ๋๋ก ๊ท์ ํจ.
โ ์๊ตญ์ ๊ฒฝ์ฐ ์ ์ง๊ด๋ฆฌ ๊ด๋ จ ๋ณด์(On going fee)๋ฅผ ์์์ ์ผ๋ก ์์ทจํ๋ ค๋ฉด ๊ณ ๊ฐ์๊ฒ ์ ๊ณต๋๋ ์๋น์ค์ ์ธ๋ถ๋ด์ญ๊ณผ ๊ด๋ จ ์์๋ฃ๋ฅผ ๋งค์นญํ์ฌ ์๋ ค์ฃผ๊ณ ๋ง์ฝ ๊ณ ๊ฐ์ด ์๋น์ค๋ฅผ ์ค๋์ ์ทจ์ํ๊ธธ ์ํ๋ค๋ฉด ์ง๋ถ์ ์ค๋จํ ์ ์๋ ๋ฐฉ๋ฒ๋ ์๋ ค์ฃผ๋๋ก ๊ท์ ํจ.
โ ๋ํ ์ ์ง๊ด๋ฆฌ๊ด๋ จ ๋ณด์๊ฐ ์ด์ฉํ๋์ ์ผ์ ๋น์จ๋ก ๊ณ์ฐํ์ฌ ์์ทจํ๋ ๊ฒฝ์ฐ์๋ ํ๋๊ธ์ก์ด ์ฆ๊ฐํจ์ ๋ฐ๋ผ ๊ด๋ จ ์ง๋ถ๊ธ์ก ์ญ์์ฆ๊ฐํ ์ ์๋ค๋ ์ ์ ๊ณ ๊ฐ์๊ฒ ๋ช
์์ ์ผ๋ก ์๋ ค์ฃผ๋๋ก ํจ.
โ ํธ์ฃผ์ ๊ฒฝ์ฐ์๋ ์๊ตญ๊ณผ ์ ์ฌํ๊ฒ ์ ๊ณต๋ฐ๋ ์๋น์ค์ ์ข
๋ฅ๋ฅผ ๋ช
์ํ๊ณ ์ทจ์๋ฐฉ๋ฒ ๋ฑ์ ์๋ ค์ฃผ๋๋ก ๊ท์ ํ๊ณ ์์.
|
Sort the book based on the price (highest to lowest) and then the timestamp (earliest to latest).
|
def sort_orders(self):
    """Sort the book by price (highest first), breaking ties by timestamp
    (earliest first), then refresh the cached size and price totals.

    Uses a stable two-pass sort: timestamp first, then price descending.
    """
    by_time = sorted(self.orders, key=attrgetter('timestamp'))
    self.orders = sorted(by_time, key=attrgetter('price'), reverse=True)
    # Size of the order book after the order was added.
    self.total_size_post_order = self.check_size()
    # Total price of the book (up to target_price shares) after the order.
    self.total_potential_price_post_trade = self.check_total_price()
|
def sortByRating(self):
    """Sort ``self.items`` ascending by the rating stored at index 3.

    Ratings are parsed with ``float``. A malformed row (missing or
    non-numeric rating) aborts the sort and leaves the list as-is —
    deliberate best-effort behavior, preserved from the original, but with
    the bare ``except:`` narrowed so KeyboardInterrupt/SystemExit are no
    longer swallowed.
    """
    try:
        self.items.sort(key=lambda item: float(item[3]), reverse=False)
    except (TypeError, ValueError, IndexError, KeyError):
        pass
|
๊ธฐํ์ฌ์ ๋ถ๊ฐ ๋ฌ๊ฑ๋ฅ 8๊ฐ์ง ํญ๋ชฉ์ ๊ดํ ํ ๋น๊ด์ธ ๊ฐ์ ์์ ๊ฒฐ์ ํ ๊ฑด ์ธ์ ์ผ
|
๊ธฐ์ฌ๋ถ๋ ๊ณ๋๋ฅ 8๊ฐ ํ๋ชฉ 3๋ง6,000ํค์ ๋ํ ํ ๋น๊ด์ธ ๊ท์ ๊ฐ์ ์๋ ์ด๋ ์๊ฒฐํ๋ค.
|
ํ์ง๋ง ๊ฐ์ ์์ด ํต๊ณผ๋ผ๋ ์ผ๋ฌ์ผ 9์์๋ ์ํํ ์ ์๋ ๋ฐ๋ค, ์์ฐ์ ์ฅ ๊ธฐํ์ฌ์ ๋ถ์ ๊ฐ๋ ฅํ ๋ฐ๋๊ฐ ์ฌ์ ํ ๊ฑธ๋ฆผ๋์ด๋ค.
|
๊ด์ ๋ฅ ๋ด์ฐ๋ฆฌ๋ฅผ ์ธ ๊ตฐ๋ฐ์์ ๊ด์ธกํ ์ ์๋ ์ด์ ๊ฐ ๋ญ์ผ?
|
๊ด์ ๋ฅ ๋ด์ฐ๋ฆฌ๋ ์ธ ๊ณณ์์ ๊ด์ธกํ ์ ์๋๋ฐ ๊ทธ ์ด์ ๋ \( \mathrm{CdGa}_{2} \mathrm{Se}_{4} \) ๋จ๊ฒฐ์ ๋ฐ๋ง์ ์ก๋ฐฉ์ ๊ณ ๊ตฌ์กฐ๋ก ์ฑ์ฅ๋์ด spin-orbit splitting๊ณผ non cubic crystalline field ์ ๋์ํจ๊ณผ์ ์ํ์ฌ band splitting ์ด ์ผ์ด๋ ๊ฒ์ผ๋ก ๋ณผ ์ ์๋ค.
|
๊ด์ ๋ฅ ์คํํธ๋ผ์ ์ธก์ ์์ ์๋์ง ๊ฐญ์ ํด๋น๋๋ ๊ฐ์ ์๋์์ ์ ๋๋๋ก ๋ค๋ฌ ์ ์๋ค์ ์ํ ๊ด์ ๋ฅ ๋ด์ฐ๋ฆฌ๋ค๊ณผ ๋จํ์ฅ๋์์ ๊ฐ ์ ์๋ splitting์ ์ํ ๊ด์ ๋ฅ ๋ด์ฐ๋ฆฌ๋ค์ด ๊ด์ธก๋์๋ค.
|
์ ๋ฝ๋ถ์๊ท์ฝ์ ๊ฐ์ธ์ ๋ณด ๋ณดํธ์ ์ด์ต์ ์ด๋ค ๋ฐฉ์์ผ๋ก ๊ฐ์ํ์์ด
|
์ ๋ฝ๋ถ์๊ท์ฝ์ ํํธ์ผ๋ก๋, ์ค์๋น๊ตญ์ด ๋ง๋ จํด ๋ ์ ๋ณด๋ง์ ์ ๊ณตํ๋๋ก ํ๊ณ , ๋ค๋ฅธ ํํธ์ผ๋ก๋ ์ ๊ณตํ ์ ๋ณด๋ฅผ ์ฃผ์, ์๋, ์ฌ์ฉ์, ๊ณ์ข๋ณด์ ์ฌ๋ถ ๋ฐ ์ฌ์ฐ ๋ฑ์ผ๋ก ์ด๊ฑฐํ๋ ๋ฐฉ์์ ํตํ์ฌ ๊ฐ์ธ์ ๋ณด ๋ณดํธ์ ์ด์ต์ ๊ณ ๋ คํ์๋ค. ์ด ๊ท์ฝ์ ๋ฐ๋ผ ๋
์ผ์ ์ธ๊ตญ์ ๋ถ์์งํ๊ถ์์ ๊ธฐํ ๊ฐ์ ์งํ๊ณผ ๊ด๋ จํ์ฌ ๏ฝข๊ตญ์ ๋ถ์์ฒญ๊ตฌ๊ถ ํ์ฌ์ ๊ดํ๋ฒ๋ฅ ๏ฝฃ ์ 17์กฐ์ ์ค์๋น๊ตญ์ ์ ๋ณด์ฒญ๊ตฌ๊ถ๊ณผ ์ ๋ณด์๋ฌด๋ฅผ ๊ท์ ํด ๋๊ณ ์๋ค. ์ ๋ฒ์ 17์กฐ์ ๊ท์ ๋ด์ฉ์ ์ค์๋น๊ตญ์ด ๋ฒ์ ์ฐ๊ธ๋ณดํ๊ธฐ๊ด์ ํนํ ๋น์์ ์ฌ์ฉ์์ ๊ดํ ์ ๋ณด๋ฅผ ์กฐํํ ์ ์๋๋ก ํ๊ณ , 4๋๋ณดํ๊ธฐ๊ด์ ๊ตฌ์ง์์ ๋
ธ๋ฌด์ ๊ณต ํํฉ์ ์กฐํํ๋๋ก ํ๋ฉฐ, ์ฐ๋ฐฉ๊ตญ์ธ์ฒญ์ ๊ตญ์ธ๊ธฐ๋ณธ๋ฒ ์ 93b์กฐ ์ 1ํญ์ ๊ณ์ข์ ๋ณด๋ฅผ ์กฐํํ ์ ์๋๋ก ํ๊ณ ์๋ค. ์ฌ๊ธฐ์ ์๋์ฐจ์ ๋ํ ์ ๋ณด๊น์ง ๊ท์ ํ๊ณ ์์ด ๋์ธ๋ถ์๋ฒ ์ 17์กฐ์ ๊ท์ ์ ๋ฏผ์ฌ์์ก๋ฒ์์ ์ฌ์ฐ์กฐํ์ ๋์ ์ ์ฌํ๋ค.
|
2. ์๊ตญ ๏ฝข์ต๋ช
ํ ์ค์ฒ๊ท์ฝ๏ฝฃ์ ์ฃผ์๋ด์ฉ
โก ์๊ตญ ์ ๋ณด๋ณดํธ์์ํ ๏ฝข์ต๋ช
ํ ์ค์ฒ๊ท์ฝ๏ฝฃ์ ์ผ์ข
์ ์๋ด์์ด๊ธฐ ๋๋ฌธ์, ๊ทธ ์์ฒด๊ฐ ๋ฒ์ ์ธ ๊ฐ์ ๋ ฅ์ ๊ฐ์ง๋ ๊ฒ์ ์๋์ง๋ง, ์ด ์ค์ฒ๊ท์ฝ์์ ๊ท์ ํ ์ฌํญ๋ค์ ์ค์ํ๋ค๋ ์ฌ์ค์ ์ ๋ณด๋ณดํธ์์ํ๊ฐ ํ์ธํ ๊ฒฝ์ฐ, ์ด๋ ์ถํ ์ ๋ณด๋ณดํธ์์ํ๊ฐ ๊ฐ์ธ์ ๋ณด ๋ณดํธ์ ๊ด๋ จํ ๋ฒ ์๋ฐ ์ฌ์ค์ ๋ํ ์กฐ์ฌ๋ ๋ฒ์งํ์ ํ ๊ฒฝ์ฐ ์ ๊ทน์ ์ผ๋ก ๊ณ ๋ คํ๊ฒ ๋๋ค๋ ์ ์์, ์ด ๊ท์ฝ์ ๊ฐ์ ์ ์ธ ์ํฅ๋ ฅ์ ์ง๋๋ ๊ฐ์ด๋๋ผ์ธ์ ํน์ฑ์ ์ง๋๊ณ ์๋ค๊ณ ํ๊ฐํ ์ ์์
โก ์ด ์ค์ฒ๊ท์ฝ์ ๊ฐ์ธ์ ๋ณด์ ์ด์ฉ์ ์์ด ๋ณดํธ์
๋ฌด๋ฅผ ์ถฉ์คํ ์ํํ๋ ๊ณผ์ ์์ ์ ๋ฐ๋ ์ ์๋ ์ํ์ ๊ด๋ฆฌํ๋ ๋ฐฉ์์ผ๋ก ์ต๋ช
ํ๋ฅผ ์๊ฐํ๊ณ ์์
โ ์ฐ์ ์ด ๊ท์ฝ์ด ์ด๋ค ๊ฒ์ด๋ฉฐ ์ ๋ฑ์ฅํ๊ณ ๋ฒ์ ์ธ ์ง์๊ฐ ์ด๋ ํ์ง(์ 1์ฅ)๋ฅผ ์ค๋ช
ํ๊ณ , ์ต๋ช
ํ๋ผ๋ ๊ฒ์ ๊ฐ์ธ์ ๋ณด์ ์ ์์ ๋ถ๋ฆฌํ๊ธฐ ์ด๋ ค์ด ์กด์ฌ์์ ๋ฐํ ํ(์ 2์ฅ), ๊ฐ์ธ์ ๋ณด์ ์ต๋ช
ํ๊ฐ ๊ฐ์ธ์ ๋ณด ๋ณดํธ์ ํจ๊ณผ์ ์ด๋ผ๋ ์ ์ ์ค๋ช
ํจ(์ 3์ฅ)
โ ์ด๋ฅผ ๋ฐํ์ผ๋ก ์ต๋ช
ํ๋ ์ ๋ณด๋ฅผ ์์ฐํ๊ฑฐ๋ ๊ณต๊ฐํ ๋์ ์ธ์ ๋ ์ง ์ ๋ณด์ฃผ์ฒด์ ๋์๊ฐ ํ์ํ ๊ฒ์ ์๋๋ผ๋ ์ (์ 4์ฅ)๊ณผ ๊ณต๊ฐ์ ๋ณด๋ฅผ ๊ฐ์ธ์ ๋ณด์ฒ๋ผ ์ทจ๊ธํด์ผ ํ ์ง๋ฅผ ๋ค๋ฃฌ ํ(์ 5์ฅ), ๊ฐ์ธ์ ๋ณด ๋ณดํธ๋ฒ์ ์ ๋ฐ๋ผ์๋ ๊ณต๊ฐํด๋ ๋๋ ์ ๋ณด๋ ์ธ๊ถ๋ฒ ๊ฐ์ ๊ธฐํ ๋ฒ๋ น์ ์ํด ๊ณต๊ฐํ์ง ๋ง์์ผ ํ๋ ๋๊ฐ ์์(์ 6์ฅ)์ ๋ฐํ๊ณ ์์
โ ๋ํ ๊ฐ์ธ์ ๋ณด ๋ณดํธ๋ฅผ ์ํด์๋ ์ ๋ณด์ ์ ํ๋ณ๋ก ๋ฌ๋ฆฌ ์ทจ๊ธ๋ ํ์๊ฐ ์์ผ๋ฉฐ(์ 7์ฅ), ์ ๋ณด๋ณดํธ๋ฅผ ์ํ ๊ฑฐ๋ฒ๋์ค๊ฐ ์ค์ํ๋ค๋ ์ (์ 8์ฅ) ๋ฐ ๋ณดํธ์ ์ด์ฉ์ ๊ท ํ์ ์ํ ์ฐ๊ตฌ๋ชฉ์ ์ ๊ฐ์ธ์ ๋ณด ๋ณดํธ๋ฒ์ ์ ์ ์ฉ์์ธ(์ 9์ฅ)์ ๋ํด ์ค๋ช
ํ๊ณ ์์
|
Add InvertdIndex doc_index of the document doc_id to the MultiDocument InvertedIndex (inverted), using doc_id as document identifier.
|
def inverted_index_add(inverted, doc_id, doc_index):
    """Merge a single document's inverted index into the multi-document one.

    For every word in *doc_index*, record its locations under *doc_id* in
    *inverted* (``{word: {doc_id: locations}}``), creating word entries as
    needed.

    Parameters
    ----------
    inverted : dict
        Multi-document inverted index, mutated in place.
    doc_id
        Identifier for the document being added.
    doc_index : dict
        Single-document index mapping word -> locations.

    Returns
    -------
    dict
        The same *inverted* mapping, for chaining.
    """
    # Fix: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3 (this file uses f-strings elsewhere, so it runs on Python 3).
    for word, locations in doc_index.items():
        indices = inverted.setdefault(word, {})
        indices[doc_id] = locations
    return inverted
|
def _act_index_exact(fieldname, doc, value, context):
    """Index *value* verbatim on *doc* under *fieldname* (weight/position 0).

    ``context`` is accepted for interface compatibility with sibling index
    actions but is unused here.
    """
    doc.add_term(fieldname, value, 0)
|
์ด๊ธฐ ๋จ๊ณ ์กฐ์น์ ๋ฐ๋ผ ํฅํ ๋ช ๊ฐ์๊ฐ ๊ตญ์ ์ฌํ์ ๋์ด๋ ์ ์ฌ๊ฐ ์ผ๋ถ ์ํ๋ ์ ์์ด
|
์ฐ๋ฆฌ๋๋ผ์ ๋ํ 2012๋
๋ฏธ๊ตญ ๊ตญ๋ฐฉ์๊ถ๋ฒ์ ๋์ด๋์ ์ฌ ์์ธ ์ง์ ์ฐ์ฅ
3. ํํธ, โP5+1โ ๊ตญ๊ฐ๋ค(์ ์ ์๋ณด๋ฆฌ ์์์ด์ฌ๊ตญ์ธ ๋ฏธ, ๋ฌ, ์ค, ์, ํ 5๊ฐ๊ตญ ๋ฐ ๋
์ผ)๊ณผ ์ด๋์ด 11.24(์ผ) ์ ๋ค๋ฐ์์ ํฉ์ํ โ์ด๊ธฐ ๋จ๊ณ ์กฐ์นโ์๋, ์ด๋ ์ธก์ ์ฝ์ ์ค์๋ฅผ ์กฐ๊ฑด์ผ๋ก ํฅํ 6๊ฐ์๊ฐ ๊ตญ์ ์ฌํ์ ๋์ด๋ ์ ์ฌ๊ฐ ์ผ๋ถ ์ํ๋ ์ ์๋ค๊ณ ๋์ด ์๋ค.
ใ
๋ฐ๋ผ์, ์์ผ๋ก์ ์ด๋์ฐ ์์ ์์
๊ฐ์ถ ๋ฌธ์ ๋ฑ ์ ์ฌ ์ํ์ ๊ด๋ จํ ๋ด์ฉ์ ํฅํ ์ ๋ฐ ์ํฉ์ ๋ฐ๋ผ ๊ตฌ์ฒดํ๋์ด ๊ฐ ๊ฒ์ผ๋ก ๋ณด๋ฉฐ, ์ ๋ถ๋ ๊ทธ ๊ณผ์ ์์ ๊ด๋ จ ๊ตญ๊ฐ๋ค๊ณผ ํ์ํ ํ์๋ฅผ ํด ๋๊ฐ ์์ ์ด๋ค.
โป ๋ค๋ง, ์ด๋ฒ ์ ๋ค๋ฐ ํฉ์์๋ ๋ถ๊ตฌํ๊ณ , ๋ฏธ๊ตญ์ใ2013๋
๊ตญ๋ฐฉ์๊ถ๋ฒใ์ ํฌํจ๋ ์ด๋์์ ๋ฐ๋ฐํ์ฐ๋ฒ(IFCA: Iran Freedom and Counterproliferation Act) ๋ฐ 2010๋
ํตํฉ์ด๋์ ์ฌ๋ฒ(CISADA: Comprehensive Iran Sanctions, Accountability and Divestment Act) ๋ฑ ๋ค๋ฅธ ๋ฏธ๊ตญ ๋ฒ์ ๋ฐ๋ฅธ ๋์ด๋ ์ ์ฌ๋ ์ง์ ์ ์ฉ๋๋ฏ๋ก ์ ์ ํ์
4. ์ฐ๋ฆฌ ์ ๋ถ๋ ๊ตญ์ ์ฌํ์ ์ฑ
์์ ๋คํ๋ ์ผ์์ผ๋ก์ ์ด๋ํต ๋ฌธ์ ์ ํํ์ ํด๊ฒฐ์ ์ํ ๋
ธ๋ ฅ์ ๊ณ์ํด์ ์ ๊ทน ๋์ฐธํ ์์ ์ด๋ฉฐ, ์ด๋ฌํ ๊ณผ์ ์์ ์ฐ๋ฆฌ ๊ธฐ์
๋ค์ด ๋ถํ์ํ ํผํด๋ฅผ ์
์ง ์๋๋ก ์์ผ๋ก๋ ์ต๋ํ์ ๋
ธ๋ ฅ์ ๊ธฐ์ธ์ฌ ๋๊ฐ ๊ฒ์ด๋ค.
|
๊ธ๋ก๋ฒ ์์ ์์ฅ ๋ณํ๋ฅผ ์์ ์์
๋ค๋ณํ ๊ธฐํ๋ก
3. ์ต๊ทผ ์์ ์์ฅ ๋ณํ ๋ฐ ๋์
[ ์ด๋์ ์ฌ ํด์ ๋ ๋์
์ ๋ค๋ณํ์ ๊ธฐํ ]
โก ์ ๋ฌธ๊ฐ๋ค์ ๋ด๋
๋ ์๋ฐ๊ธฐ ์ด๋ ๊ฒฝ์ ์ ์ฌ ํด์ ๋ก ์ด๋์ฐ ์์ ๊ฐ ์์ฅ์ ๊ณต๊ธ๋ ๊ฒฝ์ฐ ์ ๊ฐํ๋ฝ์ ์ํฅ์ ์ค ๊ฒ์ผ๋ก ์ ๋ง
ใ
์ค๋ ์ ์ฌ๊ธฐ๊ฐ์ผ๋ก ์ด๋ ์์ ๊ณต๊ธ์ด ๋จ๊ธฐ๊ฐ๋ด ์ ์ฌ์ ์์ค์ผ๋ก ํ๋ณต๋๊ธฐ๋ ์ฝ์ง ์์ผ๋, 2016๋
์ 50๋ง๏ฝ100๋งB/d ๊ท๋ชจ์ ์์ ๊ณต๊ธ์ด ๊ฐ๋ฅํ๊ณ , 3,000๋งB~6,000๋งB ๊ท๋ชจ์ ํด์์ฌ๊ณ ๋ฅผ ๋ณด์ ํ๊ณ ์์ด ํฅํ ์ ๊ฐ ํ๋ฝ์์ธ์ผ๋ก ์์ฉํ ๊ฒ์ผ๋ก ์ ๋ง
* ์ด๋์ ์ฌ ํด์ ๋ก ์ ๊ฐ ์ถ๊ฐํ๋ฝ์ ์ ๋งํ๋ ๊ฒฌํด, ์ด๋ฏธ ์ ๊ฐ์ ์ ๋ฐ์๋์ด ์๋ค๋ ๊ฒฌํด๊ฐ ์์ด ํด์ ์ ์ ๊ฐํ๋ฝ ๊ท๋ชจ์๋ ๋ถํ์ค์ฑ์ด ์กด์ฌ
โก ์ ์ ์
๊ณ๋ ์ด๋์ ์ฌ ํด์ ์, ์ ์ฌ์ ์์ค์ผ๋ก ์ด๋ ์์ ์์
์ ๋๋ฆด ๊ฒ์ผ๋ก ์ ๋ง
ใ
ํ์ฌ ์ด๋์ฐ ์์ ์์
์ ์ ์ฌ ์ ๊ณผ ๋น๊ตํ์ฌ ์ ๋ฐ ์์ค์ผ๋ก ๊ฐ์ํ ์ํฉ์ผ๋ก, ํฅํ ์ปจ๋ด์ธ์ดํธ๋ฅผ ์ค์ฌ์ผ๋ก ์ด๋์์ ์ ๋ํ ์์ ์ฆ๊ฐ ์ ๋ง
|
Method is defined by the implemented data loaders to yield the sample indexes. Only used in data_generator.
|
def sample_index_generator(self):
    """Yield sample indexes; concrete data loaders must override this.

    Only used by ``data_generator``.
    """
    raise NotImplementedError
|
def get_sample(self, dataset, sample_idx):
    """Return the sample at *sample_idx* from *dataset*.

    No-op placeholder in this base class; subclasses override it.
    """
    pass
|
Check if loop brackets are balanced; if not, exit. Assumes the program was already sanitized of comments.
|
def check_valid(program):
    """Verify that the program's loop brackets are balanced; exit(-1) with
    an error message on stderr if they are not.

    Assumes *program* has already been sanitized of comments.
    """
    depth = 0
    for ch in program:
        # '[' opens a loop, ']' closes one; anything else is ignored.
        depth += (ch == '[') - (ch == ']')
        if depth < 0:
            print("\nERROR: Loop brackets not balanced", file=sys.stderr,
                  flush=True)
            exit(-1)
    if depth > 0:
        print("\nERROR: Loop bracket not balanced, not enough closing",
              file=sys.stderr, flush=True)
        exit(-1)
|
def is_balanced(string: str) -> bool:
    """Return True iff all (), {} and [] brackets in *string* are properly
    matched and nested; non-bracket characters are ignored."""
    opener_for = {')': '(', '}': '{', ']': '['}
    pending = []
    for ch in string:
        if ch in '({[':
            pending.append(ch)
        elif ch in opener_for:
            # A closer must match the most recent unmatched opener.
            if not pending or pending[-1] != opener_for[ch]:
                return False
            pending.pop()
    return not pending
|
Return Relay vars from input shapes and create entries based on expected graph inputs to allow translation
|
def _get_relay_input_vars(graph, input_infos, prelude, is_module=True, default_dtype="float32"):
    """Create Relay vars for the graph's inputs from user-supplied shapes.

    Matches *input_infos* entries positionally against the TorchScript
    graph's inputs and builds one Relay var per input so the graph can be
    translated.

    Parameters
    ----------
    graph
        TorchScript graph whose inputs are being typed.
    input_infos : list
        List of ``(name, shape)`` or ``(name, (shape, dtype))`` tuples.
    prelude
        Relay prelude; used to construct List types.
    is_module : bool
        True when the graph comes from a ScriptModule (its implicit "self"
        input is dropped).
    default_dtype : str
        Dtype used when neither the entry nor the graph specifies one.

    Returns
    -------
    dict
        Mapping from graph-input debug name to the created Relay var.

    Raises
    ------
    RuntimeError
        On malformed *input_infos* or mismatches with the graph.
    NotImplementedError
        For unsupported input kinds (e.g. scalar inputs).
    """
    graph_inputs = list(graph.inputs())
    if is_module:
        # a module has "self" as first input, which we do not need/want
        graph_inputs = graph_inputs[1:]
    if not isinstance(input_infos, list):
        msg = "Graph inputs input_infos should be a list"
        raise RuntimeError(msg)
    if len(graph_inputs) != len(input_infos):
        msg = f"PyTorch has {len(graph_inputs)} inputs and input_infos lists {len(input_infos)}."
        raise RuntimeError(msg)

    def get_relay_ty(ishape, itype, pt_type):
        # Recursively convert (shape, dtype) info plus the graph's own type
        # annotation into a Relay type, validating consistency along the way.
        if pt_type.kind() == "TensorType":
            if not (_is_int_seq(ishape) or len(ishape) == 0):
                msg = "Shape for Tensors must be lists of ints"
                raise RuntimeError(msg)
            if (pt_type.dim() is not None and pt_type.dim() != len(ishape)) or (
                pt_type.sizes() is not None
                and any([s1 != s2 for s1, s2 in zip(pt_type.sizes(), ishape)])
            ):
                msg = "Shapes of input list and information in the graph do not match"
                raise RuntimeError(msg)
            if len(ishape) > 1 and any(dim <= 0 for dim in ishape[1:]):
                msg = (
                    "Expected input's non-batch dimensions to have positive length, "
                    f"but input has a shape of {pt_type.sizes()}"
                )
                raise RuntimeError(msg)
            pt_dtype = pt_type.scalarType()
            if not pt_dtype and itype:
                pt_dtype = itype
            dtype = _convert_data_type(pt_dtype, default_dtype=default_dtype)
            return TensorType(ishape, dtype)
        elif pt_type.kind() == "TupleType":
            if not isinstance(ishape, tuple):
                msg = "Shapes for tuples must be tuples"
                raise RuntimeError(msg)
            return TupleType(
                [get_relay_ty(elem, itype, pt_t) for elem, pt_t in zip(ishape, pt_type.elements())]
            )
        elif pt_type.kind() == "ListType":
            if not isinstance(ishape, list):
                msg = "Shapes for lists must be lists"
                raise RuntimeError(msg)
            pt_elemtype = pt_type.getElementType()
            elem_tys = [get_relay_ty(s, itype, pt_elemtype) for s in ishape]
            if len(elem_tys) > 0 and not all(map(lambda ty: ty == elem_tys[0], elem_tys)):
                msg = "List elements need have identical types"
                raise RuntimeError(msg)
            rlist, _, _ = prelude.mod.get_type("List")
            return rlist(elem_tys[0])
        elif pt_type.kind() == "OptionalType":
            # we do not support None yet, so we fill in the type
            return get_relay_ty(ishape, itype, pt_type.getElementType())
        # TODO: scalar inputs
        raise NotImplementedError("unsupported input type")

    input_vars = {}
    new_input_infos = []
    # Normalize entries lacking an explicit dtype to (name, (shape, default_dtype)).
    for num, inp in enumerate(input_infos):
        if not isinstance(inp, tuple):
            msg = f"Graph input {num} is not a tuple"
            raise RuntimeError(msg)
        if len(inp) != 2 or not isinstance(inp[0], str):
            msg = (
                f"Graph input {inp} is not valid,"
                f" expected ('name', shape) or ('name', (shape, dtype))"
            )
            raise RuntimeError(msg)
        if not isinstance(inp[1], tuple) or len(inp[1]) == 0 or not isinstance(inp[1][-1], str):
            new_input_infos.append((inp[0], (inp[1], default_dtype)))
        else:
            new_input_infos.append(inp)
    input_types = [
        (name, get_relay_ty(info[0], info[1], gi.type()))
        for (name, info), gi in zip(new_input_infos, graph_inputs)
    ]
    ir_inputs = [i.debugName() for i in graph_inputs]
    for ir_input, (name, itype) in zip(ir_inputs, input_types):
        inp = _expr.var(name, type_annotation=itype)
        # Translate from graph input to user input name
        input_vars[ir_input] = inp
    return input_vars
|
def parseInputs():
    """Read all model input CSVs and return them as a tuple.

    Returns
    -------
    tuple
        ``(data, emissionTax, nonEmissiveIncentive, penaltyValues,
        plantProductionRates)`` where ``data`` maps year -> DataFrame of
        monthly energy consumption per zone (GWh).

    # NOTE(review): relies on module-level `path` and `const` — confirm
    # they are defined where this runs.
    """
    # Import past year's data
    data = {} # {year: df}
    # df row indices are months 0-11. colums are zones Z1-Z7
    # values are energy consumption in that month in that zone, in GWh
    for year in const.DATA_YEARS:
        data[year] = pd.read_csv(f'{path}/{const.DATA_DIR}/NBTrend20{year}.csv', names=const.ZONES)
    # Import Incentive Rates
    incentiveRates = pd.read_csv(f'{path}/{const.INFO_DIR}/IncentiveRates.csv')
    # NOTE(review): this takes the two column *labels* (the CSV header row)
    # as the rates — confirm the file really stores the values in its header.
    emissionTax, nonEmissiveIncentive = incentiveRates.columns # $/kWh
    # Import Penalty Values
    penaltyValues = pd.read_csv(f'{path}/{const.INFO_DIR}/PenaltyValues.csv', names=const.PENALTY_ZONES)
    # Import Plant Production Rates
    plantProductionRates = pd.read_csv(f'{path}/{const.INFO_DIR}/PlantProductionRates.csv', names=const.PLANT_TYPES)
    return data, emissionTax, nonEmissiveIncentive, penaltyValues, plantProductionRates
|
This function creates a figure (chart) that is a Sankey Chart for the neighborhood that is input
|
def createSankeyChart(nbd):
    """Create a Sankey chart figure for the given neighborhood.

    Links neighborhoods (or neighborhood groups when *nbd* == "All") to
    property-type classes, with link widths proportional to the number of
    listings.

    Parameters
    ----------
    nbd : str
        Neighborhood name, or "All" for the whole city grouped by
        neighborhood group.

    Returns
    -------
    plotly.graph_objects.Figure
        Configured Sankey figure using the "custom_dark" template.

    # NOTE(review): relies on module-level `rental_df`, `full_color_list`
    # and `dashboard_colors` — confirm they are defined where this runs.
    """
    nbd_col = (
        "neighbourhood_group_cleansed" if nbd == "All" else "neighbourhood_cleansed"
    )
    if nbd != "All":
        three_proptype_df = rental_df[
            rental_df["neighbourhood_group_cleansed"] == nbd
        ].copy()
    else:
        three_proptype_df = rental_df.copy()
    three_proptype_df = three_proptype_df[[nbd_col, "property_type_class"]].copy()
    # Limit types of property to House, Private Room and Shared Room
    # NOTE(review): "Seviced apartment" looks like a typo for
    # "Serviced apartment" — confirm against the actual data values.
    three_proptype_df = three_proptype_df[
        three_proptype_df["property_type_class"].str.contains(
            "House|Private Room|Shared Room|Condominium|Seviced apartment|Apartment|Townhouse"
        )
    ]
    # we are not interested in Houseboats in the Sankey chart
    three_proptype_df = three_proptype_df[
        ~three_proptype_df["property_type_class"].str.contains("Houseboat")
    ]
    # Node labels: sorted neighborhoods first, then the property-type classes.
    label_list = three_proptype_df[nbd_col].unique().tolist()
    label_list.sort()
    label_list += three_proptype_df["property_type_class"].unique().tolist()
    # Create a new column count_listings with number of listings per neighborhood/neighborhood group and propert type
    sankey_df = three_proptype_df.groupby([nbd_col, "property_type_class"]).agg(
        count_listings=("property_type_class", "count")
    )
    sankey_df = sankey_df.reset_index()
    # Create an dictionary of the indices of the nodes we are going to link (the sankey cchart links are created between these indices)
    label_idx_dict = {}
    for idx, label in enumerate(label_list):
        label_idx_dict[label] = idx
    # Use the dictionary to map the nodes to the indizes in the dataframe
    sankey_df["nbd_idx"] = sankey_df[nbd_col].map(label_idx_dict)
    sankey_df["prop_idx"] = sankey_df["property_type_class"].map(label_idx_dict)
    color_list = full_color_list[: len(three_proptype_df[nbd_col].unique().tolist())]
    group_color = dict(zip(list(sankey_df.groupby(nbd_col).groups.keys()), color_list,))
    sankey_df["color_link"] = sankey_df[nbd_col].map(group_color)
    source = sankey_df["nbd_idx"].tolist()
    target = sankey_df["prop_idx"].tolist()
    values = sankey_df["count_listings"].tolist()
    # There are as many colors as nodes = 17 + 3
    if nbd == "All":
        color_node = full_color_list + ["#befdb7", "#1B03A3", "#FEFCD7"]
    else:
        color_node = full_color_list[: len(label_list)]
    # For every neighborhood we use the same color for the link
    color_link = sankey_df["color_link"].tolist()
    fig = go.Figure(
        data=[
            go.Sankey(
                node=dict(
                    pad=15,
                    thickness=20,
                    line=dict(color="black", width=0.5),
                    label=label_list,
                    color=color_node,
                    customdata=label_list,
                    hovertemplate="%{customdata} has %{value} listings<extra></extra>",
                ),
                link=dict(
                    source=source,
                    target=target,
                    value=values,
                    color=color_link,
                    hovertemplate="Link from %{source.customdata}<br />"
                    + "to %{target.customdata}<br />has %{value} listings<extra></extra>",
                ),
            )
        ]
    )
    fig.update_layout(
        title_text="Available houses and rooms",
        font_size=12,
        title_font_color=dashboard_colors["medium-blue-grey"],
        font=dict(size=12, color=dashboard_colors["medium-blue-grey"]),
    )
    # Set the theme
    fig.layout.template = "custom_dark"
    return fig
|
def show_custom_graph(self):
    """Display a custom graph; intentionally a no-op in this class."""
    pass
|
야외 캐노피/파티오의 비용은 얼마인가요?
|
์ผ๋ฐ์ ์ธ ์ฃผํ์ ์ผ๋ฐ์ ์ธ ์ฐฝ๋ฌธ์ ๋ํ ์ฐจ์์ ํฌ๊ธฐ์ ๋ฐ๋ผ 300๋ฌ๋ฌ์์ 700๋ฌ๋ฌ์ ๋น์ฉ์ด ๋ค ๊ฒ์
๋๋ค. ์ฌ๊ธฐ์๋ ํ๋ ์๊ณผ ์๋ ๋ฆฌํธ๋์
๋ฉ์ปค๋์ฆ, ์กฐ๋ฆฝ ํ๋์จ์ด ๋ฐ ์ค์น ๋ธ๋ํท์ด ํฌํจ๋ฉ๋๋ค. ๋น๋ฆฌํธ๋์
๊ธ์ ์ฐจ์์ ์ฌ์ฉํ๊ธฐ๋ก ๊ฒฐ์ ํ ๊ธ์์ ๋ฐ๋ผ 500๋ฌ๋ฌ์์ 800๋ฌ๋ฌ์ ๋น์ฉ์ด ๋ค ์ ์์ต๋๋ค. ๊ณฐํก์ด ๋ฐฉ์ง ์ฒ๋ฆฌ๋ฅผ ์ ์ฉํ๋ฉด ๋น์ฉ์ ์ ๊ฐํ ์ ์์ผ๋ฉฐ, ๋ณ์ ์ ๊ฑฐ๋ฅผ ์ํ ์ฒญ์ ๋น์ฉ์ 40๋ฌ๋ฌ์
๋๋ค. ํํฐ์ค์ฉ ๋ชจํฐํ๋ ๋ฆฌํธ๋์
์ฐจ์์ 2,500๋ฌ๋ฌ์์ 3,500๋ฌ๋ฌ์ ๋น์ฉ์ด ๋ค ์ ์์ต๋๋ค.
|
ํจ๋ธ๋ฆญ ์ฐจ์์ ๋น์ฉ์ ์ผ๋ง์ธ๊ฐ์? ์ง์ญ ํํฐ์ค ๋ฐ ๊ฒฝ๋ก ๊ณ์ฝ์์๊ฒ ๋ฌด๋ฃ ๊ฒฌ์ ์ ๋ฐ์ผ์ธ์. ์ผ์ธ ์ํ ๊ณต๊ฐ์์ ๋ณด๋ด๋ ์๊ฐ์ ๊ทน๋ํํ๊ธฐ ์ํด ๊ทธ๋์ ์ถ๊ฐํ๋ ๊ฒ์ ์ฌ๋ฆ์ ๋์ ์์์ ๋ฌด์๋๋ ํํฐ์ค์ ์ค๋ฝ์ ์ ํฉํ ๋ถ์ฃผํ ํํฐ์ค์ ์ฐจ์ด๋ฅผ ๋ง๋ค ์ ์์ต๋๋ค.
|
Displays a text on the frame with a shadow behind it for better visualization on any background
|
def text_on_frame(frame, text, position, thickness, font_size=1, text_color=(255, 255, 255), shadow_color=(128, 128, 128), font_style=cv2.FONT_HERSHEY_SIMPLEX, line_style=cv2.LINE_AA):
    """Draw *text* on *frame* with a shadow behind it so it stays readable
    on any background.

    The shadow is drawn first, one pixel thicker, then the text is drawn
    on top of it in the foreground color.
    """
    for color, thick in ((shadow_color, thickness + 1), (text_color, thickness)):
        cv2.putText(frame, text, position, font_style, font_size, color, thick, line_style)
|
def render_text(self, text, color, bg=None):
    """Render *text* antialiased with this object's font and return the surface.

    ``bg`` is an optional background color; None keeps the background
    transparent.
    """
    return self.font.render(text, True, color, bg) # type: pygame.SurfaceType
|
Return the image from the input ``images`` list that has the largest overlap with the ``refimage`` image.
|
def max_overlap_image(refimage, images, enforce_user_order):
    """Pop and return the image from *images* that overlaps *refimage* most.

    When *enforce_user_order* is set (old tweakreg behavior), the first
    image is taken regardless of overlap. Returns None for an empty list.
    The chosen image is removed from *images* in place.
    """
    if not images:
        return None
    if enforce_user_order:
        # revert to old tweakreg behavior
        return images.pop(0)
    overlaps = [refimage.intersection_area(image) for image in images]
    return images.pop(np.argmax(overlaps))
|
def get_images_id_from_database(db, tags, img_type, use_count, limit):
    """Return up to *limit* image ids that match all *tags*.

    First intersects the per-tag index collections (filtered by image type
    and minimum use count). If the intersection is too small, falls back
    to scanning the default ``images`` pool for documents that embed every
    tag.

    Parameters
    ----------
    db
        Mongo-style database: ``db[tag]`` / ``db['images']`` collections
        with ``find``.
    tags : sequence of str
        Tags the images must all carry; must be non-empty.
    img_type
        Required image type.
    use_count : int
        Only images with use_count strictly greater than this qualify.
    limit : int
        Maximum number of ids to return.

    Returns
    -------
    list of str
        Image id strings (order unspecified for the indexed branch).
    """
    # First find intersection of each tag index in the databases
    all_tags_list = []
    for tag in tags:
        coll = db[tag]
        raw_result = coll.find({})
        image = [image['_id'] for image in list(raw_result)]
        result = db.images.find(
            {"_id" : {"$in" : image},
             "img_type": img_type,
             "use_count": {"$gt": use_count}},
            {"_id": 1})
        images = [str(image['_id']) for image in list(result)]
        all_tags_list.append(images)
    id_list = set(all_tags_list[0]).intersection(*all_tags_list[1:])
    # Second, check whether we have to fetch more images from default image pool
    if len(id_list) >= limit:
        print("Get images from index!")
        # Fix: sets do not support slicing; the original `id_list[:limit]`
        # raised TypeError. Materialize to a list first.
        images_list = list(id_list)[:limit]
    else:
        print("Not enough images from index!")
        print("Try to find more in the default image pool!")
        coll = db['images']
        # Then to find other images from the default image pool
        all_tags_list = []
        for tag in tags:
            tag_str = 'tags.%s' % tag
            result = coll.find(
                {tag_str: {"$exists": True},
                 "img_type": img_type,
                 "use_count": {"$gt": use_count}},
                {"_id": 1})
            images = [str(image['_id']) for image in list(result)]
            all_tags_list.append(images)
        images_list = list(set(all_tags_list[0]).intersection(*all_tags_list[1:]))
        if len(images_list) >= limit:
            images_list = images_list[:limit]
    return images_list[:limit]
|
ํํํ ๊ธฐํ์ด๋ ๋ฌด์์ธ๊ฐ?
|
유리 기판은 액정 텔레비전(LCD), 플라즈마 텔레비전(PDP), 노트북 디스플레이와 같은 평판 디스플레이(FPD)를 만드는 시트 유리입니다. 이 재료는 창유리에 사용되는 시트 유리와 다릅니다. 액정 디스플레이와 플라즈마 디스플레이 간에도 재료가 다릅니다.
|
ํนํ ์ค์ํ๊ณ ๋ด์ค ๊ฐ์น๊ฐ ์๋ ๊ฒ์ ๊ฐค๋ญ์ S6 ์ฃ์ง(๋ฐ ๊ฐค๋ญ์ ๋
ธํธ ์ฃ์ง)์ ์ ์ฐํ ํ๋ผ์คํฑ ๊ธฐํ์์์ OLED ๋์คํ๋ ์ด ์ฑ๋ฅ์ด ๊ฐค๋ญ์ S6์ ์ ํต์ ์ธ ํ๋ฉด ๋ฐ ๋จ๋จํ ๊ธฐํ์์์ ์ฑ๋ฅ๊ณผ ๋ณธ์ง์ ์ผ๋ก ๋์ผํ๋ค๋ ์ ์
๋๋ค. ์ด๋ 500ํฝ์
์ด์ ๋ฐ 2560x1440 ํด์๋์์๋ ๋ง์ฐฌ๊ฐ์ง์
๋๋ค.
|
Count `constraints` for part one. Lines where `char` occurs at least `low` and at most `high` times.
|
def part_one(constraints: t.List[Constraint]) -> int:
    """Count constraints satisfied for part one: lines where *char* occurs
    at least *low* and at most *high* times in *password*."""
    return sum(
        low <= password.count(char) <= high
        for low, high, char, password in constraints
    )
|
def main(text, char_lower, char_upper, total_lower, total_upper):
    """Print a bar-chart histogram of letter frequencies in *text*.

    Distinct lowercase and uppercase letters are appended (in order of
    first appearance) to *char_lower* / *char_upper*, and their counts to
    *total_lower* / *total_upper*, mirroring the original's in-place use of
    the argument lists. Each bar uses '-----|' per five occurrences plus
    '-' per remainder.
    """
    for ch in text:
        if ch.islower() and ch not in char_lower:
            char_lower.append(ch)
        elif ch.isupper() and ch not in char_upper:
            char_upper.append(ch)
    lowers, uppers = sorted(char_lower), sorted(char_upper)
    total_lower.extend(text.count(ch) for ch in lowers)
    total_upper.extend(text.count(ch) for ch in uppers)
    chars = lowers + uppers
    totals = total_lower + total_upper
    for ch, count in zip(chars, totals):
        full_bars, remainder = count // 5, count % 5
        bar = ("-----|" * full_bars + "-" * remainder).rstrip("|")
        print("%s : %s" % (ch, bar))
|
This view should return a list of all the enrollments organized for the currently authenticated user's group.
|
def get_queryset(self):
    """Return the enrollments visible to the requesting user.

    Superusers see every enrollment; everyone else only sees enrollments
    whose organizer is one of the user's groups.
    """
    requesting_user = self.request.user
    if requesting_user.is_superuser:
        return Enrollment.objects.all()
    return Enrollment.objects.filter(organizer__in=requesting_user.groups.all())
|
def _GetEntitiesInEntityGroup(self, entity_group):
    """Return all entities in *entity_group*; subclasses must override."""
    raise NotImplementedError
|
Integration test with a real consul agent. Start a service, register it, close it, verify it's deregistered.
|
async def test_integration(self, deregister):
    """Integration test against a real local consul agent.

    Starts an HTTP server registered with consul, verifies the registration
    fields, closes the server, then checks that the service is deregistered
    exactly when `deregister` is True. Skips when no consul agent is running.
    """
    tags = ("foo", "bar")
    service_id = str(uuid.uuid4()) # allow for parallel tests
    con = _LocalConsulAgentClient(token=None)
    ca = ConsulAgent(
        name="test-metrics",
        service_id=service_id,
        tags=tags,
        deregister=deregister,
    )
    try:
        server = await aio.web.start_http_server(
            addr="127.0.0.1", service_discovery=ca
        )
    except aiohttp.ClientOSError:
        # No consul agent listening locally: nothing to integrate with.
        pytest.skip("Missing consul agent.")
    # The service must be registered with the name/tags/address we supplied.
    svc = (await con.get_services())[service_id]
    assert "test-metrics" == svc["Service"]
    assert sorted(tags) == sorted(svc["Tags"])
    assert server.socket.addr == svc["Address"]
    assert server.socket.port == svc["Port"]
    await server.close()
    services = await con.get_services()
    if deregister:
        # Assert service is gone iff we are supposed to deregister.
        assert service_id not in services
    else:
        assert service_id in services
    # Clean up behind ourselves.
    resp = await con.deregister_service(service_id)
    assert 200 == resp.status
|
def test_service_status_on_daemon_with_pid():
    """status() is True for an always-running daemon whose launchd entry carries a PID."""
    launchd_list_output = (
        '{\n\t"LimitLoadToSessionType" = "System";\n\t"Label" ='
        ' "com.salt";\n\t"OnDemand" = false;\n\t"LastExitStatus" = 0;\n\t"PID" ='
        ' 218;\n\t"Program" = "/opt/salt";\n\t\t"--disable-keepalive";\n\t);\n};'
    )
    patched_functions = {
        "service.list": MagicMock(return_value=launchd_list_output),
    }
    # One combined context manager instead of four nested `with` statements.
    with patch.dict(mac_service.__salt__, patched_functions), \
            patch.object(mac_service, "_launch_agent", lambda _: False), \
            patch.object(mac_service, "_get_service", lambda _: {"": ""}), \
            patch.object(mac_service, "_always_running_service", lambda _: True):
        assert mac_service.status("com.salt") is True
|
Counts the number of trainable tf.Variables to get a rough idea of how complex this Model is
|
def count_num_trainable(self):
    """Count the total number of scalar parameters in all trainable tf.Variables.

    Stores the result on `self.num_trainable_variables` and returns it —
    a rough measure of model complexity.
    """
    total = 0
    for variable in tf.trainable_variables():
        # get_shape() yields tf.Dimension objects; multiply their sizes.
        elements = 1
        for dim in variable.get_shape():
            elements *= dim.value
        total += elements
    self.num_trainable_variables = total
    return self.num_trainable_variables
|
def param_size(model: torch.nn.Module) -> int:
    """Total element count of the model's parameters, excluding any parameter whose name contains "auxiliary"."""
    total = 0
    for name, param in model.named_parameters():
        if "auxiliary" in name:
            continue
        total += param.numel()
    return total
|
This method accesses the get all profiles endpoint which takes a token since the route is authenticated
|
def get_all_profiles(self, token):
    """GET the authenticated "all profiles" endpoint.

    The route requires authentication, so the JWT *token* is sent in the
    Authorization header as a Bearer credential.
    """
    auth_header = 'Bearer ' + token
    return self.client.get(
        "/api/profiles/",
        HTTP_AUTHORIZATION=auth_header,
        format="json",
    )
|
def callback():
    """OAuth redirect endpoint: exchange the authorization code for a token, then send the user to the profile page."""
    oauth_app = _get_app()
    oauth_app.authorize_access_token()
    return redirect(url_for('.profile'))
|
Returns the luminance of a pixel, which indicates its subjective brightness. This implementation uses the NTSC formula.
|
def luminance(pixel):
    """Perceived brightness of *pixel* using the NTSC luma weights (0.299 R, 0.587 G, 0.114 B), rounded to an int."""
    red = GImage.getRed(pixel)
    green = GImage.getGreen(pixel)
    blue = GImage.getBlue(pixel)
    return round(0.299 * red + 0.587 * green + 0.114 * blue)
|
def __call__(self, image: np.ndarray) -> np.ndarray:
    """Scale *image* so its maximum becomes 1.

    In "luminance" mode the scale is derived from the peak luminance
    (note: `get_lumianance` is spelled as in the hdrpy API used here);
    otherwise from the peak pixel value.
    """
    if self.mode == "luminance":
        peak = np.amax(hdrpy.get_lumianance(image))
    else:
        peak = np.amax(image)
    return multiply_scalar(image, 1. / peak)
|
Run queued jobs. Since we've already sent the jobs to the PBS job system, don't do anything.
|
def run_queued_jobs(self):
    """No-op: queued jobs were already submitted to the PBS job system, so there is nothing to run locally."""
    pass
|
def execute_queue(self):
    """Execute the queue by delegating to the legacy execution path."""
    # NOTE(review): presumably a thin shim kept so callers can use the new
    # name while the old implementation remains — confirm before removing.
    self.legacy_queue_execution()
|
Test _check_cache_for_egg checks the cache for the egg, returns path if present locally, or None if not.
|
def test_03_check_cache_for_egg(self):
    """_check_cache_for_egg returns the local path for a cached egg and None for an unknown one.

    _get_remote_egg(EGG) is called first so the egg is present in /tmp.
    """
    # Cleanup on isle one!
    launcher = ZMQEggDriverProcess("DUMMY_VAL")
    self.assertEqual(launcher._get_remote_egg(EGG), "/tmp/%s" % EGG)
    self.assertEqual(launcher._check_cache_for_egg("NOT_FOUND_EGG"), None)
    self.assertEqual(launcher._check_cache_for_egg(EGG), "/tmp/%s" % EGG)
|
def get(cache_dir=CACHE_DIR):
    """Return the first value stored in <cache_dir>/pyc, or the string "None" when the cache file is missing."""
    cache_file = os.path.join(cache_dir, "pyc")
    if not os.path.isfile(cache_file):
        return "None"
    return read_file(cache_file)[0]
|
๋ถํ์ ์ง๋์์ ์ด๋์ ์์นํด ์์ต๋๊น?
|
๋ถํ ์ง๋๋ ๋ถํ์ด ๋ ํ๋ง๋ผ์ผ ์ฐ๋งฅ์ ๋์ชฝ์ ์์นํ ๋ด๋ฅ ๊ตญ๊ฐ์์ ๋ณด์ฌ์ค๋๋ค. ๋ถํ์ ๋ถ์ชฝ์ผ๋ก๋ ์ค๊ตญ๊ณผ, ๋จ์ชฝ์ผ๋ก๋ ์ธ๋์ ๊ตญ์ ๊ฒฝ๊ณ๋ฅผ ๊ณต์ ํ๊ณ ์์ต๋๋ค.
|
๋ถํ ๋ฐ ์ธ์ ๊ตญ๊ฐ์ ์์ธ ์ง๋. ๊ตฌ๊ธ ๋งต์ค ๋ถํ ์์น ๋ชฉ๋ก์ ์ค์ ๊ฒ์ ํ์ํฉ๋๋ค. ๊ตฌ๊ธ ๋งต์ค ๊ด๊ด์ด ์๋ฏธ ์๋ ๊ณณ์ ์ค์ ๊ฒ์ ํ์ํฉ๋๋ค! ์ข
ํฉ์ ์ธ ๋ชฉ์ ์ง ์๋ด์์ ํจ๊ป maplandia.com์ ์์ธํ ์์ฑ ์ด๋ฏธ์ง๋ฅผ ํตํด ๋ถํ์ ํํํ ์ ์๋๋ก ํ์ฌ, ๊ทธ ์ด๋ ๋๋ณด๋ค ๋น ๋ฅด๊ณ ์ฝ๊ฒ ๊ฐ๋ฅํฉ๋๋ค.
|
๋๋๊น์น, ์๋ค, ์ค์ง์ด๋ฌด๋ง๋ญ์ด๊น์น
|
๊ทธ๊ฒ์ ์ข
๋ฅ๋ก๋ ๋๋๊น์น, ์ค์ง์ด๋ฌด๋ง๋ญ์ด๊น์น ๋ฑ์ด ์๋ค.
|
์ด๊ณณ์ ๋ฐฅ์์ ๊น์น์ ์ฐ๊ฑฐ์ง๊ตญ, ๊ทธ๋ฆฌ๊ณ ๋ฌด๋ง๋ญ์ด ๋ฒ๋ฌด๋ฆฐ ๊ฒ๋ฟ์ด์๋ค.
|
1985๋
๋ถํฐ 1997๋
๊น์ง ๋ํ์๊ถํํ์ฅ์ ์ ๋ชฝ๊ตฌ ํ๋์ฐจ๊ทธ๋ฃน ํ์ฅ์ด ๋งก์๊ณ 2005๋
๋ถํฐ ์ฌํ๊น์ง๋ ์ ์์ ๋ถํ์ฅ์ด ๋งก๊ณ ์๋ ์ค์ด๋ค.
|
1985๋
๋ถํฐ 1997๋
๊น์ง ๋ํ์๊ถํํ์ฅ์ ์ ๋ชฝ๊ตฌ ํ๋์ฐจ๊ทธ๋ฃน ํ์ฅ์ด์๊ณ 2005๋
๋ถํฐ ์ง๊ธ๊น์ง๋ ์ ์์ ๋ถํ์ฅ์ด๋ค.
|
1983๋
๋ถํฐ 1985๋
๊น์ง ์ฌ์ํ ์ด๋ ์๊ถํํ์ฅ์ ์ ๋ชฝ์ค ์์ฐ์ฌ๋จ ์ด์ฌ์ฅ์ด์๊ณ ๋ค์ด์ด ์ ๋ชฝ๊ตฌ ํ์ฅ์ด ๋ค ๋ฒ ํ์ฅ์ด ๋์๋ค.
|
Remove slave(s) from and existing zone (multiroom). Zone must already exist and slaves array cannot be empty.
|
def remove_zone_slave(self, slaves):
    """Remove *slaves* from the existing multiroom zone mastered by this device.

    Logs a warning and does nothing when *slaves* is empty.
    """
    if not slaves:
        _LOGGER.warning("Unable to find slaves to remove")
        return
    _LOGGER.info(
        "Removing slaves from zone with master %s", self._device.config.name
    )
    # SoundTouch API seems to have a bug and won't remove slaves if there
    # are more than one in the payload, so each slave is removed with its
    # own request.
    for candidate in slaves:
        # Never try to remove the master (i.e. the current device) itself.
        if candidate.entity_id != self.entity_id:
            self._device.remove_zone_slave([candidate.device])
|
def get_redis_slave_nodes():
    """Container groups in the ACI subscription whose names match the Redis-slave naming pattern."""
    client = _get_aci_client()
    matches = []
    for group in client.container_groups.list():
        if _REDIS_SLAVE_REGEX.match(group.name):
            matches.append(group)
    return matches
|
Stitch a series of images into an animation. Currently supports animated gifs, other formats coming as needed.
|
def stitch_to_animation(images, outpath=None, duration=0.5, palettesize=256,
                        verbose=True):
    """Stitch a series of image files into an animated gif.

    Parameters
    ----------
    images : list of str
        Frame file paths, in playback order.
    outpath : str, optional
        Output path; defaults to the first frame's path with a ``.gif`` suffix.
    duration : float
        Per-frame duration in seconds.
    palettesize : int
        Gif palette size.
    verbose : bool
        Print timing information on success.

    Returns
    -------
    str or None
        The output path, or None if writing failed.
    """
    # imageio is an optional dependency; point at its homepage on failure.
    try:
        import imageio
    except ImportError:
        raise ImportError(
            'WrightTools.artists.stitch_to_animation requires imageio - https://imageio.github.io/')
    # parse filename
    if outpath is None:
        outpath = os.path.splitext(images[0])[0] + '.gif'
    # write
    try:
        t = wt_kit.Timer(verbose=False)
        with t, imageio.get_writer(outpath, mode='I', duration=duration,
                                   palettesize=palettesize) as writer:
            for p in images:
                image = imageio.imread(p)
                writer.append_data(image)
    except Exception:
        # Fix: the original caught BaseException, which would also swallow
        # KeyboardInterrupt and SystemExit; catch only genuine errors.
        print('Error: {0}'.format(sys.exc_info()[0]))
        return None
    # finish
    if verbose:
        interval = np.round(t.interval, 2)
        print('gif generated in {0} seconds - saved at {1}'.format(interval, outpath))
    return outpath
|
def dream_seq(self):
    """Run deep-dream over an image sequence, warping each dreamed frame onto the next via optical flow.

    The first frame is dreamed directly; every following frame blends the
    previous dreamed output with the new frame using the optical-flow mask
    from get_opflow_image, so the dream pattern follows the motion.
    Side effects: saves each dreamed frame and decrements
    self.config["num_iterations"] once after the first frame.
    """
    for i, path in enumerate(self.img_list[self.config["start_position"] :]):
        if i == 0:
            # Seed frame: dream it with no mask.
            img1 = Image.open(path)
            d_img = self.deep_dream(
                self.transform(img1), self.model, i, seq="first"
            )
            self.save_img(d_img, "", i)
            d_img = convert(d_img)
            flow_iter = 0
            # the iterations needs to be reduced
            self.config["num_iterations"] -= 5
        if i > 0:
            img2 = Image.open(path)
            # Warp the previous dreamed frame onto the new frame; the mask
            # separates moving features from static background.
            feature_img, background_masked = self.get_opflow_image(
                img1, d_img, img2
            )
            feature_img = np.clip(feature_img, 0, 255)
            # flow_iter (0..5) cross-fades mask weights between frames.
            background_masked[background_masked > 0] = 1 - (flow_iter * 0.1) # 0.5
            background_masked[background_masked == 0] = flow_iter * 0.1
            d_img = self.deep_dream(
                self.transform(np.uint8(feature_img)),
                self.model,
                i,
                seq="first",
                mask=background_masked,
            )
            # change position
            img1 = img2
            self.save_img(d_img, "", i)
            d_img = convert(d_img)
            flow_iter += 1
            flow_iter = 0 if flow_iter > 5 else flow_iter
|
Receives OCR text, extracts product. Text could contain garbage or wrong reads. >>> extract_product_from_text(''' ... d J
|
def extract_product_from_text(text):
    """Parse OCR *text* into a product record.

    The text may contain garbage or misreads; each field is pulled out with
    a tolerant regex via extract_ocr_pattern. Vendor fields are fixed.
    """
    field_patterns = {
        'ITEMNO': r'ITEM ?ID(.*)DESC',
        'DESCRIPTION': r'DESC(.*)UPC',
        'UPC': r'UPC ?NO(.*)WEIGHT',
        'COST': r'PRICE(.*?)\n',
    }
    product = {'VENDORID': 1, 'VENDOR': 'example'}
    for field, pattern in field_patterns.items():
        product[field] = extract_ocr_pattern(pattern, text)
    return product
|
def extract_text(self, input_file):
    """Parse *input_file* as XML and run extract_element_info over every element.

    On an XML syntax error: continue (return early) when self.force is set,
    otherwise terminate the process with exit code -1. KeyErrors raised for
    individual elements are ignored so one bad element doesn't stop the run.
    """
    logging.info("Extracting text from file " + input_file)
    try:
        tree = etree.parse(input_file)
    except etree.XMLSyntaxError:
        logging.error("XML syntaxerror in file " + input_file)
        if self.force:
            return
        else:
            # Hard-exits the whole process, not just this call.
            sys.exit(-1)
    root = tree.getroot()
    for element in root.iter():
        try:
            self.extract_element_info(element, input_file)
        except KeyError:
            pass
|
๋๋ง์ด๊ฐ ์๋ณ์ ์ผ๋ง๋ ์ค๋ ๋จ์ ์๋์ง
|
์๋ณ์ ๋ง๋ฆฌํ๋๋ฅผ ๋จ ํ ๋ฒ ์ฌ์ฉํ ํ ์ต๋ 8์ผ ๋์ THC ๋์ฌ์ฐ๋ฌผ์ธ THC-COOH์ ์กด์ฌ๋ฅผ ๋ํ๋
๋๋ค. ์๋ณ ๊ฒ์ฌ ์ ํ๋๋ ์ฝ ์ผ์ฃผ์ผ ํ ๊ฐ์ํ์ง๋ง, ๋ง์ ๋น๋ฒํ๊ฑฐ๋ ๊ณผ๋ํ ๋ง๋ฆฌํ๋ ์ฌ์ฉ์๋ค์ ๊ฐ๊ฐ ์ต๋ 15์ผ ๋ฐ 90์ผ ๋์ ์์ฑ ๋ฐ์์ ๋ณด์ผ ์ ์์ต๋๋ค.
|
THC๊ฐ ์๋ณ์ ์ผ๋ง๋ ์ค๋ ๋จ์ ์๋์ง๋ ๊ฐ์ธ์ ๋ฐ๋ผ ๋ค๋ฆ
๋๋ค. ์ฒด์ค๊ณผ ์ด์ ์ ๋ง๋ฆฌํ๋ ๋
ธ์ถ์ ํ
ํธ๋ผํ์ด๋๋ก์นด๋๋น๋(Tetrahydrocannabinol)์ด ์ฒด๋ด์ ์ผ๋ง๋ ์ค๋ ๋จ์ ์์ ์ ์๋์ง๋ฅผ ๊ฒฐ์ ํ๋ ์ฃผ์ ์์ธ ์ค ์ผ๋ถ์
๋๋ค.
|
Setter method to set the next turn. Calls get turn and sets opposite player to next turn.
|
def set_next_turn(self):
    """Advance the turn to the opposite player: 'B' becomes 'W' and vice versa."""
    self._turn = 'W' if self.get_turn() == 'B' else 'B'
|
def set_next(self,next_class: 'Class') -> None:
    """Record *next_class* as the class that follows this one."""
    self.next = next_class
|
Return the second element of the given list. If the list has no second element, return None.
|
def select_second(L):
    """Second element of the sequence *L*, or None when it has fewer than two elements."""
    try:
        return L[1]
    except IndexError:
        return None
|
def _return_elem_or_list(self, args):
    """Unwrap a single-element sequence to its element; return longer sequences unchanged."""
    if len(args) == 1:
        return args[0]
    return args
|
Clear only parameter values, not the list of managed parameters. All parameters will be set to None.
|
def clear_parameter_values(self):
    """Set every managed parameter's value to None, keeping the set of managed names intact."""
    # In-place update (not a rebind) so other references to the dict stay valid.
    self.managed_parameters.update(dict.fromkeys(self.managed_parameters))
|
def unfreeze_parameters(self):
    """Allow parameter values to be modified again by clearing the frozen flag."""
    self.are_parameters_frozen = False
|
์ฐธ์ฌ์, ์ข
์ด, ๋ฉ์์ง, ๋จ๊ธฐ๋ค, ์์
|
๋ชจ๋ ์ฐธ์ฌ์๋ ์ฃผ์ด์ง ์ข
์ด์ ์์์ ๋ฉ์์ง๋ฅผ ๋จ๊ธด๋ค.
|
์ฌ์ฃผ๋ผ๋ ๊ฒ์ ์์ , ์์
, ํ์๊ธฐ, ๊ธ์ฐ๊ธฐ, ๋งํ๊ธฐ, ๊ณ์ฐํ๊ธฐ๋ค.
|
์ ํ TV ์ํฐํ
์ธ๋จผํธ ์ฑ๋์ ๋ช ๊ฐ์
๋๊น?
|
2007๋
์ ์ฒ์ ์ถ์๋ ์ดํ, ์ ํ TV๋ ๋จ์ผ ๋ฐฉ์ก ๋คํธ์ํฌ์์๋ถํฐ ๊ฑฐ์ 20๊ฐ์ ํ๊ตญ TV ๋คํธ์ํฌ๋ฅผ ํ๋์ ์ฑ๋๋ก ์ง๊ณํ ๊ฒ๊น์ง, ์ฝ 50๊ฐ์ "์ฑ๋" ๋๋ ์ ํธํ๋ ์ฉ์ด์ธ ์ฑ์ ๋ณด์ ํ๊ณ ์์ต๋๋ค.
|
ํธ์ฃผ ๋ด ์ธ๋ TV ๋ฐ ๋ผ๋์ค ์ฑ๋. ์ธ๋ ๋๋ผ๋ง, ์์
๋ฐ ์ฝ๋ฏธ๋ ์ผ ๋ฑ์ ๋ชจ๋ ์ธ๋์ธ์ ๋ง์์ ๊ฐ๊น์ต๋๋ค. ์ฐ๋ฆฌ๋ ์ธ๋ TV ์ฑ๋๊ณผ ๋ผ๋์ค๋ฅผ ํตํด ์ธ๋์ ์ฐ๊ฒฐ๋์ด ์๋ค๊ณ ๋๋๋๋ค. ์ธ๋ ์ํฐํ
์ธ๋จผํธ์์ ์ฐ๊ฒฐ์ ์ ์งํ๊ธฐ ์ํด ํธ์ฃผ์์ ์ธ๋ TV ์ฑ๋ ๋ฐ ๋ผ๋์ค๋ฅผ ์ป๋ ๋ฐฉ๋ฒ์ ๋ํ ํฌ๊ด์ ์ธ ๋ชฉ๋ก์ ํ์ธํ์ญ์์ค: ์ธ๋ TV ์ฑ๋ ์ป๊ธฐ: Yupptv: ํธ์ฃผ์์ YuppTV๋ฅผ ํตํด ์ ๊ณต๋๋ ์ธ๋ TV ์ฑ๋.
|
๊ฐ๋ฉด๋ผ์ด๋ ๋์ผ์ด๋์ ํ์ด๊ธฐ๊ฐ ๋ญ์ผ
|
๋ํ ์ปดํ๋ฆฌํธ ํผ ์ํ์์ ๋ค๋ฅธ ๋ผ์ด๋๋ฅผ ์ํํ ๋ค ์ด ์นด๋๋ฅผ ์ฌ์ฉํ๋ฉด, ๋์ผ์ด๋๋ ๋ผ์ด๋ ๋ถ์ปค๋ฅผ ํ์ฉํด์ ์์ ๊ณผ ๋๊ฐ์ด ์์ง์ด๋, ์ํ๋ ๋ผ์ด๋์ ํจ๊ป, ์ํ๋ ๋ผ์ด๋์ ์ต์ข
ํํ๊ฐ ์ฌ์ฉํ๋ ํ์ด๊ธฐ๋ฅผ ์์ ํ๋ค. ๋์ผ์ด๋๋ ๋๋ฉ์
ํฅ ์ด๋ผ๋ ๋ผ์ด๋ ํฅ์ ํ์ด๊ธฐ๋ก ์ฌ์ฉํ๋ค. ๋ผ์ด๋ ๋ถ์ปค ์๋ ๋ชจ๋์์๋ ์ ์ ํ๊ป ๋ฒ ๋ ๋๋ฉ์
์ฌ๋์ ๋ฅผ ์ฌ์ฉํ ์ ์๋ค. ๋ํ ๊ฑด ๋ชจ๋์์๋ ์ ์๊ฒ ๊ฐ๋ ฅํ ์๋์งํ์ ์๋ ๋๋ฉ์
๋ธ๋์คํธ ๋ฅผ ์ฌ์ฉํ ์ ์๋ค. ๊ฐ ํ์ด๊ธฐ๋ฅผ ์ฌ์ฉํ ๋๋ง๋ค ๋์ผ์ด๋์ ํ์ด๋ ์ดํ ๋ผ์ด๋ ์นด๋๋ฅผ ํ์ํํ ํ์๋ค์ด ์ ์ ํฅํด ์ ๊ฐ๋๋ค. ํ์ด๋ ๊ฐ๋ฉด๋ผ์ด๋ ๋จ๋ง K-ํฐ์น ๋ ํฐ์น(ํฐ์น์คํฌ๋ฆฐ)ํฐ(ํด๋ํฐ)์ ์ผ์ข
์ผ๋ก, ๊ฐ๋ฉด๋ผ์ด๋ ๋์ผ์ด๋์ ๊ฐํ ๋ณ์ ์ฅ๋น์ด๋ค. ๋ค๊ฑฐ๋นํธ์ ์ธ๊ณ(๊ฐ๋ฉด๋ผ์ด๋ ๋์ผ์ด๋์ ์๋ก์ด ์ธ๊ณ์ ๋ฑ์ฅ ์ธ๋ฌผ#๋ค๊ฑฐํฐ๋ธ์ ์ธ๊ณ)์์ ์ธ ์นด์ฌ๊ฐ ์์ ๋ฃ์๋ค.
|
์ ํค ์กฐ์ง๊ฐ ๋ณ์ ํ๋ ๋ผ์ด๋๋งจ์ ๊ทน์ฅํ์์ ๋ชจ๋ ๋ผ์ด๋๋ค์ด ๋ํ๋ ๋ ๊ฐ์ด ๋ฑ์ฅํ์ง๋ง, ๋ณ์ ํ๋ ์ฅ๋ฉด์ ๋ฌ์ฌ๋์ง ์์๋ค. ์ ํค ์กฐ์ง์ ์ธ๊ฐ ๋ชจ์ต์ ๊ฐํธ๊ฐ ์ฐ๊ธฐํ์ผ๋ฉฐ, "The Next Decade"์์๋ ์ฐ๊ธฐํ๋ค. ๋์ผ์ปค๋ ์๋ง์ ์
์ ๊ฒฐ์ฌ๊ฐ ์งํฉํ ๊ฑฐ๋ํ ์
์ ์กฐ์ง์ด๋ค. ๋ก๊ณ ๋ ์๋์ ์ผ์ปค ๋ก๊ณ ์ ๋์ผ์ด๋๋ฅผ ์์งํ๋ "DCD"๊ฐ ์ถ๊ฐ๋ ํํ์ด๋ค. ์ฌ์ ๋ฐ์ฌ(๊ฐ๋ฉด๋ผ์ด๋ ๋์ผ์ด๋์ ๋ฑ์ฅ ์ธ๋ฌผ#ํ์นด๋ฆฌ ์์ด์ง๋ก), ์ํด๋ก ๊ฐ์ด์คํธ(๊ฐ๋ฉด๋ผ์ด๋ ๋์ผ์ด๋์ ๋ฑ์ฅ ์ธ๋ฌผ#์ํด๋ก ๊ฐ์ด์คํธ) ์ธ ์ญ๋ ๊ฐ๋ฉด๋ผ์ด๋ ์๋ฆฌ์ฆ์ ์
๋น๊ณผ ๊ดด์ธ๋ค์ด ๋ชจ์ฌ ์๋ค. ์ง์ฅ๋์ฌ๋ ๊ฐ๋ฉด๋ผ์ด๋์ ์ธ๊ณ(๊ฐ๋ฉด๋ผ์ด๋)์์ ์จ, ์ผ์ปค(์ผ์ปค (๊ฐ๋ฉด๋ผ์ด๋))์ ๊ฐ๋ถ์ด๋ค. ๋ฐฉ์ธ๋ฑ ํํ์ ๊ฐ๋ผ๊ฐ๋๋ค๋ผ๋ ๊ดด์ธ์ผ๋ก ๋ณ์ ํ ์ ์๋ค.
|
Public method to get the word at the current position. the word at that current position (string)
|
def getCurrentWord(self):
    """Public method: return the word (string) at the current cursor position."""
    cursor_line, cursor_index = self.getCursorPosition()
    return self.getWord(cursor_line, cursor_index)
|
def echoPreviousWord(self, obj, offset=None):
    """Speak the word that was just completed in *obj*'s text.

    Triggered after a word delimiter is typed: starting from *offset*
    (default: one before the caret), scan backwards to find the word
    boundary, then speak the word with a voice chosen by context
    (hyperlink, all-uppercase, or default). Returns silently when the
    object has no text interface or no previous word exists.

    NOTE(review): word.decode("UTF-8") implies Python 2 byte strings here.
    """
    try:
        text = obj.queryText()
    except NotImplementedError:
        return
    if not offset:
        offset = text.caretOffset - 1
    if (offset < 0):
        return
    # The character at `offset` must itself be a delimiter — that is what
    # signals a word was just finished.
    [char, startOffset, endOffset] = \
        text.getTextAtOffset( \
            offset,
            pyatspi.TEXT_BOUNDARY_CHAR)
    if not self.utilities.isWordDelimiter(char):
        return
    # OK - we seem to be cool so far. So...starting with what
    # should be the last character in the word (caretOffset - 2),
    # work our way to the beginning of the word, stopping when
    # we hit another word delimiter.
    #
    wordEndOffset = offset - 1
    wordStartOffset = wordEndOffset
    while wordStartOffset >= 0:
        [char, startOffset, endOffset] = \
            text.getTextAtOffset( \
                wordStartOffset,
                pyatspi.TEXT_BOUNDARY_CHAR)
        if self.utilities.isWordDelimiter(char):
            break
        else:
            wordStartOffset -= 1
    # If we came across a word delimiter before hitting any
    # text, we really don't have a previous word.
    #
    # Otherwise, get the word. Remember we stopped when we
    # hit a word delimiter, so the word really starts at
    # wordStartOffset + 1. getText also does not include
    # the character at wordEndOffset, so we need to adjust
    # for that, too.
    #
    if wordStartOffset == wordEndOffset:
        return
    else:
        word = self.utilities.\
            substring(obj, wordStartOffset + 1, wordEndOffset + 1)
        if self.utilities.linkIndex(obj, wordStartOffset + 1) >= 0:
            voice = self.voices[settings.HYPERLINK_VOICE]
        elif word.decode("UTF-8").isupper():
            voice = self.voices[settings.UPPERCASE_VOICE]
        else:
            voice = self.voices[settings.DEFAULT_VOICE]
        word = self.utilities.adjustForRepeats(word)
        speech.speak(word, voice)
|
Wraps an agent's program function and prints the inputs/outputs, so you can watch it in its environment.
|
def trace_agent(agent:'agent') -> 'agent':
    """Wrap agent.program so every percept and the resulting action are printed, then return the (mutated) agent."""
    inner = agent.program
    def traced(percept):
        action = inner(percept)
        print('{} perceives {} and does {}'.format(agent, percept, action))
        return action
    agent.program = traced
    return agent
|
def foo(args):
    """Command handler that emits "Foo!" via the module's `output` helper and returns exit code 0. *args* is unused."""
    output("Foo!")
    return 0
|
Returns the index ID of the column. The structure of the CSV is defined in the configuration.
|
def __get_column_id(element: str) -> Optional[int]:
    """Index of *element* within the configured CSV headers.

    Returns None (now explicit rather than implicit fall-through) when the
    column is not present in config.csv_headers. The manual index counter
    with its redundant `else` branch is replaced by enumerate.
    """
    for idx, col in enumerate(config.csv_headers):
        if col == element:
            return idx
    return None
|
def ts_id_col(self):
    """Configured time-series id column(s); falls back to ["ts_col_1"] when unset or empty."""
    if self._ts_id_col:
        return self._ts_id_col
    return ["ts_col_1"]
|
Representative content for a taxon. Will throw an error if the file it needs does not exist
|
def representative_content_for_taxon(taxon):
    """Load the representative 'combined_text' entries for *taxon*.

    Raises FileNotFoundError when the representative-content CSV for the
    taxon has not been generated yet.
    """
    # TODO: What to do about test/train content?
    representative_content_path = RepresentativeContent.path_for_representative_content(taxon)
    if not (os.path.exists(representative_content_path) and os.path.isfile(representative_content_path)):
        raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), f"Representative content file for {taxon.unique_title()} has not been generated, should be at: {representative_content_path}")
    frame = pd.read_csv(representative_content_path, low_memory=False)
    return frame['combined_text'].to_list()
|
def usage():
    """Return the full contents of ./usage.txt (relative to the current working directory)."""
    with open('./usage.txt') as handle:
        return handle.read()
|
๊ทธ๋ฆฐ์ค๋ณด๋ก์ ๋ ์จ๋ ์ด๋ค๊ฐ์?
|
๋
ธ์ค์บ๋กค๋ผ์ด๋ ๋ ์จ > ๊ทธ๋ฆฐ์ค๋ณด๋ก ๋ ์จ. ๊ทธ๋ฆฐ์ค๋ณด๋ก, NC์ ๊ธฐํ๋ ์ฌ๋ฆ์ ๊ธฐ์จ์ด 70๋๋์ ์ด๋ฅด๋ ๋ฐ๋ปํ ๋ ์จ๋ฅผ ๋ณด์ด๋ฉฐ, ๊ฒจ์ธ์๋ ๊ธฐ์จ์ด 30๋๋์ ์ด๋ฅด๋ ๋งค์ฐ ์ถ์ด ๋ ์จ๋ฅผ ๋ณด์
๋๋ค.
|
๊ทธ๋ฆฐ์ค๋ณด๋ก ๋ ์จ ํ๊ท ๋ฐ ๊ธฐํ ๊ทธ๋ฆฐ์ค๋ณด๋ก, ๋
ธ์ค์บ๋กค๋ผ์ด๋. ์๋ณ ์จ๋, ๊ฐ์๋ ๋ฐ ์ผ์กฐ ์๊ฐ. ๊ฐ์๋ ๋ฐ์ดํฐ, ์จ๋ ๋ฐ ์ ์๊ฐ์ ๋ณด์ฌ์ฃผ๋ ๊ธฐํ ๊ทธ๋ํ. ํ๊ท ๋ ์จ ๊ทธ๋ฆฐ์ค๋ณด๋ก, NC.
|
Tests that newly created story cells decompose into concept cells, matching the concepts appearing in the text.
|
def test_cells_concept_extraction(self):
    """A newly added story decomposes into ConceptCells for the concepts appearing in its text.

    Adds two agents, has one read a story mentioning "EveryBlock" and
    "Glenn", processes the society, then asserts the concepts were created
    and linked to the expected recipient.
    """
    # verify that the expected concepts do not exist
    self.assertEquals(0, ConceptCell.objects.filter(name="EveryBlock").count(), "Expected that the concept EveryBlock does not exist in the test fixtures")
    self.assertEquals(0, ConceptCell.objects.filter(name="Glenn").count(), "Expected that the concept Glenn Ford does not exist in the test fixtures")
    # add agents
    self.society_cell.add_agent("Bill", "Bill", DATASOURCE_TYPE_TWITTER)
    self.society_cell.add_agent("Al", "Al", DATASOURCE_TYPE_TWITTER)
    # fetch agents
    query = AgentCell.objects.filter(user__user_name="Bill")
    self.assertTrue(query.count() > 0, "Agent wasn't created")
    bill = query[0]
    query = AgentCell.objects.filter(user__user_name="Al")
    self.assertTrue(query.count() > 0, "Agent wasn't created")
    al = query[0]
    # add stories
    al.add_read_story("RT @yahoo RE YQL, EveryBlock API: were working \w senator Glenn Ford on all the details", [bill.user])
    self.society_cell.process()
    # look up the concepts: YQL, EveryBlock, Glenn Ford
    self.assertEquals(1, ConceptCell.objects.filter(name="EveryBlock").count(), "Expected that the concept EveryBlock would be extracted from the added story")
    self.assertEquals(1, ConceptCell.objects.filter(name="Glenn").count(), "Expected that the concept Glenn Ford would be extracted from the added story")
    # get the concepts
    everyblock = ConceptCell.objects.filter(name="EveryBlock")[0]
    glenn = ConceptCell.objects.filter(name="Glenn")[0]
    # test concept recipient
    self.assertTrue(everyblock.recipients.all().count() > 0, "Expected to find reciipients for the new concepts")
    self.assertEquals(al.user, everyblock.recipients.all()[0], "Expected to find a specific recipient of the concept")
    self.assertTrue(glenn.recipients.all().count() > 0, "Expected to find reciipients for the new concepts")
    self.assertEquals(al.user, glenn.recipients.all()[0], "Expected to find a specific recipient of the concept")
|
def test_trait_count_12(self):
    """The table's trait_count cell shows a comma-formatted count after 12 traits are attached to a dataset."""
    things = self.model_factory.create_batch(20, source_study_version__i_is_deprecated=False)
    table = self.table_class(things)
    row = table.rows[0]
    n_traits = 12
    factories.SourceTraitFactory.create_batch(
        n_traits, source_dataset=row.record)
    # Remake the table, to update trait counts.
    table = self.table_class(things)
    row = table.rows[0]
    self.assertEqual(row.get_cell('trait_count'), '{:,}'.format(n_traits))
|
Given an m x n matrix zero the rows and columns that have a 0 in it.
|
def zero_rows_columns(matrix):
    """Zero out, in place, every row and column of *matrix* that contains a 0.

    Returns the same (mutated) matrix. Two passes: first record which
    rows/columns contain a zero, then clear them — recording and clearing
    in one pass would cascade incorrectly.

    Improvements over the original: sets replace lists for the seen
    rows/columns (O(1) membership, no duplicate entries), and each row is
    iterated at its own length instead of assuming row 0's width.
    """
    zero_rows = set()
    zero_cols = set()
    for i, row in enumerate(matrix):
        for j, value in enumerate(row):
            if value == 0:
                zero_rows.add(i)
                zero_cols.add(j)
    for i, row in enumerate(matrix):
        for j in range(len(row)):
            if i in zero_rows or j in zero_cols:
                row[j] = 0
    return matrix
|
def find_empty(board):
    """(row, col) of the first 0 cell in row-major order, or None when the (rectangular) board has no empty cell."""
    for r, row in enumerate(board):
        for c, cell in enumerate(row):
            if cell == 0:
                return r, c
    return None
|
๋ฏธ๊ตญ ๊ณต๊ตฐ ์ฌ๊ดํ๊ต๋ ์ด๋์ ์์ต๋๊น?
|
ํฌ๋ธ์ค ๋ฆฌ์คํธ. ํ๋กํ. ๋ฏธ๊ตญ ๊ณต๊ตฐ ์ฌ๊ดํ๊ต๋ ์ฝ๋ก๋ผ๋ ์คํ๋ง์ค, CO์ ์์นํ ํ์ ์ฌ๊ด์๋ค์ ์ํ ๊ตฐ์ฌ ์๋น์ค ์์นด๋ฐ๋ฏธ๋ก, ๊ณต๊ตฐ ๊ธฐ์ง๋ก๋ ๊ธฐ๋ฅํฉ๋๋ค. ๊ณต๊ตฐ์ ๊ณตํ ํ๋ก๊ทธ๋จ์ผ๋ก ์ ๋ช
ํ๋ฉฐ, ์ด ํ๊ต๋ ๊ตญ๊ฐ ์๋ณด ์ฐ๊ตฌ์์ ํ๋ ฅํ์ฌ ๊ด๋ฒ์ํ ์ฐ๊ตฌ ๊ธฐํ์ ๋ณด์กฐ๊ธ์ ์ ๊ณตํฉ๋๋ค.
|
๋ฏธ๊ตญ ๊ณต๊ตฐ ์ฌ๊ดํ๊ต. ๋ฏธ๊ตญ ๊ณต๊ตฐ ์ฌ๊ดํ๊ต(USAFA ๋๋ ๊ณต๊ตฐ ์ฌ๊ดํ๊ต๋ก๋ ์๋ ค์ง)๋ ๋ฏธ๊ตญ ๊ณต๊ตฐ์ ์ฅ๊ต ํ๋ณด์์ ์ํ ๊ตฐ์ฌ ํ๊ต์
๋๋ค. ์ด ์บ ํผ์ค๋ ๋ฏธ๊ตญ ์๋ถ ์ฝ๋ก๋ผ๋์ ์์นํ๊ณ ์์ผ๋ฉฐ, ์ํ์ ์นด์ดํฐ์ ์ฝ๋ก๋ผ๋ ์คํ๋ง์ค ๋ฐ๋ก ๋ถ์ชฝ์ ์์ต๋๋ค.
|
Disable Intan impedance testing Make sure DAQ is running, e.g. by calling startStreaming_boardSamples(), before calling this function.
|
def disableZCheck():
    """
    Disable Intan impedance (Z-check) testing by clearing Intan registers 5
    and 7 and then the command register. The DAQ must already be streaming
    (e.g. via startStreaming_boardSamples()) before calling this.

    Raw command words from the pre-_intanRegWrite implementation, kept for
    reference:
    cmdData_DACconfig = ((0x1 << 24) |
                         (0xFF << 16) |
                         (0b10000101 << 8) |
                         0) # clear register
    cmdData_DACchan = ((0x1 << 24) |
                       (0xFF << 16) |
                       (0b10000111 << 8) |
                       0) # clear register
    """
    ####
    cmds = []
    #cmds.append(DC.reg_write(DC.MOD_DAQ, DC.DAQ_CHIP_CMD, cmdData_DACconfig))
    cmds.append(_intanRegWrite(address=5, data=0))
    #cmds.append(DC.reg_write(DC.MOD_DAQ, DC.DAQ_CHIP_CMD, cmdData_DACchan))
    cmds.append(_intanRegWrite(address=7, data=0))
    #cmds.append(DC.reg_write(DC.MOD_DAQ, DC.DAQ_CHIP_CMD, 0)) # clear the CMD register
    cmds.append(_intanRegWrite(clear=True))
    resps = _controlCmdWrapper(cmds)
|
def test_reportexclude(self):
    """reportexclude: defaults to None, accepts strings of CDMSV option letters, rejects numbers and unknown letters.

    NOTE(review): uses `xrange`, so this test targets Python 2 — confirm.
    """
    # default
    self.assertIsNone(self.settings.reportexclude)
    # turn things off
    options = random.randrange(1, 4)
    value = ''.join([random.choice('CDMSV') for option in xrange(options)])
    self.settings.reportexclude = value
    self.assertEqual(' --reportexclude {0}'.format(value), str(self.settings))
    # bad exclusion (number)
    with self.assertRaises(CameraobscuraError):
        self.settings.reportexclude = random.randrange(100)
    # invalid option
    with self.assertRaises(CameraobscuraError):
        self.settings.reportexclude = random.choice('ABEFGHIJKLNOP')
    return
|
๋ง์ฐฐ๋ ฅ์ด ๊ฐํด์ ธ ์๋ฐ์ด ์์ค์ ์ ๋ฐฉ์ถํ ์ ์๊ฒ ๋จ๊ฑฐ์์ง๋ฉด, ์์ค์ ์ ๋ฌด์์ผ๋ก ๊ด์ฐฐ ๊ฐ๋ฅํ๊ฐ์?
|
๋ธ๋ํ
๊ฐ์ฐฉ์ ์ผ์ผํค๋ ์ฒ์ฒด๊ฐ ์ค์ฑ์๋ณ์ด๋ ๋ธ๋ํ์ผ ๊ฒฝ์ฐ, ๊ฐ์ฐฉ์๋ฐ ์์ชฝ์ ๊ฐ์ค๋ ๋ฐ์ง์ฑ ๋ณธ์ฒด์ ๋งค์ฐ ๊ฐ๊น์์ ธ ์์ฒญ๋ ๊ณ ์์ผ๋ก ํ์ ํ ๊ฒ์ด๋ค. ๊ทธ๋ฌ๋ฉด ๋ง์ฐฐ๋ ฅ์ด ๋๋ฌด ๊ฐํด์ ธ์ ์๋ฐ์ด ์ ์๊ธฐ ๋ณต์ฌ(์ฃผ๋ก ์์ค์ )๋ฅผ ๋ฐฉ์ถํ ๋งํผ ๋จ๊ฑฐ์์ง๋ค. ์ด๋ ๊ฒ ๋ฐ์ ์์ค์ ์๋ค์ ๋ง์๊ฒฝ์ผ๋ก ๊ด์ธก์ด ๊ฐ๋ฅํ๋ค. ๊ฐ์ฐฉ์๋ฐ์ ์์ค์ ๋ฐฉ์ถ์ ์ง๊ธ๊น์ง ์๋ ค์ง ๊ฐ์ฅ ์๋์ง ํจ์จ์ด ๋์ ๊ณผ์ ์ ์ํ๋ค. ํญ์ฑ ๋ด๋ถ ํต์ตํฉ์ด ์ง๋์ 0.7%๋ฅผ ์๋์ง๋ก ๋ฐฉ์ถํ๋ ๋ฐ๋ฉด, ๊ฐ์ฐฉ์๋ฐ์ ์ง๋์ 40%๋ฅผ ๋ณต์ฌ ์๋์ง๋ก ๋ฐฉ์ถ์ํจ๋ค. ๋ง์ ๊ฒฝ์ฐ ๊ฐ์ฐฉ์๋ฐ์ ๋ฐ์ง์ฑ์ ์๊ทน ๋ฐฉํฅ์ผ๋ก ๋ถ์ถ๋๋ ์์ฒญ๋ ์๋์ง์ ์๋๋ก ์ ์ ํธ๋ฅผ ์๋ฐํ๋ค. ์ด ์ ํธ๊ฐ ๋ง๋ค์ด์ง๋ ๊ณผ์ ์ ํ์ฌ๋ก์ ์์ ํ ์ดํด๋์ง ๋ชปํ๊ณ ์๋ค.
|
๋ธ๋ํ
์ฐฐ์ค ํ ๋จธ์ค ๋ณผํผ, ๋ฃจ์ด์ค ์น์คํฐ(Louise Webster), ํ์ธ ๋ฌด๋ฅด๋(Paul Murdin)์ด 1972๋
์ ์ต์ด์ ๊ฐ๋ ฅํ ๋ธ๋ํ ํ๋ณด ์ฒ์ฒด ๋ฐฑ์กฐ์๋ฆฌ X-1๋ฅผ ์ด ๋ฐฉ์์ผ๋ก ๋ฐ๊ฒฌํด๋๋ค. ๊ทธ๋ฌ๋ ๋๋ฐ์ฑ์ด ๋ธ๋ํ ํ๋ณด๋ณด๋ค ๋ ๋ฌด๊ฑฐ์ธ ์ ์๊ธฐ์ ์ด ๋ฐฉ๋ฒ์๋ ๋ค์์ ๋ถํ์ค์ฑ์ด ๋จ์ ์๋ค. ํ์ฌ๋ ์์ค์ ์์ฑ๊ณ ์ค์์๋ ์ผ์์ ์ฐ์์ค์ (X-ray transients)์ผ๋ก ๋ถ๋ฅ๋๋ ๊ณ๋ค์์ ๋ธ๋ํ ํ๋ณด๋ค์ด ๋ณด๋ค ๋ ์ ๋ฐ๊ฒฌ๋๋ค. ์ด ์ข
๋ฅ์ ์์ฑ๊ณ์์๋ ๋ฐ์ง์ฑ์ ๋๋ฐ์ฑ์ ์ง๋์ด ์๋์ ์ผ๋ก ์์ผ๋ฉฐ, ๊ทธ ๋๋ฌธ์ ๋ธ๋ํ์ ์ง๋์ ๋ณด๋ค ์ ํํ๊ฒ ์ถ์ฐํ ์ ์๋ค. ๋ํ ์ด๋ค ๊ณ๋ 10 ~ 50๋
๋์ ๋ถ๊ณผ ์ ๊ฐ์ ๋์๋ง ์์ค์ ์ ํ๋ฐํ๊ฒ ๋ฐฉ์ถํ๋ค. ์์ค์ ๋ฐฉ์ถ์ด ์ ์กฐํด์ง๋ ์๊ธฐ๋ฅผ ํด๋ฉด๊ธฐ(quiescence)๋ผ ํ๋ฉฐ, ์ด ๋ ๊ฐ์ฐฉ์๋ฐ์ด ๊ทน๋๋ก ์ด๋์์ ธ ๋๋ฐ์ฑ์ ์ธ๋ฐํ๊ฒ ๊ด์ธกํ ์ ์๊ฒ ๋๋ค. ์ด๋ฌํ ๋ธ๋ํ ํ๋ณด ์ค ๊ฐ์ฅ ์ ์ ํ ์๊ฐ ๋ฐฑ์กฐ์๋ฆฌ V404์ด๋ค.
|
Test that a ValueError gets raised when a required SignatureVerifyResponsePayload field is missing when encoding the struct.
|
def test_write_missing_validity_indicator(self):
    """Encoding a SignatureVerifyResponsePayload without its validity indicator raises ValueError."""
    payload = signature_verify.SignatureVerifyResponsePayload(
        unique_identifier='49a1ca88-6bea-4fb2-b450-7e58802c3038'
    )
    stream = utils.BytearrayStream()
    args = (stream, )
    self.assertRaisesRegex(
        ValueError,
        "Payload is missing the validity indicator field.",
        payload.write,
        *args
    )
|
def test_some_missing_data(self):
    """Registering with partially missing fields returns a Bad Request response that names the missing fields."""
    response = client.post(
        "/api/v1/register",
        data=json.dumps(some_missing),
        content_type='application/json;charset=utf-8')
    res_data = json.loads(response.get_data(as_text=True))
    assert 'Bad Request' in res_data['status']
    assert 'Missing data for required field.' in str(res_data['Message'])
|
Apply on a scalar field. If the argument is not a scalar field an error is raised.
|
def __call__(self, scalar_field):
    """Apply the commutator [v1, v2] to *scalar_field*: v1(v2(f)) - v2(v1(f)).

    The argument must be a scalar field; the vector fields raise otherwise.
    """
    forward = self.v1(self.v2(scalar_field))
    backward = self.v2(self.v1(scalar_field))
    return forward - backward
|
def alter_field(self, model, old_field, new_field, strict=False):
    """No-op placeholder for schema field alteration; data "upgrade" logic is not implemented yet."""
    # TODO : Logic to "upgrade" old data
    pass
|
solve_tsp solve the traveling salesman problem start with assignment model add cuts until there are no subcycles
|
def solve_tsp(V, c):
    """Solve the traveling salesman problem over nodes V with edge costs c.

    Starts from the degree-2 assignment relaxation and repeatedly adds
    subtour-elimination cuts until the solution is a single cycle; then
    tightens the variables to binary and re-solves. Returns (objective
    value, list of chosen edges). Requires gurobipy and networkx.
    """
    # Subtour elimination: add a cut for every disconnected component.
    # (Comment translated from the original Japanese.)
    def addcut(edges):
        G = nx.Graph()
        G.add_nodes_from(V)
        for (i, j) in edges:
            G.add_edge(i, j)
        Components = list(nx.connected_components(G))
        if len(Components) == 1:
            # Already one connected tour: no cut needed.
            return False
        for S in Components:
            model.addConstr(quicksum(x[i, j] for i in S for j in S if j > i) <= len(S) - 1)
        return True
    model = Model("tsp")
    x = {}
    for i in V:
        for j in V:
            x[i, j] = model.addVar(ub=1)
    model.update()
    # Degree constraints: every node touches exactly two tour edges.
    for i in V:
        model.addConstr(quicksum(x[j, i] for j in V if j < i) + quicksum(x[i, j] for j in V if j > i) == 2)
    model.setObjective(quicksum(c[i, j] * x[i, j] for i in V for j in V if j > i), GRB.MINIMIZE)
    EPS = 1.e-6
    while True:
        model.optimize()
        edges = []
        for (i, j) in x:
            if x[i, j].X > EPS:
                edges.append((i, j))
        if not addcut(edges):
            if model.IsMIP:
                break
            # LP relaxation is cut-free: switch to binary variables and re-solve.
            for (i, j) in x:
                x[i, j].VType = "B"
            model.update()
    return model.ObjVal, edges
|
def post_calc(self):
    """Classify the trajectory's Knudsen numbers into flow regimes.

    Populates self.regimes with index arrays for the free-molecular, transition,
    slip and continuum regimes, plus the transition indices between them, and
    returns [self.mfp, self.Kn, self.Re].
    """
    # self.mfp = fcl.mean_free_path(self.solver_T, self.solver_p,
    # self.atmosphere.d)
    # self.Kn = self.mfp / self.spacecraft.L
    ## self.Re = fcl.KnReMa(self.atmosphere.k, Kn=self.Kn,
    ## Ma=self.Ma)
    # self.Re = fcl.Reynolds(self.solver_rho, self.V, self.spacecraft.L,
    # self.solver_mu)
    # Continuum: 0 < Kn < 0.001
    # Slip: 0.001 <= Kn < 0.1
    # Transition: 0.1 <= Kn < 10
    # Free molecular: 10 < Kn
    self.regimes = placeholder()
    if len(np.argwhere(self.Kn > 10)) != 0:
        self.index_tran_freemol = np.argwhere(self.Kn > 10)[-1]
        self.regimes.free_molecular = np.argwhere(self.Kn >= 10)
    else:
        self.index_tran_freemol = None
    if len(np.argwhere(self.Kn > 0.1)) != 0:
        self.index_slip_tran = np.argwhere(self.Kn > 0.1)[-1]
        self.regimes.transition = np.argwhere((self.Kn < 10) & (self.Kn >= 0.1))
    else:
        self.index_slip_tran = None
    if len(np.argwhere(self.Kn > 0.001)) != 0:
        self.index_cont_slip = np.argwhere(self.Kn > 0.001)[-1]
        self.regimes.slip = np.argwhere((self.Kn < 0.1) & (self.Kn >= 0.001))
    else:
        self.index_cont_slip = None
    # NOTE(review): this else branch re-assigns index_cont_slip, which the
    # previous if/else already set — possibly a copy-paste slip; confirm
    # whether a different attribute was intended.
    if len(np.argwhere((self.Kn > 0) & (self.Kn <= 0.001))) != 0:
        self.regimes.continuum = np.argwhere((self.Kn < 0.001) & (self.Kn >= 0))
    else:
        self.index_cont_slip = None
    return [self.mfp, self.Kn, self.Re]
|
Get the auth time for the security context. Gets the auth time for the established security context.
|
def krb5_extract_authtime_from_sec_context(
    context: "SecurityContext",
) -> int:
    """Return the auth time for the established security context.

    NOTE(review): no implementation body is present in this chunk —
    presumably supplied elsewhere (e.g. by a GSSAPI extension binding);
    confirm against the full file.
    """
|
def getServerTimeForOTP(self) -> datetime.datetime:
    """Fetch the server clock from the enrollments time endpoint via the HTTP Date header.

    Raises ConnectionError when the client is not connected. The returned
    datetime is naive (no tzinfo), even though the header is expressed in GMT.
    """
    # This method seems to be useless to generate the OTP(One Time Password), but I leave it there
    if not self.__connected:
        raise ConnectionError
    r = self.__s.get(f"{BASE_URL}/nvsecurityapi/rest/enrollments/time")
    httpTime = r.headers["Date"]
    time = datetime.datetime.strptime(
        httpTime, '%a, %d %b %Y %H:%M:%S GMT')
    return time
|
Shows ann error if the user specifies an overly complicated system that requires too much memory allocation.
|
def show_size_error(self):
    """Show a modal error explaining that the requested simulation needs too much memory, then quit the frame's main loop."""
    error_text = ('The parameters you have specified require too much memory to be '
                  'computed. Please decrease the simulation time '
                  'and/or the transport rates to rectify this issue.')
    messagebox.showerror(title='Memory Error', message=error_text)
    self.frame.quit()
|
def test_memory_usage(self):
    """After serving a few status-page requests, the Apache workers report nonzero memory usage."""
    manager = ApacheManager()
    # Make sure there are Apache workers alive that have handled a couple of requests.
    for i in range(10):
        manager.fetch_status_page(manager.text_status_url)
    assert sum(manager.memory_usage) > 0
    # TODO Create a WSGI process group so we can perform a useful test here?
    assert isinstance(manager.wsgi_process_groups, dict)
|
Adds an occupant to its list of occupants to keep track of as well as the rlt
|
def add(self, o):
    """Track a new occupant.

    *o* is a sequence whose first item is the occupant and whose next two
    items are its (r, l) position; the occupant is appended to the occupant
    list and its position recorded in the r/l table.
    """
    occupant, r, l = o[0], o[1], o[2]
    self.__occupants.append(occupant)
    self.__position_rlt[occupant] = (r, l)
|
def increment_room_occupancy(room, occupancy_additional):
    """Add *occupancy_additional* to the stored occupancy of *room* in the module-level sqlite database.

    Creates the rooms table if needed and validates the room name first.
    """
    with sqlite3.connect(database) as conn:
        conn.execute('''CREATE TABLE IF NOT EXISTS rooms (name text UNIQUE, capacity integer, occupancy integer, noiseLevel integer);''')
        validate_room(room)
        row = conn.execute('''SELECT occupancy FROM rooms WHERE name=?;''', (room,)).fetchone()
        conn.execute('''UPDATE rooms SET occupancy=? WHERE name=?;''', (row[0] + occupancy_additional, room))
|
Writes output matches to a file in the subfolder "Matches". It supports any iterable as output matches.
|
def file_output(matches: list, output_file_name: str = 'matches.txt'):
    """Write each match's event payloads to test/Matches/<output_file_name>.

    One payload per line, with a blank line after every match. Accepts any
    iterable of matches.
    """
    destination = "test/Matches/" + output_file_name
    with open(destination, 'w') as out:
        for match in matches:
            for event in match.events:
                out.write("%s\n" % event.payload)
            out.write("\n")
|
def write(afile, seqs):
    """Write every sequence in *seqs* to *afile* using writeseq."""
    for s in seqs :
        writeseq(afile, s)
|
Return whether finding group list for a specified node is supported by the resolver (in optional namespace).
|
def has_node_groups(self, namespace=None):
    """Whether the resolver source for *namespace* supports node-to-groups (reverse) lookups.

    Returns False when the source is missing or has no reverse upcall.
    """
    try:
        source = self._source(namespace)
        return bool(source.reverse_upcall)
    except GroupResolverSourceError:
        return False
|
def is_in_group(user, group_name):
    """True when *user* belongs to a group named exactly *group_name* (case-sensitive Django group lookup)."""
    return user.groups.filter(name__exact=group_name).exists()
|
A callback method that is called when the desired properties of the devices device twin are updated.
|
def on_device_twin_desired_updated(self):
    """Accessor for the handler invoked when the device twin's desired properties are updated."""
    return self._on_device_twin_desired_updated
|
def set_update_callback(self):
    """No-op hook; presumably overridden by subclasses to register an update callback — confirm."""
    pass
|
๊ตฐ์ฒญ, ์ง์, ์ ๋ค, ์๋ฅํ๋ค
|
๊ตฐ์ฒญ์ ์ ์ ์ง์์ด ์ ๋
๋ฌด์ฒ ์๋ฅํ ๊ฒ ๊ฐ๋ค.
|
์ฌ์ฃผ๋ ์์ , ์์
, ํ์๊ธฐ, ๊ธ์ฐ๊ธฐ, ๋งํ๊ธฐ, ๊ณ์ฐํ๊ธฐ ๋ฑ๋ฑ ์ด๋ค.
|
inherits the __init__ from Number if just normal number or uses new method if its a list.
|
def __init__(self, number=None):
    """Initialize from either a list or a scalar.

    A list argument is copied into self.number; any other argument is
    coerced with float() and handed to the parent Number initializer.

    Fixes over the original: removes a leftover debug print("hei"),
    drops the unreachable else/raise branch (the if/elif pair already
    covered every case), and copies the list with list() instead of an
    element-by-element loop. float() itself raises TypeError/ValueError
    for unsupported inputs, preserving the error behavior.
    """
    if isinstance(number, list):
        # Copy so later mutation of the caller's list does not leak in.
        self.number = list(number)
    else:
        super().__init__(float(number))
|
def __new__(*args, **kwargs): # real signature unknown
    """Placeholder constructor stub (auto-generated signature); does nothing."""
    pass
|
Iteratively zoom in on the minimum position in an image until the deltapeak value is below `mindiff`
|
def iterative_zoom(image, mindiff=1., zoomshape=[10,10],
                   return_zoomed=False, zoomstep=2, verbose=False,
                   minmax=np.min, ploteach=False, return_center=True):
    """Iteratively zoom in on the extremum of *image* until the peak delta drops below *mindiff*.

    Each iteration locates the current min (or max, per *minmax*), multiplies
    the zoom factor by *zoomstep*, and re-samples a *zoomshape* window around
    that position via zoom.zoom_on_pixel. Returns the refined center (or the
    offset from the image center when return_center is False), optionally with
    the zoomed image and final zoom factor when return_zoomed is True.

    NOTE(review): the mutable default zoomshape=[10,10] is shared across
    calls; harmless as long as it's never mutated, but worth confirming.
    """
    image_zoom = image
    # Choose argmin/argmax to match the supplied extremum function.
    argminmax = np.argmin if "min" in minmax.__name__ else np.argmax
    zf = 1. # "zoom factor" initialized to 1 for the base shift measurement
    offset = np.array([0]*image.ndim,dtype='float') # center offset
    delta_image = (image_zoom - minmax(image_zoom))
    xaxzoom = np.indices(image.shape)
    if ploteach:
        ii = 1
        pl.figure(ii)
        pl.clf()
        pl.pcolor(np.arange(image.shape[0]+1)-0.5,np.arange(image.shape[1]+1)-0.5, image)
        minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
        pl.plot(minpos[1],minpos[0],'wx')
    # check to make sure the smallest *nonzero* difference > mindiff
    while np.abs(delta_image[np.abs(delta_image)>0]).min() > mindiff:
        minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
        center = xaxzoom[0][minpos],xaxzoom[1][minpos]
        offset = xaxzoom[0][minpos]-(image.shape[0]-1)/2,xaxzoom[1][minpos]-(image.shape[1]-1)/2
        zf *= zoomstep
        xaxzoom, image_zoom = zoom.zoom_on_pixel(image, center, usfac=zf,
                                                 outshape=zoomshape, return_xouts=True)
        delta_image = image_zoom-minmax(image_zoom)
        # base case: in case you can't do any better...
        # (at this point, you're all the way zoomed)
        if np.all(delta_image == 0):
            if verbose:
                print("Can't zoom any further. zf=%i" % zf)
            break
        if verbose:
            print(("Zoom factor %6i, center = %30s, offset=%30s, minpos=%30s, min|diff|=%15g" %
                   (zf, ",".join(["%15g" % c for c in center]),
                    ",".join(["%15g" % c for c in offset]),
                    ",".join(["%5i" % c for c in minpos]),
                    np.abs(delta_image[np.abs(delta_image)>0]).min()
                    )))
        if ploteach:
            ii += 1
            pl.figure(ii)
            pl.clf()
            pl.pcolor(centers_to_edges(xaxzoom[1][0,:]),centers_to_edges(xaxzoom[0][:,0]),image_zoom)
            pl.contour(xaxzoom[1],xaxzoom[0],image_zoom-image_zoom.min(),levels=[1,5,15],cmap=pl.cm.gray)
            pl.plot(center[1],center[0],'wx')
            minpos = np.unravel_index(argminmax(image_zoom), image_zoom.shape)
            pl.plot(xaxzoom[1][minpos],
                    xaxzoom[0][minpos],
                    'w+')
            pl.arrow(center[1],center[0],xaxzoom[1][minpos]-center[1],xaxzoom[0][minpos]-center[0],color='w',
                     head_width=0.1/zf, linewidth=1./zf, length_includes_head=True)
            pl.figure(1)
            #pl.contour(xaxzoom[1],xaxzoom[0],image_zoom-image_zoom.min(),levels=[1,5,15],cmap=pl.cm.gray)
            pl.arrow(center[1],center[0],xaxzoom[1][minpos]-center[1],xaxzoom[0][minpos]-center[0],color='w',
                     head_width=0.1/zf, linewidth=1./zf, length_includes_head=True)
    if return_center:
        result = center
    else:
        result = offset
    if return_zoomed:
        return image_zoom,zf,result
    else:
        return result
|
def initial_guess(cts, xmin, xmax, size_window_background_left, size_window_background_right):
    """Estimate peak-fit starting parameters averaged over all images in a scan.

    For each image's [xmin:xmax] window: x0 is the peak position, IM its
    height, H the integral-to-height ratio (a width proxy), and (A, B) the
    intercept/slope of a linear background fitted through the mean of the
    left and right background windows. Returns the per-image averages
    (x0, IM, H, A, B).
    """
    # Automatic finding peak parameters
    number_of_image = len(cts) # nbre of images in the scan
    add_x0 = 0
    add_IM = 0
    add_H = 0
    add_A = 0
    add_B = 0
    for ii in range(number_of_image):
        number_of_pixel = len(cts[ii])
        x = np.arange(0, number_of_pixel, 1)
        step = x[1] - x[0]
        # Obtaining and add x0, IM and H
        add_x0 += (xmin + list_manipulation.index(cts[ii][xmin:xmax], max(cts[ii][xmin:xmax])))
        add_IM += max(cts[ii][xmin:xmax])
        add_H += (trapeze_method(cts[ii][xmin:xmax], step) / max(cts[ii][xmin:xmax]))
        # Obtaining A and B
        background_left = cts[ii][xmin:xmin + size_window_background_left]
        background_right = cts[ii][xmax - size_window_background_right:xmax]
        left_point_value = np.mean(background_left)
        right_point_value = np.mean(background_right)
        left_point_absc = xmin + size_window_background_left / 2
        right_point_absc = xmax - size_window_background_right / 2
        B_temp = (left_point_value - right_point_value) / (left_point_absc - right_point_absc)
        A_temp = left_point_value - B_temp * left_point_absc
        add_A += A_temp
        add_B += B_temp
    x0 = add_x0 / number_of_image
    IM = add_IM / number_of_image
    H = add_H / number_of_image
    A = add_A / number_of_image
    B = add_B / number_of_image
    return x0, IM, H, A, B
|
BOOL SQLInstallTranslatorEx( LPCTSTR lpszTranslator, LPCTSTR lpszPathIn, LPTSTR lpszPathOut, WORD cbPathOutMax, WORD pcbPathOut, WORD fRequest, LPDWORD lpdwUsageCount )
|
def odbccp32_SQLInstallTranslatorEx(jitter, get_str, set_str):
    # Emulation stub for odbccp32!SQLInstallTranslatorEx: pop the stdcall
    # arguments so the emulated stack stays consistent, then abort emulation
    # because this API's behaviour is not implemented.
    ret_ad, args = jitter.func_args_stdcall(["lpszTranslator", "lpszPathIn", "lpszPathOut", "cbPathOutMax", "pcbPathOut", "fRequest", "lpdwUsageCount"])
    raise RuntimeError('API not implemented')
    # Unreachable template boilerplate, kept for when the stub is implemented
    # (note: ret_value is not defined in this function).
    jitter.func_ret_stdcall(ret_ad, ret_value)
|
def odbccp32_SQLInstallTranslator(jitter, get_str, set_str):
    # Emulation stub for odbccp32!SQLInstallTranslator: pop the stdcall
    # arguments, then abort emulation (API behaviour not implemented).
    ret_ad, args = jitter.func_args_stdcall(["lpszInfFile", "lpszTranslator", "lpszPathIn", "lpszPathOut", "cbPathOutMax", "pcbPathOut", "fRequest", "lpdwUsageCount"])
    raise RuntimeError('API not implemented')
    # Unreachable template boilerplate (ret_value is not defined here).
    jitter.func_ret_stdcall(ret_ad, ret_value)
|
Helper function that returns the AmpLayout.Layout name for a given index. See amp.validator.AmpLayout.Layout in validator.proto for details.
|
def GetLayout(validator_pb2, layout_index):
    """Return the AmpLayout.Layout enum value name for *layout_index*.

    See amp.validator.AmpLayout.Layout in validator.proto for details.
    """
    layout_msg = validator_pb2.DESCRIPTOR.message_types_by_name['AmpLayout']
    layout_enum = layout_msg.fields_by_name['supported_layouts'].enum_type
    return layout_enum.values[layout_index].name
|
def get_label(index):
    """Return the human-readable iris feature name for column *index*.

    index -- feature column number, 0 through 3.
    Returns None for any other index (preserves the original implicit
    fall-through behaviour of the if-chain this replaces).
    """
    labels = {
        0: "Sepal Length",
        1: "Sepal Width",
        2: "Petal Length",
        3: "Petal Width",
    }
    return labels.get(index)
|
when removing a room we wan't to be able to remove it from the cache as well
|
def remove_room_exists(self, channel_id, room_id):
    """Remove the cached room-exists entry for (channel_id, room_id).

    NOTE(review): only the signature is present in this snippet; the body
    is missing. Presumably it evicts the entry from the room cache -- see
    the description above -- confirm against the full source.
    """
|
def rm_from_cache(self, *args, **kwargs):
    """Evict the cache entry matching this call's bound arguments, if any."""
    try:
        key = self._prehash(bind_callargs(self._signature, *args, **kwargs))
        del self._cache[key]
    except KeyError:
        # Nothing cached for these arguments -- nothing to evict.
        pass
|
Generates a trajectory given a path. Does it using splines for each of the x, y, z
|
def create_trajectory(self, path):
    """Build a piecewise-polynomial trajectory from *path*.

    Fits a natural cubic spline through the path's x, y and z coordinates
    (with path.times as knots) and stores one Polynomial4D segment per knot
    interval in self.__polynomials. self.__duration is set to the path's
    final timestamp (which includes the EPS-shifted duplicate pose below).

    NOTE(review): mutates *path* by appending a duplicate of its last pose.
    """
    # Added this last pose because in some cases the drone would stop at the before-last pose.
    # Couldn't figure out the reason for this, I plotted the splines and they seemed fine. It
    # looks like some bug in the crazyflie firmware.
    path.add_pose(path.poses[-1], path.times[-1] + EPS)
    self.__duration = path.times[-1]
    # Generates splines for x, y and z, using the t array as knots.
    x_spline = CubicSpline(path.times, [p.position().x for p in path.poses], bc_type='natural')
    y_spline = CubicSpline(path.times, [p.position().y for p in path.poses], bc_type='natural')
    z_spline = CubicSpline(path.times, [p.position().z for p in path.poses], bc_type='natural')
    # Passing to the structure ros will read. Note that the polynomials must have 8 constants.
    for i in range(1, len(path.times)):
        # CubicSpline stores coefficients highest-order first; reverse to
        # lowest-order-first and pad with four zeros to reach 8 constants.
        x_coef = np.concatenate((x_spline.c[:, i - 1][::-1], [0] * 4))
        y_coef = np.concatenate((y_spline.c[:, i - 1][::-1], [0] * 4))
        z_coef = np.concatenate((z_spline.c[:, i - 1][::-1], [0] * 4))
        # Yaw polynomial is all zeros.
        p = Polynomial4D(path.times[i] - path.times[i - 1], x_coef, y_coef, z_coef, [0] * 8)
        self.__polynomials.append(p)
|
def generate_spiral_path_dtu(poses, n_frames=120, n_rots=2, zrate=.5, perc=60):
    """Generate a spiral render path around the average DTU camera pose.

    Spiral radii are taken from the *perc*-th percentile of the absolute
    camera positions; each frame looks at the scene focus point along the
    mean up vector.
    """
    # Radii for the spiral from the perc-th percentile of camera positions,
    # with a trailing 1 for the homogeneous coordinate.
    cam_positions = poses[:, :3, 3]
    radii = np.concatenate(
        [np.percentile(np.abs(cam_positions), perc, 0), [1.]])
    # Reference frame for the spiral.
    cam2world = poses_avg(poses)
    up = poses[:, :3, 1].mean(0)
    z_axis = focus_pt_fn(poses)
    thetas = np.linspace(0., 2. * np.pi * n_rots, n_frames, endpoint=False)
    render_poses = [
        viewmatrix(
            z_axis, up,
            cam2world @ (radii * [np.cos(t), -np.sin(t), -np.sin(t * zrate), 1.]),
            True)
        for t in thetas
    ]
    return np.stack(render_poses, axis=0)
|
Unlike a given repo on the Hub (e.g. remove from favorite list). See also [`like`] and [`list_liked_repos`].
|
def unlike(
    self,
    repo_id: str,
    *,
    token: Optional[str] = None,
    repo_type: Optional[str] = None,
) -> None:
    """Unlike a repo on the Hub (remove it from the user's favorite list).

    Sends DELETE /api/{repo_type}s/{repo_id}/like and raises on HTTP
    errors. repo_type defaults to the model repo type.
    """
    if repo_type is None:
        repo_type = REPO_TYPE_MODEL
    url = f"{self.endpoint}/api/{repo_type}s/{repo_id}/like"
    headers = self._build_hf_headers(token=token)
    response = get_session().delete(url=url, headers=headers)
    hf_raise_for_status(response)
|
def repository(full_name):
    """Delete the repository with *full_name* and every commit linked to it.

    Collects the shas of the repository's commits through the
    commit_repository association, bulk-deletes those commits, then deletes
    the repository row; all in one transaction, session always closed.
    """
    session = db.get_session()
    try:
        repository = session.query(Repository) \
            .filter(Repository.full_name == full_name) \
            .one()
        # NOTE(review): the join ON-clause compares the association's
        # clone_url to this repository's clone_url (constant for the query);
        # the sha pairing happens in the filter below -- confirm this matches
        # the schema's intent.
        commit_shas = session.query(Commit.sha) \
            .join(
                commit_repository,
                commit_repository.c.repository_clone_url == repository.clone_url,
            ) \
            .filter(commit_repository.c.commit_sha == Commit.sha) \
            .all()
        commit_shas = [c[0] for c in commit_shas]
        if commit_shas:
            # Bulk delete; synchronize_session=False skips syncing in-memory
            # objects, which is fine since the session is closed right after.
            session.query(Commit) \
                .filter(Commit.sha.in_(commit_shas)) \
                .delete(synchronize_session=False)
        session.query(Repository) \
            .filter(Repository.full_name == full_name) \
            .delete()
        session.commit()
    finally:
        session.close()
|
๊ตญ์ธ๋ก ๋๊ฐ์ง ์๊ณ ๋ ๋ฒ ๋ฅผ๋ฆฐ ํํ๋ชจ๋ ์ค์ผ์คํธ๋ผ๊ฐ ์ด์ํ๋ ๊ณต์ฐ ์คํฉ์ค๊ณ ์์คํ
์ ํตํด ๊ณต์ฐ์ ๊ด๋ํ ์ ์๋ค.
|
๋ฒ ๋ฅผ๋ฆฐ ํํ๋ชจ๋ ์ค์ผ์คํธ๋ผ๊ฐ ๊ณต์ฐ ์คํฉ์ค๊ณ ์์คํ
์ ๊ตฌ์ถํจ์ผ๋ก ์ธํด ๊ตญ๋ด์์๋ ์ธํฐ๋ท์ ํตํด ๊ณต์ฐ ๊ด๋์ด ๊ฐ๋ฅํ๋ค.
|
์๋ก๊ฐ ํ๋ ๊ณต์ฐ ๊ด๋์ ์์ธ, ๋ถ์ฐ์ ์ค๊ฐ๋ฉฐ ํ๊ณ ๊ณต์ฐ์ ๋ง์น๋ฉด ํจ๊ป ์์ฃผ๋ฅผ ๋ง์
จ๋ค.
|
The number of seconds connection draining is active. Acceptable values are from 1 second to 3600 seconds.
|
def drain_timeout_in_sec(self) -> int:
    """The number of seconds connection draining is active (1-3600)."""
    return pulumi.get(self, "drain_timeout_in_sec")
|
def KeepAliveTimeout(self):
    # Generated accessor: returns the 'keepAliveTimeout' attribute from the
    # underlying API object.
    return self._get_attribute('keepAliveTimeout')
|
๊ฑ๊ฐ ํ ๋ฌ์ ๋ฒ๋ ๋์ ์ผ๋ง์ผ
|
๊ฑ๊ฐ ํ ๋ฌ์ ๋ฒ๋ ๋์ด ๋๋๋ฐ?
|
๊ทธ๋ฌ๋ฉด ๊ฐ ๋๋ง๋ค ์ผ๋ง์ฉ ๊ฑท๋ ๊ฑฐ์ผ ์๋๋ฉฐ ๋งค๋ฌ ์ผ๋ง์ฉ ๊ฑท๋ ๊ฑฐ์ผ?
|
Goes through all functions to calculate all important numbers for the tree Calculates num_child, num_grand, tree_size, depth
|
def calc_all(self):
    """Recompute every cached statistic for this node's subtree:
    num_child, num_grand, tree_size and the depth metrics."""
    self.calc_num_child() # calculate number of children
    self.calc_num_grand() # calculate number of grandkids
    self.calc_tree_size() # calculate number of tree size
    self.calc_depth()
    self.calc_max_depth()
|
def calc_num_child(self):
    """Recursively set self.num_child (count of direct children) on this
    node and on every descendant."""
    self.num_child = len(self.children) # count num of kids
    for child in self.children: # Go thru all kids
        child.calc_num_child() # Ask each kid to take stock also
|
Fit a quadratic around a point and find its local minimum/maximum Center should be in row,column format
|
def fitquadratic(im, center, n):
    """Fit a quadratic surface to an n-by-n patch of *im* around *center*
    and return the location of its stationary point (local min/max).

    center -- (row, column) integer coordinates of the patch centre;
        n must be odd so the patch is symmetric about it.
    Returns (xymin, coef) where xymin is a (2, 1) array holding the
    stationary point in image coordinates as [[column], [row]], and coef is
    the fitted coefficient vector [f a b c d e] of
    f + a*x^2 + b*y^2 + c*x*y + d*x + e*y.
    """
    half = int((n - 1) / 2)
    start = center - half
    patch = im[start[0]:(start[0] + n), start[1]:(start[1] + n)]
    # Patch coordinates centred on the middle pixel: x along columns, y rows.
    x0 = (n + 1) / 2
    x, y = np.meshgrid(range(n), range(n))
    x = x.flatten() + 1 - x0
    y = y.flatten() + 1 - x0
    # Design matrix for coefficient vector [f a b c d e]
    A = np.vstack([np.ones(n * n), x**2, y**2, x * y, x, y]).T
    # rcond=None opts into the current numpy default cutoff (the old call
    # without rcond triggered a FutureWarning and used a legacy default).
    coef = np.linalg.lstsq(A, patch.flatten(), rcond=None)[0]
    # Stationary point: solve [2a c; c 2b][x; y] = -[d; e]
    B = np.array([[2 * coef[1], coef[3]], [coef[3], 2 * coef[2]]])
    rhs = np.array([[-coef[4]], [-coef[5]]])
    xymin = np.linalg.lstsq(B, rhs, rcond=None)[0]
    # Shift from patch-relative back to image coordinates (col, row).
    xymin += np.array([[center[1]], [center[0]]])
    return xymin, coef
|
def closest_point_to(self, x):
    """Return (signed_distance, point) for the point on any segment nearest
    to *x*; the distance is negative when *x* lies inside the shape."""
    best_dist, best_pt = np.inf, None
    for seg in self.segments:
        dist, pt = seg.closest_point_to(x)
        if dist < best_dist:
            best_dist, best_pt = dist, pt
    sign = -1 if self.is_inside(x) else 1
    return np.copysign(best_dist, sign), best_pt
|
Return an instance of BlackjackMDP where peeking is the optimal action at least 10% of the time.
|
def peekingMDP():
    """Return a BlackjackMDP instance where peeking is the optimal action at
    least 10% of the time (card values chosen so that knowing the next card
    near the threshold is worth the peek cost)."""
    # BEGIN_YOUR_CODE (our solution is 2 lines of code, but don't worry if you deviate from this)
    return BlackjackMDP(cardValues=[4, 5, 10, 11], multiplicity=1, threshold=20, peekCost=1)
    # END_YOUR_CODE
|
def _greedy_policy(self, state):
    # Abstract hook: subclasses must return the greedy action for *state*.
    raise NotImplementedError
|
์ธํ๋ฒ ์ดํฐ์์ ๋ณ์๋ฆฌ ์์ ๋ถํํ๋ ๋ฐ ์ผ๋ง๋ ๊ฑธ๋ฆฝ๋๊น?
|
๊ทธ๋ ๋ค๋ฉด ์์ ๋ ๋ญ ์์ด ๋ถํํ๋ ๋ฐ ์ผ๋ง๋ ๊ฑธ๋ฆด๊น์? ๊ทธ์ ๋ํ ๋ต์ ์ ํํ 21์ผ์
๋๋ค. ๊ทธ๋ฌ๋ ์ฌ๋ฌ ๊ฐ์ง ์ํฉ์ ๋ฐ๋ผ ๋ฌ๋ผ์ง ์ ์๋ค๋ ์ ์ ์ผ๋์ ๋์ด์ผ ํฉ๋๋ค. ์ด๋ค ์ด์ ๋ก ์ธํด ์จ๋๊ฐ ์ ์ ์จ๋ ์ดํ๋ก ๋จ์ด์ง ๊ฒฝ์ฐ, ์ผ๋ถ ์์ 22์ผ, 23์ผ, ๋๋ ์ฌ์ง์ด 24์ผ์ด๋ 25์ผ์ ๋ถํํ ์ ์์ต๋๋ค. ์์ ๋ ๋ญ ์์ ์ผ๋ฐ์ ์ผ๋ก ์ ํํ 21์ผ์ด ๊ฑธ๋ฆฌ๋ ๋ฐ๋ฉด, ๋๋ถ๋ถ์ ์ค๋ฆฌ ์์ 28์ผ์ด ๊ฑธ๋ฆฌ๋ฉฐ, ๋ฌด์ค์ฝ๋น ์ค๋ฆฌ๋ฅผ ์ ์ธํฉ๋๋ค.
|
๋ญ ์์ ์ผ๋ฐ์ ์ผ๋ก ๋ถํํ๊ธฐ ์ ์ ์ฝ 21์ผ ๋์ ์ธํ๋ฒ ์ด์
๋ฉ๋๋ค.
|
๋ฏผ์ค์ด, ์ฌ๋ฆ, ํด๊ฐ, ๊ธฐ์ฐจ, ์ฌํ, ๋ ๋๋ค, ํ๋ค
|
๋ฏผ์ค์ด๋ ์ฌ๋ฆ ํด๊ฐ๋ก ํผ์ ๊ธฐ์ฐจ ์ฌํ์ ๋ ๋๊ธฐ๋ก ํ๋ค.
|
์ฌ์ฃผ๋ ์์ , ์์
, ํ์๊ธฐ, ๊ธ์ฐ๊ธฐ, ๋งํ๊ธฐ, ๊ณ์ฐํ๊ธฐ ๋ฑ๋ฑ ์ด๋ค.
|
์ ์ ํ์๋ฐ์์ ํตํด ์ธํฌ์ ์กฐ์ง์ ๋
์ฑ์ ์ผ์ผํค๋๊ฑด ๋ญ์ผ?
|
\( \mathrm{O}_{2}^{-} \)์ ์ ์ ํ์๋ฐ์์ ํตํด ์ธํฌ์ ์กฐ์ง์ ๋
์ฑ์ ์ผ์ผ์ผ ์ง๋ณ์ ์ ๋ฐ์ํค๋ฉฐ, ์ด๋ ๋
ธํ์ ๋ฐ์ ํ ๊ด๋ จ์ด ์๋ ๊ฒ์ผ๋ก ์๋ ค์ ธ ์๋ค.
|
์ด๋ ๋ณธ ์ฐ๊ตฌ์์ ๋ํ๋ฐฐ์๋ ํผํฉ๋ฏธ์๋ฌผ์ด ํ์์ ๊ทน ๋ฐ์๊ธฐ ๋ด์์ ์ ๊ทน์ผ๋ก ๋ถํฐ ์ ์๋ฅผ ๋ฐ์ \( \mathrm{CO}_{2} \) ๋ฅผ ์์ธํธ์ฐ์ผ๋ก ํ์ํ๋ ์๋ฌผ์ ๊ธฐํ์ฑ๋ฐ์์ ์ผ์ผํค๋ ๊ฒ์ ๋ํ๋ธ๋ค.
|
์ฌ์ฉ๋๋ค, ์น๋ฃ์ , ํต์ฆ, ๋ด๋
|
๋ด๋
์ ์ฃผ๋ก ํต์ฆ ์น๋ฃ์ ๋ก ์ฌ์ฉ๋์ด ์๋ค.
|
์๋ฌ๋์ ์๋
์ผ๋ก ์ฃผ๋ก ์๊ธ, ํ์ถ, ํฅ๋ฃ, ์ผ์ดํผ ๋ฑ์ด ์ฌ์ฉ๋๊ณ ์๋ค.
|
Return a dictionary of css dictionaries representing this scheme. This can be fed to the ly.colorize.format_stylesheet() function.
|
def css_scheme(self):
    """Return this scheme as a dictionary of css dictionaries, suitable for
    ly.colorize.format_stylesheet().

    The None key holds the base/default styles; each mode/group gets its
    own sub-dictionary keyed by style name.
    """
    scheme = {None: {name: fmt2css(fmt)
                     for name, fmt in self.defaultStyles.items()}}
    for mode, styles in self.allStyles.items():
        scheme[mode] = {name: fmt2css(fmt) for name, fmt in styles.items()}
    return scheme
|
def stylers(self):
    # Lazily build the stylers from the project's 'style' config section on
    # first access; reuse the cached instance afterwards.
    if self._stylers is None:
        self._stylers = styler_factory(self.project_config().get('style', {}))
    return self._stylers
|
Verifies that the report is not accredited if it contains a sample application that is not accredited.
|
def test_get_report_accreditation_false(report_api_mip_dna, mip_analysis_api, case_mip_dna):
    """The report must not be accredited if any sample application isn't."""
    # GIVEN a list of samples when one of them is not accredited
    mip_metadata = mip_analysis_api.get_latest_metadata(case_mip_dna.internal_id)
    samples = report_api_mip_dna.get_samples_data(case_mip_dna, mip_metadata)
    samples[0].application.accredited = False
    # WHEN retrieving the report accreditation
    accredited = report_api_mip_dna.get_report_accreditation(samples)
    # THEN check that the report is not accredited
    assert not accredited
|
def test_get_sample_bad_sample(cli_runner: CliRunner, base_context: CGConfig):
    """Getting a non-existing sample should succeed (exit 0) with a warning."""
    # GIVEN an empty database
    # WHEN getting a sample
    name = "dummy_name"
    result = cli_runner.invoke(get, ["sample", name], obj=base_context)
    # THEN it should warn about missing sample id instead of getting a sample
    # it will not fail since the API accepts multiple samples
    assert result.exit_code == 0
|
Search for documents based on type filters, search_text or order_by and return a queryset of document objects
|
def search_documents(self, types=None, search_text=None, order_by=None):
    """Filter this queryset by document types and free-text search, then
    optionally order it; returns the resulting queryset."""
    qs = self
    if types and isinstance(types, list) and types[0] != 'all':
        qs = qs.filter(type__in=types)
    if search_text:
        text_filter = (Q(name__icontains=search_text)
                       | Q(description__icontains=search_text)
                       | Q(search__icontains=search_text))
        qs = qs.filter(text_filter)
    if order_by:
        # TODO: Validate that order_by is a valid sort parameter
        qs = qs.order_by(order_by)
    return qs
|
def search(self, query_string, fields=None, backend='default'):
    """Search this object for *query_string* with the given search backend,
    restricted to *fields* when provided.

    Bug fix: *fields* was previously discarded -- the backend call
    hard-coded fields=None -- so callers could never restrict the searched
    fields. It is now forwarded.
    """
    search_backend = get_search_backend(backend)
    return search_backend.search(query_string, self, fields=fields)
|
Performs a single read from the socket and hands the data off to the h2 connection object.
|
def _single_read(self):
    """Perform one read from the socket and dispatch the resulting h2 events
    to the per-stream handlers.

    Raises ConnectionError if the connection was already closed locally, or
    if the peer terminated it (GoAway) with a non-zero error code.
    """
    # Begin by reading what we can from the socket.
    #
    # Concurrency
    #
    # Synchronizes reading the data
    #
    # I/O occurs while the lock is held; waiting threads will see a delay.
    with self._lock:
        if self._sock is None:
            raise ConnectionError('tried to read after connection close')
        self._sock.fill()
        data = self._sock.buffer.tobytes()
        self._sock.advance_buffer(len(data))
        with self._conn as conn:
            events = conn.receive_data(data)
        # Record which streams produced events; -1 is the getattr default
        # for events without a stream_id, 0 is the connection itself.
        stream_ids = set(getattr(e, 'stream_id', -1) for e in events)
        stream_ids.discard(-1) # sentinel
        stream_ids.discard(0) # connection events
        self.recent_recv_streams |= stream_ids
        for event in events:
            if isinstance(event, h2.events.DataReceived):
                # Replenish the receive window for the consumed bytes.
                self._adjust_receive_window(event.flow_controlled_length)
                self.streams[event.stream_id].receive_data(event)
            elif isinstance(event, h2.events.PushedStreamReceived):
                if self._enable_push:
                    self._new_stream(event.pushed_stream_id, local_closed=True)
                    self.streams[event.parent_stream_id].receive_push(event)
                else:
                    # Servers are forbidden from sending push promises when
                    # the ENABLE_PUSH setting is 0, but the spec leaves the
                    # client action undefined when they do it anyway. So we
                    # just refuse the stream and go about our business.
                    self._send_rst_frame(event.pushed_stream_id, 7)
            elif isinstance(event, h2.events.ResponseReceived):
                self.streams[event.stream_id].receive_response(event)
            elif isinstance(event, h2.events.TrailersReceived):
                self.streams[event.stream_id].receive_trailers(event)
            elif isinstance(event, h2.events.StreamEnded):
                self.streams[event.stream_id].receive_end_stream(event)
            elif isinstance(event, h2.events.StreamReset):
                if event.stream_id not in self.reset_streams:
                    self.reset_streams.add(event.stream_id)
                    self.streams[event.stream_id].receive_reset(event)
            elif isinstance(event, h2.events.ConnectionTerminated):
                # If we get GoAway with error code zero, we are doing a
                # graceful shutdown and all is well. Otherwise, throw an
                # exception.
                self.close()
                # If an error occured, try to read the error description from
                # code registry otherwise use the frame's additional data.
                if event.error_code != 0:
                    try:
                        name, number, description = errors.get_data(
                            event.error_code
                        )
                    except ValueError:
                        error_string = (
                            "Encountered error code %d" % event.error_code
                        )
                    else:
                        error_string = (
                            "Encountered error %s %s: %s" %
                            (name, number, description)
                        )
                    raise ConnectionError(error_string)
            else:
                log.info("Received unhandled event %s", event)
        self._send_outstanding_data(tolerate_peer_gone=True, send_empty=False)
|
def read(self):
    """Advance the frame-reading state machine by one socket operation.

    In WAIT_LEN the frame length is read via _read_len(); in WAIT_MESSAGE
    the remaining payload bytes are received, and once the full frame has
    arrived the status moves to WAIT_PROCESS. A zero-byte recv (peer
    disconnect mid-frame) logs an error and closes the connection.
    """
    assert self.status in (WAIT_LEN, WAIT_MESSAGE)
    if self.status == WAIT_LEN:
        self._read_len()
        # go back to the main loop here for simplicity instead of
        # falling through, even though there is a good chance that
        # the message is already available
    elif self.status == WAIT_MESSAGE:
        read = self.socket.recv(self.len - len(self.message))
        if len(read) == 0:
            logging.error("can't read frame from socket (get %d of %d bytes)" %
                (len(self.message), self.len))
            self.close()
            return
        self.message += read
        if len(self.message) == self.len:
            self.status = WAIT_PROCESS
|
Verify that a generated quote came from a trusted TPM and matches the previously obtained PCR values
|
def quote_verify(data, validation, aik, pcrvalues):
    """Verify a TPM quote: check that *validation* is an RSA signature over
    SHA1(*data*) by the AIK, and that *data* embeds the composite hash of
    *pcrvalues*.

    data -- quote blob; bytes 8:28 hold the 20-byte PCR composite hash.
    validation -- signature blob produced by the TPM.
    aik -- attestation identity key object exposing get_pubkey().
    pcrvalues -- dict mapping PCR index -> PCR value bytes.
    Returns True only if both the signature and the composite hash match.
    """
    select = 0
    maxpcr = 0
    # Verify that the validation blob was generated by a trusted TPM
    pubkey = aik.get_pubkey()
    n = m2.bin_to_bn(pubkey)
    n = m2.bn_to_mpi(n)
    e = m2.hex_to_bn("010001")  # standard RSA public exponent 65537
    e = m2.bn_to_mpi(e)
    rsa = M2Crypto.RSA.new_pub_key((e, n))
    m = hashlib.sha1()
    m.update(data)
    md = m.digest()
    try:
        # NOTE(review): a falsy return from verify() without an exception is
        # not treated as failure here -- confirm M2Crypto always raises.
        ret = rsa.verify(md, str(validation), algo='sha1')
    except M2Crypto.RSA.RSAError:
        return False
    # And then verify that the validation blob corresponds to the PCR
    # values we have
    values = bytearray()
    for pcr in sorted(pcrvalues):
        values += pcrvalues[pcr]
        select |= (1 << pcr)
        maxpcr = pcr
    # PCR selection bitmap: 2 bytes when every PCR index is < 16, else 4.
    if maxpcr < 16:
        header = struct.pack('!H', 2)
        header += struct.pack('@H', select)
        header += struct.pack('!I', len(values))
    else:
        header = struct.pack('!H', 4)
        header += struct.pack('@I', select)
        header += struct.pack('!I', len(values))
    pcr_blob = header + values
    m = hashlib.sha1()
    m.update(pcr_blob)
    pcr_hash = m.digest()
    # data[8:28] is the composite hash inside the quote structure; return
    # the comparison directly (was a verbose if/else returning True/False).
    return pcr_hash == data[8:28]
|
def test_change_secret(self):
    """Changing the notary's secret must change the computed signature."""
    sig1 = self.notary.compute_signature(self.nb)
    self.notary.secret = b"different"
    sig2 = self.notary.compute_signature(self.nb)
    self.assertNotEqual(sig1, sig2)
|
this function returns the list of animals with a new session within the last few ('days_passed') days
|
def get_current_animals(root: Root, days_passed: int = 4):
    """Return the animals under *root* whose most recent session is at most
    *days_passed* days old.

    Logs a warning and returns [] when there are no animals at all; animals
    without any session are skipped.
    """
    now = datetime.datetime.now()
    all_animals = root.get_all_animals()
    if not all_animals:  # idiomatic empty check (was: == [])
        logging.warning('No animal found!')
        return []
    recent_animals = []
    for animal in all_animals:
        animal_tag = File(root, animal)
        sessions = animal_tag.get_all_sessions()
        if not sessions:
            continue
        last_session_date = animal_tag.get_session_date(sessions[-1])
        if (now - last_session_date).days <= days_passed:
            recent_animals.append(animal)
    return recent_animals
|
def ended_recently(self):
    """Queryset of CLOSED contests whose combined lifecycle window
    (submissions + voting + recency) has not yet elapsed, newest first."""
    return self.filter(
        status=Contest.CLOSED,
        created_at__gt=timezone.now()-(SUBMISSIONS_LENGTH+VOTING_LENGTH+RECENT_LENGTH)
    ).order_by("-created_at")
|
Activated at the end of the with statement. It automatically releases the lock if it isn't locked.
|
def __exit__(self, exc_type, exc_val, exc_tb):
    """Context-manager exit: release the lock if this object still holds it.

    Exceptions are never suppressed (implicitly returns None).
    """
    if self.is_locked:
        self.release()
|
def unlock(self):
    # Delegate to the shared lock utility to release the lock held on
    # self.lockfd.
    utilities.lock.unlock(self.lockfd)
|
Returns the mean squared error for model (a polynomial of the specified degree) on X and y.
|
def mse(X, y, degree, model):
    """Return the mean squared errors between (X, y) and the model's stored
    values (model[0] compared against X, model[1] against y).

    Bug fix: the previous version divided .mean() by len(...) again, which
    produced MSE / n instead of the mean squared error.

    NOTE(review): *degree* is unused; kept for interface compatibility.
    """
    MSE_x = np.square(np.subtract(X, model[0])).mean()
    MSE_y = np.square(np.subtract(y, model[1])).mean()
    return MSE_x, MSE_y
|
def error(self, x: np.array, y: np.array) -> float:
    """Misclassification rate: fraction of predictions differing from *y*."""
    predictions = self.predict(x)
    return np.mean(predictions != y)
|
์๋ค, ์ค์, ๋
, ๋
, ๊ฐ์น, ๊ทํ๋ค
|
๊ทธ ๋
์ ์ค์์ด ์๊ณ ๊ฒฝ์ ์ ๊ฐ์น๊ฐ ๊ทํ ๋
์ด๋ค.
|
์๋ฐ, ์ฐธ์ธ, ์กฐ๋กฑ๋ฐ, ์์ธ๋ฏธ, ์ค์ด, ๊ฐ์ง๋ฅผ ์ ์ฑ๊ป ์ฌ์ด ๋ฌด๋ญ๋ฌด๋ญ ํค์ ๋ค.
|
ํต์์์ ํต์ ํ์กฐ์ ์ ์ฌ์์ฌ์
ํ์ฝ์ ์ฒด๊ฒฐํ ๊ฑด ์ด๋์ง?
|
ํ๊ตญํ ์ง์ฃผํ๊ณต์ฌ(LH)๋ ํต์์ ์์ฌ ์ ์sb์กฐ์ ์์์ ๊ฒฝ์๋จ๋, ํต์์์ ํต์ ํ์กฐ์ ์ ์ฌ์์ฌ์
๊ธฐ๋ณธํ์ฝ์ ์ฒด๊ฒฐํ๋ค๊ณ 30์ผ ๋ฐํ๋ค
|
ํ๊ตญํ ์ง์ฃผํ๊ณต์ฌ(LH)๋ ํต์ ํ์กฐ์ ์ ์ฌ์์ฌ์
๋ง์คํฐํ๋ ๊ตญ์ ๊ณต๋ชจ ๋น์ ์์ผ๋ก ํฌ์ค์ฝ์์ด์ค์จ ์ปจ์์์์ ํต์ ์บ ํ ๋ง๋ ๋ฅผ ์ต์ข
์ ์ ํ๋ค๊ณ 10์ผ ๋ฐํ๋ค
|
Given a RefID and an optional name, create a profile and then return that newly created profile.
|
def new_profile_by_refid(self, refid: Optional[str], name: Optional[str], pid: Optional[int]) -> Optional[ValidatedDict]:
    """Create and persist a default profile for the user behind *refid*,
    then return the stored profile.

    Returns None when refid is None. A missing name falls back to 'ใชใ'
    ("nameless") and a missing pid to 51.
    """
    if refid is None:
        return None
    if name is None:
        name = 'ใชใ'
    if pid is None:
        pid = 51
    # Resolve the card refid to our internal user id for this game/version.
    userid = self.data.remote.user.from_refid(self.game, self.version, refid)
    defaultprofile = ValidatedDict({
        'name': name,
        'pid': pid,
        'settings': {
            'flags': 223 # Default to turning on all optional folders
        },
    })
    self.put_profile(userid, defaultprofile)
    # Re-read so the caller receives the profile as it was persisted.
    profile = self.get_profile(userid)
    return profile
|
def post_create_user(sender, instance, created, raw, **kwargs):
    # Signal handler: ensure every newly created user gets a UserProfile.
    # Skipped for raw saves (e.g. fixture loading) and for plain updates.
    if created and not raw:
        user = instance
        # NOTE(review): this tuple's 'created' shadows the signal argument;
        # harmless here since neither value is used afterwards.
        (user_profile, created) = UserProfile.objects.get_or_create(
            user=user
        )
|
Try to match country name with its code. Name of the city helps when country_name is "Korea".
|
def match_country_name_to_its_code(country_name, city=''):
    """Resolve a country name (including alternative spellings) to its ISO
    code; *city* disambiguates the ambiguous name "Korea".

    Returns None when the name is empty or cannot be matched.
    """
    if not country_name:
        return None
    normalized = country_name.upper().replace('.', '').strip()
    code = country_to_iso_code.get(normalized)
    if code:
        return code
    if normalized == 'KOREA':
        # Only map "Korea" to South Korea when the city confirms it.
        return 'KR' if city.upper() in south_korean_cities else None
    for iso_code, spellings in countries_alternative_spellings.items():
        if normalized in spellings:
            return iso_code
    return None
|
def find_code(city_name):
    """Look up the alpha-3 country code for *city_name* via its subdivision;
    prints a warning to stderr and returns 'XXX' when no match is found."""
    try:
        # Resolve the subdivision, then map its alpha-2 country code to the
        # alpha-3 form.
        code_2 = pycountry.subdivisions.lookup(city_name)
        code_3 = pycountry.countries.get(alpha_2=code_2.country_code)
        return code_3.alpha_3
    except LookupError:
        print(f'Sorry, could not find a code for {city_name}', file=sys.stderr)
        return 'XXX'
|
takes a path to a folder and write all filenames recursively (files of all sub folders to)
|
def get_all_file_names(folderpath, out="../output.txt"):
    """Append the names of all entries under *folderpath* (recursing into
    sub-folders) to the file *out*, one name per line."""
    print("folder pathh: ", folderpath)
    entries = os.listdir(folderpath)
    print("file_list:")
    print(entries)
    with open(out, "a") as sink:
        for entry in entries:
            sink.write(str(entry) + "\n")
            child_path = os.path.join(folderpath, str(entry))
            # isdir() follows symlinks, so linked directories recurse too.
            if os.path.isdir(child_path):
                print("Is dir", child_path)
                get_all_file_names(child_path, out)
|
def get_files_in_folder(folder):
    """Return the full paths of all entries directly inside *folder*."""
    # os.path.join deals with / vs \ separators appropriately.
    return [os.path.join(folder, name) for name in os.listdir(folder)]
|
์ฝํฌ๋ฆฌํธ ์ฌ๋๋ธ ๋น์ฉ
|
์ฐ๋ฆฌ์ ์ฝํฌ๋ฆฌํธ ์ฌ๋๋ธ ์์ฌ ์ถ์ ์ ๋ฐ๋ฅด๋ฉด, ์ฝํฌ๋ฆฌํธ ์ฌ๋๋ธ์ ํ๊ท ์ต์ ๋น์ฉ์ ํ๋ฐฉํผํธ๋น 1.36๋ฌ๋ฌ์ด๋ฉฐ, ์ต๋ ๋น์ฉ์ 1.88๋ฌ๋ฌ์
๋๋ค. ๋น๊ตํ์๋ฉด, ์๋ฉํธ ์ฌ๋๋ธ์ ํ๋ฐฉํผํธ๋น ๊ฐ๊ฒฉ์ 5.50๋ฌ๋ฌ์
๋๋ค.
|
์ฝํฌ๋ฆฌํธ ์ฌ๋๋ธ์ ํฌ์ฅ์ฌ์ ๋น์ฉ ๋น๊ต. ์ผ๋ฐ์ ์ผ๋ก ํ์ค ์ฝํฌ๋ฆฌํธ ์ฌ๋๋ธ๋ ๋์๋ณด๋ค ์ ๊ณฑํผํธ๋น ๋น์ฉ์ด ๋ฎ์ต๋๋ค. ์ผ๋ฐ์ ์ผ๋ก ํ์ค ์ฝํฌ๋ฆฌํธ ์ฌ๋๋ธ ๋์ ํฌ์ฅ์์ ์ ํํ ๊ฒฝ์ฐ 10%-15% ๋ ์ง๋ถํ๊ฒ ๋ฉ๋๋ค. ์คํฌํ ์ฝํฌ๋ฆฌํธ๋ก ์
๊ทธ๋ ์ด๋ํ๊ธฐ๋ก ๊ฒฐ์ ํ๋ฉด, ๋๋ถ๋ถ์ ๊ฒฝ์ฐ ํฌ์ฅ์์ ๋น์ฉ์ด ๋์ผํ๊ฑฐ๋ ๋ ์ ๋ ดํ ๊ฐ๋ฅ์ฑ์ด ๋์ต๋๋ค.
|
Processes a range of filenames and labels in the MPII dataset corresponding to the given thread index.
|
def _process_image_files_single_thread(coder, thread_index, ranges, mpii_dataset):
    """Write this thread's share of MPII images to sharded TFRecord files.

    Splits the [start, end) range assigned to *thread_index* into per-shard
    sub-ranges and writes one ZLIB-compressed .tfrecord per shard into
    FLAGS.train_dir (file prefix 'train' or 'test' per FLAGS.is_train).

    NOTE(review): shards_per_thread is a true (float) division; this
    assumes FLAGS.train_shards is divisible by FLAGS.num_threads -- confirm.
    """
    if FLAGS.is_train:
        base_name = 'train'
    else:
        base_name = 'test'
    shards_per_thread = FLAGS.train_shards/FLAGS.num_threads
    shard_ranges = pose_util.get_n_ranges(ranges[thread_index][0],
                                          ranges[thread_index][1],
                                          shards_per_thread)
    for shard_index in range(len(shard_ranges)):
        # Globally unique shard number across all threads.
        tfrecord_index = int(thread_index*shards_per_thread + shard_index)
        tfrecord_filename = '{}{}.tfrecord'.format(base_name, tfrecord_index)
        tfrecord_filepath = os.path.join(FLAGS.train_dir, tfrecord_filename)
        options = tf.python_io.TFRecordOptions(
            compression_type=tf.python_io.TFRecordCompressionType.ZLIB)
        with tf.python_io.TFRecordWriter(path=tfrecord_filepath, options=options) as writer:
            shard_start = shard_ranges[shard_index][0]
            shard_end = shard_ranges[shard_index][1]
            for img_index in range(shard_start, shard_end):
                # Read the raw JPEG bytes and serialize one example per image.
                with tf.gfile.FastGFile(name=mpii_dataset.img_filenames[img_index], mode='rb') as f:
                    image_jpeg = f.read()
                _write_example(coder,
                               image_jpeg,
                               mpii_dataset.people_in_imgs[img_index],
                               writer)
|
def parallel_search_range(data, query_range, n_processor):
    """Range search via hash partitioning: return every record of *data*
    whose key falls in the inclusive [query_range[0], query_range[1]]
    interval, probing only the partition that can hold each candidate key.

    NOTE(review): the Pool is never close()d/join()ed; workers are left to
    be reclaimed when the pool is garbage-collected. pool.apply is blocking,
    so the queries actually run sequentially.
    """
    results = []
    pool = Pool(processes=n_processor)
    ### START CODE HERE ###
    # Hash-partition the data once up front.
    partition_result=h_partition(data,n_processor)
    for query in range(query_range[0],query_range[1]+1):
        # Only the bucket s_hash(query) can contain this key.
        hash_value=s_hash(query,n_processor)
        working_dataset=partition_result[hash_value]
        indices=pool.apply(linear_search,[working_dataset,query])
        for index in indices:
            results.append(working_dataset[index])
    ### END CODE HERE ###
    return results
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.